CVE ID (string, 13-43 chars, nullable) | CVE Page (string, 45-48 chars, nullable) | CWE ID (string, 90 classes) | codeLink (string, 46-139 chars) | commit_id (string, 6-81 chars) | commit_message (string, 3-13.3k chars, nullable) | func_after (string, 14-241k chars) | func_before (string, 14-241k chars) | lang (string, 3 classes) | project (string, 309 classes) | vul (int8, 0 or 1)
---|---|---|---|---|---|---|---|---|---|---|
CVE-2016-3698
|
https://www.cvedetails.com/cve/CVE-2016-3698/
|
CWE-284
|
https://github.com/jpirko/libndp/commit/a4892df306e0532487f1634ba6d4c6d4bb381c7f
|
a4892df306e0532487f1634ba6d4c6d4bb381c7f
|
libndp: validate the IPv6 hop limit
None of the NDP messages should ever come from a non-local network; as
stated in RFC4861's 6.1.1 (RS), 6.1.2 (RA), 7.1.1 (NS), 7.1.2 (NA),
and 8.1. (redirect):
- The IP Hop Limit field has a value of 255, i.e., the packet
could not possibly have been forwarded by a router.
This fixes CVE-2016-3698.
Reported by: Julien BERNARD <[email protected]>
Signed-off-by: Lubomir Rintel <[email protected]>
Signed-off-by: Jiri Pirko <[email protected]>
|
uint8_t ndp_msgra_curhoplimit(struct ndp_msgra *msgra)
{
return msgra->ra->nd_ra_curhoplimit;
}
|
uint8_t ndp_msgra_curhoplimit(struct ndp_msgra *msgra)
{
return msgra->ra->nd_ra_curhoplimit;
}
|
C
|
libndp
| 0 |
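A minimal user-space sketch of the hop-limit rule the commit above enforces: any NDP message whose IPv6 Hop Limit is not 255 must have crossed a router and is dropped before parsing. The struct and helper names here are hypothetical stand-ins, not libndp's API:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* RFC 4861: RS/RA/NS/NA/Redirect must arrive with Hop Limit 255,
 * i.e. the packet cannot have been forwarded by a router. */
#define NDP_REQUIRED_HOP_LIMIT 255

/* Hypothetical reduced view of a received NDP message. */
struct ndp_msg_view {
    uint8_t ipv6_hop_limit;  /* taken from the IPv6 header / ancillary data */
};

static bool ndp_msg_hop_limit_valid(const struct ndp_msg_view *msg)
{
    return msg->ipv6_hop_limit == NDP_REQUIRED_HOP_LIMIT;
}

int main(void)
{
    struct ndp_msg_view forwarded = { .ipv6_hop_limit = 64 };
    struct ndp_msg_view on_link   = { .ipv6_hop_limit = 255 };

    /* An off-link (forwarded) message must be rejected up front. */
    printf("forwarded accepted: %d\n", ndp_msg_hop_limit_valid(&forwarded)); /* 0 */
    printf("on-link accepted:   %d\n", ndp_msg_hop_limit_valid(&on_link));   /* 1 */
    return 0;
}
```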
CVE-2015-1191
|
https://www.cvedetails.com/cve/CVE-2015-1191/
|
CWE-22
|
https://github.com/madler/pigz/commit/fdad1406b3ec809f4954ff7cdf9e99eb18c2458f
|
fdad1406b3ec809f4954ff7cdf9e99eb18c2458f
|
When decompressing with -N or -NT, strip any path from header name.
This uses the path of the compressed file combined with the name
from the header as the name of the decompressed output file. Any
path information in the header name is stripped. This avoids a
possible vulnerability where absolute or descending paths are put
in the gzip header.
|
local int complain(char *fmt, ...)
{
va_list ap;
if (g.verbosity > 0) {
fprintf(stderr, "%s: ", g.prog);
va_start(ap, fmt);
vfprintf(stderr, fmt, ap);
va_end(ap);
putc('\n', stderr);
fflush(stderr);
}
return 0;
}
|
local int complain(char *fmt, ...)
{
va_list ap;
if (g.verbosity > 0) {
fprintf(stderr, "%s: ", g.prog);
va_start(ap, fmt);
vfprintf(stderr, fmt, ap);
va_end(ap);
putc('\n', stderr);
fflush(stderr);
}
return 0;
}
|
C
|
pigz
| 0 |
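A simplified sketch of the path-stripping step the pigz commit describes: keep only the final component of the name stored in the gzip header before using it as an output file name. This is an illustration of the idea, not pigz's actual code:

```c
#include <stdio.h>

/* Return a pointer to the last path component of name, discarding any
 * directory prefix an attacker may have stored in the gzip header. */
static const char *strip_header_path(const char *name)
{
    const char *base = name;
    for (const char *p = name; *p; p++) {
        if (*p == '/' || *p == '\\')
            base = p + 1;
    }
    return base;
}

int main(void)
{
    printf("%s\n", strip_header_path("../../etc/passwd"));   /* passwd */
    printf("%s\n", strip_header_path("/abs/path/data.txt")); /* data.txt */
    printf("%s\n", strip_header_path("plain.txt"));          /* plain.txt */
    return 0;
}
```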
CVE-2015-7515
|
https://www.cvedetails.com/cve/CVE-2015-7515/
| null |
https://github.com/torvalds/linux/commit/8e20cf2bce122ce9262d6034ee5d5b76fbb92f96
|
8e20cf2bce122ce9262d6034ee5d5b76fbb92f96
|
Input: aiptek - fix crash on detecting device without endpoints
The aiptek driver crashes in aiptek_probe() when a specially crafted USB
device without endpoints is detected. This fix adds a check that the device
has the configuration expected by the driver. Also, an error return value
in one of the error paths is changed to a more fitting one.
Reported-by: Ralf Spenneberg <[email protected]>
Signed-off-by: Vladis Dronov <[email protected]>
Signed-off-by: Dmitry Torokhov <[email protected]>
|
static ssize_t show_tabletMouseMiddle(struct device *dev, struct device_attribute *attr, char *buf)
{
struct aiptek *aiptek = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%s\n",
map_val_to_str(mouse_button_map,
aiptek->curSetting.mouseButtonMiddle));
}
|
static ssize_t show_tabletMouseMiddle(struct device *dev, struct device_attribute *attr, char *buf)
{
struct aiptek *aiptek = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%s\n",
map_val_to_str(mouse_button_map,
aiptek->curSetting.mouseButtonMiddle));
}
|
C
|
linux
| 0 |
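A hedged sketch of the probe-time check the aiptek commit adds: verify that the interface actually exposes an endpoint before dereferencing it, and reject the device with -ENODEV otherwise. The structs below are simplified stand-ins for the kernel's USB descriptors, not the real types:

```c
#include <stdio.h>

#define ENODEV 19

/* Simplified stand-ins for the USB descriptors the driver inspects. */
struct usb_endpoint { int address; };
struct usb_interface_desc {
    int num_endpoints;
    struct usb_endpoint *endpoints;
};

/* The crash happened because probe() assumed endpoint[0] exists.
 * The fix is to verify the configuration before touching it. */
static int probe_sketch(const struct usb_interface_desc *desc)
{
    if (desc->num_endpoints < 1)
        return -ENODEV;           /* device without endpoints: reject */
    /* ... safe to use desc->endpoints[0] from here on ... */
    return 0;
}

int main(void)
{
    struct usb_interface_desc empty = { .num_endpoints = 0, .endpoints = NULL };
    printf("probe on endpoint-less device: %d\n", probe_sketch(&empty)); /* -19 */
    return 0;
}
```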
CVE-2015-1214
|
https://www.cvedetails.com/cve/CVE-2015-1214/
|
CWE-190
|
https://github.com/chromium/chromium/commit/a81c185f34b34ef8410239506825b185b332c00b
|
a81c185f34b34ef8410239506825b185b332c00b
|
Add data usage tracking for chrome services
Add data usage tracking for captive portal, web resource and signin services
BUG=655749
Review-Url: https://codereview.chromium.org/2643013004
Cr-Commit-Position: refs/heads/master@{#445810}
|
void GaiaCookieManagerService::StartFetchingListAccounts() {
VLOG(1) << "GaiaCookieManagerService::ListAccounts";
gaia_auth_fetcher_.reset(signin_client_->CreateGaiaAuthFetcher(
this, GetSourceForRequest(requests_.front()),
signin_client_->GetURLRequestContext()));
gaia_auth_fetcher_->StartListAccounts();
}
|
void GaiaCookieManagerService::StartFetchingListAccounts() {
VLOG(1) << "GaiaCookieManagerService::ListAccounts";
gaia_auth_fetcher_.reset(signin_client_->CreateGaiaAuthFetcher(
this, GetSourceForRequest(requests_.front()),
signin_client_->GetURLRequestContext()));
gaia_auth_fetcher_->StartListAccounts();
}
|
C
|
Chrome
| 0 |
CVE-2013-2871
|
https://www.cvedetails.com/cve/CVE-2013-2871/
|
CWE-20
|
https://github.com/chromium/chromium/commit/bb9cfb0aba25f4b13e57bdd4a9fac80ba071e7b9
|
bb9cfb0aba25f4b13e57bdd4a9fac80ba071e7b9
|
Setting input.x-webkit-speech should not cause focus change
In r150866, we introduced element()->focus() in destroyShadowSubtree()
to retain focus on <input> when its type attribute gets changed.
But when the x-webkit-speech attribute is changed, the element is detached
before calling destroyShadowSubtree(), and element()->focus() fails.
This patch moves detach() after destroyShadowSubtree() to fix the
problem.
BUG=243818
TEST=fast/forms/input-type-change-focusout.html
NOTRY=true
Review URL: https://chromiumcodereview.appspot.com/16084005
git-svn-id: svn://svn.chromium.org/blink/trunk@151444 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
String HTMLInputElement::accept() const
{
return fastGetAttribute(acceptAttr);
}
|
String HTMLInputElement::accept() const
{
return fastGetAttribute(acceptAttr);
}
|
C
|
Chrome
| 0 |
CVE-2017-18030
|
https://www.cvedetails.com/cve/CVE-2017-18030/
|
CWE-125
|
https://git.qemu.org/?p=qemu.git;a=commit;h=f153b563f8cf121aebf5a2fff5f0110faf58ccb3
|
f153b563f8cf121aebf5a2fff5f0110faf58ccb3
| null |
static void cirrus_vga_write_sr(CirrusVGAState * s, uint32_t val)
{
switch (s->vga.sr_index) {
case 0x00: // Standard VGA
case 0x01: // Standard VGA
case 0x02: // Standard VGA
case 0x03: // Standard VGA
case 0x04: // Standard VGA
s->vga.sr[s->vga.sr_index] = val & sr_mask[s->vga.sr_index];
if (s->vga.sr_index == 1)
s->vga.update_retrace_info(&s->vga);
break;
case 0x06: // Unlock Cirrus extensions
val &= 0x17;
if (val == 0x12) {
s->vga.sr[s->vga.sr_index] = 0x12;
} else {
s->vga.sr[s->vga.sr_index] = 0x0f;
}
break;
case 0x10:
case 0x30:
case 0x50:
case 0x70: // Graphics Cursor X
case 0x90:
case 0xb0:
case 0xd0:
case 0xf0: // Graphics Cursor X
s->vga.sr[0x10] = val;
s->vga.hw_cursor_x = (val << 3) | (s->vga.sr_index >> 5);
break;
case 0x11:
case 0x31:
case 0x51:
case 0x71: // Graphics Cursor Y
case 0x91:
case 0xb1:
case 0xd1:
case 0xf1: // Graphics Cursor Y
s->vga.sr[0x11] = val;
s->vga.hw_cursor_y = (val << 3) | (s->vga.sr_index >> 5);
break;
case 0x07: // Extended Sequencer Mode
cirrus_update_memory_access(s);
case 0x08: // EEPROM Control
case 0x09: // Scratch Register 0
case 0x0a: // Scratch Register 1
case 0x0b: // VCLK 0
case 0x0c: // VCLK 1
case 0x0d: // VCLK 2
case 0x0e: // VCLK 3
case 0x0f: // DRAM Control
case 0x13: // Graphics Cursor Pattern Address
case 0x14: // Scratch Register 2
case 0x15: // Scratch Register 3
case 0x16: // Performance Tuning Register
case 0x18: // Signature Generator Control
case 0x19: // Signature Generator Result
case 0x1a: // Signature Generator Result
case 0x1b: // VCLK 0 Denominator & Post
case 0x1c: // VCLK 1 Denominator & Post
case 0x1d: // VCLK 2 Denominator & Post
case 0x1e: // VCLK 3 Denominator & Post
case 0x1f: // BIOS Write Enable and MCLK select
s->vga.sr[s->vga.sr_index] = val;
#ifdef DEBUG_CIRRUS
printf("cirrus: handled outport sr_index %02x, sr_value %02x\n",
s->vga.sr_index, val);
#endif
break;
case 0x12: // Graphics Cursor Attribute
s->vga.sr[0x12] = val;
s->vga.force_shadow = !!(val & CIRRUS_CURSOR_SHOW);
#ifdef DEBUG_CIRRUS
printf("cirrus: cursor ctl SR12=%02x (force shadow: %d)\n",
val, s->vga.force_shadow);
#endif
break;
case 0x17: // Configuration Readback and Extended Control
s->vga.sr[s->vga.sr_index] = (s->vga.sr[s->vga.sr_index] & 0x38)
| (val & 0xc7);
cirrus_update_memory_access(s);
break;
default:
#ifdef DEBUG_CIRRUS
printf("cirrus: outport sr_index %02x, sr_value %02x\n",
s->vga.sr_index, val);
#endif
break;
}
}
|
static void cirrus_vga_write_sr(CirrusVGAState * s, uint32_t val)
{
switch (s->vga.sr_index) {
case 0x00: // Standard VGA
case 0x01: // Standard VGA
case 0x02: // Standard VGA
case 0x03: // Standard VGA
case 0x04: // Standard VGA
s->vga.sr[s->vga.sr_index] = val & sr_mask[s->vga.sr_index];
if (s->vga.sr_index == 1)
s->vga.update_retrace_info(&s->vga);
break;
case 0x06: // Unlock Cirrus extensions
val &= 0x17;
if (val == 0x12) {
s->vga.sr[s->vga.sr_index] = 0x12;
} else {
s->vga.sr[s->vga.sr_index] = 0x0f;
}
break;
case 0x10:
case 0x30:
case 0x50:
case 0x70: // Graphics Cursor X
case 0x90:
case 0xb0:
case 0xd0:
case 0xf0: // Graphics Cursor X
s->vga.sr[0x10] = val;
s->vga.hw_cursor_x = (val << 3) | (s->vga.sr_index >> 5);
break;
case 0x11:
case 0x31:
case 0x51:
case 0x71: // Graphics Cursor Y
case 0x91:
case 0xb1:
case 0xd1:
case 0xf1: // Graphics Cursor Y
s->vga.sr[0x11] = val;
s->vga.hw_cursor_y = (val << 3) | (s->vga.sr_index >> 5);
break;
case 0x07: // Extended Sequencer Mode
cirrus_update_memory_access(s);
case 0x08: // EEPROM Control
case 0x09: // Scratch Register 0
case 0x0a: // Scratch Register 1
case 0x0b: // VCLK 0
case 0x0c: // VCLK 1
case 0x0d: // VCLK 2
case 0x0e: // VCLK 3
case 0x0f: // DRAM Control
case 0x13: // Graphics Cursor Pattern Address
case 0x14: // Scratch Register 2
case 0x15: // Scratch Register 3
case 0x16: // Performance Tuning Register
case 0x18: // Signature Generator Control
case 0x19: // Signature Generator Result
case 0x1a: // Signature Generator Result
case 0x1b: // VCLK 0 Denominator & Post
case 0x1c: // VCLK 1 Denominator & Post
case 0x1d: // VCLK 2 Denominator & Post
case 0x1e: // VCLK 3 Denominator & Post
case 0x1f: // BIOS Write Enable and MCLK select
s->vga.sr[s->vga.sr_index] = val;
#ifdef DEBUG_CIRRUS
printf("cirrus: handled outport sr_index %02x, sr_value %02x\n",
s->vga.sr_index, val);
#endif
break;
case 0x12: // Graphics Cursor Attribute
s->vga.sr[0x12] = val;
s->vga.force_shadow = !!(val & CIRRUS_CURSOR_SHOW);
#ifdef DEBUG_CIRRUS
printf("cirrus: cursor ctl SR12=%02x (force shadow: %d)\n",
val, s->vga.force_shadow);
#endif
break;
case 0x17: // Configuration Readback and Extended Control
s->vga.sr[s->vga.sr_index] = (s->vga.sr[s->vga.sr_index] & 0x38)
| (val & 0xc7);
cirrus_update_memory_access(s);
break;
default:
#ifdef DEBUG_CIRRUS
printf("cirrus: outport sr_index %02x, sr_value %02x\n",
s->vga.sr_index, val);
#endif
break;
}
}
|
C
|
qemu
| 0 |
CVE-2018-17204
|
https://www.cvedetails.com/cve/CVE-2018-17204/
|
CWE-617
|
https://github.com/openvswitch/ovs/commit/4af6da3b275b764b1afe194df6499b33d2bf4cde
|
4af6da3b275b764b1afe194df6499b33d2bf4cde
|
ofp-group: Don't assert-fail decoding bad OF1.5 group mod type or command.
When decoding a group mod, the current code validates the group type and
command after the whole group mod has been decoded. The OF1.5 decoder,
however, tries to use the type and command earlier, when it might still be
invalid. This caused an assertion failure (via OVS_NOT_REACHED). This
commit fixes the problem.
ovs-vswitchd does not enable support for OpenFlow 1.5 by default.
Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9249
Signed-off-by: Ben Pfaff <[email protected]>
Reviewed-by: Yifeng Sun <[email protected]>
|
ofputil_bucket_check_duplicate_id(const struct ovs_list *buckets)
{
struct ofputil_bucket *i, *j;
LIST_FOR_EACH (i, list_node, buckets) {
LIST_FOR_EACH_REVERSE (j, list_node, buckets) {
if (i == j) {
break;
}
if (i->bucket_id == j->bucket_id) {
return true;
}
}
}
return false;
}
|
ofputil_bucket_check_duplicate_id(const struct ovs_list *buckets)
{
struct ofputil_bucket *i, *j;
LIST_FOR_EACH (i, list_node, buckets) {
LIST_FOR_EACH_REVERSE (j, list_node, buckets) {
if (i == j) {
break;
}
if (i->bucket_id == j->bucket_id) {
return true;
}
}
}
return false;
}
|
C
|
ovs
| 0 |
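The ofp-group commit moves validation of the group type and command ahead of their first use in the OF1.5 decoder, so malformed input yields a decode error instead of hitting an OVS_NOT_REACHED() assertion. A minimal sketch of that validate-before-use ordering, with hypothetical enum values and error codes rather than OVS's actual ofp-group definitions:

```c
#include <stdio.h>

#define OFPERR_BAD_TYPE    1
#define OFPERR_BAD_COMMAND 2

enum group_type    { GT_ALL, GT_SELECT, GT_INDIRECT, GT_FF, GT_MAX };
enum group_command { GC_ADD, GC_MODIFY, GC_DELETE, GC_MAX };

/* Reject out-of-range values up front instead of asserting deep in
 * the decoder once they are already being used. */
static int check_group_mod(unsigned type, unsigned command)
{
    if (type >= GT_MAX)
        return OFPERR_BAD_TYPE;
    if (command >= GC_MAX)
        return OFPERR_BAD_COMMAND;
    return 0;
}

int main(void)
{
    printf("valid: %d\n", check_group_mod(GT_SELECT, GC_ADD)); /* 0 */
    printf("bogus: %d\n", check_group_mod(0xff, GC_ADD));      /* 1 */
    return 0;
}
```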
CVE-2012-2880
|
https://www.cvedetails.com/cve/CVE-2012-2880/
|
CWE-362
|
https://github.com/chromium/chromium/commit/fcd3a7a671ecf2d5f46ea34787d27507a914d2f5
|
fcd3a7a671ecf2d5f46ea34787d27507a914d2f5
|
[Sync] Cleanup all tab sync enabling logic now that its on by default.
BUG=none
TEST=
Review URL: https://chromiumcodereview.appspot.com/10443046
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@139462 0039d316-1c4b-4281-b951-d872f2087c98
|
void ExpectLocalOrderIsByServerId() {
ReadTransaction trans(FROM_HERE, directory());
Id null_id;
Entry low(&trans, GET_BY_ID, low_id_);
Entry mid(&trans, GET_BY_ID, mid_id_);
Entry high(&trans, GET_BY_ID, high_id_);
EXPECT_TRUE(low.good());
EXPECT_TRUE(mid.good());
EXPECT_TRUE(high.good());
EXPECT_TRUE(low.Get(PREV_ID) == null_id);
EXPECT_TRUE(mid.Get(PREV_ID) == low_id_);
EXPECT_TRUE(high.Get(PREV_ID) == mid_id_);
EXPECT_TRUE(high.Get(NEXT_ID) == null_id);
EXPECT_TRUE(mid.Get(NEXT_ID) == high_id_);
EXPECT_TRUE(low.Get(NEXT_ID) == mid_id_);
}
|
void ExpectLocalOrderIsByServerId() {
ReadTransaction trans(FROM_HERE, directory());
Id null_id;
Entry low(&trans, GET_BY_ID, low_id_);
Entry mid(&trans, GET_BY_ID, mid_id_);
Entry high(&trans, GET_BY_ID, high_id_);
EXPECT_TRUE(low.good());
EXPECT_TRUE(mid.good());
EXPECT_TRUE(high.good());
EXPECT_TRUE(low.Get(PREV_ID) == null_id);
EXPECT_TRUE(mid.Get(PREV_ID) == low_id_);
EXPECT_TRUE(high.Get(PREV_ID) == mid_id_);
EXPECT_TRUE(high.Get(NEXT_ID) == null_id);
EXPECT_TRUE(mid.Get(NEXT_ID) == high_id_);
EXPECT_TRUE(low.Get(NEXT_ID) == mid_id_);
}
|
C
|
Chrome
| 0 |
CVE-2017-6001
|
https://www.cvedetails.com/cve/CVE-2017-6001/
|
CWE-362
|
https://github.com/torvalds/linux/commit/321027c1fe77f892f4ea07846aeae08cefbbb290
|
321027c1fe77f892f4ea07846aeae08cefbbb290
|
perf/core: Fix concurrent sys_perf_event_open() vs. 'move_group' race
Di Shen reported a race between two concurrent sys_perf_event_open()
calls where both try and move the same pre-existing software group
into a hardware context.
The problem is exactly that described in commit:
f63a8daa5812 ("perf: Fix event->ctx locking")
... where, while we wait for a ctx->mutex acquisition, the event->ctx
relation can have changed under us.
That very same commit failed to recognise sys_perf_event_context() as an
external access vector to the events and thereby didn't apply the
established locking rules correctly.
So while one sys_perf_event_open() call is stuck waiting on
mutex_lock_double(), the other (which owns said locks) moves the group
about. So by the time the former sys_perf_event_open() acquires the
locks, the context we've acquired is stale (and possibly dead).
Apply the established locking rules as per perf_event_ctx_lock_nested()
to the mutex_lock_double() for the 'move_group' case. This obviously means
we need to validate state after we acquire the locks.
Reported-by: Di Shen (Keen Lab)
Tested-by: John Dias <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Alexander Shishkin <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Min Chong <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Stephane Eranian <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Vince Weaver <[email protected]>
Fixes: f63a8daa5812 ("perf: Fix event->ctx locking")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
|
static void perf_duration_warn(struct irq_work *w)
{
printk_ratelimited(KERN_INFO
"perf: interrupt took too long (%lld > %lld), lowering "
"kernel.perf_event_max_sample_rate to %d\n",
__report_avg, __report_allowed,
sysctl_perf_event_sample_rate);
}
|
static void perf_duration_warn(struct irq_work *w)
{
printk_ratelimited(KERN_INFO
"perf: interrupt took too long (%lld > %lld), lowering "
"kernel.perf_event_max_sample_rate to %d\n",
__report_avg, __report_allowed,
sysctl_perf_event_sample_rate);
}
|
C
|
linux
| 0 |
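The rule the perf commit applies (per perf_event_ctx_lock_nested()) is: take the context lock, then re-check that event->ctx still points at the context you locked, and retry if it moved while you were blocked on the mutex. A user-space pthread sketch of that lock-then-revalidate loop; this is a single-threaded demo of the pattern, not the kernel code, which also uses proper barriers for the pointer read:

```c
#include <pthread.h>
#include <stdio.h>

struct ctx { pthread_mutex_t mutex; };
struct event { struct ctx *ctx; };

/* Take the context lock, then confirm the event still points at that
 * context; if it changed while we slept on the mutex, retry. */
static struct ctx *event_ctx_lock(struct event *ev)
{
    for (;;) {
        struct ctx *ctx = ev->ctx;              /* snapshot before locking */
        pthread_mutex_lock(&ctx->mutex);
        if (ev->ctx == ctx)                     /* still the same context? */
            return ctx;                         /* yes: we hold the right lock */
        pthread_mutex_unlock(&ctx->mutex);      /* no: raced, try again */
    }
}

int main(void)
{
    struct ctx c = { PTHREAD_MUTEX_INITIALIZER };
    struct event ev = { &c };
    struct ctx *locked = event_ctx_lock(&ev);
    printf("locked the current context: %d\n", locked == ev.ctx);
    pthread_mutex_unlock(&locked->mutex);
    return 0;
}
```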
CVE-2012-2816
|
https://www.cvedetails.com/cve/CVE-2012-2816/
| null |
https://github.com/chromium/chromium/commit/cd0bd79d6ebdb72183e6f0833673464cc10b3600
|
cd0bd79d6ebdb72183e6f0833673464cc10b3600
|
Convert plugin and GPU process to brokered handle duplication.
BUG=119250
Review URL: https://chromiumcodereview.appspot.com/9958034
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@132303 0039d316-1c4b-4281-b951-d872f2087c98
|
void WebGraphicsContext3DCommandBufferImpl::synthesizeGLError(
WGC3Denum error) {
if (std::find(synthetic_errors_.begin(), synthetic_errors_.end(), error) ==
synthetic_errors_.end()) {
synthetic_errors_.push_back(error);
}
}
|
void WebGraphicsContext3DCommandBufferImpl::synthesizeGLError(
WGC3Denum error) {
if (std::find(synthetic_errors_.begin(), synthetic_errors_.end(), error) ==
synthetic_errors_.end()) {
synthetic_errors_.push_back(error);
}
}
|
C
|
Chrome
| 0 |
CVE-2017-5551
|
https://www.cvedetails.com/cve/CVE-2017-5551/
| null |
https://github.com/torvalds/linux/commit/497de07d89c1410d76a15bec2bb41f24a2a89f31
|
497de07d89c1410d76a15bec2bb41f24a2a89f31
|
tmpfs: clear S_ISGID when setting posix ACLs
The tmpfs modification was missed in the CVE-2016-7097 change,
commit 073931017b49 ("posix_acl: Clear SGID bit when setting
file permissions").
It can be tested with xfstest generic/375, which failed to clear the
setgid bit in the following test case on tmpfs:
touch $testfile
chown 100:100 $testfile
chmod 2755 $testfile
_runas -u 100 -g 101 -- setfacl -m u::rwx,g::rwx,o::rwx $testfile
Signed-off-by: Gu Zheng <[email protected]>
Signed-off-by: Al Viro <[email protected]>
|
posix_acl_create(struct inode *dir, umode_t *mode,
struct posix_acl **default_acl, struct posix_acl **acl)
{
struct posix_acl *p;
struct posix_acl *clone;
int ret;
*acl = NULL;
*default_acl = NULL;
if (S_ISLNK(*mode) || !IS_POSIXACL(dir))
return 0;
p = get_acl(dir, ACL_TYPE_DEFAULT);
if (!p || p == ERR_PTR(-EOPNOTSUPP)) {
*mode &= ~current_umask();
return 0;
}
if (IS_ERR(p))
return PTR_ERR(p);
ret = -ENOMEM;
clone = posix_acl_clone(p, GFP_NOFS);
if (!clone)
goto err_release;
ret = posix_acl_create_masq(clone, mode);
if (ret < 0)
goto err_release_clone;
if (ret == 0)
posix_acl_release(clone);
else
*acl = clone;
if (!S_ISDIR(*mode))
posix_acl_release(p);
else
*default_acl = p;
return 0;
err_release_clone:
posix_acl_release(clone);
err_release:
posix_acl_release(p);
return ret;
}
|
posix_acl_create(struct inode *dir, umode_t *mode,
struct posix_acl **default_acl, struct posix_acl **acl)
{
struct posix_acl *p;
struct posix_acl *clone;
int ret;
*acl = NULL;
*default_acl = NULL;
if (S_ISLNK(*mode) || !IS_POSIXACL(dir))
return 0;
p = get_acl(dir, ACL_TYPE_DEFAULT);
if (!p || p == ERR_PTR(-EOPNOTSUPP)) {
*mode &= ~current_umask();
return 0;
}
if (IS_ERR(p))
return PTR_ERR(p);
ret = -ENOMEM;
clone = posix_acl_clone(p, GFP_NOFS);
if (!clone)
goto err_release;
ret = posix_acl_create_masq(clone, mode);
if (ret < 0)
goto err_release_clone;
if (ret == 0)
posix_acl_release(clone);
else
*acl = clone;
if (!S_ISDIR(*mode))
posix_acl_release(p);
else
*default_acl = p;
return 0;
err_release_clone:
posix_acl_release(clone);
err_release:
posix_acl_release(p);
return ret;
}
|
C
|
linux
| 0 |
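The underlying rule (from commit 073931017b49) is that setting POSIX ACLs must clear the setgid bit when the caller is not in the file's owning group and lacks CAP_FSETID; the tmpfs path had missed it. A user-space sketch of that mode adjustment, a stand-in for the kernel's posix_acl_update_mode() logic rather than the function itself:

```c
#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>

/* When permissions are changed by someone who is not in the file's
 * owning group and has no CAP_FSETID, the setgid bit must be cleared. */
static mode_t acl_update_mode(mode_t mode, bool in_owning_group, bool has_cap_fsetid)
{
    if ((mode & S_ISGID) && !in_owning_group && !has_cap_fsetid)
        mode &= ~S_ISGID;
    return mode;
}

int main(void)
{
    mode_t m = 02755;  /* rwxr-sr-x with setgid, as in the xfstest case */
    printf("owner in group keeps sgid: %o\n",
           (unsigned) acl_update_mode(m, true, false));   /* 2755 */
    printf("outsider loses sgid:       %o\n",
           (unsigned) acl_update_mode(m, false, false));  /* 755 */
    return 0;
}
```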
CVE-2018-17205
|
https://www.cvedetails.com/cve/CVE-2018-17205/
|
CWE-617
|
https://github.com/openvswitch/ovs/commit/0befd1f3745055c32940f5faf9559be6a14395e6
|
0befd1f3745055c32940f5faf9559be6a14395e6
|
ofproto: Fix OVS crash when reverting old flows in bundle commit
During bundle commit flows which are added in bundle are applied
to ofproto in-order. In case if a flow cannot be added (e.g. flow
action is go-to group id which does not exist), OVS tries to
revert back all previous flows which were successfully applied
from the same bundle. This is possible since OVS maintains list
of old flows which were replaced by flows from the bundle.
While reinserting old flows ovs asserts due to check on rule
state != RULE_INITIALIZED. This will work only for new flows, but
for old flow the rule state will be RULE_REMOVED. This is causing
an assert and OVS crash.
The ovs assert check should be modified to != RULE_INSERTED to prevent
any existing rule being re-inserted and allow new rules and old rules
(in case of revert) to get inserted.
Here is an example to trigger the assert:
$ ovs-vsctl add-br br-test -- set Bridge br-test datapath_type=netdev
$ cat flows.txt
flow add table=1,priority=0,in_port=2,actions=NORMAL
flow add table=1,priority=0,in_port=3,actions=NORMAL
$ ovs-ofctl dump-flows -OOpenflow13 br-test
cookie=0x0, duration=2.465s, table=1, n_packets=0, n_bytes=0, priority=0,in_port=2 actions=NORMAL
cookie=0x0, duration=2.465s, table=1, n_packets=0, n_bytes=0, priority=0,in_port=3 actions=NORMAL
$ cat flow-modify.txt
flow modify table=1,priority=0,in_port=2,actions=drop
flow modify table=1,priority=0,in_port=3,actions=group:10
$ ovs-ofctl bundle br-test flow-modify.txt -OOpenflow13
First flow rule will be modified since it is a valid rule. However second
rule is invalid since no group with id 10 exists. Bundle commit tries to
revert (insert) the first rule to old flow which results in ovs_assert at
ofproto_rule_insert__() since old rule->state = RULE_REMOVED.
Signed-off-by: Vishal Deep Ajmera <[email protected]>
Signed-off-by: Ben Pfaff <[email protected]>
|
ofport_remove_usage(struct ofproto *ofproto, ofp_port_t ofp_port)
{
struct ofport_usage *usage;
HMAP_FOR_EACH_IN_BUCKET (usage, hmap_node, hash_ofp_port(ofp_port),
&ofproto->ofport_usage) {
if (usage->ofp_port == ofp_port) {
hmap_remove(&ofproto->ofport_usage, &usage->hmap_node);
free(usage);
break;
}
}
}
|
ofport_remove_usage(struct ofproto *ofproto, ofp_port_t ofp_port)
{
struct ofport_usage *usage;
HMAP_FOR_EACH_IN_BUCKET (usage, hmap_node, hash_ofp_port(ofp_port),
&ofproto->ofport_usage) {
if (usage->ofp_port == ofp_port) {
hmap_remove(&ofproto->ofport_usage, &usage->hmap_node);
free(usage);
break;
}
}
}
|
C
|
ovs
| 0 |
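The ofproto fix described above relaxes the insertion assertion so that previously removed rules can be re-inserted during a bundle revert, while a rule that is already inserted is still rejected. A toy sketch of that state check, with hypothetical types rather than ofproto's:

```c
#include <assert.h>
#include <stdio.h>

enum rule_state { RULE_INITIALIZED, RULE_INSERTED, RULE_REMOVED };
struct rule { enum rule_state state; };

/* Per the commit: only a rule that is currently inserted must be
 * rejected; brand-new rules and previously removed rules (the
 * bundle-revert case) may both be (re)inserted. */
static void rule_insert(struct rule *rule)
{
    assert(rule->state != RULE_INSERTED);
    rule->state = RULE_INSERTED;
}

int main(void)
{
    struct rule fresh    = { RULE_INITIALIZED };
    struct rule reverted = { RULE_REMOVED };
    rule_insert(&fresh);     /* ok before and after the fix */
    rule_insert(&reverted);  /* ok after the fix; asserted before it */
    printf("both rules inserted\n");
    return 0;
}
```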
CVE-2016-6327
|
https://www.cvedetails.com/cve/CVE-2016-6327/
|
CWE-476
|
https://github.com/torvalds/linux/commit/51093254bf879bc9ce96590400a87897c7498463
|
51093254bf879bc9ce96590400a87897c7498463
|
IB/srpt: Simplify srpt_handle_tsk_mgmt()
Let the target core check task existence instead of the SRP target
driver. Additionally, let the target core check the validity of the
task management request instead of the ib_srpt driver.
This patch fixes the following kernel crash:
BUG: unable to handle kernel NULL pointer dereference at 0000000000000001
IP: [<ffffffffa0565f37>] srpt_handle_new_iu+0x6d7/0x790 [ib_srpt]
Oops: 0002 [#1] SMP
Call Trace:
[<ffffffffa05660ce>] srpt_process_completion+0xde/0x570 [ib_srpt]
[<ffffffffa056669f>] srpt_compl_thread+0x13f/0x160 [ib_srpt]
[<ffffffff8109726f>] kthread+0xcf/0xe0
[<ffffffff81613cfc>] ret_from_fork+0x7c/0xb0
Signed-off-by: Bart Van Assche <[email protected]>
Fixes: 3e4f574857ee ("ib_srpt: Convert TMR path to target_submit_tmr")
Tested-by: Alex Estrin <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Cc: Nicholas Bellinger <[email protected]>
Cc: Sagi Grimberg <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Doug Ledford <[email protected]>
|
static void srpt_event_handler(struct ib_event_handler *handler,
struct ib_event *event)
{
struct srpt_device *sdev;
struct srpt_port *sport;
sdev = ib_get_client_data(event->device, &srpt_client);
if (!sdev || sdev->device != event->device)
return;
pr_debug("ASYNC event= %d on device= %s\n", event->event,
srpt_sdev_name(sdev));
switch (event->event) {
case IB_EVENT_PORT_ERR:
if (event->element.port_num <= sdev->device->phys_port_cnt) {
sport = &sdev->port[event->element.port_num - 1];
sport->lid = 0;
sport->sm_lid = 0;
}
break;
case IB_EVENT_PORT_ACTIVE:
case IB_EVENT_LID_CHANGE:
case IB_EVENT_PKEY_CHANGE:
case IB_EVENT_SM_CHANGE:
case IB_EVENT_CLIENT_REREGISTER:
case IB_EVENT_GID_CHANGE:
/* Refresh port data asynchronously. */
if (event->element.port_num <= sdev->device->phys_port_cnt) {
sport = &sdev->port[event->element.port_num - 1];
if (!sport->lid && !sport->sm_lid)
schedule_work(&sport->work);
}
break;
default:
pr_err("received unrecognized IB event %d\n",
event->event);
break;
}
}
|
static void srpt_event_handler(struct ib_event_handler *handler,
struct ib_event *event)
{
struct srpt_device *sdev;
struct srpt_port *sport;
sdev = ib_get_client_data(event->device, &srpt_client);
if (!sdev || sdev->device != event->device)
return;
pr_debug("ASYNC event= %d on device= %s\n", event->event,
srpt_sdev_name(sdev));
switch (event->event) {
case IB_EVENT_PORT_ERR:
if (event->element.port_num <= sdev->device->phys_port_cnt) {
sport = &sdev->port[event->element.port_num - 1];
sport->lid = 0;
sport->sm_lid = 0;
}
break;
case IB_EVENT_PORT_ACTIVE:
case IB_EVENT_LID_CHANGE:
case IB_EVENT_PKEY_CHANGE:
case IB_EVENT_SM_CHANGE:
case IB_EVENT_CLIENT_REREGISTER:
case IB_EVENT_GID_CHANGE:
/* Refresh port data asynchronously. */
if (event->element.port_num <= sdev->device->phys_port_cnt) {
sport = &sdev->port[event->element.port_num - 1];
if (!sport->lid && !sport->sm_lid)
schedule_work(&sport->work);
}
break;
default:
pr_err("received unrecognized IB event %d\n",
event->event);
break;
}
}
|
C
|
linux
| 0 |
CVE-2011-2795
|
https://www.cvedetails.com/cve/CVE-2011-2795/
|
CWE-264
|
https://github.com/chromium/chromium/commit/73edae623529f04c668268de49d00324b96166a2
|
73edae623529f04c668268de49d00324b96166a2
|
There are too many poorly named functions to create a fragment from markup
https://bugs.webkit.org/show_bug.cgi?id=87339
Reviewed by Eric Seidel.
Source/WebCore:
Moved all functions that create a fragment from markup to markup.h/cpp.
There should be no behavioral change.
* dom/Range.cpp:
(WebCore::Range::createContextualFragment):
* dom/Range.h: Removed createDocumentFragmentForElement.
* dom/ShadowRoot.cpp:
(WebCore::ShadowRoot::setInnerHTML):
* editing/markup.cpp:
(WebCore::createFragmentFromMarkup):
(WebCore::createFragmentForInnerOuterHTML): Renamed from createFragmentFromSource.
(WebCore::createFragmentForTransformToFragment): Moved from XSLTProcessor.
(WebCore::removeElementPreservingChildren): Moved from Range.
(WebCore::createContextualFragment): Ditto.
* editing/markup.h:
* html/HTMLElement.cpp:
(WebCore::HTMLElement::setInnerHTML):
(WebCore::HTMLElement::setOuterHTML):
(WebCore::HTMLElement::insertAdjacentHTML):
* inspector/DOMPatchSupport.cpp:
(WebCore::DOMPatchSupport::patchNode): Added a FIXME since this code should be using
one of the functions listed in markup.h
* xml/XSLTProcessor.cpp:
(WebCore::XSLTProcessor::transformToFragment):
Source/WebKit/qt:
Replace calls to Range::createDocumentFragmentForElement by calls to
createContextualDocumentFragment.
* Api/qwebelement.cpp:
(QWebElement::appendInside):
(QWebElement::prependInside):
(QWebElement::prependOutside):
(QWebElement::appendOutside):
(QWebElement::encloseContentsWith):
(QWebElement::encloseWith):
git-svn-id: svn://svn.chromium.org/blink/trunk@118414 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void Range::setStartBefore(Node* refNode, ExceptionCode& ec)
{
if (!m_start.container()) {
ec = INVALID_STATE_ERR;
return;
}
if (!refNode) {
ec = NOT_FOUND_ERR;
return;
}
if (refNode->document() != m_ownerDocument) {
ec = WRONG_DOCUMENT_ERR;
return;
}
ec = 0;
checkNodeBA(refNode, ec);
if (ec)
return;
setStart(refNode->parentNode(), refNode->nodeIndex(), ec);
}
|
void Range::setStartBefore(Node* refNode, ExceptionCode& ec)
{
if (!m_start.container()) {
ec = INVALID_STATE_ERR;
return;
}
if (!refNode) {
ec = NOT_FOUND_ERR;
return;
}
if (refNode->document() != m_ownerDocument) {
ec = WRONG_DOCUMENT_ERR;
return;
}
ec = 0;
checkNodeBA(refNode, ec);
if (ec)
return;
setStart(refNode->parentNode(), refNode->nodeIndex(), ec);
}
|
C
|
Chrome
| 0 |
CVE-2015-5289
|
https://www.cvedetails.com/cve/CVE-2015-5289/
|
CWE-119
|
https://git.postgresql.org/gitweb/?p=postgresql.git;a=commit;h=08fa47c4850cea32c3116665975bca219fbf2fe6
|
08fa47c4850cea32c3116665975bca219fbf2fe6
| null |
sn_array_end(void *state)
{
StripnullState *_state = (StripnullState *) state;
appendStringInfoCharMacro(_state->strval, ']');
}
|
sn_array_end(void *state)
{
StripnullState *_state = (StripnullState *) state;
appendStringInfoCharMacro(_state->strval, ']');
}
|
C
|
postgresql
| 0 |
CVE-2015-8950
|
https://www.cvedetails.com/cve/CVE-2015-8950/
|
CWE-200
|
https://github.com/torvalds/linux/commit/6829e274a623187c24f7cfc0e3d35f25d087fcc5
|
6829e274a623187c24f7cfc0e3d35f25d087fcc5
|
arm64: dma-mapping: always clear allocated buffers
Buffers allocated by dma_alloc_coherent() are always zeroed on Alpha,
ARM (32bit), MIPS, PowerPC, x86/x86_64 and probably other architectures.
It turned out that some drivers rely on this 'feature'. Allocated buffer
might be also exposed to userspace with dma_mmap() call, so clearing it
is desired from security point of view to avoid exposing random memory
to userspace. This patch unifies dma_alloc_coherent() behavior on ARM64
architecture with other implementations by unconditionally zeroing
allocated buffer.
Cc: <[email protected]> # v3.14+
Signed-off-by: Marek Szyprowski <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
|
static int __init atomic_pool_init(void)
{
pgprot_t prot = __pgprot(PROT_NORMAL_NC);
unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
struct page *page;
void *addr;
unsigned int pool_size_order = get_order(atomic_pool_size);
if (dev_get_cma_area(NULL))
page = dma_alloc_from_contiguous(NULL, nr_pages,
pool_size_order);
else
page = alloc_pages(GFP_DMA, pool_size_order);
if (page) {
int ret;
void *page_addr = page_address(page);
memset(page_addr, 0, atomic_pool_size);
__dma_flush_range(page_addr, page_addr + atomic_pool_size);
atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
if (!atomic_pool)
goto free_page;
addr = dma_common_contiguous_remap(page, atomic_pool_size,
VM_USERMAP, prot, atomic_pool_init);
if (!addr)
goto destroy_genpool;
ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
page_to_phys(page),
atomic_pool_size, -1);
if (ret)
goto remove_mapping;
gen_pool_set_algo(atomic_pool,
gen_pool_first_fit_order_align,
(void *)PAGE_SHIFT);
pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
atomic_pool_size / 1024);
return 0;
}
goto out;
remove_mapping:
dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
gen_pool_destroy(atomic_pool);
atomic_pool = NULL;
free_page:
if (!dma_release_from_contiguous(NULL, page, nr_pages))
__free_pages(page, pool_size_order);
out:
pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
atomic_pool_size / 1024);
return -ENOMEM;
}
|
static int __init atomic_pool_init(void)
{
pgprot_t prot = __pgprot(PROT_NORMAL_NC);
unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
struct page *page;
void *addr;
unsigned int pool_size_order = get_order(atomic_pool_size);
if (dev_get_cma_area(NULL))
page = dma_alloc_from_contiguous(NULL, nr_pages,
pool_size_order);
else
page = alloc_pages(GFP_DMA, pool_size_order);
if (page) {
int ret;
void *page_addr = page_address(page);
memset(page_addr, 0, atomic_pool_size);
__dma_flush_range(page_addr, page_addr + atomic_pool_size);
atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
if (!atomic_pool)
goto free_page;
addr = dma_common_contiguous_remap(page, atomic_pool_size,
VM_USERMAP, prot, atomic_pool_init);
if (!addr)
goto destroy_genpool;
ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
page_to_phys(page),
atomic_pool_size, -1);
if (ret)
goto remove_mapping;
gen_pool_set_algo(atomic_pool,
gen_pool_first_fit_order_align,
(void *)PAGE_SHIFT);
pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
atomic_pool_size / 1024);
return 0;
}
goto out;
remove_mapping:
dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
gen_pool_destroy(atomic_pool);
atomic_pool = NULL;
free_page:
if (!dma_release_from_contiguous(NULL, page, nr_pages))
__free_pages(page, pool_size_order);
out:
pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
atomic_pool_size / 1024);
return -ENOMEM;
}
|
C
|
linux
| 0 |
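The arm64 commit makes dma_alloc_coherent() unconditionally zero the buffer it returns, matching other architectures. A trivial user-space sketch of the clear-on-allocate idea, with plain malloc standing in for the real allocator:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Always hand out zeroed memory so stale data can never leak to a
 * driver or, via dma_mmap(), to userspace. */
static void *alloc_coherent_sketch(size_t size)
{
    void *buf = malloc(size);
    if (buf)
        memset(buf, 0, size);   /* the unconditional clearing the fix adds */
    return buf;
}

int main(void)
{
    unsigned char *p = alloc_coherent_sketch(64);
    if (!p)
        return 1;
    printf("first byte after allocation: %u\n", p[0]);  /* always 0 */
    free(p);
    return 0;
}
```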
CVE-2013-7262
|
https://www.cvedetails.com/cve/CVE-2013-7262/
|
CWE-89
|
https://github.com/mapserver/mapserver/commit/3a10f6b829297dae63492a8c63385044bc6953ed
|
3a10f6b829297dae63492a8c63385044bc6953ed
|
Fix potential SQL Injection with postgis TIME filters (#4834)
|
int msPostGISHexDecode(unsigned char *dest, const char *src, int srclen)
{
if (src && *src && (srclen % 2 == 0) ) {
unsigned char *p = dest;
int i;
for ( i=0; i<srclen; i+=2 ) {
register unsigned char b1=0, b2=0;
register unsigned char c1 = src[i];
register unsigned char c2 = src[i + 1];
b1 = msPostGISHexDecodeChar[c1];
b2 = msPostGISHexDecodeChar[c2];
*p++ = (b1 << 4) | b2;
}
return(p-dest);
}
return 0;
}
|
int msPostGISHexDecode(unsigned char *dest, const char *src, int srclen)
{
if (src && *src && (srclen % 2 == 0) ) {
unsigned char *p = dest;
int i;
for ( i=0; i<srclen; i+=2 ) {
register unsigned char b1=0, b2=0;
register unsigned char c1 = src[i];
register unsigned char c2 = src[i + 1];
b1 = msPostGISHexDecodeChar[c1];
b2 = msPostGISHexDecodeChar[c2];
*p++ = (b1 << 4) | b2;
}
return(p-dest);
}
return 0;
}
|
C
|
mapserver
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/d1a59e4e845a01d7d7b80ef184b672752a9eae4d
|
d1a59e4e845a01d7d7b80ef184b672752a9eae4d
|
Fixing cross-process postMessage replies on more than two iterations.
When two frames are replying to each other using event.source across processes,
after the first two replies, things break down. The root cause is that in
RenderViewImpl::GetFrameByMappedID, the lookup was incorrect. It is now
properly searching for the remote frame id and returning the local one.
BUG=153445
Review URL: https://chromiumcodereview.appspot.com/11040015
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@159924 0039d316-1c4b-4281-b951-d872f2087c98
|
bool RenderViewImpl::isSelectTrailingWhitespaceEnabled() {
#if defined(OS_WIN)
return true;
#else
return false;
#endif
}
|
bool RenderViewImpl::isSelectTrailingWhitespaceEnabled() {
#if defined(OS_WIN)
return true;
#else
return false;
#endif
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/04cca6c05e4923f1b91e0dddf053e088456d8645
|
04cca6c05e4923f1b91e0dddf053e088456d8645
|
https://bugs.webkit.org/show_bug.cgi?id=45164
Reviewed by Dan Bernstein.
REGRESSION: <a><img align=top></a> Clickable area too large
Make sure to clamp hit testing of quirky inline flow boxes the same way we already clamped
painting.
Source/WebCore:
* rendering/InlineFlowBox.cpp:
(WebCore::InlineFlowBox::nodeAtPoint):
LayoutTests:
* fast/inline/inline-position-top-align-expected.txt: Added.
* fast/inline/inline-position-top-align.html: Added.
git-svn-id: svn://svn.chromium.org/blink/trunk@81055 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
bool InlineFlowBox::nodeAtPoint(const HitTestRequest& request, HitTestResult& result, int x, int y, int tx, int ty)
{
IntRect overflowRect(visualOverflowRect());
flipForWritingMode(overflowRect);
overflowRect.move(tx, ty);
if (!overflowRect.intersects(result.rectForPoint(x, y)))
return false;
for (InlineBox* curr = lastChild(); curr; curr = curr->prevOnLine()) {
if ((curr->renderer()->isText() || !curr->boxModelObject()->hasSelfPaintingLayer()) && curr->nodeAtPoint(request, result, x, y, tx, ty)) {
renderer()->updateHitTestResult(result, IntPoint(x - tx, y - ty));
return true;
}
}
// Now check ourselves. Pixel snap hit testing.
IntRect frameRect = roundedFrameRect();
int minX = frameRect.x();
int minY = frameRect.y();
int width = frameRect.width();
int height = frameRect.height();
// Constrain our hit testing to the line top and bottom if necessary.
bool noQuirksMode = renderer()->document()->inNoQuirksMode();
if (!hasTextChildren() && !noQuirksMode) {
RootInlineBox* rootBox = root();
int& top = isHorizontal() ? minY : minX;
int& logicalHeight = isHorizontal() ? height : width;
int bottom = min(rootBox->lineBottom(), top + logicalHeight);
top = max(rootBox->lineTop(), top);
logicalHeight = bottom - top;
}
// Move x/y to our coordinates.
IntRect rect(minX, minY, width, height);
flipForWritingMode(rect);
rect.move(tx, ty);
if (visibleToHitTesting() && rect.intersects(result.rectForPoint(x, y))) {
renderer()->updateHitTestResult(result, flipForWritingMode(IntPoint(x - tx, y - ty))); // Don't add in m_x or m_y here, we want coords in the containing block's space.
if (!result.addNodeToRectBasedTestResult(renderer()->node(), x, y, rect))
return true;
}
return false;
}
|
bool InlineFlowBox::nodeAtPoint(const HitTestRequest& request, HitTestResult& result, int x, int y, int tx, int ty)
{
IntRect overflowRect(visualOverflowRect());
flipForWritingMode(overflowRect);
overflowRect.move(tx, ty);
if (!overflowRect.intersects(result.rectForPoint(x, y)))
return false;
for (InlineBox* curr = lastChild(); curr; curr = curr->prevOnLine()) {
if ((curr->renderer()->isText() || !curr->boxModelObject()->hasSelfPaintingLayer()) && curr->nodeAtPoint(request, result, x, y, tx, ty)) {
renderer()->updateHitTestResult(result, IntPoint(x - tx, y - ty));
return true;
}
}
FloatPoint boxOrigin = locationIncludingFlipping();
boxOrigin.move(tx, ty);
FloatRect rect(boxOrigin, IntSize(width(), height()));
if (visibleToHitTesting() && rect.intersects(result.rectForPoint(x, y))) {
renderer()->updateHitTestResult(result, flipForWritingMode(IntPoint(x - tx, y - ty))); // Don't add in m_x or m_y here, we want coords in the containing block's space.
if (!result.addNodeToRectBasedTestResult(renderer()->node(), x, y, rect))
return true;
}
return false;
}
|
C
|
Chrome
| 1 |
CVE-2013-0885
|
https://www.cvedetails.com/cve/CVE-2013-0885/
|
CWE-264
|
https://github.com/chromium/chromium/commit/f335421145bb7f82c60fb9d61babcd6ce2e4b21e
|
f335421145bb7f82c60fb9d61babcd6ce2e4b21e
|
Tighten restrictions on hosted apps calling extension APIs
Only allow component apps to make any API calls, and for them only allow the namespaces they explicitly have permission for (plus chrome.test - I need to see if I can rework some WebStore tests to remove even this).
BUG=172369
Review URL: https://chromiumcodereview.appspot.com/12095095
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@180426 0039d316-1c4b-4281-b951-d872f2087c98
|
bool Extension::LoadContentScripts(string16* error) {
if (!manifest_->HasKey(keys::kContentScripts))
return true;
ListValue* list_value;
if (!manifest_->GetList(keys::kContentScripts, &list_value)) {
*error = ASCIIToUTF16(errors::kInvalidContentScriptsList);
return false;
}
for (size_t i = 0; i < list_value->GetSize(); ++i) {
DictionaryValue* content_script = NULL;
if (!list_value->GetDictionary(i, &content_script)) {
*error = ErrorUtils::FormatErrorMessageUTF16(
errors::kInvalidContentScript, base::IntToString(i));
return false;
}
UserScript script;
if (!LoadUserScriptHelper(content_script, i, error, &script))
return false; // Failed to parse script context definition.
script.set_extension_id(id());
if (converted_from_user_script_) {
script.set_emulate_greasemonkey(true);
script.set_match_all_frames(true); // Greasemonkey matches all frames.
}
content_scripts_.push_back(script);
}
return true;
}
|
bool Extension::LoadContentScripts(string16* error) {
if (!manifest_->HasKey(keys::kContentScripts))
return true;
ListValue* list_value;
if (!manifest_->GetList(keys::kContentScripts, &list_value)) {
*error = ASCIIToUTF16(errors::kInvalidContentScriptsList);
return false;
}
for (size_t i = 0; i < list_value->GetSize(); ++i) {
DictionaryValue* content_script = NULL;
if (!list_value->GetDictionary(i, &content_script)) {
*error = ErrorUtils::FormatErrorMessageUTF16(
errors::kInvalidContentScript, base::IntToString(i));
return false;
}
UserScript script;
if (!LoadUserScriptHelper(content_script, i, error, &script))
return false; // Failed to parse script context definition.
script.set_extension_id(id());
if (converted_from_user_script_) {
script.set_emulate_greasemonkey(true);
script.set_match_all_frames(true); // Greasemonkey matches all frames.
}
content_scripts_.push_back(script);
}
return true;
}
|
C
|
Chrome
| 0 |
CVE-2018-6057
|
https://www.cvedetails.com/cve/CVE-2018-6057/
|
CWE-732
|
https://github.com/chromium/chromium/commit/c0c8978849ac57e4ecd613ddc8ff7852a2054734
|
c0c8978849ac57e4ecd613ddc8ff7852a2054734
|
android: Fix sensors in device service.
This patch fixes a bug that prevented more than one sensor data
to be available at once when using the device motion/orientation
API.
The issue was introduced by this other patch [1] which fixed
some security-related issues in the way shared memory region
handles are managed throughout Chromium (more details at
https://crbug.com/789959).
The device service's sensor implementation doesn't work
correctly because it assumes it is possible to create a
writable mapping of a given shared memory region at any
time. This assumption is not correct on Android: once an
Ashmem region has been turned read-only, such mappings
are no longer possible.
To fix the implementation, this CL changes the following:
- PlatformSensor used to require moving a
mojo::ScopedSharedBufferMapping into the newly-created
instance. Said mapping being owned by and destroyed
with the PlatformSensor instance.
With this patch, the constructor instead takes a single
pointer to the corresponding SensorReadingSharedBuffer,
i.e. the area in memory where the sensor-specific
reading data is located, and can be either updated
or read-from.
Note that the PlatformSensor does not own the mapping
anymore.
- PlatformSensorProviderBase holds the *single* writable
mapping that is used to store all SensorReadingSharedBuffer
buffers. It is created just after the region itself,
and thus can be used even after the region's access
mode has been changed to read-only.
Addresses within the mapping will be passed to
PlatformSensor constructors, computed from the
mapping's base address plus a sensor-specific
offset.
The mapping is now owned by the
PlatformSensorProviderBase instance.
Note that, security-wise, nothing changes, because all
mojo::ScopedSharedBufferMapping before the patch actually
pointed to the same writable-page in memory anyway.
Since unit or integration tests didn't catch the regression
when [1] was submitted, this patch was tested manually by
running a newly-built Chrome apk in the Android emulator
and on a real device running Android O.
[1] https://chromium-review.googlesource.com/c/chromium/src/+/805238
BUG=805146
[email protected],[email protected],[email protected],[email protected]
Change-Id: I7d60a1cad278f48c361d2ece5a90de10eb082b44
Reviewed-on: https://chromium-review.googlesource.com/891180
Commit-Queue: David Turner <[email protected]>
Reviewed-by: Reilly Grant <[email protected]>
Reviewed-by: Matthew Cary <[email protected]>
Reviewed-by: Alexandr Ilin <[email protected]>
Cr-Commit-Position: refs/heads/master@{#532607}
|
PlatformSensorFusionTest() {
provider_ = std::make_unique<FakePlatformSensorProvider>();
PlatformSensorProvider::SetProviderForTesting(provider_.get());
}
|
PlatformSensorFusionTest() {
provider_ = std::make_unique<FakePlatformSensorProvider>();
PlatformSensorProvider::SetProviderForTesting(provider_.get());
}
|
C
|
Chrome
| 0 |
CVE-2012-2119
|
https://www.cvedetails.com/cve/CVE-2012-2119/
|
CWE-119
|
https://github.com/torvalds/linux/commit/b92946e2919134ebe2a4083e4302236295ea2a73
|
b92946e2919134ebe2a4083e4302236295ea2a73
|
macvtap: zerocopy: validate vectors before building skb
There are several reasons the vectors need to be validated:
- Return an error when the caller provides more than UIO_MAXIOV vectors.
- Linearize part of the skb when userspace provides more vectors than MAX_SKB_FRAGS.
- Return an error when userspace provides vectors whose total length may exceed
  MAX_SKB_FRAGS * PAGE_SIZE.
Signed-off-by: Jason Wang <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
|
static int macvtap_newlink(struct net *src_net,
struct net_device *dev,
struct nlattr *tb[],
struct nlattr *data[])
{
/* Don't put anything that may fail after macvlan_common_newlink
* because we can't undo what it does.
*/
return macvlan_common_newlink(src_net, dev, tb, data,
macvtap_receive, macvtap_forward);
}
|
static int macvtap_newlink(struct net *src_net,
struct net_device *dev,
struct nlattr *tb[],
struct nlattr *data[])
{
/* Don't put anything that may fail after macvlan_common_newlink
* because we can't undo what it does.
*/
return macvlan_common_newlink(src_net, dev, tb, data,
macvtap_receive, macvtap_forward);
}
|
C
|
linux
| 0 |
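A simplified user-space sketch of the vector validation the macvtap commit describes: cap the number of iovecs and reject a total length that could need more fragments than an skb can hold. The constants and types here are representative stand-ins, not the kernel's:

```c
#include <stdio.h>
#include <stddef.h>
#include <sys/uio.h>

#define EMSGSIZE             90
#define UIO_MAXIOV_SKETCH    1024
#define MAX_SKB_FRAGS_SKETCH 17
#define PAGE_SIZE_SKETCH     4096

/* Validate the caller-supplied vectors before any skb is built. */
static int validate_iov(const struct iovec *iov, unsigned long count)
{
    size_t total = 0;

    if (count > UIO_MAXIOV_SKETCH)
        return -EMSGSIZE;                 /* too many vectors */
    for (unsigned long i = 0; i < count; i++)
        total += iov[i].iov_len;
    if (total > (size_t) MAX_SKB_FRAGS_SKETCH * PAGE_SIZE_SKETCH)
        return -EMSGSIZE;                 /* would not fit in skb frags */
    return 0;
}

int main(void)
{
    char buf[16];
    struct iovec ok   = { buf, sizeof(buf) };
    struct iovec huge = { buf, 1 << 20 };   /* claimed length, never read here */
    printf("small vector: %d\n", validate_iov(&ok, 1));   /*  0  */
    printf("huge vector:  %d\n", validate_iov(&huge, 1)); /* -90 */
    return 0;
}
```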
CVE-2018-16790
|
https://www.cvedetails.com/cve/CVE-2018-16790/
|
CWE-125
|
https://github.com/mongodb/mongo-c-driver/commit/0d9a4d98bfdf4acd2c0138d4aaeb4e2e0934bd84
|
0d9a4d98bfdf4acd2c0138d4aaeb4e2e0934bd84
|
Fix for CVE-2018-16790 -- Verify bounds before binary length read.
As reported here: https://jira.mongodb.org/browse/CDRIVER-2819,
a heap overread occurs due to a failure to correctly verify data
bounds.
In the original check, len - o returns the data left including the
sizeof(l) we just read. Instead, the comparison should check
against the data left NOT including the binary int32, i.e. just
subtype (byte*) instead of int32 subtype (byte*).
Added in test for corrupted BSON example.
|
visit_corrupt (const bson_iter_t *iter, void *data)
{
*((bool *) data) = true;
}
|
visit_corrupt (const bson_iter_t *iter, void *data)
{
*((bool *) data) = true;
}
|
C
|
mongo-c-driver
| 0 |
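Per the commit message above, the corrected bound compares the stored binary length against the bytes remaining after the int32 length field itself, rather than including it. A standalone sketch of that check over a simplified model of the layout (int32 length, one subtype byte, then the payload); this is not the actual bson_iter code:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* o is the offset of the 4-byte binary length inside a buffer of len
 * bytes; after the length come 1 subtype byte and then l data bytes. */
static bool binary_fits(uint32_t l, size_t o, size_t len)
{
    if (o + sizeof(uint32_t) + 1 > len)
        return false;                   /* no room for length + subtype */
    /* The buggy form compared l against (len - o), which still counts
     * the int32 just read; the fix subtracts it before comparing. */
    return l <= len - o - sizeof(uint32_t) - 1;
}

int main(void)
{
    /* 10-byte buffer, length field at offset 0: at most 5 data bytes fit. */
    printf("l=5 fits: %d\n", binary_fits(5, 0, 10));  /* 1 */
    printf("l=8 fits: %d\n", binary_fits(8, 0, 10));  /* 0: over-read before fix */
    return 0;
}
```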
CVE-2011-2840
|
https://www.cvedetails.com/cve/CVE-2011-2840/
|
CWE-20
|
https://github.com/chromium/chromium/commit/2db5a2048dfcacfe5ad4311c2b1e435c4c67febc
|
2db5a2048dfcacfe5ad4311c2b1e435c4c67febc
|
chromeos: fix bug where "aw snap" page replaces first tab if it was a NTP when closing window with > 1 tab.
BUG=chromium-os:12088
TEST=verify bug per bug report.
Review URL: http://codereview.chromium.org/6882058
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@83031 0039d316-1c4b-4281-b951-d872f2087c98
|
TabStripModel::InsertionPolicy TabStripModel::insertion_policy() const {
return order_controller_->insertion_policy();
}
|
TabStripModel::InsertionPolicy TabStripModel::insertion_policy() const {
return order_controller_->insertion_policy();
}
|
C
|
Chrome
| 0 |
CVE-2016-1618
|
https://www.cvedetails.com/cve/CVE-2016-1618/
|
CWE-310
|
https://github.com/chromium/chromium/commit/0d151e09e13a704e9738ea913d117df7282e6c7d
|
0d151e09e13a704e9738ea913d117df7282e6c7d
|
Add assertions that the empty Platform::cryptographicallyRandomValues() overrides are not being used.
These implementations are not safe and look scary if not accompanied by an assertion. Also one of the comments was incorrect.
BUG=552749
Review URL: https://codereview.chromium.org/1419293005
Cr-Commit-Position: refs/heads/master@{#359229}
|
BlinkMediaTestSuite::~BlinkMediaTestSuite() {}
|
BlinkMediaTestSuite::~BlinkMediaTestSuite() {}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/e49d943e9f5f90411313e64d0ae6b646edc85043
|
e49d943e9f5f90411313e64d0ae6b646edc85043
|
Use document referrer policy when preloading
Previously, preload requests used the referrer policy from meta tags
encountered during scanning, but not from headers delivered with the
page. This CL uses the document's current referrer policy when the
preload scan starts.
BUG=605451
Review-Url: https://codereview.chromium.org/1913983002
Cr-Commit-Position: refs/heads/master@{#390264}
|
static bool mediaAttributeMatches(const MediaValuesCached& mediaValues, const String& attributeValue)
{
MediaQuerySet* mediaQueries = MediaQuerySet::createOffMainThread(attributeValue);
MediaQueryEvaluator mediaQueryEvaluator(mediaValues);
return mediaQueryEvaluator.eval(mediaQueries);
}
|
static bool mediaAttributeMatches(const MediaValuesCached& mediaValues, const String& attributeValue)
{
MediaQuerySet* mediaQueries = MediaQuerySet::createOffMainThread(attributeValue);
MediaQueryEvaluator mediaQueryEvaluator(mediaValues);
return mediaQueryEvaluator.eval(mediaQueries);
}
|
C
|
Chrome
| 0 |
CVE-2013-2871
|
https://www.cvedetails.com/cve/CVE-2013-2871/
|
CWE-20
|
https://github.com/chromium/chromium/commit/bb9cfb0aba25f4b13e57bdd4a9fac80ba071e7b9
|
bb9cfb0aba25f4b13e57bdd4a9fac80ba071e7b9
|
Setting input.x-webkit-speech should not cause focus change
In r150866, we introduced element()->focus() in destroyShadowSubtree()
to retain focus on <input> when its type attribute gets changed.
But when the x-webkit-speech attribute is changed, the element is detached
before calling destroyShadowSubtree(), and element()->focus() fails.
This patch moves detach() after destroyShadowSubtree() to fix the
problem.
BUG=243818
TEST=fast/forms/input-type-change-focusout.html
NOTRY=true
Review URL: https://chromiumcodereview.appspot.com/16084005
git-svn-id: svn://svn.chromium.org/blink/trunk@151444 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void HTMLInputElement::restoreFormControlState(const FormControlState& state)
{
m_inputType->restoreFormControlState(state);
m_stateRestored = true;
}
|
void HTMLInputElement::restoreFormControlState(const FormControlState& state)
{
m_inputType->restoreFormControlState(state);
m_stateRestored = true;
}
|
C
|
Chrome
| 0 |
CVE-2018-8970
|
https://www.cvedetails.com/cve/CVE-2018-8970/
|
CWE-295
|
https://github.com/libressl-portable/openbsd/commit/0654414afcce51a16d35d05060190a3ec4618d42
|
0654414afcce51a16d35d05060190a3ec4618d42
|
Call strlen() if name length provided is 0, like OpenSSL does.
Issue noticed by Christian Heimes <[email protected]>
ok deraadt@ jsing@
|
X509_VERIFY_PARAM_set1_policies(X509_VERIFY_PARAM *param,
STACK_OF(ASN1_OBJECT) *policies)
{
int i;
ASN1_OBJECT *oid, *doid;
if (!param)
return 0;
if (param->policies)
sk_ASN1_OBJECT_pop_free(param->policies, ASN1_OBJECT_free);
if (!policies) {
param->policies = NULL;
return 1;
}
param->policies = sk_ASN1_OBJECT_new_null();
if (!param->policies)
return 0;
for (i = 0; i < sk_ASN1_OBJECT_num(policies); i++) {
oid = sk_ASN1_OBJECT_value(policies, i);
doid = OBJ_dup(oid);
if (!doid)
return 0;
if (!sk_ASN1_OBJECT_push(param->policies, doid)) {
ASN1_OBJECT_free(doid);
return 0;
}
}
param->flags |= X509_V_FLAG_POLICY_CHECK;
return 1;
}
|
X509_VERIFY_PARAM_set1_policies(X509_VERIFY_PARAM *param,
STACK_OF(ASN1_OBJECT) *policies)
{
int i;
ASN1_OBJECT *oid, *doid;
if (!param)
return 0;
if (param->policies)
sk_ASN1_OBJECT_pop_free(param->policies, ASN1_OBJECT_free);
if (!policies) {
param->policies = NULL;
return 1;
}
param->policies = sk_ASN1_OBJECT_new_null();
if (!param->policies)
return 0;
for (i = 0; i < sk_ASN1_OBJECT_num(policies); i++) {
oid = sk_ASN1_OBJECT_value(policies, i);
doid = OBJ_dup(oid);
if (!doid)
return 0;
if (!sk_ASN1_OBJECT_push(param->policies, doid)) {
ASN1_OBJECT_free(doid);
return 0;
}
}
param->flags |= X509_V_FLAG_POLICY_CHECK;
return 1;
}
|
C
|
openbsd
| 0 |
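The LibreSSL change makes a name length of 0 mean "measure the string with strlen()", as OpenSSL does. A small sketch of that argument normalization in a hypothetical copy helper, not the actual LibreSSL function:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy a host name; namelen == 0 means the name is NUL-terminated and
 * its length should be computed here, matching OpenSSL's behaviour. */
static char *copy_name(const char *name, size_t namelen)
{
    char *copy;

    if (name == NULL)
        return NULL;
    if (namelen == 0)
        namelen = strlen(name);   /* the behaviour the commit adds */
    copy = malloc(namelen + 1);
    if (copy == NULL)
        return NULL;
    memcpy(copy, name, namelen);
    copy[namelen] = '\0';
    return copy;
}

int main(void)
{
    char *a = copy_name("example.org", 0);   /* length inferred */
    char *b = copy_name("example.org", 7);   /* explicit length */
    printf("%s / %s\n", a, b);               /* example.org / example */
    free(a);
    free(b);
    return 0;
}
```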
CVE-2016-2440
|
https://www.cvedetails.com/cve/CVE-2016-2440/
|
CWE-264
|
https://android.googlesource.com/platform/frameworks/native/+/a59b827869a2ea04022dd225007f29af8d61837a
|
a59b827869a2ea04022dd225007f29af8d61837a
|
Fix issue #27252896: Security Vulnerability -- weak binder
Sending transaction to freed BBinder through weak handle
can cause use of a (mostly) freed object. We need to try to
safely promote to a strong reference first.
Change-Id: Ic9c6940fa824980472e94ed2dfeca52a6b0fd342
(cherry picked from commit c11146106f94e07016e8e26e4f8628f9a0c73199)
|
status_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags)
{
status_t err = data.errorCheck();
flags |= TF_ACCEPT_FDS;
IF_LOG_TRANSACTIONS() {
TextOutput::Bundle _b(alog);
alog << "BC_TRANSACTION thr " << (void*)pthread_self() << " / hand "
<< handle << " / code " << TypeCode(code) << ": "
<< indent << data << dedent << endl;
}
if (err == NO_ERROR) {
LOG_ONEWAY(">>>> SEND from pid %d uid %d %s", getpid(), getuid(),
(flags & TF_ONE_WAY) == 0 ? "READ REPLY" : "ONE WAY");
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
}
if (err != NO_ERROR) {
if (reply) reply->setError(err);
return (mLastError = err);
}
if ((flags & TF_ONE_WAY) == 0) {
#if 0
if (code == 4) { // relayout
ALOGI(">>>>>> CALLING transaction 4");
} else {
ALOGI(">>>>>> CALLING transaction %d", code);
}
#endif
if (reply) {
err = waitForResponse(reply);
} else {
Parcel fakeReply;
err = waitForResponse(&fakeReply);
}
#if 0
if (code == 4) { // relayout
ALOGI("<<<<<< RETURNING transaction 4");
} else {
ALOGI("<<<<<< RETURNING transaction %d", code);
}
#endif
IF_LOG_TRANSACTIONS() {
TextOutput::Bundle _b(alog);
alog << "BR_REPLY thr " << (void*)pthread_self() << " / hand "
<< handle << ": ";
if (reply) alog << indent << *reply << dedent << endl;
else alog << "(none requested)" << endl;
}
} else {
err = waitForResponse(NULL, NULL);
}
return err;
}
|
status_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags)
{
status_t err = data.errorCheck();
flags |= TF_ACCEPT_FDS;
IF_LOG_TRANSACTIONS() {
TextOutput::Bundle _b(alog);
alog << "BC_TRANSACTION thr " << (void*)pthread_self() << " / hand "
<< handle << " / code " << TypeCode(code) << ": "
<< indent << data << dedent << endl;
}
if (err == NO_ERROR) {
LOG_ONEWAY(">>>> SEND from pid %d uid %d %s", getpid(), getuid(),
(flags & TF_ONE_WAY) == 0 ? "READ REPLY" : "ONE WAY");
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
}
if (err != NO_ERROR) {
if (reply) reply->setError(err);
return (mLastError = err);
}
if ((flags & TF_ONE_WAY) == 0) {
#if 0
if (code == 4) { // relayout
ALOGI(">>>>>> CALLING transaction 4");
} else {
ALOGI(">>>>>> CALLING transaction %d", code);
}
#endif
if (reply) {
err = waitForResponse(reply);
} else {
Parcel fakeReply;
err = waitForResponse(&fakeReply);
}
#if 0
if (code == 4) { // relayout
ALOGI("<<<<<< RETURNING transaction 4");
} else {
ALOGI("<<<<<< RETURNING transaction %d", code);
}
#endif
IF_LOG_TRANSACTIONS() {
TextOutput::Bundle _b(alog);
alog << "BR_REPLY thr " << (void*)pthread_self() << " / hand "
<< handle << ": ";
if (reply) alog << indent << *reply << dedent << endl;
else alog << "(none requested)" << endl;
}
} else {
err = waitForResponse(NULL, NULL);
}
return err;
}
|
C
|
Android
| 0 |
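The binder commit's point is that a transaction arriving through a weak handle must first try to safely promote the target to a strong reference, since the underlying object may already be mostly freed. A user-space C sketch of that promote-only-if-still-alive pattern using an atomic strong count; this is a stand-in for the Android refcounting code, not its actual API:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj { atomic_int strong; };

/* Promote a weak reference: take a strong ref only if at least one
 * strong ref still exists; never resurrect a dying object. */
static bool try_promote(struct obj *o)
{
    int cur = atomic_load(&o->strong);
    while (cur > 0) {
        /* On failure, cur is reloaded with the current value and we retry. */
        if (atomic_compare_exchange_weak(&o->strong, &cur, cur + 1))
            return true;   /* safe to transact against o now */
    }
    return false;          /* object already released: drop the transaction */
}

int main(void)
{
    struct obj live = { 1 }, dead = { 0 };
    printf("live promoted: %d\n", try_promote(&live)); /* 1 */
    printf("dead promoted: %d\n", try_promote(&dead)); /* 0 */
    return 0;
}
```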
CVE-2018-16790
|
https://www.cvedetails.com/cve/CVE-2018-16790/
|
CWE-125
|
https://github.com/mongodb/mongo-c-driver/commit/0d9a4d98bfdf4acd2c0138d4aaeb4e2e0934bd84
|
0d9a4d98bfdf4acd2c0138d4aaeb4e2e0934bd84
|
Fix for CVE-2018-16790 -- Verify bounds before binary length read.
As reported here: https://jira.mongodb.org/browse/CDRIVER-2819,
a heap overread occurs due to a failure to correctly verify data
bounds.
In the original check, len - o returns the data left including the
sizeof(l) we just read. Instead, the comparison should check
against the data left NOT including the binary int32, i.e. just
subtype (byte*) instead of int32 subtype (byte*).
Added in test for corrupted BSON example.
|
test_bson_regex_lengths (void)
{
bson_t new = BSON_INITIALIZER;
bson_oid_t oid;
bson_oid_init_from_string (&oid, "1234567890abcdef12345678");
bson_append_oid (&new, "0123456", -1, &oid);
bson_append_regex (&new,
"0_________1_________2_________3___4",
-1,
"0_________1_________2_________3_________4_________5___4",
"i");
ASSERT (new.len == 121);
ASSERT (new.flags &BSON_FLAG_STATIC);
ASSERT (!(new.flags &BSON_FLAG_INLINE));
bson_destroy (&new);
}
|
test_bson_regex_lengths (void)
{
bson_t new = BSON_INITIALIZER;
bson_oid_t oid;
bson_oid_init_from_string (&oid, "1234567890abcdef12345678");
bson_append_oid (&new, "0123456", -1, &oid);
bson_append_regex (&new,
"0_________1_________2_________3___4",
-1,
"0_________1_________2_________3_________4_________5___4",
"i");
ASSERT (new.len == 121);
ASSERT (new.flags &BSON_FLAG_STATIC);
ASSERT (!(new.flags &BSON_FLAG_INLINE));
bson_destroy (&new);
}
|
C
|
mongo-c-driver
| 0 |
CVE-2018-1000852
|
https://www.cvedetails.com/cve/CVE-2018-1000852/
| null |
https://github.com/FreeRDP/FreeRDP/pull/4871/commits/baee520e3dd9be6511c45a14c5f5e77784de1471
|
baee520e3dd9be6511c45a14c5f5e77784de1471
|
Fix for #4866: Added additional length checks
|
static UINT drdynvc_virtual_channel_event_data_received(drdynvcPlugin* drdynvc,
void* pData, UINT32 dataLength, UINT32 totalLength, UINT32 dataFlags)
{
wStream* data_in;
if ((dataFlags & CHANNEL_FLAG_SUSPEND) || (dataFlags & CHANNEL_FLAG_RESUME))
{
return CHANNEL_RC_OK;
}
if (dataFlags & CHANNEL_FLAG_FIRST)
{
if (drdynvc->data_in)
Stream_Free(drdynvc->data_in, TRUE);
drdynvc->data_in = Stream_New(NULL, totalLength);
}
if (!(data_in = drdynvc->data_in))
{
WLog_Print(drdynvc->log, WLOG_ERROR, "Stream_New failed!");
return CHANNEL_RC_NO_MEMORY;
}
if (!Stream_EnsureRemainingCapacity(data_in, dataLength))
{
WLog_Print(drdynvc->log, WLOG_ERROR, "Stream_EnsureRemainingCapacity failed!");
Stream_Free(drdynvc->data_in, TRUE);
drdynvc->data_in = NULL;
return ERROR_INTERNAL_ERROR;
}
Stream_Write(data_in, pData, dataLength);
if (dataFlags & CHANNEL_FLAG_LAST)
{
if (Stream_Capacity(data_in) != Stream_GetPosition(data_in))
{
WLog_Print(drdynvc->log, WLOG_ERROR, "drdynvc_plugin_process_received: read error");
return ERROR_INVALID_DATA;
}
drdynvc->data_in = NULL;
Stream_SealLength(data_in);
Stream_SetPosition(data_in, 0);
if (!MessageQueue_Post(drdynvc->queue, NULL, 0, (void*) data_in, NULL))
{
WLog_Print(drdynvc->log, WLOG_ERROR, "MessageQueue_Post failed!");
return ERROR_INTERNAL_ERROR;
}
}
return CHANNEL_RC_OK;
}
|
static UINT drdynvc_virtual_channel_event_data_received(drdynvcPlugin* drdynvc,
void* pData, UINT32 dataLength, UINT32 totalLength, UINT32 dataFlags)
{
wStream* data_in;
if ((dataFlags & CHANNEL_FLAG_SUSPEND) || (dataFlags & CHANNEL_FLAG_RESUME))
{
return CHANNEL_RC_OK;
}
if (dataFlags & CHANNEL_FLAG_FIRST)
{
if (drdynvc->data_in)
Stream_Free(drdynvc->data_in, TRUE);
drdynvc->data_in = Stream_New(NULL, totalLength);
}
if (!(data_in = drdynvc->data_in))
{
WLog_Print(drdynvc->log, WLOG_ERROR, "Stream_New failed!");
return CHANNEL_RC_NO_MEMORY;
}
if (!Stream_EnsureRemainingCapacity(data_in, (int) dataLength))
{
WLog_Print(drdynvc->log, WLOG_ERROR, "Stream_EnsureRemainingCapacity failed!");
Stream_Free(drdynvc->data_in, TRUE);
drdynvc->data_in = NULL;
return ERROR_INTERNAL_ERROR;
}
Stream_Write(data_in, pData, dataLength);
if (dataFlags & CHANNEL_FLAG_LAST)
{
if (Stream_Capacity(data_in) != Stream_GetPosition(data_in))
{
WLog_Print(drdynvc->log, WLOG_ERROR, "drdynvc_plugin_process_received: read error");
return ERROR_INVALID_DATA;
}
drdynvc->data_in = NULL;
Stream_SealLength(data_in);
Stream_SetPosition(data_in, 0);
if (!MessageQueue_Post(drdynvc->queue, NULL, 0, (void*) data_in, NULL))
{
WLog_Print(drdynvc->log, WLOG_ERROR, "MessageQueue_Post failed!");
return ERROR_INTERNAL_ERROR;
}
}
return CHANNEL_RC_OK;
}
|
C
|
FreeRDP
| 1 |
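A minimal sketch of the kind of length check the commit above refers to when reassembling fragmented channel data: each fragment must fit inside the total announced by the first fragment. The reassembly_sketch type and append_fragment helper are assumptions for illustration, not the drdynvc plugin's actual types.

#include <stdint.h>
#include <stddef.h>

struct reassembly_sketch {
   uint8_t  *buf;
   uint32_t  total;     /* capacity announced by the FIRST fragment */
   uint32_t  used;      /* bytes accumulated so far */
};

static int
append_fragment (struct reassembly_sketch *r,
                 const uint8_t *data, uint32_t len)
{
   if (len > r->total || r->used > r->total - len)
      return -1;               /* fragment would exceed the announced total */

   for (uint32_t i = 0; i < len; i++)
      r->buf[r->used + i] = data[i];
   r->used += len;
   return 0;
}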
CVE-2016-7915
|
https://www.cvedetails.com/cve/CVE-2016-7915/
|
CWE-125
|
https://github.com/torvalds/linux/commit/50220dead1650609206efe91f0cc116132d59b3f
|
50220dead1650609206efe91f0cc116132d59b3f
|
HID: core: prevent out-of-bound readings
Plugging a Logitech DJ receiver with KASAN activated raises a bunch of
out-of-bound readings.
The fields are allocated up to MAX_USAGE, meaning that potentially, we do
not have enough fields to fit the incoming values.
Add checks and silence KASAN.
Signed-off-by: Benjamin Tissoires <[email protected]>
Signed-off-by: Jiri Kosina <[email protected]>
|
s32 hid_snto32(__u32 value, unsigned n)
{
return snto32(value, n);
}
|
s32 hid_snto32(__u32 value, unsigned n)
{
return snto32(value, n);
}
|
C
|
linux
| 0 |
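The out-of-bound reads described in the commit message above come down to indexing a fixed-size value array with a device-controlled count. Below is a hedged sketch of the guard, with hypothetical names (hid_field_sketch, store_report_values) rather than the kernel's struct hid_field or the actual patch.

#include <stdint.h>

struct hid_field_sketch {
   int32_t *value;        /* allocated with max_values entries */
   unsigned max_values;
};

/* Sketch: clamp the number of report values copied into the fixed-size
 * array so a device cannot force reads or writes past the allocation. */
static void
store_report_values (struct hid_field_sketch *f,
                     const int32_t *incoming, unsigned count)
{
   unsigned n;

   if (count > f->max_values)   /* refuse to index past the allocation */
      count = f->max_values;

   for (n = 0; n < count; n++)
      f->value[n] = incoming[n];
}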
CVE-2011-2782
|
https://www.cvedetails.com/cve/CVE-2011-2782/
|
CWE-264
|
https://github.com/chromium/chromium/commit/20d1c99d9b53a0b2b419aae0075494a9d0b86daf
|
20d1c99d9b53a0b2b419aae0075494a9d0b86daf
|
Ensure URL is updated after a cross-site navigation is pre-empted by
an "ignored" navigation.
BUG=77507
TEST=NavigationControllerTest.LoadURL_IgnorePreemptsPending
Review URL: http://codereview.chromium.org/6826015
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@81307 0039d316-1c4b-4281-b951-d872f2087c98
|
void NavigationController::RendererDidNavigateToNewPage(
const ViewHostMsg_FrameNavigate_Params& params, bool* did_replace_entry) {
NavigationEntry* new_entry;
if (pending_entry_) {
new_entry = new NavigationEntry(*pending_entry_);
new_entry->set_page_type(NORMAL_PAGE);
} else {
new_entry = new NavigationEntry;
}
new_entry->set_url(params.url);
if (new_entry->update_virtual_url_with_url())
UpdateVirtualURLToURL(new_entry, params.url);
new_entry->set_referrer(params.referrer);
new_entry->set_page_id(params.page_id);
new_entry->set_transition_type(params.transition);
new_entry->set_site_instance(tab_contents_->GetSiteInstance());
new_entry->set_has_post_data(params.is_post);
InsertOrReplaceEntry(new_entry, *did_replace_entry);
}
|
void NavigationController::RendererDidNavigateToNewPage(
const ViewHostMsg_FrameNavigate_Params& params, bool* did_replace_entry) {
NavigationEntry* new_entry;
if (pending_entry_) {
new_entry = new NavigationEntry(*pending_entry_);
new_entry->set_page_type(NORMAL_PAGE);
} else {
new_entry = new NavigationEntry;
}
new_entry->set_url(params.url);
if (new_entry->update_virtual_url_with_url())
UpdateVirtualURLToURL(new_entry, params.url);
new_entry->set_referrer(params.referrer);
new_entry->set_page_id(params.page_id);
new_entry->set_transition_type(params.transition);
new_entry->set_site_instance(tab_contents_->GetSiteInstance());
new_entry->set_has_post_data(params.is_post);
InsertOrReplaceEntry(new_entry, *did_replace_entry);
}
|
C
|
Chrome
| 0 |
CVE-2016-3835
|
https://www.cvedetails.com/cve/CVE-2016-3835/
|
CWE-200
|
https://android.googlesource.com/platform/hardware/qcom/media/+/7558d03e6498e970b761aa44fff6b2c659202d95
|
7558d03e6498e970b761aa44fff6b2c659202d95
|
DO NOT MERGE mm-video-v4l2: venc: add checks before accessing heap pointers
Heap pointers do not point to user virtual addresses in case
of secure session.
Set them to NULL and add checks to avoid accessing them
Bug: 28815329
Bug: 28920116
Change-Id: I94fd5808e753b58654d65e175d3857ef46ffba26
|
void omx_video::omx_release_meta_buffer(OMX_BUFFERHEADERTYPE *buffer)
{
if (buffer && meta_mode_enable) {
encoder_media_buffer_type *media_ptr;
struct pmem Input_pmem;
unsigned int index_pmem = 0;
bool meta_error = false;
index_pmem = (buffer - m_inp_mem_ptr);
if (mUsesColorConversion &&
(index_pmem < m_sInPortDef.nBufferCountActual)) {
if (!dev_free_buf((&m_pInput_pmem[index_pmem]),PORT_INDEX_IN)) {
DEBUG_PRINT_ERROR("omx_release_meta_buffer dev free failed");
}
} else {
media_ptr = (encoder_media_buffer_type *) buffer->pBuffer;
if (media_ptr && media_ptr->meta_handle) {
if (media_ptr->buffer_type == kMetadataBufferTypeCameraSource &&
media_ptr->meta_handle->numFds == 1 &&
media_ptr->meta_handle->numInts >= 2) {
Input_pmem.fd = media_ptr->meta_handle->data[0];
Input_pmem.buffer = media_ptr;
Input_pmem.size = media_ptr->meta_handle->data[2];
Input_pmem.offset = media_ptr->meta_handle->data[1];
DEBUG_PRINT_LOW("EBD fd = %d, offset = %d, size = %d",Input_pmem.fd,
Input_pmem.offset,
Input_pmem.size);
} else if (media_ptr->buffer_type == kMetadataBufferTypeGrallocSource) {
private_handle_t *handle = (private_handle_t *)media_ptr->meta_handle;
Input_pmem.buffer = media_ptr;
Input_pmem.fd = handle->fd;
Input_pmem.offset = 0;
Input_pmem.size = handle->size;
} else {
meta_error = true;
DEBUG_PRINT_ERROR(" Meta Error set in EBD");
}
if (!meta_error)
meta_error = !dev_free_buf(&Input_pmem,PORT_INDEX_IN);
if (meta_error) {
DEBUG_PRINT_ERROR(" Warning dev_free_buf failed flush value is %d",
input_flush_progress);
}
}
}
}
}
|
void omx_video::omx_release_meta_buffer(OMX_BUFFERHEADERTYPE *buffer)
{
if (buffer && meta_mode_enable) {
encoder_media_buffer_type *media_ptr;
struct pmem Input_pmem;
unsigned int index_pmem = 0;
bool meta_error = false;
index_pmem = (buffer - m_inp_mem_ptr);
if (mUsesColorConversion &&
(index_pmem < m_sInPortDef.nBufferCountActual)) {
if (!dev_free_buf((&m_pInput_pmem[index_pmem]),PORT_INDEX_IN)) {
DEBUG_PRINT_ERROR("omx_release_meta_buffer dev free failed");
}
} else {
media_ptr = (encoder_media_buffer_type *) buffer->pBuffer;
if (media_ptr && media_ptr->meta_handle) {
if (media_ptr->buffer_type == kMetadataBufferTypeCameraSource &&
media_ptr->meta_handle->numFds == 1 &&
media_ptr->meta_handle->numInts >= 2) {
Input_pmem.fd = media_ptr->meta_handle->data[0];
Input_pmem.buffer = media_ptr;
Input_pmem.size = media_ptr->meta_handle->data[2];
Input_pmem.offset = media_ptr->meta_handle->data[1];
DEBUG_PRINT_LOW("EBD fd = %d, offset = %d, size = %d",Input_pmem.fd,
Input_pmem.offset,
Input_pmem.size);
} else if (media_ptr->buffer_type == kMetadataBufferTypeGrallocSource) {
private_handle_t *handle = (private_handle_t *)media_ptr->meta_handle;
Input_pmem.buffer = media_ptr;
Input_pmem.fd = handle->fd;
Input_pmem.offset = 0;
Input_pmem.size = handle->size;
} else {
meta_error = true;
DEBUG_PRINT_ERROR(" Meta Error set in EBD");
}
if (!meta_error)
meta_error = !dev_free_buf(&Input_pmem,PORT_INDEX_IN);
if (meta_error) {
DEBUG_PRINT_ERROR(" Warning dev_free_buf failed flush value is %d",
input_flush_progress);
}
}
}
}
}
|
C
|
Android
| 0 |
CVE-2014-1713
|
https://www.cvedetails.com/cve/CVE-2014-1713/
|
CWE-399
|
https://github.com/chromium/chromium/commit/f85a87ec670ad0fce9d98d90c9a705b72a288154
|
f85a87ec670ad0fce9d98d90c9a705b72a288154
|
document.location bindings fix
BUG=352374
[email protected]
Review URL: https://codereview.chromium.org/196343011
git-svn-id: svn://svn.chromium.org/blink/trunk@169176 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
static void testInterfaceEmptyAttributeAttributeGetter(const v8::PropertyCallbackInfo<v8::Value>& info)
{
TestObjectPython* imp = V8TestObjectPython::toNative(info.Holder());
v8SetReturnValueFast(info, WTF::getPtr(imp->testInterfaceEmptyAttribute()), imp);
}
|
static void testInterfaceEmptyAttributeAttributeGetter(const v8::PropertyCallbackInfo<v8::Value>& info)
{
TestObjectPython* imp = V8TestObjectPython::toNative(info.Holder());
v8SetReturnValueFast(info, WTF::getPtr(imp->testInterfaceEmptyAttribute()), imp);
}
|
C
|
Chrome
| 0 |
CVE-2013-6051
|
https://www.cvedetails.com/cve/CVE-2013-6051/
| null |
https://git.savannah.gnu.org/gitweb/?p=quagga.git;a=commitdiff;h=8794e8d229dc9fe29ea31424883433d4880ef408
|
8794e8d229dc9fe29ea31424883433d4880ef408
| null |
bgp_attr_nexthop (struct bgp_attr_parser_args *args)
{
struct peer *const peer = args->peer;
struct attr *const attr = args->attr;
const bgp_size_t length = args->length;
in_addr_t nexthop_h, nexthop_n;
/* Check nexthop attribute length. */
if (length != 4)
{
zlog (peer->log, LOG_ERR, "Nexthop attribute length isn't four [%d]",
length);
return bgp_attr_malformed (args,
BGP_NOTIFY_UPDATE_ATTR_LENG_ERR,
args->total);
}
/* According to section 6.3 of RFC4271, syntactically incorrect NEXT_HOP
attribute must result in a NOTIFICATION message (this is implemented below).
At the same time, semantically incorrect NEXT_HOP is more likely to be just
logged locally (this is implemented somewhere else). The UPDATE message
gets ignored in any of these cases. */
nexthop_n = stream_get_ipv4 (peer->ibuf);
nexthop_h = ntohl (nexthop_n);
if (IPV4_NET0 (nexthop_h) || IPV4_NET127 (nexthop_h) || IPV4_CLASS_DE (nexthop_h))
{
char buf[INET_ADDRSTRLEN];
inet_ntop (AF_INET, &nexthop_h, buf, INET_ADDRSTRLEN);
zlog (peer->log, LOG_ERR, "Martian nexthop %s", buf);
return bgp_attr_malformed (args,
BGP_NOTIFY_UPDATE_INVAL_NEXT_HOP,
args->total);
}
attr->nexthop.s_addr = nexthop_n;
attr->flag |= ATTR_FLAG_BIT (BGP_ATTR_NEXT_HOP);
return BGP_ATTR_PARSE_PROCEED;
}
|
bgp_attr_nexthop (struct bgp_attr_parser_args *args)
{
struct peer *const peer = args->peer;
struct attr *const attr = args->attr;
const bgp_size_t length = args->length;
in_addr_t nexthop_h, nexthop_n;
/* Check nexthop attribute length. */
if (length != 4)
{
zlog (peer->log, LOG_ERR, "Nexthop attribute length isn't four [%d]",
length);
return bgp_attr_malformed (args,
BGP_NOTIFY_UPDATE_ATTR_LENG_ERR,
args->total);
}
/* According to section 6.3 of RFC4271, syntactically incorrect NEXT_HOP
attribute must result in a NOTIFICATION message (this is implemented below).
At the same time, semantically incorrect NEXT_HOP is more likely to be just
logged locally (this is implemented somewhere else). The UPDATE message
gets ignored in any of these cases. */
nexthop_n = stream_get_ipv4 (peer->ibuf);
nexthop_h = ntohl (nexthop_n);
if (IPV4_NET0 (nexthop_h) || IPV4_NET127 (nexthop_h) || IPV4_CLASS_DE (nexthop_h))
{
char buf[INET_ADDRSTRLEN];
inet_ntop (AF_INET, &nexthop_h, buf, INET_ADDRSTRLEN);
zlog (peer->log, LOG_ERR, "Martian nexthop %s", buf);
return bgp_attr_malformed (args,
BGP_NOTIFY_UPDATE_INVAL_NEXT_HOP,
args->total);
}
attr->nexthop.s_addr = nexthop_n;
attr->flag |= ATTR_FLAG_BIT (BGP_ATTR_NEXT_HOP);
return BGP_ATTR_PARSE_PROCEED;
}
|
C
|
savannah
| 0 |
CVE-2016-10517
|
https://www.cvedetails.com/cve/CVE-2016-10517/
|
CWE-254
|
https://github.com/antirez/redis/commit/874804da0c014a7d704b3d285aa500098a931f50
|
874804da0c014a7d704b3d285aa500098a931f50
|
Security: Cross Protocol Scripting protection.
This is an attempt at mitigating problems due to cross protocol
scripting, an attack targeting services using line oriented protocols
like Redis that can accept HTTP requests as valid protocol, by
discarding the invalid parts and accepting the payloads sent, for
example, via a POST request.
For this to be effective, when we detect POST and Host: and terminate
the connection asynchronously, the networking code was modified in order
to never process further input. It was later verified that in a
pipelined request containing a POST command, the successive commands are
not executed.
|
void addReplyBulkSds(client *c, sds s) {
addReplySds(c,sdscatfmt(sdsempty(),"$%u\r\n",
(unsigned long)sdslen(s)));
addReplySds(c,s);
addReply(c,shared.crlf);
}
|
void addReplyBulkSds(client *c, sds s) {
addReplySds(c,sdscatfmt(sdsempty(),"$%u\r\n",
(unsigned long)sdslen(s)));
addReplySds(c,s);
addReply(c,shared.crlf);
}
|
C
|
redis
| 0 |
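The mitigation described in the commit message above hinges on recognising HTTP-shaped input on the inline-command path and closing the connection instead of executing it. A minimal sketch follows; looks_like_http is a hypothetical helper for illustration, not the actual processInlineBuffer() change.

#include <strings.h>

/* Sketch: an HTTP verb or Host: header arriving where an inline command is
 * expected is a sign of cross-protocol abuse, not a legitimate client. */
static int
looks_like_http (const char *first_token)
{
   return strcasecmp (first_token, "POST") == 0 ||
          strcasecmp (first_token, "Host:") == 0;
}

/* A caller would close the client instead of executing the "command":
 *
 *    if (looks_like_http (argv[0]))
 *        log_and_close_connection (client);   // hypothetical caller side
 */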
CVE-2014-8116
|
https://www.cvedetails.com/cve/CVE-2014-8116/
|
CWE-399
|
https://github.com/file/file/commit/d7cdad007c507e6c79f51f058dd77fab70ceb9f6
|
d7cdad007c507e6c79f51f058dd77fab70ceb9f6
|
Stop reporting bad capabilities after the first few.
|
toomany(struct magic_set *ms, const char *name, uint16_t num)
{
if (file_printf(ms, ", too many %s header sections (%u)", name, num
) == -1)
return -1;
return 0;
}
|
toomany(struct magic_set *ms, const char *name, uint16_t num)
{
if (file_printf(ms, ", too many %s header sections (%u)", name, num
) == -1)
return -1;
return 0;
}
|
C
|
file
| 0 |
CVE-2018-20169
|
https://www.cvedetails.com/cve/CVE-2018-20169/
|
CWE-400
|
https://github.com/torvalds/linux/commit/704620afc70cf47abb9d6a1a57f3825d2bca49cf
|
704620afc70cf47abb9d6a1a57f3825d2bca49cf
|
USB: check usb_get_extra_descriptor for proper size
When reading an extra descriptor, we need to properly check the minimum
and maximum size allowed, to guard against invalid data sent by a
device.
Reported-by: Hui Peng <[email protected]>
Reported-by: Mathias Payer <[email protected]>
Co-developed-by: Linus Torvalds <[email protected]>
Signed-off-by: Hui Peng <[email protected]>
Signed-off-by: Mathias Payer <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
static int usb_disable_remote_wakeup(struct usb_device *udev)
{
if (udev->speed < USB_SPEED_SUPER)
return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE,
USB_DEVICE_REMOTE_WAKEUP, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
else
return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_SET_FEATURE, USB_RECIP_INTERFACE,
USB_INTRF_FUNC_SUSPEND, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
}
|
static int usb_disable_remote_wakeup(struct usb_device *udev)
{
if (udev->speed < USB_SPEED_SUPER)
return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE,
USB_DEVICE_REMOTE_WAKEUP, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
else
return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_SET_FEATURE, USB_RECIP_INTERFACE,
USB_INTRF_FUNC_SUSPEND, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
}
|
C
|
linux
| 0 |
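The check described in the commit message above is, in essence, a walk over device-supplied descriptor bytes that rejects any descriptor whose declared bLength is smaller than the structure the caller wants or larger than the remaining buffer. The sketch below uses hypothetical types (usb_desc_header_sketch, find_extra_descriptor); it is not the kernel helper itself.

#include <stdint.h>
#include <stddef.h>

struct usb_desc_header_sketch {
   uint8_t bLength;
   uint8_t bDescriptorType;
};

static const void *
find_extra_descriptor (const uint8_t *buf, size_t buflen,
                       uint8_t wanted_type, size_t wanted_size)
{
   while (buflen >= sizeof (struct usb_desc_header_sketch)) {
      const struct usb_desc_header_sketch *h = (const void *) buf;

      if (h->bLength < sizeof (*h) || h->bLength > buflen)
         return NULL;                      /* malformed: stop walking */
      if (h->bDescriptorType == wanted_type && h->bLength >= wanted_size)
         return h;                         /* large enough for the caller */

      buf    += h->bLength;
      buflen -= h->bLength;
   }
   return NULL;
}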
CVE-2012-2875
|
https://www.cvedetails.com/cve/CVE-2012-2875/
| null |
https://github.com/chromium/chromium/commit/d345af9ed62ee5f431be327967f41c3cc3fe936a
|
d345af9ed62ee5f431be327967f41c3cc3fe936a
|
[BlackBerry] Adapt to new BlackBerry::Platform::TouchPoint API
https://bugs.webkit.org/show_bug.cgi?id=105143
RIM PR 171941
Reviewed by Rob Buis.
Internally reviewed by George Staikos.
Source/WebCore:
TouchPoint instances now provide document coordinates for the viewport
and content position of the touch event. The pixel coordinates stored
in the TouchPoint should no longer be needed in WebKit.
Also adapt to new method names and encapsulation of TouchPoint data
members.
No change in behavior, no new tests.
* platform/blackberry/PlatformTouchPointBlackBerry.cpp:
(WebCore::PlatformTouchPoint::PlatformTouchPoint):
Source/WebKit/blackberry:
TouchPoint instances now provide document coordinates for the viewport
and content position of the touch event. The pixel coordinates stored
in the TouchPoint should no longer be needed in WebKit. One exception
is when passing events to a full screen plugin.
Also adapt to new method names and encapsulation of TouchPoint data
members.
* Api/WebPage.cpp:
(BlackBerry::WebKit::WebPage::touchEvent):
(BlackBerry::WebKit::WebPage::touchPointAsMouseEvent):
(BlackBerry::WebKit::WebPagePrivate::dispatchTouchEventToFullScreenPlugin):
(BlackBerry::WebKit::WebPagePrivate::dispatchTouchPointAsMouseEventToFullScreenPlugin):
* WebKitSupport/InputHandler.cpp:
(BlackBerry::WebKit::InputHandler::shouldRequestSpellCheckingOptionsForPoint):
* WebKitSupport/InputHandler.h:
(InputHandler):
* WebKitSupport/TouchEventHandler.cpp:
(BlackBerry::WebKit::TouchEventHandler::doFatFingers):
(BlackBerry::WebKit::TouchEventHandler::handleTouchPoint):
* WebKitSupport/TouchEventHandler.h:
(TouchEventHandler):
Tools:
Adapt to new method names and encapsulation of TouchPoint data members.
* DumpRenderTree/blackberry/EventSender.cpp:
(addTouchPointCallback):
(updateTouchPointCallback):
(touchEndCallback):
(releaseTouchPointCallback):
(sendTouchEvent):
git-svn-id: svn://svn.chromium.org/blink/trunk@137880 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void WebPagePrivate::setPreventsScreenDimming(bool keepAwake)
{
if (keepAwake) {
if (!m_preventIdleDimmingCount)
m_client->setPreventsScreenIdleDimming(true);
m_preventIdleDimmingCount++;
} else if (m_preventIdleDimmingCount > 0) {
m_preventIdleDimmingCount--;
if (!m_preventIdleDimmingCount)
m_client->setPreventsScreenIdleDimming(false);
} else
ASSERT_NOT_REACHED(); // SetPreventsScreenIdleDimming(false) called too many times.
}
|
void WebPagePrivate::setPreventsScreenDimming(bool keepAwake)
{
if (keepAwake) {
if (!m_preventIdleDimmingCount)
m_client->setPreventsScreenIdleDimming(true);
m_preventIdleDimmingCount++;
} else if (m_preventIdleDimmingCount > 0) {
m_preventIdleDimmingCount--;
if (!m_preventIdleDimmingCount)
m_client->setPreventsScreenIdleDimming(false);
} else
ASSERT_NOT_REACHED(); // SetPreventsScreenIdleDimming(false) called too many times.
}
|
C
|
Chrome
| 0 |
CVE-2018-16427
|
https://www.cvedetails.com/cve/CVE-2018-16427/
|
CWE-125
|
https://github.com/OpenSC/OpenSC/pull/1447/commits/8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
|
8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
|
fixed out of bounds reads
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting and suggesting security fixes.
|
void sc_format_asn1_entry(struct sc_asn1_entry *entry, void *parm, void *arg,
int set_present)
{
entry->parm = parm;
entry->arg = arg;
if (set_present)
entry->flags |= SC_ASN1_PRESENT;
}
|
void sc_format_asn1_entry(struct sc_asn1_entry *entry, void *parm, void *arg,
int set_present)
{
entry->parm = parm;
entry->arg = arg;
if (set_present)
entry->flags |= SC_ASN1_PRESENT;
}
|
C
|
OpenSC
| 0 |
CVE-2015-6773
|
https://www.cvedetails.com/cve/CVE-2015-6773/
|
CWE-119
|
https://github.com/chromium/chromium/commit/33827275411b33371e7bb750cce20f11de85002d
|
33827275411b33371e7bb750cce20f11de85002d
|
Move SelectionTemplate::is_handle_visible_ to FrameSelection
This patch moves |is_handle_visible_| to |FrameSelection| from |SelectionTemplate|
since handle visibility is used only for setting |FrameSelection|, hence it is
a redundant member variable of |SelectionTemplate|.
Bug: 742093
Change-Id: I3add4da3844fb40be34dcb4d4b46b5fa6fed1d7e
Reviewed-on: https://chromium-review.googlesource.com/595389
Commit-Queue: Yoshifumi Inoue <[email protected]>
Reviewed-by: Xiaocheng Hu <[email protected]>
Reviewed-by: Kent Tamura <[email protected]>
Cr-Commit-Position: refs/heads/master@{#491660}
|
bool SelectionEditor::NeedsUpdateVisibleSelection() const {
return cached_visible_selection_in_dom_tree_is_dirty_ ||
style_version_for_dom_tree_ != GetDocument().StyleVersion();
}
|
bool SelectionEditor::NeedsUpdateVisibleSelection() const {
return cached_visible_selection_in_dom_tree_is_dirty_ ||
style_version_for_dom_tree_ != GetDocument().StyleVersion();
}
|
C
|
Chrome
| 0 |
CVE-2018-20784
|
https://www.cvedetails.com/cve/CVE-2018-20784/
|
CWE-400
|
https://github.com/torvalds/linux/commit/c40f7d74c741a907cfaeb73a7697081881c497d0
|
c40f7d74c741a907cfaeb73a7697081881c497d0
|
sched/fair: Fix infinite loop in update_blocked_averages() by reverting a9e7f6544b9c
Zhipeng Xie, Xie XiuQi and Sargun Dhillon reported lockups in the
scheduler under high loads, starting at around the v4.18 time frame,
and Zhipeng Xie tracked it down to bugs in the rq->leaf_cfs_rq_list
manipulation.
Do a (manual) revert of:
a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
It turns out that the list_del_leaf_cfs_rq() introduced by this commit
is a surprising property that was not considered in followup commits
such as:
9c2791f936ef ("sched/fair: Fix hierarchical order in rq->leaf_cfs_rq_list")
As Vincent Guittot explains:
"I think that there is a bigger problem with commit a9e7f6544b9c and
cfs_rq throttling:
Let's take the example of the following topology TG2 --> TG1 --> root:
1) The 1st time a task is enqueued, we will add TG2 cfs_rq then TG1
cfs_rq to leaf_cfs_rq_list and we are sure to do the whole branch in
one path because it has never been used and can't be throttled so
tmp_alone_branch will point to leaf_cfs_rq_list at the end.
2) Then TG1 is throttled
3) and we add TG3 as a new child of TG1.
4) The 1st enqueue of a task on TG3 will add TG3 cfs_rq just before TG1
cfs_rq and tmp_alone_branch will stay on rq->leaf_cfs_rq_list.
With commit a9e7f6544b9c, we can del a cfs_rq from rq->leaf_cfs_rq_list.
So if the load of TG1 cfs_rq becomes NULL before step 2) above, TG1
cfs_rq is removed from the list.
Then at step 4), TG3 cfs_rq is added at the beginning of rq->leaf_cfs_rq_list
but tmp_alone_branch still points to TG3 cfs_rq because its throttled
parent can't be enqueued when the lock is released.
tmp_alone_branch doesn't point to rq->leaf_cfs_rq_list whereas it should.
So if TG3 cfs_rq is removed or destroyed before tmp_alone_branch
points on another TG cfs_rq, the next TG cfs_rq that will be added,
will be linked outside rq->leaf_cfs_rq_list - which is bad.
In addition, we can break the ordering of the cfs_rq in
rq->leaf_cfs_rq_list but this ordering is used to update and
propagate the update from leaf down to root."
Instead of trying to work through all these cases and trying to reproduce
the very high loads that produced the lockup to begin with, simplify
the code temporarily by reverting a9e7f6544b9c - which change was clearly
not thought through completely.
This (hopefully) gives us a kernel that doesn't lock up so people
can continue to enjoy their holidays without worrying about regressions. ;-)
[ mingo: Wrote changelog, fixed weird spelling in code comment while at it. ]
Analyzed-by: Xie XiuQi <[email protected]>
Analyzed-by: Vincent Guittot <[email protected]>
Reported-by: Zhipeng Xie <[email protected]>
Reported-by: Sargun Dhillon <[email protected]>
Reported-by: Xie XiuQi <[email protected]>
Tested-by: Zhipeng Xie <[email protected]>
Tested-by: Sargun Dhillon <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Acked-by: Vincent Guittot <[email protected]>
Cc: <[email protected]> # v4.13+
Cc: Bin Li <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Fixes: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
|
static void detach_task_cfs_rq(struct task_struct *p)
{
struct sched_entity *se = &p->se;
struct cfs_rq *cfs_rq = cfs_rq_of(se);
if (!vruntime_normalized(p)) {
/*
* Fix up our vruntime so that the current sleep doesn't
* cause 'unlimited' sleep bonus.
*/
place_entity(cfs_rq, se, 0);
se->vruntime -= cfs_rq->min_vruntime;
}
detach_entity_cfs_rq(se);
}
|
static void detach_task_cfs_rq(struct task_struct *p)
{
struct sched_entity *se = &p->se;
struct cfs_rq *cfs_rq = cfs_rq_of(se);
if (!vruntime_normalized(p)) {
/*
* Fix up our vruntime so that the current sleep doesn't
* cause 'unlimited' sleep bonus.
*/
place_entity(cfs_rq, se, 0);
se->vruntime -= cfs_rq->min_vruntime;
}
detach_entity_cfs_rq(se);
}
|
C
|
linux
| 0 |
CVE-2016-9793
|
https://www.cvedetails.com/cve/CVE-2016-9793/
|
CWE-119
|
https://github.com/torvalds/linux/commit/b98b0bc8c431e3ceb4b26b0dfc8db509518fb290
|
b98b0bc8c431e3ceb4b26b0dfc8db509518fb290
|
net: avoid signed overflows for SO_{SND|RCV}BUFFORCE
CAP_NET_ADMIN users should not be allowed to set negative
sk_sndbuf or sk_rcvbuf values, as it can lead to various memory
corruptions, crashes, OOM...
Note that before commit 82981930125a ("net: cleanups in
sock_setsockopt()"), the bug was even more serious, since SO_SNDBUF
and SO_RCVBUF were vulnerable.
This needs to be backported to all known linux kernels.
Again, many thanks to syzkaller team for discovering this gem.
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static int req_prot_init(const struct proto *prot)
{
struct request_sock_ops *rsk_prot = prot->rsk_prot;
if (!rsk_prot)
return 0;
rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
prot->name);
if (!rsk_prot->slab_name)
return -ENOMEM;
rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
rsk_prot->obj_size, 0,
prot->slab_flags, NULL);
if (!rsk_prot->slab) {
pr_crit("%s: Can't create request sock SLAB cache!\n",
prot->name);
return -ENOMEM;
}
return 0;
}
|
static int req_prot_init(const struct proto *prot)
{
struct request_sock_ops *rsk_prot = prot->rsk_prot;
if (!rsk_prot)
return 0;
rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
prot->name);
if (!rsk_prot->slab_name)
return -ENOMEM;
rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
rsk_prot->obj_size, 0,
prot->slab_flags, NULL);
if (!rsk_prot->slab) {
pr_crit("%s: Can't create request sock SLAB cache!\n",
prot->name);
return -ENOMEM;
}
return 0;
}
|
C
|
linux
| 0 |
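The hardening described in the commit message above amounts to refusing negative buffer sizes before they are doubled and stored as signed values. A standalone sketch follows; sanitize_forced_bufsize and its clamping policy are assumptions for illustration, not the sock_setsockopt() code.

/* Sketch: reject or clamp a negative forced buffer size before it is doubled
 * and stored, since the later arithmetic treats the value as signed. */
static int
sanitize_forced_bufsize (int requested, int min_allowed, int max_allowed)
{
   if (requested < 0)
      requested = 0;                /* a negative request is never legitimate */
   if (requested > max_allowed / 2)
      requested = max_allowed / 2;  /* leave headroom for the later "val * 2" */
   if (requested < min_allowed)
      requested = min_allowed;
   return requested;
}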
CVE-2011-2799
|
https://www.cvedetails.com/cve/CVE-2011-2799/
|
CWE-399
|
https://github.com/chromium/chromium/commit/5a2de6455f565783c73e53eae2c8b953e7d48520
|
5a2de6455f565783c73e53eae2c8b953e7d48520
|
2011-06-02 Joone Hur <[email protected]>
Reviewed by Martin Robinson.
[GTK] Only load dictionaries if spell check is enabled
https://bugs.webkit.org/show_bug.cgi?id=32879
We don't need to call enchant if enable-spell-checking is false.
* webkit/webkitwebview.cpp:
(webkit_web_view_update_settings): Skip loading dictionaries when enable-spell-checking is false.
(webkit_web_view_settings_notify): Ditto.
git-svn-id: svn://svn.chromium.org/blink/trunk@87925 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
static void webkit_web_view_set_scroll_adjustments(WebKitWebView* webView, GtkAdjustment* horizontalAdjustment, GtkAdjustment* verticalAdjustment)
{
Page* page = core(webView);
if (!page)
return;
WebKit::ChromeClient* client = static_cast<WebKit::ChromeClient*>(page->chrome()->client());
client->adjustmentWatcher()->setHorizontalAdjustment(horizontalAdjustment);
client->adjustmentWatcher()->setVerticalAdjustment(verticalAdjustment);
}
|
static void webkit_web_view_set_scroll_adjustments(WebKitWebView* webView, GtkAdjustment* horizontalAdjustment, GtkAdjustment* verticalAdjustment)
{
Page* page = core(webView);
if (!page)
return;
WebKit::ChromeClient* client = static_cast<WebKit::ChromeClient*>(page->chrome()->client());
client->adjustmentWatcher()->setHorizontalAdjustment(horizontalAdjustment);
client->adjustmentWatcher()->setVerticalAdjustment(verticalAdjustment);
}
|
C
|
Chrome
| 0 |
CVE-2018-20067
|
https://www.cvedetails.com/cve/CVE-2018-20067/
|
CWE-254
|
https://github.com/chromium/chromium/commit/a7d715ae5b654d1f98669fd979a00282a7229044
|
a7d715ae5b654d1f98669fd979a00282a7229044
|
Prevent renderer initiated back navigation to cancel a browser one.
Renderer initiated back/forward navigations must not be able to cancel ongoing
browser initiated navigation if they are not user initiated.
Note: 'normal' renderer initiated navigation uses the
FrameHost::BeginNavigation() path. A code similar to this patch is done
in NavigatorImpl::OnBeginNavigation().
Test:
-----
Added: NavigationBrowserTest.
* HistoryBackInBeforeUnload
* HistoryBackInBeforeUnloadAfterSetTimeout
* HistoryBackCancelPendingNavigationNoUserGesture
* HistoryBackCancelPendingNavigationUserGesture
Fixed:
* (WPT) .../the-history-interface/traverse_the_history_2.html
* (WPT) .../the-history-interface/traverse_the_history_3.html
* (WPT) .../the-history-interface/traverse_the_history_4.html
* (WPT) .../the-history-interface/traverse_the_history_5.html
Bug: 879965
Change-Id: I1a9bfaaea1ffc219e6c32f6e676b660e746c578c
Reviewed-on: https://chromium-review.googlesource.com/1209744
Commit-Queue: Arthur Sonzogni <[email protected]>
Reviewed-by: Daniel Cheng <[email protected]>
Reviewed-by: Mustaq Ahmed <[email protected]>
Reviewed-by: Camille Lamy <[email protected]>
Reviewed-by: Charlie Reis <[email protected]>
Cr-Commit-Position: refs/heads/master@{#592823}
|
void LocalFrameClientImpl::DidBlockFramebust(const KURL& url) {
web_frame_->Client()->DidBlockFramebust(url);
}
|
void LocalFrameClientImpl::DidBlockFramebust(const KURL& url) {
web_frame_->Client()->DidBlockFramebust(url);
}
|
C
|
Chrome
| 0 |
CVE-2017-5118
|
https://www.cvedetails.com/cve/CVE-2017-5118/
|
CWE-732
|
https://github.com/chromium/chromium/commit/0ab2412a104d2f235d7b9fe19d30ef605a410832
|
0ab2412a104d2f235d7b9fe19d30ef605a410832
|
Inherit CSP when we inherit the security origin
This prevents attacks that use main window navigation to get out of the
existing csp constraints such as the related bug
Bug: 747847
Change-Id: I1e57b50da17f65d38088205b0a3c7c49ef2ae4d8
Reviewed-on: https://chromium-review.googlesource.com/592027
Reviewed-by: Mike West <[email protected]>
Commit-Queue: Andy Paicu <[email protected]>
Cr-Commit-Position: refs/heads/master@{#492333}
|
Node* Document::cloneNode(bool deep, ExceptionState&) {
Document* clone = CloneDocumentWithoutChildren();
clone->CloneDataFromDocument(*this);
if (deep)
CloneChildNodes(clone);
return clone;
}
|
Node* Document::cloneNode(bool deep, ExceptionState&) {
Document* clone = CloneDocumentWithoutChildren();
clone->CloneDataFromDocument(*this);
if (deep)
CloneChildNodes(clone);
return clone;
}
|
C
|
Chrome
| 0 |
CVE-2013-4205
|
https://www.cvedetails.com/cve/CVE-2013-4205/
|
CWE-399
|
https://github.com/torvalds/linux/commit/6160968cee8b90a5dd95318d716e31d7775c4ef3
|
6160968cee8b90a5dd95318d716e31d7775c4ef3
|
userns: unshare_userns(&cred) should not populate cred on failure
unshare_userns(new_cred) does *new_cred = prepare_creds() before
create_user_ns() which can fail. However, the caller expects that
it doesn't need to take care of new_cred if unshare_userns() fails.
We could change the single caller, sys_unshare(), but I think it
would be more clean to avoid the side effects on failure, so with
this patch unshare_userns() does put_cred() itself and initializes
*new_cred only if create_user_ns() succeeds.
Cc: [email protected]
Signed-off-by: Oleg Nesterov <[email protected]>
Reviewed-by: Andy Lutomirski <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
projid_t from_kprojid_munged(struct user_namespace *targ, kprojid_t kprojid)
{
projid_t projid;
projid = from_kprojid(targ, kprojid);
if (projid == (projid_t) -1)
projid = OVERFLOW_PROJID;
return projid;
}
|
projid_t from_kprojid_munged(struct user_namespace *targ, kprojid_t kprojid)
{
projid_t projid;
projid = from_kprojid(targ, kprojid);
if (projid == (projid_t) -1)
projid = OVERFLOW_PROJID;
return projid;
}
|
C
|
linux
| 0 |
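The error-path rule described in the commit message above (never publish the new cred to the caller unless every step succeeded; drop the reference locally on failure) can be sketched as below. All names here are hypothetical stand-ins so the example is self-contained, not kernel APIs.

#include <stdlib.h>

struct cred_sketch { int refcount; };

/* trivial stand-ins so the sketch compiles on its own */
static struct cred_sketch *prepare_creds_sketch (void) { return calloc (1, sizeof (struct cred_sketch)); }
static int  create_ns_sketch (struct cred_sketch *cred) { (void) cred; return 0; }
static void put_cred_sketch (struct cred_sketch *cred) { free (cred); }

static int
unshare_ns_sketch (struct cred_sketch **new_cred)
{
   struct cred_sketch *cred;
   int err;

   cred = prepare_creds_sketch ();
   if (!cred)
      return -1;                   /* -ENOMEM in the kernel */

   err = create_ns_sketch (cred);
   if (err) {
      put_cred_sketch (cred);      /* failure: the caller never sees the object */
      return err;
   }

   *new_cred = cred;               /* success: publish only now */
   return 0;
}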
CVE-2017-13039
|
https://www.cvedetails.com/cve/CVE-2017-13039/
|
CWE-125
|
https://github.com/the-tcpdump-group/tcpdump/commit/e0a5a02b0fc1900a69d6c37ed0aab36fb8494e6d
|
e0a5a02b0fc1900a69d6c37ed0aab36fb8494e6d
|
CVE-2017-13039/IKEv1: Do more bounds checking.
Have ikev1_attrmap_print() and ikev1_attr_print() do full bounds
checking, and return null on a bounds overflow. Have their callers
check for a null return.
This fixes a buffer over-read discovered by Bhargava Shastry,
SecT/TU Berlin.
Add a test using the capture file supplied by the reporter(s), modified
so the capture file won't be rejected as an invalid capture.
|
ikev2_t_print(netdissect_options *ndo, int tcount,
const struct isakmp_gen *ext, u_int item_len,
const u_char *ep)
{
const struct ikev2_t *p;
struct ikev2_t t;
uint16_t t_id;
const u_char *cp;
const char *idstr;
const struct attrmap *map;
size_t nmap;
const u_char *ep2;
p = (const struct ikev2_t *)ext;
ND_TCHECK(*p);
UNALIGNED_MEMCPY(&t, ext, sizeof(t));
ikev2_pay_print(ndo, NPSTR(ISAKMP_NPTYPE_T), t.h.critical);
t_id = ntohs(t.t_id);
map = NULL;
nmap = 0;
switch (t.t_type) {
case IV2_T_ENCR:
idstr = STR_OR_ID(t_id, esp_p_map);
map = encr_t_map;
nmap = sizeof(encr_t_map)/sizeof(encr_t_map[0]);
break;
case IV2_T_PRF:
idstr = STR_OR_ID(t_id, prf_p_map);
break;
case IV2_T_INTEG:
idstr = STR_OR_ID(t_id, integ_p_map);
break;
case IV2_T_DH:
idstr = STR_OR_ID(t_id, dh_p_map);
break;
case IV2_T_ESN:
idstr = STR_OR_ID(t_id, esn_p_map);
break;
default:
idstr = NULL;
break;
}
if (idstr)
ND_PRINT((ndo," #%u type=%s id=%s ", tcount,
STR_OR_ID(t.t_type, ikev2_t_type_map),
idstr));
else
ND_PRINT((ndo," #%u type=%s id=%u ", tcount,
STR_OR_ID(t.t_type, ikev2_t_type_map),
t.t_id));
cp = (const u_char *)(p + 1);
ep2 = (const u_char *)p + item_len;
while (cp < ep && cp < ep2) {
if (map && nmap) {
cp = ikev1_attrmap_print(ndo, cp, ep2, map, nmap);
} else
cp = ikev1_attr_print(ndo, cp, ep2);
if (cp == NULL)
goto trunc;
}
if (ep < ep2)
ND_PRINT((ndo,"..."));
return cp;
trunc:
ND_PRINT((ndo," [|%s]", NPSTR(ISAKMP_NPTYPE_T)));
return NULL;
}
|
ikev2_t_print(netdissect_options *ndo, int tcount,
const struct isakmp_gen *ext, u_int item_len,
const u_char *ep)
{
const struct ikev2_t *p;
struct ikev2_t t;
uint16_t t_id;
const u_char *cp;
const char *idstr;
const struct attrmap *map;
size_t nmap;
const u_char *ep2;
p = (const struct ikev2_t *)ext;
ND_TCHECK(*p);
UNALIGNED_MEMCPY(&t, ext, sizeof(t));
ikev2_pay_print(ndo, NPSTR(ISAKMP_NPTYPE_T), t.h.critical);
t_id = ntohs(t.t_id);
map = NULL;
nmap = 0;
switch (t.t_type) {
case IV2_T_ENCR:
idstr = STR_OR_ID(t_id, esp_p_map);
map = encr_t_map;
nmap = sizeof(encr_t_map)/sizeof(encr_t_map[0]);
break;
case IV2_T_PRF:
idstr = STR_OR_ID(t_id, prf_p_map);
break;
case IV2_T_INTEG:
idstr = STR_OR_ID(t_id, integ_p_map);
break;
case IV2_T_DH:
idstr = STR_OR_ID(t_id, dh_p_map);
break;
case IV2_T_ESN:
idstr = STR_OR_ID(t_id, esn_p_map);
break;
default:
idstr = NULL;
break;
}
if (idstr)
ND_PRINT((ndo," #%u type=%s id=%s ", tcount,
STR_OR_ID(t.t_type, ikev2_t_type_map),
idstr));
else
ND_PRINT((ndo," #%u type=%s id=%u ", tcount,
STR_OR_ID(t.t_type, ikev2_t_type_map),
t.t_id));
cp = (const u_char *)(p + 1);
ep2 = (const u_char *)p + item_len;
while (cp < ep && cp < ep2) {
if (map && nmap) {
cp = ikev1_attrmap_print(ndo, cp, (ep < ep2) ? ep : ep2,
map, nmap);
} else
cp = ikev1_attr_print(ndo, cp, (ep < ep2) ? ep : ep2);
}
if (ep < ep2)
ND_PRINT((ndo,"..."));
return cp;
trunc:
ND_PRINT((ndo," [|%s]", NPSTR(ISAKMP_NPTYPE_T)));
return NULL;
}
|
C
|
tcpdump
| 1 |
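The pattern applied in the fixed function above (pass the tighter of the two end pointers, have the attribute printer bound every read and return NULL on overflow, and have the caller stop on NULL) can be sketched as a standalone TLV walker. print_attr_sketch is an assumption for illustration, not the isakmp.c code.

#include <stdint.h>
#include <stddef.h>

/* Sketch: walk one type/length attribute without reading past ep, returning
 * NULL so the caller can stop and print a truncation marker. */
static const uint8_t *
print_attr_sketch (const uint8_t *cp, const uint8_t *ep)
{
   uint16_t type, len;

   if (ep - cp < 4)
      return NULL;                      /* not even a type/length header left */
   type = (uint16_t) ((cp[0] << 8) | cp[1]);
   len  = (uint16_t) ((cp[2] << 8) | cp[3]);

   if (type & 0x8000)                   /* "basic" attribute: value lives in len */
      return cp + 4;
   if ((size_t) (ep - cp) < 4u + len)
      return NULL;                      /* declared length overruns the capture */
   return cp + 4 + len;                 /* variable-length attribute consumed */
}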
CVE-2016-7529
|
https://www.cvedetails.com/cve/CVE-2016-7529/
|
CWE-125
|
https://github.com/ImageMagick/ImageMagick/commit/a2e1064f288a353bc5fef7f79ccb7683759e775c
|
a2e1064f288a353bc5fef7f79ccb7683759e775c
|
https://github.com/ImageMagick/ImageMagick/issues/104
|
static MagickBooleanType load_level(Image *image,XCFDocInfo *inDocInfo,
XCFLayerInfo *inLayerInfo,ExceptionInfo *exception)
{
int
destLeft = 0,
destTop = 0;
Image*
tile_image;
MagickBooleanType
status;
MagickOffsetType
saved_pos,
offset,
offset2;
register ssize_t
i;
size_t
width,
height,
ntiles,
ntile_rows,
ntile_cols,
tile_image_width,
tile_image_height;
/* start reading the data */
width=ReadBlobMSBLong(image);
height=ReadBlobMSBLong(image);
/* read in the first tile offset.
* if it is '0', then this tile level is empty
* and we can simply return.
*/
offset=(MagickOffsetType) ReadBlobMSBLong(image);
if (offset == 0)
return(MagickTrue);
/* Initialise the reference for the in-memory tile-compression
*/
ntile_rows=(height+TILE_HEIGHT-1)/TILE_HEIGHT;
ntile_cols=(width+TILE_WIDTH-1)/TILE_WIDTH;
ntiles=ntile_rows*ntile_cols;
for (i = 0; i < (ssize_t) ntiles; i++)
{
status=MagickFalse;
if (offset == 0)
ThrowBinaryException(CorruptImageError,"NotEnoughTiles",image->filename);
/* save the current position as it is where the
* next tile offset is stored.
*/
saved_pos=TellBlob(image);
/* read in the offset of the next tile so we can calculate the amount
of data needed for this tile*/
offset2=(MagickOffsetType)ReadBlobMSBLong(image);
/* if the offset is 0 then we need to read in the maximum possible
allowing for negative compression */
if (offset2 == 0)
offset2=(MagickOffsetType) (offset + TILE_WIDTH * TILE_WIDTH * 4* 1.5);
/* seek to the tile offset */
offset=SeekBlob(image, offset, SEEK_SET);
/*
Allocate the image for the tile. NOTE: the last tile in a row or
column may not be a full tile!
*/
tile_image_width=(size_t) (destLeft == (int) ntile_cols-1 ?
(int) width % TILE_WIDTH : TILE_WIDTH);
if (tile_image_width == 0)
tile_image_width=TILE_WIDTH;
tile_image_height = (size_t) (destTop == (int) ntile_rows-1 ?
(int) height % TILE_HEIGHT : TILE_HEIGHT);
if (tile_image_height == 0)
tile_image_height=TILE_HEIGHT;
tile_image=CloneImage(inLayerInfo->image,tile_image_width,
tile_image_height,MagickTrue,exception);
/* read in the tile */
switch (inDocInfo->compression)
{
case COMPRESS_NONE:
if (load_tile(image,tile_image,inDocInfo,inLayerInfo,(size_t) (offset2-offset),exception) == 0)
status=MagickTrue;
break;
case COMPRESS_RLE:
if (load_tile_rle (image,tile_image,inDocInfo,inLayerInfo,
(int) (offset2-offset),exception) == 0)
status=MagickTrue;
break;
case COMPRESS_ZLIB:
ThrowBinaryException(CoderError,"ZipCompressNotSupported",
image->filename)
case COMPRESS_FRACTAL:
ThrowBinaryException(CoderError,"FractalCompressNotSupported",
image->filename)
}
/* composite the tile onto the layer's image, and then destroy it */
(void) CompositeImage(inLayerInfo->image,tile_image,CopyCompositeOp,
MagickTrue,destLeft * TILE_WIDTH,destTop*TILE_HEIGHT,exception);
tile_image=DestroyImage(tile_image);
/* adjust tile position */
destLeft++;
if (destLeft >= (int) ntile_cols)
{
destLeft = 0;
destTop++;
}
if (status != MagickFalse)
return(MagickFalse);
/* restore the saved position so we'll be ready to
* read the next offset.
*/
offset=SeekBlob(image, saved_pos, SEEK_SET);
/* read in the offset of the next tile */
offset=(MagickOffsetType) ReadBlobMSBLong(image);
}
if (offset != 0)
ThrowBinaryException(CorruptImageError,"CorruptImage",image->filename)
return(MagickTrue);
}
|
static MagickBooleanType load_level(Image *image,XCFDocInfo *inDocInfo,
XCFLayerInfo *inLayerInfo,ExceptionInfo *exception)
{
int
destLeft = 0,
destTop = 0;
Image*
tile_image;
MagickBooleanType
status;
MagickOffsetType
saved_pos,
offset,
offset2;
register ssize_t
i;
size_t
width,
height,
ntiles,
ntile_rows,
ntile_cols,
tile_image_width,
tile_image_height;
/* start reading the data */
width=ReadBlobMSBLong(image);
height=ReadBlobMSBLong(image);
/* read in the first tile offset.
* if it is '0', then this tile level is empty
* and we can simply return.
*/
offset=(MagickOffsetType) ReadBlobMSBLong(image);
if (offset == 0)
return(MagickTrue);
/* Initialise the reference for the in-memory tile-compression
*/
ntile_rows=(height+TILE_HEIGHT-1)/TILE_HEIGHT;
ntile_cols=(width+TILE_WIDTH-1)/TILE_WIDTH;
ntiles=ntile_rows*ntile_cols;
for (i = 0; i < (ssize_t) ntiles; i++)
{
status=MagickFalse;
if (offset == 0)
ThrowBinaryException(CorruptImageError,"NotEnoughTiles",image->filename);
/* save the current position as it is where the
* next tile offset is stored.
*/
saved_pos=TellBlob(image);
/* read in the offset of the next tile so we can calculate the amount
of data needed for this tile*/
offset2=(MagickOffsetType)ReadBlobMSBLong(image);
/* if the offset is 0 then we need to read in the maximum possible
allowing for negative compression */
if (offset2 == 0)
offset2=(MagickOffsetType) (offset + TILE_WIDTH * TILE_WIDTH * 4* 1.5);
/* seek to the tile offset */
offset=SeekBlob(image, offset, SEEK_SET);
/*
Allocate the image for the tile. NOTE: the last tile in a row or
column may not be a full tile!
*/
tile_image_width=(size_t) (destLeft == (int) ntile_cols-1 ?
(int) width % TILE_WIDTH : TILE_WIDTH);
if (tile_image_width == 0)
tile_image_width=TILE_WIDTH;
tile_image_height = (size_t) (destTop == (int) ntile_rows-1 ?
(int) height % TILE_HEIGHT : TILE_HEIGHT);
if (tile_image_height == 0)
tile_image_height=TILE_HEIGHT;
tile_image=CloneImage(inLayerInfo->image,tile_image_width,
tile_image_height,MagickTrue,exception);
/* read in the tile */
switch (inDocInfo->compression)
{
case COMPRESS_NONE:
if (load_tile(image,tile_image,inDocInfo,inLayerInfo,(size_t) (offset2-offset),exception) == 0)
status=MagickTrue;
break;
case COMPRESS_RLE:
if (load_tile_rle (image,tile_image,inDocInfo,inLayerInfo,
(int) (offset2-offset),exception) == 0)
status=MagickTrue;
break;
case COMPRESS_ZLIB:
ThrowBinaryException(CoderError,"ZipCompressNotSupported",
image->filename)
case COMPRESS_FRACTAL:
ThrowBinaryException(CoderError,"FractalCompressNotSupported",
image->filename)
}
/* composite the tile onto the layer's image, and then destroy it */
(void) CompositeImage(inLayerInfo->image,tile_image,CopyCompositeOp,
MagickTrue,destLeft * TILE_WIDTH,destTop*TILE_HEIGHT,exception);
tile_image=DestroyImage(tile_image);
/* adjust tile position */
destLeft++;
if (destLeft >= (int) ntile_cols)
{
destLeft = 0;
destTop++;
}
if (status != MagickFalse)
return(MagickFalse);
/* restore the saved position so we'll be ready to
* read the next offset.
*/
offset=SeekBlob(image, saved_pos, SEEK_SET);
/* read in the offset of the next tile */
offset=(MagickOffsetType) ReadBlobMSBLong(image);
}
if (offset != 0)
ThrowBinaryException(CorruptImageError,"CorruptImage",image->filename)
return(MagickTrue);
}
|
C
|
ImageMagick
| 0 |
CVE-2016-3951
|
https://www.cvedetails.com/cve/CVE-2016-3951/
| null |
https://github.com/torvalds/linux/commit/4d06dd537f95683aba3651098ae288b7cbff8274
|
4d06dd537f95683aba3651098ae288b7cbff8274
|
cdc_ncm: do not call usbnet_link_change from cdc_ncm_bind
usbnet_link_change will call schedule_work and should be
avoided if bind is failing. Otherwise we will end up with
scheduled work referring to a netdev which has gone away.
Instead of making the call conditional, we can just defer
it to usbnet_probe, using the driver_info flag made for
this purpose.
Fixes: 8a34b0ae8778 ("usbnet: cdc_ncm: apply usbnet_link_change")
Reported-by: Andrey Konovalov <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Signed-off-by: Bjørn Mork <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static ssize_t cdc_ncm_show_tx_max(struct device *d, struct device_attribute *attr, char *buf)
{
struct usbnet *dev = netdev_priv(to_net_dev(d));
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
return sprintf(buf, "%u\n", ctx->tx_max);
}
|
static ssize_t cdc_ncm_show_tx_max(struct device *d, struct device_attribute *attr, char *buf)
{
struct usbnet *dev = netdev_priv(to_net_dev(d));
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
return sprintf(buf, "%u\n", ctx->tx_max);
}
|
C
|
linux
| 0 |
CVE-2016-3760
|
https://www.cvedetails.com/cve/CVE-2016-3760/
|
CWE-20
|
https://android.googlesource.com/platform/system/bt/+/37c88107679d36c419572732b4af6e18bb2f7dce
|
37c88107679d36c419572732b4af6e18bb2f7dce
|
Add guest mode functionality (2/3)
Add a flag to enable() to start Bluetooth in restricted
mode. In restricted mode, all devices that are paired during
restricted mode are deleted upon leaving restricted mode.
Right now restricted mode is only entered while a guest
user is active.
Bug: 27410683
Change-Id: I8f23d28ef0aa3a8df13d469c73005c8e1b894d19
|
static void le_test_mode(bt_status_t status, uint16_t packet_count)
{
bdt_log("LE TEST MODE END status:%s number_of_packets:%d", dump_bt_status(status), packet_count);
}
|
static void le_test_mode(bt_status_t status, uint16_t packet_count)
{
bdt_log("LE TEST MODE END status:%s number_of_packets:%d", dump_bt_status(status), packet_count);
}
|
C
|
Android
| 0 |
CVE-2017-18216
|
https://www.cvedetails.com/cve/CVE-2017-18216/
|
CWE-476
|
https://github.com/torvalds/linux/commit/853bc26a7ea39e354b9f8889ae7ad1492ffa28d2
|
853bc26a7ea39e354b9f8889ae7ad1492ffa28d2
|
ocfs2: subsystem.su_mutex is required while accessing the item->ci_parent
The subsystem.su_mutex is required while accessing the item->ci_parent,
otherwise, NULL pointer dereference to the item->ci_parent will be
triggered in the following situation:
add node delete node
sys_write
vfs_write
configfs_write_file
o2nm_node_store
o2nm_node_local_write
do_rmdir
vfs_rmdir
configfs_rmdir
mutex_lock(&subsys->su_mutex);
unlink_obj
item->ci_group = NULL;
item->ci_parent = NULL;
to_o2nm_cluster_from_node
node->nd_item.ci_parent->ci_parent
BUG since of NULL pointer dereference to nd_item.ci_parent
Moreover, the o2nm_cluster also should be protected by the
subsystem.su_mutex.
[[email protected]: v2]
Link: http://lkml.kernel.org/r/[email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Alex Chen <[email protected]>
Reviewed-by: Jun Piao <[email protected]>
Reviewed-by: Joseph Qi <[email protected]>
Cc: Mark Fasheh <[email protected]>
Cc: Joel Becker <[email protected]>
Cc: Junxiao Bi <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static struct o2nm_node *to_o2nm_node(struct config_item *item)
{
return item ? container_of(item, struct o2nm_node, nd_item) : NULL;
}
|
static struct o2nm_node *to_o2nm_node(struct config_item *item)
{
return item ? container_of(item, struct o2nm_node, nd_item) : NULL;
}
|
C
|
linux
| 0 |
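The locking rule described in the commit message above (take the subsystem mutex before following a parent pointer that a concurrent rmdir may clear, and re-check the pointer under the lock) is sketched below with pthreads and hypothetical types, not the configfs/ocfs2 structures.

#include <pthread.h>
#include <stddef.h>

struct item_sketch {
   struct item_sketch *parent;   /* cleared by the remover under the mutex */
};

static pthread_mutex_t subsys_mutex = PTHREAD_MUTEX_INITIALIZER;

static struct item_sketch *
get_parent_safely (struct item_sketch *item)
{
   struct item_sketch *parent = NULL;

   pthread_mutex_lock (&subsys_mutex);
   if (item->parent)             /* may have been unlinked before we got here */
      parent = item->parent;
   pthread_mutex_unlock (&subsys_mutex);

   return parent;
}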
null | null | null |
https://github.com/chromium/chromium/commit/5041f984669fe3a989a84c348eb838c8f7233f6b
|
5041f984669fe3a989a84c348eb838c8f7233f6b
|
AutoFill: Release the cached frame when we receive the frameDestroyed() message
from WebKit.
BUG=48857
TEST=none
Review URL: http://codereview.chromium.org/3173005
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@55789 0039d316-1c4b-4281-b951-d872f2087c98
|
void RenderView::OnAutoFillSuggestionsReturned(
int query_id,
const std::vector<string16>& values,
const std::vector<string16>& labels,
const std::vector<string16>& icons,
const std::vector<int>& unique_ids) {
autofill_helper_.SuggestionsReceived(
query_id, values, labels, icons, unique_ids);
}
|
void RenderView::OnAutoFillSuggestionsReturned(
int query_id,
const std::vector<string16>& values,
const std::vector<string16>& labels,
const std::vector<string16>& icons,
const std::vector<int>& unique_ids) {
autofill_helper_.SuggestionsReceived(
query_id, values, labels, icons, unique_ids);
}
|
C
|
Chrome
| 0 |
CVE-2012-6701
|
https://www.cvedetails.com/cve/CVE-2012-6701/
| null |
https://github.com/torvalds/linux/commit/a70b52ec1aaeaf60f4739edb1b422827cb6f3893
|
a70b52ec1aaeaf60f4739edb1b422827cb6f3893
|
vfs: make AIO use the proper rw_verify_area() area helpers
We had for some reason overlooked the AIO interface, and it didn't use
the proper rw_verify_area() helper function that checks (for example)
mandatory locking on the file, and that the size of the access doesn't
cause us to overflow the provided offset limits etc.
Instead, AIO did just the security_file_permission() thing (that
rw_verify_area() also does) directly.
This fixes it to do all the proper helper functions, which not only
means that now mandatory file locking works with AIO too, we can
actually remove lines of code.
Reported-by: Manish Honap <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>
|
static int aio_setup_ring(struct kioctx *ctx)
{
struct aio_ring *ring;
struct aio_ring_info *info = &ctx->ring_info;
unsigned nr_events = ctx->max_reqs;
unsigned long size;
int nr_pages;
/* Compensate for the ring buffer's head/tail overlap entry */
nr_events += 2; /* 1 is required, 2 for good luck */
size = sizeof(struct aio_ring);
size += sizeof(struct io_event) * nr_events;
nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
if (nr_pages < 0)
return -EINVAL;
nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
info->nr = 0;
info->ring_pages = info->internal_pages;
if (nr_pages > AIO_RING_PAGES) {
info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
if (!info->ring_pages)
return -ENOMEM;
}
info->mmap_size = nr_pages * PAGE_SIZE;
dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
down_write(&ctx->mm->mmap_sem);
info->mmap_base = do_mmap(NULL, 0, info->mmap_size,
PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE,
0);
if (IS_ERR((void *)info->mmap_base)) {
up_write(&ctx->mm->mmap_sem);
info->mmap_size = 0;
aio_free_ring(ctx);
return -EAGAIN;
}
dprintk("mmap address: 0x%08lx\n", info->mmap_base);
info->nr_pages = get_user_pages(current, ctx->mm,
info->mmap_base, nr_pages,
1, 0, info->ring_pages, NULL);
up_write(&ctx->mm->mmap_sem);
if (unlikely(info->nr_pages != nr_pages)) {
aio_free_ring(ctx);
return -EAGAIN;
}
ctx->user_id = info->mmap_base;
info->nr = nr_events; /* trusted copy */
ring = kmap_atomic(info->ring_pages[0]);
ring->nr = nr_events; /* user copy */
ring->id = ctx->user_id;
ring->head = ring->tail = 0;
ring->magic = AIO_RING_MAGIC;
ring->compat_features = AIO_RING_COMPAT_FEATURES;
ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
ring->header_length = sizeof(struct aio_ring);
kunmap_atomic(ring);
return 0;
}
|
static int aio_setup_ring(struct kioctx *ctx)
{
struct aio_ring *ring;
struct aio_ring_info *info = &ctx->ring_info;
unsigned nr_events = ctx->max_reqs;
unsigned long size;
int nr_pages;
/* Compensate for the ring buffer's head/tail overlap entry */
nr_events += 2; /* 1 is required, 2 for good luck */
size = sizeof(struct aio_ring);
size += sizeof(struct io_event) * nr_events;
nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
if (nr_pages < 0)
return -EINVAL;
nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
info->nr = 0;
info->ring_pages = info->internal_pages;
if (nr_pages > AIO_RING_PAGES) {
info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
if (!info->ring_pages)
return -ENOMEM;
}
info->mmap_size = nr_pages * PAGE_SIZE;
dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
down_write(&ctx->mm->mmap_sem);
info->mmap_base = do_mmap(NULL, 0, info->mmap_size,
PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE,
0);
if (IS_ERR((void *)info->mmap_base)) {
up_write(&ctx->mm->mmap_sem);
info->mmap_size = 0;
aio_free_ring(ctx);
return -EAGAIN;
}
dprintk("mmap address: 0x%08lx\n", info->mmap_base);
info->nr_pages = get_user_pages(current, ctx->mm,
info->mmap_base, nr_pages,
1, 0, info->ring_pages, NULL);
up_write(&ctx->mm->mmap_sem);
if (unlikely(info->nr_pages != nr_pages)) {
aio_free_ring(ctx);
return -EAGAIN;
}
ctx->user_id = info->mmap_base;
info->nr = nr_events; /* trusted copy */
ring = kmap_atomic(info->ring_pages[0]);
ring->nr = nr_events; /* user copy */
ring->id = ctx->user_id;
ring->head = ring->tail = 0;
ring->magic = AIO_RING_MAGIC;
ring->compat_features = AIO_RING_COMPAT_FEATURES;
ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
ring->header_length = sizeof(struct aio_ring);
kunmap_atomic(ring);
return 0;
}
|
C
|
linux
| 0 |
CVE-2016-5219
|
https://www.cvedetails.com/cve/CVE-2016-5219/
|
CWE-416
|
https://github.com/chromium/chromium/commit/a4150b688a754d3d10d2ca385155b1c95d77d6ae
|
a4150b688a754d3d10d2ca385155b1c95d77d6ae
|
Add GL_PROGRAM_COMPLETION_QUERY_CHROMIUM
This makes the query of GL_COMPLETION_STATUS_KHR to programs much
cheaper by minimizing the round-trip to the GPU thread.
Bug: 881152, 957001
Change-Id: Iadfa798af29225e752c710ca5c25f50b3dd3101a
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1586630
Commit-Queue: Kenneth Russell <[email protected]>
Reviewed-by: Kentaro Hara <[email protected]>
Reviewed-by: Geoff Lang <[email protected]>
Reviewed-by: Kenneth Russell <[email protected]>
Cr-Commit-Position: refs/heads/master@{#657568}
|
error::Error GLES2DecoderImpl::HandleGetUniformiv(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
const volatile gles2::cmds::GetUniformiv& c =
*static_cast<const volatile gles2::cmds::GetUniformiv*>(cmd_data);
GLuint program = c.program;
GLint fake_location = c.location;
GLuint service_id;
GLenum result_type;
GLsizei result_size;
GLint real_location = -1;
Error error;
cmds::GetUniformiv::Result* result;
if (GetUniformSetup<GLint>(program, fake_location, c.params_shm_id,
c.params_shm_offset, &error, &real_location,
&service_id, &result, &result_type,
&result_size)) {
api()->glGetUniformivFn(service_id, real_location, result->GetData());
}
return error;
}
|
error::Error GLES2DecoderImpl::HandleGetUniformiv(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
const volatile gles2::cmds::GetUniformiv& c =
*static_cast<const volatile gles2::cmds::GetUniformiv*>(cmd_data);
GLuint program = c.program;
GLint fake_location = c.location;
GLuint service_id;
GLenum result_type;
GLsizei result_size;
GLint real_location = -1;
Error error;
cmds::GetUniformiv::Result* result;
if (GetUniformSetup<GLint>(program, fake_location, c.params_shm_id,
c.params_shm_offset, &error, &real_location,
&service_id, &result, &result_type,
&result_size)) {
api()->glGetUniformivFn(service_id, real_location, result->GetData());
}
return error;
}
|
C
|
Chrome
| 0 |
CVE-2016-5199
|
https://www.cvedetails.com/cve/CVE-2016-5199/
|
CWE-119
|
https://github.com/chromium/chromium/commit/c995d4fe5e96f4d6d4a88b7867279b08e72d2579
|
c995d4fe5e96f4d6d4a88b7867279b08e72d2579
|
Move IsDataSaverEnabledByUser to be a static method and use it
This method now officially becomes the source of truth that
everything in the code base eventually calls into to determine whether
or not DataSaver is enabled.
Bug: 934399
Change-Id: Iae837b710ace8cc3101188f79d02cbc2d4f0fd93
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1537242
Reviewed-by: Joshua Pawlicki <[email protected]>
Reviewed-by: Tarun Bansal <[email protected]>
Commit-Queue: Robert Ogden <[email protected]>
Cr-Commit-Position: refs/heads/master@{#643948}
|
bool ChromeContentBrowserClient::ShouldSwapBrowsingInstancesForNavigation(
SiteInstance* site_instance,
const GURL& current_url,
const GURL& new_url) {
#if BUILDFLAG(ENABLE_EXTENSIONS)
return ChromeContentBrowserClientExtensionsPart::
ShouldSwapBrowsingInstancesForNavigation(
site_instance, current_url, new_url);
#else
return false;
#endif
}
|
bool ChromeContentBrowserClient::ShouldSwapBrowsingInstancesForNavigation(
SiteInstance* site_instance,
const GURL& current_url,
const GURL& new_url) {
#if BUILDFLAG(ENABLE_EXTENSIONS)
return ChromeContentBrowserClientExtensionsPart::
ShouldSwapBrowsingInstancesForNavigation(
site_instance, current_url, new_url);
#else
return false;
#endif
}
|
C
|
Chrome
| 0 |
CVE-2017-18203
|
https://www.cvedetails.com/cve/CVE-2017-18203/
|
CWE-362
|
https://github.com/torvalds/linux/commit/b9a41d21dceadf8104812626ef85dc56ee8a60ed
|
b9a41d21dceadf8104812626ef85dc56ee8a60ed
|
dm: fix race between dm_get_from_kobject() and __dm_destroy()
The following BUG_ON was hit when testing repeat creation and removal of
DM devices:
kernel BUG at drivers/md/dm.c:2919!
CPU: 7 PID: 750 Comm: systemd-udevd Not tainted 4.1.44
Call Trace:
[<ffffffff81649e8b>] dm_get_from_kobject+0x34/0x3a
[<ffffffff81650ef1>] dm_attr_show+0x2b/0x5e
[<ffffffff817b46d1>] ? mutex_lock+0x26/0x44
[<ffffffff811df7f5>] sysfs_kf_seq_show+0x83/0xcf
[<ffffffff811de257>] kernfs_seq_show+0x23/0x25
[<ffffffff81199118>] seq_read+0x16f/0x325
[<ffffffff811de994>] kernfs_fop_read+0x3a/0x13f
[<ffffffff8117b625>] __vfs_read+0x26/0x9d
[<ffffffff8130eb59>] ? security_file_permission+0x3c/0x44
[<ffffffff8117bdb8>] ? rw_verify_area+0x83/0xd9
[<ffffffff8117be9d>] vfs_read+0x8f/0xcf
[<ffffffff81193e34>] ? __fdget_pos+0x12/0x41
[<ffffffff8117c686>] SyS_read+0x4b/0x76
[<ffffffff817b606e>] system_call_fastpath+0x12/0x71
The bug can be easily triggered, if an extra delay (e.g. 10ms) is added
between the test of DMF_FREEING & DMF_DELETING and dm_get() in
dm_get_from_kobject().
To fix it, we need to ensure the test of DMF_FREEING & DMF_DELETING and
dm_get() are done in an atomic way, so _minor_lock is used.
The other callers of dm_get() have also been checked to be OK: some
callers invoke dm_get() under _minor_lock, some callers invoke it under
_hash_lock, and dm_start_request() invoke it after increasing
md->open_count.
Cc: [email protected]
Signed-off-by: Hou Tao <[email protected]>
Signed-off-by: Mike Snitzer <[email protected]>
|
static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
enum pr_type type, bool abort)
{
struct mapped_device *md = bdev->bd_disk->private_data;
const struct pr_ops *ops;
fmode_t mode;
int r;
r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
if (r < 0)
return r;
ops = bdev->bd_disk->fops->pr_ops;
if (ops && ops->pr_preempt)
r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
else
r = -EOPNOTSUPP;
bdput(bdev);
return r;
}
|
static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
enum pr_type type, bool abort)
{
struct mapped_device *md = bdev->bd_disk->private_data;
const struct pr_ops *ops;
fmode_t mode;
int r;
r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
if (r < 0)
return r;
ops = bdev->bd_disk->fops->pr_ops;
if (ops && ops->pr_preempt)
r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
else
r = -EOPNOTSUPP;
bdput(bdev);
return r;
}
|
C
|
linux
| 0 |
CVE-2019-5837
|
https://www.cvedetails.com/cve/CVE-2019-5837/
|
CWE-200
|
https://github.com/chromium/chromium/commit/04aaacb936a08d70862d6d9d7e8354721ae46be8
|
04aaacb936a08d70862d6d9d7e8354721ae46be8
|
Reland "AppCache: Add padding to cross-origin responses."
This is a reland of 85b389caa7d725cdd31f59e9a2b79ff54804b7b7
Initialized CacheRecord::padding_size to 0.
Original change's description:
> AppCache: Add padding to cross-origin responses.
>
> Bug: 918293
> Change-Id: I4f16640f06feac009d6bbbb624951da6d2669f6c
> Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1488059
> Commit-Queue: Staphany Park <[email protected]>
> Reviewed-by: Victor Costan <[email protected]>
> Reviewed-by: Marijn Kruisselbrink <[email protected]>
> Cr-Commit-Position: refs/heads/master@{#644624}
Bug: 918293
Change-Id: Ie1d3f99c7e8a854d33255a4d66243da2ce16441c
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1539906
Reviewed-by: Victor Costan <[email protected]>
Commit-Queue: Staphany Park <[email protected]>
Cr-Commit-Position: refs/heads/master@{#644719}
|
bool AppCacheDatabase::FindEntriesForUrl(
const GURL& url, std::vector<EntryRecord>* records) {
DCHECK(records && records->empty());
if (!LazyOpen(kDontCreate))
return false;
static const char kSql[] =
"SELECT cache_id, url, flags, response_id, response_size, padding_size "
"FROM Entries"
" WHERE url = ?";
sql::Statement statement(db_->GetCachedStatement(SQL_FROM_HERE, kSql));
statement.BindString(0, url.spec());
while (statement.Step()) {
records->push_back(EntryRecord());
ReadEntryRecord(statement, &records->back());
DCHECK(records->back().url == url);
}
return statement.Succeeded();
}
|
bool AppCacheDatabase::FindEntriesForUrl(
const GURL& url, std::vector<EntryRecord>* records) {
DCHECK(records && records->empty());
if (!LazyOpen(kDontCreate))
return false;
static const char kSql[] =
"SELECT cache_id, url, flags, response_id, response_size FROM Entries"
" WHERE url = ?";
sql::Statement statement(db_->GetCachedStatement(SQL_FROM_HERE, kSql));
statement.BindString(0, url.spec());
while (statement.Step()) {
records->push_back(EntryRecord());
ReadEntryRecord(statement, &records->back());
DCHECK(records->back().url == url);
}
return statement.Succeeded();
}
|
C
|
Chrome
| 1 |
CVE-2014-1445
|
https://www.cvedetails.com/cve/CVE-2014-1445/
|
CWE-399
|
https://github.com/torvalds/linux/commit/2b13d06c9584b4eb773f1e80bbaedab9a1c344e1
|
2b13d06c9584b4eb773f1e80bbaedab9a1c344e1
|
wanxl: fix info leak in ioctl
The wanxl_ioctl() code fails to initialize the two padding bytes of
struct sync_serial_settings after the ->loopback member. Add an explicit
memset(0) before filling the structure to avoid the info leak.
Signed-off-by: Salva Peiró <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
const size_t size = sizeof(sync_serial_settings);
sync_serial_settings line;
port_t *port = dev_to_port(dev);
if (cmd != SIOCWANDEV)
return hdlc_ioctl(dev, ifr, cmd);
switch (ifr->ifr_settings.type) {
case IF_GET_IFACE:
ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
if (ifr->ifr_settings.size < size) {
ifr->ifr_settings.size = size; /* data size wanted */
return -ENOBUFS;
}
memset(&line, 0, sizeof(line));
line.clock_type = get_status(port)->clocking;
line.clock_rate = 0;
line.loopback = 0;
if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
return -EFAULT;
return 0;
case IF_IFACE_SYNC_SERIAL:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (dev->flags & IFF_UP)
return -EBUSY;
if (copy_from_user(&line, ifr->ifr_settings.ifs_ifsu.sync,
size))
return -EFAULT;
if (line.clock_type != CLOCK_EXT &&
line.clock_type != CLOCK_TXFROMRX)
return -EINVAL; /* No such clock setting */
if (line.loopback != 0)
return -EINVAL;
get_status(port)->clocking = line.clock_type;
return 0;
default:
return hdlc_ioctl(dev, ifr, cmd);
}
}
|
static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
const size_t size = sizeof(sync_serial_settings);
sync_serial_settings line;
port_t *port = dev_to_port(dev);
if (cmd != SIOCWANDEV)
return hdlc_ioctl(dev, ifr, cmd);
switch (ifr->ifr_settings.type) {
case IF_GET_IFACE:
ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
if (ifr->ifr_settings.size < size) {
ifr->ifr_settings.size = size; /* data size wanted */
return -ENOBUFS;
}
line.clock_type = get_status(port)->clocking;
line.clock_rate = 0;
line.loopback = 0;
if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
return -EFAULT;
return 0;
case IF_IFACE_SYNC_SERIAL:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (dev->flags & IFF_UP)
return -EBUSY;
if (copy_from_user(&line, ifr->ifr_settings.ifs_ifsu.sync,
size))
return -EFAULT;
if (line.clock_type != CLOCK_EXT &&
line.clock_type != CLOCK_TXFROMRX)
return -EINVAL; /* No such clock setting */
if (line.loopback != 0)
return -EINVAL;
get_status(port)->clocking = line.clock_type;
return 0;
default:
return hdlc_ioctl(dev, ifr, cmd);
}
}
|
C
|
linux
| 1 |
CVE-2016-10197
|
https://www.cvedetails.com/cve/CVE-2016-10197/
|
CWE-125
|
https://github.com/libevent/libevent/commit/ec65c42052d95d2c23d1d837136d1cf1d9ecef9e
|
ec65c42052d95d2c23d1d837136d1cf1d9ecef9e
|
evdns: fix searching empty hostnames
From #332:
Here follows a bug report by **Guido Vranken** via the _Tor bug bounty program_. Please credit Guido accordingly.
## Bug report
The DNS code of Libevent contains this rather obvious OOB read:
```c
static char *
search_make_new(const struct search_state *const state, int n, const char *const base_name) {
const size_t base_len = strlen(base_name);
const char need_to_append_dot = base_name[base_len - 1] == '.' ? 0 : 1;
```
If the length of ```base_name``` is 0, then line 3125 reads 1 byte before the buffer. This will trigger a crash on ASAN-protected builds.
To reproduce:
Build libevent with ASAN:
```
$ CFLAGS='-fomit-frame-pointer -fsanitize=address' ./configure && make -j4
```
Put the attached ```resolv.conf``` and ```poc.c``` in the source directory and then do:
```
$ gcc -fsanitize=address -fomit-frame-pointer poc.c .libs/libevent.a
$ ./a.out
=================================================================
==22201== ERROR: AddressSanitizer: heap-buffer-overflow on address 0x60060000efdf at pc 0x4429da bp 0x7ffe1ed47300 sp 0x7ffe1ed472f8
READ of size 1 at 0x60060000efdf thread T0
```
P.S. we can add a check earlier, but since this is very uncommon, I didn't add it.
Fixes: #332
|
reply_parse(struct evdns_base *base, u8 *packet, int length) {
int j = 0, k = 0; /* index into packet */
u16 t_; /* used by the macros */
u32 t32_; /* used by the macros */
char tmp_name[256], cmp_name[256]; /* used by the macros */
int name_matches = 0;
u16 trans_id, questions, answers, authority, additional, datalength;
u16 flags = 0;
u32 ttl, ttl_r = 0xffffffff;
struct reply reply;
struct request *req = NULL;
unsigned int i;
ASSERT_LOCKED(base);
GET16(trans_id);
GET16(flags);
GET16(questions);
GET16(answers);
GET16(authority);
GET16(additional);
(void) authority; /* suppress "unused variable" warnings. */
(void) additional; /* suppress "unused variable" warnings. */
req = request_find_from_trans_id(base, trans_id);
if (!req) return -1;
EVUTIL_ASSERT(req->base == base);
memset(&reply, 0, sizeof(reply));
/* If it's not an answer, it doesn't correspond to any request. */
if (!(flags & 0x8000)) return -1; /* must be an answer */
if ((flags & 0x020f) && (flags & 0x020f) != DNS_ERR_NOTEXIST) {
/* there was an error and it's not NXDOMAIN */
goto err;
}
/* if (!answers) return; */ /* must have an answer of some form */
/* This macro skips a name in the DNS reply. */
#define SKIP_NAME \
do { tmp_name[0] = '\0'; \
if (name_parse(packet, length, &j, tmp_name, \
sizeof(tmp_name))<0) \
goto err; \
} while (0)
reply.type = req->request_type;
/* skip over each question in the reply */
for (i = 0; i < questions; ++i) {
/* the question looks like
* <label:name><u16:type><u16:class>
*/
tmp_name[0] = '\0';
cmp_name[0] = '\0';
k = j;
if (name_parse(packet, length, &j, tmp_name, sizeof(tmp_name)) < 0)
goto err;
if (name_parse(req->request, req->request_len, &k,
cmp_name, sizeof(cmp_name))<0)
goto err;
if (!base->global_randomize_case) {
if (strcmp(tmp_name, cmp_name) == 0)
name_matches = 1;
} else {
if (evutil_ascii_strcasecmp(tmp_name, cmp_name) == 0)
name_matches = 1;
}
j += 4;
if (j > length)
goto err;
}
if (!name_matches)
goto err;
/* now we have the answer section which looks like
* <label:name><u16:type><u16:class><u32:ttl><u16:len><data...>
*/
for (i = 0; i < answers; ++i) {
u16 type, class;
SKIP_NAME;
GET16(type);
GET16(class);
GET32(ttl);
GET16(datalength);
if (type == TYPE_A && class == CLASS_INET) {
int addrcount, addrtocopy;
if (req->request_type != TYPE_A) {
j += datalength; continue;
}
if ((datalength & 3) != 0) /* not an even number of As. */
goto err;
addrcount = datalength >> 2;
addrtocopy = MIN(MAX_V4_ADDRS - reply.data.a.addrcount, (unsigned)addrcount);
ttl_r = MIN(ttl_r, ttl);
/* we only bother with the first four addresses. */
if (j + 4*addrtocopy > length) goto err;
memcpy(&reply.data.a.addresses[reply.data.a.addrcount],
packet + j, 4*addrtocopy);
j += 4*addrtocopy;
reply.data.a.addrcount += addrtocopy;
reply.have_answer = 1;
if (reply.data.a.addrcount == MAX_V4_ADDRS) break;
} else if (type == TYPE_PTR && class == CLASS_INET) {
if (req->request_type != TYPE_PTR) {
j += datalength; continue;
}
if (name_parse(packet, length, &j, reply.data.ptr.name,
sizeof(reply.data.ptr.name))<0)
goto err;
ttl_r = MIN(ttl_r, ttl);
reply.have_answer = 1;
break;
} else if (type == TYPE_CNAME) {
char cname[HOST_NAME_MAX];
if (!req->put_cname_in_ptr || *req->put_cname_in_ptr) {
j += datalength; continue;
}
if (name_parse(packet, length, &j, cname,
sizeof(cname))<0)
goto err;
*req->put_cname_in_ptr = mm_strdup(cname);
} else if (type == TYPE_AAAA && class == CLASS_INET) {
int addrcount, addrtocopy;
if (req->request_type != TYPE_AAAA) {
j += datalength; continue;
}
if ((datalength & 15) != 0) /* not an even number of AAAAs. */
goto err;
addrcount = datalength >> 4; /* each address is 16 bytes long */
addrtocopy = MIN(MAX_V6_ADDRS - reply.data.aaaa.addrcount, (unsigned)addrcount);
ttl_r = MIN(ttl_r, ttl);
/* we only bother with the first four addresses. */
if (j + 16*addrtocopy > length) goto err;
memcpy(&reply.data.aaaa.addresses[reply.data.aaaa.addrcount],
packet + j, 16*addrtocopy);
reply.data.aaaa.addrcount += addrtocopy;
j += 16*addrtocopy;
reply.have_answer = 1;
if (reply.data.aaaa.addrcount == MAX_V6_ADDRS) break;
} else {
/* skip over any other type of resource */
j += datalength;
}
}
if (!reply.have_answer) {
for (i = 0; i < authority; ++i) {
u16 type, class;
SKIP_NAME;
GET16(type);
GET16(class);
GET32(ttl);
GET16(datalength);
if (type == TYPE_SOA && class == CLASS_INET) {
u32 serial, refresh, retry, expire, minimum;
SKIP_NAME;
SKIP_NAME;
GET32(serial);
GET32(refresh);
GET32(retry);
GET32(expire);
GET32(minimum);
(void)expire;
(void)retry;
(void)refresh;
(void)serial;
ttl_r = MIN(ttl_r, ttl);
ttl_r = MIN(ttl_r, minimum);
} else {
/* skip over any other type of resource */
j += datalength;
}
}
}
if (ttl_r == 0xffffffff)
ttl_r = 0;
reply_handle(req, flags, ttl_r, &reply);
return 0;
err:
if (req)
reply_handle(req, flags, 0, NULL);
return -1;
}
|
reply_parse(struct evdns_base *base, u8 *packet, int length) {
int j = 0, k = 0; /* index into packet */
u16 t_; /* used by the macros */
u32 t32_; /* used by the macros */
char tmp_name[256], cmp_name[256]; /* used by the macros */
int name_matches = 0;
u16 trans_id, questions, answers, authority, additional, datalength;
u16 flags = 0;
u32 ttl, ttl_r = 0xffffffff;
struct reply reply;
struct request *req = NULL;
unsigned int i;
ASSERT_LOCKED(base);
GET16(trans_id);
GET16(flags);
GET16(questions);
GET16(answers);
GET16(authority);
GET16(additional);
(void) authority; /* suppress "unused variable" warnings. */
(void) additional; /* suppress "unused variable" warnings. */
req = request_find_from_trans_id(base, trans_id);
if (!req) return -1;
EVUTIL_ASSERT(req->base == base);
memset(&reply, 0, sizeof(reply));
/* If it's not an answer, it doesn't correspond to any request. */
if (!(flags & 0x8000)) return -1; /* must be an answer */
if ((flags & 0x020f) && (flags & 0x020f) != DNS_ERR_NOTEXIST) {
/* there was an error and it's not NXDOMAIN */
goto err;
}
/* if (!answers) return; */ /* must have an answer of some form */
/* This macro skips a name in the DNS reply. */
#define SKIP_NAME \
do { tmp_name[0] = '\0'; \
if (name_parse(packet, length, &j, tmp_name, \
sizeof(tmp_name))<0) \
goto err; \
} while (0)
reply.type = req->request_type;
/* skip over each question in the reply */
for (i = 0; i < questions; ++i) {
/* the question looks like
* <label:name><u16:type><u16:class>
*/
tmp_name[0] = '\0';
cmp_name[0] = '\0';
k = j;
if (name_parse(packet, length, &j, tmp_name, sizeof(tmp_name)) < 0)
goto err;
if (name_parse(req->request, req->request_len, &k,
cmp_name, sizeof(cmp_name))<0)
goto err;
if (!base->global_randomize_case) {
if (strcmp(tmp_name, cmp_name) == 0)
name_matches = 1;
} else {
if (evutil_ascii_strcasecmp(tmp_name, cmp_name) == 0)
name_matches = 1;
}
j += 4;
if (j > length)
goto err;
}
if (!name_matches)
goto err;
/* now we have the answer section which looks like
* <label:name><u16:type><u16:class><u32:ttl><u16:len><data...>
*/
for (i = 0; i < answers; ++i) {
u16 type, class;
SKIP_NAME;
GET16(type);
GET16(class);
GET32(ttl);
GET16(datalength);
if (type == TYPE_A && class == CLASS_INET) {
int addrcount, addrtocopy;
if (req->request_type != TYPE_A) {
j += datalength; continue;
}
if ((datalength & 3) != 0) /* not an even number of As. */
goto err;
addrcount = datalength >> 2;
addrtocopy = MIN(MAX_V4_ADDRS - reply.data.a.addrcount, (unsigned)addrcount);
ttl_r = MIN(ttl_r, ttl);
/* we only bother with the first four addresses. */
if (j + 4*addrtocopy > length) goto err;
memcpy(&reply.data.a.addresses[reply.data.a.addrcount],
packet + j, 4*addrtocopy);
j += 4*addrtocopy;
reply.data.a.addrcount += addrtocopy;
reply.have_answer = 1;
if (reply.data.a.addrcount == MAX_V4_ADDRS) break;
} else if (type == TYPE_PTR && class == CLASS_INET) {
if (req->request_type != TYPE_PTR) {
j += datalength; continue;
}
if (name_parse(packet, length, &j, reply.data.ptr.name,
sizeof(reply.data.ptr.name))<0)
goto err;
ttl_r = MIN(ttl_r, ttl);
reply.have_answer = 1;
break;
} else if (type == TYPE_CNAME) {
char cname[HOST_NAME_MAX];
if (!req->put_cname_in_ptr || *req->put_cname_in_ptr) {
j += datalength; continue;
}
if (name_parse(packet, length, &j, cname,
sizeof(cname))<0)
goto err;
*req->put_cname_in_ptr = mm_strdup(cname);
} else if (type == TYPE_AAAA && class == CLASS_INET) {
int addrcount, addrtocopy;
if (req->request_type != TYPE_AAAA) {
j += datalength; continue;
}
if ((datalength & 15) != 0) /* not an even number of AAAAs. */
goto err;
addrcount = datalength >> 4; /* each address is 16 bytes long */
addrtocopy = MIN(MAX_V6_ADDRS - reply.data.aaaa.addrcount, (unsigned)addrcount);
ttl_r = MIN(ttl_r, ttl);
/* we only bother with the first four addresses. */
if (j + 16*addrtocopy > length) goto err;
memcpy(&reply.data.aaaa.addresses[reply.data.aaaa.addrcount],
packet + j, 16*addrtocopy);
reply.data.aaaa.addrcount += addrtocopy;
j += 16*addrtocopy;
reply.have_answer = 1;
if (reply.data.aaaa.addrcount == MAX_V6_ADDRS) break;
} else {
/* skip over any other type of resource */
j += datalength;
}
}
if (!reply.have_answer) {
for (i = 0; i < authority; ++i) {
u16 type, class;
SKIP_NAME;
GET16(type);
GET16(class);
GET32(ttl);
GET16(datalength);
if (type == TYPE_SOA && class == CLASS_INET) {
u32 serial, refresh, retry, expire, minimum;
SKIP_NAME;
SKIP_NAME;
GET32(serial);
GET32(refresh);
GET32(retry);
GET32(expire);
GET32(minimum);
(void)expire;
(void)retry;
(void)refresh;
(void)serial;
ttl_r = MIN(ttl_r, ttl);
ttl_r = MIN(ttl_r, minimum);
} else {
/* skip over any other type of resource */
j += datalength;
}
}
}
if (ttl_r == 0xffffffff)
ttl_r = 0;
reply_handle(req, flags, ttl_r, &reply);
return 0;
err:
if (req)
reply_handle(req, flags, 0, NULL);
return -1;
}
|
C
|
libevent
| 0 |
CVE-2015-1788
|
https://www.cvedetails.com/cve/CVE-2015-1788/
|
CWE-399
|
https://github.com/openssl/openssl/commit/4924b37ee01f71ae19c94a8934b80eeb2f677932
|
4924b37ee01f71ae19c94a8934b80eeb2f677932
|
bn/bn_gf2m.c: avoid infinite loop wich malformed ECParamters.
CVE-2015-1788
Reviewed-by: Matt Caswell <[email protected]>
|
int BN_GF2m_mod_sqr(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
{
int ret = 0;
const int max = BN_num_bits(p) + 1;
int *arr = NULL;
bn_check_top(a);
bn_check_top(p);
if ((arr = OPENSSL_malloc(sizeof(*arr) * max)) == NULL)
goto err;
ret = BN_GF2m_poly2arr(p, arr, max);
if (!ret || ret > max) {
BNerr(BN_F_BN_GF2M_MOD_SQR, BN_R_INVALID_LENGTH);
goto err;
}
ret = BN_GF2m_mod_sqr_arr(r, a, arr, ctx);
bn_check_top(r);
err:
OPENSSL_free(arr);
return ret;
}
|
int BN_GF2m_mod_sqr(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
{
int ret = 0;
const int max = BN_num_bits(p) + 1;
int *arr = NULL;
bn_check_top(a);
bn_check_top(p);
if ((arr = OPENSSL_malloc(sizeof(*arr) * max)) == NULL)
goto err;
ret = BN_GF2m_poly2arr(p, arr, max);
if (!ret || ret > max) {
BNerr(BN_F_BN_GF2M_MOD_SQR, BN_R_INVALID_LENGTH);
goto err;
}
ret = BN_GF2m_mod_sqr_arr(r, a, arr, ctx);
bn_check_top(r);
err:
OPENSSL_free(arr);
return ret;
}
|
C
|
openssl
| 0 |
CVE-2018-14395
|
https://www.cvedetails.com/cve/CVE-2018-14395/
|
CWE-369
|
https://github.com/FFmpeg/FFmpeg/commit/fa19fbcf712a6a6cc5a5cfdc3254a97b9bce6582
|
fa19fbcf712a6a6cc5a5cfdc3254a97b9bce6582
|
avformat/movenc: Write version 2 of audio atom if channels is not known
The version 1 needs the channel count and would divide by 0
Fixes: division by 0
Fixes: fpe_movenc.c_1108_1.ogg
Fixes: fpe_movenc.c_1108_2.ogg
Fixes: fpe_movenc.c_1108_3.wav
Found-by: #CHEN HONGXU# <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]>
|
static int mov_write_dfla_tag(AVIOContext *pb, MOVTrack *track)
{
int64_t pos = avio_tell(pb);
avio_wb32(pb, 0);
ffio_wfourcc(pb, "dfLa");
avio_w8(pb, 0); /* version */
avio_wb24(pb, 0); /* flags */
/* Expect the encoder to pass a METADATA_BLOCK_TYPE_STREAMINFO. */
if (track->par->extradata_size != FLAC_STREAMINFO_SIZE)
return AVERROR_INVALIDDATA;
/* TODO: Write other METADATA_BLOCK_TYPEs if the encoder makes them available. */
avio_w8(pb, 1 << 7 | FLAC_METADATA_TYPE_STREAMINFO); /* LastMetadataBlockFlag << 7 | BlockType */
avio_wb24(pb, track->par->extradata_size); /* Length */
avio_write(pb, track->par->extradata, track->par->extradata_size); /* BlockData[Length] */
return update_size(pb, pos);
}
|
static int mov_write_dfla_tag(AVIOContext *pb, MOVTrack *track)
{
int64_t pos = avio_tell(pb);
avio_wb32(pb, 0);
ffio_wfourcc(pb, "dfLa");
avio_w8(pb, 0); /* version */
avio_wb24(pb, 0); /* flags */
/* Expect the encoder to pass a METADATA_BLOCK_TYPE_STREAMINFO. */
if (track->par->extradata_size != FLAC_STREAMINFO_SIZE)
return AVERROR_INVALIDDATA;
/* TODO: Write other METADATA_BLOCK_TYPEs if the encoder makes them available. */
avio_w8(pb, 1 << 7 | FLAC_METADATA_TYPE_STREAMINFO); /* LastMetadataBlockFlag << 7 | BlockType */
avio_wb24(pb, track->par->extradata_size); /* Length */
avio_write(pb, track->par->extradata, track->par->extradata_size); /* BlockData[Length] */
return update_size(pb, pos);
}
|
C
|
FFmpeg
| 0 |
CVE-2014-1713
|
https://www.cvedetails.com/cve/CVE-2014-1713/
|
CWE-399
|
https://github.com/chromium/chromium/commit/f85a87ec670ad0fce9d98d90c9a705b72a288154
|
f85a87ec670ad0fce9d98d90c9a705b72a288154
|
document.location bindings fix
BUG=352374
[email protected]
Review URL: https://codereview.chromium.org/196343011
git-svn-id: svn://svn.chromium.org/blink/trunk@169176 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
static void raisesExceptionGetterLongAttributeAttributeSetter(v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info)
{
ExceptionState exceptionState(ExceptionState::SetterContext, "raisesExceptionGetterLongAttribute", "TestObjectPython", info.Holder(), info.GetIsolate());
TestObjectPython* imp = V8TestObjectPython::toNative(info.Holder());
V8TRYCATCH_EXCEPTION_VOID(int, cppValue, toInt32(jsValue, exceptionState), exceptionState);
imp->setRaisesExceptionGetterLongAttribute(cppValue);
}
|
static void raisesExceptionGetterLongAttributeAttributeSetter(v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info)
{
ExceptionState exceptionState(ExceptionState::SetterContext, "raisesExceptionGetterLongAttribute", "TestObjectPython", info.Holder(), info.GetIsolate());
TestObjectPython* imp = V8TestObjectPython::toNative(info.Holder());
V8TRYCATCH_EXCEPTION_VOID(int, cppValue, toInt32(jsValue, exceptionState), exceptionState);
imp->setRaisesExceptionGetterLongAttribute(cppValue);
}
|
C
|
Chrome
| 0 |
CVE-2018-13006
|
https://www.cvedetails.com/cve/CVE-2018-13006/
|
CWE-125
|
https://github.com/gpac/gpac/commit/bceb03fd2be95097a7b409ea59914f332fb6bc86
|
bceb03fd2be95097a7b409ea59914f332fb6bc86
|
fixed 2 possible heap overflows (inc. #1088)
|
GF_Err sbgp_Write(GF_Box *s, GF_BitStream *bs)
{
u32 i;
GF_Err e;
GF_SampleGroupBox *p = (GF_SampleGroupBox*)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, p->grouping_type);
if (p->version==1)
gf_bs_write_u32(bs, p->grouping_type_parameter);
gf_bs_write_u32(bs, p->entry_count);
for (i = 0; i<p->entry_count; i++ ) {
gf_bs_write_u32(bs, p->sample_entries[i].sample_count);
gf_bs_write_u32(bs, p->sample_entries[i].group_description_index);
}
return GF_OK;
}
|
GF_Err sbgp_Write(GF_Box *s, GF_BitStream *bs)
{
u32 i;
GF_Err e;
GF_SampleGroupBox *p = (GF_SampleGroupBox*)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, p->grouping_type);
if (p->version==1)
gf_bs_write_u32(bs, p->grouping_type_parameter);
gf_bs_write_u32(bs, p->entry_count);
for (i = 0; i<p->entry_count; i++ ) {
gf_bs_write_u32(bs, p->sample_entries[i].sample_count);
gf_bs_write_u32(bs, p->sample_entries[i].group_description_index);
}
return GF_OK;
}
|
C
|
gpac
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/d1a59e4e845a01d7d7b80ef184b672752a9eae4d
|
d1a59e4e845a01d7d7b80ef184b672752a9eae4d
|
Fixing cross-process postMessage replies on more than two iterations.
When two frames are replying to each other using event.source across processes,
after the first two replies, things break down. The root cause is that in
RenderViewImpl::GetFrameByMappedID, the lookup was incorrect. It is now
properly searching for the remote frame id and returning the local one.
BUG=153445
Review URL: https://chromiumcodereview.appspot.com/11040015
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@159924 0039d316-1c4b-4281-b951-d872f2087c98
|
bool RenderViewImpl::willCheckAndDispatchMessageEvent(
WebKit::WebFrame* sourceFrame,
WebKit::WebFrame* targetFrame,
WebKit::WebSecurityOrigin target_origin,
WebKit::WebDOMMessageEvent event) {
if (!is_swapped_out_)
return false;
ViewMsg_PostMessage_Params params;
params.data = event.data().toString();
params.source_origin = event.origin();
if (!target_origin.isNull())
params.target_origin = target_origin.toString();
params.source_routing_id = MSG_ROUTING_NONE;
RenderViewImpl* source_view = FromWebView(sourceFrame->view());
if (source_view)
params.source_routing_id = source_view->routing_id();
params.source_frame_id = sourceFrame->identifier();
params.target_process_id = target_process_id_;
params.target_routing_id = target_routing_id_;
std::map<int,int>::iterator it = active_frame_id_map_.find(
targetFrame->identifier());
if (it != active_frame_id_map_.end()) {
params.target_frame_id = it->second;
} else {
params.target_frame_id = 0;
}
Send(new ViewHostMsg_RouteMessageEvent(routing_id_, params));
return true;
}
|
bool RenderViewImpl::willCheckAndDispatchMessageEvent(
WebKit::WebFrame* sourceFrame,
WebKit::WebFrame* targetFrame,
WebKit::WebSecurityOrigin target_origin,
WebKit::WebDOMMessageEvent event) {
if (!is_swapped_out_)
return false;
ViewMsg_PostMessage_Params params;
params.data = event.data().toString();
params.source_origin = event.origin();
if (!target_origin.isNull())
params.target_origin = target_origin.toString();
params.source_routing_id = MSG_ROUTING_NONE;
RenderViewImpl* source_view = FromWebView(sourceFrame->view());
if (source_view)
params.source_routing_id = source_view->routing_id();
params.source_frame_id = sourceFrame->identifier();
params.target_process_id = target_process_id_;
params.target_routing_id = target_routing_id_;
std::map<int,int>::iterator it = active_frame_id_map_.find(
targetFrame->identifier());
if (it != active_frame_id_map_.end()) {
params.target_frame_id = it->second;
} else {
params.target_frame_id = 0;
}
Send(new ViewHostMsg_RouteMessageEvent(routing_id_, params));
return true;
}
|
C
|
Chrome
| 0 |
CVE-2012-5148
|
https://www.cvedetails.com/cve/CVE-2012-5148/
|
CWE-20
|
https://github.com/chromium/chromium/commit/e89cfcb9090e8c98129ae9160c513f504db74599
|
e89cfcb9090e8c98129ae9160c513f504db74599
|
Remove TabContents from TabStripModelObserver::TabDetachedAt.
BUG=107201
TEST=no visible change
Review URL: https://chromiumcodereview.appspot.com/11293205
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@167122 0039d316-1c4b-4281-b951-d872f2087c98
|
MiniMoveAnimation(TabStripGtk* tabstrip,
int from_index,
int to_index,
const gfx::Rect& start_bounds)
: TabAnimation(tabstrip, MINI_MOVE),
tab_(tabstrip->GetTabAt(to_index)),
start_bounds_(start_bounds),
from_index_(from_index),
to_index_(to_index) {
int tab_count = tabstrip->GetTabCount();
int start_mini_count = tabstrip->GetMiniTabCount();
int end_mini_count = start_mini_count;
if (tabstrip->GetTabAt(to_index)->mini())
start_mini_count--;
else
start_mini_count++;
GenerateStartAndEndWidths(tab_count, tab_count, start_mini_count,
end_mini_count);
target_bounds_ = tabstrip->GetIdealBounds(to_index);
tab_->set_animating_mini_change(true);
}
|
MiniMoveAnimation(TabStripGtk* tabstrip,
int from_index,
int to_index,
const gfx::Rect& start_bounds)
: TabAnimation(tabstrip, MINI_MOVE),
tab_(tabstrip->GetTabAt(to_index)),
start_bounds_(start_bounds),
from_index_(from_index),
to_index_(to_index) {
int tab_count = tabstrip->GetTabCount();
int start_mini_count = tabstrip->GetMiniTabCount();
int end_mini_count = start_mini_count;
if (tabstrip->GetTabAt(to_index)->mini())
start_mini_count--;
else
start_mini_count++;
GenerateStartAndEndWidths(tab_count, tab_count, start_mini_count,
end_mini_count);
target_bounds_ = tabstrip->GetIdealBounds(to_index);
tab_->set_animating_mini_change(true);
}
|
C
|
Chrome
| 0 |
CVE-2018-6178
|
https://www.cvedetails.com/cve/CVE-2018-6178/
|
CWE-254
|
https://github.com/chromium/chromium/commit/fbeba958bb83c05ec8cc54e285a4a9ca10d1b311
|
fbeba958bb83c05ec8cc54e285a4a9ca10d1b311
|
Allow to specify elide behavior for confrim infobar message
Used in "<extension name> is debugging this browser" infobar.
Bug: 823194
Change-Id: Iff6627097c020cccca8f7cc3e21a803a41fd8f2c
Reviewed-on: https://chromium-review.googlesource.com/1048064
Commit-Queue: Dmitry Gozman <[email protected]>
Reviewed-by: Devlin <[email protected]>
Reviewed-by: Peter Kasting <[email protected]>
Cr-Commit-Position: refs/heads/master@{#557245}
|
GURL GlobalConfirmInfoBar::DelegateProxy::GetLinkURL() const {
return global_info_bar_ ? global_info_bar_->delegate_->GetLinkURL()
: GURL();
}
|
GURL GlobalConfirmInfoBar::DelegateProxy::GetLinkURL() const {
return global_info_bar_ ? global_info_bar_->delegate_->GetLinkURL()
: GURL();
}
|
C
|
Chrome
| 0 |
CVE-2018-6196
|
https://www.cvedetails.com/cve/CVE-2018-6196/
|
CWE-835
|
https://github.com/tats/w3m/commit/8354763b90490d4105695df52674d0fcef823e92
|
8354763b90490d4105695df52674d0fcef823e92
|
Prevent negative indent value in feed_table_block_tag()
Bug-Debian: https://github.com/tats/w3m/issues/88
|
feed_table1(struct table *tbl, Str tok, struct table_mode *mode, int width)
{
Str tokbuf;
int status;
char *line;
if (!tok)
return;
tokbuf = Strnew();
status = R_ST_NORMAL;
line = tok->ptr;
while (read_token
(tokbuf, &line, &status, mode->pre_mode & TBLM_PREMODE, 0))
feed_table(tbl, tokbuf->ptr, mode, width, TRUE);
}
|
feed_table1(struct table *tbl, Str tok, struct table_mode *mode, int width)
{
Str tokbuf;
int status;
char *line;
if (!tok)
return;
tokbuf = Strnew();
status = R_ST_NORMAL;
line = tok->ptr;
while (read_token
(tokbuf, &line, &status, mode->pre_mode & TBLM_PREMODE, 0))
feed_table(tbl, tokbuf->ptr, mode, width, TRUE);
}
|
C
|
w3m
| 0 |
CVE-2013-2878
|
https://www.cvedetails.com/cve/CVE-2013-2878/
|
CWE-119
|
https://github.com/chromium/chromium/commit/09fbb829eab7ee25e90bb4e9c2f4973c6c62d0f3
|
09fbb829eab7ee25e90bb4e9c2f4973c6c62d0f3
|
Upgrade a TextIterator ASSERT to a RELEASE_ASSERT as a defensive measure.
BUG=156930,177197
[email protected]
Review URL: https://codereview.chromium.org/15057010
git-svn-id: svn://svn.chromium.org/blink/trunk@150123 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
inline void SearchBuffer::prependContext(const UChar* characters, size_t length)
{
ASSERT(m_needsMoreContext);
ASSERT(m_prefixLength == m_buffer.size());
if (!length)
return;
m_atBreak = false;
size_t wordBoundaryContextStart = length;
if (wordBoundaryContextStart) {
U16_BACK_1(characters, 0, wordBoundaryContextStart);
wordBoundaryContextStart = startOfLastWordBoundaryContext(characters, wordBoundaryContextStart);
}
size_t usableLength = min(m_buffer.capacity() - m_prefixLength, length - wordBoundaryContextStart);
m_buffer.prepend(characters + length - usableLength, usableLength);
m_prefixLength += usableLength;
if (wordBoundaryContextStart || m_prefixLength == m_buffer.capacity())
m_needsMoreContext = false;
}
|
inline void SearchBuffer::prependContext(const UChar* characters, size_t length)
{
ASSERT(m_needsMoreContext);
ASSERT(m_prefixLength == m_buffer.size());
if (!length)
return;
m_atBreak = false;
size_t wordBoundaryContextStart = length;
if (wordBoundaryContextStart) {
U16_BACK_1(characters, 0, wordBoundaryContextStart);
wordBoundaryContextStart = startOfLastWordBoundaryContext(characters, wordBoundaryContextStart);
}
size_t usableLength = min(m_buffer.capacity() - m_prefixLength, length - wordBoundaryContextStart);
m_buffer.prepend(characters + length - usableLength, usableLength);
m_prefixLength += usableLength;
if (wordBoundaryContextStart || m_prefixLength == m_buffer.capacity())
m_needsMoreContext = false;
}
|
C
|
Chrome
| 0 |
CVE-2017-5019
|
https://www.cvedetails.com/cve/CVE-2017-5019/
|
CWE-416
|
https://github.com/chromium/chromium/commit/f03ea5a5c2ff26e239dfd23e263b15da2d9cee93
|
f03ea5a5c2ff26e239dfd23e263b15da2d9cee93
|
Convert FrameHostMsg_DidAddMessageToConsole to Mojo.
Note: Since this required changing the test
RenderViewImplTest.DispatchBeforeUnloadCanDetachFrame, I manually
re-introduced https://crbug.com/666714 locally (the bug the test was
added for), and reran the test to confirm that it still covers the bug.
Bug: 786836
Change-Id: I110668fa6f0f261fd2ac36bb91a8d8b31c99f4f1
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1526270
Commit-Queue: Lowell Manners <[email protected]>
Reviewed-by: Daniel Cheng <[email protected]>
Reviewed-by: Camille Lamy <[email protected]>
Cr-Commit-Position: refs/heads/master@{#653137}
|
void ServiceWorkerContextCore::DidGetRegistrationsForDeleteForOrigin(
base::OnceCallback<void(blink::ServiceWorkerStatusCode)> callback,
blink::ServiceWorkerStatusCode status,
const std::vector<scoped_refptr<ServiceWorkerRegistration>>&
registrations) {
if (status != blink::ServiceWorkerStatusCode::kOk) {
std::move(callback).Run(status);
return;
}
if (registrations.empty()) {
std::move(callback).Run(blink::ServiceWorkerStatusCode::kOk);
return;
}
int* expected_calls = new int(2 * registrations.size());
base::RepeatingCallback<void(blink::ServiceWorkerStatusCode)> barrier =
base::BindRepeating(SuccessReportingCallback, base::Owned(expected_calls),
base::AdaptCallbackForRepeating(std::move(callback)));
for (const auto& registration : registrations) {
DCHECK(registration);
if (!registration->is_deleted()) {
RegistrationDeletionListener::WaitForDeletion(
registration,
base::BindOnce(barrier, blink::ServiceWorkerStatusCode::kOk));
} else {
barrier.Run(blink::ServiceWorkerStatusCode::kOk);
}
job_coordinator_->Abort(registration->scope());
UnregisterServiceWorker(registration->scope(), barrier);
}
}
|
void ServiceWorkerContextCore::DidGetRegistrationsForDeleteForOrigin(
base::OnceCallback<void(blink::ServiceWorkerStatusCode)> callback,
blink::ServiceWorkerStatusCode status,
const std::vector<scoped_refptr<ServiceWorkerRegistration>>&
registrations) {
if (status != blink::ServiceWorkerStatusCode::kOk) {
std::move(callback).Run(status);
return;
}
if (registrations.empty()) {
std::move(callback).Run(blink::ServiceWorkerStatusCode::kOk);
return;
}
int* expected_calls = new int(2 * registrations.size());
base::RepeatingCallback<void(blink::ServiceWorkerStatusCode)> barrier =
base::BindRepeating(SuccessReportingCallback, base::Owned(expected_calls),
base::AdaptCallbackForRepeating(std::move(callback)));
for (const auto& registration : registrations) {
DCHECK(registration);
if (!registration->is_deleted()) {
RegistrationDeletionListener::WaitForDeletion(
registration,
base::BindOnce(barrier, blink::ServiceWorkerStatusCode::kOk));
} else {
barrier.Run(blink::ServiceWorkerStatusCode::kOk);
}
job_coordinator_->Abort(registration->scope());
UnregisterServiceWorker(registration->scope(), barrier);
}
}
|
C
|
Chrome
| 0 |
CVE-2013-4247
|
https://www.cvedetails.com/cve/CVE-2013-4247/
|
CWE-189
|
https://github.com/torvalds/linux/commit/1fc29bacedeabb278080e31bb9c1ecb49f143c3b
|
1fc29bacedeabb278080e31bb9c1ecb49f143c3b
|
cifs: fix off-by-one bug in build_unc_path_to_root
commit 839db3d10a (cifs: fix up handling of prefixpath= option) changed
the code such that the vol->prepath no longer contained a leading
delimiter and then fixed up the places that accessed that field to
account for that change.
One spot in build_unc_path_to_root was missed however. When doing the
pointer addition on pos, that patch failed to account for the fact that
we had already incremented "pos" by one when adding the length of the
prepath. This caused a buffer overrun by one byte.
This patch fixes the problem by correcting the handling of "pos".
Cc: <[email protected]> # v3.8+
Reported-by: Marcus Moeller <[email protected]>
Reported-by: Ken Fallon <[email protected]>
Signed-off-by: Jeff Layton <[email protected]>
Signed-off-by: Steve French <[email protected]>
|
dequeue_mid(struct mid_q_entry *mid, bool malformed)
{
#ifdef CONFIG_CIFS_STATS2
mid->when_received = jiffies;
#endif
spin_lock(&GlobalMid_Lock);
if (!malformed)
mid->mid_state = MID_RESPONSE_RECEIVED;
else
mid->mid_state = MID_RESPONSE_MALFORMED;
list_del_init(&mid->qhead);
spin_unlock(&GlobalMid_Lock);
}
|
dequeue_mid(struct mid_q_entry *mid, bool malformed)
{
#ifdef CONFIG_CIFS_STATS2
mid->when_received = jiffies;
#endif
spin_lock(&GlobalMid_Lock);
if (!malformed)
mid->mid_state = MID_RESPONSE_RECEIVED;
else
mid->mid_state = MID_RESPONSE_MALFORMED;
list_del_init(&mid->qhead);
spin_unlock(&GlobalMid_Lock);
}
|
C
|
linux
| 0 |
CVE-2012-3412
|
https://www.cvedetails.com/cve/CVE-2012-3412/
|
CWE-189
|
https://github.com/torvalds/linux/commit/68cb695ccecf949d48949e72f8ce591fdaaa325c
|
68cb695ccecf949d48949e72f8ce591fdaaa325c
|
sfc: Fix maximum number of TSO segments and minimum TX queue size
[ Upstream commit 7e6d06f0de3f74ca929441add094518ae332257c ]
Currently an skb requiring TSO may not fit within a minimum-size TX
queue. The TX queue selected for the skb may stall and trigger the TX
watchdog repeatedly (since the problem skb will be retried after the
TX reset). This issue is designated as CVE-2012-3412.
Set the maximum number of TSO segments for our devices to 100. This
should make no difference to behaviour unless the actual MSS is less
than about 700. Increase the minimum TX queue size accordingly to
allow for 2 worst-case skbs, so that there will definitely be space
to add an skb after we wake a queue.
To avoid invalidating existing configurations, change
efx_ethtool_set_ringparam() to fix up values that are too small rather
than returning -EINVAL.
Signed-off-by: Ben Hutchings <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
Signed-off-by: Ben Hutchings <[email protected]>
|
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_tx_queue *tx_queue;
unsigned index, type;
EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
index = skb_get_queue_mapping(skb);
type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
if (index >= efx->n_tx_channels) {
index -= efx->n_tx_channels;
type |= EFX_TXQ_TYPE_HIGHPRI;
}
tx_queue = efx_get_tx_queue(efx, index, type);
return efx_enqueue_skb(tx_queue, skb);
}
|
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_tx_queue *tx_queue;
unsigned index, type;
EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
index = skb_get_queue_mapping(skb);
type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
if (index >= efx->n_tx_channels) {
index -= efx->n_tx_channels;
type |= EFX_TXQ_TYPE_HIGHPRI;
}
tx_queue = efx_get_tx_queue(efx, index, type);
return efx_enqueue_skb(tx_queue, skb);
}
|
C
|
linux
| 0 |
CVE-2016-1621
|
https://www.cvedetails.com/cve/CVE-2016-1621/
|
CWE-119
|
https://android.googlesource.com/platform/external/libvpx/+/5a9753fca56f0eeb9f61e342b2fccffc364f9426
|
5a9753fca56f0eeb9f61e342b2fccffc364f9426
|
Merge Conflict Fix CL to lmp-mr1-release for ag/849478
DO NOT MERGE - libvpx: Pull from upstream
Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06
BUG=23452792
Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec
|
virtual void SetUp() {
width_ = GET_PARAM(0);
height_ = GET_PARAM(1);
sixtap_predict_ = GET_PARAM(2);
memset(src_, 0, kSrcSize);
memset(dst_, 0, kDstSize);
memset(dst_c_, 0, kDstSize);
}
|
virtual void SetUp() {
width_ = GET_PARAM(0);
height_ = GET_PARAM(1);
sixtap_predict_ = GET_PARAM(2);
memset(src_, 0, kSrcSize);
memset(dst_, 0, kDstSize);
memset(dst_c_, 0, kDstSize);
}
|
C
|
Android
| 0 |
CVE-2017-6903
|
https://www.cvedetails.com/cve/CVE-2017-6903/
|
CWE-269
|
https://github.com/iortcw/iortcw/commit/b6ff2bcb1e4e6976d61e316175c6d7c99860fe20
|
b6ff2bcb1e4e6976d61e316175c6d7c99860fe20
|
All: Don't load .pk3s as .dlls, and don't load user config files from .pk3s
|
void CL_NextDownload( void ) {
char *s;
char *remoteName, *localName;
qboolean useCURL = qfalse;
if( *clc.downloadName && !autoupdateStarted ) {
char *zippath = FS_BuildOSPath(Cvar_VariableString("fs_homepath"), clc.downloadName, "");
zippath[strlen(zippath)-1] = '\0';
if(!FS_CompareZipChecksum(zippath))
Com_Error(ERR_DROP, "Incorrect checksum for file: %s", clc.downloadName);
}
*clc.downloadTempName = *clc.downloadName = 0;
Cvar_Set("cl_downloadName", "");
if ( *clc.downloadList ) {
s = clc.downloadList;
if ( *s == '@' ) {
s++;
}
remoteName = s;
if ( ( s = strchr( s, '@' ) ) == NULL ) {
CL_DownloadsComplete();
return;
}
*s++ = 0;
localName = s;
if ( ( s = strchr( s, '@' ) ) != NULL ) {
*s++ = 0;
} else {
s = localName + strlen( localName ); // point at the nul byte
}
#ifdef USE_CURL
if(!(cl_allowDownload->integer & DLF_NO_REDIRECT)) {
if(clc.sv_allowDownload & DLF_NO_REDIRECT) {
Com_Printf("WARNING: server does not "
"allow download redirection "
"(sv_allowDownload is %d)\n",
clc.sv_allowDownload);
}
else if(!*clc.sv_dlURL) {
Com_Printf("WARNING: server allows "
"download redirection, but does not "
"have sv_dlURL set\n");
}
else if(!CL_cURL_Init()) {
Com_Printf("WARNING: could not load "
"cURL library\n");
}
else {
CL_cURL_BeginDownload(localName, va("%s/%s",
clc.sv_dlURL, remoteName));
useCURL = qtrue;
}
}
else if(!(clc.sv_allowDownload & DLF_NO_REDIRECT)) {
Com_Printf("WARNING: server allows download "
"redirection, but it disabled by client "
"configuration (cl_allowDownload is %d)\n",
cl_allowDownload->integer);
}
#endif /* USE_CURL */
if(!useCURL) {
if((cl_allowDownload->integer & DLF_NO_UDP)) {
Com_Error(ERR_DROP, "UDP Downloads are "
"disabled on your client. "
"(cl_allowDownload is %d)",
cl_allowDownload->integer);
return;
}
else {
CL_BeginDownload( localName, remoteName );
}
}
clc.downloadRestart = qtrue;
memmove( clc.downloadList, s, strlen( s ) + 1 );
return;
}
CL_DownloadsComplete();
}
|
void CL_NextDownload( void ) {
char *s;
char *remoteName, *localName;
qboolean useCURL = qfalse;
if( *clc.downloadName && !autoupdateStarted ) {
char *zippath = FS_BuildOSPath(Cvar_VariableString("fs_homepath"), clc.downloadName, "");
zippath[strlen(zippath)-1] = '\0';
if(!FS_CompareZipChecksum(zippath))
Com_Error(ERR_DROP, "Incorrect checksum for file: %s", clc.downloadName);
}
*clc.downloadTempName = *clc.downloadName = 0;
Cvar_Set("cl_downloadName", "");
if ( *clc.downloadList ) {
s = clc.downloadList;
if ( *s == '@' ) {
s++;
}
remoteName = s;
if ( ( s = strchr( s, '@' ) ) == NULL ) {
CL_DownloadsComplete();
return;
}
*s++ = 0;
localName = s;
if ( ( s = strchr( s, '@' ) ) != NULL ) {
*s++ = 0;
} else {
s = localName + strlen( localName ); // point at the nul byte
}
#ifdef USE_CURL
if(!(cl_allowDownload->integer & DLF_NO_REDIRECT)) {
if(clc.sv_allowDownload & DLF_NO_REDIRECT) {
Com_Printf("WARNING: server does not "
"allow download redirection "
"(sv_allowDownload is %d)\n",
clc.sv_allowDownload);
}
else if(!*clc.sv_dlURL) {
Com_Printf("WARNING: server allows "
"download redirection, but does not "
"have sv_dlURL set\n");
}
else if(!CL_cURL_Init()) {
Com_Printf("WARNING: could not load "
"cURL library\n");
}
else {
CL_cURL_BeginDownload(localName, va("%s/%s",
clc.sv_dlURL, remoteName));
useCURL = qtrue;
}
}
else if(!(clc.sv_allowDownload & DLF_NO_REDIRECT)) {
Com_Printf("WARNING: server allows download "
"redirection, but it disabled by client "
"configuration (cl_allowDownload is %d)\n",
cl_allowDownload->integer);
}
#endif /* USE_CURL */
if(!useCURL) {
if((cl_allowDownload->integer & DLF_NO_UDP)) {
Com_Error(ERR_DROP, "UDP Downloads are "
"disabled on your client. "
"(cl_allowDownload is %d)",
cl_allowDownload->integer);
return;
}
else {
CL_BeginDownload( localName, remoteName );
}
}
clc.downloadRestart = qtrue;
memmove( clc.downloadList, s, strlen( s ) + 1 );
return;
}
CL_DownloadsComplete();
}
|
C
|
OpenJK
| 0 |
CVE-2011-3964
|
https://www.cvedetails.com/cve/CVE-2011-3964/
| null |
https://github.com/chromium/chromium/commit/0c14577c9905bd8161159ec7eaac810c594508d0
|
0c14577c9905bd8161159ec7eaac810c594508d0
|
Change omnibox behavior when stripping javascript schema to navigate after stripping the schema on drag drop.
BUG=109245
TEST=N/A
Review URL: http://codereview.chromium.org/9116016
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@116692 0039d316-1c4b-4281-b951-d872f2087c98
|
DWORD OmniboxViewWin::EditDropTarget::OnDrop(IDataObject* data_object,
DWORD key_state,
POINT cursor_position,
DWORD effect) {
effect = OnDragOver(data_object, key_state, cursor_position, effect);
ui::OSExchangeData os_data(new ui::OSExchangeDataProviderWin(data_object));
views::DropTargetEvent event(os_data, cursor_position.x, cursor_position.y,
ui::DragDropTypes::DropEffectToDragOperation(effect));
int drag_operation = edit_->OnPerformDropImpl(event, edit_->in_drag());
if (!drag_has_url_)
ResetDropHighlights();
return ui::DragDropTypes::DragOperationToDropEffect(drag_operation);
}
|
DWORD OmniboxViewWin::EditDropTarget::OnDrop(IDataObject* data_object,
DWORD key_state,
POINT cursor_position,
DWORD effect) {
effect = OnDragOver(data_object, key_state, cursor_position, effect);
ui::OSExchangeData os_data(new ui::OSExchangeDataProviderWin(data_object));
views::DropTargetEvent event(os_data, cursor_position.x, cursor_position.y,
ui::DragDropTypes::DropEffectToDragOperation(effect));
int drag_operation = edit_->OnPerformDropImpl(event, edit_->in_drag());
if (!drag_has_url_)
ResetDropHighlights();
return ui::DragDropTypes::DragOperationToDropEffect(drag_operation);
}
|
C
|
Chrome
| 0 |
CVE-2011-3102
|
https://www.cvedetails.com/cve/CVE-2011-3102/
|
CWE-189
|
https://github.com/chromium/chromium/commit/4c46d7a5b0af9b7d320e709291b270ab7cf07e83
|
4c46d7a5b0af9b7d320e709291b270ab7cf07e83
|
Fix XPointer bug.
BUG=125462
[email protected]
[email protected]
Review URL: https://chromiumcodereview.appspot.com/10344022
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@135174 0039d316-1c4b-4281-b951-d872f2087c98
|
xmlXPtrEvalChildSeq(xmlXPathParserContextPtr ctxt, xmlChar *name) {
/*
* XPointer don't allow by syntax to address in mutirooted trees
* this might prove useful in some cases, warn about it.
*/
if ((name == NULL) && (CUR == '/') && (NXT(1) != '1')) {
xmlXPtrErr(ctxt, XML_XPTR_CHILDSEQ_START,
"warning: ChildSeq not starting by /1\n", NULL);
}
if (name != NULL) {
valuePush(ctxt, xmlXPathNewString(name));
xmlFree(name);
xmlXPathIdFunction(ctxt, 1);
CHECK_ERROR;
}
while (CUR == '/') {
int child = 0;
NEXT;
while ((CUR >= '0') && (CUR <= '9')) {
child = child * 10 + (CUR - '0');
NEXT;
}
xmlXPtrGetChildNo(ctxt, child);
}
}
|
xmlXPtrEvalChildSeq(xmlXPathParserContextPtr ctxt, xmlChar *name) {
/*
* XPointer don't allow by syntax to address in mutirooted trees
* this might prove useful in some cases, warn about it.
*/
if ((name == NULL) && (CUR == '/') && (NXT(1) != '1')) {
xmlXPtrErr(ctxt, XML_XPTR_CHILDSEQ_START,
"warning: ChildSeq not starting by /1\n", NULL);
}
if (name != NULL) {
valuePush(ctxt, xmlXPathNewString(name));
xmlFree(name);
xmlXPathIdFunction(ctxt, 1);
CHECK_ERROR;
}
while (CUR == '/') {
int child = 0;
NEXT;
while ((CUR >= '0') && (CUR <= '9')) {
child = child * 10 + (CUR - '0');
NEXT;
}
xmlXPtrGetChildNo(ctxt, child);
}
}
|
C
|
Chrome
| 0 |
CVE-2018-17206
|
https://www.cvedetails.com/cve/CVE-2018-17206/
| null |
https://github.com/openvswitch/ovs/commit/9237a63c47bd314b807cda0bd2216264e82edbe8
|
9237a63c47bd314b807cda0bd2216264e82edbe8
|
ofp-actions: Avoid buffer overread in BUNDLE action decoding.
Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9052
Signed-off-by: Ben Pfaff <[email protected]>
Acked-by: Justin Pettit <[email protected]>
|
get_actions_from_instruction(const struct ofp11_instruction *inst,
const struct ofp_action_header **actions,
size_t *actions_len)
{
*actions = ALIGNED_CAST(const struct ofp_action_header *, inst + 1);
*actions_len = ntohs(inst->len) - sizeof *inst;
}
|
get_actions_from_instruction(const struct ofp11_instruction *inst,
const struct ofp_action_header **actions,
size_t *actions_len)
{
*actions = ALIGNED_CAST(const struct ofp_action_header *, inst + 1);
*actions_len = ntohs(inst->len) - sizeof *inst;
}
|
C
|
ovs
| 0 |
CVE-2017-7375
|
https://www.cvedetails.com/cve/CVE-2017-7375/
|
CWE-611
|
https://android.googlesource.com/platform/external/libxml2/+/308396a55280f69ad4112d4f9892f4cbeff042aa
|
308396a55280f69ad4112d4f9892f4cbeff042aa
|
DO NOT MERGE: Add validation for eternal enities
https://bugzilla.gnome.org/show_bug.cgi?id=780691
Bug: 36556310
Change-Id: I9450743e167c3c73af5e4071f3fc85e81d061648
(cherry picked from commit bef9af3d89d241bcb518c20cba6da2a2fd9ba049)
|
xmlParseAttValueComplex(xmlParserCtxtPtr ctxt, int *attlen, int normalize) {
xmlChar limit = 0;
xmlChar *buf = NULL;
xmlChar *rep = NULL;
size_t len = 0;
size_t buf_size = 0;
int c, l, in_space = 0;
xmlChar *current = NULL;
xmlEntityPtr ent;
if (NXT(0) == '"') {
ctxt->instate = XML_PARSER_ATTRIBUTE_VALUE;
limit = '"';
NEXT;
} else if (NXT(0) == '\'') {
limit = '\'';
ctxt->instate = XML_PARSER_ATTRIBUTE_VALUE;
NEXT;
} else {
xmlFatalErr(ctxt, XML_ERR_ATTRIBUTE_NOT_STARTED, NULL);
return(NULL);
}
/*
* allocate a translation buffer.
*/
buf_size = XML_PARSER_BUFFER_SIZE;
buf = (xmlChar *) xmlMallocAtomic(buf_size);
if (buf == NULL) goto mem_error;
/*
* OK loop until we reach one of the ending char or a size limit.
*/
c = CUR_CHAR(l);
while (((NXT(0) != limit) && /* checked */
(IS_CHAR(c)) && (c != '<')) &&
(ctxt->instate != XML_PARSER_EOF)) {
/*
* Impose a reasonable limit on attribute size, unless XML_PARSE_HUGE
* special option is given
*/
if ((len > XML_MAX_TEXT_LENGTH) &&
((ctxt->options & XML_PARSE_HUGE) == 0)) {
xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
"AttValue length too long\n");
goto mem_error;
}
if (c == 0) break;
if (c == '&') {
in_space = 0;
if (NXT(1) == '#') {
int val = xmlParseCharRef(ctxt);
if (val == '&') {
if (ctxt->replaceEntities) {
if (len + 10 > buf_size) {
growBuffer(buf, 10);
}
buf[len++] = '&';
} else {
/*
* The reparsing will be done in xmlStringGetNodeList()
* called by the attribute() function in SAX.c
*/
if (len + 10 > buf_size) {
growBuffer(buf, 10);
}
buf[len++] = '&';
buf[len++] = '#';
buf[len++] = '3';
buf[len++] = '8';
buf[len++] = ';';
}
} else if (val != 0) {
if (len + 10 > buf_size) {
growBuffer(buf, 10);
}
len += xmlCopyChar(0, &buf[len], val);
}
} else {
ent = xmlParseEntityRef(ctxt);
ctxt->nbentities++;
if (ent != NULL)
ctxt->nbentities += ent->owner;
if ((ent != NULL) &&
(ent->etype == XML_INTERNAL_PREDEFINED_ENTITY)) {
if (len + 10 > buf_size) {
growBuffer(buf, 10);
}
if ((ctxt->replaceEntities == 0) &&
(ent->content[0] == '&')) {
buf[len++] = '&';
buf[len++] = '#';
buf[len++] = '3';
buf[len++] = '8';
buf[len++] = ';';
} else {
buf[len++] = ent->content[0];
}
} else if ((ent != NULL) &&
(ctxt->replaceEntities != 0)) {
if (ent->etype != XML_INTERNAL_PREDEFINED_ENTITY) {
++ctxt->depth;
rep = xmlStringDecodeEntities(ctxt, ent->content,
XML_SUBSTITUTE_REF,
0, 0, 0);
--ctxt->depth;
if (rep != NULL) {
current = rep;
while (*current != 0) { /* non input consuming */
if ((*current == 0xD) || (*current == 0xA) ||
(*current == 0x9)) {
buf[len++] = 0x20;
current++;
} else
buf[len++] = *current++;
if (len + 10 > buf_size) {
growBuffer(buf, 10);
}
}
xmlFree(rep);
rep = NULL;
}
} else {
if (len + 10 > buf_size) {
growBuffer(buf, 10);
}
if (ent->content != NULL)
buf[len++] = ent->content[0];
}
} else if (ent != NULL) {
int i = xmlStrlen(ent->name);
const xmlChar *cur = ent->name;
/*
* This may look absurd but is needed to detect
* entities problems
*/
if ((ent->etype != XML_INTERNAL_PREDEFINED_ENTITY) &&
(ent->content != NULL) && (ent->checked == 0)) {
unsigned long oldnbent = ctxt->nbentities;
++ctxt->depth;
rep = xmlStringDecodeEntities(ctxt, ent->content,
XML_SUBSTITUTE_REF, 0, 0, 0);
--ctxt->depth;
ent->checked = (ctxt->nbentities - oldnbent + 1) * 2;
if (rep != NULL) {
if (xmlStrchr(rep, '<'))
ent->checked |= 1;
xmlFree(rep);
rep = NULL;
}
}
/*
* Just output the reference
*/
buf[len++] = '&';
while (len + i + 10 > buf_size) {
growBuffer(buf, i + 10);
}
for (;i > 0;i--)
buf[len++] = *cur++;
buf[len++] = ';';
}
}
} else {
if ((c == 0x20) || (c == 0xD) || (c == 0xA) || (c == 0x9)) {
if ((len != 0) || (!normalize)) {
if ((!normalize) || (!in_space)) {
COPY_BUF(l,buf,len,0x20);
while (len + 10 > buf_size) {
growBuffer(buf, 10);
}
}
in_space = 1;
}
} else {
in_space = 0;
COPY_BUF(l,buf,len,c);
if (len + 10 > buf_size) {
growBuffer(buf, 10);
}
}
NEXTL(l);
}
GROW;
c = CUR_CHAR(l);
}
if (ctxt->instate == XML_PARSER_EOF)
goto error;
if ((in_space) && (normalize)) {
while ((len > 0) && (buf[len - 1] == 0x20)) len--;
}
buf[len] = 0;
if (RAW == '<') {
xmlFatalErr(ctxt, XML_ERR_LT_IN_ATTRIBUTE, NULL);
} else if (RAW != limit) {
if ((c != 0) && (!IS_CHAR(c))) {
xmlFatalErrMsg(ctxt, XML_ERR_INVALID_CHAR,
"invalid character in attribute value\n");
} else {
xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
"AttValue: ' expected\n");
}
} else
NEXT;
/*
* There we potentially risk an overflow, don't allow attribute value of
* length more than INT_MAX it is a very reasonnable assumption !
*/
if (len >= INT_MAX) {
xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
"AttValue length too long\n");
goto mem_error;
}
if (attlen != NULL) *attlen = (int) len;
return(buf);
mem_error:
xmlErrMemory(ctxt, NULL);
error:
if (buf != NULL)
xmlFree(buf);
if (rep != NULL)
xmlFree(rep);
return(NULL);
}
|
xmlParseAttValueComplex(xmlParserCtxtPtr ctxt, int *attlen, int normalize) {
xmlChar limit = 0;
xmlChar *buf = NULL;
xmlChar *rep = NULL;
size_t len = 0;
size_t buf_size = 0;
int c, l, in_space = 0;
xmlChar *current = NULL;
xmlEntityPtr ent;
if (NXT(0) == '"') {
ctxt->instate = XML_PARSER_ATTRIBUTE_VALUE;
limit = '"';
NEXT;
} else if (NXT(0) == '\'') {
limit = '\'';
ctxt->instate = XML_PARSER_ATTRIBUTE_VALUE;
NEXT;
} else {
xmlFatalErr(ctxt, XML_ERR_ATTRIBUTE_NOT_STARTED, NULL);
return(NULL);
}
/*
* allocate a translation buffer.
*/
buf_size = XML_PARSER_BUFFER_SIZE;
buf = (xmlChar *) xmlMallocAtomic(buf_size);
if (buf == NULL) goto mem_error;
/*
* OK loop until we reach one of the ending char or a size limit.
*/
c = CUR_CHAR(l);
while (((NXT(0) != limit) && /* checked */
(IS_CHAR(c)) && (c != '<')) &&
(ctxt->instate != XML_PARSER_EOF)) {
/*
* Impose a reasonable limit on attribute size, unless XML_PARSE_HUGE
* special option is given
*/
if ((len > XML_MAX_TEXT_LENGTH) &&
((ctxt->options & XML_PARSE_HUGE) == 0)) {
xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
"AttValue length too long\n");
goto mem_error;
}
if (c == 0) break;
if (c == '&') {
in_space = 0;
if (NXT(1) == '#') {
int val = xmlParseCharRef(ctxt);
if (val == '&') {
if (ctxt->replaceEntities) {
if (len + 10 > buf_size) {
growBuffer(buf, 10);
}
buf[len++] = '&';
} else {
/*
* The reparsing will be done in xmlStringGetNodeList()
* called by the attribute() function in SAX.c
*/
if (len + 10 > buf_size) {
growBuffer(buf, 10);
}
buf[len++] = '&';
buf[len++] = '#';
buf[len++] = '3';
buf[len++] = '8';
buf[len++] = ';';
}
} else if (val != 0) {
if (len + 10 > buf_size) {
growBuffer(buf, 10);
}
len += xmlCopyChar(0, &buf[len], val);
}
} else {
ent = xmlParseEntityRef(ctxt);
ctxt->nbentities++;
if (ent != NULL)
ctxt->nbentities += ent->owner;
if ((ent != NULL) &&
(ent->etype == XML_INTERNAL_PREDEFINED_ENTITY)) {
if (len + 10 > buf_size) {
growBuffer(buf, 10);
}
if ((ctxt->replaceEntities == 0) &&
(ent->content[0] == '&')) {
buf[len++] = '&';
buf[len++] = '#';
buf[len++] = '3';
buf[len++] = '8';
buf[len++] = ';';
} else {
buf[len++] = ent->content[0];
}
} else if ((ent != NULL) &&
(ctxt->replaceEntities != 0)) {
if (ent->etype != XML_INTERNAL_PREDEFINED_ENTITY) {
++ctxt->depth;
rep = xmlStringDecodeEntities(ctxt, ent->content,
XML_SUBSTITUTE_REF,
0, 0, 0);
--ctxt->depth;
if (rep != NULL) {
current = rep;
while (*current != 0) { /* non input consuming */
if ((*current == 0xD) || (*current == 0xA) ||
(*current == 0x9)) {
buf[len++] = 0x20;
current++;
} else
buf[len++] = *current++;
if (len + 10 > buf_size) {
growBuffer(buf, 10);
}
}
xmlFree(rep);
rep = NULL;
}
} else {
if (len + 10 > buf_size) {
growBuffer(buf, 10);
}
if (ent->content != NULL)
buf[len++] = ent->content[0];
}
} else if (ent != NULL) {
int i = xmlStrlen(ent->name);
const xmlChar *cur = ent->name;
/*
* This may look absurd but is needed to detect
* entities problems
*/
if ((ent->etype != XML_INTERNAL_PREDEFINED_ENTITY) &&
(ent->content != NULL) && (ent->checked == 0)) {
unsigned long oldnbent = ctxt->nbentities;
++ctxt->depth;
rep = xmlStringDecodeEntities(ctxt, ent->content,
XML_SUBSTITUTE_REF, 0, 0, 0);
--ctxt->depth;
ent->checked = (ctxt->nbentities - oldnbent + 1) * 2;
if (rep != NULL) {
if (xmlStrchr(rep, '<'))
ent->checked |= 1;
xmlFree(rep);
rep = NULL;
}
}
/*
* Just output the reference
*/
buf[len++] = '&';
while (len + i + 10 > buf_size) {
growBuffer(buf, i + 10);
}
for (;i > 0;i--)
buf[len++] = *cur++;
buf[len++] = ';';
}
}
} else {
if ((c == 0x20) || (c == 0xD) || (c == 0xA) || (c == 0x9)) {
if ((len != 0) || (!normalize)) {
if ((!normalize) || (!in_space)) {
COPY_BUF(l,buf,len,0x20);
while (len + 10 > buf_size) {
growBuffer(buf, 10);
}
}
in_space = 1;
}
} else {
in_space = 0;
COPY_BUF(l,buf,len,c);
if (len + 10 > buf_size) {
growBuffer(buf, 10);
}
}
NEXTL(l);
}
GROW;
c = CUR_CHAR(l);
}
if (ctxt->instate == XML_PARSER_EOF)
goto error;
if ((in_space) && (normalize)) {
while ((len > 0) && (buf[len - 1] == 0x20)) len--;
}
buf[len] = 0;
if (RAW == '<') {
xmlFatalErr(ctxt, XML_ERR_LT_IN_ATTRIBUTE, NULL);
} else if (RAW != limit) {
if ((c != 0) && (!IS_CHAR(c))) {
xmlFatalErrMsg(ctxt, XML_ERR_INVALID_CHAR,
"invalid character in attribute value\n");
} else {
xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
"AttValue: ' expected\n");
}
} else
NEXT;
/*
* There we potentially risk an overflow, don't allow attribute value of
* length more than INT_MAX it is a very reasonnable assumption !
*/
if (len >= INT_MAX) {
xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
"AttValue length too long\n");
goto mem_error;
}
if (attlen != NULL) *attlen = (int) len;
return(buf);
mem_error:
xmlErrMemory(ctxt, NULL);
error:
if (buf != NULL)
xmlFree(buf);
if (rep != NULL)
xmlFree(rep);
return(NULL);
}
|
C
|
Android
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/d193f6bb5aa5bdc05e07f314abacf7d7bc466d3d
|
d193f6bb5aa5bdc05e07f314abacf7d7bc466d3d
|
cc: Make the PictureLayerImpl raster source null until commit.
No need to make a raster source that is never used.
R=enne, vmpstr
BUG=387116
Review URL: https://codereview.chromium.org/809433003
Cr-Commit-Position: refs/heads/master@{#308466}
|
LowResTilingsSettings() { create_low_res_tiling = true; }
|
LowResTilingsSettings() { create_low_res_tiling = true; }
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/d1a59e4e845a01d7d7b80ef184b672752a9eae4d
|
d1a59e4e845a01d7d7b80ef184b672752a9eae4d
|
Fixing cross-process postMessage replies on more than two iterations.
When two frames are replying to each other using event.source across processes,
after the first two replies, things break down. The root cause is that in
RenderViewImpl::GetFrameByMappedID, the lookup was incorrect. It is now
properly searching for the remote frame id and returning the local one.
BUG=153445
Review URL: https://chromiumcodereview.appspot.com/11040015
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@159924 0039d316-1c4b-4281-b951-d872f2087c98
|
static RenderViewImpl* FromRoutingID(int32 routing_id) {
return static_cast<RenderViewImpl*>(
ChildThread::current()->ResolveRoute(routing_id));
}
|
static RenderViewImpl* FromRoutingID(int32 routing_id) {
return static_cast<RenderViewImpl*>(
ChildThread::current()->ResolveRoute(routing_id));
}
|
C
|
Chrome
| 0 |
CVE-2017-5011
|
https://www.cvedetails.com/cve/CVE-2017-5011/
|
CWE-200
|
https://github.com/chromium/chromium/commit/eea3300239f0b53e172a320eb8de59d0bea65f27
|
eea3300239f0b53e172a320eb8de59d0bea65f27
|
DevTools: move front-end URL handling to DevToolsUIBindingds
BUG=662859
Review-Url: https://codereview.chromium.org/2607833002
Cr-Commit-Position: refs/heads/master@{#440926}
|
std::string GetMimeTypeForPath(const std::string& path) {
std::string filename = PathWithoutParams(path);
if (base::EndsWith(filename, ".html", base::CompareCase::INSENSITIVE_ASCII)) {
return "text/html";
} else if (base::EndsWith(filename, ".css",
base::CompareCase::INSENSITIVE_ASCII)) {
return "text/css";
} else if (base::EndsWith(filename, ".js",
base::CompareCase::INSENSITIVE_ASCII)) {
return "application/javascript";
} else if (base::EndsWith(filename, ".png",
base::CompareCase::INSENSITIVE_ASCII)) {
return "image/png";
} else if (base::EndsWith(filename, ".gif",
base::CompareCase::INSENSITIVE_ASCII)) {
return "image/gif";
} else if (base::EndsWith(filename, ".svg",
base::CompareCase::INSENSITIVE_ASCII)) {
return "image/svg+xml";
} else if (base::EndsWith(filename, ".manifest",
base::CompareCase::INSENSITIVE_ASCII)) {
return "text/cache-manifest";
}
return "text/html";
}
|
std::string GetMimeTypeForPath(const std::string& path) {
std::string filename = PathWithoutParams(path);
if (base::EndsWith(filename, ".html", base::CompareCase::INSENSITIVE_ASCII)) {
return "text/html";
} else if (base::EndsWith(filename, ".css",
base::CompareCase::INSENSITIVE_ASCII)) {
return "text/css";
} else if (base::EndsWith(filename, ".js",
base::CompareCase::INSENSITIVE_ASCII)) {
return "application/javascript";
} else if (base::EndsWith(filename, ".png",
base::CompareCase::INSENSITIVE_ASCII)) {
return "image/png";
} else if (base::EndsWith(filename, ".gif",
base::CompareCase::INSENSITIVE_ASCII)) {
return "image/gif";
} else if (base::EndsWith(filename, ".svg",
base::CompareCase::INSENSITIVE_ASCII)) {
return "image/svg+xml";
} else if (base::EndsWith(filename, ".manifest",
base::CompareCase::INSENSITIVE_ASCII)) {
return "text/cache-manifest";
}
return "text/html";
}
|
C
|
Chrome
| 0 |
CVE-2013-4591
|
https://www.cvedetails.com/cve/CVE-2013-4591/
|
CWE-119
|
https://github.com/torvalds/linux/commit/7d3e91a89b7adbc2831334def9e494dd9892f9af
|
7d3e91a89b7adbc2831334def9e494dd9892f9af
|
NFSv4: Check for buffer length in __nfs4_get_acl_uncached
Commit 1f1ea6c "NFSv4: Fix buffer overflow checking in
__nfs4_get_acl_uncached" accidently dropped the checking for too small
result buffer length.
If someone uses getxattr on "system.nfs4_acl" on an NFSv4 mount
supporting ACLs, the ACL has not been cached and the buffer supplied is
too short, we still copy the complete ACL, resulting in kernel and user
space memory corruption.
Signed-off-by: Sven Wegener <[email protected]>
Cc: [email protected]
Signed-off-by: Trond Myklebust <[email protected]>
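As context for the fix, the reinstated guard follows a generic pattern: copy a cached ACL into a caller-supplied buffer only after checking that the buffer is large enough. The following is a minimal, self-contained C sketch of that pattern with hypothetical names, not the kernel code:

#include <errno.h>
#include <string.h>
#include <sys/types.h>

/* Hypothetical helper: copy a cached ACL of acl_len bytes into buf.
 * Returns the ACL length on success, or -ERANGE when the caller's
 * buffer is too small, which is the check the commit reinstates. */
static ssize_t copy_acl_checked(void *buf, size_t buflen,
                                const void *acl, size_t acl_len)
{
    if (buf == NULL)            /* size-probe call: only report the length */
        return (ssize_t)acl_len;
    if (buflen < acl_len)       /* too small: refuse instead of overflowing */
        return -ERANGE;
    memcpy(buf, acl, acl_len);
    return (ssize_t)acl_len;
}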
|
bool nfs4_write_need_cache_consistency_data(const struct nfs_write_data *data)
{
const struct nfs_pgio_header *hdr = data->header;
/* Don't request attributes for pNFS or O_DIRECT writes */
if (data->ds_clp != NULL || hdr->dreq != NULL)
return false;
/* Otherwise, request attributes if and only if we don't hold
* a delegation
*/
return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
}
|
bool nfs4_write_need_cache_consistency_data(const struct nfs_write_data *data)
{
const struct nfs_pgio_header *hdr = data->header;
/* Don't request attributes for pNFS or O_DIRECT writes */
if (data->ds_clp != NULL || hdr->dreq != NULL)
return false;
/* Otherwise, request attributes if and only if we don't hold
* a delegation
*/
return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
}
|
C
|
linux
| 0 |
CVE-2015-5330
|
https://www.cvedetails.com/cve/CVE-2015-5330/
|
CWE-200
|
https://git.samba.org/?p=samba.git;a=commit;h=0454b95657846fcecf0f51b6f1194faac02518bd
|
0454b95657846fcecf0f51b6f1194faac02518bd
| null |
static char *ldb_dn_canonical(TALLOC_CTX *mem_ctx, struct ldb_dn *dn, int ex_format) {
unsigned int i;
TALLOC_CTX *tmpctx;
char *cracked = NULL;
const char *format = (ex_format ? "\n" : "/" );
if ( ! ldb_dn_validate(dn)) {
return NULL;
}
tmpctx = talloc_new(mem_ctx);
/* Walk backwards down the DN, grabbing 'dc' components at first */
for (i = dn->comp_num - 1; i != (unsigned int) -1; i--) {
if (ldb_attr_cmp(dn->components[i].name, "dc") != 0) {
break;
}
if (cracked) {
cracked = talloc_asprintf(tmpctx, "%s.%s",
ldb_dn_escape_value(tmpctx,
dn->components[i].value),
cracked);
} else {
cracked = ldb_dn_escape_value(tmpctx,
dn->components[i].value);
}
if (!cracked) {
goto done;
}
}
/* Only domain components? Finish here */
if (i == (unsigned int) -1) {
cracked = talloc_strdup_append_buffer(cracked, format);
talloc_steal(mem_ctx, cracked);
goto done;
}
/* Now walk backwards appending remaining components */
for (; i > 0; i--) {
cracked = talloc_asprintf_append_buffer(cracked, "/%s",
ldb_dn_escape_value(tmpctx,
dn->components[i].value));
if (!cracked) {
goto done;
}
}
/* Last one, possibly a newline for the 'ex' format */
cracked = talloc_asprintf_append_buffer(cracked, "%s%s", format,
ldb_dn_escape_value(tmpctx,
dn->components[i].value));
talloc_steal(mem_ctx, cracked);
done:
talloc_free(tmpctx);
return cracked;
}
|
static char *ldb_dn_canonical(TALLOC_CTX *mem_ctx, struct ldb_dn *dn, int ex_format) {
unsigned int i;
TALLOC_CTX *tmpctx;
char *cracked = NULL;
const char *format = (ex_format ? "\n" : "/" );
if ( ! ldb_dn_validate(dn)) {
return NULL;
}
tmpctx = talloc_new(mem_ctx);
/* Walk backwards down the DN, grabbing 'dc' components at first */
for (i = dn->comp_num - 1; i != (unsigned int) -1; i--) {
if (ldb_attr_cmp(dn->components[i].name, "dc") != 0) {
break;
}
if (cracked) {
cracked = talloc_asprintf(tmpctx, "%s.%s",
ldb_dn_escape_value(tmpctx,
dn->components[i].value),
cracked);
} else {
cracked = ldb_dn_escape_value(tmpctx,
dn->components[i].value);
}
if (!cracked) {
goto done;
}
}
/* Only domain components? Finish here */
if (i == (unsigned int) -1) {
cracked = talloc_strdup_append_buffer(cracked, format);
talloc_steal(mem_ctx, cracked);
goto done;
}
/* Now walk backwards appending remaining components */
for (; i > 0; i--) {
cracked = talloc_asprintf_append_buffer(cracked, "/%s",
ldb_dn_escape_value(tmpctx,
dn->components[i].value));
if (!cracked) {
goto done;
}
}
/* Last one, possibly a newline for the 'ex' format */
cracked = talloc_asprintf_append_buffer(cracked, "%s%s", format,
ldb_dn_escape_value(tmpctx,
dn->components[i].value));
talloc_steal(mem_ctx, cracked);
done:
talloc_free(tmpctx);
return cracked;
}
|
C
|
samba
| 0 |
CVE-2016-10150
|
https://www.cvedetails.com/cve/CVE-2016-10150/
|
CWE-416
|
https://github.com/torvalds/linux/commit/a0f1d21c1ccb1da66629627a74059dd7f5ac9c61
|
a0f1d21c1ccb1da66629627a74059dd7f5ac9c61
|
KVM: use after free in kvm_ioctl_create_device()
We should move the ops->destroy(dev) after the list_del(&dev->vm_node)
so that we don't use "dev" after freeing it.
Fixes: a28ebea2adc4 ("KVM: Protect device ops->create and list_add with kvm->lock")
Signed-off-by: Dan Carpenter <[email protected]>
Reviewed-by: David Hildenbrand <[email protected]>
Signed-off-by: Radim Krčmář <[email protected]>
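The ordering rule behind this fix is general: unlink an object from any shared structure before freeing it. A minimal userspace C sketch of the corrected order, with hypothetical types rather than the KVM code:

#include <stdlib.h>

struct node { struct node *prev, *next; };

/* Unlink n from a circular doubly linked list, then release it.
 * Freeing first and unlinking second would dereference freed memory,
 * which is the use-after-free this commit removes (assumes a circular
 * list so prev and next are always valid). */
static void remove_and_destroy(struct node *n)
{
    n->prev->next = n->next;   /* unlink first ... */
    n->next->prev = n->prev;
    free(n);                   /* ... free last */
}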
|
int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
struct page **pages, int nr_pages)
{
unsigned long addr;
gfn_t entry;
addr = gfn_to_hva_many(slot, gfn, &entry);
if (kvm_is_error_hva(addr))
return -1;
if (entry < nr_pages)
return 0;
return __get_user_pages_fast(addr, nr_pages, 1, pages);
}
|
int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
struct page **pages, int nr_pages)
{
unsigned long addr;
gfn_t entry;
addr = gfn_to_hva_many(slot, gfn, &entry);
if (kvm_is_error_hva(addr))
return -1;
if (entry < nr_pages)
return 0;
return __get_user_pages_fast(addr, nr_pages, 1, pages);
}
|
C
|
linux
| 0 |
CVE-2017-8825
|
https://www.cvedetails.com/cve/CVE-2017-8825/
|
CWE-476
|
https://github.com/dinhviethoa/libetpan/commit/1fe8fbc032ccda1db9af66d93016b49c16c1f22d
|
1fe8fbc032ccda1db9af66d93016b49c16c1f22d
|
Fixed crash #274
|
int mailimf_date_time_parse(const char * message, size_t length,
size_t * indx,
struct mailimf_date_time ** result)
{
size_t cur_token;
int day_of_week;
struct mailimf_date_time * date_time;
int day;
int month;
int year;
int hour;
int min;
int sec;
int zone;
int r;
cur_token = * indx;
day_of_week = -1;
r = mailimf_day_of_week_parse(message, length, &cur_token, &day_of_week);
if (r == MAILIMF_NO_ERROR) {
r = mailimf_comma_parse(message, length, &cur_token);
if (r == MAILIMF_ERROR_PARSE) {
}
else if (r != MAILIMF_NO_ERROR) {
return r;
}
}
else if (r != MAILIMF_ERROR_PARSE)
return r;
day = 0;
month = 0;
year = 0;
r = mailimf_date_parse(message, length, &cur_token, &day, &month, &year);
if (r == MAILIMF_ERROR_PARSE) {
r = mailimf_broken_date_parse(message, length, &cur_token, &day, &month, &year);
}
else if (r != MAILIMF_NO_ERROR) {
return r;
}
r = mailimf_fws_parse(message, length, &cur_token);
if (r != MAILIMF_NO_ERROR)
return r;
hour = 0;
min = 0;
sec = 0;
zone = 0;
r = mailimf_time_parse(message, length, &cur_token,
&hour, &min, &sec, &zone);
if (r != MAILIMF_NO_ERROR)
return r;
date_time = mailimf_date_time_new(day, month, year, hour, min, sec, zone);
if (date_time == NULL)
return MAILIMF_ERROR_MEMORY;
* indx = cur_token;
* result = date_time;
return MAILIMF_NO_ERROR;
}
|
int mailimf_date_time_parse(const char * message, size_t length,
size_t * indx,
struct mailimf_date_time ** result)
{
size_t cur_token;
int day_of_week;
struct mailimf_date_time * date_time;
int day;
int month;
int year;
int hour;
int min;
int sec;
int zone;
int r;
cur_token = * indx;
day_of_week = -1;
r = mailimf_day_of_week_parse(message, length, &cur_token, &day_of_week);
if (r == MAILIMF_NO_ERROR) {
r = mailimf_comma_parse(message, length, &cur_token);
if (r == MAILIMF_ERROR_PARSE) {
}
else if (r != MAILIMF_NO_ERROR) {
return r;
}
}
else if (r != MAILIMF_ERROR_PARSE)
return r;
day = 0;
month = 0;
year = 0;
r = mailimf_date_parse(message, length, &cur_token, &day, &month, &year);
if (r == MAILIMF_ERROR_PARSE) {
r = mailimf_broken_date_parse(message, length, &cur_token, &day, &month, &year);
}
else if (r != MAILIMF_NO_ERROR) {
return r;
}
r = mailimf_fws_parse(message, length, &cur_token);
if (r != MAILIMF_NO_ERROR)
return r;
hour = 0;
min = 0;
sec = 0;
zone = 0;
r = mailimf_time_parse(message, length, &cur_token,
&hour, &min, &sec, &zone);
if (r != MAILIMF_NO_ERROR)
return r;
date_time = mailimf_date_time_new(day, month, year, hour, min, sec, zone);
if (date_time == NULL)
return MAILIMF_ERROR_MEMORY;
* indx = cur_token;
* result = date_time;
return MAILIMF_NO_ERROR;
}
|
C
|
libetpan
| 0 |
CVE-2016-1615
|
https://www.cvedetails.com/cve/CVE-2016-1615/
|
CWE-254
|
https://github.com/chromium/chromium/commit/b399a05453d7b3e2dfdec67865fefe6953bcc59e
|
b399a05453d7b3e2dfdec67865fefe6953bcc59e
|
Allocate a FrameSinkId for RenderWidgetHostViewAura in mus+ash
RenderWidgetHostViewChildFrame expects its parent to have a valid
FrameSinkId. Make sure RenderWidgetHostViewAura has a FrameSinkId even
if DelegatedFrameHost is not used (in mus+ash).
BUG=706553
[email protected]
Review-Url: https://codereview.chromium.org/2847253003
Cr-Commit-Position: refs/heads/master@{#468179}
|
void RenderWidgetHostViewAura::OnBeginFrameDidNotSwap(
const cc::BeginFrameAck& ack) {
delegated_frame_host_->BeginFrameDidNotSwap(ack);
}
|
void RenderWidgetHostViewAura::OnBeginFrameDidNotSwap(
const cc::BeginFrameAck& ack) {
delegated_frame_host_->BeginFrameDidNotSwap(ack);
}
|
C
|
Chrome
| 0 |
CVE-2019-5780
|
https://www.cvedetails.com/cve/CVE-2019-5780/
|
CWE-20
|
https://github.com/chromium/chromium/commit/0328261c41b1b7495e1d4d4cf958f41a08aff38b
|
0328261c41b1b7495e1d4d4cf958f41a08aff38b
|
mac: Do not let synthetic events toggle "Allow JavaScript From AppleEvents"
Bug: 891697
Change-Id: I49eb77963515637df739c9d2ce83530d4e21cf15
Reviewed-on: https://chromium-review.googlesource.com/c/1308771
Reviewed-by: Elly Fong-Jones <[email protected]>
Commit-Queue: Robert Sesek <[email protected]>
Cr-Commit-Position: refs/heads/master@{#604268}
|
void BrowserCommandController::UpdateCommandsForFullscreenMode() {
if (is_locked_fullscreen_)
return;
const bool is_fullscreen = window() && window()->IsFullscreen();
const bool show_main_ui = IsShowingMainUI();
const bool show_location_bar = IsShowingLocationBar();
const bool main_not_fullscreen = show_main_ui && !is_fullscreen;
command_updater_.UpdateCommandEnabled(IDC_OPEN_CURRENT_URL, show_main_ui);
command_updater_.UpdateCommandEnabled(
IDC_SHOW_AS_TAB,
!browser_->is_type_tabbed() && !is_fullscreen);
command_updater_.UpdateCommandEnabled(IDC_FOCUS_TOOLBAR, show_main_ui);
command_updater_.UpdateCommandEnabled(IDC_FOCUS_LOCATION, show_location_bar);
command_updater_.UpdateCommandEnabled(IDC_FOCUS_SEARCH, show_main_ui);
command_updater_.UpdateCommandEnabled(
IDC_FOCUS_MENU_BAR, main_not_fullscreen);
command_updater_.UpdateCommandEnabled(
IDC_FOCUS_NEXT_PANE, main_not_fullscreen);
command_updater_.UpdateCommandEnabled(
IDC_FOCUS_PREVIOUS_PANE, main_not_fullscreen);
command_updater_.UpdateCommandEnabled(
IDC_FOCUS_BOOKMARKS, main_not_fullscreen);
command_updater_.UpdateCommandEnabled(
IDC_FOCUS_INACTIVE_POPUP_FOR_ACCESSIBILITY, main_not_fullscreen);
command_updater_.UpdateCommandEnabled(IDC_DEVELOPER_MENU, show_main_ui);
#if defined(GOOGLE_CHROME_BUILD)
command_updater_.UpdateCommandEnabled(IDC_FEEDBACK, show_main_ui);
#endif
UpdateShowSyncState(show_main_ui);
command_updater_.UpdateCommandEnabled(IDC_EDIT_SEARCH_ENGINES, show_main_ui);
command_updater_.UpdateCommandEnabled(IDC_VIEW_PASSWORDS, show_main_ui);
command_updater_.UpdateCommandEnabled(IDC_ABOUT, show_main_ui);
command_updater_.UpdateCommandEnabled(IDC_SHOW_APP_MENU, show_main_ui);
if (base::debug::IsProfilingSupported())
command_updater_.UpdateCommandEnabled(IDC_PROFILING_ENABLED, show_main_ui);
#if !defined(OS_MACOSX)
const bool fullscreen_enabled = is_fullscreen ||
profile()->GetPrefs()->GetBoolean(prefs::kFullscreenAllowed);
#else
const bool fullscreen_enabled = true;
#endif
command_updater_.UpdateCommandEnabled(IDC_FULLSCREEN, fullscreen_enabled);
command_updater_.UpdateCommandEnabled(IDC_TOGGLE_FULLSCREEN_TOOLBAR,
fullscreen_enabled);
UpdateCommandsForBookmarkBar();
UpdateCommandsForIncognitoAvailability();
UpdateCommandsForHostedAppAvailability();
}
|
void BrowserCommandController::UpdateCommandsForFullscreenMode() {
if (is_locked_fullscreen_)
return;
const bool is_fullscreen = window() && window()->IsFullscreen();
const bool show_main_ui = IsShowingMainUI();
const bool show_location_bar = IsShowingLocationBar();
const bool main_not_fullscreen = show_main_ui && !is_fullscreen;
command_updater_.UpdateCommandEnabled(IDC_OPEN_CURRENT_URL, show_main_ui);
command_updater_.UpdateCommandEnabled(
IDC_SHOW_AS_TAB,
!browser_->is_type_tabbed() && !is_fullscreen);
command_updater_.UpdateCommandEnabled(IDC_FOCUS_TOOLBAR, show_main_ui);
command_updater_.UpdateCommandEnabled(IDC_FOCUS_LOCATION, show_location_bar);
command_updater_.UpdateCommandEnabled(IDC_FOCUS_SEARCH, show_main_ui);
command_updater_.UpdateCommandEnabled(
IDC_FOCUS_MENU_BAR, main_not_fullscreen);
command_updater_.UpdateCommandEnabled(
IDC_FOCUS_NEXT_PANE, main_not_fullscreen);
command_updater_.UpdateCommandEnabled(
IDC_FOCUS_PREVIOUS_PANE, main_not_fullscreen);
command_updater_.UpdateCommandEnabled(
IDC_FOCUS_BOOKMARKS, main_not_fullscreen);
command_updater_.UpdateCommandEnabled(
IDC_FOCUS_INACTIVE_POPUP_FOR_ACCESSIBILITY, main_not_fullscreen);
command_updater_.UpdateCommandEnabled(IDC_DEVELOPER_MENU, show_main_ui);
#if defined(GOOGLE_CHROME_BUILD)
command_updater_.UpdateCommandEnabled(IDC_FEEDBACK, show_main_ui);
#endif
UpdateShowSyncState(show_main_ui);
command_updater_.UpdateCommandEnabled(IDC_EDIT_SEARCH_ENGINES, show_main_ui);
command_updater_.UpdateCommandEnabled(IDC_VIEW_PASSWORDS, show_main_ui);
command_updater_.UpdateCommandEnabled(IDC_ABOUT, show_main_ui);
command_updater_.UpdateCommandEnabled(IDC_SHOW_APP_MENU, show_main_ui);
if (base::debug::IsProfilingSupported())
command_updater_.UpdateCommandEnabled(IDC_PROFILING_ENABLED, show_main_ui);
#if !defined(OS_MACOSX)
const bool fullscreen_enabled = is_fullscreen ||
profile()->GetPrefs()->GetBoolean(prefs::kFullscreenAllowed);
#else
const bool fullscreen_enabled = true;
#endif
command_updater_.UpdateCommandEnabled(IDC_FULLSCREEN, fullscreen_enabled);
command_updater_.UpdateCommandEnabled(IDC_TOGGLE_FULLSCREEN_TOOLBAR,
fullscreen_enabled);
UpdateCommandsForBookmarkBar();
UpdateCommandsForIncognitoAvailability();
UpdateCommandsForHostedAppAvailability();
}
|
C
|
Chrome
| 0 |
CVE-2013-3222
|
https://www.cvedetails.com/cve/CVE-2013-3222/
|
CWE-200
|
https://github.com/torvalds/linux/commit/9b3e617f3df53822345a8573b6d358f6b9e5ed87
|
9b3e617f3df53822345a8573b6d358f6b9e5ed87
|
atm: update msg_namelen in vcc_recvmsg()
The current code does not fill the msg_name member in case it is set.
It also does not set the msg_namelen member to 0 and therefore makes
net/socket.c leak the local, uninitialized sockaddr_storage variable
to userland -- 128 bytes of kernel stack memory.
Fix that by simply setting msg_namelen to 0 as obviously nobody cared
about vcc_recvmsg() not filling the msg_name in case it was set.
Signed-off-by: Mathias Krause <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
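The general rule this enforces: a receive path that does not fill msg_name must set msg_namelen to 0 so the caller never copies uninitialized bytes to userspace. A hedged C sketch of that pattern using a simplified header struct, not the kernel's:

#include <stddef.h>

struct msg_header {
    void  *msg_name;     /* optional source-address buffer */
    size_t msg_namelen;  /* how many bytes of msg_name are valid */
};

/* Hypothetical receive handler that never fills msg_name: saying so
 * explicitly keeps the generic layer from copying stale stack bytes. */
static void proto_recvmsg(struct msg_header *msg)
{
    msg->msg_namelen = 0;   /* nothing valid in msg_name */
    /* ... dequeue and copy payload data here ... */
}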
|
static int check_tp(const struct atm_trafprm *tp)
{
/* @@@ Should be merged with adjust_tp */
if (!tp->traffic_class || tp->traffic_class == ATM_ANYCLASS)
return 0;
if (tp->traffic_class != ATM_UBR && !tp->min_pcr && !tp->pcr &&
!tp->max_pcr)
return -EINVAL;
if (tp->min_pcr == ATM_MAX_PCR)
return -EINVAL;
if (tp->min_pcr && tp->max_pcr && tp->max_pcr != ATM_MAX_PCR &&
tp->min_pcr > tp->max_pcr)
return -EINVAL;
/*
* We allow pcr to be outside [min_pcr,max_pcr], because later
* adjustment may still push it in the valid range.
*/
return 0;
}
|
static int check_tp(const struct atm_trafprm *tp)
{
/* @@@ Should be merged with adjust_tp */
if (!tp->traffic_class || tp->traffic_class == ATM_ANYCLASS)
return 0;
if (tp->traffic_class != ATM_UBR && !tp->min_pcr && !tp->pcr &&
!tp->max_pcr)
return -EINVAL;
if (tp->min_pcr == ATM_MAX_PCR)
return -EINVAL;
if (tp->min_pcr && tp->max_pcr && tp->max_pcr != ATM_MAX_PCR &&
tp->min_pcr > tp->max_pcr)
return -EINVAL;
/*
* We allow pcr to be outside [min_pcr,max_pcr], because later
* adjustment may still push it in the valid range.
*/
return 0;
}
|
C
|
linux
| 0 |
CVE-2014-9710
|
https://www.cvedetails.com/cve/CVE-2014-9710/
|
CWE-362
|
https://github.com/torvalds/linux/commit/5f5bc6b1e2d5a6f827bc860ef2dc5b6f365d1339
|
5f5bc6b1e2d5a6f827bc860ef2dc5b6f365d1339
|
Btrfs: make xattr replace operations atomic
Replacing a xattr consists of doing a lookup for its existing value, delete
the current value from the respective leaf, release the search path and then
finally insert the new value. This leaves a time window where readers (getxattr,
listxattrs) won't see any value for the xattr. Xattrs are used to store ACLs,
so this has security implications.
This change also fixes 2 other existing issues which were:
*) Deleting the old xattr value without verifying first if the new xattr will
fit in the existing leaf item (in case multiple xattrs are packed in the
same item due to name hash collision);
*) Returning -EEXIST when the flag XATTR_CREATE is given and the xattr doesn't
exist but we have an existing item that packs multiple xattrs with
the same name hash as the input xattr. In this case we should return ENOSPC.
A test case for xfstests follows soon.
Thanks to Alexandre Oliva for reporting the non-atomicity of the xattr replace
implementation.
Reported-by: Alexandre Oliva <[email protected]>
Signed-off-by: Filipe Manana <[email protected]>
Signed-off-by: Chris Mason <[email protected]>
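Conceptually the change replaces a delete-then-insert sequence, which leaves a window where readers see no value at all, with an update performed while the item is still held, after first checking that the new value fits. A purely illustrative C sketch of that idea using a lock-protected slot (hypothetical API, not btrfs):

#include <pthread.h>
#include <string.h>

#define VAL_MAX 64

struct xattr_slot {
    pthread_mutex_t lock;
    char value[VAL_MAX];
    size_t len;
};

/* Replace the stored value atomically with respect to readers that take
 * the same lock: the size check happens before the old value is touched,
 * and no reader can observe an empty intermediate state. */
static int xattr_replace(struct xattr_slot *s, const char *val, size_t len)
{
    if (len > VAL_MAX)
        return -1;                  /* new value would not fit: fail early */
    pthread_mutex_lock(&s->lock);
    memcpy(s->value, val, len);     /* overwrite in place, no gap */
    s->len = len;
    pthread_mutex_unlock(&s->lock);
    return 0;
}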
|
__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb_root, u64 time_seq)
{
struct tree_mod_elem *tm;
struct tree_mod_elem *found = NULL;
u64 root_logical = eb_root->start;
int looped = 0;
if (!time_seq)
return NULL;
/*
* the very last operation that's logged for a root is the replacement
* operation (if it is replaced at all). this has the index of the *new*
* root, making it the very first operation that's logged for this root.
*/
while (1) {
tm = tree_mod_log_search_oldest(fs_info, root_logical,
time_seq);
if (!looped && !tm)
return NULL;
/*
* if there are no tree operation for the oldest root, we simply
* return it. this should only happen if that (old) root is at
* level 0.
*/
if (!tm)
break;
/*
* if there's an operation that's not a root replacement, we
* found the oldest version of our root. normally, we'll find a
* MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
*/
if (tm->op != MOD_LOG_ROOT_REPLACE)
break;
found = tm;
root_logical = tm->old_root.logical;
looped = 1;
}
/* if there's no old root to return, return what we found instead */
if (!found)
found = tm;
return found;
}
|
__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb_root, u64 time_seq)
{
struct tree_mod_elem *tm;
struct tree_mod_elem *found = NULL;
u64 root_logical = eb_root->start;
int looped = 0;
if (!time_seq)
return NULL;
/*
* the very last operation that's logged for a root is the replacement
* operation (if it is replaced at all). this has the index of the *new*
* root, making it the very first operation that's logged for this root.
*/
while (1) {
tm = tree_mod_log_search_oldest(fs_info, root_logical,
time_seq);
if (!looped && !tm)
return NULL;
/*
* if there are no tree operation for the oldest root, we simply
* return it. this should only happen if that (old) root is at
* level 0.
*/
if (!tm)
break;
/*
* if there's an operation that's not a root replacement, we
* found the oldest version of our root. normally, we'll find a
* MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
*/
if (tm->op != MOD_LOG_ROOT_REPLACE)
break;
found = tm;
root_logical = tm->old_root.logical;
looped = 1;
}
/* if there's no old root to return, return what we found instead */
if (!found)
found = tm;
return found;
}
|
C
|
linux
| 0 |
CVE-2016-2499
|
https://www.cvedetails.com/cve/CVE-2016-2499/
|
CWE-200
|
https://android.googlesource.com/platform/frameworks/av/+/dd3546765710ce8dd49eb23901d90345dec8282f
|
dd3546765710ce8dd49eb23901d90345dec8282f
|
AudioSource: initialize variables
to prevent info leak
Bug: 27855172
Change-Id: I3d33e0a9cc5cf8a758d7b0794590b09c43a24561
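The fix class here is initializing every member before it can be observed. A minimal C illustration of the pattern with a hypothetical struct, not the Android class:

#include <string.h>

struct capture_state {
    int sample_rate;
    int channels;
    long frames_dropped;
};

/* Zero the whole object first so no field is ever handed out with
 * leftover memory contents (the info leak this kind of fix targets),
 * then set real defaults explicitly. */
static void capture_state_init(struct capture_state *s)
{
    memset(s, 0, sizeof(*s));
    s->sample_rate = 48000;
    s->channels = 1;
}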
|
void AudioSource::signalBufferReturned(MediaBuffer *buffer) {
ALOGV("signalBufferReturned: %p", buffer->data());
Mutex::Autolock autoLock(mLock);
--mNumClientOwnedBuffers;
buffer->setObserver(0);
buffer->release();
mFrameEncodingCompletionCondition.signal();
return;
}
|
void AudioSource::signalBufferReturned(MediaBuffer *buffer) {
ALOGV("signalBufferReturned: %p", buffer->data());
Mutex::Autolock autoLock(mLock);
--mNumClientOwnedBuffers;
buffer->setObserver(0);
buffer->release();
mFrameEncodingCompletionCondition.signal();
return;
}
|
C
|
Android
| 0 |
CVE-2018-18345
|
https://www.cvedetails.com/cve/CVE-2018-18345/
| null |
https://github.com/chromium/chromium/commit/2078096efde1976b0fa9c820df90cedbfb2b13bc
|
2078096efde1976b0fa9c820df90cedbfb2b13bc
|
Lock down blob/filesystem URL creation with a stronger CPSP::CanCommitURL()
ChildProcessSecurityPolicy::CanCommitURL() is a security check that's
supposed to tell whether a given renderer process is allowed to commit
a given URL. It is currently used to validate (1) blob and filesystem
URL creation, and (2) Origin headers. Currently, it has scheme-based
checks that disallow things like web renderers creating
blob/filesystem URLs in chrome-extension: origins, but it cannot stop
one web origin from creating those URLs for another origin.
This CL locks down its use for (1) to also consult
CanAccessDataForOrigin(). With site isolation, this will check origin
locks and ensure that foo.com cannot create blob/filesystem URLs for
other origins.
For now, this CL does not provide the same enforcements for (2),
Origin header validation, which has additional constraints that need
to be solved first (see https://crbug.com/515309).
Bug: 886976, 888001
Change-Id: I743ef05469e4000b2c0bee840022162600cc237f
Reviewed-on: https://chromium-review.googlesource.com/1235343
Commit-Queue: Alex Moshchuk <[email protected]>
Reviewed-by: Charlie Reis <[email protected]>
Cr-Commit-Position: refs/heads/master@{#594914}
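As a rough illustration of the layering described above (an origin-lock check added on top of the existing scheme check), here is a language-neutral C sketch; all names are hypothetical, the real code being Chromium C++:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Hypothetical per-process security state. */
struct process_policy {
    const char *locked_origin;   /* e.g. "https://foo.com", or NULL if unlocked */
};

static bool scheme_allowed(const char *url)
{
    /* Old behaviour: filter only by scheme. */
    return strncmp(url, "chrome-extension:", 17) != 0;
}

static bool can_access_origin(const struct process_policy *p, const char *origin)
{
    /* Added behaviour: a process locked to one origin may only use that origin. */
    return p->locked_origin == NULL || strcmp(p->locked_origin, origin) == 0;
}

/* Combined check in the spirit of the hardened CanCommitURL() for
 * blob/filesystem URL creation: both conditions must hold. */
static bool can_create_blob_url(const struct process_policy *p,
                                const char *url, const char *origin)
{
    return scheme_allowed(url) && can_access_origin(p, origin);
}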
|
void SecurityExploitBrowserTest::TestFileChooserWithPath(
const base::FilePath& path) {
GURL foo("http://foo.com/simple_page.html");
NavigateToURL(shell(), foo);
EXPECT_EQ(base::ASCIIToUTF16("OK"), shell()->web_contents()->GetTitle());
RenderFrameHost* compromised_renderer =
shell()->web_contents()->GetMainFrame();
RenderProcessHostKillWaiter kill_waiter(compromised_renderer->GetProcess());
blink::mojom::FileChooserParams params;
params.default_file_name = path;
FrameHostMsg_RunFileChooser evil(compromised_renderer->GetRoutingID(),
params);
IpcSecurityTestUtil::PwnMessageReceived(
compromised_renderer->GetProcess()->GetChannel(), evil);
EXPECT_EQ(bad_message::RFH_FILE_CHOOSER_PATH, kill_waiter.Wait());
}
|
void SecurityExploitBrowserTest::TestFileChooserWithPath(
const base::FilePath& path) {
GURL foo("http://foo.com/simple_page.html");
NavigateToURL(shell(), foo);
EXPECT_EQ(base::ASCIIToUTF16("OK"), shell()->web_contents()->GetTitle());
RenderFrameHost* compromised_renderer =
shell()->web_contents()->GetMainFrame();
RenderProcessHostKillWaiter kill_waiter(compromised_renderer->GetProcess());
blink::mojom::FileChooserParams params;
params.default_file_name = path;
FrameHostMsg_RunFileChooser evil(compromised_renderer->GetRoutingID(),
params);
IpcSecurityTestUtil::PwnMessageReceived(
compromised_renderer->GetProcess()->GetChannel(), evil);
EXPECT_EQ(bad_message::RFH_FILE_CHOOSER_PATH, kill_waiter.Wait());
}
|
C
|
Chrome
| 0 |
CVE-2016-5158
|
https://www.cvedetails.com/cve/CVE-2016-5158/
|
CWE-190
|
https://github.com/chromium/chromium/commit/6a310d99a741f9ba5e4e537c5ec49d3adbe5876f
|
6a310d99a741f9ba5e4e537c5ec49d3adbe5876f
|
Position info (item n of m) incorrect if hidden focusable items in list
Bug: 836997
Change-Id: I971fa7076f72d51829b36af8e379260d48ca25ec
Reviewed-on: https://chromium-review.googlesource.com/c/1450235
Commit-Queue: Aaron Leventhal <[email protected]>
Reviewed-by: Nektarios Paisios <[email protected]>
Cr-Commit-Position: refs/heads/master@{#628890}
|
void RunAomTest(const base::FilePath::CharType* file_path) {
base::FilePath test_path = GetTestFilePath("accessibility", "aom");
{
base::ScopedAllowBlockingForTesting allow_blocking;
ASSERT_TRUE(base::PathExists(test_path)) << test_path.LossyDisplayName();
}
base::FilePath aom_file = test_path.Append(base::FilePath(file_path));
RunTest(aom_file, "accessibility/aom");
}
|
void RunAomTest(const base::FilePath::CharType* file_path) {
base::FilePath test_path = GetTestFilePath("accessibility", "aom");
{
base::ScopedAllowBlockingForTesting allow_blocking;
ASSERT_TRUE(base::PathExists(test_path)) << test_path.LossyDisplayName();
}
base::FilePath aom_file = test_path.Append(base::FilePath(file_path));
RunTest(aom_file, "accessibility/aom");
}
|
C
|
Chrome
| 0 |
CVE-2016-3839
|
https://www.cvedetails.com/cve/CVE-2016-3839/
|
CWE-284
|
https://android.googlesource.com/platform/system/bt/+/472271b153c5dc53c28beac55480a8d8434b2d5c
|
472271b153c5dc53c28beac55480a8d8434b2d5c
|
DO NOT MERGE Fix potential DoS caused by delivering signal to BT process
Bug: 28885210
Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360
Conflicts:
btif/co/bta_hh_co.c
btif/src/btif_core.c
Merge conflict resolution of ag/1161415 (referencing ag/1164670)
- Directly into mnc-mr2-release
|
void GKI_disable(void) {
pthread_mutex_lock(&gki_cb.lock);
}
|
void GKI_disable(void) {
pthread_mutex_lock(&gki_cb.lock);
}
|
C
|
Android
| 0 |
CVE-2015-7513
|
https://www.cvedetails.com/cve/CVE-2015-7513/
| null |
https://github.com/torvalds/linux/commit/0185604c2d82c560dab2f2933a18f797e74ab5a8
|
0185604c2d82c560dab2f2933a18f797e74ab5a8
|
KVM: x86: Reload pit counters for all channels when restoring state
Currently if userspace restores the pit counters with a count of 0
on channels 1 or 2 and the guest attempts to read the count on those
channels, then KVM will perform a mod of 0 and crash. This will ensure
that 0 values are converted to 65536 as per the spec.
This is CVE-2015-7513.
Signed-off-by: Andy Honig <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
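The crash comes from taking a modulo by a zero counter; per the behaviour described above, a programmed count of 0 means 65536. A one-function C sketch of that normalization (hypothetical helper, not the KVM code):

#include <stdint.h>

/* A PIT channel count of 0 means the maximum period, 65536 ticks.
 * Normalizing on restore avoids a later division or modulo by zero. */
static uint32_t pit_effective_count(uint16_t programmed)
{
    return programmed == 0 ? 65536u : programmed;
}

/* Usage: elapsed_ticks % pit_effective_count(count) is then always safe. */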
|
static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
return vcpu->arch.smbase;
}
|
static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
return vcpu->arch.smbase;
}
|
C
|
linux
| 0 |
CVE-2015-8877
|
https://www.cvedetails.com/cve/CVE-2015-8877/
|
CWE-399
|
https://github.com/libgd/libgd/commit/4751b606fa38edc456d627140898a7ec679fcc24
|
4751b606fa38edc456d627140898a7ec679fcc24
|
gdImageScaleTwoPass memory leak fix
Fixing memory leak in gdImageScaleTwoPass, as reported by @cmb69 and
confirmed by @vapier. This bug actually bit me in production and I'm
very thankful that it was reported with an easy fix.
Fixes #173.
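The leak is the classic temporary-buffer shape: an intermediate allocation must be released on every path, including the successful one. A minimal generic C sketch of the corrected structure (placeholder passes, not the libgd code):

#include <stdlib.h>

/* Two-pass transform through an intermediate buffer. The intermediate is
 * freed on the error path and, crucially, after the second pass succeeds;
 * omitting that final free is the kind of leak this commit fixes. */
static unsigned char *two_pass_scale(const unsigned char *src, size_t n)
{
    unsigned char *tmp = malloc(n);
    if (tmp == NULL)
        return NULL;
    unsigned char *dst = malloc(n);
    if (dst == NULL) {
        free(tmp);                    /* error path: release intermediate */
        return NULL;
    }
    for (size_t i = 0; i < n; i++)    /* pass 1: src -> tmp (placeholder) */
        tmp[i] = src[i];
    for (size_t i = 0; i < n; i++)    /* pass 2: tmp -> dst (placeholder) */
        dst[i] = tmp[i];
    free(tmp);                        /* success path: release intermediate */
    return dst;
}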
|
_gdScalePass(const gdImagePtr pSrc, const unsigned int src_len,
const gdImagePtr pDst, const unsigned int dst_len,
const unsigned int num_lines,
const gdAxis axis)
{
unsigned int line_ndx;
LineContribType * contrib;
/* Same dim, just copy it. */
assert(dst_len != src_len); // TODO: caller should handle this.
contrib = _gdContributionsCalc(dst_len, src_len,
(double)dst_len / (double)src_len,
pSrc->interpolation);
if (contrib == NULL) {
return 0;
}
/* Scale each line */
for (line_ndx = 0; line_ndx < num_lines; line_ndx++) {
_gdScaleOneAxis(pSrc, pDst, dst_len, line_ndx, contrib, axis);
}
_gdContributionsFree (contrib);
return 1;
}/* _gdScalePass*/
|
_gdScalePass(const gdImagePtr pSrc, const unsigned int src_len,
const gdImagePtr pDst, const unsigned int dst_len,
const unsigned int num_lines,
const gdAxis axis)
{
unsigned int line_ndx;
LineContribType * contrib;
/* Same dim, just copy it. */
assert(dst_len != src_len); // TODO: caller should handle this.
contrib = _gdContributionsCalc(dst_len, src_len,
(double)dst_len / (double)src_len,
pSrc->interpolation);
if (contrib == NULL) {
return 0;
}
/* Scale each line */
for (line_ndx = 0; line_ndx < num_lines; line_ndx++) {
_gdScaleOneAxis(pSrc, pDst, dst_len, line_ndx, contrib, axis);
}
_gdContributionsFree (contrib);
return 1;
}/* _gdScalePass*/
|
C
|
libgd
| 0 |
CVE-2014-1743
|
https://www.cvedetails.com/cve/CVE-2014-1743/
|
CWE-399
|
https://github.com/chromium/chromium/commit/6d9425ec7badda912555d46ea7abcfab81fdd9b9
|
6d9425ec7badda912555d46ea7abcfab81fdd9b9
|
sync compositor: pass simple gfx types by const ref
See bug for reasoning
BUG=159273
Review URL: https://codereview.chromium.org/1417893006
Cr-Commit-Position: refs/heads/master@{#356653}
|
void BrowserViewRenderer::DidUpdateContent() {
TRACE_EVENT_INSTANT0("android_webview",
"BrowserViewRenderer::DidUpdateContent",
TRACE_EVENT_SCOPE_THREAD);
clear_view_ = false;
if (on_new_picture_enable_)
client_->OnNewPicture();
}
|
void BrowserViewRenderer::DidUpdateContent() {
TRACE_EVENT_INSTANT0("android_webview",
"BrowserViewRenderer::DidUpdateContent",
TRACE_EVENT_SCOPE_THREAD);
clear_view_ = false;
if (on_new_picture_enable_)
client_->OnNewPicture();
}
|
C
|
Chrome
| 0 |
CVE-2013-2206
|
https://www.cvedetails.com/cve/CVE-2013-2206/
| null |
https://github.com/torvalds/linux/commit/f2815633504b442ca0b0605c16bf3d88a3a0fcea
|
f2815633504b442ca0b0605c16bf3d88a3a0fcea
|
sctp: Use correct sideffect command in duplicate cookie handling
When SCTP is done processing a duplicate cookie chunk, it tries
to delete a newly created association. For that, it has to set
the right association for the side-effect processing to work.
However, when it uses the SCTP_CMD_NEW_ASOC command, that performs
more work then really needed (like hashing the associationa and
assigning it an id) and there is no point to do that only to
delete the association as a next step. In fact, it also creates
an impossible condition where an association may be found by
the getsockopt() call, and that association is empty. This
causes a crash in some sctp getsockopts.
The solution is rather simple. We simply use SCTP_CMD_SET_ASOC
command that doesn't have all the overhead and does exactly
what we need.
Reported-by: Karl Heiss <[email protected]>
Tested-by: Karl Heiss <[email protected]>
CC: Neil Horman <[email protected]>
Signed-off-by: Vlad Yasevich <[email protected]>
Acked-by: Neil Horman <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
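The design point is about the scope of side-effect commands: when an object exists only to be torn down, set it as the current context rather than fully registering it, then delete it. A generic, purely illustrative C sketch of that shape (hypothetical command names, not the SCTP macros):

enum cmd { CMD_SET_CTX, CMD_DELETE_CTX };

struct assoc_ctx { int id; };

/* Queue a lightweight "set current context" command before the delete,
 * instead of a full "create and register" command: registration (hashing,
 * id assignment) is wasted work here and can expose a half-built object
 * to lookups, which is the problem the commit describes. */
static void queue_teardown(struct assoc_ctx *tmp,
                           void (*emit)(enum cmd, struct assoc_ctx *))
{
    emit(CMD_SET_CTX, tmp);      /* make it the current association only */
    emit(CMD_DELETE_CTX, tmp);   /* then dispose of it */
}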
|
sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
struct sctp_fwdtsn_skip *skip;
__u16 len;
__u32 tsn;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
/* Make sure that the FORWARD_TSN chunk has valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
chunk->subh.fwdtsn_hdr = fwdtsn_hdr;
len = ntohs(chunk->chunk_hdr->length);
len -= sizeof(struct sctp_chunkhdr);
skb_pull(chunk->skb, len);
tsn = ntohl(fwdtsn_hdr->new_cum_tsn);
SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __func__, tsn);
/* The TSN is too high--silently discard the chunk and count on it
* getting retransmitted later.
*/
if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
goto discard_noforce;
/* Silently discard the chunk if stream-id is not valid */
sctp_walk_fwdtsn(skip, chunk) {
if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
goto discard_noforce;
}
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
if (len > sizeof(struct sctp_fwdtsn_hdr))
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
SCTP_CHUNK(chunk));
/* Count this as receiving DATA. */
if (asoc->autoclose) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
}
/* FIXME: For now send a SACK, but DATA processing may
* send another.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE());
return SCTP_DISPOSITION_CONSUME;
discard_noforce:
return SCTP_DISPOSITION_DISCARD;
}
|
sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
struct sctp_fwdtsn_skip *skip;
__u16 len;
__u32 tsn;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
/* Make sure that the FORWARD_TSN chunk has valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
chunk->subh.fwdtsn_hdr = fwdtsn_hdr;
len = ntohs(chunk->chunk_hdr->length);
len -= sizeof(struct sctp_chunkhdr);
skb_pull(chunk->skb, len);
tsn = ntohl(fwdtsn_hdr->new_cum_tsn);
SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __func__, tsn);
/* The TSN is too high--silently discard the chunk and count on it
* getting retransmitted later.
*/
if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
goto discard_noforce;
/* Silently discard the chunk if stream-id is not valid */
sctp_walk_fwdtsn(skip, chunk) {
if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
goto discard_noforce;
}
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
if (len > sizeof(struct sctp_fwdtsn_hdr))
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
SCTP_CHUNK(chunk));
/* Count this as receiving DATA. */
if (asoc->autoclose) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
}
/* FIXME: For now send a SACK, but DATA processing may
* send another.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE());
return SCTP_DISPOSITION_CONSUME;
discard_noforce:
return SCTP_DISPOSITION_DISCARD;
}
|
C
|
linux
| 0 |
CVE-2018-10717
|
https://www.cvedetails.com/cve/CVE-2018-10717/
|
CWE-119
|
https://github.com/miniupnp/ngiflib/commit/cf429e0a2fe26b5f01ce0c8e9b79432e94509b6e
|
cf429e0a2fe26b5f01ce0c8e9b79432e94509b6e
|
fix "pixel overrun"
fixes #3
|
void GifImgDestroy(struct ngiflib_img * i) {
if(i==NULL) return;
if(i->next) GifImgDestroy(i->next);
if(i->palette && (i->palette != i->parent->palette))
ngiflib_free(i->palette);
ngiflib_free(i);
}
|
void GifImgDestroy(struct ngiflib_img * i) {
if(i==NULL) return;
if(i->next) GifImgDestroy(i->next);
if(i->palette && (i->palette != i->parent->palette))
ngiflib_free(i->palette);
ngiflib_free(i);
}
|
C
|
ngiflib
| 0 |
CVE-2014-4617
|
https://www.cvedetails.com/cve/CVE-2014-4617/
|
CWE-20
|
https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=commit;h=014b2103fcb12f261135e3954f26e9e07b39e342
|
014b2103fcb12f261135e3954f26e9e07b39e342
| null |
release_context (compress_filter_context_t *ctx)
{
xfree (ctx);
}
|
release_context (compress_filter_context_t *ctx)
{
xfree (ctx);
}
|
C
|
gnupg
| 0 |