CVE ID | CVE Page | CWE ID | codeLink | commit_id | commit_message | func_after | func_before | lang | project | vul
---|---|---|---|---|---|---|---|---|---|---
CVE-2016-9601
|
https://www.cvedetails.com/cve/CVE-2016-9601/
|
CWE-119
|
http://git.ghostscript.com/?p=jbig2dec.git;a=commit;h=e698d5c11d27212aa1098bc5b1673a3378563092
|
e698d5c11d27212aa1098bc5b1673a3378563092
| null |
jbig2_image_resize(Jbig2Ctx *ctx, Jbig2Image *image, uint32_t width, uint32_t height)
{
if (width == image->width) {
/* check for integer multiplication overflow */
int64_t check = ((int64_t) image->stride) * ((int64_t) height);
if (check != (int)check) {
jbig2_error(ctx, JBIG2_SEVERITY_FATAL, -1, "integer multiplication overflow during resize stride(%d)*height(%d)", image->stride, height);
return NULL;
}
/* use the same stride, just change the length */
image->data = jbig2_renew(ctx, image->data, uint8_t, (int)check);
if (image->data == NULL) {
jbig2_error(ctx, JBIG2_SEVERITY_FATAL, -1, "could not resize image buffer!");
return NULL;
}
if (height > image->height) {
memset(image->data + image->height * image->stride, 0, (height - image->height) * image->stride);
}
image->height = height;
} else {
/* we must allocate a new image buffer and copy */
jbig2_error(ctx, JBIG2_SEVERITY_WARNING, -1, "jbig2_image_resize called with a different width (NYI)");
}
return NULL;
}
|
jbig2_image_resize(Jbig2Ctx *ctx, Jbig2Image *image, int width, int height)
{
if (width == image->width) {
/* check for integer multiplication overflow */
int64_t check = ((int64_t) image->stride) * ((int64_t) height);
if (check != (int)check) {
jbig2_error(ctx, JBIG2_SEVERITY_FATAL, -1, "integer multiplication overflow during resize stride(%d)*height(%d)", image->stride, height);
return NULL;
}
/* use the same stride, just change the length */
image->data = jbig2_renew(ctx, image->data, uint8_t, (int)check);
if (image->data == NULL) {
jbig2_error(ctx, JBIG2_SEVERITY_FATAL, -1, "could not resize image buffer!");
return NULL;
}
if (height > image->height) {
memset(image->data + image->height * image->stride, 0, (height - image->height) * image->stride);
}
image->height = height;
} else {
/* we must allocate a new image buffer and copy */
jbig2_error(ctx, JBIG2_SEVERITY_WARNING, -1, "jbig2_image_resize called with a different width (NYI)");
}
return NULL;
}
|
C
|
ghostscript
| 1 |
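The jbig2dec fix above widens width/height to uint32_t and keeps the pattern of doing the stride*height multiplication in int64_t, then verifying that the product survives a round trip through the narrower type. A minimal standalone sketch of that check, using hypothetical names rather than the jbig2dec API:

```c
#include <stdint.h>
#include <stdio.h>

/* Returns 1 if stride * height fits in a non-negative int, 0 otherwise.
 * Same widen-then-narrow idea as the patch: multiply in int64_t, then
 * confirm the value is unchanged by a round trip through int. Note the
 * narrowing cast is implementation-defined for out-of-range values,
 * a caveat the original pattern shares. */
static int size_fits_int(uint32_t stride, uint32_t height)
{
    int64_t check = (int64_t)stride * (int64_t)height;
    return check >= 0 && check == (int64_t)(int)check;
}

int main(void)
{
    printf("%d\n", size_fits_int(4096, 1024));     /* 1: fits */
    printf("%d\n", size_fits_int(0x80000000u, 4)); /* 0: overflows int */
    return 0;
}
```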
CVE-2011-3209
|
https://www.cvedetails.com/cve/CVE-2011-3209/
|
CWE-189
|
https://github.com/torvalds/linux/commit/f8bd2258e2d520dff28c855658bd24bdafb5102d
|
f8bd2258e2d520dff28c855658bd24bdafb5102d
|
remove div_long_long_rem
x86 is the only arch right now which provides an optimized
div_long_long_rem, and it has the downside that one has to be very careful
that the divide doesn't overflow.
The API is a little awkward, as the arguments for the unsigned divide are
signed. The signed version also doesn't handle a negative divisor and
produces worse code on 64bit archs.
There is little incentive to keep this API alive, so this converts the few
users to the new API.
Signed-off-by: Roman Zippel <[email protected]>
Cc: Ralf Baechle <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: john stultz <[email protected]>
Cc: Christoph Lameter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
{
return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
}
|
static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
{
return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
}
|
C
|
linux
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/7cb8e1ae121cf6b14aa0a59cc708de630c0ef965
|
7cb8e1ae121cf6b14aa0a59cc708de630c0ef965
|
Move variations prefs into the variations component
These prefs are used by variations code that is targeted for componentization.
BUG=382865
TBR=thakis
Review URL: https://codereview.chromium.org/1265423003
Cr-Commit-Position: refs/heads/master@{#343661}
|
VariationsSeedStore::VerifySeedSignature(
const std::string& seed_bytes,
const std::string& base64_seed_signature) {
if (!SignatureVerificationEnabled())
return VARIATIONS_SEED_SIGNATURE_ENUM_SIZE;
if (base64_seed_signature.empty())
return VARIATIONS_SEED_SIGNATURE_MISSING;
std::string signature;
if (!base::Base64Decode(base64_seed_signature, &signature))
return VARIATIONS_SEED_SIGNATURE_DECODE_FAILED;
crypto::SignatureVerifier verifier;
if (!verifier.VerifyInit(
kECDSAWithSHA256AlgorithmID, sizeof(kECDSAWithSHA256AlgorithmID),
reinterpret_cast<const uint8*>(signature.data()), signature.size(),
kPublicKey, arraysize(kPublicKey))) {
return VARIATIONS_SEED_SIGNATURE_INVALID_SIGNATURE;
}
verifier.VerifyUpdate(reinterpret_cast<const uint8*>(seed_bytes.data()),
seed_bytes.size());
if (verifier.VerifyFinal())
return VARIATIONS_SEED_SIGNATURE_VALID;
return VARIATIONS_SEED_SIGNATURE_INVALID_SEED;
}
|
VariationsSeedStore::VerifySeedSignature(
const std::string& seed_bytes,
const std::string& base64_seed_signature) {
if (!SignatureVerificationEnabled())
return VARIATIONS_SEED_SIGNATURE_ENUM_SIZE;
if (base64_seed_signature.empty())
return VARIATIONS_SEED_SIGNATURE_MISSING;
std::string signature;
if (!base::Base64Decode(base64_seed_signature, &signature))
return VARIATIONS_SEED_SIGNATURE_DECODE_FAILED;
crypto::SignatureVerifier verifier;
if (!verifier.VerifyInit(
kECDSAWithSHA256AlgorithmID, sizeof(kECDSAWithSHA256AlgorithmID),
reinterpret_cast<const uint8*>(signature.data()), signature.size(),
kPublicKey, arraysize(kPublicKey))) {
return VARIATIONS_SEED_SIGNATURE_INVALID_SIGNATURE;
}
verifier.VerifyUpdate(reinterpret_cast<const uint8*>(seed_bytes.data()),
seed_bytes.size());
if (verifier.VerifyFinal())
return VARIATIONS_SEED_SIGNATURE_VALID;
return VARIATIONS_SEED_SIGNATURE_INVALID_SEED;
}
|
C
|
Chrome
| 0 |
CVE-2016-6136
|
https://www.cvedetails.com/cve/CVE-2016-6136/
|
CWE-362
|
https://github.com/torvalds/linux/commit/43761473c254b45883a64441dd0bc85a42f3645c
|
43761473c254b45883a64441dd0bc85a42f3645c
|
audit: fix a double fetch in audit_log_single_execve_arg()
There is a double fetch problem in audit_log_single_execve_arg()
where we first check the execve(2) arguments for any "bad" characters
which would require hex encoding and then re-fetch the arguments for
logging in the audit record[1]. Of course this leaves a window of
opportunity for an unsavory application to munge with the data.
This patch reworks things by only fetching the argument data once[2]
into a buffer where it is scanned and logged into the audit
record(s). In addition to fixing the double fetch, this patch
improves on the original code in a few other ways: better handling
of large arguments which require encoding, stricter record length
checking, and some performance improvements (completely unverified,
but we got rid of some strlen() calls, that's got to be a good
thing).
As part of the development of this patch, I've also created a basic
regression test for the audit-testsuite, the test can be tracked on
GitHub at the following link:
* https://github.com/linux-audit/audit-testsuite/issues/25
[1] If you pay careful attention, there is actually a triple fetch
problem due to a strnlen_user() call at the top of the function.
[2] This is a tiny white lie, we do make a call to strnlen_user()
prior to fetching the argument data. I don't like it, but due to the
way the audit record is structured we really have no choice unless we
copy the entire argument at once (which would require a rather
wasteful allocation). The good news is that with this patch the
kernel no longer relies on this strnlen_user() value for anything
beyond recording it in the log, we also update it with a trustworthy
value whenever possible.
Reported-by: Pengfei Wang <[email protected]>
Cc: <[email protected]>
Signed-off-by: Paul Moore <[email protected]>
|
static enum audit_state audit_filter_task(struct task_struct *tsk, char **key)
{
struct audit_entry *e;
enum audit_state state;
rcu_read_lock();
list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_TASK], list) {
if (audit_filter_rules(tsk, &e->rule, NULL, NULL,
&state, true)) {
if (state == AUDIT_RECORD_CONTEXT)
*key = kstrdup(e->rule.filterkey, GFP_ATOMIC);
rcu_read_unlock();
return state;
}
}
rcu_read_unlock();
return AUDIT_BUILD_CONTEXT;
}
|
static enum audit_state audit_filter_task(struct task_struct *tsk, char **key)
{
struct audit_entry *e;
enum audit_state state;
rcu_read_lock();
list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_TASK], list) {
if (audit_filter_rules(tsk, &e->rule, NULL, NULL,
&state, true)) {
if (state == AUDIT_RECORD_CONTEXT)
*key = kstrdup(e->rule.filterkey, GFP_ATOMIC);
rcu_read_unlock();
return state;
}
}
rcu_read_unlock();
return AUDIT_BUILD_CONTEXT;
}
|
C
|
linux
| 0 |
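The audit commit above is a textbook double-fetch repair: untrusted memory must be copied once into a private buffer, and both validation and use must operate on that copy. A minimal user-space sketch of the single-fetch discipline (hypothetical names; in the kernel the one fetch would be a single copy_from_user()):

```c
#include <stdlib.h>
#include <string.h>

/* Copy untrusted shared data exactly once, then validate and use only
 * the private copy. Re-reading `shared` between the validation pass
 * and the use would reopen the time-of-check/time-of-use window. */
static char *fetch_and_sanitize(const char *shared, size_t len)
{
    char *copy = malloc(len + 1);
    if (!copy)
        return NULL;
    memcpy(copy, shared, len);          /* the one and only fetch */
    copy[len] = '\0';
    for (size_t i = 0; i < len; i++) {  /* validate the copy... */
        unsigned char c = (unsigned char)copy[i];
        if (c < 0x20 || c == 0x7f) {    /* reject control characters */
            free(copy);
            return NULL;
        }
    }
    return copy;                        /* ...and use the same copy */
}
```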
CVE-2011-3209
|
https://www.cvedetails.com/cve/CVE-2011-3209/
|
CWE-189
|
https://github.com/torvalds/linux/commit/f8bd2258e2d520dff28c855658bd24bdafb5102d
|
f8bd2258e2d520dff28c855658bd24bdafb5102d
|
remove div_long_long_rem
x86 is the only arch right now which provides an optimized
div_long_long_rem, and it has the downside that one has to be very careful
that the divide doesn't overflow.
The API is a little awkward, as the arguments for the unsigned divide are
signed. The signed version also doesn't handle a negative divisor and
produces worse code on 64bit archs.
There is little incentive to keep this API alive, so this converts the few
users to the new API.
Signed-off-by: Roman Zippel <[email protected]>
Cc: Ralf Baechle <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: john stultz <[email protected]>
Cc: Christoph Lameter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static int count_inuse(struct page *page)
{
return page->inuse;
}
|
static int count_inuse(struct page *page)
{
return page->inuse;
}
|
C
|
linux
| 0 |
CVE-2011-1768
|
https://www.cvedetails.com/cve/CVE-2011-1768/
|
CWE-362
|
https://github.com/torvalds/linux/commit/d5aa407f59f5b83d2c50ec88f5bf56d40f1f8978
|
d5aa407f59f5b83d2c50ec88f5bf56d40f1f8978
|
tunnels: fix netns vs proto registration ordering
Same stuff as in ip_gre patch: receive hook can be called before netns
setup is done, oopsing in net_generic().
Signed-off-by: Alexey Dobriyan <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
{
return skb_network_header(skb)[IP6CB(skb)->nhoff];
}
|
static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
{
return skb_network_header(skb)[IP6CB(skb)->nhoff];
}
|
C
|
linux
| 0 |
CVE-2018-20843
|
https://www.cvedetails.com/cve/CVE-2018-20843/
|
CWE-611
|
https://github.com/libexpat/libexpat/pull/262/commits/11f8838bf99ea0a6f0b76f9760c43704d00c4ff6
|
11f8838bf99ea0a6f0b76f9760c43704d00c4ff6
|
xmlparse.c: Fix extraction of namespace prefix from XML name (#186)
|
XML_SetDefaultHandler(XML_Parser parser,
XML_DefaultHandler handler)
{
if (parser == NULL)
return;
parser->m_defaultHandler = handler;
parser->m_defaultExpandInternalEntities = XML_FALSE;
}
|
XML_SetDefaultHandler(XML_Parser parser,
XML_DefaultHandler handler)
{
if (parser == NULL)
return;
parser->m_defaultHandler = handler;
parser->m_defaultExpandInternalEntities = XML_FALSE;
}
|
C
|
libexpat
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/820957a3386e960334be3b93b48636e749d38ea3
|
820957a3386e960334be3b93b48636e749d38ea3
|
Make WebContentsDelegate::OpenColorChooser return NULL on failure
Changing WebContentsDelegate::OpenColorChooser to return NULL on failure so we don't put the same ColorChooser into two scoped_ptrs (WebContentsImpl::color_chooser_)
BUG=331790
Review URL: https://codereview.chromium.org/128053002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@244710 0039d316-1c4b-4281-b951-d872f2087c98
|
void WebContentsImpl::UpdateState(RenderViewHost* rvh,
int32 page_id,
const PageState& page_state) {
if (rvh != GetRenderViewHost() &&
!GetRenderManager()->IsRVHOnSwappedOutList(
static_cast<RenderViewHostImpl*>(rvh)))
return;
int entry_index = controller_.GetEntryIndexWithPageID(
rvh->GetSiteInstance(), page_id);
if (entry_index < 0)
return;
NavigationEntry* entry = controller_.GetEntryAtIndex(entry_index);
if (page_state == entry->GetPageState())
return; // Nothing to update.
entry->SetPageState(page_state);
controller_.NotifyEntryChanged(entry, entry_index);
}
|
void WebContentsImpl::UpdateState(RenderViewHost* rvh,
int32 page_id,
const PageState& page_state) {
if (rvh != GetRenderViewHost() &&
!GetRenderManager()->IsRVHOnSwappedOutList(
static_cast<RenderViewHostImpl*>(rvh)))
return;
int entry_index = controller_.GetEntryIndexWithPageID(
rvh->GetSiteInstance(), page_id);
if (entry_index < 0)
return;
NavigationEntry* entry = controller_.GetEntryAtIndex(entry_index);
if (page_state == entry->GetPageState())
return; // Nothing to update.
entry->SetPageState(page_state);
controller_.NotifyEntryChanged(entry, entry_index);
}
|
C
|
Chrome
| 0 |
CVE-2013-6623
|
https://www.cvedetails.com/cve/CVE-2013-6623/
|
CWE-119
|
https://github.com/chromium/chromium/commit/9fd9d629fcf836bb0d6210015d33a299cf6bca34
|
9fd9d629fcf836bb0d6210015d33a299cf6bca34
|
Make the policy fetch for first time login blocking
The CL makes policy fetching blocking on first-time login for all users, except the ones that are known to be non-enterprise users.
BUG=334584
Review URL: https://codereview.chromium.org/330843002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@282925 0039d316-1c4b-4281-b951-d872f2087c98
|
void InProcessBrowserTest::QuitBrowsers() {
if (chrome::GetTotalBrowserCount() == 0) {
chrome::NotifyAppTerminating();
return;
}
base::MessageLoopForUI::current()->PostTask(FROM_HERE,
base::Bind(&chrome::AttemptExit));
content::RunMessageLoop();
#if defined(OS_MACOSX)
content::RunAllPendingInMessageLoop();
delete autorelease_pool_;
autorelease_pool_ = NULL;
#endif
}
|
void InProcessBrowserTest::QuitBrowsers() {
if (chrome::GetTotalBrowserCount() == 0) {
chrome::NotifyAppTerminating();
return;
}
base::MessageLoopForUI::current()->PostTask(FROM_HERE,
base::Bind(&chrome::AttemptExit));
content::RunMessageLoop();
#if defined(OS_MACOSX)
content::RunAllPendingInMessageLoop();
delete autorelease_pool_;
autorelease_pool_ = NULL;
#endif
}
|
C
|
Chrome
| 0 |
CVE-2016-10165
|
https://www.cvedetails.com/cve/CVE-2016-10165/
|
CWE-125
|
https://github.com/mm2/Little-CMS/commit/5ca71a7bc18b6897ab21d815d15e218e204581e2
|
5ca71a7bc18b6897ab21d815d15e218e204581e2
|
Added an extra check to MLU bounds
Thanks to Ibrahim el-sayed for spotting the bug
|
void *Type_ParametricCurve_Read(struct _cms_typehandler_struct* self, cmsIOHANDLER* io, cmsUInt32Number* nItems, cmsUInt32Number SizeOfTag)
{
static const int ParamsByType[] = { 1, 3, 4, 5, 7 };
cmsFloat64Number Params[10];
cmsUInt16Number Type;
int i, n;
cmsToneCurve* NewGamma;
if (!_cmsReadUInt16Number(io, &Type)) return NULL;
if (!_cmsReadUInt16Number(io, NULL)) return NULL; // Reserved
if (Type > 4) {
cmsSignalError(self->ContextID, cmsERROR_UNKNOWN_EXTENSION, "Unknown parametric curve type '%d'", Type);
return NULL;
}
memset(Params, 0, sizeof(Params));
n = ParamsByType[Type];
for (i=0; i < n; i++) {
if (!_cmsRead15Fixed16Number(io, &Params[i])) return NULL;
}
NewGamma = cmsBuildParametricToneCurve(self ->ContextID, Type+1, Params);
*nItems = 1;
return NewGamma;
cmsUNUSED_PARAMETER(SizeOfTag);
}
|
void *Type_ParametricCurve_Read(struct _cms_typehandler_struct* self, cmsIOHANDLER* io, cmsUInt32Number* nItems, cmsUInt32Number SizeOfTag)
{
static const int ParamsByType[] = { 1, 3, 4, 5, 7 };
cmsFloat64Number Params[10];
cmsUInt16Number Type;
int i, n;
cmsToneCurve* NewGamma;
if (!_cmsReadUInt16Number(io, &Type)) return NULL;
if (!_cmsReadUInt16Number(io, NULL)) return NULL; // Reserved
if (Type > 4) {
cmsSignalError(self->ContextID, cmsERROR_UNKNOWN_EXTENSION, "Unknown parametric curve type '%d'", Type);
return NULL;
}
memset(Params, 0, sizeof(Params));
n = ParamsByType[Type];
for (i=0; i < n; i++) {
if (!_cmsRead15Fixed16Number(io, &Params[i])) return NULL;
}
NewGamma = cmsBuildParametricToneCurve(self ->ContextID, Type+1, Params);
*nItems = 1;
return NewGamma;
cmsUNUSED_PARAMETER(SizeOfTag);
}
|
C
|
Little-CMS
| 0 |
CVE-2018-16427
|
https://www.cvedetails.com/cve/CVE-2018-16427/
|
CWE-125
|
https://github.com/OpenSC/OpenSC/pull/1447/commits/8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
|
8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
|
fixed out of bounds reads
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting and suggesting security fixes.
|
static int setcos_generate_store_key(sc_card_t *card,
struct sc_cardctl_setcos_gen_store_key_info *data)
{
struct sc_apdu apdu;
u8 sbuf[SC_MAX_APDU_BUFFER_SIZE];
int r, len;
SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE);
/* Setup key-generation parameters */
len = 0;
if (data->op_type == OP_TYPE_GENERATE)
sbuf[len++] = 0x92; /* algo ID: RSA CRT */
else
sbuf[len++] = 0x9A; /* algo ID: EXTERNALLY GENERATED RSA CRT */
sbuf[len++] = 0x00;
sbuf[len++] = data->mod_len / 256; /* 2 bytes for modulus bitlength */
sbuf[len++] = data->mod_len % 256;
sbuf[len++] = data->pubexp_len / 256; /* 2 bytes for pubexp bitlength */
sbuf[len++] = data->pubexp_len % 256;
memcpy(sbuf + len, data->pubexp, (data->pubexp_len + 7) / 8);
len += (data->pubexp_len + 7) / 8;
if (data->op_type == OP_TYPE_STORE) {
sbuf[len++] = data->primep_len / 256;
sbuf[len++] = data->primep_len % 256;
memcpy(sbuf + len, data->primep, (data->primep_len + 7) / 8);
len += (data->primep_len + 7) / 8;
sbuf[len++] = data->primeq_len / 256;
sbuf[len++] = data->primeq_len % 256;
memcpy(sbuf + len, data->primeq, (data->primeq_len + 7) / 8);
len += (data->primeq_len + 7) / 8;
}
sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0x46, 0x00, 0x00);
apdu.cla = 0x00;
apdu.data = sbuf;
apdu.datalen = len;
apdu.lc = len;
r = sc_transmit_apdu(card, &apdu);
SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "APDU transmit failed");
r = sc_check_sw(card, apdu.sw1, apdu.sw2);
SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "STORE/GENERATE_KEY returned error");
SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, r);
}
|
static int setcos_generate_store_key(sc_card_t *card,
struct sc_cardctl_setcos_gen_store_key_info *data)
{
struct sc_apdu apdu;
u8 sbuf[SC_MAX_APDU_BUFFER_SIZE];
int r, len;
SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE);
/* Setup key-generation parameters */
len = 0;
if (data->op_type == OP_TYPE_GENERATE)
sbuf[len++] = 0x92; /* algo ID: RSA CRT */
else
sbuf[len++] = 0x9A; /* algo ID: EXTERNALLY GENERATED RSA CRT */
sbuf[len++] = 0x00;
sbuf[len++] = data->mod_len / 256; /* 2 bytes for modulus bitlength */
sbuf[len++] = data->mod_len % 256;
sbuf[len++] = data->pubexp_len / 256; /* 2 bytes for pubexp bitlength */
sbuf[len++] = data->pubexp_len % 256;
memcpy(sbuf + len, data->pubexp, (data->pubexp_len + 7) / 8);
len += (data->pubexp_len + 7) / 8;
if (data->op_type == OP_TYPE_STORE) {
sbuf[len++] = data->primep_len / 256;
sbuf[len++] = data->primep_len % 256;
memcpy(sbuf + len, data->primep, (data->primep_len + 7) / 8);
len += (data->primep_len + 7) / 8;
sbuf[len++] = data->primeq_len / 256;
sbuf[len++] = data->primeq_len % 256;
memcpy(sbuf + len, data->primeq, (data->primeq_len + 7) / 8);
len += (data->primeq_len + 7) / 8;
}
sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0x46, 0x00, 0x00);
apdu.cla = 0x00;
apdu.data = sbuf;
apdu.datalen = len;
apdu.lc = len;
r = sc_transmit_apdu(card, &apdu);
SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "APDU transmit failed");
r = sc_check_sw(card, apdu.sw1, apdu.sw2);
SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "STORE/GENERATE_KEY returned error");
SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, r);
}
|
C
|
OpenSC
| 0 |
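The OpenSC function above appends caller-sized fields into a fixed sbuf[SC_MAX_APDU_BUFFER_SIZE] with unguarded memcpy() calls; the out-of-bounds fix amounts to bounding every append against the remaining space. A minimal sketch of a guarded-append helper (hypothetical, not the OpenSC API):

```c
#include <string.h>

/* Append n bytes of src to buf at offset *len, refusing to exceed cap.
 * Returns 0 on success, -1 if the copy would not fit. Assumes the
 * caller maintains *len <= cap, so cap - *len cannot underflow. */
static int append_bounded(unsigned char *buf, size_t cap, size_t *len,
                          const unsigned char *src, size_t n)
{
    if (n > cap - *len)
        return -1;
    memcpy(buf + *len, src, n);
    *len += n;
    return 0;
}
```

In setcos_generate_store_key(), each memcpy of (data->pubexp_len + 7) / 8 or prime-length bytes would go through such a check and fail cleanly instead of writing past the stack buffer.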
CVE-2017-5093
|
https://www.cvedetails.com/cve/CVE-2017-5093/
|
CWE-20
|
https://github.com/chromium/chromium/commit/0720b02e4f303ea6b114d4ae9453e3a7ff55f8dc
|
0720b02e4f303ea6b114d4ae9453e3a7ff55f8dc
|
If JavaScript shows a dialog, cause the page to lose fullscreen.
BUG=670135, 550017, 726761, 728276
Review-Url: https://codereview.chromium.org/2906133004
Cr-Commit-Position: refs/heads/master@{#478884}
|
void WebContentsImpl::Replace(const base::string16& word) {
RenderFrameHost* focused_frame = GetFocusedFrame();
if (!focused_frame)
return;
focused_frame->GetFrameInputHandler()->Replace(word);
}
|
void WebContentsImpl::Replace(const base::string16& word) {
RenderFrameHost* focused_frame = GetFocusedFrame();
if (!focused_frame)
return;
focused_frame->GetFrameInputHandler()->Replace(word);
}
|
C
|
Chrome
| 0 |
CVE-2014-3191
|
https://www.cvedetails.com/cve/CVE-2014-3191/
|
CWE-416
|
https://github.com/chromium/chromium/commit/11a4cc4a6d6e665d9a118fada4b7c658d6f70d95
|
11a4cc4a6d6e665d9a118fada4b7c658d6f70d95
|
Defer call to updateWidgetPositions() outside of RenderLayerScrollableArea.
updateWidgetPositions() can destroy the render tree, so it should never
be called from inside RenderLayerScrollableArea. Leaving it there allows
for the potential of use-after-free bugs.
BUG=402407
[email protected]
Review URL: https://codereview.chromium.org/490473003
git-svn-id: svn://svn.chromium.org/blink/trunk@180681 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
GraphicsLayer* RenderLayerScrollableArea::layerForVerticalScrollbar() const
{
DisableCompositingQueryAsserts disabler;
return layer()->hasCompositedLayerMapping() ? layer()->compositedLayerMapping()->layerForVerticalScrollbar() : 0;
}
|
GraphicsLayer* RenderLayerScrollableArea::layerForVerticalScrollbar() const
{
DisableCompositingQueryAsserts disabler;
return layer()->hasCompositedLayerMapping() ? layer()->compositedLayerMapping()->layerForVerticalScrollbar() : 0;
}
|
C
|
Chrome
| 0 |
CVE-2013-7026
|
https://www.cvedetails.com/cve/CVE-2013-7026/
|
CWE-362
|
https://github.com/torvalds/linux/commit/a399b29dfbaaaf91162b2dc5a5875dd51bbfa2a1
|
a399b29dfbaaaf91162b2dc5a5875dd51bbfa2a1
|
ipc,shm: fix shm_file deletion races
When IPC_RMID races with other shm operations there's potential for
use-after-free of the shm object's associated file (shm_file).
Here's the race before this patch:
TASK 1                          TASK 2
------                          ------
shm_rmid()
  ipc_lock_object()
                                shmctl()
                                shp = shm_obtain_object_check()
  shm_destroy()
    shm_unlock()
    fput(shp->shm_file)
                                ipc_lock_object()
                                shmem_lock(shp->shm_file)
                                <OOPS>
The oops is caused because shm_destroy() calls fput() after dropping the
ipc_lock. fput() clears the file's f_inode, f_path.dentry, and
f_path.mnt, which causes various NULL pointer references in task 2. I
reliably see the oops in task 2 if with shmlock, shmu
This patch fixes the races by:
1) setting shm_file=NULL in shm_destroy() while holding ipc_object_lock();
2) modifying at-risk operations to check shm_file while holding
ipc_object_lock().
Example workloads, each of which triggers the oops...
Workload 1:
while true; do
id=$(shmget 1 4096)
shm_rmid $id &
shmlock $id &
wait
done
The oops stack shows accessing NULL f_inode due to racing fput:
_raw_spin_lock
shmem_lock
SyS_shmctl
Workload 2:
while true; do
id=$(shmget 1 4096)
shmat $id 4096 &
shm_rmid $id &
wait
done
The oops stack is similar to workload 1 due to NULL f_inode:
touch_atime
shmem_mmap
shm_mmap
mmap_region
do_mmap_pgoff
do_shmat
SyS_shmat
Workload 3:
while true; do
id=$(shmget 1 4096)
shmlock $id
shm_rmid $id &
shmunlock $id &
wait
done
The oops stack shows the second fput() tripping on a NULL f_inode. The
first fput() completed via shm_destroy(), but a racing thread did
a get_file() and queued this fput():
locks_remove_flock
__fput
____fput
task_work_run
do_notify_resume
int_signal
Fixes: c2c737a0461e ("ipc,shm: shorten critical region for shmat")
Fixes: 2caacaa82a51 ("ipc,shm: shorten critical region for shmctl")
Signed-off-by: Greg Thelen <[email protected]>
Cc: Davidlohr Bueso <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Manfred Spraul <[email protected]>
Cc: <[email protected]> # 3.10.17+ 3.11.6+
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long addr = (unsigned long)shmaddr;
int retval = -EINVAL;
#ifdef CONFIG_MMU
loff_t size = 0;
struct vm_area_struct *next;
#endif
if (addr & ~PAGE_MASK)
return retval;
down_write(&mm->mmap_sem);
/*
* This function tries to be smart and unmap shm segments that
* were modified by partial mlock or munmap calls:
* - It first determines the size of the shm segment that should be
* unmapped: It searches for a vma that is backed by shm and that
* started at address shmaddr. It records it's size and then unmaps
* it.
* - Then it unmaps all shm vmas that started at shmaddr and that
* are within the initially determined size.
* Errors from do_munmap are ignored: the function only fails if
* it's called with invalid parameters or if it's called to unmap
* a part of a vma. Both calls in this function are for full vmas,
* the parameters are directly copied from the vma itself and always
* valid - therefore do_munmap cannot fail. (famous last words?)
*/
/*
* If it had been mremap()'d, the starting address would not
* match the usual checks anyway. So assume all vma's are
* above the starting address given.
*/
vma = find_vma(mm, addr);
#ifdef CONFIG_MMU
while (vma) {
next = vma->vm_next;
/*
* Check if the starting address would match, i.e. it's
* a fragment created by mprotect() and/or munmap(), or it
* otherwise it starts at this address with no hassles.
*/
if ((vma->vm_ops == &shm_vm_ops) &&
(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
size = file_inode(vma->vm_file)->i_size;
do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
/*
* We discovered the size of the shm segment, so
* break out of here and fall through to the next
* loop that uses the size information to stop
* searching for matching vma's.
*/
retval = 0;
vma = next;
break;
}
vma = next;
}
/*
* We need look no further than the maximum address a fragment
* could possibly have landed at. Also cast things to loff_t to
* prevent overflows and make comparisons vs. equal-width types.
*/
size = PAGE_ALIGN(size);
while (vma && (loff_t)(vma->vm_end - addr) <= size) {
next = vma->vm_next;
/* finding a matching vma now does not alter retval */
if ((vma->vm_ops == &shm_vm_ops) &&
(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)
do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
vma = next;
}
#else /* CONFIG_MMU */
/* under NOMMU conditions, the exact address to be destroyed must be
* given */
if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
retval = 0;
}
#endif
up_write(&mm->mmap_sem);
return retval;
}
|
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long addr = (unsigned long)shmaddr;
int retval = -EINVAL;
#ifdef CONFIG_MMU
loff_t size = 0;
struct vm_area_struct *next;
#endif
if (addr & ~PAGE_MASK)
return retval;
down_write(&mm->mmap_sem);
/*
* This function tries to be smart and unmap shm segments that
* were modified by partial mlock or munmap calls:
* - It first determines the size of the shm segment that should be
* unmapped: It searches for a vma that is backed by shm and that
* started at address shmaddr. It records it's size and then unmaps
* it.
* - Then it unmaps all shm vmas that started at shmaddr and that
* are within the initially determined size.
* Errors from do_munmap are ignored: the function only fails if
* it's called with invalid parameters or if it's called to unmap
* a part of a vma. Both calls in this function are for full vmas,
* the parameters are directly copied from the vma itself and always
* valid - therefore do_munmap cannot fail. (famous last words?)
*/
/*
* If it had been mremap()'d, the starting address would not
* match the usual checks anyway. So assume all vma's are
* above the starting address given.
*/
vma = find_vma(mm, addr);
#ifdef CONFIG_MMU
while (vma) {
next = vma->vm_next;
/*
* Check if the starting address would match, i.e. it's
* a fragment created by mprotect() and/or munmap(), or it
* otherwise it starts at this address with no hassles.
*/
if ((vma->vm_ops == &shm_vm_ops) &&
(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
size = file_inode(vma->vm_file)->i_size;
do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
/*
* We discovered the size of the shm segment, so
* break out of here and fall through to the next
* loop that uses the size information to stop
* searching for matching vma's.
*/
retval = 0;
vma = next;
break;
}
vma = next;
}
/*
* We need look no further than the maximum address a fragment
* could possibly have landed at. Also cast things to loff_t to
* prevent overflows and make comparisons vs. equal-width types.
*/
size = PAGE_ALIGN(size);
while (vma && (loff_t)(vma->vm_end - addr) <= size) {
next = vma->vm_next;
/* finding a matching vma now does not alter retval */
if ((vma->vm_ops == &shm_vm_ops) &&
(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)
do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
vma = next;
}
#else /* CONFIG_MMU */
/* under NOMMU conditions, the exact address to be destroyed must be
* given */
if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
retval = 0;
}
#endif
up_write(&mm->mmap_sem);
return retval;
}
|
C
|
linux
| 0 |
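The shm fix works by clearing the file pointer inside the locked region, so every other path can re-check it under the same lock and back off instead of touching a file that a racing IPC_RMID already released. A condensed sketch of that idiom with a pthread mutex standing in for ipc_lock_object() (hypothetical types, not the kernel structures):

```c
#include <pthread.h>
#include <stddef.h>

struct file_ref;                 /* opaque stand-in for struct file */

struct segment {
    pthread_mutex_t lock;        /* stands in for ipc_lock_object() */
    struct file_ref *file;       /* NULLed by the destroy path */
};

/* Destroy path: detach the pointer while locked, release it after
 * unlocking (the real shm_destroy() fput()s outside the ipc lock). */
static struct file_ref *segment_detach_file(struct segment *s)
{
    pthread_mutex_lock(&s->lock);
    struct file_ref *f = s->file;
    s->file = NULL;
    pthread_mutex_unlock(&s->lock);
    return f;                    /* caller drops the reference here */
}

/* At-risk operation: re-check under the lock and bail out if a
 * racing destroy already detached the file. */
static int segment_use_file(struct segment *s)
{
    pthread_mutex_lock(&s->lock);
    if (s->file == NULL) {       /* lost the race with destroy */
        pthread_mutex_unlock(&s->lock);
        return -1;
    }
    /* ... operate on s->file while still holding the lock ... */
    pthread_mutex_unlock(&s->lock);
    return 0;
}
```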
CVE-2018-6096
|
https://www.cvedetails.com/cve/CVE-2018-6096/
| null |
https://github.com/chromium/chromium/commit/36f801fdbec07d116a6f4f07bb363f10897d6a51
|
36f801fdbec07d116a6f4f07bb363f10897d6a51
|
If a page calls |window.focus()|, kick it out of fullscreen.
BUG=776418, 800056
Change-Id: I1880fe600e4814c073f247c43b1c1ac80c8fc017
Reviewed-on: https://chromium-review.googlesource.com/852378
Reviewed-by: Nasko Oskov <[email protected]>
Reviewed-by: Philip Jägenstedt <[email protected]>
Commit-Queue: Avi Drissman <[email protected]>
Cr-Commit-Position: refs/heads/master@{#533790}
|
int Height() const { return size_.height; }
|
int Height() const { return size_.height; }
|
C
|
Chrome
| 0 |
CVE-2013-7020
|
https://www.cvedetails.com/cve/CVE-2013-7020/
|
CWE-119
|
https://github.com/FFmpeg/FFmpeg/commit/b05cd1ea7e45a836f7f6071a716c38bb30326e0f
|
b05cd1ea7e45a836f7f6071a716c38bb30326e0f
|
ffv1dec: Check bits_per_raw_sample and colorspace for equality in ver 0/1 headers
Signed-off-by: Michael Niedermayer <[email protected]>
|
static int read_header(FFV1Context *f)
{
uint8_t state[CONTEXT_SIZE];
int i, j, context_count = -1; //-1 to avoid warning
RangeCoder *const c = &f->slice_context[0]->c;
memset(state, 128, sizeof(state));
if (f->version < 2) {
int chroma_planes, chroma_h_shift, chroma_v_shift, transparency, colorspace, bits_per_raw_sample;
unsigned v= get_symbol(c, state, 0);
if (v >= 2) {
av_log(f->avctx, AV_LOG_ERROR, "invalid version %d in ver01 header\n", v);
return AVERROR_INVALIDDATA;
}
f->version = v;
f->ac = f->avctx->coder_type = get_symbol(c, state, 0);
if (f->ac > 1) {
for (i = 1; i < 256; i++)
f->state_transition[i] = get_symbol(c, state, 1) + c->one_state[i];
}
colorspace = get_symbol(c, state, 0); //YUV cs type
bits_per_raw_sample = f->version > 0 ? get_symbol(c, state, 0) : f->avctx->bits_per_raw_sample;
chroma_planes = get_rac(c, state);
chroma_h_shift = get_symbol(c, state, 0);
chroma_v_shift = get_symbol(c, state, 0);
transparency = get_rac(c, state);
if (f->plane_count) {
if ( colorspace != f->colorspace
|| bits_per_raw_sample != f->avctx->bits_per_raw_sample
|| chroma_planes != f->chroma_planes
|| chroma_h_shift!= f->chroma_h_shift
|| chroma_v_shift!= f->chroma_v_shift
|| transparency != f->transparency) {
av_log(f->avctx, AV_LOG_ERROR, "Invalid change of global parameters\n");
return AVERROR_INVALIDDATA;
}
}
f->colorspace = colorspace;
f->avctx->bits_per_raw_sample = bits_per_raw_sample;
f->chroma_planes = chroma_planes;
f->chroma_h_shift = chroma_h_shift;
f->chroma_v_shift = chroma_v_shift;
f->transparency = transparency;
f->plane_count = 2 + f->transparency;
}
if (f->colorspace == 0) {
if (!f->transparency && !f->chroma_planes) {
if (f->avctx->bits_per_raw_sample <= 8)
f->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
else
f->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
} else if (f->avctx->bits_per_raw_sample<=8 && !f->transparency) {
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P; break;
case 0x01: f->avctx->pix_fmt = AV_PIX_FMT_YUV440P; break;
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P; break;
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P; break;
case 0x20: f->avctx->pix_fmt = AV_PIX_FMT_YUV411P; break;
case 0x22: f->avctx->pix_fmt = AV_PIX_FMT_YUV410P; break;
default:
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return AVERROR(ENOSYS);
}
} else if (f->avctx->bits_per_raw_sample <= 8 && f->transparency) {
switch(16*f->chroma_h_shift + f->chroma_v_shift) {
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P; break;
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P; break;
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P; break;
default:
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return AVERROR(ENOSYS);
}
} else if (f->avctx->bits_per_raw_sample == 9) {
f->packed_at_lsb = 1;
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P9; break;
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P9; break;
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P9; break;
default:
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return AVERROR(ENOSYS);
}
} else if (f->avctx->bits_per_raw_sample == 10) {
f->packed_at_lsb = 1;
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P10; break;
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P10; break;
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P10; break;
default:
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return AVERROR(ENOSYS);
}
} else {
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P16; break;
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P16; break;
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P16; break;
default:
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return AVERROR(ENOSYS);
}
}
} else if (f->colorspace == 1) {
if (f->chroma_h_shift || f->chroma_v_shift) {
av_log(f->avctx, AV_LOG_ERROR,
"chroma subsampling not supported in this colorspace\n");
return AVERROR(ENOSYS);
}
if ( f->avctx->bits_per_raw_sample == 9)
f->avctx->pix_fmt = AV_PIX_FMT_GBRP9;
else if (f->avctx->bits_per_raw_sample == 10)
f->avctx->pix_fmt = AV_PIX_FMT_GBRP10;
else if (f->avctx->bits_per_raw_sample == 12)
f->avctx->pix_fmt = AV_PIX_FMT_GBRP12;
else if (f->avctx->bits_per_raw_sample == 14)
f->avctx->pix_fmt = AV_PIX_FMT_GBRP14;
else
if (f->transparency) f->avctx->pix_fmt = AV_PIX_FMT_RGB32;
else f->avctx->pix_fmt = AV_PIX_FMT_0RGB32;
} else {
av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
return AVERROR(ENOSYS);
}
av_dlog(f->avctx, "%d %d %d\n",
f->chroma_h_shift, f->chroma_v_shift, f->avctx->pix_fmt);
if (f->version < 2) {
context_count = read_quant_tables(c, f->quant_table);
if (context_count < 0) {
av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
return AVERROR_INVALIDDATA;
}
} else if (f->version < 3) {
f->slice_count = get_symbol(c, state, 0);
} else {
const uint8_t *p = c->bytestream_end;
for (f->slice_count = 0;
f->slice_count < MAX_SLICES && 3 < p - c->bytestream_start;
f->slice_count++) {
int trailer = 3 + 5*!!f->ec;
int size = AV_RB24(p-trailer);
if (size + trailer > p - c->bytestream_start)
break;
p -= size + trailer;
}
}
if (f->slice_count > (unsigned)MAX_SLICES || f->slice_count <= 0) {
av_log(f->avctx, AV_LOG_ERROR, "slice count %d is invalid\n", f->slice_count);
return AVERROR_INVALIDDATA;
}
for (j = 0; j < f->slice_count; j++) {
FFV1Context *fs = f->slice_context[j];
fs->ac = f->ac;
fs->packed_at_lsb = f->packed_at_lsb;
fs->slice_damaged = 0;
if (f->version == 2) {
fs->slice_x = get_symbol(c, state, 0) * f->width ;
fs->slice_y = get_symbol(c, state, 0) * f->height;
fs->slice_width = (get_symbol(c, state, 0) + 1) * f->width + fs->slice_x;
fs->slice_height = (get_symbol(c, state, 0) + 1) * f->height + fs->slice_y;
fs->slice_x /= f->num_h_slices;
fs->slice_y /= f->num_v_slices;
fs->slice_width = fs->slice_width / f->num_h_slices - fs->slice_x;
fs->slice_height = fs->slice_height / f->num_v_slices - fs->slice_y;
if ((unsigned)fs->slice_width > f->width ||
(unsigned)fs->slice_height > f->height)
return AVERROR_INVALIDDATA;
if ( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width
|| (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
return AVERROR_INVALIDDATA;
}
for (i = 0; i < f->plane_count; i++) {
PlaneContext *const p = &fs->plane[i];
if (f->version == 2) {
int idx = get_symbol(c, state, 0);
if (idx > (unsigned)f->quant_table_count) {
av_log(f->avctx, AV_LOG_ERROR,
"quant_table_index out of range\n");
return AVERROR_INVALIDDATA;
}
p->quant_table_index = idx;
memcpy(p->quant_table, f->quant_tables[idx],
sizeof(p->quant_table));
context_count = f->context_count[idx];
} else {
memcpy(p->quant_table, f->quant_table, sizeof(p->quant_table));
}
if (f->version <= 2) {
av_assert0(context_count >= 0);
if (p->context_count < context_count) {
av_freep(&p->state);
av_freep(&p->vlc_state);
}
p->context_count = context_count;
}
}
}
return 0;
}
|
static int read_header(FFV1Context *f)
{
uint8_t state[CONTEXT_SIZE];
int i, j, context_count = -1; //-1 to avoid warning
RangeCoder *const c = &f->slice_context[0]->c;
memset(state, 128, sizeof(state));
if (f->version < 2) {
int chroma_planes, chroma_h_shift, chroma_v_shift, transparency;
unsigned v= get_symbol(c, state, 0);
if (v >= 2) {
av_log(f->avctx, AV_LOG_ERROR, "invalid version %d in ver01 header\n", v);
return AVERROR_INVALIDDATA;
}
f->version = v;
f->ac = f->avctx->coder_type = get_symbol(c, state, 0);
if (f->ac > 1) {
for (i = 1; i < 256; i++)
f->state_transition[i] = get_symbol(c, state, 1) + c->one_state[i];
}
f->colorspace = get_symbol(c, state, 0); //YUV cs type
if (f->version > 0)
f->avctx->bits_per_raw_sample = get_symbol(c, state, 0);
chroma_planes = get_rac(c, state);
chroma_h_shift = get_symbol(c, state, 0);
chroma_v_shift = get_symbol(c, state, 0);
transparency = get_rac(c, state);
if (f->plane_count) {
if ( chroma_planes != f->chroma_planes
|| chroma_h_shift!= f->chroma_h_shift
|| chroma_v_shift!= f->chroma_v_shift
|| transparency != f->transparency) {
av_log(f->avctx, AV_LOG_ERROR, "Invalid change of global parameters\n");
return AVERROR_INVALIDDATA;
}
}
f->chroma_planes = chroma_planes;
f->chroma_h_shift = chroma_h_shift;
f->chroma_v_shift = chroma_v_shift;
f->transparency = transparency;
f->plane_count = 2 + f->transparency;
}
if (f->colorspace == 0) {
if (!f->transparency && !f->chroma_planes) {
if (f->avctx->bits_per_raw_sample <= 8)
f->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
else
f->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
} else if (f->avctx->bits_per_raw_sample<=8 && !f->transparency) {
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P; break;
case 0x01: f->avctx->pix_fmt = AV_PIX_FMT_YUV440P; break;
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P; break;
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P; break;
case 0x20: f->avctx->pix_fmt = AV_PIX_FMT_YUV411P; break;
case 0x22: f->avctx->pix_fmt = AV_PIX_FMT_YUV410P; break;
default:
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return AVERROR(ENOSYS);
}
} else if (f->avctx->bits_per_raw_sample <= 8 && f->transparency) {
switch(16*f->chroma_h_shift + f->chroma_v_shift) {
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P; break;
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P; break;
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P; break;
default:
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return AVERROR(ENOSYS);
}
} else if (f->avctx->bits_per_raw_sample == 9) {
f->packed_at_lsb = 1;
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P9; break;
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P9; break;
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P9; break;
default:
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return AVERROR(ENOSYS);
}
} else if (f->avctx->bits_per_raw_sample == 10) {
f->packed_at_lsb = 1;
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P10; break;
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P10; break;
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P10; break;
default:
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return AVERROR(ENOSYS);
}
} else {
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P16; break;
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P16; break;
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P16; break;
default:
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return AVERROR(ENOSYS);
}
}
} else if (f->colorspace == 1) {
if (f->chroma_h_shift || f->chroma_v_shift) {
av_log(f->avctx, AV_LOG_ERROR,
"chroma subsampling not supported in this colorspace\n");
return AVERROR(ENOSYS);
}
if ( f->avctx->bits_per_raw_sample == 9)
f->avctx->pix_fmt = AV_PIX_FMT_GBRP9;
else if (f->avctx->bits_per_raw_sample == 10)
f->avctx->pix_fmt = AV_PIX_FMT_GBRP10;
else if (f->avctx->bits_per_raw_sample == 12)
f->avctx->pix_fmt = AV_PIX_FMT_GBRP12;
else if (f->avctx->bits_per_raw_sample == 14)
f->avctx->pix_fmt = AV_PIX_FMT_GBRP14;
else
if (f->transparency) f->avctx->pix_fmt = AV_PIX_FMT_RGB32;
else f->avctx->pix_fmt = AV_PIX_FMT_0RGB32;
} else {
av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
return AVERROR(ENOSYS);
}
av_dlog(f->avctx, "%d %d %d\n",
f->chroma_h_shift, f->chroma_v_shift, f->avctx->pix_fmt);
if (f->version < 2) {
context_count = read_quant_tables(c, f->quant_table);
if (context_count < 0) {
av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
return AVERROR_INVALIDDATA;
}
} else if (f->version < 3) {
f->slice_count = get_symbol(c, state, 0);
} else {
const uint8_t *p = c->bytestream_end;
for (f->slice_count = 0;
f->slice_count < MAX_SLICES && 3 < p - c->bytestream_start;
f->slice_count++) {
int trailer = 3 + 5*!!f->ec;
int size = AV_RB24(p-trailer);
if (size + trailer > p - c->bytestream_start)
break;
p -= size + trailer;
}
}
if (f->slice_count > (unsigned)MAX_SLICES || f->slice_count <= 0) {
av_log(f->avctx, AV_LOG_ERROR, "slice count %d is invalid\n", f->slice_count);
return AVERROR_INVALIDDATA;
}
for (j = 0; j < f->slice_count; j++) {
FFV1Context *fs = f->slice_context[j];
fs->ac = f->ac;
fs->packed_at_lsb = f->packed_at_lsb;
fs->slice_damaged = 0;
if (f->version == 2) {
fs->slice_x = get_symbol(c, state, 0) * f->width ;
fs->slice_y = get_symbol(c, state, 0) * f->height;
fs->slice_width = (get_symbol(c, state, 0) + 1) * f->width + fs->slice_x;
fs->slice_height = (get_symbol(c, state, 0) + 1) * f->height + fs->slice_y;
fs->slice_x /= f->num_h_slices;
fs->slice_y /= f->num_v_slices;
fs->slice_width = fs->slice_width / f->num_h_slices - fs->slice_x;
fs->slice_height = fs->slice_height / f->num_v_slices - fs->slice_y;
if ((unsigned)fs->slice_width > f->width ||
(unsigned)fs->slice_height > f->height)
return AVERROR_INVALIDDATA;
if ( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width
|| (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
return AVERROR_INVALIDDATA;
}
for (i = 0; i < f->plane_count; i++) {
PlaneContext *const p = &fs->plane[i];
if (f->version == 2) {
int idx = get_symbol(c, state, 0);
if (idx > (unsigned)f->quant_table_count) {
av_log(f->avctx, AV_LOG_ERROR,
"quant_table_index out of range\n");
return AVERROR_INVALIDDATA;
}
p->quant_table_index = idx;
memcpy(p->quant_table, f->quant_tables[idx],
sizeof(p->quant_table));
context_count = f->context_count[idx];
} else {
memcpy(p->quant_table, f->quant_table, sizeof(p->quant_table));
}
if (f->version <= 2) {
av_assert0(context_count >= 0);
if (p->context_count < context_count) {
av_freep(&p->state);
av_freep(&p->vlc_state);
}
p->context_count = context_count;
}
}
}
return 0;
}
|
C
|
FFmpeg
| 1 |
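The ffv1dec change shows the read-validate-commit idiom: header fields are parsed into locals, compared for equality against any previously committed state, and only then stored, so a second version-0/1 header cannot silently switch colorspace or bits_per_raw_sample mid-stream. A minimal sketch of the same idiom (hypothetical names):

```c
/* Parse into temporaries, require equality with committed state if any
 * exists, then commit. Mirrors the decoder's "Invalid change of global
 * parameters" check above. */
struct stream_params { int initialized; int colorspace; int bits; };

static int update_params(struct stream_params *p, int colorspace, int bits)
{
    if (p->initialized &&
        (colorspace != p->colorspace || bits != p->bits))
        return -1;               /* reject a change of global parameters */
    p->colorspace = colorspace;  /* commit only after the check passes */
    p->bits = bits;
    p->initialized = 1;
    return 0;
}
```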
null | null | null |
https://github.com/chromium/chromium/commit/283fb25624bf253d120708152e23cf9143519198
|
283fb25624bf253d120708152e23cf9143519198
|
Coverity: Fixing pass-by-value bugs.
CID=101466, 101464, 101494, 101495, 101496, 101497
BUG=NONE
TEST=NONE
Review URL: http://codereview.chromium.org/8956046
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@115399 0039d316-1c4b-4281-b951-d872f2087c98
|
void TaskManagerHandler::DisableTaskManager(const ListValue* indexes) {
if (!is_enabled_)
return;
is_enabled_ = false;
model_->StopUpdating();
model_->RemoveObserver(this);
}
|
void TaskManagerHandler::DisableTaskManager(const ListValue* indexes) {
if (!is_enabled_)
return;
is_enabled_ = false;
model_->StopUpdating();
model_->RemoveObserver(this);
}
|
C
|
Chrome
| 0 |
CVE-2014-9421
|
https://www.cvedetails.com/cve/CVE-2014-9421/
| null |
https://github.com/krb5/krb5/commit/a197e92349a4aa2141b5dff12e9dd44c2a2166e3
|
a197e92349a4aa2141b5dff12e9dd44c2a2166e3
|
Fix kadm5/gssrpc XDR double free [CVE-2014-9421]
[MITKRB5-SA-2015-001] In auth_gssapi_unwrap_data(), do not free
partial deserialization results upon failure to deserialize. This
responsibility belongs to the callers, svctcp_getargs() and
svcudp_getargs(); doing it in the unwrap function results in freeing
the results twice.
In xdr_krb5_tl_data() and xdr_krb5_principal(), null out the pointers
we are freeing, as other XDR functions such as xdr_bytes() and
xdr_string() do.
ticket: 8056 (new)
target_version: 1.13.1
tags: pullup
|
bool_t xdr_ui_4(XDR *xdrs, krb5_ui_4 *objp)
{
/* Assumes that krb5_ui_4 and u_int32 are both four bytes long.
This should not be a harmful assumption. */
return xdr_u_int32(xdrs, (uint32_t *) objp);
}
|
bool_t xdr_ui_4(XDR *xdrs, krb5_ui_4 *objp)
{
/* Assumes that krb5_ui_4 and u_int32 are both four bytes long.
This should not be a harmful assumption. */
return xdr_u_int32(xdrs, (uint32_t *) objp);
}
|
C
|
krb5
| 0 |
CVE-2015-8126
|
https://www.cvedetails.com/cve/CVE-2015-8126/
|
CWE-119
|
https://github.com/chromium/chromium/commit/7f3d85b096f66870a15b37c2f40b219b2e292693
|
7f3d85b096f66870a15b37c2f40b219b2e292693
|
third_party/libpng: update to 1.2.54
[email protected]
BUG=560291
Review URL: https://codereview.chromium.org/1467263003
Cr-Commit-Position: refs/heads/master@{#362298}
|
png_set_cHRM_fixed(png_structp png_ptr, png_infop info_ptr,
png_fixed_point white_x, png_fixed_point white_y, png_fixed_point red_x,
png_fixed_point red_y, png_fixed_point green_x, png_fixed_point green_y,
png_fixed_point blue_x, png_fixed_point blue_y)
{
png_debug1(1, "in %s storage function", "cHRM fixed");
if (png_ptr == NULL || info_ptr == NULL)
return;
#ifdef PNG_CHECK_cHRM_SUPPORTED
if (png_check_cHRM_fixed(png_ptr,
white_x, white_y, red_x, red_y, green_x, green_y, blue_x, blue_y))
#endif
{
info_ptr->int_x_white = white_x;
info_ptr->int_y_white = white_y;
info_ptr->int_x_red = red_x;
info_ptr->int_y_red = red_y;
info_ptr->int_x_green = green_x;
info_ptr->int_y_green = green_y;
info_ptr->int_x_blue = blue_x;
info_ptr->int_y_blue = blue_y;
#ifdef PNG_FLOATING_POINT_SUPPORTED
info_ptr->x_white = (float)(white_x/100000.);
info_ptr->y_white = (float)(white_y/100000.);
info_ptr->x_red = (float)( red_x/100000.);
info_ptr->y_red = (float)( red_y/100000.);
info_ptr->x_green = (float)(green_x/100000.);
info_ptr->y_green = (float)(green_y/100000.);
info_ptr->x_blue = (float)( blue_x/100000.);
info_ptr->y_blue = (float)( blue_y/100000.);
#endif
info_ptr->valid |= PNG_INFO_cHRM;
}
}
|
png_set_cHRM_fixed(png_structp png_ptr, png_infop info_ptr,
png_fixed_point white_x, png_fixed_point white_y, png_fixed_point red_x,
png_fixed_point red_y, png_fixed_point green_x, png_fixed_point green_y,
png_fixed_point blue_x, png_fixed_point blue_y)
{
png_debug1(1, "in %s storage function", "cHRM fixed");
if (png_ptr == NULL || info_ptr == NULL)
return;
#ifdef PNG_CHECK_cHRM_SUPPORTED
if (png_check_cHRM_fixed(png_ptr,
white_x, white_y, red_x, red_y, green_x, green_y, blue_x, blue_y))
#endif
{
info_ptr->int_x_white = white_x;
info_ptr->int_y_white = white_y;
info_ptr->int_x_red = red_x;
info_ptr->int_y_red = red_y;
info_ptr->int_x_green = green_x;
info_ptr->int_y_green = green_y;
info_ptr->int_x_blue = blue_x;
info_ptr->int_y_blue = blue_y;
#ifdef PNG_FLOATING_POINT_SUPPORTED
info_ptr->x_white = (float)(white_x/100000.);
info_ptr->y_white = (float)(white_y/100000.);
info_ptr->x_red = (float)( red_x/100000.);
info_ptr->y_red = (float)( red_y/100000.);
info_ptr->x_green = (float)(green_x/100000.);
info_ptr->y_green = (float)(green_y/100000.);
info_ptr->x_blue = (float)( blue_x/100000.);
info_ptr->y_blue = (float)( blue_y/100000.);
#endif
info_ptr->valid |= PNG_INFO_cHRM;
}
}
|
C
|
Chrome
| 0 |
CVE-2012-6638
|
https://www.cvedetails.com/cve/CVE-2012-6638/
|
CWE-399
|
https://github.com/torvalds/linux/commit/fdf5af0daf8019cec2396cdef8fb042d80fe71fa
|
fdf5af0daf8019cec2396cdef8fb042d80fe71fa
|
tcp: drop SYN+FIN messages
Denys Fedoryshchenko reported that SYN+FIN attacks were bringing his
linux machines to their limits.
Don't call conn_request() if the TCP flags include the SYN flag
Reported-by: Denys Fedoryshchenko <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static inline void tcp_store_ts_recent(struct tcp_sock *tp)
{
tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
tp->rx_opt.ts_recent_stamp = get_seconds();
}
|
static inline void tcp_store_ts_recent(struct tcp_sock *tp)
{
tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
tp->rx_opt.ts_recent_stamp = get_seconds();
}
|
C
|
linux
| 0 |
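The SYN+FIN fix is a one-line flag test in front of the connection-request path. A minimal sketch of the predicate (hypothetical flag constants, not the kernel's tcphdr bitfields):

```c
#include <stdint.h>

enum { TH_FIN = 0x01, TH_SYN = 0x02 };  /* TCP header flag bits */

/* Returns 1 if the segment carries both SYN and FIN and should be
 * discarded before conn_request() is ever called. */
static int drop_syn_fin(uint8_t th_flags)
{
    return (th_flags & (TH_SYN | TH_FIN)) == (TH_SYN | TH_FIN);
}
```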
CVE-2012-2744
|
https://www.cvedetails.com/cve/CVE-2012-2744/
| null |
https://github.com/torvalds/linux/commit/9e2dcf72023d1447f09c47d77c99b0c49659e5ce
|
9e2dcf72023d1447f09c47d77c99b0c49659e5ce
|
netfilter: nf_conntrack_reasm: properly handle packets fragmented into a single fragment
When an ICMPV6_PKT_TOOBIG message is received with a MTU below 1280,
all further packets include a fragment header.
Unlike regular defragmentation, conntrack also needs to "reassemble"
those fragments in order to obtain a packet without the fragment
header for connection tracking. Currently nf_conntrack_reasm checks
whether a fragment has either IP6_MF set or an offset != 0, which
makes it ignore those fragments.
Remove the invalid check and make reassembly handle fragment queues
containing only a single fragment.
Reported-and-tested-by: Ulrich Weber <[email protected]>
Signed-off-by: Patrick McHardy <[email protected]>
|
static void nf_skb_free(struct sk_buff *skb)
{
if (NFCT_FRAG6_CB(skb)->orig)
kfree_skb(NFCT_FRAG6_CB(skb)->orig);
}
|
static void nf_skb_free(struct sk_buff *skb)
{
if (NFCT_FRAG6_CB(skb)->orig)
kfree_skb(NFCT_FRAG6_CB(skb)->orig);
}
|
C
|
linux
| 0 |
CVE-2016-7127
|
https://www.cvedetails.com/cve/CVE-2016-7127/
|
CWE-787
|
https://github.com/php/php-src/commit/1bd103df00f49cf4d4ade2cfe3f456ac058a4eae?w=1
|
1bd103df00f49cf4d4ade2cfe3f456ac058a4eae?w=1
|
Fix bug #72730 - imagegammacorrect allows arbitrary write access
|
PHP_MINFO_FUNCTION(gd)
{
php_info_print_table_start();
php_info_print_table_row(2, "GD Support", "enabled");
/* need to use a PHPAPI function here because it is external module in windows */
#if defined(HAVE_GD_BUNDLED)
php_info_print_table_row(2, "GD Version", PHP_GD_VERSION_STRING);
#else
php_info_print_table_row(2, "GD headers Version", PHP_GD_VERSION_STRING);
#if defined(HAVE_GD_LIBVERSION)
php_info_print_table_row(2, "GD library Version", gdVersionString());
#endif
#endif
#ifdef ENABLE_GD_TTF
php_info_print_table_row(2, "FreeType Support", "enabled");
#if HAVE_LIBFREETYPE
php_info_print_table_row(2, "FreeType Linkage", "with freetype");
{
char tmp[256];
#ifdef FREETYPE_PATCH
snprintf(tmp, sizeof(tmp), "%d.%d.%d", FREETYPE_MAJOR, FREETYPE_MINOR, FREETYPE_PATCH);
#elif defined(FREETYPE_MAJOR)
snprintf(tmp, sizeof(tmp), "%d.%d", FREETYPE_MAJOR, FREETYPE_MINOR);
#else
snprintf(tmp, sizeof(tmp), "1.x");
#endif
php_info_print_table_row(2, "FreeType Version", tmp);
}
#else
php_info_print_table_row(2, "FreeType Linkage", "with unknown library");
#endif
#endif
#ifdef HAVE_LIBT1
php_info_print_table_row(2, "T1Lib Support", "enabled");
#endif
php_info_print_table_row(2, "GIF Read Support", "enabled");
php_info_print_table_row(2, "GIF Create Support", "enabled");
#ifdef HAVE_GD_JPG
{
php_info_print_table_row(2, "JPEG Support", "enabled");
php_info_print_table_row(2, "libJPEG Version", gdJpegGetVersionString());
}
#endif
#ifdef HAVE_GD_PNG
php_info_print_table_row(2, "PNG Support", "enabled");
php_info_print_table_row(2, "libPNG Version", gdPngGetVersionString());
#endif
php_info_print_table_row(2, "WBMP Support", "enabled");
#if defined(HAVE_GD_XPM)
php_info_print_table_row(2, "XPM Support", "enabled");
{
char tmp[12];
snprintf(tmp, sizeof(tmp), "%d", XpmLibraryVersion());
php_info_print_table_row(2, "libXpm Version", tmp);
}
#endif
php_info_print_table_row(2, "XBM Support", "enabled");
#if defined(USE_GD_JISX0208)
php_info_print_table_row(2, "JIS-mapped Japanese Font Support", "enabled");
#endif
#ifdef HAVE_GD_WEBP
php_info_print_table_row(2, "WebP Support", "enabled");
#endif
php_info_print_table_end();
DISPLAY_INI_ENTRIES();
}
|
PHP_MINFO_FUNCTION(gd)
{
php_info_print_table_start();
php_info_print_table_row(2, "GD Support", "enabled");
/* need to use a PHPAPI function here because it is external module in windows */
#if defined(HAVE_GD_BUNDLED)
php_info_print_table_row(2, "GD Version", PHP_GD_VERSION_STRING);
#else
php_info_print_table_row(2, "GD headers Version", PHP_GD_VERSION_STRING);
#if defined(HAVE_GD_LIBVERSION)
php_info_print_table_row(2, "GD library Version", gdVersionString());
#endif
#endif
#ifdef ENABLE_GD_TTF
php_info_print_table_row(2, "FreeType Support", "enabled");
#if HAVE_LIBFREETYPE
php_info_print_table_row(2, "FreeType Linkage", "with freetype");
{
char tmp[256];
#ifdef FREETYPE_PATCH
snprintf(tmp, sizeof(tmp), "%d.%d.%d", FREETYPE_MAJOR, FREETYPE_MINOR, FREETYPE_PATCH);
#elif defined(FREETYPE_MAJOR)
snprintf(tmp, sizeof(tmp), "%d.%d", FREETYPE_MAJOR, FREETYPE_MINOR);
#else
snprintf(tmp, sizeof(tmp), "1.x");
#endif
php_info_print_table_row(2, "FreeType Version", tmp);
}
#else
php_info_print_table_row(2, "FreeType Linkage", "with unknown library");
#endif
#endif
#ifdef HAVE_LIBT1
php_info_print_table_row(2, "T1Lib Support", "enabled");
#endif
php_info_print_table_row(2, "GIF Read Support", "enabled");
php_info_print_table_row(2, "GIF Create Support", "enabled");
#ifdef HAVE_GD_JPG
{
php_info_print_table_row(2, "JPEG Support", "enabled");
php_info_print_table_row(2, "libJPEG Version", gdJpegGetVersionString());
}
#endif
#ifdef HAVE_GD_PNG
php_info_print_table_row(2, "PNG Support", "enabled");
php_info_print_table_row(2, "libPNG Version", gdPngGetVersionString());
#endif
php_info_print_table_row(2, "WBMP Support", "enabled");
#if defined(HAVE_GD_XPM)
php_info_print_table_row(2, "XPM Support", "enabled");
{
char tmp[12];
snprintf(tmp, sizeof(tmp), "%d", XpmLibraryVersion());
php_info_print_table_row(2, "libXpm Version", tmp);
}
#endif
php_info_print_table_row(2, "XBM Support", "enabled");
#if defined(USE_GD_JISX0208)
php_info_print_table_row(2, "JIS-mapped Japanese Font Support", "enabled");
#endif
#ifdef HAVE_GD_WEBP
php_info_print_table_row(2, "WebP Support", "enabled");
#endif
php_info_print_table_end();
DISPLAY_INI_ENTRIES();
}
|
C
|
php-src
| 0 |
CVE-2012-5148
|
https://www.cvedetails.com/cve/CVE-2012-5148/
|
CWE-20
|
https://github.com/chromium/chromium/commit/e89cfcb9090e8c98129ae9160c513f504db74599
|
e89cfcb9090e8c98129ae9160c513f504db74599
|
Remove TabContents from TabStripModelObserver::TabDetachedAt.
BUG=107201
TEST=no visible change
Review URL: https://chromiumcodereview.appspot.com/11293205
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@167122 0039d316-1c4b-4281-b951-d872f2087c98
|
void BrowserWindowGtk::EnterFullscreen(
const GURL& url, FullscreenExitBubbleType type) {
gtk_window_fullscreen(window_);
fullscreen_exit_bubble_type_ = type;
}
|
void BrowserWindowGtk::EnterFullscreen(
const GURL& url, FullscreenExitBubbleType type) {
gtk_window_fullscreen(window_);
fullscreen_exit_bubble_type_ = type;
}
|
C
|
Chrome
| 0 |
CVE-2016-5218
|
https://www.cvedetails.com/cve/CVE-2016-5218/
|
CWE-20
|
https://github.com/chromium/chromium/commit/45d901b56f578a74b19ba0d10fa5c4c467f19303
|
45d901b56f578a74b19ba0d10fa5c4c467f19303
|
Paint tab groups with the group color.
* The background of TabGroupHeader now uses the group color.
* The backgrounds of tabs in the group are tinted with the group color.
This treatment, along with the colors chosen, are intended to be
a placeholder.
Bug: 905491
Change-Id: Ic808548f8eba23064606e7fb8c9bba281d0d117f
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1610504
Commit-Queue: Bret Sepulveda <[email protected]>
Reviewed-by: Taylor Bergquist <[email protected]>
Cr-Commit-Position: refs/heads/master@{#660498}
|
void TabStrip::OnTouchUiChanged() {
UpdateNewTabButtonBorder();
new_tab_button_bounds_.set_size(new_tab_button_->GetPreferredSize());
new_tab_button_->SetBoundsRect(new_tab_button_bounds_);
StopAnimating(true);
PreferredSizeChanged();
}
|
void TabStrip::OnTouchUiChanged() {
UpdateNewTabButtonBorder();
new_tab_button_bounds_.set_size(new_tab_button_->GetPreferredSize());
new_tab_button_->SetBoundsRect(new_tab_button_bounds_);
StopAnimating(true);
PreferredSizeChanged();
}
|
C
|
Chrome
| 0 |
CVE-2014-9940
|
https://www.cvedetails.com/cve/CVE-2014-9940/
|
CWE-416
|
https://github.com/torvalds/linux/commit/60a2362f769cf549dc466134efe71c8bf9fbaaba
|
60a2362f769cf549dc466134efe71c8bf9fbaaba
|
regulator: core: Fix regulator_ena_gpio_free not to access pin after freeing
After freeing a pin in regulator_ena_gpio_free, the loop can still access
the pin. So this patch fixes the loop not to access the pin after freeing.
Signed-off-by: Seung-Woo Kim <[email protected]>
Signed-off-by: Mark Brown <[email protected]>
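A sketch of the safe loop shape, assuming the usual regulator-core names (regulator_ena_gpio_list, request_count, gpiod); the point is returning immediately after kfree(pin) instead of letting the walk continue:
static void regulator_ena_gpio_free(struct regulator_dev *rdev)
{
	struct regulator_enable_gpio *pin, *n;

	if (!rdev->ena_pin)
		return;

	list_for_each_entry_safe(pin, n, &regulator_ena_gpio_list, list) {
		if (pin->gpiod == rdev->ena_pin->gpiod) {
			if (pin->request_count <= 1) {
				pin->request_count = 0;
				gpiod_put(pin->gpiod);
				list_del(&pin->list);
				kfree(pin);
				rdev->ena_pin = NULL;
				return;	/* never touch 'pin' after kfree() */
			}
			pin->request_count--;
		}
	}
}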
|
static int regulator_check_consumers(struct regulator_dev *rdev,
int *min_uV, int *max_uV)
{
struct regulator *regulator;
list_for_each_entry(regulator, &rdev->consumer_list, list) {
/*
* Assume consumers that didn't say anything are OK
* with anything in the constraint range.
*/
if (!regulator->min_uV && !regulator->max_uV)
continue;
if (*max_uV > regulator->max_uV)
*max_uV = regulator->max_uV;
if (*min_uV < regulator->min_uV)
*min_uV = regulator->min_uV;
}
if (*min_uV > *max_uV) {
rdev_err(rdev, "Restricting voltage, %u-%uuV\n",
*min_uV, *max_uV);
return -EINVAL;
}
return 0;
}
|
static int regulator_check_consumers(struct regulator_dev *rdev,
int *min_uV, int *max_uV)
{
struct regulator *regulator;
list_for_each_entry(regulator, &rdev->consumer_list, list) {
/*
* Assume consumers that didn't say anything are OK
* with anything in the constraint range.
*/
if (!regulator->min_uV && !regulator->max_uV)
continue;
if (*max_uV > regulator->max_uV)
*max_uV = regulator->max_uV;
if (*min_uV < regulator->min_uV)
*min_uV = regulator->min_uV;
}
if (*min_uV > *max_uV) {
rdev_err(rdev, "Restricting voltage, %u-%uuV\n",
*min_uV, *max_uV);
return -EINVAL;
}
return 0;
}
|
C
|
linux
| 0 |
CVE-2018-10124
|
https://www.cvedetails.com/cve/CVE-2018-10124/
|
CWE-119
|
https://github.com/torvalds/linux/commit/4ea77014af0d6205b05503d1c7aac6eace11d473
|
4ea77014af0d6205b05503d1c7aac6eace11d473
|
kernel/signal.c: avoid undefined behaviour in kill_something_info
When running kill(72057458746458112, 0) in userspace I hit the following
issue.
UBSAN: Undefined behaviour in kernel/signal.c:1462:11
negation of -2147483648 cannot be represented in type 'int':
CPU: 226 PID: 9849 Comm: test Tainted: G B ---- ------- 3.10.0-327.53.58.70.x86_64_ubsan+ #116
Hardware name: Huawei Technologies Co., Ltd. RH8100 V3/BC61PBIA, BIOS BLHSV028 11/11/2014
Call Trace:
dump_stack+0x19/0x1b
ubsan_epilogue+0xd/0x50
__ubsan_handle_negate_overflow+0x109/0x14e
SYSC_kill+0x43e/0x4d0
SyS_kill+0xe/0x10
system_call_fastpath+0x16/0x1b
Add code to avoid the UBSAN detection.
[[email protected]: tweak comment]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: zhongjiang <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Xishi Qiu <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
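A minimal sketch of the guard the message describes, assuming it sits near the top of kill_something_info() before any use of -pid; the group-kill call below is a paraphrase of the kernel/signal.c code path, shown only to mark where the negation becomes safe:
	/*
	 * -INT_MIN cannot be represented in a two's complement int,
	 * so reject it before the negation below can happen.
	 */
	if (pid == INT_MIN)
		return -ESRCH;

	if (pid < -1)
		ret = __kill_pgrp_info(sig, info, find_vpid(-pid));	/* now safe */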
|
SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current->restart_block;
return restart->fn(restart);
}
|
SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current->restart_block;
return restart->fn(restart);
}
|
C
|
linux
| 0 |
CVE-2017-18203
|
https://www.cvedetails.com/cve/CVE-2017-18203/
|
CWE-362
|
https://github.com/torvalds/linux/commit/b9a41d21dceadf8104812626ef85dc56ee8a60ed
|
b9a41d21dceadf8104812626ef85dc56ee8a60ed
|
dm: fix race between dm_get_from_kobject() and __dm_destroy()
The following BUG_ON was hit when testing repeated creation and removal of
DM devices:
kernel BUG at drivers/md/dm.c:2919!
CPU: 7 PID: 750 Comm: systemd-udevd Not tainted 4.1.44
Call Trace:
[<ffffffff81649e8b>] dm_get_from_kobject+0x34/0x3a
[<ffffffff81650ef1>] dm_attr_show+0x2b/0x5e
[<ffffffff817b46d1>] ? mutex_lock+0x26/0x44
[<ffffffff811df7f5>] sysfs_kf_seq_show+0x83/0xcf
[<ffffffff811de257>] kernfs_seq_show+0x23/0x25
[<ffffffff81199118>] seq_read+0x16f/0x325
[<ffffffff811de994>] kernfs_fop_read+0x3a/0x13f
[<ffffffff8117b625>] __vfs_read+0x26/0x9d
[<ffffffff8130eb59>] ? security_file_permission+0x3c/0x44
[<ffffffff8117bdb8>] ? rw_verify_area+0x83/0xd9
[<ffffffff8117be9d>] vfs_read+0x8f/0xcf
[<ffffffff81193e34>] ? __fdget_pos+0x12/0x41
[<ffffffff8117c686>] SyS_read+0x4b/0x76
[<ffffffff817b606e>] system_call_fastpath+0x12/0x71
The bug can be easily triggered, if an extra delay (e.g. 10ms) is added
between the test of DMF_FREEING & DMF_DELETING and dm_get() in
dm_get_from_kobject().
To fix it, we need to ensure the test of DMF_FREEING & DMF_DELETING and
dm_get() are done in an atomic way, so _minor_lock is used.
The other callers of dm_get() have also been checked to be OK: some
callers invoke dm_get() under _minor_lock, some callers invoke it under
_hash_lock, and dm_start_request() invoke it after increasing
md->open_count.
Cc: [email protected]
Signed-off-by: Hou Tao <[email protected]>
Signed-off-by: Mike Snitzer <[email protected]>
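A sketch of the locked lookup the message describes; the names (_minor_lock, DMF_FREEING, dm_deleting_md, kobj_holder) are taken from the message and the usual dm code layout, and the point is that the flag test and dm_get() now happen atomically:
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md = container_of(kobj, struct mapped_device,
						kobj_holder.kobj);

	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}
	dm_get(md);	/* atomic with the flag test, closing the UAF window */
out:
	spin_unlock(&_minor_lock);
	return md;
}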
|
unsigned __dm_get_module_param(unsigned *module_param,
unsigned def, unsigned max)
{
unsigned param = ACCESS_ONCE(*module_param);
unsigned modified_param = 0;
if (!param)
modified_param = def;
else if (param > max)
modified_param = max;
if (modified_param) {
(void)cmpxchg(module_param, param, modified_param);
param = modified_param;
}
return param;
}
|
unsigned __dm_get_module_param(unsigned *module_param,
unsigned def, unsigned max)
{
unsigned param = ACCESS_ONCE(*module_param);
unsigned modified_param = 0;
if (!param)
modified_param = def;
else if (param > max)
modified_param = max;
if (modified_param) {
(void)cmpxchg(module_param, param, modified_param);
param = modified_param;
}
return param;
}
|
C
|
linux
| 0 |
CVE-2011-2839
|
https://www.cvedetails.com/cve/CVE-2011-2839/
|
CWE-20
|
https://github.com/chromium/chromium/commit/c63f2b7fe4fe2977f858a8e36d5f48db17eff2e7
|
c63f2b7fe4fe2977f858a8e36d5f48db17eff2e7
|
Extend TTS extension API to support richer events returned from the engine
to the client. Previously we just had a completed event; this adds start,
word boundary, sentence boundary, and marker boundary. In addition,
interrupted and canceled, which were previously errors, now become events.
Mac and Windows implementations extended to support as many of these events
as possible.
BUG=67713
BUG=70198
BUG=75106
BUG=83404
TEST=Updates all TTS API tests to be event-based, and adds new tests.
Review URL: http://codereview.chromium.org/6792014
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@91665 0039d316-1c4b-4281-b951-d872f2087c98
|
void ExtensionTtsController::ClearUtteranceQueue() {
std::set<std::string> required_event_types;
if (options->HasKey(constants::kRequiredEventTypesKey)) {
ListValue* list;
EXTENSION_FUNCTION_VALIDATE(
options->GetList(constants::kRequiredEventTypesKey, &list));
for (size_t i = 0; i < list->GetSize(); i++) {
std::string event_type;
if (!list->GetString(i, &event_type))
required_event_types.insert(event_type);
}
}
|
void ExtensionTtsController::ClearUtteranceQueue() {
while (!utterance_queue_.empty()) {
Utterance* utterance = utterance_queue_.front();
utterance_queue_.pop();
utterance->set_error(kSpeechRemovedFromQueueError);
utterance->FinishAndDestroy();
}
}
|
C
|
Chrome
| 1 |
CVE-2018-6135
|
https://www.cvedetails.com/cve/CVE-2018-6135/
| null |
https://github.com/chromium/chromium/commit/2ccbb407dccc976ae4bdbaa5ff2f777f4eb0723b
|
2ccbb407dccc976ae4bdbaa5ff2f777f4eb0723b
|
Force a flush of drawing to the widget when a dialog is shown.
BUG=823353
TEST=as in bug
Change-Id: I5da777068fc29c5638ef02d50e59d5d7b2729260
Reviewed-on: https://chromium-review.googlesource.com/971661
Reviewed-by: Ken Buchanan <[email protected]>
Commit-Queue: Avi Drissman <[email protected]>
Cr-Commit-Position: refs/heads/master@{#544518}
|
void RenderWidgetHostImpl::PauseForPendingResizeOrRepaints() {
TRACE_EVENT0("browser",
"RenderWidgetHostImpl::PauseForPendingResizeOrRepaints");
if (is_hidden())
return;
if (!repaint_ack_pending_ && !resize_ack_pending_)
return;
if (auto_resize_enabled_)
return;
if (!view_)
return;
const int kPaintMsgTimeoutMS = 167;
TRACE_EVENT0("renderer_host", "RenderWidgetHostImpl::WaitForSurface");
DCHECK(!is_hidden_) << "WaitForSurface called while hidden!";
DCHECK(!in_get_backing_store_) << "WaitForSurface called recursively!";
base::AutoReset<bool> auto_reset_in_get_backing_store(
&in_get_backing_store_, true);
if (!view_->ShouldContinueToPauseForFrame())
return;
TimeTicks start_time = TimeTicks::Now();
TimeDelta time_left = TimeDelta::FromMilliseconds(kPaintMsgTimeoutMS);
TimeTicks timeout_time = start_time + time_left;
while (1) {
TRACE_EVENT0("renderer_host", "WaitForSurface::WaitForSingleTaskToRun");
if (ui::WindowResizeHelperMac::Get()->WaitForSingleTaskToRun(time_left)) {
if (!view_->ShouldContinueToPauseForFrame())
break;
}
time_left = timeout_time - TimeTicks::Now();
if (time_left <= TimeDelta::FromSeconds(0)) {
TRACE_EVENT0("renderer_host", "WaitForSurface::Timeout");
break;
}
}
}
|
void RenderWidgetHostImpl::PauseForPendingResizeOrRepaints() {
TRACE_EVENT0("browser",
"RenderWidgetHostImpl::PauseForPendingResizeOrRepaints");
if (is_hidden())
return;
if (!repaint_ack_pending_ && !resize_ack_pending_)
return;
if (auto_resize_enabled_)
return;
if (!view_)
return;
const int kPaintMsgTimeoutMS = 167;
TRACE_EVENT0("renderer_host", "RenderWidgetHostImpl::WaitForSurface");
DCHECK(!is_hidden_) << "WaitForSurface called while hidden!";
DCHECK(!in_get_backing_store_) << "WaitForSurface called recursively!";
base::AutoReset<bool> auto_reset_in_get_backing_store(
&in_get_backing_store_, true);
if (!view_->ShouldContinueToPauseForFrame())
return;
TimeTicks start_time = TimeTicks::Now();
TimeDelta time_left = TimeDelta::FromMilliseconds(kPaintMsgTimeoutMS);
TimeTicks timeout_time = start_time + time_left;
while (1) {
TRACE_EVENT0("renderer_host", "WaitForSurface::WaitForSingleTaskToRun");
if (ui::WindowResizeHelperMac::Get()->WaitForSingleTaskToRun(time_left)) {
if (!view_->ShouldContinueToPauseForFrame())
break;
}
time_left = timeout_time - TimeTicks::Now();
if (time_left <= TimeDelta::FromSeconds(0)) {
TRACE_EVENT0("renderer_host", "WaitForSurface::Timeout");
break;
}
}
}
|
C
|
Chrome
| 0 |
CVE-2015-8324
|
https://www.cvedetails.com/cve/CVE-2015-8324/
| null |
https://github.com/torvalds/linux/commit/744692dc059845b2a3022119871846e74d4f6e11
|
744692dc059845b2a3022119871846e74d4f6e11
|
ext4: use ext4_get_block_write in buffer write
Allocate uninitialized extent before ext4 buffer write and
convert the extent to initialized after io completes.
The purpose is to make sure an extent can only be marked
initialized after it has been written with new data so
we can safely drop the i_mutex lock in ext4 DIO read without
exposing stale data. This helps to improve multi-thread DIO
read performance on high-speed disks.
Skip the nobh and data=journal mount cases to make things simple for now.
Signed-off-by: Jiaying Zhang <[email protected]>
Signed-off-by: "Theodore Ts'o" <[email protected]>
|
static int ext4_writeback_write_end(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
handle_t *handle = ext4_journal_current_handle();
struct inode *inode = mapping->host;
int ret = 0, ret2;
trace_ext4_writeback_write_end(inode, pos, len, copied);
ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
page, fsdata);
copied = ret2;
if (pos + len > inode->i_size && ext4_can_truncate(inode))
/* if we have allocated more blocks and copied
* less. We will have blocks allocated outside
* inode->i_size. So truncate them
*/
ext4_orphan_add(handle, inode);
if (ret2 < 0)
ret = ret2;
ret2 = ext4_journal_stop(handle);
if (!ret)
ret = ret2;
if (pos + len > inode->i_size) {
ext4_truncate_failed_write(inode);
/*
* If truncate failed early the inode might still be
* on the orphan list; we need to make sure the inode
* is removed from the orphan list in that case.
*/
if (inode->i_nlink)
ext4_orphan_del(NULL, inode);
}
return ret ? ret : copied;
}
|
static int ext4_writeback_write_end(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
handle_t *handle = ext4_journal_current_handle();
struct inode *inode = mapping->host;
int ret = 0, ret2;
trace_ext4_writeback_write_end(inode, pos, len, copied);
ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
page, fsdata);
copied = ret2;
if (pos + len > inode->i_size && ext4_can_truncate(inode))
/* if we have allocated more blocks and copied
* less. We will have blocks allocated outside
* inode->i_size. So truncate them
*/
ext4_orphan_add(handle, inode);
if (ret2 < 0)
ret = ret2;
ret2 = ext4_journal_stop(handle);
if (!ret)
ret = ret2;
if (pos + len > inode->i_size) {
ext4_truncate_failed_write(inode);
/*
* If truncate failed early the inode might still be
* on the orphan list; we need to make sure the inode
* is removed from the orphan list in that case.
*/
if (inode->i_nlink)
ext4_orphan_del(NULL, inode);
}
return ret ? ret : copied;
}
|
C
|
linux
| 0 |
CVE-2018-6063
|
https://www.cvedetails.com/cve/CVE-2018-6063/
|
CWE-787
|
https://github.com/chromium/chromium/commit/673ce95d481ea9368c4d4d43ac756ba1d6d9e608
|
673ce95d481ea9368c4d4d43ac756ba1d6d9e608
|
Correct mojo::WrapSharedMemoryHandle usage
Fixes some incorrect uses of mojo::WrapSharedMemoryHandle which
were assuming that the call actually has any control over the memory
protection applied to a handle when mapped.
Where fixing usage is infeasible for this CL, TODOs are added to
annotate follow-up work.
Also updates the API and documentation to (hopefully) improve clarity
and avoid similar mistakes from being made in the future.
BUG=792900
Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
Change-Id: I0578aaa9ca3bfcb01aaf2451315d1ede95458477
Reviewed-on: https://chromium-review.googlesource.com/818282
Reviewed-by: Wei Li <[email protected]>
Reviewed-by: Lei Zhang <[email protected]>
Reviewed-by: John Abd-El-Malek <[email protected]>
Reviewed-by: Daniel Cheng <[email protected]>
Reviewed-by: Sadrul Chowdhury <[email protected]>
Reviewed-by: Yuzhu Shen <[email protected]>
Reviewed-by: Robert Sesek <[email protected]>
Commit-Queue: Ken Rockot <[email protected]>
Cr-Commit-Position: refs/heads/master@{#530268}
|
FromMojom(media::mojom::DecodeError error,
media::JpegDecodeAccelerator::Error* out) {
switch (error) {
case media::mojom::DecodeError::NO_ERRORS:
*out = media::JpegDecodeAccelerator::Error::NO_ERRORS;
return true;
case media::mojom::DecodeError::INVALID_ARGUMENT:
*out = media::JpegDecodeAccelerator::Error::INVALID_ARGUMENT;
return true;
case media::mojom::DecodeError::UNREADABLE_INPUT:
*out = media::JpegDecodeAccelerator::Error::UNREADABLE_INPUT;
return true;
case media::mojom::DecodeError::PARSE_JPEG_FAILED:
*out = media::JpegDecodeAccelerator::Error::PARSE_JPEG_FAILED;
return true;
case media::mojom::DecodeError::UNSUPPORTED_JPEG:
*out = media::JpegDecodeAccelerator::Error::UNSUPPORTED_JPEG;
return true;
case media::mojom::DecodeError::PLATFORM_FAILURE:
*out = media::JpegDecodeAccelerator::Error::PLATFORM_FAILURE;
return true;
}
NOTREACHED();
return false;
}
|
FromMojom(media::mojom::DecodeError error,
media::JpegDecodeAccelerator::Error* out) {
switch (error) {
case media::mojom::DecodeError::NO_ERRORS:
*out = media::JpegDecodeAccelerator::Error::NO_ERRORS;
return true;
case media::mojom::DecodeError::INVALID_ARGUMENT:
*out = media::JpegDecodeAccelerator::Error::INVALID_ARGUMENT;
return true;
case media::mojom::DecodeError::UNREADABLE_INPUT:
*out = media::JpegDecodeAccelerator::Error::UNREADABLE_INPUT;
return true;
case media::mojom::DecodeError::PARSE_JPEG_FAILED:
*out = media::JpegDecodeAccelerator::Error::PARSE_JPEG_FAILED;
return true;
case media::mojom::DecodeError::UNSUPPORTED_JPEG:
*out = media::JpegDecodeAccelerator::Error::UNSUPPORTED_JPEG;
return true;
case media::mojom::DecodeError::PLATFORM_FAILURE:
*out = media::JpegDecodeAccelerator::Error::PLATFORM_FAILURE;
return true;
}
NOTREACHED();
return false;
}
|
C
|
Chrome
| 0 |
CVE-2012-1179
|
https://www.cvedetails.com/cve/CVE-2012-1179/
|
CWE-264
|
https://github.com/torvalds/linux/commit/4a1d704194a441bf83c636004a479e01360ec850
|
4a1d704194a441bf83c636004a479e01360ec850
|
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
commit 1a5a9906d4e8d1976b701f889d8f35d54b928f25 upstream.
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem held in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables to go away from under them, during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped; if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd became a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(&current->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[[email protected]: checkpatch fixes]
Reported-by: Ulrich Obergfell <[email protected]>
Signed-off-by: Andrea Arcangeli <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Dave Jones <[email protected]>
Acked-by: Larry Woodman <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: Mark Salter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
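The fix boils down to a helper that snapshots the pmd into a local variable and only tests the snapshot; a minimal sketch consistent with the message (the barrier is needed only for THP builds, as noted above):
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
	pmd_t pmdval = *pmd;	/* read once into the local stack */
	/*
	 * Compiler barrier: stop the value from being re-read from *pmd
	 * while the checks below run.
	 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	barrier();
#endif
	if (pmd_none(pmdval))
		return 1;
	if (unlikely(pmd_bad(pmdval))) {
		if (!pmd_trans_huge(pmdval))
			pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}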
|
static int add_to_pagemap(unsigned long addr, u64 pfn,
struct pagemapread *pm)
{
pm->buffer[pm->pos++] = pfn;
if (pm->pos >= pm->len)
return PM_END_OF_BUFFER;
return 0;
}
|
static int add_to_pagemap(unsigned long addr, u64 pfn,
struct pagemapread *pm)
{
pm->buffer[pm->pos++] = pfn;
if (pm->pos >= pm->len)
return PM_END_OF_BUFFER;
return 0;
}
|
C
|
linux
| 0 |
CVE-2015-1214
|
https://www.cvedetails.com/cve/CVE-2015-1214/
|
CWE-190
|
https://github.com/chromium/chromium/commit/a81c185f34b34ef8410239506825b185b332c00b
|
a81c185f34b34ef8410239506825b185b332c00b
|
Add data usage tracking for chrome services
Add data usage tracking for captive portal, web resource and signin services
BUG=655749
Review-Url: https://codereview.chromium.org/2643013004
Cr-Commit-Position: refs/heads/master@{#445810}
|
void GaiaCookieManagerService::OnMergeSessionFailure(
const GoogleServiceAuthError& error) {
DCHECK(requests_.front().request_type() ==
GaiaCookieRequestType::ADD_ACCOUNT);
VLOG(1) << "Failed MergeSession"
<< " account=" << requests_.front().account_id()
<< " error=" << error.ToString();
if (++fetcher_retries_ < kMaxFetcherRetries && error.IsTransientError()) {
fetcher_backoff_.InformOfRequest(false);
UMA_HISTOGRAM_ENUMERATION("OAuth2Login.MergeSessionRetry",
error.state(), GoogleServiceAuthError::NUM_STATES);
fetcher_timer_.Start(
FROM_HERE, fetcher_backoff_.GetTimeUntilRelease(),
base::Bind(&SigninClient::DelayNetworkCall,
base::Unretained(signin_client_),
base::Bind(
&GaiaCookieManagerService::StartFetchingMergeSession,
base::Unretained(this))));
return;
}
uber_token_ = std::string();
const std::string account_id = requests_.front().account_id();
UMA_HISTOGRAM_ENUMERATION("OAuth2Login.MergeSessionFailure",
error.state(), GoogleServiceAuthError::NUM_STATES);
HandleNextRequest();
SignalComplete(account_id, error);
}
|
void GaiaCookieManagerService::OnMergeSessionFailure(
const GoogleServiceAuthError& error) {
DCHECK(requests_.front().request_type() ==
GaiaCookieRequestType::ADD_ACCOUNT);
VLOG(1) << "Failed MergeSession"
<< " account=" << requests_.front().account_id()
<< " error=" << error.ToString();
if (++fetcher_retries_ < kMaxFetcherRetries && error.IsTransientError()) {
fetcher_backoff_.InformOfRequest(false);
UMA_HISTOGRAM_ENUMERATION("OAuth2Login.MergeSessionRetry",
error.state(), GoogleServiceAuthError::NUM_STATES);
fetcher_timer_.Start(
FROM_HERE, fetcher_backoff_.GetTimeUntilRelease(),
base::Bind(&SigninClient::DelayNetworkCall,
base::Unretained(signin_client_),
base::Bind(
&GaiaCookieManagerService::StartFetchingMergeSession,
base::Unretained(this))));
return;
}
uber_token_ = std::string();
const std::string account_id = requests_.front().account_id();
UMA_HISTOGRAM_ENUMERATION("OAuth2Login.MergeSessionFailure",
error.state(), GoogleServiceAuthError::NUM_STATES);
HandleNextRequest();
SignalComplete(account_id, error);
}
|
C
|
Chrome
| 0 |
CVE-2017-16544
|
https://www.cvedetails.com/cve/CVE-2017-16544/
|
CWE-94
|
https://git.busybox.net/busybox/commit/?id=c3797d40a1c57352192c6106cc0f435e7d9c11e8
|
c3797d40a1c57352192c6106cc0f435e7d9c11e8
| null |
static bool BB_isalnum_or_underscore(CHAR_T c) {
return ((unsigned)c < 256 && isalnum(c)) || c == '_';
}
|
static bool BB_isalnum_or_underscore(CHAR_T c) {
return ((unsigned)c < 256 && isalnum(c)) || c == '_';
}
|
C
|
busybox
| 0 |
CVE-2015-1793
|
https://www.cvedetails.com/cve/CVE-2015-1793/
|
CWE-254
|
https://git.openssl.org/?p=openssl.git;a=commit;h=9a0db453ba017ebcaccbee933ee6511a9ae4d1c8
|
9a0db453ba017ebcaccbee933ee6511a9ae4d1c8
| null |
STACK_OF(X509) *X509_STORE_CTX_get1_chain(X509_STORE_CTX *ctx)
{
int i;
X509 *x;
STACK_OF(X509) *chain;
if (!ctx->chain || !(chain = sk_X509_dup(ctx->chain)))
return NULL;
for (i = 0; i < sk_X509_num(chain); i++) {
x = sk_X509_value(chain, i);
CRYPTO_add(&x->references, 1, CRYPTO_LOCK_X509);
}
return chain;
}
|
STACK_OF(X509) *X509_STORE_CTX_get1_chain(X509_STORE_CTX *ctx)
{
int i;
X509 *x;
STACK_OF(X509) *chain;
if (!ctx->chain || !(chain = sk_X509_dup(ctx->chain)))
return NULL;
for (i = 0; i < sk_X509_num(chain); i++) {
x = sk_X509_value(chain, i);
CRYPTO_add(&x->references, 1, CRYPTO_LOCK_X509);
}
return chain;
}
|
C
|
openssl
| 0 |
CVE-2018-9490
|
https://www.cvedetails.com/cve/CVE-2018-9490/
|
CWE-704
|
https://android.googlesource.com/platform/external/v8/+/a24543157ae2cdd25da43e20f4e48a07481e6ceb
|
a24543157ae2cdd25da43e20f4e48a07481e6ceb
|
Backport: Fix Object.entries/values with changing elements
Bug: 111274046
Test: m -j proxy_resolver_v8_unittest && adb sync && adb shell \
/data/nativetest64/proxy_resolver_v8_unittest/proxy_resolver_v8_unittest
Change-Id: I705fc512cc5837e9364ed187559cc75d079aa5cb
(cherry picked from commit d8be9a10287afed07705ac8af027d6a46d4def99)
|
static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
FixedArray* parameter_map = FixedArray::cast(holder->elements());
uint32_t length = parameter_map->length() - 2;
if (entry < length) {
return PropertyDetails(kData, NONE, 0, PropertyCellType::kNoCell);
}
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
return ArgumentsAccessor::GetDetailsImpl(arguments, entry - length);
}
|
static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
FixedArray* parameter_map = FixedArray::cast(holder->elements());
uint32_t length = parameter_map->length() - 2;
if (entry < length) {
return PropertyDetails(kData, NONE, 0, PropertyCellType::kNoCell);
}
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
return ArgumentsAccessor::GetDetailsImpl(arguments, entry - length);
}
|
C
|
Android
| 0 |
CVE-2017-14041
|
https://www.cvedetails.com/cve/CVE-2017-14041/
|
CWE-787
|
https://github.com/uclouvain/openjpeg/commit/e5285319229a5d77bf316bb0d3a6cbd3cb8666d9
|
e5285319229a5d77bf316bb0d3a6cbd3cb8666d9
|
pgxtoimage(): fix write stack buffer overflow (#997)
|
int imagetorawl(opj_image_t * image, const char *outfile)
{
return imagetoraw_common(image, outfile, OPJ_FALSE);
}
|
int imagetorawl(opj_image_t * image, const char *outfile)
{
return imagetoraw_common(image, outfile, OPJ_FALSE);
}
|
C
|
openjpeg
| 0 |
CVE-2012-2816
|
https://www.cvedetails.com/cve/CVE-2012-2816/
| null |
https://github.com/chromium/chromium/commit/cd0bd79d6ebdb72183e6f0833673464cc10b3600
|
cd0bd79d6ebdb72183e6f0833673464cc10b3600
|
Convert plugin and GPU process to brokered handle duplication.
BUG=119250
Review URL: https://chromiumcodereview.appspot.com/9958034
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@132303 0039d316-1c4b-4281-b951-d872f2087c98
|
void WebPluginDelegateProxy::CopyFromBackBufferToFrontBuffer(
const gfx::Rect& rect) {
#if defined(OS_MACOSX)
const size_t stride =
skia::PlatformCanvas::StrideForWidth(plugin_rect_.width());
const size_t chunk_size = 4 * rect.width();
DCHECK(back_buffer_dib() != NULL);
uint8* source_data = static_cast<uint8*>(back_buffer_dib()->memory()) +
rect.y() * stride + 4 * rect.x();
DCHECK(front_buffer_dib() != NULL);
uint8* target_data = static_cast<uint8*>(front_buffer_dib()->memory()) +
rect.y() * stride + 4 * rect.x();
for (int row = 0; row < rect.height(); ++row) {
memcpy(target_data, source_data, chunk_size);
source_data += stride;
target_data += stride;
}
#else
BlitCanvasToCanvas(front_buffer_canvas(),
rect,
back_buffer_canvas(),
rect.origin());
#endif
}
|
void WebPluginDelegateProxy::CopyFromBackBufferToFrontBuffer(
const gfx::Rect& rect) {
#if defined(OS_MACOSX)
const size_t stride =
skia::PlatformCanvas::StrideForWidth(plugin_rect_.width());
const size_t chunk_size = 4 * rect.width();
DCHECK(back_buffer_dib() != NULL);
uint8* source_data = static_cast<uint8*>(back_buffer_dib()->memory()) +
rect.y() * stride + 4 * rect.x();
DCHECK(front_buffer_dib() != NULL);
uint8* target_data = static_cast<uint8*>(front_buffer_dib()->memory()) +
rect.y() * stride + 4 * rect.x();
for (int row = 0; row < rect.height(); ++row) {
memcpy(target_data, source_data, chunk_size);
source_data += stride;
target_data += stride;
}
#else
BlitCanvasToCanvas(front_buffer_canvas(),
rect,
back_buffer_canvas(),
rect.origin());
#endif
}
|
C
|
Chrome
| 0 |
CVE-2018-6135
|
https://www.cvedetails.com/cve/CVE-2018-6135/
| null |
https://github.com/chromium/chromium/commit/2ccbb407dccc976ae4bdbaa5ff2f777f4eb0723b
|
2ccbb407dccc976ae4bdbaa5ff2f777f4eb0723b
|
Force a flush of drawing to the widget when a dialog is shown.
BUG=823353
TEST=as in bug
Change-Id: I5da777068fc29c5638ef02d50e59d5d7b2729260
Reviewed-on: https://chromium-review.googlesource.com/971661
Reviewed-by: Ken Buchanan <[email protected]>
Commit-Queue: Avi Drissman <[email protected]>
Cr-Commit-Position: refs/heads/master@{#544518}
|
void WebContentsImpl::UpdateWebContentsVisibility(bool visible) {
if (!did_first_set_visible_) {
if (visible) {
WasShown();
did_first_set_visible_ = true;
}
return;
}
if (visible == should_normally_be_visible_)
return;
if (visible)
WasShown();
else
WasHidden();
}
|
void WebContentsImpl::UpdateWebContentsVisibility(bool visible) {
if (!did_first_set_visible_) {
if (visible) {
WasShown();
did_first_set_visible_ = true;
}
return;
}
if (visible == should_normally_be_visible_)
return;
if (visible)
WasShown();
else
WasHidden();
}
|
C
|
Chrome
| 0 |
CVE-2016-1621
|
https://www.cvedetails.com/cve/CVE-2016-1621/
|
CWE-119
|
https://android.googlesource.com/platform/external/libvpx/+/5a9753fca56f0eeb9f61e342b2fccffc364f9426
|
5a9753fca56f0eeb9f61e342b2fccffc364f9426
|
Merge Conflict Fix CL to lmp-mr1-release for ag/849478
DO NOT MERGE - libvpx: Pull from upstream
Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06
BUG=23452792
Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec
|
void UpdateMD5(::libvpx_test::Decoder *dec, const vpx_codec_cx_pkt_t *pkt,
::libvpx_test::MD5 *md5) {
const vpx_codec_err_t res = dec->DecodeFrame(
reinterpret_cast<uint8_t*>(pkt->data.frame.buf), pkt->data.frame.sz);
if (res != VPX_CODEC_OK) {
abort_ = true;
ASSERT_EQ(VPX_CODEC_OK, res);
}
const vpx_image_t *img = dec->GetDxData().Next();
md5->Add(img);
}
|
void UpdateMD5(::libvpx_test::Decoder *dec, const vpx_codec_cx_pkt_t *pkt,
::libvpx_test::MD5 *md5) {
const vpx_codec_err_t res = dec->DecodeFrame(
reinterpret_cast<uint8_t*>(pkt->data.frame.buf), pkt->data.frame.sz);
if (res != VPX_CODEC_OK) {
abort_ = true;
ASSERT_EQ(VPX_CODEC_OK, res);
}
const vpx_image_t *img = dec->GetDxData().Next();
md5->Add(img);
}
|
C
|
Android
| 0 |
CVE-2018-18344
|
https://www.cvedetails.com/cve/CVE-2018-18344/
|
CWE-20
|
https://github.com/chromium/chromium/commit/c71d8045ce0592cf3f4290744ab57b23c1d1b4c6
|
c71d8045ce0592cf3f4290744ab57b23c1d1b4c6
|
[DevTools] Do not allow Page.setDownloadBehavior for extensions
Bug: 866426
Change-Id: I71b672978e1a8ec779ede49da16b21198567d3a4
Reviewed-on: https://chromium-review.googlesource.com/c/1270007
Commit-Queue: Dmitry Gozman <[email protected]>
Reviewed-by: Devlin <[email protected]>
Cr-Commit-Position: refs/heads/master@{#598004}
|
Response PageHandler::NavigateToHistoryEntry(int entry_id) {
WebContentsImpl* web_contents = GetWebContents();
if (!web_contents)
return Response::InternalError();
NavigationController& controller = web_contents->GetController();
for (int i = 0; i != controller.GetEntryCount(); ++i) {
if (controller.GetEntryAtIndex(i)->GetUniqueID() == entry_id) {
controller.GoToIndex(i);
return Response::OK();
}
}
return Response::InvalidParams("No entry with passed id");
}
|
Response PageHandler::NavigateToHistoryEntry(int entry_id) {
WebContentsImpl* web_contents = GetWebContents();
if (!web_contents)
return Response::InternalError();
NavigationController& controller = web_contents->GetController();
for (int i = 0; i != controller.GetEntryCount(); ++i) {
if (controller.GetEntryAtIndex(i)->GetUniqueID() == entry_id) {
controller.GoToIndex(i);
return Response::OK();
}
}
return Response::InvalidParams("No entry with passed id");
}
|
C
|
Chrome
| 0 |
CVE-2017-12187
|
https://www.cvedetails.com/cve/CVE-2017-12187/
|
CWE-20
|
https://cgit.freedesktop.org/xorg/xserver/commit/?id=cad5a1050b7184d828aef9c1dd151c3ab649d37e
|
cad5a1050b7184d828aef9c1dd151c3ab649d37e
| null |
XineramaXvSetPortAttribute(ClientPtr client)
{
REQUEST(xvSetPortAttributeReq);
PanoramiXRes *port;
int result, i;
REQUEST_SIZE_MATCH(xvSetPortAttributeReq);
result = dixLookupResourceByType((void **) &port, stuff->port,
XvXRTPort, client, DixReadAccess);
if (result != Success)
return result;
FOR_NSCREENS_BACKWARD(i) {
if (port->info[i].id) {
stuff->port = port->info[i].id;
result = ProcXvSetPortAttribute(client);
}
}
return result;
}
|
XineramaXvSetPortAttribute(ClientPtr client)
{
REQUEST(xvSetPortAttributeReq);
PanoramiXRes *port;
int result, i;
REQUEST_SIZE_MATCH(xvSetPortAttributeReq);
result = dixLookupResourceByType((void **) &port, stuff->port,
XvXRTPort, client, DixReadAccess);
if (result != Success)
return result;
FOR_NSCREENS_BACKWARD(i) {
if (port->info[i].id) {
stuff->port = port->info[i].id;
result = ProcXvSetPortAttribute(client);
}
}
return result;
}
|
C
|
xserver
| 0 |
CVE-2017-8061
|
https://www.cvedetails.com/cve/CVE-2017-8061/
|
CWE-119
|
https://github.com/torvalds/linux/commit/67b0503db9c29b04eadfeede6bebbfe5ddad94ef
|
67b0503db9c29b04eadfeede6bebbfe5ddad94ef
|
[media] dvb-usb-firmware: don't do DMA on stack
The buffer allocation for the firmware data was changed in
commit 43fab9793c1f ("[media] dvb-usb: don't use stack for firmware load")
but the same applies for the reset value.
Fixes: 43fab9793c1f ("[media] dvb-usb: don't use stack for firmware load")
Cc: [email protected]
Signed-off-by: Stefan Brüns <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]>
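A minimal sketch of the pattern being applied, with illustrative request/value/index placeholders: usb_control_msg() requires DMA-safe memory, so the one-byte reset value moves from the stack to a kmalloc() buffer.
	u8 *buf = kmalloc(1, GFP_KERNEL);	/* heap memory is DMA-safe */
	if (!buf)
		return -ENOMEM;
	*buf = 0;				/* the reset value */
	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
			      USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      value, index, buf, 1, 5000);
	kfree(buf);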
|
int dvb_usb_get_hexline(const struct firmware *fw, struct hexline *hx,
int *pos)
{
u8 *b = (u8 *) &fw->data[*pos];
int data_offs = 4;
if (*pos >= fw->size)
return 0;
memset(hx,0,sizeof(struct hexline));
hx->len = b[0];
if ((*pos + hx->len + 4) >= fw->size)
return -EINVAL;
hx->addr = b[1] | (b[2] << 8);
hx->type = b[3];
if (hx->type == 0x04) {
/* b[4] and b[5] are the Extended linear address record data field */
hx->addr |= (b[4] << 24) | (b[5] << 16);
/* hx->len -= 2;
data_offs += 2; */
}
memcpy(hx->data,&b[data_offs],hx->len);
hx->chk = b[hx->len + data_offs];
*pos += hx->len + 5;
return *pos;
}
|
int dvb_usb_get_hexline(const struct firmware *fw, struct hexline *hx,
int *pos)
{
u8 *b = (u8 *) &fw->data[*pos];
int data_offs = 4;
if (*pos >= fw->size)
return 0;
memset(hx,0,sizeof(struct hexline));
hx->len = b[0];
if ((*pos + hx->len + 4) >= fw->size)
return -EINVAL;
hx->addr = b[1] | (b[2] << 8);
hx->type = b[3];
if (hx->type == 0x04) {
/* b[4] and b[5] are the Extended linear address record data field */
hx->addr |= (b[4] << 24) | (b[5] << 16);
/* hx->len -= 2;
data_offs += 2; */
}
memcpy(hx->data,&b[data_offs],hx->len);
hx->chk = b[hx->len + data_offs];
*pos += hx->len + 5;
return *pos;
}
|
C
|
linux
| 0 |
CVE-2013-5634
|
https://www.cvedetails.com/cve/CVE-2013-5634/
|
CWE-399
|
https://github.com/torvalds/linux/commit/e8180dcaa8470ceca21109f143876fdcd9fe050a
|
e8180dcaa8470ceca21109f143876fdcd9fe050a
|
ARM: KVM: prevent NULL pointer dereferences with KVM VCPU ioctl
Some ARM KVM VCPU ioctls require the vCPU to be properly initialized
with the KVM_ARM_VCPU_INIT ioctl before being used with further
requests. KVM_RUN checks whether this initialization has been
done, but other ioctls do not.
Namely KVM_GET_REG_LIST will dereference an array with index -1
without initialization and thus leads to a kernel oops.
Fix this by adding checks before executing the ioctl handlers.
[ Removed superfluous comment from static function - Christoffer ]
Changes from v1:
* moved check into a static function with a meaningful name
Signed-off-by: Andre Przywara <[email protected]>
Signed-off-by: Christoffer Dall <[email protected]>
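A sketch of the check the changelog mentions, assuming the target field that KVM_ARM_VCPU_INIT sets; each affected ioctl handler bails out early when it returns false:
static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
{
	/* target holds a valid CPU type only after KVM_ARM_VCPU_INIT */
	return vcpu->arch.target >= 0;
}

/* then, in kvm_arch_vcpu_ioctl(), before KVM_GET_REG_LIST and friends: */
	if (!kvm_vcpu_initialized(vcpu))
		return -ENOEXEC;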
|
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
return 0;
}
|
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
return 0;
}
|
C
|
linux
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/a03d4448faf2c40f4ef444a88cb9aace5b98e8c4
|
a03d4448faf2c40f4ef444a88cb9aace5b98e8c4
|
Introduce background.scripts feature for extension manifests.
This optimizes for the common use case where background pages
just include a reference to one or more script files and no
additional HTML.
BUG=107791
Review URL: http://codereview.chromium.org/9150008
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@117110 0039d316-1c4b-4281-b951-d872f2087c98
|
void TestingAutomationProvider::Reload(int handle,
IPC::Message* reply_message) {
if (tab_tracker_->ContainsHandle(handle)) {
NavigationController* tab = tab_tracker_->GetResource(handle);
Browser* browser = FindAndActivateTab(tab);
if (browser && browser->command_updater()->IsCommandEnabled(IDC_RELOAD)) {
new NavigationNotificationObserver(
tab, this, reply_message, 1, false, false);
browser->Reload(CURRENT_TAB);
return;
}
}
AutomationMsg_Reload::WriteReplyParams(
reply_message, AUTOMATION_MSG_NAVIGATION_ERROR);
Send(reply_message);
}
|
void TestingAutomationProvider::Reload(int handle,
IPC::Message* reply_message) {
if (tab_tracker_->ContainsHandle(handle)) {
NavigationController* tab = tab_tracker_->GetResource(handle);
Browser* browser = FindAndActivateTab(tab);
if (browser && browser->command_updater()->IsCommandEnabled(IDC_RELOAD)) {
new NavigationNotificationObserver(
tab, this, reply_message, 1, false, false);
browser->Reload(CURRENT_TAB);
return;
}
}
AutomationMsg_Reload::WriteReplyParams(
reply_message, AUTOMATION_MSG_NAVIGATION_ERROR);
Send(reply_message);
}
|
C
|
Chrome
| 0 |
CVE-2009-3605
|
https://www.cvedetails.com/cve/CVE-2009-3605/
|
CWE-189
|
https://cgit.freedesktop.org/poppler/poppler/commit/?id=7b2d314a61fd0e12f47c62996cb49ec0d1ba747a
|
7b2d314a61fd0e12f47c62996cb49ec0d1ba747a
| null |
CairoImage::CairoImage (double x1, double y1, double x2, double y2) {
this->image = NULL;
this->x1 = x1;
this->y1 = y1;
this->x2 = x2;
this->y2 = y2;
}
|
CairoImage::CairoImage (double x1, double y1, double x2, double y2) {
this->image = NULL;
this->x1 = x1;
this->y1 = y1;
this->x2 = x2;
this->y2 = y2;
}
|
CPP
|
poppler
| 0 |
CVE-2018-6063
|
https://www.cvedetails.com/cve/CVE-2018-6063/
|
CWE-787
|
https://github.com/chromium/chromium/commit/673ce95d481ea9368c4d4d43ac756ba1d6d9e608
|
673ce95d481ea9368c4d4d43ac756ba1d6d9e608
|
Correct mojo::WrapSharedMemoryHandle usage
Fixes some incorrect uses of mojo::WrapSharedMemoryHandle which
were assuming that the call actually has any control over the memory
protection applied to a handle when mapped.
Where fixing usage is infeasible for this CL, TODOs are added to
annotate follow-up work.
Also updates the API and documentation to (hopefully) improve clarity
and avoid similar mistakes from being made in the future.
BUG=792900
Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
Change-Id: I0578aaa9ca3bfcb01aaf2451315d1ede95458477
Reviewed-on: https://chromium-review.googlesource.com/818282
Reviewed-by: Wei Li <[email protected]>
Reviewed-by: Lei Zhang <[email protected]>
Reviewed-by: John Abd-El-Malek <[email protected]>
Reviewed-by: Daniel Cheng <[email protected]>
Reviewed-by: Sadrul Chowdhury <[email protected]>
Reviewed-by: Yuzhu Shen <[email protected]>
Reviewed-by: Robert Sesek <[email protected]>
Commit-Queue: Ken Rockot <[email protected]>
Cr-Commit-Position: refs/heads/master@{#530268}
|
static void AppendCompositorCommandLineFlags(base::CommandLine* command_line) {
command_line->AppendSwitchASCII(
switches::kNumRasterThreads,
base::IntToString(NumberOfRendererRasterThreads()));
int msaa_sample_count = GpuRasterizationMSAASampleCount();
if (msaa_sample_count >= 0) {
command_line->AppendSwitchASCII(switches::kGpuRasterizationMSAASampleCount,
base::IntToString(msaa_sample_count));
}
if (IsZeroCopyUploadEnabled())
command_line->AppendSwitch(switches::kEnableZeroCopy);
if (!IsPartialRasterEnabled())
command_line->AppendSwitch(switches::kDisablePartialRaster);
if (IsGpuMemoryBufferCompositorResourcesEnabled()) {
command_line->AppendSwitch(
switches::kEnableGpuMemoryBufferCompositorResources);
}
if (IsMainFrameBeforeActivationEnabled())
command_line->AppendSwitch(cc::switches::kEnableMainFrameBeforeActivation);
if (IsCheckerImagingEnabled())
command_line->AppendSwitch(cc::switches::kEnableCheckerImaging);
if (IsCompositorImageAnimationEnabled())
command_line->AppendSwitch(switches::kEnableCompositorImageAnimations);
GpuDataManagerImpl* gpu_data_manager = GpuDataManagerImpl::GetInstance();
DCHECK(gpu_data_manager);
gpu_data_manager->AppendRendererCommandLine(command_line);
if (base::CommandLine::ForCurrentProcess()->HasSwitch(
switches::kEnableSlimmingPaintV2)) {
command_line->AppendSwitch(cc::switches::kEnableLayerLists);
}
}
|
static void AppendCompositorCommandLineFlags(base::CommandLine* command_line) {
command_line->AppendSwitchASCII(
switches::kNumRasterThreads,
base::IntToString(NumberOfRendererRasterThreads()));
int msaa_sample_count = GpuRasterizationMSAASampleCount();
if (msaa_sample_count >= 0) {
command_line->AppendSwitchASCII(switches::kGpuRasterizationMSAASampleCount,
base::IntToString(msaa_sample_count));
}
if (IsZeroCopyUploadEnabled())
command_line->AppendSwitch(switches::kEnableZeroCopy);
if (!IsPartialRasterEnabled())
command_line->AppendSwitch(switches::kDisablePartialRaster);
if (IsGpuMemoryBufferCompositorResourcesEnabled()) {
command_line->AppendSwitch(
switches::kEnableGpuMemoryBufferCompositorResources);
}
if (IsMainFrameBeforeActivationEnabled())
command_line->AppendSwitch(cc::switches::kEnableMainFrameBeforeActivation);
if (IsCheckerImagingEnabled())
command_line->AppendSwitch(cc::switches::kEnableCheckerImaging);
if (IsCompositorImageAnimationEnabled())
command_line->AppendSwitch(switches::kEnableCompositorImageAnimations);
GpuDataManagerImpl* gpu_data_manager = GpuDataManagerImpl::GetInstance();
DCHECK(gpu_data_manager);
gpu_data_manager->AppendRendererCommandLine(command_line);
if (base::CommandLine::ForCurrentProcess()->HasSwitch(
switches::kEnableSlimmingPaintV2)) {
command_line->AppendSwitch(cc::switches::kEnableLayerLists);
}
}
|
C
|
Chrome
| 0 |
CVE-2016-5219
|
https://www.cvedetails.com/cve/CVE-2016-5219/
|
CWE-416
|
https://github.com/chromium/chromium/commit/a4150b688a754d3d10d2ca385155b1c95d77d6ae
|
a4150b688a754d3d10d2ca385155b1c95d77d6ae
|
Add GL_PROGRAM_COMPLETION_QUERY_CHROMIUM
This makes the query of GL_COMPLETION_STATUS_KHR to programs much
cheaper by minimizing the round-trip to the GPU thread.
Bug: 881152, 957001
Change-Id: Iadfa798af29225e752c710ca5c25f50b3dd3101a
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1586630
Commit-Queue: Kenneth Russell <[email protected]>
Reviewed-by: Kentaro Hara <[email protected]>
Reviewed-by: Geoff Lang <[email protected]>
Reviewed-by: Kenneth Russell <[email protected]>
Cr-Commit-Position: refs/heads/master@{#657568}
|
void GLES2DecoderPassthroughImpl::EmulatedDefaultFramebuffer::Destroy(
bool have_context) {
if (have_context) {
api->glDeleteFramebuffersEXTFn(1, &framebuffer_service_id);
framebuffer_service_id = 0;
api->glDeleteRenderbuffersEXTFn(1, &color_buffer_service_id);
color_buffer_service_id = 0;
api->glDeleteRenderbuffersEXTFn(1, &depth_stencil_buffer_service_id);
color_buffer_service_id = 0;
api->glDeleteRenderbuffersEXTFn(1, &depth_buffer_service_id);
depth_buffer_service_id = 0;
api->glDeleteRenderbuffersEXTFn(1, &stencil_buffer_service_id);
stencil_buffer_service_id = 0;
}
if (color_texture) {
color_texture->Destroy(have_context);
}
}
|
void GLES2DecoderPassthroughImpl::EmulatedDefaultFramebuffer::Destroy(
bool have_context) {
if (have_context) {
api->glDeleteFramebuffersEXTFn(1, &framebuffer_service_id);
framebuffer_service_id = 0;
api->glDeleteRenderbuffersEXTFn(1, &color_buffer_service_id);
color_buffer_service_id = 0;
api->glDeleteRenderbuffersEXTFn(1, &depth_stencil_buffer_service_id);
color_buffer_service_id = 0;
api->glDeleteRenderbuffersEXTFn(1, &depth_buffer_service_id);
depth_buffer_service_id = 0;
api->glDeleteRenderbuffersEXTFn(1, &stencil_buffer_service_id);
stencil_buffer_service_id = 0;
}
if (color_texture) {
color_texture->Destroy(have_context);
}
}
|
C
|
Chrome
| 0 |
CVE-2011-3964
|
https://www.cvedetails.com/cve/CVE-2011-3964/
| null |
https://github.com/chromium/chromium/commit/0c14577c9905bd8161159ec7eaac810c594508d0
|
0c14577c9905bd8161159ec7eaac810c594508d0
|
Change omnibox behavior when stripping javascript schema to navigate after stripping the schema on drag drop.
BUG=109245
TEST=N/A
Review URL: http://codereview.chromium.org/9116016
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@116692 0039d316-1c4b-4281-b951-d872f2087c98
|
bool OmniboxViewWin::IsItemForCommandIdDynamic(int command_id) const {
return command_id == IDS_PASTE_AND_GO;
}
|
bool OmniboxViewWin::IsItemForCommandIdDynamic(int command_id) const {
return command_id == IDS_PASTE_AND_GO;
}
|
C
|
Chrome
| 0 |
CVE-2018-6111
|
https://www.cvedetails.com/cve/CVE-2018-6111/
|
CWE-20
|
https://github.com/chromium/chromium/commit/3c8e4852477d5b1e2da877808c998dc57db9460f
|
3c8e4852477d5b1e2da877808c998dc57db9460f
|
DevTools: speculative fix for crash in NetworkHandler::Disable
This keeps BrowserContext* and StoragePartition* instead of
RenderProcessHost* in an attempt to resolve UAF of RenderProcessHost
upon closure of DevTools front-end.
Bug: 801117, 783067, 780694
Change-Id: I6c2cca60cc0c29f0949d189cf918769059f80c1b
Reviewed-on: https://chromium-review.googlesource.com/876657
Commit-Queue: Andrey Kosyakov <[email protected]>
Reviewed-by: Dmitry Gozman <[email protected]>
Cr-Commit-Position: refs/heads/master@{#531157}
|
void ServiceWorkerHandler::OnWorkerRegistrationUpdated(
const std::vector<ServiceWorkerRegistrationInfo>& registrations) {
using Registration = ServiceWorker::ServiceWorkerRegistration;
std::unique_ptr<protocol::Array<Registration>> result =
protocol::Array<Registration>::create();
for (const auto& registration : registrations) {
result->addItem(Registration::Create()
.SetRegistrationId(
base::Int64ToString(registration.registration_id))
.SetScopeURL(registration.pattern.spec())
.SetIsDeleted(registration.delete_flag ==
ServiceWorkerRegistrationInfo::IS_DELETED)
.Build());
}
frontend_->WorkerRegistrationUpdated(std::move(result));
}
|
void ServiceWorkerHandler::OnWorkerRegistrationUpdated(
const std::vector<ServiceWorkerRegistrationInfo>& registrations) {
using Registration = ServiceWorker::ServiceWorkerRegistration;
std::unique_ptr<protocol::Array<Registration>> result =
protocol::Array<Registration>::create();
for (const auto& registration : registrations) {
result->addItem(Registration::Create()
.SetRegistrationId(
base::Int64ToString(registration.registration_id))
.SetScopeURL(registration.pattern.spec())
.SetIsDeleted(registration.delete_flag ==
ServiceWorkerRegistrationInfo::IS_DELETED)
.Build());
}
frontend_->WorkerRegistrationUpdated(std::move(result));
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/f2f703241635fa96fa630b83afcc9a330cc21b7e
|
f2f703241635fa96fa630b83afcc9a330cc21b7e
|
CrOS Shelf: Get rid of 'split view' mode for shelf background
In the new UI, "maximized" and "split view" are treated the same in
specs, so there is no more need for a separate "split view" mode. This
folds it into the "maximized" mode.
Note that the only thing that _seems_ different in
shelf_background_animator is ShelfBackgroundAnimator::kMaxAlpha (255)
vs kShelfTranslucentMaximizedWindow (254), which should be virtually
impossible to distinguish.
This CL therefore does not have any visual effect (and doesn't
directly fix the linked bug, but is relevant).
Bug: 899289
Change-Id: I60947338176ac15ca016b1ba4edf13d16362cb24
Reviewed-on: https://chromium-review.googlesource.com/c/1469741
Commit-Queue: Xiyuan Xia <[email protected]>
Reviewed-by: Xiyuan Xia <[email protected]>
Auto-Submit: Manu Cornet <[email protected]>
Cr-Commit-Position: refs/heads/master@{#631752}
|
void ShelfBackgroundAnimator::NotifyObservers() {
for (auto& observer : observers_)
NotifyObserver(&observer);
}
|
void ShelfBackgroundAnimator::NotifyObservers() {
for (auto& observer : observers_)
NotifyObserver(&observer);
}
|
C
|
Chrome
| 0 |
CVE-2014-3145
|
https://www.cvedetails.com/cve/CVE-2014-3145/
|
CWE-189
|
https://github.com/torvalds/linux/commit/05ab8f2647e4221cbdb3856dd7d32bd5407316b3
|
05ab8f2647e4221cbdb3856dd7d32bd5407316b3
|
filter: prevent nla extensions to peek beyond the end of the message
The BPF_S_ANC_NLATTR and BPF_S_ANC_NLATTR_NEST extensions fail to check
for a minimal message length before testing the supplied offset to be
within the bounds of the message. This allows the subtraction of the nla
header to underflow and therefore -- as the data type is unsigned --
allowing far to big offset and length values for the search of the
netlink attribute.
The remainder calculation for the BPF_S_ANC_NLATTR_NEST extension is
also wrong. It has the minuend and subtrahend mixed up, therefore
calculates a huge length value, allowing to overrun the end of the
message while looking for the netlink attribute.
The following three BPF snippets will trigger the bugs when attached to
a UNIX datagram socket and parsing a message with length 1, 2 or 3.
,-[ PoC for missing size check in BPF_S_ANC_NLATTR ]--
| ld #0x87654321
| ldx #42
| ld #nla
| ret a
`---
,-[ PoC for the same bug in BPF_S_ANC_NLATTR_NEST ]--
| ld #0x87654321
| ldx #42
| ld #nlan
| ret a
`---
,-[ PoC for wrong remainder calculation in BPF_S_ANC_NLATTR_NEST ]--
| ; (needs a fake netlink header at offset 0)
| ld #0
| ldx #42
| ld #nlan
| ret a
`---
Fix the first issue by ensuring the message length fulfills the minimal
size constrains of a nla header. Fix the second bug by getting the math
for the remainder calculation right.
Fixes: 4738c1db15 ("[SKFILTER]: Add SKF_ADF_NLATTR instruction")
Fixes: d214c7537b ("filter: add SKF_AD_NLATTR_NEST to look for nested..")
Cc: Patrick McHardy <[email protected]>
Cc: Pablo Neira Ayuso <[email protected]>
Signed-off-by: Mathias Krause <[email protected]>
Acked-by: Daniel Borkmann <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
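Condensing the two corrections into one sketch, with the classic-filter names (A is the supplied attribute offset, X the attribute type, skb the message); the nested lookup at the end stands in for the NLATTR_NEST case:
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;
	if (skb->len < sizeof(struct nlattr))		/* new minimal-length check */
		return 0;
	if (A > skb->len - sizeof(struct nlattr))	/* subtraction can no longer underflow */
		return 0;

	nla = (struct nlattr *)&skb->data[A];
	if (nla->nla_len > skb->len - A)	/* was: A - skb->len (operands mixed up) */
		return 0;

	nla = nla_find_nested(nla, X);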
|
static int check_load_and_stores(struct sock_filter *filter, int flen)
{
u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
int pc, ret = 0;
BUILD_BUG_ON(BPF_MEMWORDS > 16);
masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
if (!masks)
return -ENOMEM;
memset(masks, 0xff, flen * sizeof(*masks));
for (pc = 0; pc < flen; pc++) {
memvalid &= masks[pc];
switch (filter[pc].code) {
case BPF_S_ST:
case BPF_S_STX:
memvalid |= (1 << filter[pc].k);
break;
case BPF_S_LD_MEM:
case BPF_S_LDX_MEM:
if (!(memvalid & (1 << filter[pc].k))) {
ret = -EINVAL;
goto error;
}
break;
case BPF_S_JMP_JA:
/* a jump must set masks on target */
masks[pc + 1 + filter[pc].k] &= memvalid;
memvalid = ~0;
break;
case BPF_S_JMP_JEQ_K:
case BPF_S_JMP_JEQ_X:
case BPF_S_JMP_JGE_K:
case BPF_S_JMP_JGE_X:
case BPF_S_JMP_JGT_K:
case BPF_S_JMP_JGT_X:
case BPF_S_JMP_JSET_X:
case BPF_S_JMP_JSET_K:
/* a jump must set masks on targets */
masks[pc + 1 + filter[pc].jt] &= memvalid;
masks[pc + 1 + filter[pc].jf] &= memvalid;
memvalid = ~0;
break;
}
}
error:
kfree(masks);
return ret;
}
|
static int check_load_and_stores(struct sock_filter *filter, int flen)
{
u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
int pc, ret = 0;
BUILD_BUG_ON(BPF_MEMWORDS > 16);
masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
if (!masks)
return -ENOMEM;
memset(masks, 0xff, flen * sizeof(*masks));
for (pc = 0; pc < flen; pc++) {
memvalid &= masks[pc];
switch (filter[pc].code) {
case BPF_S_ST:
case BPF_S_STX:
memvalid |= (1 << filter[pc].k);
break;
case BPF_S_LD_MEM:
case BPF_S_LDX_MEM:
if (!(memvalid & (1 << filter[pc].k))) {
ret = -EINVAL;
goto error;
}
break;
case BPF_S_JMP_JA:
/* a jump must set masks on target */
masks[pc + 1 + filter[pc].k] &= memvalid;
memvalid = ~0;
break;
case BPF_S_JMP_JEQ_K:
case BPF_S_JMP_JEQ_X:
case BPF_S_JMP_JGE_K:
case BPF_S_JMP_JGE_X:
case BPF_S_JMP_JGT_K:
case BPF_S_JMP_JGT_X:
case BPF_S_JMP_JSET_X:
case BPF_S_JMP_JSET_K:
/* a jump must set masks on targets */
masks[pc + 1 + filter[pc].jt] &= memvalid;
masks[pc + 1 + filter[pc].jf] &= memvalid;
memvalid = ~0;
break;
}
}
error:
kfree(masks);
return ret;
}
|
C
|
linux
| 0 |
CVE-2015-1352
|
https://www.cvedetails.com/cve/CVE-2015-1352/
| null |
https://git.php.net/?p=php-src.git;a=commit;h=124fb22a13fafa3648e4e15b4f207c7096d8155e
|
124fb22a13fafa3648e4e15b4f207c7096d8155e
| null |
PHP_FUNCTION(pg_num_rows)
{
php_pgsql_get_result_info(INTERNAL_FUNCTION_PARAM_PASSTHRU,PHP_PG_NUM_ROWS);
}
|
PHP_FUNCTION(pg_num_rows)
{
php_pgsql_get_result_info(INTERNAL_FUNCTION_PARAM_PASSTHRU,PHP_PG_NUM_ROWS);
}
|
C
|
php
| 0 |
CVE-2016-5189
|
https://www.cvedetails.com/cve/CVE-2016-5189/
|
CWE-284
|
https://github.com/chromium/chromium/commit/2440e872debd68ae7c2a8bf9ddb34df2cce378cd
|
2440e872debd68ae7c2a8bf9ddb34df2cce378cd
|
[GCPW] Disallow sign in of consumer accounts when mdm is enabled.
Unless the registry key "mdm_aca" is explicitly set to 1, always
fail sign in of consumer accounts when mdm enrollment is enabled.
Consumer accounts are defined as accounts with gmail.com or
googlemail.com domain.
Bug: 944049
Change-Id: Icb822f3737d90931de16a8d3317616dd2b159edd
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1532903
Commit-Queue: Tien Mai <[email protected]>
Reviewed-by: Roger Tawa <[email protected]>
Cr-Commit-Position: refs/heads/master@{#646278}
|
HRESULT CGaiaCredentialBase::ReportError(LONG status,
LONG substatus,
BSTR status_text) {
USES_CONVERSION;
LOGFN(INFO);
result_status_ = status;
TerminateLogonProcess();
UpdateSubmitButtonInteractiveState();
DisplayErrorInUI(status, STATUS_SUCCESS, status_text);
return provider_->OnUserAuthenticated(nullptr, CComBSTR(), CComBSTR(),
CComBSTR(), FALSE);
}
|
HRESULT CGaiaCredentialBase::ReportError(LONG status,
LONG substatus,
BSTR status_text) {
USES_CONVERSION;
LOGFN(INFO);
result_status_ = status;
TerminateLogonProcess();
UpdateSubmitButtonInteractiveState();
DisplayErrorInUI(status, STATUS_SUCCESS, status_text);
return provider_->OnUserAuthenticated(nullptr, CComBSTR(), CComBSTR(),
CComBSTR(), FALSE);
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/a0af50481db56aa780942e8595a20c36b2c34f5c
|
a0af50481db56aa780942e8595a20c36b2c34f5c
|
Build fix following bug #30696.
Patch by Gavin Barraclough <[email protected]> on 2009-10-22
Reviewed by NOBODY (build fix).
* WebCoreSupport/FrameLoaderClientGtk.cpp:
(WebKit::FrameLoaderClient::windowObjectCleared):
* webkit/webkitwebframe.cpp:
(webkit_web_frame_get_global_context):
git-svn-id: svn://svn.chromium.org/blink/trunk@49964 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
static void loadDone(WebKitWebFrame* frame, bool didSucceed)
{
g_signal_emit_by_name(frame, "load-done", didSucceed);
notifyStatus(frame, WEBKIT_LOAD_FINISHED);
}
|
static void loadDone(WebKitWebFrame* frame, bool didSucceed)
{
g_signal_emit_by_name(frame, "load-done", didSucceed);
notifyStatus(frame, WEBKIT_LOAD_FINISHED);
}
|
C
|
Chrome
| 0 |
CVE-2019-3817
|
https://www.cvedetails.com/cve/CVE-2019-3817/
|
CWE-416
|
https://github.com/rpm-software-management/libcomps/commit/e3a5d056633677959ad924a51758876d415e7046
|
e3a5d056633677959ad924a51758876d415e7046
|
Fix UAF in comps_objmrtree_unite function
The added field is not used at all in many places and is probably a
left-over from some copy-paste.
|
COMPS_Object* comps_objrtree_get_x(COMPS_ObjRTree * rt, const char * key) {
return __comps_objrtree_get(rt, key);
}
|
COMPS_Object* comps_objrtree_get_x(COMPS_ObjRTree * rt, const char * key) {
return __comps_objrtree_get(rt, key);
}
|
C
|
libcomps
| 0 |
CVE-2016-2342
|
https://www.cvedetails.com/cve/CVE-2016-2342/
|
CWE-119
|
https://git.savannah.gnu.org/cgit/quagga.git/commit/?id=a3bc7e9400b214a0f078fdb19596ba54214a1442
|
a3bc7e9400b214a0f078fdb19596ba54214a1442
| null |
DEFUN (show_ip_bgp_vpnv4_rd,
show_ip_bgp_vpnv4_rd_cmd,
"show ip bgp vpnv4 rd ASN:nn_or_IP-address:nn",
SHOW_STR
IP_STR
BGP_STR
"Display VPNv4 NLRI specific information\n"
"Display information for a route distinguisher\n"
"VPN Route Distinguisher\n")
{
int ret;
struct prefix_rd prd;
ret = str2prefix_rd (argv[0], &prd);
if (! ret)
{
vty_out (vty, "%% Malformed Route Distinguisher%s", VTY_NEWLINE);
return CMD_WARNING;
}
return bgp_show_mpls_vpn (vty, &prd, bgp_show_type_normal, NULL, 0);
}
|
DEFUN (show_ip_bgp_vpnv4_rd,
show_ip_bgp_vpnv4_rd_cmd,
"show ip bgp vpnv4 rd ASN:nn_or_IP-address:nn",
SHOW_STR
IP_STR
BGP_STR
"Display VPNv4 NLRI specific information\n"
"Display information for a route distinguisher\n"
"VPN Route Distinguisher\n")
{
int ret;
struct prefix_rd prd;
ret = str2prefix_rd (argv[0], &prd);
if (! ret)
{
vty_out (vty, "%% Malformed Route Distinguisher%s", VTY_NEWLINE);
return CMD_WARNING;
}
return bgp_show_mpls_vpn (vty, &prd, bgp_show_type_normal, NULL, 0);
}
|
C
|
savannah
| 0 |
CVE-2018-11380
|
https://www.cvedetails.com/cve/CVE-2018-11380/
|
CWE-125
|
https://github.com/radare/radare2/commit/60208765887f5f008b3b9a883f3addc8bdb9c134
|
60208765887f5f008b3b9a883f3addc8bdb9c134
|
Fix #9970 - heap oobread in mach0 parser (#10026)
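A minimal sketch of the bound check the fix adds around the indirect symbol table; find_sym() and its parameters are illustrative stand-ins for the Mach-O parser's fields, not radare2's actual API.
#include <stddef.h>

/* Validate the indirect-symbol index before using it, instead of
 * trusting reserved1 from the file and reading past the table. */
static int find_sym(const int *indirectsyms, size_t nindirect,
                    size_t reserved1, size_t nslots, int idx)
{
    for (size_t j = 0; j < nslots; j++) {
        size_t indidx = reserved1 + j;
        if (indidx >= nindirect)   /* previously read unchecked: oob */
            break;
        if (indirectsyms[indidx] == idx)
            return (int)j;
    }
    return -1;
}

int main(void)
{
    int syms[4] = { 9, 8, 7, 6 };
    /* reserved1 points past the table: we stop instead of reading oob */
    return find_sym(syms, 4, 100, 8, 7) == -1 ? 0 : 1;
}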
|
static int parse_import_ptr(struct MACH0_(obj_t)* bin, struct reloc_t *reloc, int idx) {
int i, j, sym, wordsize;
ut32 stype;
wordsize = MACH0_(get_bits)(bin) / 8;
if (idx < 0 || idx >= bin->nsymtab) {
return 0;
}
if ((bin->symtab[idx].n_desc & REFERENCE_TYPE) == REFERENCE_FLAG_UNDEFINED_LAZY) {
stype = S_LAZY_SYMBOL_POINTERS;
} else {
stype = S_NON_LAZY_SYMBOL_POINTERS;
}
reloc->offset = 0;
reloc->addr = 0;
reloc->addend = 0;
#define CASE(T) case (T / 8): reloc->type = R_BIN_RELOC_ ## T; break
switch (wordsize) {
CASE(8);
CASE(16);
CASE(32);
CASE(64);
default: return false;
}
#undef CASE
for (i = 0; i < bin->nsects; i++) {
if ((bin->sects[i].flags & SECTION_TYPE) == stype) {
for (j = 0, sym = -1; bin->sects[i].reserved1 + j < bin->nindirectsyms; j++) {
int indidx = bin->sects[i].reserved1 + j;
if (indidx < 0 || indidx >= bin->nindirectsyms) {
break;
}
if (idx == bin->indirectsyms[indidx]) {
sym = j;
break;
}
}
reloc->offset = sym == -1 ? 0 : bin->sects[i].offset + sym * wordsize;
reloc->addr = sym == -1 ? 0 : bin->sects[i].addr + sym * wordsize;
return true;
}
}
return false;
}
|
static int parse_import_ptr(struct MACH0_(obj_t)* bin, struct reloc_t *reloc, int idx) {
int i, j, sym, wordsize;
ut32 stype;
wordsize = MACH0_(get_bits)(bin) / 8;
if (idx < 0 || idx >= bin->nsymtab) {
return 0;
}
if ((bin->symtab[idx].n_desc & REFERENCE_TYPE) == REFERENCE_FLAG_UNDEFINED_LAZY) {
stype = S_LAZY_SYMBOL_POINTERS;
} else {
stype = S_NON_LAZY_SYMBOL_POINTERS;
}
reloc->offset = 0;
reloc->addr = 0;
reloc->addend = 0;
#define CASE(T) case (T / 8): reloc->type = R_BIN_RELOC_ ## T; break
switch (wordsize) {
CASE(8);
CASE(16);
CASE(32);
CASE(64);
default: return false;
}
#undef CASE
for (i = 0; i < bin->nsects; i++) {
if ((bin->sects[i].flags & SECTION_TYPE) == stype) {
for (j=0, sym=-1; bin->sects[i].reserved1+j < bin->nindirectsyms; j++)
if (idx == bin->indirectsyms[bin->sects[i].reserved1 + j]) {
sym = j;
break;
}
reloc->offset = sym == -1 ? 0 : bin->sects[i].offset + sym * wordsize;
reloc->addr = sym == -1 ? 0 : bin->sects[i].addr + sym * wordsize;
return true;
}
}
return false;
}
|
C
|
radare2
| 1 |
CVE-2017-5120
|
https://www.cvedetails.com/cve/CVE-2017-5120/
| null |
https://github.com/chromium/chromium/commit/b7277af490d28ac7f802c015bb0ff31395768556
|
b7277af490d28ac7f802c015bb0ff31395768556
|
bindings: Support "attribute FrozenArray<T>?"
Adds a quick hack to support a case of "attribute FrozenArray<T>?".
Bug: 1028047
Change-Id: Ib3cecc4beb6bcc0fb0dbc667aca595454cc90c86
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1933866
Reviewed-by: Hitoshi Yoshida <[email protected]>
Commit-Queue: Yuki Shiino <[email protected]>
Cr-Commit-Position: refs/heads/master@{#718676}
|
static void NamedPropertyEnumerator(const v8::PropertyCallbackInfo<v8::Array>& info) {
ExceptionState exception_state(
info.GetIsolate(),
ExceptionState::kEnumerationContext,
"TestObject");
TestObject* impl = V8TestObject::ToImpl(info.Holder());
Vector<String> names;
impl->NamedPropertyEnumerator(names, exception_state);
if (exception_state.HadException())
return;
V8SetReturnValue(info, ToV8(names, info.Holder(), info.GetIsolate()).As<v8::Array>());
}
|
static void NamedPropertyEnumerator(const v8::PropertyCallbackInfo<v8::Array>& info) {
ExceptionState exception_state(
info.GetIsolate(),
ExceptionState::kEnumerationContext,
"TestObject");
TestObject* impl = V8TestObject::ToImpl(info.Holder());
Vector<String> names;
impl->NamedPropertyEnumerator(names, exception_state);
if (exception_state.HadException())
return;
V8SetReturnValue(info, ToV8(names, info.Holder(), info.GetIsolate()).As<v8::Array>());
}
|
C
|
Chrome
| 0 |
CVE-2012-5111
|
https://www.cvedetails.com/cve/CVE-2012-5111/
| null |
https://github.com/chromium/chromium/commit/ef97ce340c462d5212336f09bf8075d1cb10faa4
|
ef97ce340c462d5212336f09bf8075d1cb10faa4
|
Handle crashing Pepper plug-ins the same as crashing NPAPI plug-ins.
BUG=151895
Review URL: https://chromiumcodereview.appspot.com/10956065
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@158364 0039d316-1c4b-4281-b951-d872f2087c98
|
void PluginInfoMessageFilter::OnDestruct() const {
const_cast<PluginInfoMessageFilter*>(this)->
weak_ptr_factory_.DetachFromThread();
const_cast<PluginInfoMessageFilter*>(this)->
weak_ptr_factory_.InvalidateWeakPtrs();
content::BrowserThread::DeleteOnUIThread::Destruct(this);
}
|
void PluginInfoMessageFilter::OnDestruct() const {
const_cast<PluginInfoMessageFilter*>(this)->
weak_ptr_factory_.DetachFromThread();
const_cast<PluginInfoMessageFilter*>(this)->
weak_ptr_factory_.InvalidateWeakPtrs();
content::BrowserThread::DeleteOnUIThread::Destruct(this);
}
|
C
|
Chrome
| 0 |
CVE-2015-8816
|
https://www.cvedetails.com/cve/CVE-2015-8816/
| null |
https://github.com/torvalds/linux/commit/e50293ef9775c5f1cf3fcc093037dd6a8c5684ea
|
e50293ef9775c5f1cf3fcc093037dd6a8c5684ea
|
USB: fix invalid memory access in hub_activate()
Commit 8520f38099cc ("USB: change hub initialization sleeps to
delayed_work") changed the hub_activate() routine to make part of it
run in a workqueue. However, the commit failed to take a reference to
the usb_hub structure or to lock the hub interface while doing so. As
a result, if a hub is plugged in and quickly unplugged before the work
routine can run, the routine will try to access memory that has been
deallocated. Or, if the hub is unplugged while the routine is
running, the memory may be deallocated while it is in active use.
This patch fixes the problem by taking a reference to the usb_hub at
the start of hub_activate() and releasing it at the end (when the work
is finished), and by locking the hub interface while the work routine
is running. It also adds a check at the start of the routine to see
if the hub has already been disconnected, in which case nothing should be
done.
Signed-off-by: Alan Stern <[email protected]>
Reported-by: Alexandru Cornea <[email protected]>
Tested-by: Alexandru Cornea <[email protected]>
Fixes: 8520f38099cc ("USB: change hub initialization sleeps to delayed_work")
CC: <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
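A minimal user-space sketch of the lifetime pattern this commit describes: take a reference before the delayed work runs, bail out if the hub was disconnected in the meantime, and drop the reference when the work finishes. The hub type and helpers are illustrative, not the kernel's.
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

/* Illustrative stand-ins for the kernel objects. */
struct hub {
    atomic_int refcount;
    bool disconnected;
};

static void hub_get(struct hub *h) { atomic_fetch_add(&h->refcount, 1); }

static void hub_put(struct hub *h)
{
    if (atomic_fetch_sub(&h->refcount, 1) == 1)
        free(h);                   /* last reference frees the memory */
}

/* Delayed-work body: must not touch freed memory even if the hub was
 * unplugged between queuing and execution. */
static void hub_activate_work(struct hub *h)
{
    /* the real driver also locks the hub interface here */
    if (!h->disconnected) {
        /* ... initialization that may sleep ... */
    }
    hub_put(h);                    /* release the reference taken below */
}

static void hub_activate(struct hub *h)
{
    hub_get(h);                    /* keep h alive until the work runs */
    /* queued via queue_delayed_work() in the kernel; called directly
     * here for illustration */
    hub_activate_work(h);
}

int main(void)
{
    struct hub *h = malloc(sizeof(*h));
    if (!h)
        return 1;
    atomic_init(&h->refcount, 1);  /* the caller's own reference */
    h->disconnected = false;
    hub_activate(h);
    hub_put(h);                    /* drop the caller's reference */
    return 0;
}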
|
static void update_devnum(struct usb_device *udev, int devnum)
{
/* The address for a WUSB device is managed by wusbcore. */
if (!udev->wusb)
udev->devnum = devnum;
}
|
static void update_devnum(struct usb_device *udev, int devnum)
{
/* The address for a WUSB device is managed by wusbcore. */
if (!udev->wusb)
udev->devnum = devnum;
}
|
C
|
linux
| 0 |
CVE-2017-5120
|
https://www.cvedetails.com/cve/CVE-2017-5120/
| null |
https://github.com/chromium/chromium/commit/b7277af490d28ac7f802c015bb0ff31395768556
|
b7277af490d28ac7f802c015bb0ff31395768556
|
bindings: Support "attribute FrozenArray<T>?"
Adds a quick hack to support a case of "attribute FrozenArray<T>?".
Bug: 1028047
Change-Id: Ib3cecc4beb6bcc0fb0dbc667aca595454cc90c86
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1933866
Reviewed-by: Hitoshi Yoshida <[email protected]>
Commit-Queue: Yuki Shiino <[email protected]>
Cr-Commit-Position: refs/heads/master@{#718676}
|
static void OverloadedPerWorldBindingsMethod1Method(const v8::FunctionCallbackInfo<v8::Value>& info) {
TestObject* impl = V8TestObject::ToImpl(info.Holder());
impl->overloadedPerWorldBindingsMethod();
}
|
static void OverloadedPerWorldBindingsMethod1Method(const v8::FunctionCallbackInfo<v8::Value>& info) {
TestObject* impl = V8TestObject::ToImpl(info.Holder());
impl->overloadedPerWorldBindingsMethod();
}
|
C
|
Chrome
| 0 |
CVE-2015-7990
|
https://www.cvedetails.com/cve/CVE-2015-7990/
|
CWE-362
|
https://github.com/torvalds/linux/commit/8c7188b23474cca017b3ef354c4a58456f68303a
|
8c7188b23474cca017b3ef354c4a58456f68303a
|
RDS: fix race condition when sending a message on unbound socket
Sasha's found a NULL pointer dereference in the RDS connection code when
sending a message to an apparently unbound socket. The problem is caused
by the code checking if the socket is bound in rds_sendmsg(), which checks
the rs_bound_addr field without taking a lock on the socket. This opens a
race where rs_bound_addr is temporarily set but the transport is not yet
set in rds_bind(), leading to a NULL pointer dereference when trying to
dereference 'trans' in __rds_conn_create().
Vegard wrote a reproducer for this issue, so kindly ask him to share if
you're interested.
I cannot reproduce the NULL pointer dereference using Vegard's reproducer
with this patch, whereas I could without.
Complete earlier incomplete fix to CVE-2015-6937:
74e98eb08588 ("RDS: verify the underlying transport exists before creating a connection")
Cc: David S. Miller <[email protected]>
Cc: [email protected]
Reviewed-by: Vegard Nossum <[email protected]>
Reviewed-by: Sasha Levin <[email protected]>
Acked-by: Santosh Shilimkar <[email protected]>
Signed-off-by: Quentin Casasnovas <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
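The race reduces to reading the bound flag outside the lock that a concurrent bind() holds while it is still building the socket state. A minimal sketch of the safe ordering, with illustrative names in place of the RDS structures:
#include <pthread.h>
#include <stddef.h>

/* Illustrative socket state: bound_addr and transport are set together
 * under the lock by bind(); sendmsg() must observe both or neither. */
struct sock_state {
    pthread_mutex_t lock;
    unsigned int bound_addr;       /* nonzero once bound */
    void *transport;               /* valid only when bound_addr != 0 */
};

static int do_send(struct sock_state *s)
{
    void *trans;

    pthread_mutex_lock(&s->lock);
    if (!s->bound_addr) {          /* checked under the lock, so a     */
        pthread_mutex_unlock(&s->lock);
        return -1;                 /* half-built bind is never visible */
    }
    trans = s->transport;          /* guaranteed valid here */
    pthread_mutex_unlock(&s->lock);

    (void)trans;                   /* ... create the connection ... */
    return 0;
}

int main(void)
{
    struct sock_state s = { PTHREAD_MUTEX_INITIALIZER, 0, NULL };
    return do_send(&s) == -1 ? 0 : 1;   /* unbound send is rejected */
}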
|
struct rds_connection *rds_conn_create(struct net *net,
__be32 laddr, __be32 faddr,
struct rds_transport *trans, gfp_t gfp)
{
return __rds_conn_create(net, laddr, faddr, trans, gfp, 0);
}
|
struct rds_connection *rds_conn_create(struct net *net,
__be32 laddr, __be32 faddr,
struct rds_transport *trans, gfp_t gfp)
{
return __rds_conn_create(net, laddr, faddr, trans, gfp, 0);
}
|
C
|
linux
| 0 |
CVE-2016-3841
|
https://www.cvedetails.com/cve/CVE-2016-3841/
|
CWE-416
|
https://github.com/torvalds/linux/commit/45f6fad84cc305103b28d73482b344d7f5b76f39
|
45f6fad84cc305103b28d73482b344d7f5b76f39
|
ipv6: add complete rcu protection around np->opt
This patch addresses multiple problems :
UDP/RAW sendmsg() need to get a stable struct ipv6_txoptions
while socket is not locked : Other threads can change np->opt
concurrently. Dmitry posted a syzkaller
(http://github.com/google/syzkaller) program desmonstrating
use-after-free.
Starting with TCP/DCCP lockless listeners, tcp_v6_syn_recv_sock()
and dccp_v6_request_recv_sock() also need to use RCU protection
to dereference np->opt once (before calling ipv6_dup_options())
This patch adds full RCU protection to np->opt
Reported-by: Dmitry Vyukov <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Acked-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
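A minimal user-space model of the pattern this commit applies: readers take one snapshot of the options pointer and use only that snapshot, rather than re-reading a field another thread may free and replace. C11 atomics stand in for the kernel's RCU primitives; all names are illustrative.
#include <stdatomic.h>
#include <stddef.h>

struct txoptions_like { int hopopt_len; };  /* stands in for ipv6_txoptions */

struct np_like {
    _Atomic(struct txoptions_like *) opt;   /* stands in for the RCU pointer */
};

static int sendmsg_like(struct np_like *np)
{
    /* One acquire load, like rcu_dereference(): a stable snapshot for
     * the whole send path even if setsockopt() publishes new options. */
    struct txoptions_like *opt =
        atomic_load_explicit(&np->opt, memory_order_acquire);

    if (opt && opt->hopopt_len > 0) {
        /* ... build the packet from this snapshot only ... */
    }
    return 0;
}

int main(void)
{
    struct txoptions_like opts = { 8 };
    struct np_like np;
    atomic_init(&np.opt, &opts);
    return sendmsg_like(&np);
}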
|
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
kfree_skb(inet_rsk(req)->pktopts);
}
|
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
kfree_skb(inet_rsk(req)->pktopts);
}
|
C
|
linux
| 0 |
CVE-2016-5770
|
https://www.cvedetails.com/cve/CVE-2016-5770/
|
CWE-190
|
https://github.com/php/php-src/commit/7245bff300d3fa8bacbef7897ff080a6f1c23eba?w=1
|
7245bff300d3fa8bacbef7897ff080a6f1c23eba?w=1
|
Fix bug #72262 - do not overflow int
|
static int spl_filesystem_file_read(spl_filesystem_object *intern, int silent TSRMLS_DC) /* {{{ */
{
char *buf;
size_t line_len = 0;
long line_add = (intern->u.file.current_line || intern->u.file.current_zval) ? 1 : 0;
spl_filesystem_file_free_line(intern TSRMLS_CC);
if (php_stream_eof(intern->u.file.stream)) {
if (!silent) {
zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Cannot read from file %s", intern->file_name);
}
return FAILURE;
}
if (intern->u.file.max_line_len > 0) {
buf = safe_emalloc((intern->u.file.max_line_len + 1), sizeof(char), 0);
if (php_stream_get_line(intern->u.file.stream, buf, intern->u.file.max_line_len + 1, &line_len) == NULL) {
efree(buf);
buf = NULL;
} else {
buf[line_len] = '\0';
}
} else {
buf = php_stream_get_line(intern->u.file.stream, NULL, 0, &line_len);
}
if (!buf) {
intern->u.file.current_line = estrdup("");
intern->u.file.current_line_len = 0;
} else {
if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_DROP_NEW_LINE)) {
line_len = strcspn(buf, "\r\n");
buf[line_len] = '\0';
}
intern->u.file.current_line = buf;
intern->u.file.current_line_len = line_len;
}
intern->u.file.current_line_num += line_add;
return SUCCESS;
} /* }}} */
|
static int spl_filesystem_file_read(spl_filesystem_object *intern, int silent TSRMLS_DC) /* {{{ */
{
char *buf;
size_t line_len = 0;
long line_add = (intern->u.file.current_line || intern->u.file.current_zval) ? 1 : 0;
spl_filesystem_file_free_line(intern TSRMLS_CC);
if (php_stream_eof(intern->u.file.stream)) {
if (!silent) {
zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Cannot read from file %s", intern->file_name);
}
return FAILURE;
}
if (intern->u.file.max_line_len > 0) {
buf = safe_emalloc((intern->u.file.max_line_len + 1), sizeof(char), 0);
if (php_stream_get_line(intern->u.file.stream, buf, intern->u.file.max_line_len + 1, &line_len) == NULL) {
efree(buf);
buf = NULL;
} else {
buf[line_len] = '\0';
}
} else {
buf = php_stream_get_line(intern->u.file.stream, NULL, 0, &line_len);
}
if (!buf) {
intern->u.file.current_line = estrdup("");
intern->u.file.current_line_len = 0;
} else {
if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_DROP_NEW_LINE)) {
line_len = strcspn(buf, "\r\n");
buf[line_len] = '\0';
}
intern->u.file.current_line = buf;
intern->u.file.current_line_len = line_len;
}
intern->u.file.current_line_num += line_add;
return SUCCESS;
} /* }}} */
|
C
|
php-src
| 1 |
CVE-2019-15296
|
https://www.cvedetails.com/cve/CVE-2019-15296/
|
CWE-119
|
https://github.com/knik0/faad2/commit/942c3e0aee748ea6fe97cb2c1aa5893225316174
|
942c3e0aee748ea6fe97cb2c1aa5893225316174
|
Fix a couple buffer overflows
https://hackerone.com/reports/502816
https://hackerone.com/reports/507858
https://github.com/videolan/vlc/blob/master/contrib/src/faad2/faad2-fix-overflows.patch
|
uint8_t *faad_getbitbuffer(bitfile *ld, uint32_t bits
DEBUGDEC)
{
int i;
unsigned int temp;
int bytes = bits >> 3;
int remainder = bits & 0x7;
uint8_t *buffer = (uint8_t*)faad_malloc((bytes+1)*sizeof(uint8_t));
for (i = 0; i < bytes; i++)
{
buffer[i] = (uint8_t)faad_getbits(ld, 8 DEBUGVAR(print,var,dbg));
}
if (remainder)
{
temp = faad_getbits(ld, remainder DEBUGVAR(print,var,dbg)) << (8-remainder);
buffer[bytes] = (uint8_t)temp;
}
return buffer;
}
|
uint8_t *faad_getbitbuffer(bitfile *ld, uint32_t bits
DEBUGDEC)
{
int i;
unsigned int temp;
int bytes = bits >> 3;
int remainder = bits & 0x7;
uint8_t *buffer = (uint8_t*)faad_malloc((bytes+1)*sizeof(uint8_t));
for (i = 0; i < bytes; i++)
{
buffer[i] = (uint8_t)faad_getbits(ld, 8 DEBUGVAR(print,var,dbg));
}
if (remainder)
{
temp = faad_getbits(ld, remainder DEBUGVAR(print,var,dbg)) << (8-remainder);
buffer[bytes] = (uint8_t)temp;
}
return buffer;
}
|
C
|
faad2
| 0 |
CVE-2014-2972
|
https://www.cvedetails.com/cve/CVE-2014-2972/
|
CWE-189
|
https://git.exim.org/exim.git/commitdiff/7685ce68148a083d7759e78d01aa5198fc099c44
|
88a5ee399db9c15c2a94cd95aae6f364afab3249
| null |
expand_getcertele(uschar * field, uschar * certvar)
{
var_entry * vp;
certfield * cp;
if (!(vp = find_var_ent(certvar)))
{
expand_string_message =
string_sprintf("no variable named \"%s\"", certvar);
return NULL; /* Unknown variable name */
}
/* NB this stops us passing certs around in variable. Might
want to do that in future */
if (vp->type != vtype_cert)
{
expand_string_message =
string_sprintf("\"%s\" is not a certificate", certvar);
return NULL; /* Unknown variable name */
}
if (!*(void **)vp->value)
return NULL;
if (*field >= '0' && *field <= '9')
return tls_cert_ext_by_oid(*(void **)vp->value, field, 0);
for(cp = certfields;
cp < certfields + nelements(certfields);
cp++)
if (Ustrncmp(cp->name, field, cp->namelen) == 0)
{
uschar * modifier = *(field += cp->namelen) == ','
? ++field : NULL;
return (*cp->getfn)( *(void **)vp->value, modifier );
}
expand_string_message =
string_sprintf("bad field selector \"%s\" for certextract", field);
return NULL;
}
|
expand_getcertele(uschar * field, uschar * certvar)
{
var_entry * vp;
certfield * cp;
if (!(vp = find_var_ent(certvar)))
{
expand_string_message =
string_sprintf("no variable named \"%s\"", certvar);
return NULL; /* Unknown variable name */
}
/* NB this stops us passing certs around in variable. Might
want to do that in future */
if (vp->type != vtype_cert)
{
expand_string_message =
string_sprintf("\"%s\" is not a certificate", certvar);
return NULL; /* Unknown variable name */
}
if (!*(void **)vp->value)
return NULL;
if (*field >= '0' && *field <= '9')
return tls_cert_ext_by_oid(*(void **)vp->value, field, 0);
for(cp = certfields;
cp < certfields + nelements(certfields);
cp++)
if (Ustrncmp(cp->name, field, cp->namelen) == 0)
{
uschar * modifier = *(field += cp->namelen) == ','
? ++field : NULL;
return (*cp->getfn)( *(void **)vp->value, modifier );
}
expand_string_message =
string_sprintf("bad field selector \"%s\" for certextract", field);
return NULL;
}
|
C
|
exim
| 0 |
CVE-2018-11596
|
https://www.cvedetails.com/cve/CVE-2018-11596/
|
CWE-119
|
https://github.com/espruino/Espruino/commit/ce1924193862d58cb43d3d4d9dada710a8361b89
|
ce1924193862d58cb43d3d4d9dada710a8361b89
|
fix jsvGetString regression
|
JsVar *jsvAsName(JsVar *var) {
if (!var) return 0;
if (jsvGetRefs(var) == 0) {
if (!jsvIsName(var))
var = jsvMakeIntoVariableName(var, 0);
return jsvLockAgain(var);
} else { // it was reffed, we must add a new one
return jsvMakeIntoVariableName(jsvCopy(var, false), 0);
}
}
|
JsVar *jsvAsName(JsVar *var) {
if (!var) return 0;
if (jsvGetRefs(var) == 0) {
if (!jsvIsName(var))
var = jsvMakeIntoVariableName(var, 0);
return jsvLockAgain(var);
} else { // it was reffed, we must add a new one
return jsvMakeIntoVariableName(jsvCopy(var, false), 0);
}
}
|
C
|
Espruino
| 0 |
CVE-2014-7822
|
https://www.cvedetails.com/cve/CVE-2014-7822/
|
CWE-264
|
https://github.com/torvalds/linux/commit/8d0207652cbe27d1f962050737848e5ad4671958
|
8d0207652cbe27d1f962050737848e5ad4671958
|
->splice_write() via ->write_iter()
iter_file_splice_write() - a ->splice_write() instance that gathers the
pipe buffers, builds a bio_vec-based iov_iter covering those and feeds
it to ->write_iter(). A bunch of simple cases converted to that...
[AV: fixed the braino spotted by Cyrill]
Signed-off-by: Al Viro <[email protected]>
|
COMPAT_SYSCALL_DEFINE4(vmsplice, int, fd, const struct compat_iovec __user *, iov32,
unsigned int, nr_segs, unsigned int, flags)
{
unsigned i;
struct iovec __user *iov;
if (nr_segs > UIO_MAXIOV)
return -EINVAL;
iov = compat_alloc_user_space(nr_segs * sizeof(struct iovec));
for (i = 0; i < nr_segs; i++) {
struct compat_iovec v;
if (get_user(v.iov_base, &iov32[i].iov_base) ||
get_user(v.iov_len, &iov32[i].iov_len) ||
put_user(compat_ptr(v.iov_base), &iov[i].iov_base) ||
put_user(v.iov_len, &iov[i].iov_len))
return -EFAULT;
}
return sys_vmsplice(fd, iov, nr_segs, flags);
}
|
COMPAT_SYSCALL_DEFINE4(vmsplice, int, fd, const struct compat_iovec __user *, iov32,
unsigned int, nr_segs, unsigned int, flags)
{
unsigned i;
struct iovec __user *iov;
if (nr_segs > UIO_MAXIOV)
return -EINVAL;
iov = compat_alloc_user_space(nr_segs * sizeof(struct iovec));
for (i = 0; i < nr_segs; i++) {
struct compat_iovec v;
if (get_user(v.iov_base, &iov32[i].iov_base) ||
get_user(v.iov_len, &iov32[i].iov_len) ||
put_user(compat_ptr(v.iov_base), &iov[i].iov_base) ||
put_user(v.iov_len, &iov[i].iov_len))
return -EFAULT;
}
return sys_vmsplice(fd, iov, nr_segs, flags);
}
|
C
|
linux
| 0 |
CVE-2014-2669
|
https://www.cvedetails.com/cve/CVE-2014-2669/
|
CWE-189
|
https://github.com/postgres/postgres/commit/31400a673325147e1205326008e32135a78b4d8a
|
31400a673325147e1205326008e32135a78b4d8a
|
Predict integer overflow to avoid buffer overruns.
Several functions, mostly type input functions, calculated an allocation
size such that the calculation wrapped to a small positive value when
arguments implied a sufficiently-large requirement. Writes past the end
of the inadvertent small allocation followed shortly thereafter.
Coverity identified the path_in() vulnerability; code inspection led to
the rest. In passing, add check_stack_depth() to prevent stack overflow
in related functions.
Back-patch to 8.4 (all supported versions). The non-comment hstore
changes touch code that did not exist in 8.4, so that part stops at 9.0.
Noah Misch and Heikki Linnakangas, reviewed by Tom Lane.
Security: CVE-2014-0064
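A minimal sketch of the "predict the overflow" idea: check that the multiplication cannot wrap before calling the allocator, so a huge element count fails cleanly instead of producing an undersized buffer. alloc_points() is an illustrative stand-in, not PostgreSQL's actual palloc path.
#include <stdint.h>
#include <stdlib.h>

/* Refuse a count whose size computation would wrap before calling the
 * allocator, instead of letting a small wrapped value produce an
 * undersized buffer that later writes overrun. */
static void *alloc_points(size_t npts, size_t ptsize)
{
    if (npts != 0 && ptsize > SIZE_MAX / npts)
        return NULL;               /* npts * ptsize would overflow */
    return malloc(npts * ptsize);
}

int main(void)
{
    /* a pathological count is rejected up front */
    return alloc_points((size_t)-1, 16) == NULL ? 0 : 1;
}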
|
execconsistent(QUERYTYPE *query, ArrayType *array, bool calcnot)
{
CHKVAL chkval;
CHECKARRVALID(array);
chkval.arrb = ARRPTR(array);
chkval.arre = chkval.arrb + ARRNELEMS(array);
return execute(GETQUERY(query) + query->size - 1,
(void *) &chkval, calcnot,
checkcondition_arr);
}
|
execconsistent(QUERYTYPE *query, ArrayType *array, bool calcnot)
{
CHKVAL chkval;
CHECKARRVALID(array);
chkval.arrb = ARRPTR(array);
chkval.arre = chkval.arrb + ARRNELEMS(array);
return execute(GETQUERY(query) + query->size - 1,
(void *) &chkval, calcnot,
checkcondition_arr);
}
|
C
|
postgres
| 0 |
CVE-2015-1352
|
https://www.cvedetails.com/cve/CVE-2015-1352/
| null |
https://git.php.net/?p=php-src.git;a=commit;h=124fb22a13fafa3648e4e15b4f207c7096d8155e
|
124fb22a13fafa3648e4e15b4f207c7096d8155e
| null |
PHP_PGSQL_API int php_pgsql_meta_data(PGconn *pg_link, const char *table_name, zval *meta, zend_bool extended)
{
PGresult *pg_result;
char *src, *tmp_name, *tmp_name2 = NULL;
char *escaped;
smart_str querystr = {0};
size_t new_len;
int i, num_rows;
zval elem;
if (!*table_name) {
php_error_docref(NULL, E_WARNING, "The table name must be specified");
return FAILURE;
}
src = estrdup(table_name);
tmp_name = php_strtok_r(src, ".", &tmp_name2);
if (!tmp_name2 || !*tmp_name2) {
/* Default schema */
tmp_name2 = tmp_name;
tmp_name = "public";
}
if (extended) {
smart_str_appends(&querystr,
"SELECT a.attname, a.attnum, t.typname, a.attlen, a.attnotNULL, a.atthasdef, a.attndims, t.typtype, "
"d.description "
"FROM pg_class as c "
" JOIN pg_attribute a ON (a.attrelid = c.oid) "
" JOIN pg_type t ON (a.atttypid = t.oid) "
" JOIN pg_namespace n ON (c.relnamespace = n.oid) "
" LEFT JOIN pg_description d ON (d.objoid=a.attrelid AND d.objsubid=a.attnum AND c.oid=d.objoid) "
"WHERE a.attnum > 0 AND c.relname = '");
} else {
smart_str_appends(&querystr,
"SELECT a.attname, a.attnum, t.typname, a.attlen, a.attnotnull, a.atthasdef, a.attndims, t.typtype "
"FROM pg_class as c "
" JOIN pg_attribute a ON (a.attrelid = c.oid) "
" JOIN pg_type t ON (a.atttypid = t.oid) "
" JOIN pg_namespace n ON (c.relnamespace = n.oid) "
"WHERE a.attnum > 0 AND c.relname = '");
}
escaped = (char *)safe_emalloc(strlen(tmp_name2), 2, 1);
new_len = PQescapeStringConn(pg_link, escaped, tmp_name2, strlen(tmp_name2), NULL);
if (new_len) {
smart_str_appendl(&querystr, escaped, new_len);
}
efree(escaped);
smart_str_appends(&querystr, "' AND n.nspname = '");
escaped = (char *)safe_emalloc(strlen(tmp_name), 2, 1);
new_len = PQescapeStringConn(pg_link, escaped, tmp_name, strlen(tmp_name), NULL);
if (new_len) {
smart_str_appendl(&querystr, escaped, new_len);
}
efree(escaped);
smart_str_appends(&querystr, "' ORDER BY a.attnum;");
smart_str_0(&querystr);
efree(src);
pg_result = PQexec(pg_link, querystr.s->val);
if (PQresultStatus(pg_result) != PGRES_TUPLES_OK || (num_rows = PQntuples(pg_result)) == 0) {
php_error_docref(NULL, E_WARNING, "Table '%s' doesn't exists", table_name);
smart_str_free(&querystr);
PQclear(pg_result);
return FAILURE;
}
smart_str_free(&querystr);
for (i = 0; i < num_rows; i++) {
char *name;
array_init(&elem);
/* pg_attribute.attnum */
add_assoc_long_ex(&elem, "num", sizeof("num") - 1, atoi(PQgetvalue(pg_result, i, 1)));
/* pg_type.typname */
add_assoc_string_ex(&elem, "type", sizeof("type") - 1, PQgetvalue(pg_result, i, 2));
/* pg_attribute.attlen */
add_assoc_long_ex(&elem, "len", sizeof("len") - 1, atoi(PQgetvalue(pg_result,i,3)));
/* pg_attribute.attnonull */
add_assoc_bool_ex(&elem, "not null", sizeof("not null") - 1, !strcmp(PQgetvalue(pg_result, i, 4), "t"));
/* pg_attribute.atthasdef */
add_assoc_bool_ex(&elem, "has default", sizeof("has default") - 1, !strcmp(PQgetvalue(pg_result,i,5), "t"));
/* pg_attribute.attndims */
add_assoc_long_ex(&elem, "array dims", sizeof("array dims") - 1, atoi(PQgetvalue(pg_result, i, 6)));
/* pg_type.typtype */
add_assoc_bool_ex(&elem, "is enum", sizeof("is enum") - 1, !strcmp(PQgetvalue(pg_result, i, 7), "e"));
if (extended) {
/* pg_type.typtype */
add_assoc_bool_ex(&elem, "is base", sizeof("is base") - 1, !strcmp(PQgetvalue(pg_result, i, 7), "b"));
add_assoc_bool_ex(&elem, "is composite", sizeof("is composite") - 1, !strcmp(PQgetvalue(pg_result, i, 7), "c"));
add_assoc_bool_ex(&elem, "is pesudo", sizeof("is pesudo") - 1, !strcmp(PQgetvalue(pg_result, i, 7), "p"));
/* pg_description.description */
add_assoc_string_ex(&elem, "description", sizeof("description") - 1, PQgetvalue(pg_result, i, 8));
}
/* pg_attribute.attname */
name = PQgetvalue(pg_result,i,0);
add_assoc_zval(meta, name, &elem);
}
PQclear(pg_result);
return SUCCESS;
}
|
PHP_PGSQL_API int php_pgsql_meta_data(PGconn *pg_link, const char *table_name, zval *meta, zend_bool extended)
{
PGresult *pg_result;
char *src, *tmp_name, *tmp_name2 = NULL;
char *escaped;
smart_str querystr = {0};
size_t new_len;
int i, num_rows;
zval elem;
if (!*table_name) {
php_error_docref(NULL, E_WARNING, "The table name must be specified");
return FAILURE;
}
src = estrdup(table_name);
tmp_name = php_strtok_r(src, ".", &tmp_name2);
if (!tmp_name2 || !*tmp_name2) {
/* Default schema */
tmp_name2 = tmp_name;
tmp_name = "public";
}
if (extended) {
smart_str_appends(&querystr,
"SELECT a.attname, a.attnum, t.typname, a.attlen, a.attnotNULL, a.atthasdef, a.attndims, t.typtype, "
"d.description "
"FROM pg_class as c "
" JOIN pg_attribute a ON (a.attrelid = c.oid) "
" JOIN pg_type t ON (a.atttypid = t.oid) "
" JOIN pg_namespace n ON (c.relnamespace = n.oid) "
" LEFT JOIN pg_description d ON (d.objoid=a.attrelid AND d.objsubid=a.attnum AND c.oid=d.objoid) "
"WHERE a.attnum > 0 AND c.relname = '");
} else {
smart_str_appends(&querystr,
"SELECT a.attname, a.attnum, t.typname, a.attlen, a.attnotnull, a.atthasdef, a.attndims, t.typtype "
"FROM pg_class as c "
" JOIN pg_attribute a ON (a.attrelid = c.oid) "
" JOIN pg_type t ON (a.atttypid = t.oid) "
" JOIN pg_namespace n ON (c.relnamespace = n.oid) "
"WHERE a.attnum > 0 AND c.relname = '");
}
escaped = (char *)safe_emalloc(strlen(tmp_name2), 2, 1);
new_len = PQescapeStringConn(pg_link, escaped, tmp_name2, strlen(tmp_name2), NULL);
if (new_len) {
smart_str_appendl(&querystr, escaped, new_len);
}
efree(escaped);
smart_str_appends(&querystr, "' AND n.nspname = '");
escaped = (char *)safe_emalloc(strlen(tmp_name), 2, 1);
new_len = PQescapeStringConn(pg_link, escaped, tmp_name, strlen(tmp_name), NULL);
if (new_len) {
smart_str_appendl(&querystr, escaped, new_len);
}
efree(escaped);
smart_str_appends(&querystr, "' ORDER BY a.attnum;");
smart_str_0(&querystr);
efree(src);
pg_result = PQexec(pg_link, querystr.s->val);
if (PQresultStatus(pg_result) != PGRES_TUPLES_OK || (num_rows = PQntuples(pg_result)) == 0) {
php_error_docref(NULL, E_WARNING, "Table '%s' doesn't exists", table_name);
smart_str_free(&querystr);
PQclear(pg_result);
return FAILURE;
}
smart_str_free(&querystr);
for (i = 0; i < num_rows; i++) {
char *name;
array_init(&elem);
/* pg_attribute.attnum */
add_assoc_long_ex(&elem, "num", sizeof("num") - 1, atoi(PQgetvalue(pg_result, i, 1)));
/* pg_type.typname */
add_assoc_string_ex(&elem, "type", sizeof("type") - 1, PQgetvalue(pg_result, i, 2));
/* pg_attribute.attlen */
add_assoc_long_ex(&elem, "len", sizeof("len") - 1, atoi(PQgetvalue(pg_result,i,3)));
/* pg_attribute.attnonull */
add_assoc_bool_ex(&elem, "not null", sizeof("not null") - 1, !strcmp(PQgetvalue(pg_result, i, 4), "t"));
/* pg_attribute.atthasdef */
add_assoc_bool_ex(&elem, "has default", sizeof("has default") - 1, !strcmp(PQgetvalue(pg_result,i,5), "t"));
/* pg_attribute.attndims */
add_assoc_long_ex(&elem, "array dims", sizeof("array dims") - 1, atoi(PQgetvalue(pg_result, i, 6)));
/* pg_type.typtype */
add_assoc_bool_ex(&elem, "is enum", sizeof("is enum") - 1, !strcmp(PQgetvalue(pg_result, i, 7), "e"));
if (extended) {
/* pg_type.typtype */
add_assoc_bool_ex(&elem, "is base", sizeof("is base") - 1, !strcmp(PQgetvalue(pg_result, i, 7), "b"));
add_assoc_bool_ex(&elem, "is composite", sizeof("is composite") - 1, !strcmp(PQgetvalue(pg_result, i, 7), "c"));
add_assoc_bool_ex(&elem, "is pesudo", sizeof("is pesudo") - 1, !strcmp(PQgetvalue(pg_result, i, 7), "p"));
/* pg_description.description */
add_assoc_string_ex(&elem, "description", sizeof("description") - 1, PQgetvalue(pg_result, i, 8));
}
/* pg_attribute.attname */
name = PQgetvalue(pg_result,i,0);
add_assoc_zval(meta, name, &elem);
}
PQclear(pg_result);
return SUCCESS;
}
|
C
|
php
| 0 |
CVE-2015-3412
|
https://www.cvedetails.com/cve/CVE-2015-3412/
|
CWE-254
|
https://git.php.net/?p=php-src.git;a=commit;h=4435b9142ff9813845d5c97ab29a5d637bedb257
|
4435b9142ff9813845d5c97ab29a5d637bedb257
| null |
PHP_FUNCTION(pg_meta_data)
{
zval *pgsql_link;
char *table_name;
uint table_name_len;
PGconn *pgsql;
int id = -1;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rs",
&pgsql_link, &table_name, &table_name_len) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE2(pgsql, PGconn *, &pgsql_link, id, "PostgreSQL link", le_link, le_plink);
array_init(return_value);
if (php_pgsql_meta_data(pgsql, table_name, return_value TSRMLS_CC) == FAILURE) {
zval_dtor(return_value); /* destroy array */
RETURN_FALSE;
}
else {
HashPosition pos;
zval **val;
for (zend_hash_internal_pointer_reset_ex(Z_ARRVAL_P(return_value), &pos);
zend_hash_get_current_data_ex(Z_ARRVAL_P(return_value), (void **)&val, &pos) == SUCCESS;
zend_hash_move_forward_ex(Z_ARRVAL_P(return_value), &pos)) {
/* delete newly added entry, in order to keep BC */
zend_hash_del_key_or_index(Z_ARRVAL_PP(val), "is enum", sizeof("is enum"), 0, HASH_DEL_KEY);
}
}
}
|
PHP_FUNCTION(pg_meta_data)
{
zval *pgsql_link;
char *table_name;
uint table_name_len;
PGconn *pgsql;
int id = -1;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rs",
&pgsql_link, &table_name, &table_name_len) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE2(pgsql, PGconn *, &pgsql_link, id, "PostgreSQL link", le_link, le_plink);
array_init(return_value);
if (php_pgsql_meta_data(pgsql, table_name, return_value TSRMLS_CC) == FAILURE) {
zval_dtor(return_value); /* destroy array */
RETURN_FALSE;
}
else {
HashPosition pos;
zval **val;
for (zend_hash_internal_pointer_reset_ex(Z_ARRVAL_P(return_value), &pos);
zend_hash_get_current_data_ex(Z_ARRVAL_P(return_value), (void **)&val, &pos) == SUCCESS;
zend_hash_move_forward_ex(Z_ARRVAL_P(return_value), &pos)) {
/* delete newly added entry, in order to keep BC */
zend_hash_del_key_or_index(Z_ARRVAL_PP(val), "is enum", sizeof("is enum"), 0, HASH_DEL_KEY);
}
}
}
|
C
|
php
| 0 |
CVE-2019-5755
|
https://www.cvedetails.com/cve/CVE-2019-5755/
|
CWE-189
|
https://github.com/chromium/chromium/commit/971548cdca2d4c0a6fedd3db0c94372c2a27eac3
|
971548cdca2d4c0a6fedd3db0c94372c2a27eac3
|
Make MediaStreamDispatcherHost per-request instead of per-frame.
Instead of having RenderFrameHost own a single MSDH to handle all
requests from a frame, MSDH objects will be owned by a strong binding.
A consequence of this is that an additional requester ID is added to
requests to MediaStreamManager, so that an MSDH is able to cancel only
requests generated by it.
In practice, MSDH will continue to be per frame in most cases since
each frame normally makes a single request for an MSDH object.
This fixes a lifetime issue caused by the IO thread executing tasks
after the RenderFrameHost dies.
Drive-by: Fix some minor lint issues.
Bug: 912520
Change-Id: I52742ffc98b9fc57ce8e6f5093a61aed86d3e516
Reviewed-on: https://chromium-review.googlesource.com/c/1369799
Reviewed-by: Emircan Uysaler <[email protected]>
Reviewed-by: Ken Buchanan <[email protected]>
Reviewed-by: Olga Sharonova <[email protected]>
Commit-Queue: Guido Urdaneta <[email protected]>
Cr-Commit-Position: refs/heads/master@{#616347}
|
void SpeechRecognitionManagerImpl::OnAudioLevelsChange(
int session_id, float volume, float noise_volume) {
DCHECK_CURRENTLY_ON(BrowserThread::IO);
if (!SessionExists(session_id))
return;
if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
delegate_listener->OnAudioLevelsChange(session_id, volume, noise_volume);
if (SpeechRecognitionEventListener* listener = GetListener(session_id))
listener->OnAudioLevelsChange(session_id, volume, noise_volume);
}
|
void SpeechRecognitionManagerImpl::OnAudioLevelsChange(
int session_id, float volume, float noise_volume) {
DCHECK_CURRENTLY_ON(BrowserThread::IO);
if (!SessionExists(session_id))
return;
if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
delegate_listener->OnAudioLevelsChange(session_id, volume, noise_volume);
if (SpeechRecognitionEventListener* listener = GetListener(session_id))
listener->OnAudioLevelsChange(session_id, volume, noise_volume);
}
|
C
|
Chrome
| 0 |
CVE-2016-2494
|
https://www.cvedetails.com/cve/CVE-2016-2494/
|
CWE-264
|
https://android.googlesource.com/platform/system/core/+/864e2e22fcd0cba3f5e67680ccabd0302dfda45d
|
864e2e22fcd0cba3f5e67680ccabd0302dfda45d
|
Fix overflow in path building
An incorrect size was causing an unsigned value
to wrap, causing it to write past the end of
the buffer.
Bug: 28085658
Change-Id: Ie9625c729cca024d514ba2880ff97209d435a165
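A minimal sketch of the guarded path building the fix implies: compute the required length up front and reject anything that does not fit, so no wrapped unsigned size can slip past the bounds check. build_path() is an illustrative stand-in, not the sdcard daemon's actual helper.
#include <stdio.h>
#include <string.h>

/* Compute the required length first and compare it as a plain sum, so
 * an over-long component fails cleanly instead of wrapping an unsigned
 * size and writing past the end of the buffer. */
static int build_path(char *out, size_t outlen,
                      const char *dir, const char *name)
{
    size_t need = strlen(dir) + 1 /* '/' */ + strlen(name) + 1 /* NUL */;

    if (need > outlen)
        return -1;                 /* would not fit: reject, don't wrap */
    snprintf(out, outlen, "%s/%s", dir, name);
    return 0;
}

int main(void)
{
    char buf[16];
    /* the over-long name is rejected instead of overflowing buf */
    return build_path(buf, sizeof(buf), "/sdcard",
                      "a-name-far-too-long-to-fit") == -1 ? 0 : 1;
}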
|
static int handle_readdir(struct fuse* fuse, struct fuse_handler* handler,
const struct fuse_in_header* hdr, const struct fuse_read_in* req)
{
char buffer[8192];
struct fuse_dirent *fde = (struct fuse_dirent*) buffer;
struct dirent *de;
struct dirhandle *h = id_to_ptr(req->fh);
TRACE("[%d] READDIR %p\n", handler->token, h);
if (req->offset == 0) {
/* rewinddir() might have been called above us, so rewind here too */
TRACE("[%d] calling rewinddir()\n", handler->token);
rewinddir(h->d);
}
de = readdir(h->d);
if (!de) {
return 0;
}
fde->ino = FUSE_UNKNOWN_INO;
/* increment the offset so we can detect when rewinddir() seeks back to the beginning */
fde->off = req->offset + 1;
fde->type = de->d_type;
fde->namelen = strlen(de->d_name);
memcpy(fde->name, de->d_name, fde->namelen + 1);
fuse_reply(fuse, hdr->unique, fde,
FUSE_DIRENT_ALIGN(sizeof(struct fuse_dirent) + fde->namelen));
return NO_STATUS;
}
|
static int handle_readdir(struct fuse* fuse, struct fuse_handler* handler,
const struct fuse_in_header* hdr, const struct fuse_read_in* req)
{
char buffer[8192];
struct fuse_dirent *fde = (struct fuse_dirent*) buffer;
struct dirent *de;
struct dirhandle *h = id_to_ptr(req->fh);
TRACE("[%d] READDIR %p\n", handler->token, h);
if (req->offset == 0) {
/* rewinddir() might have been called above us, so rewind here too */
TRACE("[%d] calling rewinddir()\n", handler->token);
rewinddir(h->d);
}
de = readdir(h->d);
if (!de) {
return 0;
}
fde->ino = FUSE_UNKNOWN_INO;
/* increment the offset so we can detect when rewinddir() seeks back to the beginning */
fde->off = req->offset + 1;
fde->type = de->d_type;
fde->namelen = strlen(de->d_name);
memcpy(fde->name, de->d_name, fde->namelen + 1);
fuse_reply(fuse, hdr->unique, fde,
FUSE_DIRENT_ALIGN(sizeof(struct fuse_dirent) + fde->namelen));
return NO_STATUS;
}
|
C
|
Android
| 0 |
CVE-2009-3605
|
https://www.cvedetails.com/cve/CVE-2009-3605/
|
CWE-189
|
https://cgit.freedesktop.org/poppler/poppler/commit/?id=7b2d314a61fd0e12f47c62996cb49ec0d1ba747a
|
7b2d314a61fd0e12f47c62996cb49ec0d1ba747a
| null |
void GfxIndexedColorSpace::getDefaultColor(GfxColor *color) {
color->c[0] = 0;
}
|
void GfxIndexedColorSpace::getDefaultColor(GfxColor *color) {
color->c[0] = 0;
}
|
CPP
|
poppler
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/ea3d1d84be3d6f97bf50e76511c9e26af6895533
|
ea3d1d84be3d6f97bf50e76511c9e26af6895533
|
Fix passing pointers between processes.
BUG=31880
Review URL: http://codereview.chromium.org/558036
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@37555 0039d316-1c4b-4281-b951-d872f2087c98
|
NPError PluginInstance::NPP_NewStream(NPMIMEType type,
NPStream *stream,
NPBool seekable,
unsigned short *stype) {
DCHECK(npp_functions_ != 0);
DCHECK(npp_functions_->newstream != 0);
if (npp_functions_->newstream != 0) {
return npp_functions_->newstream(npp_, type, stream, seekable, stype);
}
return NPERR_INVALID_FUNCTABLE_ERROR;
}
|
NPError PluginInstance::NPP_NewStream(NPMIMEType type,
NPStream *stream,
NPBool seekable,
unsigned short *stype) {
DCHECK(npp_functions_ != 0);
DCHECK(npp_functions_->newstream != 0);
if (npp_functions_->newstream != 0) {
return npp_functions_->newstream(npp_, type, stream, seekable, stype);
}
return NPERR_INVALID_FUNCTABLE_ERROR;
}
|
C
|
Chrome
| 0 |
CVE-2018-18349
|
https://www.cvedetails.com/cve/CVE-2018-18349/
|
CWE-732
|
https://github.com/chromium/chromium/commit/5f8671e7667b8b133bd3664100012a3906e92d65
|
5f8671e7667b8b133bd3664100012a3906e92d65
|
Add a check for disallowing remote frame navigations to local resources.
Previously, RemoteFrame navigations did not perform any renderer-side
checks and relied solely on the browser-side logic to block disallowed
navigations via mechanisms like FilterURL. This means that blocked
remote frame navigations were silently navigated to about:blank
without any console error message.
This CL adds a CanDisplay check to the remote navigation path to match
an equivalent check done for local frame navigations. This way, the
renderer can consistently block disallowed navigations in both cases
and output an error message.
Bug: 894399
Change-Id: I172f68f77c1676f6ca0172d2a6c78f7edc0e3b7a
Reviewed-on: https://chromium-review.googlesource.com/c/1282390
Reviewed-by: Charlie Reis <[email protected]>
Reviewed-by: Nate Chapin <[email protected]>
Commit-Queue: Alex Moshchuk <[email protected]>
Cr-Commit-Position: refs/heads/master@{#601022}
|
void NavigateNamedFrame(const ToRenderFrameHost& caller_frame,
const GURL& url,
const std::string& name) {
EXPECT_EQ(true, EvalJs(caller_frame,
JsReplace("!!window.open($1, $2)", url, name)));
}
|
void NavigateNamedFrame(const ToRenderFrameHost& caller_frame,
const GURL& url,
const std::string& name) {
EXPECT_EQ(true, EvalJs(caller_frame,
JsReplace("!!window.open($1, $2)", url, name)));
}
|
C
|
Chrome
| 0 |
CVE-2015-8865
|
https://www.cvedetails.com/cve/CVE-2015-8865/
|
CWE-119
|
https://github.com/file/file/commit/6713ca45e7757297381f4b4cdb9cf5e624a9ad36
|
6713ca45e7757297381f4b4cdb9cf5e624a9ad36
|
PR/454: Fix memory corruption when the continuation level jumps by more than
20 in a single step.
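A minimal sketch of the core of the fix: grow the per-level table relative to the requested level instead of by a fixed step of 20, so a level that jumps far ahead still lands inside the reallocated array. ensure_level() is an illustrative stand-in for file_check_mem().
#include <stdlib.h>

/* Grow the table relative to the requested level, not by a fixed step,
 * so a level that jumps by more than 20 still lands inside the
 * reallocated array (old code: *len += 20, which could stay too small). */
static int ensure_level(int **tab, size_t *len, size_t level)
{
    if (level >= *len) {
        size_t n = level + 20;
        int *p = realloc(*tab, n * sizeof(**tab));
        if (p == NULL)
            return -1;
        *tab = p;
        *len = n;
    }
    return 0;
}

int main(void)
{
    int *tab = NULL;
    size_t len = 0;

    if (ensure_level(&tab, &len, 57) != 0)  /* jump far past step 20 */
        return 1;
    tab[57] = 1;                            /* in bounds: len is now 77 */
    free(tab);
    return 0;
}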
|
file_check_mem(struct magic_set *ms, unsigned int level)
{
size_t len;
if (level >= ms->c.len) {
len = (ms->c.len = 20 + level) * sizeof(*ms->c.li);
ms->c.li = CAST(struct level_info *, (ms->c.li == NULL) ?
malloc(len) :
realloc(ms->c.li, len));
if (ms->c.li == NULL) {
file_oomem(ms, len);
return -1;
}
}
ms->c.li[level].got_match = 0;
#ifdef ENABLE_CONDITIONALS
ms->c.li[level].last_match = 0;
ms->c.li[level].last_cond = COND_NONE;
#endif /* ENABLE_CONDITIONALS */
return 0;
}
|
file_check_mem(struct magic_set *ms, unsigned int level)
{
size_t len;
if (level >= ms->c.len) {
len = (ms->c.len += 20) * sizeof(*ms->c.li);
ms->c.li = CAST(struct level_info *, (ms->c.li == NULL) ?
malloc(len) :
realloc(ms->c.li, len));
if (ms->c.li == NULL) {
file_oomem(ms, len);
return -1;
}
}
ms->c.li[level].got_match = 0;
#ifdef ENABLE_CONDITIONALS
ms->c.li[level].last_match = 0;
ms->c.li[level].last_cond = COND_NONE;
#endif /* ENABLE_CONDITIONALS */
return 0;
}
|
C
|
file
| 1 |
CVE-2016-3839
|
https://www.cvedetails.com/cve/CVE-2016-3839/
|
CWE-284
|
https://android.googlesource.com/platform/system/bt/+/472271b153c5dc53c28beac55480a8d8434b2d5c
|
472271b153c5dc53c28beac55480a8d8434b2d5c
|
DO NOT MERGE Fix potential DoS caused by delivering signal to BT process
Bug: 28885210
Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360
Conflicts:
btif/co/bta_hh_co.c
btif/src/btif_core.c
Merge conflict resolution of ag/1161415 (referencing ag/1164670)
- Directly into mnc-mr2-release
|
BOOLEAN btif_hl_find_mcl_idx_using_handle( tBTA_HL_MCL_HANDLE mcl_handle,
UINT8 *p_app_idx, UINT8 *p_mcl_idx){
btif_hl_app_cb_t *p_acb;
BOOLEAN found=FALSE;
UINT8 i,j;
for (i=0; i<BTA_HL_NUM_APPS; i++)
{
p_acb =BTIF_HL_GET_APP_CB_PTR(i);
for (j=0; j < BTA_HL_NUM_MCLS ; j++)
{
if (p_acb->mcb[j].in_use)
BTIF_TRACE_DEBUG("btif_hl_find_mcl_idx_using_handle:app_idx=%d,"
"mcl_idx =%d mcl_handle=%d",i,j,p_acb->mcb[j].mcl_handle);
if (p_acb->mcb[j].in_use &&
(p_acb->mcb[j].mcl_handle == mcl_handle))
{
found = TRUE;
*p_app_idx = i;
*p_mcl_idx = j;
break;
}
}
}
BTIF_TRACE_DEBUG("%s found=%d app_idx=%d mcl_idx=%d",__FUNCTION__,
found, i, j);
return found;
}
|
BOOLEAN btif_hl_find_mcl_idx_using_handle( tBTA_HL_MCL_HANDLE mcl_handle,
UINT8 *p_app_idx, UINT8 *p_mcl_idx){
btif_hl_app_cb_t *p_acb;
BOOLEAN found=FALSE;
UINT8 i,j;
for (i=0; i<BTA_HL_NUM_APPS; i++)
{
p_acb =BTIF_HL_GET_APP_CB_PTR(i);
for (j=0; j < BTA_HL_NUM_MCLS ; j++)
{
if (p_acb->mcb[j].in_use)
BTIF_TRACE_DEBUG("btif_hl_find_mcl_idx_using_handle:app_idx=%d,"
"mcl_idx =%d mcl_handle=%d",i,j,p_acb->mcb[j].mcl_handle);
if (p_acb->mcb[j].in_use &&
(p_acb->mcb[j].mcl_handle == mcl_handle))
{
found = TRUE;
*p_app_idx = i;
*p_mcl_idx = j;
break;
}
}
}
BTIF_TRACE_DEBUG("%s found=%d app_idx=%d mcl_idx=%d",__FUNCTION__,
found, i, j);
return found;
}
|
C
|
Android
| 0 |
CVE-2012-2875
|
https://www.cvedetails.com/cve/CVE-2012-2875/
| null |
https://github.com/chromium/chromium/commit/1266ba494530a267ec8a21442ea1b5cae94da4fb
|
1266ba494530a267ec8a21442ea1b5cae94da4fb
|
Introduce XGetImage() for GrabWindowSnapshot() in ChromeOS.
BUG=119492
TEST=manually done
Review URL: https://chromiumcodereview.appspot.com/10386124
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@137556 0039d316-1c4b-4281-b951-d872f2087c98
|
bool RootWindowHostLinux::ConfineCursorToRootWindow() {
#if XFIXES_MAJOR >= 5
DCHECK(!pointer_barriers_.get());
if (pointer_barriers_.get())
return false;
gfx::Size screen_size = RootWindowHost::GetNativeScreenSize();
pointer_barriers_.reset(new XID[4]);
pointer_barriers_[0] = XFixesCreatePointerBarrier(
xdisplay_, x_root_window_,
0, bounds_.y(), screen_size.width(), bounds_.y(),
BarrierPositiveY,
0, NULL); // default device
pointer_barriers_[1] = XFixesCreatePointerBarrier(
xdisplay_, x_root_window_,
0, bounds_.bottom(), screen_size.width(), bounds_.bottom(),
BarrierNegativeY,
0, NULL); // default device
pointer_barriers_[2] = XFixesCreatePointerBarrier(
xdisplay_, x_root_window_,
bounds_.x(), 0, bounds_.x(), screen_size.height(),
BarrierPositiveX,
0, NULL); // default device
pointer_barriers_[3] = XFixesCreatePointerBarrier(
xdisplay_, x_root_window_,
bounds_.right(), 0, bounds_.right(), screen_size.height(),
BarrierNegativeX,
0, NULL); // default device
#endif
return true;
}
|
bool RootWindowHostLinux::ConfineCursorToRootWindow() {
#if XFIXES_MAJOR >= 5
DCHECK(!pointer_barriers_.get());
if (pointer_barriers_.get())
return false;
gfx::Size screen_size = RootWindowHost::GetNativeScreenSize();
pointer_barriers_.reset(new XID[4]);
pointer_barriers_[0] = XFixesCreatePointerBarrier(
xdisplay_, x_root_window_,
0, bounds_.y(), screen_size.width(), bounds_.y(),
BarrierPositiveY,
0, NULL); // default device
pointer_barriers_[1] = XFixesCreatePointerBarrier(
xdisplay_, x_root_window_,
0, bounds_.bottom(), screen_size.width(), bounds_.bottom(),
BarrierNegativeY,
0, NULL); // default device
pointer_barriers_[2] = XFixesCreatePointerBarrier(
xdisplay_, x_root_window_,
bounds_.x(), 0, bounds_.x(), screen_size.height(),
BarrierPositiveX,
0, NULL); // default device
pointer_barriers_[3] = XFixesCreatePointerBarrier(
xdisplay_, x_root_window_,
bounds_.right(), 0, bounds_.right(), screen_size.height(),
BarrierNegativeX,
0, NULL); // default device
#endif
return true;
}
|
C
|
Chrome
| 0 |
CVE-2016-9601
|
https://www.cvedetails.com/cve/CVE-2016-9601/
|
CWE-119
|
http://git.ghostscript.com/?p=jbig2dec.git;a=commit;h=e698d5c11d27212aa1098bc5b1673a3378563092
|
e698d5c11d27212aa1098bc5b1673a3378563092
| null |
jbig2_data_in(Jbig2Ctx *ctx, const unsigned char *data, size_t size)
{
const size_t initial_buf_size = 1024;
if (ctx->buf == NULL) {
size_t buf_size = initial_buf_size;
do
buf_size <<= 1;
while (buf_size < size);
ctx->buf = jbig2_new(ctx, byte, buf_size);
if (ctx->buf == NULL) {
return jbig2_error(ctx, JBIG2_SEVERITY_FATAL, -1, "failed to allocate ctx->buf in jbig2_data_in");
}
ctx->buf_size = buf_size;
ctx->buf_rd_ix = 0;
ctx->buf_wr_ix = 0;
} else if (ctx->buf_wr_ix + size > ctx->buf_size) {
if (ctx->buf_rd_ix <= (ctx->buf_size >> 1) && ctx->buf_wr_ix - ctx->buf_rd_ix + size <= ctx->buf_size) {
memmove(ctx->buf, ctx->buf + ctx->buf_rd_ix, ctx->buf_wr_ix - ctx->buf_rd_ix);
} else {
byte *buf;
size_t buf_size = initial_buf_size;
do
buf_size <<= 1;
while (buf_size < ctx->buf_wr_ix - ctx->buf_rd_ix + size);
buf = jbig2_new(ctx, byte, buf_size);
if (buf == NULL) {
return jbig2_error(ctx, JBIG2_SEVERITY_FATAL, -1, "failed to allocate buf in jbig2_data_in");
}
memcpy(buf, ctx->buf + ctx->buf_rd_ix, ctx->buf_wr_ix - ctx->buf_rd_ix);
jbig2_free(ctx->allocator, ctx->buf);
ctx->buf = buf;
ctx->buf_size = buf_size;
}
ctx->buf_wr_ix -= ctx->buf_rd_ix;
ctx->buf_rd_ix = 0;
}
memcpy(ctx->buf + ctx->buf_wr_ix, data, size);
ctx->buf_wr_ix += size;
/* data has now been added to buffer */
for (;;) {
const byte jbig2_id_string[8] = { 0x97, 0x4a, 0x42, 0x32, 0x0d, 0x0a, 0x1a, 0x0a };
Jbig2Segment *segment;
size_t header_size;
int code;
switch (ctx->state) {
case JBIG2_FILE_HEADER:
/* D.4.1 */
if (ctx->buf_wr_ix - ctx->buf_rd_ix < 9)
return 0;
if (memcmp(ctx->buf + ctx->buf_rd_ix, jbig2_id_string, 8))
return jbig2_error(ctx, JBIG2_SEVERITY_FATAL, -1, "Not a JBIG2 file header");
/* D.4.2 */
ctx->file_header_flags = ctx->buf[ctx->buf_rd_ix + 8];
if (ctx->file_header_flags & 0xFC) {
jbig2_error(ctx, JBIG2_SEVERITY_WARNING, -1, "reserved bits (2-7) of file header flags are not zero (0x%02x)", ctx->file_header_flags);
}
/* D.4.3 */
if (!(ctx->file_header_flags & 2)) { /* number of pages is known */
if (ctx->buf_wr_ix - ctx->buf_rd_ix < 13)
return 0;
ctx->n_pages = jbig2_get_uint32(ctx->buf + ctx->buf_rd_ix + 9);
ctx->buf_rd_ix += 13;
if (ctx->n_pages == 1)
jbig2_error(ctx, JBIG2_SEVERITY_INFO, -1, "file header indicates a single page document");
else
jbig2_error(ctx, JBIG2_SEVERITY_INFO, -1, "file header indicates a %d page document", ctx->n_pages);
} else { /* number of pages not known */
ctx->n_pages = 0;
ctx->buf_rd_ix += 9;
}
/* determine the file organization based on the flags - D.4.2 again */
if (ctx->file_header_flags & 1) {
ctx->state = JBIG2_FILE_SEQUENTIAL_HEADER;
jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, -1, "file header indicates sequential organization");
} else {
ctx->state = JBIG2_FILE_RANDOM_HEADERS;
jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, -1, "file header indicates random-access organization");
}
break;
case JBIG2_FILE_SEQUENTIAL_HEADER:
case JBIG2_FILE_RANDOM_HEADERS:
segment = jbig2_parse_segment_header(ctx, ctx->buf + ctx->buf_rd_ix, ctx->buf_wr_ix - ctx->buf_rd_ix, &header_size);
if (segment == NULL)
return 0; /* need more data */
ctx->buf_rd_ix += header_size;
if (ctx->n_segments == ctx->n_segments_max)
ctx->segments = jbig2_renew(ctx, ctx->segments, Jbig2Segment *, (ctx->n_segments_max <<= 2));
ctx->segments[ctx->n_segments++] = segment;
if (ctx->state == JBIG2_FILE_RANDOM_HEADERS) {
if ((segment->flags & 63) == 51) /* end of file */
ctx->state = JBIG2_FILE_RANDOM_BODIES;
} else /* JBIG2_FILE_SEQUENTIAL_HEADER */
ctx->state = JBIG2_FILE_SEQUENTIAL_BODY;
break;
case JBIG2_FILE_SEQUENTIAL_BODY:
case JBIG2_FILE_RANDOM_BODIES:
segment = ctx->segments[ctx->segment_index];
if (segment->data_length > ctx->buf_wr_ix - ctx->buf_rd_ix)
return 0; /* need more data */
code = jbig2_parse_segment(ctx, segment, ctx->buf + ctx->buf_rd_ix);
ctx->buf_rd_ix += segment->data_length;
ctx->segment_index++;
if (ctx->state == JBIG2_FILE_RANDOM_BODIES) {
if (ctx->segment_index == ctx->n_segments)
ctx->state = JBIG2_FILE_EOF;
} else { /* JBIG2_FILE_SEQUENCIAL_BODY */
ctx->state = JBIG2_FILE_SEQUENTIAL_HEADER;
}
if (code < 0) {
ctx->state = JBIG2_FILE_EOF;
return code;
}
break;
case JBIG2_FILE_EOF:
if (ctx->buf_rd_ix == ctx->buf_wr_ix)
return 0;
return jbig2_error(ctx, JBIG2_SEVERITY_WARNING, -1, "Garbage beyond end of file");
}
}
}
|
jbig2_data_in(Jbig2Ctx *ctx, const unsigned char *data, size_t size)
{
const size_t initial_buf_size = 1024;
if (ctx->buf == NULL) {
size_t buf_size = initial_buf_size;
do
buf_size <<= 1;
while (buf_size < size);
ctx->buf = jbig2_new(ctx, byte, buf_size);
if (ctx->buf == NULL) {
return jbig2_error(ctx, JBIG2_SEVERITY_FATAL, -1, "failed to allocate ctx->buf in jbig2_data_in");
}
ctx->buf_size = buf_size;
ctx->buf_rd_ix = 0;
ctx->buf_wr_ix = 0;
} else if (ctx->buf_wr_ix + size > ctx->buf_size) {
if (ctx->buf_rd_ix <= (ctx->buf_size >> 1) && ctx->buf_wr_ix - ctx->buf_rd_ix + size <= ctx->buf_size) {
memmove(ctx->buf, ctx->buf + ctx->buf_rd_ix, ctx->buf_wr_ix - ctx->buf_rd_ix);
} else {
byte *buf;
size_t buf_size = initial_buf_size;
do
buf_size <<= 1;
while (buf_size < ctx->buf_wr_ix - ctx->buf_rd_ix + size);
buf = jbig2_new(ctx, byte, buf_size);
if (buf == NULL) {
return jbig2_error(ctx, JBIG2_SEVERITY_FATAL, -1, "failed to allocate buf in jbig2_data_in");
}
memcpy(buf, ctx->buf + ctx->buf_rd_ix, ctx->buf_wr_ix - ctx->buf_rd_ix);
jbig2_free(ctx->allocator, ctx->buf);
ctx->buf = buf;
ctx->buf_size = buf_size;
}
ctx->buf_wr_ix -= ctx->buf_rd_ix;
ctx->buf_rd_ix = 0;
}
memcpy(ctx->buf + ctx->buf_wr_ix, data, size);
ctx->buf_wr_ix += size;
/* data has now been added to buffer */
for (;;) {
const byte jbig2_id_string[8] = { 0x97, 0x4a, 0x42, 0x32, 0x0d, 0x0a, 0x1a, 0x0a };
Jbig2Segment *segment;
size_t header_size;
int code;
switch (ctx->state) {
case JBIG2_FILE_HEADER:
/* D.4.1 */
if (ctx->buf_wr_ix - ctx->buf_rd_ix < 9)
return 0;
if (memcmp(ctx->buf + ctx->buf_rd_ix, jbig2_id_string, 8))
return jbig2_error(ctx, JBIG2_SEVERITY_FATAL, -1, "Not a JBIG2 file header");
/* D.4.2 */
ctx->file_header_flags = ctx->buf[ctx->buf_rd_ix + 8];
if (ctx->file_header_flags & 0xFC) {
jbig2_error(ctx, JBIG2_SEVERITY_WARNING, -1, "reserved bits (2-7) of file header flags are not zero (0x%02x)", ctx->file_header_flags);
}
/* D.4.3 */
if (!(ctx->file_header_flags & 2)) { /* number of pages is known */
if (ctx->buf_wr_ix - ctx->buf_rd_ix < 13)
return 0;
ctx->n_pages = jbig2_get_uint32(ctx->buf + ctx->buf_rd_ix + 9);
ctx->buf_rd_ix += 13;
if (ctx->n_pages == 1)
jbig2_error(ctx, JBIG2_SEVERITY_INFO, -1, "file header indicates a single page document");
else
jbig2_error(ctx, JBIG2_SEVERITY_INFO, -1, "file header indicates a %d page document", ctx->n_pages);
} else { /* number of pages not known */
ctx->n_pages = 0;
ctx->buf_rd_ix += 9;
}
/* determine the file organization based on the flags - D.4.2 again */
if (ctx->file_header_flags & 1) {
ctx->state = JBIG2_FILE_SEQUENTIAL_HEADER;
jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, -1, "file header indicates sequential organization");
} else {
ctx->state = JBIG2_FILE_RANDOM_HEADERS;
jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, -1, "file header indicates random-access organization");
}
break;
case JBIG2_FILE_SEQUENTIAL_HEADER:
case JBIG2_FILE_RANDOM_HEADERS:
segment = jbig2_parse_segment_header(ctx, ctx->buf + ctx->buf_rd_ix, ctx->buf_wr_ix - ctx->buf_rd_ix, &header_size);
if (segment == NULL)
return 0; /* need more data */
ctx->buf_rd_ix += header_size;
if (ctx->n_segments == ctx->n_segments_max)
ctx->segments = jbig2_renew(ctx, ctx->segments, Jbig2Segment *, (ctx->n_segments_max <<= 2));
ctx->segments[ctx->n_segments++] = segment;
if (ctx->state == JBIG2_FILE_RANDOM_HEADERS) {
if ((segment->flags & 63) == 51) /* end of file */
ctx->state = JBIG2_FILE_RANDOM_BODIES;
} else /* JBIG2_FILE_SEQUENTIAL_HEADER */
ctx->state = JBIG2_FILE_SEQUENTIAL_BODY;
break;
case JBIG2_FILE_SEQUENTIAL_BODY:
case JBIG2_FILE_RANDOM_BODIES:
segment = ctx->segments[ctx->segment_index];
if (segment->data_length > ctx->buf_wr_ix - ctx->buf_rd_ix)
return 0; /* need more data */
code = jbig2_parse_segment(ctx, segment, ctx->buf + ctx->buf_rd_ix);
ctx->buf_rd_ix += segment->data_length;
ctx->segment_index++;
if (ctx->state == JBIG2_FILE_RANDOM_BODIES) {
if (ctx->segment_index == ctx->n_segments)
ctx->state = JBIG2_FILE_EOF;
} else { /* JBIG2_FILE_SEQUENCIAL_BODY */
ctx->state = JBIG2_FILE_SEQUENTIAL_HEADER;
}
if (code < 0) {
ctx->state = JBIG2_FILE_EOF;
return code;
}
break;
case JBIG2_FILE_EOF:
if (ctx->buf_rd_ix == ctx->buf_wr_ix)
return 0;
return jbig2_error(ctx, JBIG2_SEVERITY_WARNING, -1, "Garbage beyond end of file");
}
}
}
|
C
|
ghostscript
| 0 |
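The jbig2_data_in routine above keeps a single input buffer and either compacts it (sliding the unread bytes to the front) or grows it by doubling until the pending data fits. Below is a minimal, self-contained sketch of the same compact-or-double pattern; the stream_buf type and stream_buf_append name are hypothetical, not part of jbig2dec.

#include <stdlib.h>
#include <string.h>

typedef struct {
    unsigned char *buf;        /* backing storage */
    size_t size, rd, wr;       /* capacity, read index, write index */
} stream_buf;

/* Append n bytes, compacting or doubling as jbig2_data_in does.
 * Returns 0 on success, -1 on allocation failure. */
static int stream_buf_append(stream_buf *s, const unsigned char *data, size_t n)
{
    if (s->wr + n > s->size) {
        size_t used = s->wr - s->rd;
        if (s->rd <= s->size / 2 && used + n <= s->size) {
            /* cheap path: slide the unread bytes to the front */
            memmove(s->buf, s->buf + s->rd, used);
        } else {
            /* grow: double the capacity until unread + new data fit */
            size_t cap = s->size ? s->size : 1024;
            while (cap < used + n)
                cap <<= 1;
            unsigned char *nb = malloc(cap);
            if (nb == NULL)
                return -1;
            if (used)
                memcpy(nb, s->buf + s->rd, used);
            free(s->buf);
            s->buf = nb;
            s->size = cap;
        }
        s->wr = used;
        s->rd = 0;
    }
    memcpy(s->buf + s->wr, data, n);
    s->wr += n;
    return 0;
}

For hostile inputs, the doubling loop and the wr + n sum should additionally be guarded against size_t overflow, a check the loop shown in the record also omits.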
CVE-2014-6269
|
https://www.cvedetails.com/cve/CVE-2014-6269/
|
CWE-189
|
https://git.haproxy.org/?p=haproxy-1.5.git;a=commitdiff;h=b4d05093bc89f71377230228007e69a1434c1a0c
|
b4d05093bc89f71377230228007e69a1434c1a0c
| null |
int http_request_forward_body(struct session *s, struct channel *req, int an_bit)
{
struct http_txn *txn = &s->txn;
struct http_msg *msg = &s->txn.req;
if (unlikely(msg->msg_state < HTTP_MSG_BODY))
return 0;
if ((req->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) ||
((req->flags & CF_SHUTW) && (req->to_forward || req->buf->o))) {
/* Output closed while we were sending data. We must abort and
* wake the other side up.
*/
msg->msg_state = HTTP_MSG_ERROR;
http_resync_states(s);
return 1;
}
/* Note that we don't have to send 100-continue back because we don't
* need the data to complete our job, and it's up to the server to
* decide whether to return 100, 417 or anything else in return of
* an "Expect: 100-continue" header.
*/
if (msg->sov > 0) {
/* we have msg->sov which points to the first byte of message
* body, and req->buf.p still points to the beginning of the
* message. We forward the headers now, as we don't need them
* anymore, and we want to flush them.
*/
b_adv(req->buf, msg->sov);
msg->next -= msg->sov;
msg->sov = 0;
/* The previous analysers guarantee that the state is somewhere
* between MSG_BODY and the first MSG_DATA. So msg->sol and
* msg->next are always correct.
*/
if (msg->msg_state < HTTP_MSG_CHUNK_SIZE) {
if (msg->flags & HTTP_MSGF_TE_CHNK)
msg->msg_state = HTTP_MSG_CHUNK_SIZE;
else
msg->msg_state = HTTP_MSG_DATA;
}
}
/* Some post-connect processing might want us to refrain from starting to
* forward data. Currently, the only reason for this is "balance url_param"
* whichs need to parse/process the request after we've enabled forwarding.
*/
if (unlikely(msg->flags & HTTP_MSGF_WAIT_CONN)) {
if (!(s->rep->flags & CF_READ_ATTACHED)) {
channel_auto_connect(req);
req->flags |= CF_WAKE_CONNECT;
goto missing_data;
}
msg->flags &= ~HTTP_MSGF_WAIT_CONN;
}
/* in most states, we should abort in case of early close */
channel_auto_close(req);
if (req->to_forward) {
/* We can't process the buffer's contents yet */
req->flags |= CF_WAKE_WRITE;
goto missing_data;
}
while (1) {
if (msg->msg_state == HTTP_MSG_DATA) {
/* must still forward */
/* we may have some pending data starting at req->buf->p */
if (msg->chunk_len > req->buf->i - msg->next) {
req->flags |= CF_WAKE_WRITE;
goto missing_data;
}
msg->next += msg->chunk_len;
msg->chunk_len = 0;
/* nothing left to forward */
if (msg->flags & HTTP_MSGF_TE_CHNK)
msg->msg_state = HTTP_MSG_CHUNK_CRLF;
else
msg->msg_state = HTTP_MSG_DONE;
}
else if (msg->msg_state == HTTP_MSG_CHUNK_SIZE) {
/* read the chunk size and assign it to ->chunk_len, then
* set ->next to point to the body and switch to DATA or
* TRAILERS state.
*/
int ret = http_parse_chunk_size(msg);
if (ret == 0)
goto missing_data;
else if (ret < 0) {
session_inc_http_err_ctr(s);
if (msg->err_pos >= 0)
http_capture_bad_message(&s->fe->invalid_req, s, msg, HTTP_MSG_CHUNK_SIZE, s->be);
goto return_bad_req;
}
/* otherwise we're in HTTP_MSG_DATA or HTTP_MSG_TRAILERS state */
}
else if (msg->msg_state == HTTP_MSG_CHUNK_CRLF) {
/* we want the CRLF after the data */
int ret = http_skip_chunk_crlf(msg);
if (ret == 0)
goto missing_data;
else if (ret < 0) {
session_inc_http_err_ctr(s);
if (msg->err_pos >= 0)
http_capture_bad_message(&s->fe->invalid_req, s, msg, HTTP_MSG_CHUNK_CRLF, s->be);
goto return_bad_req;
}
/* we're in MSG_CHUNK_SIZE now */
}
else if (msg->msg_state == HTTP_MSG_TRAILERS) {
int ret = http_forward_trailers(msg);
if (ret == 0)
goto missing_data;
else if (ret < 0) {
session_inc_http_err_ctr(s);
if (msg->err_pos >= 0)
http_capture_bad_message(&s->fe->invalid_req, s, msg, HTTP_MSG_TRAILERS, s->be);
goto return_bad_req;
}
/* we're in HTTP_MSG_DONE now */
}
else {
int old_state = msg->msg_state;
/* other states, DONE...TUNNEL */
/* we may have some pending data starting at req->buf->p
* such as last chunk of data or trailers.
*/
b_adv(req->buf, msg->next);
if (unlikely(!(s->req->flags & CF_WROTE_DATA)))
msg->sov -= msg->next;
msg->next = 0;
/* for keep-alive we don't want to forward closes on DONE */
if ((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_KAL ||
(txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_SCL)
channel_dont_close(req);
if (http_resync_states(s)) {
/* some state changes occurred, maybe the analyser
* was disabled too.
*/
if (unlikely(msg->msg_state == HTTP_MSG_ERROR)) {
if (req->flags & CF_SHUTW) {
/* request errors are most likely due to
* the server aborting the transfer.
*/
goto aborted_xfer;
}
if (msg->err_pos >= 0)
http_capture_bad_message(&s->fe->invalid_req, s, msg, old_state, s->be);
goto return_bad_req;
}
return 1;
}
/* If "option abortonclose" is set on the backend, we
* want to monitor the client's connection and forward
* any shutdown notification to the server, which will
* decide whether to close or to go on processing the
* request.
*/
if (s->be->options & PR_O_ABRT_CLOSE) {
channel_auto_read(req);
channel_auto_close(req);
}
else if (s->txn.meth == HTTP_METH_POST) {
/* POST requests may require to read extra CRLF
* sent by broken browsers and which could cause
* an RST to be sent upon close on some systems
* (eg: Linux).
*/
channel_auto_read(req);
}
return 0;
}
}
missing_data:
/* we may have some pending data starting at req->buf->p */
b_adv(req->buf, msg->next);
if (unlikely(!(s->req->flags & CF_WROTE_DATA)))
msg->sov -= msg->next + MIN(msg->chunk_len, req->buf->i);
msg->next = 0;
msg->chunk_len -= channel_forward(req, msg->chunk_len);
/* stop waiting for data if the input is closed before the end */
if (req->flags & CF_SHUTR) {
if (!(s->flags & SN_ERR_MASK))
s->flags |= SN_ERR_CLICL;
if (!(s->flags & SN_FINST_MASK)) {
if (txn->rsp.msg_state < HTTP_MSG_ERROR)
s->flags |= SN_FINST_H;
else
s->flags |= SN_FINST_D;
}
s->fe->fe_counters.cli_aborts++;
s->be->be_counters.cli_aborts++;
if (objt_server(s->target))
objt_server(s->target)->counters.cli_aborts++;
goto return_bad_req_stats_ok;
}
/* waiting for the last bits to leave the buffer */
if (req->flags & CF_SHUTW)
goto aborted_xfer;
/* When TE: chunked is used, we need to get there again to parse remaining
* chunks even if the client has closed, so we don't want to set CF_DONTCLOSE.
*/
if (msg->flags & HTTP_MSGF_TE_CHNK)
channel_dont_close(req);
/* We know that more data are expected, but we couldn't send more that
* what we did. So we always set the CF_EXPECT_MORE flag so that the
* system knows it must not set a PUSH on this first part. Interactive
* modes are already handled by the stream sock layer. We must not do
* this in content-length mode because it could present the MSG_MORE
* flag with the last block of forwarded data, which would cause an
* additional delay to be observed by the receiver.
*/
if (msg->flags & HTTP_MSGF_TE_CHNK)
req->flags |= CF_EXPECT_MORE;
return 0;
return_bad_req: /* let's centralize all bad requests */
s->fe->fe_counters.failed_req++;
if (s->listener->counters)
s->listener->counters->failed_req++;
return_bad_req_stats_ok:
/* we may have some pending data starting at req->buf->p */
b_adv(req->buf, msg->next);
msg->next = 0;
txn->req.msg_state = HTTP_MSG_ERROR;
if (txn->status) {
/* Note: we don't send any error if some data were already sent */
stream_int_retnclose(req->prod, NULL);
} else {
txn->status = 400;
stream_int_retnclose(req->prod, http_error_message(s, HTTP_ERR_400));
}
req->analysers = 0;
s->rep->analysers = 0; /* we're in data phase, we want to abort both directions */
if (!(s->flags & SN_ERR_MASK))
s->flags |= SN_ERR_PRXCOND;
if (!(s->flags & SN_FINST_MASK)) {
if (txn->rsp.msg_state < HTTP_MSG_ERROR)
s->flags |= SN_FINST_H;
else
s->flags |= SN_FINST_D;
}
return 0;
aborted_xfer:
txn->req.msg_state = HTTP_MSG_ERROR;
if (txn->status) {
/* Note: we don't send any error if some data were already sent */
stream_int_retnclose(req->prod, NULL);
} else {
txn->status = 502;
stream_int_retnclose(req->prod, http_error_message(s, HTTP_ERR_502));
}
req->analysers = 0;
s->rep->analysers = 0; /* we're in data phase, we want to abort both directions */
s->fe->fe_counters.srv_aborts++;
s->be->be_counters.srv_aborts++;
if (objt_server(s->target))
objt_server(s->target)->counters.srv_aborts++;
if (!(s->flags & SN_ERR_MASK))
s->flags |= SN_ERR_SRVCL;
if (!(s->flags & SN_FINST_MASK)) {
if (txn->rsp.msg_state < HTTP_MSG_ERROR)
s->flags |= SN_FINST_H;
else
s->flags |= SN_FINST_D;
}
return 0;
}
|
int http_request_forward_body(struct session *s, struct channel *req, int an_bit)
{
struct http_txn *txn = &s->txn;
struct http_msg *msg = &s->txn.req;
if (unlikely(msg->msg_state < HTTP_MSG_BODY))
return 0;
if ((req->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) ||
((req->flags & CF_SHUTW) && (req->to_forward || req->buf->o))) {
/* Output closed while we were sending data. We must abort and
* wake the other side up.
*/
msg->msg_state = HTTP_MSG_ERROR;
http_resync_states(s);
return 1;
}
/* Note that we don't have to send 100-continue back because we don't
* need the data to complete our job, and it's up to the server to
* decide whether to return 100, 417 or anything else in return of
* an "Expect: 100-continue" header.
*/
if (msg->sov > 0) {
/* we have msg->sov which points to the first byte of message
* body, and req->buf.p still points to the beginning of the
* message. We forward the headers now, as we don't need them
* anymore, and we want to flush them.
*/
b_adv(req->buf, msg->sov);
msg->next -= msg->sov;
msg->sov = 0;
/* The previous analysers guarantee that the state is somewhere
* between MSG_BODY and the first MSG_DATA. So msg->sol and
* msg->next are always correct.
*/
if (msg->msg_state < HTTP_MSG_CHUNK_SIZE) {
if (msg->flags & HTTP_MSGF_TE_CHNK)
msg->msg_state = HTTP_MSG_CHUNK_SIZE;
else
msg->msg_state = HTTP_MSG_DATA;
}
}
/* Some post-connect processing might want us to refrain from starting to
* forward data. Currently, the only reason for this is "balance url_param"
* whichs need to parse/process the request after we've enabled forwarding.
*/
if (unlikely(msg->flags & HTTP_MSGF_WAIT_CONN)) {
if (!(s->rep->flags & CF_READ_ATTACHED)) {
channel_auto_connect(req);
req->flags |= CF_WAKE_CONNECT;
goto missing_data;
}
msg->flags &= ~HTTP_MSGF_WAIT_CONN;
}
/* in most states, we should abort in case of early close */
channel_auto_close(req);
if (req->to_forward) {
/* We can't process the buffer's contents yet */
req->flags |= CF_WAKE_WRITE;
goto missing_data;
}
while (1) {
if (msg->msg_state == HTTP_MSG_DATA) {
/* must still forward */
/* we may have some pending data starting at req->buf->p */
if (msg->chunk_len > req->buf->i - msg->next) {
req->flags |= CF_WAKE_WRITE;
goto missing_data;
}
msg->next += msg->chunk_len;
msg->chunk_len = 0;
/* nothing left to forward */
if (msg->flags & HTTP_MSGF_TE_CHNK)
msg->msg_state = HTTP_MSG_CHUNK_CRLF;
else
msg->msg_state = HTTP_MSG_DONE;
}
else if (msg->msg_state == HTTP_MSG_CHUNK_SIZE) {
/* read the chunk size and assign it to ->chunk_len, then
* set ->next to point to the body and switch to DATA or
* TRAILERS state.
*/
int ret = http_parse_chunk_size(msg);
if (ret == 0)
goto missing_data;
else if (ret < 0) {
session_inc_http_err_ctr(s);
if (msg->err_pos >= 0)
http_capture_bad_message(&s->fe->invalid_req, s, msg, HTTP_MSG_CHUNK_SIZE, s->be);
goto return_bad_req;
}
/* otherwise we're in HTTP_MSG_DATA or HTTP_MSG_TRAILERS state */
}
else if (msg->msg_state == HTTP_MSG_CHUNK_CRLF) {
/* we want the CRLF after the data */
int ret = http_skip_chunk_crlf(msg);
if (ret == 0)
goto missing_data;
else if (ret < 0) {
session_inc_http_err_ctr(s);
if (msg->err_pos >= 0)
http_capture_bad_message(&s->fe->invalid_req, s, msg, HTTP_MSG_CHUNK_CRLF, s->be);
goto return_bad_req;
}
/* we're in MSG_CHUNK_SIZE now */
}
else if (msg->msg_state == HTTP_MSG_TRAILERS) {
int ret = http_forward_trailers(msg);
if (ret == 0)
goto missing_data;
else if (ret < 0) {
session_inc_http_err_ctr(s);
if (msg->err_pos >= 0)
http_capture_bad_message(&s->fe->invalid_req, s, msg, HTTP_MSG_TRAILERS, s->be);
goto return_bad_req;
}
/* we're in HTTP_MSG_DONE now */
}
else {
int old_state = msg->msg_state;
/* other states, DONE...TUNNEL */
/* we may have some pending data starting at req->buf->p
* such as last chunk of data or trailers.
*/
b_adv(req->buf, msg->next);
if (unlikely(!(s->rep->flags & CF_READ_ATTACHED)))
msg->sov -= msg->next;
msg->next = 0;
/* for keep-alive we don't want to forward closes on DONE */
if ((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_KAL ||
(txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_SCL)
channel_dont_close(req);
if (http_resync_states(s)) {
/* some state changes occurred, maybe the analyser
* was disabled too.
*/
if (unlikely(msg->msg_state == HTTP_MSG_ERROR)) {
if (req->flags & CF_SHUTW) {
/* request errors are most likely due to
* the server aborting the transfer.
*/
goto aborted_xfer;
}
if (msg->err_pos >= 0)
http_capture_bad_message(&s->fe->invalid_req, s, msg, old_state, s->be);
goto return_bad_req;
}
return 1;
}
/* If "option abortonclose" is set on the backend, we
* want to monitor the client's connection and forward
* any shutdown notification to the server, which will
* decide whether to close or to go on processing the
* request.
*/
if (s->be->options & PR_O_ABRT_CLOSE) {
channel_auto_read(req);
channel_auto_close(req);
}
else if (s->txn.meth == HTTP_METH_POST) {
/* POST requests may require to read extra CRLF
* sent by broken browsers and which could cause
* an RST to be sent upon close on some systems
* (eg: Linux).
*/
channel_auto_read(req);
}
return 0;
}
}
missing_data:
/* we may have some pending data starting at req->buf->p */
b_adv(req->buf, msg->next);
if (unlikely(!(s->rep->flags & CF_READ_ATTACHED)))
msg->sov -= msg->next + MIN(msg->chunk_len, req->buf->i);
msg->next = 0;
msg->chunk_len -= channel_forward(req, msg->chunk_len);
/* stop waiting for data if the input is closed before the end */
if (req->flags & CF_SHUTR) {
if (!(s->flags & SN_ERR_MASK))
s->flags |= SN_ERR_CLICL;
if (!(s->flags & SN_FINST_MASK)) {
if (txn->rsp.msg_state < HTTP_MSG_ERROR)
s->flags |= SN_FINST_H;
else
s->flags |= SN_FINST_D;
}
s->fe->fe_counters.cli_aborts++;
s->be->be_counters.cli_aborts++;
if (objt_server(s->target))
objt_server(s->target)->counters.cli_aborts++;
goto return_bad_req_stats_ok;
}
/* waiting for the last bits to leave the buffer */
if (req->flags & CF_SHUTW)
goto aborted_xfer;
/* When TE: chunked is used, we need to get there again to parse remaining
* chunks even if the client has closed, so we don't want to set CF_DONTCLOSE.
*/
if (msg->flags & HTTP_MSGF_TE_CHNK)
channel_dont_close(req);
/* We know that more data are expected, but we couldn't send more that
* what we did. So we always set the CF_EXPECT_MORE flag so that the
* system knows it must not set a PUSH on this first part. Interactive
* modes are already handled by the stream sock layer. We must not do
* this in content-length mode because it could present the MSG_MORE
* flag with the last block of forwarded data, which would cause an
* additional delay to be observed by the receiver.
*/
if (msg->flags & HTTP_MSGF_TE_CHNK)
req->flags |= CF_EXPECT_MORE;
return 0;
return_bad_req: /* let's centralize all bad requests */
s->fe->fe_counters.failed_req++;
if (s->listener->counters)
s->listener->counters->failed_req++;
return_bad_req_stats_ok:
/* we may have some pending data starting at req->buf->p */
b_adv(req->buf, msg->next);
msg->next = 0;
txn->req.msg_state = HTTP_MSG_ERROR;
if (txn->status) {
/* Note: we don't send any error if some data were already sent */
stream_int_retnclose(req->prod, NULL);
} else {
txn->status = 400;
stream_int_retnclose(req->prod, http_error_message(s, HTTP_ERR_400));
}
req->analysers = 0;
s->rep->analysers = 0; /* we're in data phase, we want to abort both directions */
if (!(s->flags & SN_ERR_MASK))
s->flags |= SN_ERR_PRXCOND;
if (!(s->flags & SN_FINST_MASK)) {
if (txn->rsp.msg_state < HTTP_MSG_ERROR)
s->flags |= SN_FINST_H;
else
s->flags |= SN_FINST_D;
}
return 0;
aborted_xfer:
txn->req.msg_state = HTTP_MSG_ERROR;
if (txn->status) {
/* Note: we don't send any error if some data were already sent */
stream_int_retnclose(req->prod, NULL);
} else {
txn->status = 502;
stream_int_retnclose(req->prod, http_error_message(s, HTTP_ERR_502));
}
req->analysers = 0;
s->rep->analysers = 0; /* we're in data phase, we want to abort both directions */
s->fe->fe_counters.srv_aborts++;
s->be->be_counters.srv_aborts++;
if (objt_server(s->target))
objt_server(s->target)->counters.srv_aborts++;
if (!(s->flags & SN_ERR_MASK))
s->flags |= SN_ERR_SRVCL;
if (!(s->flags & SN_FINST_MASK)) {
if (txn->rsp.msg_state < HTTP_MSG_ERROR)
s->flags |= SN_FINST_H;
else
s->flags |= SN_FINST_D;
}
return 0;
}
|
C
|
haproxy
| 1 |
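This forwarding loop leans on http_parse_chunk_size()/http_skip_chunk_crlf() to walk the chunked body, and the record's commit message is absent, so here is a hedged sketch of what parsing a chunk-size line involves. It mirrors the same return convention as the loop above (0 = need more data, negative = error), but it is illustrative only: chunk extensions after the size are not handled, and parse_chunk_size is a hypothetical name, not HAProxy's implementation.

#include <stddef.h>

/* Parse the leading hexadecimal chunk-size token of an HTTP/1.1 chunked
 * body ("1a3f\r\n..."). Returns bytes consumed, 0 if more input is
 * needed, or -1 on a syntax or overflow error. */
static int parse_chunk_size(const char *buf, size_t len, unsigned long long *out)
{
    unsigned long long size = 0;
    size_t i = 0;

    while (i < len) {
        char c = buf[i];
        int d;
        if (c >= '0' && c <= '9')      d = c - '0';
        else if (c >= 'a' && c <= 'f') d = c - 'a' + 10;
        else if (c >= 'A' && c <= 'F') d = c - 'A' + 10;
        else break;
        if (size > (~0ULL >> 4))       /* next shift would overflow */
            return -1;
        size = (size << 4) | (unsigned)d;
        i++;
    }
    if (i == 0)
        return (len == 0) ? 0 : -1;    /* empty input vs. no hex digit */
    if (i + 1 >= len)
        return 0;                      /* terminator not yet buffered */
    if (buf[i] != '\r' || buf[i + 1] != '\n')
        return -1;                     /* malformed chunk-size line */
    *out = size;
    return (int)(i + 2);
}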
CVE-2015-4170
|
https://www.cvedetails.com/cve/CVE-2015-4170/
|
CWE-362
|
https://github.com/torvalds/linux/commit/cf872776fc84128bb779ce2b83a37c884c3203ae
|
cf872776fc84128bb779ce2b83a37c884c3203ae
|
tty: Fix hang at ldsem_down_read()
When a controlling tty is being hung up and the hang up is
waiting for a just-signalled tty reader or writer to exit, and a new tty
reader/writer tries to acquire an ldisc reference concurrently with the
ldisc reference release from the signalled reader/writer, the hangup
can hang. The new reader/writer is sleeping in ldsem_down_read() and the
hangup is sleeping in ldsem_down_write() [1].
The new reader/writer fails to wakeup the waiting hangup because the
wrong lock count value is checked (the old lock count rather than the new
lock count) to see if the lock is unowned.
Change helper function to return the new lock count if the cmpxchg was
successful; document this behavior.
[1] edited dmesg log from reporter
SysRq : Show Blocked State
task PC stack pid father
systemd D ffff88040c4f0000 0 1 0 0x00000000
ffff88040c49fbe0 0000000000000046 ffff88040c4a0000 ffff88040c49ffd8
00000000001d3980 00000000001d3980 ffff88040c4a0000 ffff88040593d840
ffff88040c49fb40 ffffffff810a4cc0 0000000000000006 0000000000000023
Call Trace:
[<ffffffff810a4cc0>] ? sched_clock_cpu+0x9f/0xe4
[<ffffffff810a4cc0>] ? sched_clock_cpu+0x9f/0xe4
[<ffffffff810a4cc0>] ? sched_clock_cpu+0x9f/0xe4
[<ffffffff810a4cc0>] ? sched_clock_cpu+0x9f/0xe4
[<ffffffff817a6649>] schedule+0x24/0x5e
[<ffffffff817a588b>] schedule_timeout+0x15b/0x1ec
[<ffffffff810a4cc0>] ? sched_clock_cpu+0x9f/0xe4
[<ffffffff817aa691>] ? _raw_spin_unlock_irq+0x24/0x26
[<ffffffff817aa10c>] down_read_failed+0xe3/0x1b9
[<ffffffff817aa26d>] ldsem_down_read+0x8b/0xa5
[<ffffffff8142b5ca>] ? tty_ldisc_ref_wait+0x1b/0x44
[<ffffffff8142b5ca>] tty_ldisc_ref_wait+0x1b/0x44
[<ffffffff81423f5b>] tty_write+0x7d/0x28a
[<ffffffff814241f5>] redirected_tty_write+0x8d/0x98
[<ffffffff81424168>] ? tty_write+0x28a/0x28a
[<ffffffff8115d03f>] do_loop_readv_writev+0x56/0x79
[<ffffffff8115e604>] do_readv_writev+0x1b0/0x1ff
[<ffffffff8116ea0b>] ? do_vfs_ioctl+0x32a/0x489
[<ffffffff81167d9d>] ? final_putname+0x1d/0x3a
[<ffffffff8115e6c7>] vfs_writev+0x2e/0x49
[<ffffffff8115e7d3>] SyS_writev+0x47/0xaa
[<ffffffff817ab822>] system_call_fastpath+0x16/0x1b
bash D ffffffff81c104c0 0 5469 5302 0x00000082
ffff8800cf817ac0 0000000000000046 ffff8804086b22a0 ffff8800cf817fd8
00000000001d3980 00000000001d3980 ffff8804086b22a0 ffff8800cf817a48
000000000000b9a0 ffff8800cf817a78 ffffffff81004675 ffff8800cf817a44
Call Trace:
[<ffffffff81004675>] ? dump_trace+0x165/0x29c
[<ffffffff810a4cc0>] ? sched_clock_cpu+0x9f/0xe4
[<ffffffff8100edda>] ? save_stack_trace+0x26/0x41
[<ffffffff817a6649>] schedule+0x24/0x5e
[<ffffffff817a588b>] schedule_timeout+0x15b/0x1ec
[<ffffffff810a4cc0>] ? sched_clock_cpu+0x9f/0xe4
[<ffffffff817a9f03>] ? down_write_failed+0xa3/0x1c9
[<ffffffff817aa691>] ? _raw_spin_unlock_irq+0x24/0x26
[<ffffffff817a9f0b>] down_write_failed+0xab/0x1c9
[<ffffffff817aa300>] ldsem_down_write+0x79/0xb1
[<ffffffff817aada3>] ? tty_ldisc_lock_pair_timeout+0xa5/0xd9
[<ffffffff817aada3>] tty_ldisc_lock_pair_timeout+0xa5/0xd9
[<ffffffff8142bf33>] tty_ldisc_hangup+0xc4/0x218
[<ffffffff81423ab3>] __tty_hangup+0x2e2/0x3ed
[<ffffffff81424a76>] disassociate_ctty+0x63/0x226
[<ffffffff81078aa7>] do_exit+0x79f/0xa11
[<ffffffff81086bdb>] ? get_signal_to_deliver+0x206/0x62f
[<ffffffff810b4bfb>] ? lock_release_holdtime.part.8+0xf/0x16e
[<ffffffff81079b05>] do_group_exit+0x47/0xb5
[<ffffffff81086c16>] get_signal_to_deliver+0x241/0x62f
[<ffffffff810020a7>] do_signal+0x43/0x59d
[<ffffffff810f2af7>] ? __audit_syscall_exit+0x21a/0x2a8
[<ffffffff810b4bfb>] ? lock_release_holdtime.part.8+0xf/0x16e
[<ffffffff81002655>] do_notify_resume+0x54/0x6c
[<ffffffff817abaf8>] int_signal+0x12/0x17
Reported-by: Sami Farin <[email protected]>
Cc: <[email protected]> # 3.12.x
Signed-off-by: Peter Hurley <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
static inline int __ldsem_down_read_nested(struct ld_semaphore *sem,
int subclass, long timeout)
{
long count;
lockdep_acquire_read(sem, subclass, 0, _RET_IP_);
count = ldsem_atomic_update(LDSEM_READ_BIAS, sem);
if (count <= 0) {
lock_stat(sem, contended);
if (!down_read_failed(sem, count, timeout)) {
lockdep_release(sem, 1, _RET_IP_);
return 0;
}
}
lock_stat(sem, acquired);
return 1;
}
|
static inline int __ldsem_down_read_nested(struct ld_semaphore *sem,
int subclass, long timeout)
{
long count;
lockdep_acquire_read(sem, subclass, 0, _RET_IP_);
count = ldsem_atomic_update(LDSEM_READ_BIAS, sem);
if (count <= 0) {
lock_stat(sem, contended);
if (!down_read_failed(sem, count, timeout)) {
lockdep_release(sem, 1, _RET_IP_);
return 0;
}
}
lock_stat(sem, acquired);
return 1;
}
|
C
|
linux
| 0 |
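The fix described above is small but subtle: the cmpxchg helper must report the new lock count on success, because the caller decides whether to wake the waiting hangup based on whether the lock just became unowned. A hedged C11-atomics sketch of the corrected behavior follows; ldsem_try_update is a hypothetical name standing in for the kernel's ldsem_cmpxchg.

#include <stdatomic.h>
#include <stdbool.h>

/* On success, *old is updated to the NEW count so the caller's
 * "is the lock now unowned?" test sees the post-exchange value --
 * checking the stale pre-update snapshot was the bug. */
static bool ldsem_try_update(long *old, long newval, _Atomic long *count)
{
    if (atomic_compare_exchange_strong(count, old, newval)) {
        *old = newval;   /* report the new count, not the stale one */
        return true;
    }
    /* on failure, C11 has already stored the current count into *old */
    return false;
}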
CVE-2013-2017
|
https://www.cvedetails.com/cve/CVE-2013-2017/
|
CWE-399
|
https://github.com/torvalds/linux/commit/6ec82562ffc6f297d0de36d65776cff8e5704867
|
6ec82562ffc6f297d0de36d65776cff8e5704867
|
veth: Dont kfree_skb() after dev_forward_skb()
In case of congestion, netif_rx() frees the skb, so we must assume
dev_forward_skb() also consume skb.
Bug introduced by commit 445409602c092
(veth: move loopback logic to common location)
We must change dev_forward_skb() to always consume skb, and veth to not
double free it.
Bug report : http://marc.info/?l=linux-netdev&m=127310770900442&w=3
Reported-by: Martín Ferrari <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
const struct net_device_stats *dev_get_stats(struct net_device *dev)
{
const struct net_device_ops *ops = dev->netdev_ops;
if (ops->ndo_get_stats)
return ops->ndo_get_stats(dev);
dev_txq_stats_fold(dev, &dev->stats);
return &dev->stats;
}
|
const struct net_device_stats *dev_get_stats(struct net_device *dev)
{
const struct net_device_ops *ops = dev->netdev_ops;
if (ops->ndo_get_stats)
return ops->ndo_get_stats(dev);
dev_txq_stats_fold(dev, &dev->stats);
return &dev->stats;
}
|
C
|
linux
| 0 |
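The commit's rule is an ownership contract: because netif_rx() frees the skb on congestion, dev_forward_skb() must be treated as consuming the buffer on every path, and callers must never free it afterwards. A self-contained toy model of that contract is sketched below; struct buf, queue_rx and forward_buf are hypothetical stand-ins for sk_buff, netif_rx and dev_forward_skb.

#include <stdlib.h>

struct buf { int len; };

/* Stand-in for netif_rx(): delivered or dropped on congestion, it
 * frees the buffer itself, so ownership always transfers here. */
static int queue_rx(struct buf *b)
{
    free(b);
    return 0;
}

/* Stand-in for dev_forward_skb(): must consume b on *every* path. */
static int forward_buf(struct buf *b)
{
    if (b->len <= 0) {   /* validation-failure path */
        free(b);         /* still consume: the caller must not free */
        return -1;
    }
    return queue_rx(b);
}

int main(void)
{
    struct buf *b = malloc(sizeof *b);
    if (!b) return 1;
    b->len = 64;
    forward_buf(b);
    /* The veth bug: a free(b) here double-frees on the congestion
     * path. After the fix, callers never touch b again. */
    return 0;
}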
CVE-2018-13006
|
https://www.cvedetails.com/cve/CVE-2018-13006/
|
CWE-125
|
https://github.com/gpac/gpac/commit/bceb03fd2be95097a7b409ea59914f332fb6bc86
|
bceb03fd2be95097a7b409ea59914f332fb6bc86
|
fixed 2 possible heap overflows (inc. #1088)
|
GF_Err gnra_Size(GF_Box *s)
{
GF_GenericAudioSampleEntryBox *ptr = (GF_GenericAudioSampleEntryBox *)s;
s->type = GF_ISOM_BOX_TYPE_GNRA;
gf_isom_audio_sample_entry_size((GF_AudioSampleEntryBox *)s);
ptr->size += ptr->data_size;
return GF_OK;
}
|
GF_Err gnra_Size(GF_Box *s)
{
GF_GenericAudioSampleEntryBox *ptr = (GF_GenericAudioSampleEntryBox *)s;
s->type = GF_ISOM_BOX_TYPE_GNRA;
gf_isom_audio_sample_entry_size((GF_AudioSampleEntryBox *)s);
ptr->size += ptr->data_size;
return GF_OK;
}
|
C
|
gpac
| 0 |
CVE-2012-2895
|
https://www.cvedetails.com/cve/CVE-2012-2895/
|
CWE-119
|
https://github.com/chromium/chromium/commit/16dcd30c215801941d9890859fd79a234128fc3e
|
16dcd30c215801941d9890859fd79a234128fc3e
|
Refactors to simplify rename pathway in DownloadFileManager.
This is https://chromiumcodereview.appspot.com/10668004 / r144817 (reverted
due to CrOS failure) with the completion logic moved to after the
auto-opening. The tests that test the auto-opening (for web store install)
were waiting for download completion to check install, and hence were
failing when completion was moved earlier.
Doing this right would probably require another state (OPENED).
BUG=123998
BUG=134930
[email protected]
Review URL: https://chromiumcodereview.appspot.com/10701040
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@145157 0039d316-1c4b-4281-b951-d872f2087c98
|
void CleanUp(DownloadId id) {
MockDownloadFile* file = download_file_factory_->GetExistingFile(id);
ASSERT_TRUE(file != NULL);
EXPECT_CALL(*file, Cancel());
download_file_manager_->CancelDownload(id);
EXPECT_EQ(NULL, download_file_manager_->GetDownloadFile(id));
}
|
void CleanUp(DownloadId id) {
MockDownloadFile* file = download_file_factory_->GetExistingFile(id);
ASSERT_TRUE(file != NULL);
EXPECT_CALL(*file, Cancel());
download_file_manager_->CancelDownload(id);
EXPECT_TRUE(NULL == download_file_manager_->GetDownloadFile(id));
}
|
C
|
Chrome
| 1 |
CVE-2019-5837
|
https://www.cvedetails.com/cve/CVE-2019-5837/
|
CWE-200
|
https://github.com/chromium/chromium/commit/04aaacb936a08d70862d6d9d7e8354721ae46be8
|
04aaacb936a08d70862d6d9d7e8354721ae46be8
|
Reland "AppCache: Add padding to cross-origin responses."
This is a reland of 85b389caa7d725cdd31f59e9a2b79ff54804b7b7
Initialized CacheRecord::padding_size to 0.
Original change's description:
> AppCache: Add padding to cross-origin responses.
>
> Bug: 918293
> Change-Id: I4f16640f06feac009d6bbbb624951da6d2669f6c
> Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1488059
> Commit-Queue: Staphany Park <[email protected]>
> Reviewed-by: Victor Costan <[email protected]>
> Reviewed-by: Marijn Kruisselbrink <[email protected]>
> Cr-Commit-Position: refs/heads/master@{#644624}
Bug: 918293
Change-Id: Ie1d3f99c7e8a854d33255a4d66243da2ce16441c
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1539906
Reviewed-by: Victor Costan <[email protected]>
Commit-Queue: Staphany Park <[email protected]>
Cr-Commit-Position: refs/heads/master@{#644719}
|
void AppCacheUpdateJob::CancelAllUrlFetches() {
for (auto& pair : pending_url_fetches_)
delete pair.second;
url_fetches_completed_ +=
pending_url_fetches_.size() + urls_to_fetch_.size();
pending_url_fetches_.clear();
urls_to_fetch_.clear();
}
|
void AppCacheUpdateJob::CancelAllUrlFetches() {
for (auto& pair : pending_url_fetches_)
delete pair.second;
url_fetches_completed_ +=
pending_url_fetches_.size() + urls_to_fetch_.size();
pending_url_fetches_.clear();
urls_to_fetch_.clear();
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/c4363d1ca65494cb7b271625e1ff6541a9f593c9
|
c4363d1ca65494cb7b271625e1ff6541a9f593c9
|
ozone: evdev: Add a couple more trace events
Add trace event inside each read notification for evdev.
BUG=none
TEST=chrome://tracing in link_freon
Review URL: https://codereview.chromium.org/1110693003
Cr-Commit-Position: refs/heads/master@{#327110}
|
bool EventReaderLibevdevCros::HasTouchpad() const {
return has_touchpad_;
}
|
bool EventReaderLibevdevCros::HasTouchpad() const {
return has_touchpad_;
}
|
C
|
Chrome
| 0 |
CVE-2013-6621
|
https://www.cvedetails.com/cve/CVE-2013-6621/
|
CWE-399
|
https://github.com/chromium/chromium/commit/4039d2fcaab746b6c20017ba9bb51c3a2403a76c
|
4039d2fcaab746b6c20017ba9bb51c3a2403a76c
|
Add logging to figure out which IPC we're failing to deserialize in RenderFrame.
BUG=369553
[email protected]
Review URL: https://codereview.chromium.org/263833020
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@268565 0039d316-1c4b-4281-b951-d872f2087c98
|
void RenderFrameImpl::didCreateDocumentElement(blink::WebLocalFrame* frame) {
DCHECK(!frame_ || frame_ == frame);
GURL url = frame->document().url();
if (url.is_valid() && url.spec() != kAboutBlankURL) {
if (frame == render_view_->webview()->mainFrame()) {
render_view_->Send(new ViewHostMsg_DocumentAvailableInMainFrame(
render_view_->GetRoutingID()));
}
}
FOR_EACH_OBSERVER(RenderViewObserver, render_view_->observers(),
DidCreateDocumentElement(frame));
}
|
void RenderFrameImpl::didCreateDocumentElement(blink::WebLocalFrame* frame) {
DCHECK(!frame_ || frame_ == frame);
GURL url = frame->document().url();
if (url.is_valid() && url.spec() != kAboutBlankURL) {
if (frame == render_view_->webview()->mainFrame()) {
render_view_->Send(new ViewHostMsg_DocumentAvailableInMainFrame(
render_view_->GetRoutingID()));
}
}
FOR_EACH_OBSERVER(RenderViewObserver, render_view_->observers(),
DidCreateDocumentElement(frame));
}
|
C
|
Chrome
| 0 |
CVE-2011-2840
|
https://www.cvedetails.com/cve/CVE-2011-2840/
|
CWE-20
|
https://github.com/chromium/chromium/commit/2db5a2048dfcacfe5ad4311c2b1e435c4c67febc
|
2db5a2048dfcacfe5ad4311c2b1e435c4c67febc
|
chromeos: fix bug where "aw snap" page replaces first tab if it was a NTP when closing window with > 1 tab.
BUG=chromium-os:12088
TEST=verify bug per bug report.
Review URL: http://codereview.chromium.org/6882058
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@83031 0039d316-1c4b-4281-b951-d872f2087c98
|
int TabStripModel::GetIndexOfTabContents(
const TabContentsWrapper* contents) const {
int index = 0;
TabContentsDataVector::const_iterator iter = contents_data_.begin();
for (; iter != contents_data_.end(); ++iter, ++index) {
if ((*iter)->contents == contents)
return index;
}
return kNoTab;
}
|
int TabStripModel::GetIndexOfTabContents(
const TabContentsWrapper* contents) const {
int index = 0;
TabContentsDataVector::const_iterator iter = contents_data_.begin();
for (; iter != contents_data_.end(); ++iter, ++index) {
if ((*iter)->contents == contents)
return index;
}
return kNoTab;
}
|
C
|
Chrome
| 0 |
CVE-2016-8666
|
https://www.cvedetails.com/cve/CVE-2016-8666/
|
CWE-400
|
https://github.com/torvalds/linux/commit/fac8e0f579695a3ecbc4d3cac369139d7f819971
|
fac8e0f579695a3ecbc4d3cac369139d7f819971
|
tunnels: Don't apply GRO to multiple layers of encapsulation.
When drivers express support for TSO of encapsulated packets, they
only mean that they can do it for one layer of encapsulation.
Supporting additional levels would mean updating, at a minimum,
more IP length fields and they are unaware of this.
No encapsulation device expresses support for handling offloaded
encapsulated packets, so we won't generate these types of frames
in the transmit path. However, GRO doesn't have a check for
multiple levels of encapsulation and will attempt to build them.
UDP tunnel GRO actually does prevent this situation but it only
handles multiple UDP tunnels stacked on top of each other. This
generalizes that solution to prevent any kind of tunnel stacking
that would cause problems.
Fixes: bf5a755f ("net-gre-gro: Add GRE support to the GRO stack")
Signed-off-by: Jesse Gross <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
void *netdev_lower_get_next_private(struct net_device *dev,
struct list_head **iter)
{
struct netdev_adjacent *lower;
lower = list_entry(*iter, struct netdev_adjacent, list);
if (&lower->list == &dev->adj_list.lower)
return NULL;
*iter = lower->list.next;
return lower->private;
}
|
void *netdev_lower_get_next_private(struct net_device *dev,
struct list_head **iter)
{
struct netdev_adjacent *lower;
lower = list_entry(*iter, struct netdev_adjacent, list);
if (&lower->list == &dev->adj_list.lower)
return NULL;
*iter = lower->list.next;
return lower->private;
}
|
C
|
linux
| 0 |
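The generalized guard the commit describes can be modeled as a one-bit "already inside an encapsulation" mark carried in the per-packet GRO control block: the first tunnel layer sets it, and a second layer sees it and falls back to the normal receive path. The sketch below is illustrative; gro_cb and tunnel_gro_receive are hypothetical names, and the actual patch tracks this state in the kernel's GRO control block.

#include <stdio.h>
#include <stdbool.h>

struct gro_cb { bool encap_mark; };

static int tunnel_gro_receive(struct gro_cb *cb)
{
    if (cb->encap_mark)      /* second tunnel layer: punt to slow path */
        return -1;
    cb->encap_mark = true;   /* remember we are inside one encap now */
    /* ... aggregate the inner packet here ... */
    return 0;
}

int main(void)
{
    struct gro_cb cb = { .encap_mark = false };
    printf("first layer:  %d\n", tunnel_gro_receive(&cb));  /* 0  */
    printf("second layer: %d\n", tunnel_gro_receive(&cb));  /* -1 */
    return 0;
}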
CVE-2016-1665
|
https://www.cvedetails.com/cve/CVE-2016-1665/
|
CWE-20
|
https://github.com/chromium/chromium/commit/282f53ffdc3b1902da86f6a0791af736837efbf8
|
282f53ffdc3b1902da86f6a0791af736837efbf8
|
[signin] Add metrics to track the source for refresh token updated events
This CL adds a source for update and revoke credentials operations. It then
surfaces the source in the chrome://signin-internals page.
This CL also records the following histograms that track refresh token events:
* Signin.RefreshTokenUpdated.ToValidToken.Source
* Signin.RefreshTokenUpdated.ToInvalidToken.Source
* Signin.RefreshTokenRevoked.Source
These histograms are needed to validate the assumptions of how often tokens
are revoked by the browser and the sources for the token revocations.
Bug: 896182
Change-Id: I2fcab80ee8e5699708e695bc3289fa6d34859a90
Reviewed-on: https://chromium-review.googlesource.com/c/1286464
Reviewed-by: Jochen Eisinger <[email protected]>
Reviewed-by: David Roger <[email protected]>
Reviewed-by: Ilya Sherman <[email protected]>
Commit-Queue: Mihai Sardarescu <[email protected]>
Cr-Commit-Position: refs/heads/master@{#606181}
|
void InlineSigninHelper::OnClientOAuthSuccessAndBrowserOpened(
const ClientOAuthResult& result,
Profile* profile,
Profile::CreateStatus status) {
if (is_force_sign_in_with_usermanager_)
UnlockProfileAndHideLoginUI(profile_->GetPath(), handler_.get());
Browser* browser = NULL;
if (handler_) {
browser = handler_->GetDesktopBrowser();
}
AboutSigninInternals* about_signin_internals =
AboutSigninInternalsFactory::GetForProfile(profile_);
about_signin_internals->OnRefreshTokenReceived("Successful");
std::string account_id =
AccountTrackerServiceFactory::GetForProfile(profile_)
->SeedAccountInfo(gaia_id_, email_);
signin_metrics::AccessPoint access_point =
signin::GetAccessPointForPromoURL(current_url_);
signin_metrics::Reason reason =
signin::GetSigninReasonForPromoURL(current_url_);
SigninManager* signin_manager = SigninManagerFactory::GetForProfile(profile_);
std::string primary_email =
signin_manager->GetAuthenticatedAccountInfo().email;
if (gaia::AreEmailsSame(email_, primary_email) &&
(reason == signin_metrics::Reason::REASON_REAUTHENTICATION ||
reason == signin_metrics::Reason::REASON_UNLOCK) &&
!password_.empty() && profiles::IsLockAvailable(profile_)) {
LocalAuth::SetLocalAuthCredentials(profile_, password_);
}
#if defined(SYNC_PASSWORD_REUSE_DETECTION_ENABLED)
if (!password_.empty()) {
scoped_refptr<password_manager::PasswordStore> password_store =
PasswordStoreFactory::GetForProfile(profile_,
ServiceAccessType::EXPLICIT_ACCESS);
if (password_store && !primary_email.empty()) {
password_store->SaveGaiaPasswordHash(
primary_email, base::UTF8ToUTF16(password_),
password_manager::metrics_util::SyncPasswordHashChange::
SAVED_ON_CHROME_SIGNIN);
}
}
#endif
if (reason == signin_metrics::Reason::REASON_REAUTHENTICATION ||
reason == signin_metrics::Reason::REASON_UNLOCK ||
reason == signin_metrics::Reason::REASON_ADD_SECONDARY_ACCOUNT) {
ProfileOAuth2TokenServiceFactory::GetForProfile(profile_)
->UpdateCredentials(account_id, result.refresh_token,
signin_metrics::SourceForRefreshTokenOperation::
kInlineLoginHandler_Signin);
if (signin::IsAutoCloseEnabledInURL(current_url_)) {
bool show_account_management = ShouldShowAccountManagement(
current_url_,
AccountConsistencyModeManager::IsMirrorEnabledForProfile(profile_));
base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, base::BindOnce(&InlineLoginHandlerImpl::CloseTab, handler_,
show_account_management));
}
if (reason == signin_metrics::Reason::REASON_REAUTHENTICATION ||
reason == signin_metrics::Reason::REASON_UNLOCK) {
signin_manager->MergeSigninCredentialIntoCookieJar();
}
LogSigninReason(reason);
} else {
browser_sync::ProfileSyncService* sync_service =
ProfileSyncServiceFactory::GetForProfile(profile_);
SigninErrorController* error_controller =
SigninErrorControllerFactory::GetForProfile(profile_);
OneClickSigninSyncStarter::StartSyncMode start_mode =
OneClickSigninSyncStarter::CONFIRM_SYNC_SETTINGS_FIRST;
if (access_point == signin_metrics::AccessPoint::ACCESS_POINT_SETTINGS ||
choose_what_to_sync_) {
bool show_settings_without_configure =
error_controller->HasError() && sync_service &&
sync_service->IsFirstSetupComplete();
if (!show_settings_without_configure)
start_mode = OneClickSigninSyncStarter::CONFIGURE_SYNC_FIRST;
}
OneClickSigninSyncStarter::ConfirmationRequired confirmation_required =
confirm_untrusted_signin_ ?
OneClickSigninSyncStarter::CONFIRM_UNTRUSTED_SIGNIN :
OneClickSigninSyncStarter::CONFIRM_AFTER_SIGNIN;
bool start_signin = !HandleCrossAccountError(
result.refresh_token, confirmation_required, start_mode);
if (start_signin) {
CreateSyncStarter(browser, current_url_, result.refresh_token,
OneClickSigninSyncStarter::CURRENT_PROFILE, start_mode,
confirmation_required);
base::ThreadTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, this);
}
}
}
|
void InlineSigninHelper::OnClientOAuthSuccessAndBrowserOpened(
const ClientOAuthResult& result,
Profile* profile,
Profile::CreateStatus status) {
if (is_force_sign_in_with_usermanager_)
UnlockProfileAndHideLoginUI(profile_->GetPath(), handler_.get());
Browser* browser = NULL;
if (handler_) {
browser = handler_->GetDesktopBrowser();
}
AboutSigninInternals* about_signin_internals =
AboutSigninInternalsFactory::GetForProfile(profile_);
about_signin_internals->OnRefreshTokenReceived("Successful");
std::string account_id =
AccountTrackerServiceFactory::GetForProfile(profile_)
->SeedAccountInfo(gaia_id_, email_);
signin_metrics::AccessPoint access_point =
signin::GetAccessPointForPromoURL(current_url_);
signin_metrics::Reason reason =
signin::GetSigninReasonForPromoURL(current_url_);
SigninManager* signin_manager = SigninManagerFactory::GetForProfile(profile_);
std::string primary_email =
signin_manager->GetAuthenticatedAccountInfo().email;
if (gaia::AreEmailsSame(email_, primary_email) &&
(reason == signin_metrics::Reason::REASON_REAUTHENTICATION ||
reason == signin_metrics::Reason::REASON_UNLOCK) &&
!password_.empty() && profiles::IsLockAvailable(profile_)) {
LocalAuth::SetLocalAuthCredentials(profile_, password_);
}
#if defined(SYNC_PASSWORD_REUSE_DETECTION_ENABLED)
if (!password_.empty()) {
scoped_refptr<password_manager::PasswordStore> password_store =
PasswordStoreFactory::GetForProfile(profile_,
ServiceAccessType::EXPLICIT_ACCESS);
if (password_store && !primary_email.empty()) {
password_store->SaveGaiaPasswordHash(
primary_email, base::UTF8ToUTF16(password_),
password_manager::metrics_util::SyncPasswordHashChange::
SAVED_ON_CHROME_SIGNIN);
}
}
#endif
if (reason == signin_metrics::Reason::REASON_REAUTHENTICATION ||
reason == signin_metrics::Reason::REASON_UNLOCK ||
reason == signin_metrics::Reason::REASON_ADD_SECONDARY_ACCOUNT) {
ProfileOAuth2TokenServiceFactory::GetForProfile(profile_)->
UpdateCredentials(account_id, result.refresh_token);
if (signin::IsAutoCloseEnabledInURL(current_url_)) {
bool show_account_management = ShouldShowAccountManagement(
current_url_,
AccountConsistencyModeManager::IsMirrorEnabledForProfile(profile_));
base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, base::BindOnce(&InlineLoginHandlerImpl::CloseTab, handler_,
show_account_management));
}
if (reason == signin_metrics::Reason::REASON_REAUTHENTICATION ||
reason == signin_metrics::Reason::REASON_UNLOCK) {
signin_manager->MergeSigninCredentialIntoCookieJar();
}
LogSigninReason(reason);
} else {
browser_sync::ProfileSyncService* sync_service =
ProfileSyncServiceFactory::GetForProfile(profile_);
SigninErrorController* error_controller =
SigninErrorControllerFactory::GetForProfile(profile_);
OneClickSigninSyncStarter::StartSyncMode start_mode =
OneClickSigninSyncStarter::CONFIRM_SYNC_SETTINGS_FIRST;
if (access_point == signin_metrics::AccessPoint::ACCESS_POINT_SETTINGS ||
choose_what_to_sync_) {
bool show_settings_without_configure =
error_controller->HasError() && sync_service &&
sync_service->IsFirstSetupComplete();
if (!show_settings_without_configure)
start_mode = OneClickSigninSyncStarter::CONFIGURE_SYNC_FIRST;
}
OneClickSigninSyncStarter::ConfirmationRequired confirmation_required =
confirm_untrusted_signin_ ?
OneClickSigninSyncStarter::CONFIRM_UNTRUSTED_SIGNIN :
OneClickSigninSyncStarter::CONFIRM_AFTER_SIGNIN;
bool start_signin = !HandleCrossAccountError(
result.refresh_token, confirmation_required, start_mode);
if (start_signin) {
CreateSyncStarter(browser, current_url_, result.refresh_token,
OneClickSigninSyncStarter::CURRENT_PROFILE, start_mode,
confirmation_required);
base::ThreadTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, this);
}
}
}
|
C
|
Chrome
| 1 |
CVE-2018-17468
|
https://www.cvedetails.com/cve/CVE-2018-17468/
|
CWE-200
|
https://github.com/chromium/chromium/commit/5fe74f831fddb92afa5ddfe46490bb49f083132b
|
5fe74f831fddb92afa5ddfe46490bb49f083132b
|
Do not forward resource timing to parent frame after back-forward navigation
LocalFrame has a |should_send_resource_timing_info_to_parent_| flag so that
timing info is not sent to the parent except for the first navigation. This
flag is cleared when the first timing is sent to the parent; however, this
does not happen if the iframe's first navigation was a back-forward
navigation. For such iframes, we shouldn't send timings to the parent at all.
Bug: 876822
Change-Id: I128b51a82ef278c439548afc8283ae63abdef5c5
Reviewed-on: https://chromium-review.googlesource.com/1186215
Reviewed-by: Kinuko Yasuda <[email protected]>
Commit-Queue: Kunihiko Sakamoto <[email protected]>
Cr-Commit-Position: refs/heads/master@{#585736}
|
WebRect WebLocalFrameImpl::GetSelectionBoundsRectForTesting() const {
return HasSelection()
? WebRect(PixelSnappedIntRect(
GetFrame()->Selection().AbsoluteUnclippedBounds()))
: WebRect();
}
|
WebRect WebLocalFrameImpl::GetSelectionBoundsRectForTesting() const {
return HasSelection()
? WebRect(PixelSnappedIntRect(
GetFrame()->Selection().AbsoluteUnclippedBounds()))
: WebRect();
}
|
C
|
Chrome
| 0 |
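The flag lifecycle in the commit message reduces to a one-shot latch with one suppressed case: report timing to the parent only on the frame's first navigation, and for a back/forward first navigation clear the latch without reporting. A hedged sketch follows; struct frame and did_finish_navigation are hypothetical names, and the real logic lives in Blink's LocalFrame.

#include <stdbool.h>
#include <stdio.h>

struct frame { bool send_timing_to_parent; };

static void did_finish_navigation(struct frame *f, bool back_forward)
{
    if (!f->send_timing_to_parent)
        return;                         /* already reported once */
    f->send_timing_to_parent = false;   /* one-shot either way */
    if (back_forward)
        return;                         /* the fix: suppress for history nav */
    printf("report timing to parent\n");
}

int main(void)
{
    struct frame f = { .send_timing_to_parent = true };
    did_finish_navigation(&f, true);    /* back-forward: nothing reported */
    did_finish_navigation(&f, false);   /* latch cleared: still nothing */
    return 0;
}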
null | null | null |
https://github.com/chromium/chromium/commit/9a3dbf43f97aa7cb6b4399f9b11ce1de20f0680f
|
9a3dbf43f97aa7cb6b4399f9b11ce1de20f0680f
|
Fix crash if utterance is garbage-collected before speech ends.
BUG=359130,348863
Review URL: https://codereview.chromium.org/228133002
git-svn-id: svn://svn.chromium.org/blink/trunk@171077 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void SpeechSynthesis::voicesDidChange()
{
m_voiceList.clear();
if (!executionContext()->activeDOMObjectsAreStopped())
dispatchEvent(Event::create(EventTypeNames::voiceschanged));
}
|
void SpeechSynthesis::voicesDidChange()
{
m_voiceList.clear();
if (!executionContext()->activeDOMObjectsAreStopped())
dispatchEvent(Event::create(EventTypeNames::voiceschanged));
}
|
C
|
Chrome
| 0 |
CVE-2014-3183
|
https://www.cvedetails.com/cve/CVE-2014-3183/
|
CWE-119
|
https://github.com/torvalds/linux/commit/51217e69697fba92a06e07e16f55c9a52d8e8945
|
51217e69697fba92a06e07e16f55c9a52d8e8945
|
HID: logitech: fix bounds checking on LED report size
The check on report size for REPORT_TYPE_LEDS in logi_dj_ll_raw_request()
is wrong; the current check doesn't make any sense -- the report allocated
by HID core in hid_hw_raw_request() can be much larger than
DJREPORT_SHORT_LENGTH, and currently logi_dj_ll_raw_request() doesn't
handle this properly at all.
Fix the check by actually trimming down the report size properly if it is
too large.
Cc: [email protected]
Reported-by: Ben Hawkes <[email protected]>
Reviewed-by: Benjamin Tissoires <[email protected]>
Signed-off-by: Jiri Kosina <[email protected]>
|
static void __exit logi_dj_exit(void)
{
dbg_hid("Logitech-DJ:%s\n", __func__);
hid_unregister_driver(&logi_djdevice_driver);
hid_unregister_driver(&logi_djreceiver_driver);
}
|
static void __exit logi_dj_exit(void)
{
dbg_hid("Logitech-DJ:%s\n", __func__);
hid_unregister_driver(&logi_djdevice_driver);
hid_unregister_driver(&logi_djreceiver_driver);
}
|
C
|
linux
| 0 |
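The corrected bounds handling the commit describes clamps the copy length to the destination's fixed capacity instead of trusting the caller-supplied size. A hedged sketch follows; copy_led_report is a hypothetical name, and the DJREPORT_SHORT_LENGTH value is an illustrative stand-in, not the driver's constant.

#include <string.h>

#define DJREPORT_SHORT_LENGTH 15   /* illustrative capacity */

/* Clamp an oversized LED report to the destination's capacity rather
 * than copying a caller-controlled length into a fixed buffer. */
static int copy_led_report(unsigned char *dst, const unsigned char *src,
                           size_t count)
{
    if (count < 2)
        return -1;                     /* too short to be a valid report */
    if (count > DJREPORT_SHORT_LENGTH)
        count = DJREPORT_SHORT_LENGTH; /* trim instead of overflowing dst */
    memcpy(dst, src, count);
    return (int)count;
}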
CVE-2018-12896
|
https://www.cvedetails.com/cve/CVE-2018-12896/
|
CWE-190
|
https://github.com/torvalds/linux/commit/78c9c4dfbf8c04883941445a195276bb4bb92c76
|
78c9c4dfbf8c04883941445a195276bb4bb92c76
|
posix-timers: Sanitize overrun handling
The posix timer overrun handling is broken because the forwarding functions
can return a huge number of overruns which does not fit in an int. As a
consequence timer_getoverrun(2) and siginfo::si_overrun can turn into
random number generators.
The k_clock::timer_forward() callbacks return a 64 bit value now. Make
k_itimer::it_overrun[_last] 64bit as well, so the kernel internal
accounting is correct. Remove the temporary (int) casts.
Add a helper function which clamps the overrun value returned to user space
via timer_getoverrun(2) or siginfo::si_overrun limited to a positive value
between 0 and INT_MAX. INT_MAX is an indicator for user space that the
overrun value has been clamped.
Reported-by: Team OWL337 <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Acked-by: John Stultz <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Michael Kerrisk <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
|
static int posix_get_boottime(const clockid_t which_clock, struct timespec64 *tp)
{
ktime_get_boottime_ts64(tp);
return 0;
}
|
static int posix_get_boottime(const clockid_t which_clock, struct timespec64 *tp)
{
ktime_get_boottime_ts64(tp);
return 0;
}
|
C
|
linux
| 0 |
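The helper the commit adds is a simple saturating conversion: keep 64-bit accounting internally, and clamp to [0, INT_MAX] at the user-space boundary, with INT_MAX doubling as the "value was clamped" indicator. A hedged sketch; overrun_to_int is a hypothetical name for the kernel's helper.

#include <limits.h>
#include <stdint.h>

/* Clamp the 64-bit internal overrun count for timer_getoverrun(2)
 * and siginfo::si_overrun; INT_MAX signals saturation to user space. */
static int overrun_to_int(int64_t overrun)
{
    if (overrun < 0)
        return 0;
    if (overrun > INT_MAX)
        return INT_MAX;
    return (int)overrun;
}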
CVE-2018-9490
|
https://www.cvedetails.com/cve/CVE-2018-9490/
|
CWE-704
|
https://android.googlesource.com/platform/external/v8/+/a24543157ae2cdd25da43e20f4e48a07481e6ceb
|
a24543157ae2cdd25da43e20f4e48a07481e6ceb
|
Backport: Fix Object.entries/values with changing elements
Bug: 111274046
Test: m -j proxy_resolver_v8_unittest && adb sync && adb shell \
/data/nativetest64/proxy_resolver_v8_unittest/proxy_resolver_v8_unittest
Change-Id: I705fc512cc5837e9364ed187559cc75d079aa5cb
(cherry picked from commit d8be9a10287afed07705ac8af027d6a46d4def99)
|
static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
FixedArrayBase* to, ElementsKind from_kind,
uint32_t to_start, int packed_size,
int copy_size) {
DisallowHeapAllocation no_gc;
ElementsKind to_kind = KindTraits::Kind;
switch (from_kind) {
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
CopyObjectToObjectElements(from, from_kind, from_start, to, to_kind,
to_start, copy_size);
break;
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS: {
AllowHeapAllocation allow_allocation;
DCHECK(IsFastObjectElementsKind(to_kind));
CopyDoubleToObjectElements(from, from_start, to, to_start, copy_size);
break;
}
case DICTIONARY_ELEMENTS:
CopyDictionaryToObjectElements(from, from_start, to, to_kind, to_start,
copy_size);
break;
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
case FAST_STRING_WRAPPER_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS:
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
UNREACHABLE();
break;
case NO_ELEMENTS:
break; // Nothing to do.
}
}
|
static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
FixedArrayBase* to, ElementsKind from_kind,
uint32_t to_start, int packed_size,
int copy_size) {
DisallowHeapAllocation no_gc;
ElementsKind to_kind = KindTraits::Kind;
switch (from_kind) {
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
CopyObjectToObjectElements(from, from_kind, from_start, to, to_kind,
to_start, copy_size);
break;
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS: {
AllowHeapAllocation allow_allocation;
DCHECK(IsFastObjectElementsKind(to_kind));
CopyDoubleToObjectElements(from, from_start, to, to_start, copy_size);
break;
}
case DICTIONARY_ELEMENTS:
CopyDictionaryToObjectElements(from, from_start, to, to_kind, to_start,
copy_size);
break;
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
case FAST_STRING_WRAPPER_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS:
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
UNREACHABLE();
break;
case NO_ELEMENTS:
break; // Nothing to do.
}
}
|
C
|
Android
| 0 |
CVE-2013-6381
|
https://www.cvedetails.com/cve/CVE-2013-6381/
|
CWE-119
|
https://github.com/torvalds/linux/commit/6fb392b1a63ae36c31f62bc3fc8630b49d602b62
|
6fb392b1a63ae36c31f62bc3fc8630b49d602b62
|
qeth: avoid buffer overflow in snmp ioctl
Check user-defined length in snmp ioctl request and allow request
only if it fits into a qeth command buffer.
Signed-off-by: Ursula Braun <[email protected]>
Signed-off-by: Frank Blaschka <[email protected]>
Reviewed-by: Heiko Carstens <[email protected]>
Reported-by: Nico Golde <[email protected]>
Reported-by: Fabian Yamaguchi <[email protected]>
Cc: <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static int qeth_cm_enable(struct qeth_card *card)
{
int rc;
struct qeth_cmd_buffer *iob;
QETH_DBF_TEXT(SETUP, 2, "cmenable");
iob = qeth_wait_for_buffer(&card->write);
memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
&card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
&card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
qeth_cm_enable_cb, NULL);
return rc;
}
|
static int qeth_cm_enable(struct qeth_card *card)
{
int rc;
struct qeth_cmd_buffer *iob;
QETH_DBF_TEXT(SETUP, 2, "cmenable");
iob = qeth_wait_for_buffer(&card->write);
memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
&card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
&card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
qeth_cm_enable_cb, NULL);
return rc;
}
|
C
|
linux
| 0 |
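The validation the commit adds boils down to one comparison: accept the user-supplied SNMP request length only if the payload fits in the fixed command buffer after the command header. A hedged sketch with illustrative constants; CMD_BUFSIZE and SNMP_HDR_LEN are stand-ins, not the driver's QETH_BUFSIZE arithmetic.

#include <stddef.h>

#define CMD_BUFSIZE   4096   /* illustrative command-buffer size */
#define SNMP_HDR_LEN    64   /* illustrative fixed header length */

/* Reject requests whose user-defined length cannot fit in the command
 * buffer; the vulnerable path performed the copy without this bound. */
static int snmp_request_len_ok(size_t user_len)
{
    return user_len <= CMD_BUFSIZE - SNMP_HDR_LEN;
}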
CVE-2016-9915
|
https://www.cvedetails.com/cve/CVE-2016-9915/
|
CWE-400
|
https://git.qemu.org/?p=qemu.git;a=commit;h=971f406b77a6eb84e0ad27dcc416b663765aee30
|
971f406b77a6eb84e0ad27dcc416b663765aee30
| null |
static inline int open_by_handle(int mountfd, const char *fh, int flags)
{
return open_by_handle_at(mountfd, (struct file_handle *)fh, flags);
}
|
static inline int open_by_handle(int mountfd, const char *fh, int flags)
{
return open_by_handle_at(mountfd, (struct file_handle *)fh, flags);
}
|
C
|
qemu
| 0 |
CVE-2016-3819
|
https://www.cvedetails.com/cve/CVE-2016-3819/
|
CWE-119
|
https://android.googlesource.com/platform/frameworks/av/+/590d1729883f700ab905cdc9ad850f3ddd7e1f56
|
590d1729883f700ab905cdc9ad850f3ddd7e1f56
|
Fix potential overflow
Bug: 28533562
Change-Id: I798ab24caa4c81f3ba564cad7c9ee019284fb702
|
static void SetPicNums(dpbStorage_t *dpb, u32 currFrameNum)
{
/* Variables */
u32 i;
i32 frameNumWrap;
/* Code */
ASSERT(dpb);
ASSERT(currFrameNum < dpb->maxFrameNum);
for (i = 0; i < dpb->numRefFrames; i++)
if (IS_SHORT_TERM(dpb->buffer[i]))
{
if (dpb->buffer[i].frameNum > currFrameNum)
frameNumWrap =
(i32)dpb->buffer[i].frameNum - (i32)dpb->maxFrameNum;
else
frameNumWrap = (i32)dpb->buffer[i].frameNum;
dpb->buffer[i].picNum = frameNumWrap;
}
}
|
static void SetPicNums(dpbStorage_t *dpb, u32 currFrameNum)
{
/* Variables */
u32 i;
i32 frameNumWrap;
/* Code */
ASSERT(dpb);
ASSERT(currFrameNum < dpb->maxFrameNum);
for (i = 0; i < dpb->numRefFrames; i++)
if (IS_SHORT_TERM(dpb->buffer[i]))
{
if (dpb->buffer[i].frameNum > currFrameNum)
frameNumWrap =
(i32)dpb->buffer[i].frameNum - (i32)dpb->maxFrameNum;
else
frameNumWrap = (i32)dpb->buffer[i].frameNum;
dpb->buffer[i].picNum = frameNumWrap;
}
}
|
C
|
Android
| 0 |
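SetPicNums above implements the H.264 frame-number wrap rule: a stored short-term frame whose frameNum exceeds the current frame must belong to the previous wrap of the modulo counter, so maxFrameNum is subtracted once. A small self-contained example of the arithmetic, with values chosen purely for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const int32_t maxFrameNum = 16;       /* counter wraps modulo 16 */
    const uint32_t curr = 3;              /* current frame number */
    const uint32_t stored[] = { 1, 3, 14 };
    for (unsigned i = 0; i < 3; i++) {
        int32_t wrap = (stored[i] > curr)
                     ? (int32_t)stored[i] - maxFrameNum   /* 14 -> -2 */
                     : (int32_t)stored[i];
        printf("frameNum %u -> picNum %d\n", stored[i], wrap);
    }
    return 0;
}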