CVE ID | CVE Page | CWE ID | codeLink | commit_id | commit_message | func_after | func_before | lang | project | vul |
---|---|---|---|---|---|---|---|---|---|---|
CVE-2017-6386
|
https://www.cvedetails.com/cve/CVE-2017-6386/
|
CWE-772
|
https://cgit.freedesktop.org/virglrenderer/commit/?id=737c3350850ca4dbc5633b3bdb4118176ce59920
|
737c3350850ca4dbc5633b3bdb4118176ce59920
| null |
void vrend_renderer_resource_detach_iov(int res_handle,
struct iovec **iov_p,
int *num_iovs_p)
{
struct vrend_resource *res;
res = vrend_resource_lookup(res_handle, 0);
if (!res) {
return;
}
if (iov_p)
*iov_p = res->iov;
if (num_iovs_p)
*num_iovs_p = res->num_iovs;
res->iov = NULL;
res->num_iovs = 0;
}
|
void vrend_renderer_resource_detach_iov(int res_handle,
struct iovec **iov_p,
int *num_iovs_p)
{
struct vrend_resource *res;
res = vrend_resource_lookup(res_handle, 0);
if (!res) {
return;
}
if (iov_p)
*iov_p = res->iov;
if (num_iovs_p)
*num_iovs_p = res->num_iovs;
res->iov = NULL;
res->num_iovs = 0;
}
|
C
|
virglrenderer
| 0 |
CVE-2013-4282
|
https://www.cvedetails.com/cve/CVE-2013-4282/
|
CWE-119
|
https://cgit.freedesktop.org/spice/spice/commit/?id=8af619009660b24e0b41ad26b30289eea288fcc2
|
8af619009660b24e0b41ad26b30289eea288fcc2
| null |
static void reds_info_new_channel(RedLinkInfo *link, int connection_id)
{
spice_info("channel %d:%d, connected successfully, over %s link",
link->link_mess->channel_type,
link->link_mess->channel_id,
link->stream->ssl == NULL ? "Non Secure" : "Secure");
/* add info + send event */
if (link->stream->ssl) {
link->stream->info->flags |= SPICE_CHANNEL_EVENT_FLAG_TLS;
}
link->stream->info->connection_id = connection_id;
link->stream->info->type = link->link_mess->channel_type;
link->stream->info->id = link->link_mess->channel_id;
reds_stream_push_channel_event(link->stream, SPICE_CHANNEL_EVENT_INITIALIZED);
}
|
static void reds_info_new_channel(RedLinkInfo *link, int connection_id)
{
spice_info("channel %d:%d, connected successfully, over %s link",
link->link_mess->channel_type,
link->link_mess->channel_id,
link->stream->ssl == NULL ? "Non Secure" : "Secure");
/* add info + send event */
if (link->stream->ssl) {
link->stream->info->flags |= SPICE_CHANNEL_EVENT_FLAG_TLS;
}
link->stream->info->connection_id = connection_id;
link->stream->info->type = link->link_mess->channel_type;
link->stream->info->id = link->link_mess->channel_id;
reds_stream_push_channel_event(link->stream, SPICE_CHANNEL_EVENT_INITIALIZED);
}
|
C
|
spice
| 0 |
CVE-2018-1091
|
https://www.cvedetails.com/cve/CVE-2018-1091/
|
CWE-119
|
https://github.com/torvalds/linux/commit/c1fa0768a8713b135848f78fd43ffc208d8ded70
|
c1fa0768a8713b135848f78fd43ffc208d8ded70
|
powerpc/tm: Flush TM only if CPU has TM feature
Commit cd63f3c ("powerpc/tm: Fix saving of TM SPRs in core dump")
added code to access TM SPRs in flush_tmregs_to_thread(). However
flush_tmregs_to_thread() does not check if TM feature is available on
CPU before trying to access TM SPRs in order to copy live state to
thread structures. flush_tmregs_to_thread() is indeed guarded by
CONFIG_PPC_TRANSACTIONAL_MEM, but the kernel might be compiled with
CONFIG_PPC_TRANSACTIONAL_MEM enabled and run on a CPU without the TM
feature, in which case it ends up executing TM instructions that the
CPU treats as illegal instructions.
The fix is simply to check in flush_tmregs_to_thread() whether the CPU
has the TM feature before accessing any TM-specific resource,
returning immediately if TM is not available. Adding that check in
flush_tmregs_to_thread() rather than in its callers, such as vsr_get()
and vsr_set(), is better because it avoids the same problem cropping
up elsewhere.
Cc: [email protected] # v4.13+
Fixes: cd63f3c ("powerpc/tm: Fix saving of TM SPRs in core dump")
Signed-off-by: Gustavo Romero <[email protected]>
Reviewed-by: Cyril Bur <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
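A minimal sketch of the described guard (simplified, not the verbatim patch; it assumes the kernel's cpu_has_feature()/CPU_FTR_TM helpers):

static void flush_tmregs_to_thread(struct task_struct *tsk)
{
	/* Bail out early on CPUs without the TM feature, so no TM SPR
	 * is ever touched on hardware that would treat TM instructions
	 * as illegal. */
	if (!cpu_has_feature(CPU_FTR_TM))
		return;
	/* copy live TM state into tsk->thread here */
}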
|
void user_enable_block_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
task->thread.debug.dbcr0 &= ~DBCR0_IC;
task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
regs->msr |= MSR_DE;
#else
regs->msr &= ~MSR_SE;
regs->msr |= MSR_BE;
#endif
}
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
|
void user_enable_block_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
task->thread.debug.dbcr0 &= ~DBCR0_IC;
task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
regs->msr |= MSR_DE;
#else
regs->msr &= ~MSR_SE;
regs->msr |= MSR_BE;
#endif
}
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
|
C
|
linux
| 0 |
CVE-2016-5187
|
https://www.cvedetails.com/cve/CVE-2016-5187/
|
CWE-20
|
https://github.com/chromium/chromium/commit/4e4c9b553ae124ed9bb60356e2ecff9106abddd0
|
4e4c9b553ae124ed9bb60356e2ecff9106abddd0
|
Convert Uses of base::RepeatingCallback<>::Equals to Use == or != in //base
Staging this change because some conversions will have semantic changes.
BUG=937566
Change-Id: I2d4950624c0fab00e107814421a161e43da965cc
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1507245
Reviewed-by: Gabriel Charette <[email protected]>
Commit-Queue: Gabriel Charette <[email protected]>
Cr-Commit-Position: refs/heads/master@{#639702}
|
void RecordAction(const UserMetricsAction& action) {
RecordComputedAction(action.str_);
}
|
void RecordAction(const UserMetricsAction& action) {
RecordComputedAction(action.str_);
}
|
C
|
Chrome
| 0 |
CVE-2018-6165
|
https://www.cvedetails.com/cve/CVE-2018-6165/
| null |
https://github.com/chromium/chromium/commit/4391ff2884fe15b8d609bd6d3af61aacf8ad52a1
|
4391ff2884fe15b8d609bd6d3af61aacf8ad52a1
|
Preserve renderer-initiated bit when reloading in a new process.
BUG=847718
TEST=See bug for repro steps.
Change-Id: I6c3461793fbb23f1a4d731dc27b4e77312f29227
Reviewed-on: https://chromium-review.googlesource.com/1080235
Commit-Queue: Charlie Reis <[email protected]>
Reviewed-by: Nasko Oskov <[email protected]>
Cr-Commit-Position: refs/heads/master@{#563312}
|
void NavigationControllerImpl::HandleRendererDebugURL(
FrameTreeNode* frame_tree_node,
const GURL& url) {
if (!frame_tree_node->current_frame_host()->IsRenderFrameLive()) {
if (!IsInitialNavigation()) {
DiscardNonCommittedEntries();
return;
}
frame_tree_node->render_manager()->InitializeRenderFrameIfNecessary(
frame_tree_node->current_frame_host());
}
frame_tree_node->current_frame_host()->HandleRendererDebugURL(url);
}
|
void NavigationControllerImpl::HandleRendererDebugURL(
FrameTreeNode* frame_tree_node,
const GURL& url) {
if (!frame_tree_node->current_frame_host()->IsRenderFrameLive()) {
if (!IsInitialNavigation()) {
DiscardNonCommittedEntries();
return;
}
frame_tree_node->render_manager()->InitializeRenderFrameIfNecessary(
frame_tree_node->current_frame_host());
}
frame_tree_node->current_frame_host()->HandleRendererDebugURL(url);
}
|
C
|
Chrome
| 0 |
CVE-2013-0839
|
https://www.cvedetails.com/cve/CVE-2013-0839/
|
CWE-399
|
https://github.com/chromium/chromium/commit/dd3b6fe574edad231c01c78e4647a74c38dc4178
|
dd3b6fe574edad231c01c78e4647a74c38dc4178
|
Remove parent* arg from GDataEntry ctor.
* Remove static FromDocumentEntry from GDataEntry, GDataFile, GDataDirectory. Replace with InitFromDocumentEntry.
* Move common code from GDataFile::InitFromDocumentEntry and GDataDirectory::InitFromDocumentEntry to GDataEntry::InitFromDocumentEntry.
* Add GDataDirectoryService::FromDocumentEntry and use this everywhere.
* Make ctors of GDataFile, GDataDirectory private, so these must be created by GDataDirectoryService's CreateGDataFile and
CreateGDataDirectory. Make GDataEntry ctor protected.
BUG=141494
TEST=unit tests.
Review URL: https://chromiumcodereview.appspot.com/10854083
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@151008 0039d316-1c4b-4281-b951-d872f2087c98
|
void GDataFileSystem::OnMoveEntryToDirectoryWithFileMoveCallback(
const FileMoveCallback& callback,
GDataFileError error,
const FilePath& moved_file_path) {
if (error == GDATA_FILE_OK)
OnDirectoryChanged(moved_file_path.DirName());
if (!callback.is_null())
callback.Run(error, moved_file_path);
}
|
void GDataFileSystem::OnMoveEntryToDirectoryWithFileMoveCallback(
const FileMoveCallback& callback,
GDataFileError error,
const FilePath& moved_file_path) {
if (error == GDATA_FILE_OK)
OnDirectoryChanged(moved_file_path.DirName());
if (!callback.is_null())
callback.Run(error, moved_file_path);
}
|
C
|
Chrome
| 0 |
CVE-2012-2384
|
https://www.cvedetails.com/cve/CVE-2012-2384/
|
CWE-189
|
https://github.com/torvalds/linux/commit/44afb3a04391a74309d16180d1e4f8386fdfa745
|
44afb3a04391a74309d16180d1e4f8386fdfa745
|
drm/i915: fix integer overflow in i915_gem_do_execbuffer()
On 32-bit systems, a large args->num_cliprects from userspace via ioctl
may overflow the allocation size, leading to out-of-bounds access.
This vulnerability was introduced in commit 432e58ed ("drm/i915: Avoid
allocation for execbuffer object list").
Signed-off-by: Xi Wang <[email protected]>
Reviewed-by: Chris Wilson <[email protected]>
Cc: [email protected]
Signed-off-by: Daniel Vetter <[email protected]>
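A hedged sketch of the guard this describes (illustrative; the real patch sits in the execbuffer ioctl path and uses the same division trick to reject counts whose byte size would wrap on 32-bit):

struct drm_clip_rect *cliprects;
/* Reject counts whose byte size would overflow before it is
 * used to size the allocation. */
if (args->num_cliprects > UINT_MAX / sizeof(*cliprects))
	return -EINVAL;
cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), GFP_KERNEL);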
|
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_execbuffer2 exec2;
struct drm_i915_gem_exec_object *exec_list = NULL;
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret, i;
if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
/* Copy in the exec list from userland */
exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
if (exec_list == NULL || exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
drm_free_large(exec_list);
drm_free_large(exec2_list);
return -ENOMEM;
}
ret = copy_from_user(exec_list,
(struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
sizeof(*exec_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret);
drm_free_large(exec_list);
drm_free_large(exec2_list);
return -EFAULT;
}
for (i = 0; i < args->buffer_count; i++) {
exec2_list[i].handle = exec_list[i].handle;
exec2_list[i].relocation_count = exec_list[i].relocation_count;
exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
exec2_list[i].alignment = exec_list[i].alignment;
exec2_list[i].offset = exec_list[i].offset;
if (INTEL_INFO(dev)->gen < 4)
exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
else
exec2_list[i].flags = 0;
}
exec2.buffers_ptr = args->buffers_ptr;
exec2.buffer_count = args->buffer_count;
exec2.batch_start_offset = args->batch_start_offset;
exec2.batch_len = args->batch_len;
exec2.DR1 = args->DR1;
exec2.DR4 = args->DR4;
exec2.num_cliprects = args->num_cliprects;
exec2.cliprects_ptr = args->cliprects_ptr;
exec2.flags = I915_EXEC_RENDER;
ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
for (i = 0; i < args->buffer_count; i++)
exec_list[i].offset = exec2_list[i].offset;
/* ... and back out to userspace */
ret = copy_to_user((struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
exec_list,
sizeof(*exec_list) * args->buffer_count);
if (ret) {
ret = -EFAULT;
DRM_DEBUG("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
}
}
drm_free_large(exec_list);
drm_free_large(exec2_list);
return ret;
}
|
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_execbuffer2 exec2;
struct drm_i915_gem_exec_object *exec_list = NULL;
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret, i;
if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
/* Copy in the exec list from userland */
exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
if (exec_list == NULL || exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
drm_free_large(exec_list);
drm_free_large(exec2_list);
return -ENOMEM;
}
ret = copy_from_user(exec_list,
(struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
sizeof(*exec_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret);
drm_free_large(exec_list);
drm_free_large(exec2_list);
return -EFAULT;
}
for (i = 0; i < args->buffer_count; i++) {
exec2_list[i].handle = exec_list[i].handle;
exec2_list[i].relocation_count = exec_list[i].relocation_count;
exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
exec2_list[i].alignment = exec_list[i].alignment;
exec2_list[i].offset = exec_list[i].offset;
if (INTEL_INFO(dev)->gen < 4)
exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
else
exec2_list[i].flags = 0;
}
exec2.buffers_ptr = args->buffers_ptr;
exec2.buffer_count = args->buffer_count;
exec2.batch_start_offset = args->batch_start_offset;
exec2.batch_len = args->batch_len;
exec2.DR1 = args->DR1;
exec2.DR4 = args->DR4;
exec2.num_cliprects = args->num_cliprects;
exec2.cliprects_ptr = args->cliprects_ptr;
exec2.flags = I915_EXEC_RENDER;
ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
for (i = 0; i < args->buffer_count; i++)
exec_list[i].offset = exec2_list[i].offset;
/* ... and back out to userspace */
ret = copy_to_user((struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
exec_list,
sizeof(*exec_list) * args->buffer_count);
if (ret) {
ret = -EFAULT;
DRM_DEBUG("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
}
}
drm_free_large(exec_list);
drm_free_large(exec2_list);
return ret;
}
|
C
|
linux
| 0 |
CVE-2017-16931
|
https://www.cvedetails.com/cve/CVE-2017-16931/
|
CWE-119
|
https://github.com/GNOME/libxml2/commit/e26630548e7d138d2c560844c43820b6767251e3
|
e26630548e7d138d2c560844c43820b6767251e3
|
Fix handling of parameter-entity references
There were two bugs where parameter-entity references could lead to an
unexpected change of the input buffer in xmlParseNameComplex and
xmlDictLookup being called with an invalid pointer.
Percent sign in DTD Names
=========================
The NEXTL macro used to call xmlParserHandlePEReference. When parsing
"complex" names inside the DTD, this could result in entity expansion
which created a new input buffer. The fix is to simply remove the call
to xmlParserHandlePEReference from the NEXTL macro. This is safe because
no users of the macro require expansion of parameter entities.
- xmlParseNameComplex
- xmlParseNCNameComplex
- xmlParseNmtoken
The percent sign is not allowed in names, which are grammatical tokens.
- xmlParseEntityValue
Parameter-entity references in entity values are expanded but this
happens in a separate step in this function.
- xmlParseSystemLiteral
Parameter-entity references are ignored in the system literal.
- xmlParseAttValueComplex
- xmlParseCharDataComplex
- xmlParseCommentComplex
- xmlParsePI
- xmlParseCDSect
Parameter-entity references are ignored outside the DTD.
- xmlLoadEntityContent
This function is only called from xmlStringLenDecodeEntities and
entities are replaced in a separate step immediately after the function
call.
This bug could also be triggered with an internal subset and double
entity expansion.
This fixes bug 766956 initially reported by Wei Lei and independently by
Chromium's ClusterFuzz, Hanno Böck, and Marco Grassi. Thanks to everyone
involved.
xmlParseNameComplex with XML_PARSE_OLD10
========================================
When parsing Names inside an expanded parameter entity with the
XML_PARSE_OLD10 option, xmlParseNameComplex would call xmlGROW via the
GROW macro if the input buffer was exhausted. At the end of the
parameter entity's replacement text, this function would then call
xmlPopInput which invalidated the input buffer.
There should be no need to invoke GROW in this situation because the
buffer is grown periodically every XML_PARSER_CHUNK_SIZE characters and,
at least for UTF-8, in xmlCurrentChar. This also matches the code path
executed when XML_PARSE_OLD10 is not set.
This fixes bugs 781205 (CVE-2017-9049) and 781361 (CVE-2017-9050).
Thanks to Marcel Böhme and Thuan Pham for the report.
Additional hardening
====================
A separate check was added in xmlParseNameComplex to validate the
buffer size.
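As a hedged illustration of the first fix, the NEXTL macro after the change looks roughly like this (simplified from the description above, not the verbatim libxml2 source); the point is that advancing the cursor no longer calls xmlParserHandlePEReference, so it can no longer swap the input buffer mid-name:

#define NEXTL(l) do {						\
	if (*(ctxt->input->cur) == '\n') {			\
		ctxt->input->line++; ctxt->input->col = 1;	\
	} else							\
		ctxt->input->col++;				\
	ctxt->input->cur += l;	/* advance only, no PE expansion */ \
} while (0)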
|
rngStreamTest(const char *filename,
const char *resul ATTRIBUTE_UNUSED,
const char *errr ATTRIBUTE_UNUSED,
int options) {
const char *base = baseFilename(filename);
const char *base2;
const char *instance;
int res = 0, len, ret;
char pattern[500];
char prefix[500];
char result[500];
char err[500];
glob_t globbuf;
size_t i;
char count = 0;
xmlTextReaderPtr reader;
int disable_err = 0;
/*
* most of the mess is about the output filenames generated by the Makefile
*/
len = strlen(base);
if ((len > 499) || (len < 5)) {
fprintf(stderr, "len(base) == %d !\n", len);
return(-1);
}
len -= 4; /* remove trailing .rng */
memcpy(prefix, base, len);
prefix[len] = 0;
/*
* strictly unifying the error messages is nearly impossible this
* hack is also done in the Makefile
*/
if ((!strcmp(prefix, "tutor10_1")) || (!strcmp(prefix, "tutor10_2")) ||
(!strcmp(prefix, "tutor3_2")) || (!strcmp(prefix, "307377")) ||
(!strcmp(prefix, "tutor8_2")))
disable_err = 1;
snprintf(pattern, 499, "./test/relaxng/%s_?.xml", prefix);
pattern[499] = 0;
globbuf.gl_offs = 0;
glob(pattern, GLOB_DOOFFS, NULL, &globbuf);
for (i = 0;i < globbuf.gl_pathc;i++) {
testErrorsSize = 0;
testErrors[0] = 0;
instance = globbuf.gl_pathv[i];
base2 = baseFilename(instance);
len = strlen(base2);
if ((len > 6) && (base2[len - 6] == '_')) {
count = base2[len - 5];
snprintf(result, 499, "result/relaxng/%s_%c",
prefix, count);
result[499] = 0;
snprintf(err, 499, "result/relaxng/%s_%c.err",
prefix, count);
err[499] = 0;
} else {
fprintf(stderr, "don't know how to process %s\n", instance);
continue;
}
reader = xmlReaderForFile(instance, NULL, options);
if (reader == NULL) {
fprintf(stderr, "Failed to build reder for %s\n", instance);
}
if (disable_err == 1)
ret = streamProcessTest(instance, result, NULL, reader, filename,
options);
else
ret = streamProcessTest(instance, result, err, reader, filename,
options);
xmlFreeTextReader(reader);
if (ret != 0) {
fprintf(stderr, "instance %s failed\n", instance);
res = ret;
}
}
globfree(&globbuf);
return(res);
}
|
rngStreamTest(const char *filename,
const char *resul ATTRIBUTE_UNUSED,
const char *errr ATTRIBUTE_UNUSED,
int options) {
const char *base = baseFilename(filename);
const char *base2;
const char *instance;
int res = 0, len, ret;
char pattern[500];
char prefix[500];
char result[500];
char err[500];
glob_t globbuf;
size_t i;
char count = 0;
xmlTextReaderPtr reader;
int disable_err = 0;
/*
* most of the mess is about the output filenames generated by the Makefile
*/
len = strlen(base);
if ((len > 499) || (len < 5)) {
fprintf(stderr, "len(base) == %d !\n", len);
return(-1);
}
len -= 4; /* remove trailing .rng */
memcpy(prefix, base, len);
prefix[len] = 0;
/*
* strictly unifying the error messages is nearly impossible this
* hack is also done in the Makefile
*/
if ((!strcmp(prefix, "tutor10_1")) || (!strcmp(prefix, "tutor10_2")) ||
(!strcmp(prefix, "tutor3_2")) || (!strcmp(prefix, "307377")) ||
(!strcmp(prefix, "tutor8_2")))
disable_err = 1;
snprintf(pattern, 499, "./test/relaxng/%s_?.xml", prefix);
pattern[499] = 0;
globbuf.gl_offs = 0;
glob(pattern, GLOB_DOOFFS, NULL, &globbuf);
for (i = 0;i < globbuf.gl_pathc;i++) {
testErrorsSize = 0;
testErrors[0] = 0;
instance = globbuf.gl_pathv[i];
base2 = baseFilename(instance);
len = strlen(base2);
if ((len > 6) && (base2[len - 6] == '_')) {
count = base2[len - 5];
snprintf(result, 499, "result/relaxng/%s_%c",
prefix, count);
result[499] = 0;
snprintf(err, 499, "result/relaxng/%s_%c.err",
prefix, count);
err[499] = 0;
} else {
fprintf(stderr, "don't know how to process %s\n", instance);
continue;
}
reader = xmlReaderForFile(instance, NULL, options);
if (reader == NULL) {
fprintf(stderr, "Failed to build reder for %s\n", instance);
}
if (disable_err == 1)
ret = streamProcessTest(instance, result, NULL, reader, filename,
options);
else
ret = streamProcessTest(instance, result, err, reader, filename,
options);
xmlFreeTextReader(reader);
if (ret != 0) {
fprintf(stderr, "instance %s failed\n", instance);
res = ret;
}
}
globfree(&globbuf);
return(res);
}
|
C
|
libxml2
| 0 |
CVE-2016-4482
|
https://www.cvedetails.com/cve/CVE-2016-4482/
|
CWE-200
|
https://github.com/torvalds/linux/commit/681fef8380eb818c0b845fca5d2ab1dcbab114ee
|
681fef8380eb818c0b845fca5d2ab1dcbab114ee
|
USB: usbfs: fix potential infoleak in devio
The stack object “ci” has a total size of 8 bytes. Its last 3 bytes
are padding bytes which are not initialized and leaked to userland
via “copy_to_user”.
Signed-off-by: Kangjie Lu <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
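The fix pattern is to zero the whole stack object before filling it, so the padding bytes can never reach userland. A sketch along the lines of the usbfs proc_connectinfo path (simplified):

struct usbdevfs_connectinfo ci;

memset(&ci, 0, sizeof(ci));	/* clears the 3 padding bytes too */
ci.devnum = ps->dev->devnum;
ci.slow = ps->dev->speed == USB_SPEED_LOW;
if (copy_to_user(arg, &ci, sizeof(ci)))
	return -EFAULT;
return 0;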
|
static int proc_control(struct usb_dev_state *ps, void __user *arg)
{
struct usb_device *dev = ps->dev;
struct usbdevfs_ctrltransfer ctrl;
unsigned int tmo;
unsigned char *tbuf;
unsigned wLength;
int i, pipe, ret;
if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
return -EFAULT;
ret = check_ctrlrecip(ps, ctrl.bRequestType, ctrl.bRequest,
ctrl.wIndex);
if (ret)
return ret;
wLength = ctrl.wLength; /* To suppress 64k PAGE_SIZE warning */
if (wLength > PAGE_SIZE)
return -EINVAL;
ret = usbfs_increase_memory_usage(PAGE_SIZE + sizeof(struct urb) +
sizeof(struct usb_ctrlrequest));
if (ret)
return ret;
tbuf = (unsigned char *)__get_free_page(GFP_KERNEL);
if (!tbuf) {
ret = -ENOMEM;
goto done;
}
tmo = ctrl.timeout;
snoop(&dev->dev, "control urb: bRequestType=%02x "
"bRequest=%02x wValue=%04x "
"wIndex=%04x wLength=%04x\n",
ctrl.bRequestType, ctrl.bRequest, ctrl.wValue,
ctrl.wIndex, ctrl.wLength);
if (ctrl.bRequestType & 0x80) {
if (ctrl.wLength && !access_ok(VERIFY_WRITE, ctrl.data,
ctrl.wLength)) {
ret = -EINVAL;
goto done;
}
pipe = usb_rcvctrlpipe(dev, 0);
snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT, NULL, 0);
usb_unlock_device(dev);
i = usb_control_msg(dev, pipe, ctrl.bRequest,
ctrl.bRequestType, ctrl.wValue, ctrl.wIndex,
tbuf, ctrl.wLength, tmo);
usb_lock_device(dev);
snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE,
tbuf, max(i, 0));
if ((i > 0) && ctrl.wLength) {
if (copy_to_user(ctrl.data, tbuf, i)) {
ret = -EFAULT;
goto done;
}
}
} else {
if (ctrl.wLength) {
if (copy_from_user(tbuf, ctrl.data, ctrl.wLength)) {
ret = -EFAULT;
goto done;
}
}
pipe = usb_sndctrlpipe(dev, 0);
snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT,
tbuf, ctrl.wLength);
usb_unlock_device(dev);
i = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ctrl.bRequest,
ctrl.bRequestType, ctrl.wValue, ctrl.wIndex,
tbuf, ctrl.wLength, tmo);
usb_lock_device(dev);
snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE, NULL, 0);
}
if (i < 0 && i != -EPIPE) {
dev_printk(KERN_DEBUG, &dev->dev, "usbfs: USBDEVFS_CONTROL "
"failed cmd %s rqt %u rq %u len %u ret %d\n",
current->comm, ctrl.bRequestType, ctrl.bRequest,
ctrl.wLength, i);
}
ret = i;
done:
free_page((unsigned long) tbuf);
usbfs_decrease_memory_usage(PAGE_SIZE + sizeof(struct urb) +
sizeof(struct usb_ctrlrequest));
return ret;
}
|
static int proc_control(struct usb_dev_state *ps, void __user *arg)
{
struct usb_device *dev = ps->dev;
struct usbdevfs_ctrltransfer ctrl;
unsigned int tmo;
unsigned char *tbuf;
unsigned wLength;
int i, pipe, ret;
if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
return -EFAULT;
ret = check_ctrlrecip(ps, ctrl.bRequestType, ctrl.bRequest,
ctrl.wIndex);
if (ret)
return ret;
wLength = ctrl.wLength; /* To suppress 64k PAGE_SIZE warning */
if (wLength > PAGE_SIZE)
return -EINVAL;
ret = usbfs_increase_memory_usage(PAGE_SIZE + sizeof(struct urb) +
sizeof(struct usb_ctrlrequest));
if (ret)
return ret;
tbuf = (unsigned char *)__get_free_page(GFP_KERNEL);
if (!tbuf) {
ret = -ENOMEM;
goto done;
}
tmo = ctrl.timeout;
snoop(&dev->dev, "control urb: bRequestType=%02x "
"bRequest=%02x wValue=%04x "
"wIndex=%04x wLength=%04x\n",
ctrl.bRequestType, ctrl.bRequest, ctrl.wValue,
ctrl.wIndex, ctrl.wLength);
if (ctrl.bRequestType & 0x80) {
if (ctrl.wLength && !access_ok(VERIFY_WRITE, ctrl.data,
ctrl.wLength)) {
ret = -EINVAL;
goto done;
}
pipe = usb_rcvctrlpipe(dev, 0);
snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT, NULL, 0);
usb_unlock_device(dev);
i = usb_control_msg(dev, pipe, ctrl.bRequest,
ctrl.bRequestType, ctrl.wValue, ctrl.wIndex,
tbuf, ctrl.wLength, tmo);
usb_lock_device(dev);
snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE,
tbuf, max(i, 0));
if ((i > 0) && ctrl.wLength) {
if (copy_to_user(ctrl.data, tbuf, i)) {
ret = -EFAULT;
goto done;
}
}
} else {
if (ctrl.wLength) {
if (copy_from_user(tbuf, ctrl.data, ctrl.wLength)) {
ret = -EFAULT;
goto done;
}
}
pipe = usb_sndctrlpipe(dev, 0);
snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT,
tbuf, ctrl.wLength);
usb_unlock_device(dev);
i = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ctrl.bRequest,
ctrl.bRequestType, ctrl.wValue, ctrl.wIndex,
tbuf, ctrl.wLength, tmo);
usb_lock_device(dev);
snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE, NULL, 0);
}
if (i < 0 && i != -EPIPE) {
dev_printk(KERN_DEBUG, &dev->dev, "usbfs: USBDEVFS_CONTROL "
"failed cmd %s rqt %u rq %u len %u ret %d\n",
current->comm, ctrl.bRequestType, ctrl.bRequest,
ctrl.wLength, i);
}
ret = i;
done:
free_page((unsigned long) tbuf);
usbfs_decrease_memory_usage(PAGE_SIZE + sizeof(struct urb) +
sizeof(struct usb_ctrlrequest));
return ret;
}
|
C
|
linux
| 0 |
CVE-2016-3751
|
https://www.cvedetails.com/cve/CVE-2016-3751/
| null |
https://android.googlesource.com/platform/external/libpng/+/9d4853418ab2f754c2b63e091c29c5529b8b86ca
|
9d4853418ab2f754c2b63e091c29c5529b8b86ca
|
DO NOT MERGE Update libpng to 1.6.20
BUG:23265085
Change-Id: I85199805636d771f3597b691b63bc0bf46084833
(cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82)
|
chunk_init(struct chunk * const chunk, struct file * const file)
/* When a chunk is initialized the file length/type/pos are copied into the
* corresponding chunk fields and the new chunk is registered in the file
* structure. There can only be one chunk at a time.
*
* NOTE: this routine must only be called from the file alloc routine!
*/
{
assert(file->chunk == NULL);
CLEAR(*chunk);
chunk->file = file;
chunk->global = file->global;
chunk->chunk_data_pos = file->data_pos;
chunk->chunk_length = file->length;
chunk->chunk_type = file->type;
/* Compressed/uncompressed size information (from the zlib control structure
* that is used to check the compressed data in a chunk.)
*/
chunk->uncompressed_digits = 0;
chunk->compressed_digits = 0;
file->chunk = chunk;
}
|
chunk_init(struct chunk * const chunk, struct file * const file)
/* When a chunk is initialized the file length/type/pos are copied into the
* corresponding chunk fields and the new chunk is registered in the file
* structure. There can only be one chunk at a time.
*
* NOTE: this routine must only be called from the file alloc routine!
*/
{
assert(file->chunk == NULL);
CLEAR(*chunk);
chunk->file = file;
chunk->global = file->global;
chunk->chunk_data_pos = file->data_pos;
chunk->chunk_length = file->length;
chunk->chunk_type = file->type;
/* Compressed/uncompressed size information (from the zlib control structure
* that is used to check the compressed data in a chunk.)
*/
chunk->uncompressed_digits = 0;
chunk->compressed_digits = 0;
file->chunk = chunk;
}
|
C
|
Android
| 0 |
CVE-2015-2925
|
https://www.cvedetails.com/cve/CVE-2015-2925/
|
CWE-254
|
https://github.com/torvalds/linux/commit/397d425dc26da728396e66d392d5dcb8dac30c37
|
397d425dc26da728396e66d392d5dcb8dac30c37
|
vfs: Test for and handle paths that are unreachable from their mnt_root
In rare cases a directory can be renamed out from under a bind mount.
In those cases without special handling it becomes possible to walk up
the directory tree to the root dentry of the filesystem and down
from the root dentry to every other file or directory on the filesystem.
Like division by zero, ".." from an unconnected path cannot be given
a useful semantic, as there is no predicting at which path component
the code will realize it is unconnected. We certainly cannot match
the current behavior, as the current behavior is a security hole.
Therefore, when encountering ".." while following an unconnected path,
return -ENOENT.
- Add a function path_connected to verify path->dentry is reachable
from path->mnt.mnt_root. AKA to validate that rename did not do
something nasty to the bind mount.
To avoid races, path_connected must be called after following a path
component to its next path component.
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: Al Viro <[email protected]>
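A sketch of the helper described above (close to the shape of the actual patch, but hedged as illustrative):

/* True if path->dentry is reachable from the vfsmount root, i.e.
 * rename has not moved it outside the bind mount. */
static bool path_connected(const struct path *path)
{
	struct vfsmount *mnt = path->mnt;

	/* Only bind mounts can have disconnected paths. */
	if (mnt->mnt_root == mnt->mnt_sb->s_root)
		return true;

	return is_subdir(path->dentry, mnt->mnt_root);
}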
|
static void restore_nameidata(void)
{
struct nameidata *now = current->nameidata, *old = now->saved;
current->nameidata = old;
if (old)
old->total_link_count = now->total_link_count;
if (now->stack != now->internal) {
kfree(now->stack);
now->stack = now->internal;
}
}
|
static void restore_nameidata(void)
{
struct nameidata *now = current->nameidata, *old = now->saved;
current->nameidata = old;
if (old)
old->total_link_count = now->total_link_count;
if (now->stack != now->internal) {
kfree(now->stack);
now->stack = now->internal;
}
}
|
C
|
linux
| 0 |
CVE-2015-7613
|
https://www.cvedetails.com/cve/CVE-2015-7613/
|
CWE-362
|
https://github.com/torvalds/linux/commit/b9a532277938798b53178d5a66af6e2915cb27cf
|
b9a532277938798b53178d5a66af6e2915cb27cf
|
Initialize msg/shm IPC objects before doing ipc_addid()
As reported by Dmitry Vyukov, we really shouldn't do ipc_addid() before
having initialized the IPC object state. Yes, we initialize the IPC
object in a locked state, but with all the lockless RCU lookup work,
that IPC object lock no longer means that the state cannot be seen.
We already did this for the IPC semaphore code (see commit e8577d1f0329:
"ipc/sem.c: fully initialize sem_array before making it visible") but we
clearly forgot about msg and shm.
Reported-by: Dmitry Vyukov <[email protected]>
Cc: Manfred Spraul <[email protected]>
Cc: Davidlohr Bueso <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>
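The pattern being enforced, sketched on the msg-queue path (field names follow ipc/msg.c but this is a hedged excerpt, not the full patch):

/* Fully initialize the object first ... */
msq->q_stime = msq->q_rtime = 0;
msq->q_ctime = get_seconds();
INIT_LIST_HEAD(&msq->q_messages);

/* ... and only then publish it, so lockless RCU lookups can never
 * observe half-initialized state. */
retval = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);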
|
void msg_init_ns(struct ipc_namespace *ns)
{
ns->msg_ctlmax = MSGMAX;
ns->msg_ctlmnb = MSGMNB;
ns->msg_ctlmni = MSGMNI;
atomic_set(&ns->msg_bytes, 0);
atomic_set(&ns->msg_hdrs, 0);
ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
}
|
void msg_init_ns(struct ipc_namespace *ns)
{
ns->msg_ctlmax = MSGMAX;
ns->msg_ctlmnb = MSGMNB;
ns->msg_ctlmni = MSGMNI;
atomic_set(&ns->msg_bytes, 0);
atomic_set(&ns->msg_hdrs, 0);
ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
}
|
C
|
linux
| 0 |
CVE-2017-3731
|
https://www.cvedetails.com/cve/CVE-2017-3731/
|
CWE-125
|
https://github.com/openssl/openssl/commit/00d965474b22b54e4275232bc71ee0c699c5cd21
|
00d965474b22b54e4275232bc71ee0c699c5cd21
|
crypto/evp: harden AEAD ciphers.
Originally a crash in a 32-bit build was reported for the
CHACHA20-POLY1305 cipher. The crash is triggered by a truncated packet
and is the result of excessive hashing past the edge of accessible
memory. Since the hash operation is read-only, it is not considered
exploitable beyond a DoS condition. Other ciphers were hardened as well.
Thanks to Robert Święcki for the report.
CVE-2017-3731
Reviewed-by: Rich Salz <[email protected]>
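The hardening amounts to a length sanity check before any hash or cipher work touches the record, roughly (hedged sketch for the GCM TLS path; the constants are from OpenSSL's evp.h):

/* A truncated record cannot even hold the explicit IV plus the tag;
 * reject it before hashing walks off the end of the buffer. */
if (len < EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN)
	return -1;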
|
const EVP_CIPHER *EVP_aes_128_wrap_pad(void)
{
return &aes_128_wrap_pad;
}
|
const EVP_CIPHER *EVP_aes_128_wrap_pad(void)
{
return &aes_128_wrap_pad;
}
|
C
|
openssl
| 0 |
CVE-2014-1700
|
https://www.cvedetails.com/cve/CVE-2014-1700/
|
CWE-399
|
https://github.com/chromium/chromium/commit/685c3980d31b5199924086b8c93a1ce751d24733
|
685c3980d31b5199924086b8c93a1ce751d24733
|
content: Rename webkit_test_helpers.{cc,h} to blink_test_helpers.{cc,h}
Now that webkit/ is gone, we are preparing ourselves for the merge of
third_party/WebKit into //blink.
BUG=None
TEST=content_shell && content_unittests
[email protected]
Review URL: https://codereview.chromium.org/1118183003
Cr-Commit-Position: refs/heads/master@{#328202}
|
void LayoutTestContentRendererClient::RenderFrameCreated(
RenderFrame* render_frame) {
new LayoutTestRenderFrameObserver(render_frame);
}
|
void LayoutTestContentRendererClient::RenderFrameCreated(
RenderFrame* render_frame) {
new LayoutTestRenderFrameObserver(render_frame);
}
|
C
|
Chrome
| 0 |
CVE-2018-16427
|
https://www.cvedetails.com/cve/CVE-2018-16427/
|
CWE-125
|
https://github.com/OpenSC/OpenSC/pull/1447/commits/8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
|
8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
|
fixed out of bounds reads
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting and suggesting security fixes.
|
coolkey_process_combined_object(sc_card_t *card, coolkey_private_data_t *priv, u8 *object, size_t object_length)
{
coolkey_combined_header_t *header = (coolkey_combined_header_t *)object;
unsigned short compressed_offset;
unsigned short compressed_length;
unsigned short compressed_type;
unsigned short object_offset;
unsigned short object_count;
coolkey_decompressed_header_t *decompressed_header;
u8 *decompressed_object = NULL;
size_t decompressed_object_len = 0;
int free_decompressed = 0;
int i, r;
if (object_length < sizeof(coolkey_combined_header_t)) {
return SC_ERROR_CORRUPTED_DATA;
}
compressed_offset = bebytes2ushort(header->compression_offset);
compressed_length = bebytes2ushort(header->compression_length);
compressed_type = bebytes2ushort(header->compression_type);
if ((((size_t)compressed_offset) + (size_t)compressed_length) > object_length) {
return SC_ERROR_CORRUPTED_DATA;
}
/* store the CUID */
memcpy(&priv->cuid, &header->cuid, sizeof(priv->cuid));
if (compressed_type == COOLKEY_COMPRESSION_ZLIB) {
#ifdef ENABLE_ZLIB
r = sc_decompress_alloc(&decompressed_object, &decompressed_object_len, &object[compressed_offset], compressed_length, COMPRESSION_AUTO);
if (r)
goto done;
free_decompressed = 1;
#else
sc_log(card->ctx, "Coolkey compression not supported, no zlib");
LOG_FUNC_RETURN(card->ctx, SC_ERROR_NOT_SUPPORTED);
#endif
} else {
decompressed_object =&object[compressed_offset];
decompressed_object_len = (size_t) compressed_length;
}
decompressed_header = (coolkey_decompressed_header_t *)decompressed_object;
if (decompressed_object_len < sizeof(coolkey_decompressed_header_t)) {
return SC_ERROR_CORRUPTED_DATA;
}
object_offset = bebytes2ushort(decompressed_header->object_offset);
object_count = bebytes2ushort(decompressed_header->object_count);
/*
* using 2 different tests here so we can log different errors if logging is
* turned on.
*/
/* make sure token_name doesn't overrun the buffer */
if (decompressed_header->token_name_length +
offsetof(coolkey_decompressed_header_t,token_name) > decompressed_object_len) {
r = SC_ERROR_CORRUPTED_DATA;
goto done;
}
/* make sure it doesn't overlap the object space */
if (decompressed_header->token_name_length +
offsetof(coolkey_decompressed_header_t,token_name) > object_offset) {
r = SC_ERROR_CORRUPTED_DATA;
goto done;
}
/* store the token name in the priv structure so the emulator can set it */
priv->token_name = malloc(decompressed_header->token_name_length+1);
if (priv->token_name == NULL) {
r = SC_ERROR_OUT_OF_MEMORY;
goto done;
}
memcpy(priv->token_name, &decompressed_header->token_name[0],
decompressed_header->token_name_length);
priv->token_name[decompressed_header->token_name_length] = 0;
priv->token_name_length = decompressed_header->token_name_length;
for (i=0; i < object_count && object_offset < decompressed_object_len; i++ ) {
u8 *current_object = &decompressed_object[object_offset];
coolkey_combined_object_header_t *object_header =
(coolkey_combined_object_header_t *)current_object;
unsigned long object_id = bebytes2ulong(object_header->object_id);
int current_object_len;
/* figure out how big it is */
r = coolkey_v1_get_object_length(current_object, decompressed_object_len-object_offset);
if (r < 0) {
goto done;
}
if ((size_t)r + object_offset > decompressed_object_len) {
r = SC_ERROR_CORRUPTED_DATA;
goto done;
}
current_object_len = r;
object_offset += current_object_len;
/* record this object */
r = coolkey_add_object(priv, object_id, current_object, current_object_len, 1);
if (r) {
goto done;
}
}
r = SC_SUCCESS;
done:
if (free_decompressed) {
free(decompressed_object);
}
return r;
}
|
coolkey_process_combined_object(sc_card_t *card, coolkey_private_data_t *priv, u8 *object, size_t object_length)
{
coolkey_combined_header_t *header = (coolkey_combined_header_t *)object;
unsigned short compressed_offset;
unsigned short compressed_length;
unsigned short compressed_type;
unsigned short object_offset;
unsigned short object_count;
coolkey_decompressed_header_t *decompressed_header;
u8 *decompressed_object = NULL;
size_t decompressed_object_len = 0;
int free_decompressed = 0;
int i, r;
if (object_length < sizeof(coolkey_combined_header_t)) {
return SC_ERROR_CORRUPTED_DATA;
}
compressed_offset = bebytes2ushort(header->compression_offset);
compressed_length = bebytes2ushort(header->compression_length);
compressed_type = bebytes2ushort(header->compression_type);
if ((((size_t)compressed_offset) + (size_t)compressed_length) > object_length) {
return SC_ERROR_CORRUPTED_DATA;
}
/* store the CUID */
memcpy(&priv->cuid, &header->cuid, sizeof(priv->cuid));
if (compressed_type == COOLKEY_COMPRESSION_ZLIB) {
#ifdef ENABLE_ZLIB
r = sc_decompress_alloc(&decompressed_object, &decompressed_object_len, &object[compressed_offset], compressed_length, COMPRESSION_AUTO);
if (r)
goto done;
free_decompressed = 1;
#else
sc_log(card->ctx, "Coolkey compression not supported, no zlib");
LOG_FUNC_RETURN(card->ctx, SC_ERROR_NOT_SUPPORTED);
#endif
} else {
decompressed_object =&object[compressed_offset];
decompressed_object_len = (size_t) compressed_length;
}
decompressed_header = (coolkey_decompressed_header_t *)decompressed_object;
if (decompressed_object_len < sizeof(coolkey_decompressed_header_t)) {
return SC_ERROR_CORRUPTED_DATA;
}
object_offset = bebytes2ushort(decompressed_header->object_offset);
object_count = bebytes2ushort(decompressed_header->object_count);
/*
* using 2 different tests here so we can log different errors if logging is
* turned on.
*/
/* make sure token_name doesn't overrun the buffer */
if (decompressed_header->token_name_length +
offsetof(coolkey_decompressed_header_t,token_name) > decompressed_object_len) {
r = SC_ERROR_CORRUPTED_DATA;
goto done;
}
/* make sure it doesn't overlap the object space */
if (decompressed_header->token_name_length +
offsetof(coolkey_decompressed_header_t,token_name) > object_offset) {
r = SC_ERROR_CORRUPTED_DATA;
goto done;
}
/* store the token name in the priv structure so the emulator can set it */
priv->token_name = malloc(decompressed_header->token_name_length+1);
if (priv->token_name == NULL) {
r = SC_ERROR_OUT_OF_MEMORY;
goto done;
}
memcpy(priv->token_name, &decompressed_header->token_name[0],
decompressed_header->token_name_length);
priv->token_name[decompressed_header->token_name_length] = 0;
priv->token_name_length = decompressed_header->token_name_length;
for (i=0; i < object_count && object_offset < decompressed_object_len; i++ ) {
u8 *current_object = &decompressed_object[object_offset];
coolkey_combined_object_header_t *object_header =
(coolkey_combined_object_header_t *)current_object;
unsigned long object_id = bebytes2ulong(object_header->object_id);
int current_object_len;
/* figure out how big it is */
r = coolkey_v1_get_object_length(current_object, decompressed_object_len-object_offset);
if (r < 0) {
goto done;
}
if ((size_t)r + object_offset > decompressed_object_len) {
r = SC_ERROR_CORRUPTED_DATA;
goto done;
}
current_object_len = r;
object_offset += current_object_len;
/* record this object */
r = coolkey_add_object(priv, object_id, current_object, current_object_len, 1);
if (r) {
goto done;
}
}
r = SC_SUCCESS;
done:
if (free_decompressed) {
free(decompressed_object);
}
return r;
}
|
C
|
OpenSC
| 0 |
CVE-2017-5940
|
https://www.cvedetails.com/cve/CVE-2017-5940/
|
CWE-269
|
https://github.com/netblue30/firejail/commit/b8a4ff9775318ca5e679183884a6a63f3da8f863
|
b8a4ff9775318ca5e679183884a6a63f3da8f863
|
replace copy_file with copy_file_as_user
|
void invalid_filename(const char *fname) {
EUID_ASSERT();
assert(fname);
const char *ptr = fname;
if (arg_debug_check_filename)
printf("Checking filename %s\n", fname);
if (strncmp(ptr, "${HOME}", 7) == 0)
ptr = fname + 7;
else if (strncmp(ptr, "${PATH}", 7) == 0)
ptr = fname + 7;
else if (strcmp(fname, "${DOWNLOADS}") == 0)
return;
int len = strlen(ptr);
if (strcspn(ptr, "\\&!?\"'<>%^(){}[];,") != (size_t)len) {
fprintf(stderr, "Error: \"%s\" is an invalid filename\n", ptr);
exit(1);
}
}
|
void invalid_filename(const char *fname) {
EUID_ASSERT();
assert(fname);
const char *ptr = fname;
if (arg_debug_check_filename)
printf("Checking filename %s\n", fname);
if (strncmp(ptr, "${HOME}", 7) == 0)
ptr = fname + 7;
else if (strncmp(ptr, "${PATH}", 7) == 0)
ptr = fname + 7;
else if (strcmp(fname, "${DOWNLOADS}") == 0)
return;
int len = strlen(ptr);
if (strcspn(ptr, "\\&!?\"'<>%^(){}[];,") != (size_t)len) {
fprintf(stderr, "Error: \"%s\" is an invalid filename\n", ptr);
exit(1);
}
}
|
C
|
firejail
| 0 |
CVE-2015-0204
|
https://www.cvedetails.com/cve/CVE-2015-0204/
|
CWE-310
|
https://github.com/openssl/openssl/commit/ce325c60c74b0fa784f5872404b722e120e5cab0
|
ce325c60c74b0fa784f5872404b722e120e5cab0
|
Only allow ephemeral RSA keys in export ciphersuites.
OpenSSL clients would tolerate temporary RSA keys in non-export
ciphersuites. It also had an option SSL_OP_EPHEMERAL_RSA which
enabled this server side. Remove both options as they are a
protocol violation.
Thanks to Karthikeyan Bhargavan for reporting this issue.
(CVE-2015-0204)
Reviewed-by: Matt Caswell <[email protected]>
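On the client side the change boils down to rejecting a temporary RSA key outside export ciphersuites, roughly (hedged sketch; SSL_C_IS_EXPORT and the error plumbing follow OpenSSL conventions):

if (alg_k & SSL_kRSA) {
	/* Temporary RSA keys are only legal in export ciphersuites. */
	if (!SSL_C_IS_EXPORT(s->s3->tmp.new_cipher)) {
		al = SSL_AD_UNEXPECTED_MESSAGE;
		SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_UNEXPECTED_MESSAGE);
		goto f_err;
	}
	/* otherwise proceed to parse the server's temporary key */
}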
|
int ssl3_send_client_key_exchange(SSL *s)
{
unsigned char *p;
int n;
unsigned long alg_k;
#ifndef OPENSSL_NO_RSA
unsigned char *q;
EVP_PKEY *pkey=NULL;
#endif
#ifndef OPENSSL_NO_KRB5
KSSL_ERR kssl_err;
#endif /* OPENSSL_NO_KRB5 */
#ifndef OPENSSL_NO_ECDH
EC_KEY *clnt_ecdh = NULL;
const EC_POINT *srvr_ecpoint = NULL;
EVP_PKEY *srvr_pub_pkey = NULL;
unsigned char *encodedPoint = NULL;
int encoded_pt_len = 0;
BN_CTX * bn_ctx = NULL;
#endif
if (s->state == SSL3_ST_CW_KEY_EXCH_A)
{
p = ssl_handshake_start(s);
alg_k=s->s3->tmp.new_cipher->algorithm_mkey;
/* Fool emacs indentation */
if (0) {}
#ifndef OPENSSL_NO_RSA
else if (alg_k & SSL_kRSA)
{
RSA *rsa;
unsigned char tmp_buf[SSL_MAX_MASTER_KEY_LENGTH];
if (s->session->sess_cert == NULL)
{
/* We should always have a server certificate with SSL_kRSA. */
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR);
goto err;
}
if (s->session->sess_cert->peer_rsa_tmp != NULL)
rsa=s->session->sess_cert->peer_rsa_tmp;
else
{
pkey=X509_get_pubkey(s->session->sess_cert->peer_pkeys[SSL_PKEY_RSA_ENC].x509);
if ((pkey == NULL) ||
(pkey->type != EVP_PKEY_RSA) ||
(pkey->pkey.rsa == NULL))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR);
goto err;
}
rsa=pkey->pkey.rsa;
EVP_PKEY_free(pkey);
}
tmp_buf[0]=s->client_version>>8;
tmp_buf[1]=s->client_version&0xff;
if (RAND_bytes(&(tmp_buf[2]),sizeof tmp_buf-2) <= 0)
goto err;
s->session->master_key_length=sizeof tmp_buf;
q=p;
/* Fix buf for TLS and beyond */
if (s->version > SSL3_VERSION)
p+=2;
n=RSA_public_encrypt(sizeof tmp_buf,
tmp_buf,p,rsa,RSA_PKCS1_PADDING);
#ifdef PKCS1_CHECK
if (s->options & SSL_OP_PKCS1_CHECK_1) p[1]++;
if (s->options & SSL_OP_PKCS1_CHECK_2) tmp_buf[0]=0x70;
#endif
if (n <= 0)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,SSL_R_BAD_RSA_ENCRYPT);
goto err;
}
/* Fix buf for TLS and beyond */
if (s->version > SSL3_VERSION)
{
s2n(n,q);
n+=2;
}
s->session->master_key_length=
s->method->ssl3_enc->generate_master_secret(s,
s->session->master_key,
tmp_buf,sizeof tmp_buf);
OPENSSL_cleanse(tmp_buf,sizeof tmp_buf);
}
#endif
#ifndef OPENSSL_NO_KRB5
else if (alg_k & SSL_kKRB5)
{
krb5_error_code krb5rc;
KSSL_CTX *kssl_ctx = s->kssl_ctx;
/* krb5_data krb5_ap_req; */
krb5_data *enc_ticket;
krb5_data authenticator, *authp = NULL;
EVP_CIPHER_CTX ciph_ctx;
const EVP_CIPHER *enc = NULL;
unsigned char iv[EVP_MAX_IV_LENGTH];
unsigned char tmp_buf[SSL_MAX_MASTER_KEY_LENGTH];
unsigned char epms[SSL_MAX_MASTER_KEY_LENGTH
+ EVP_MAX_IV_LENGTH];
int padl, outl = sizeof(epms);
EVP_CIPHER_CTX_init(&ciph_ctx);
#ifdef KSSL_DEBUG
fprintf(stderr,"ssl3_send_client_key_exchange(%lx & %lx)\n",
alg_k, SSL_kKRB5);
#endif /* KSSL_DEBUG */
authp = NULL;
#ifdef KRB5SENDAUTH
if (KRB5SENDAUTH) authp = &authenticator;
#endif /* KRB5SENDAUTH */
krb5rc = kssl_cget_tkt(kssl_ctx, &enc_ticket, authp,
&kssl_err);
enc = kssl_map_enc(kssl_ctx->enctype);
if (enc == NULL)
goto err;
#ifdef KSSL_DEBUG
{
fprintf(stderr,"kssl_cget_tkt rtn %d\n", krb5rc);
if (krb5rc && kssl_err.text)
fprintf(stderr,"kssl_cget_tkt kssl_err=%s\n", kssl_err.text);
}
#endif /* KSSL_DEBUG */
if (krb5rc)
{
ssl3_send_alert(s,SSL3_AL_FATAL,
SSL_AD_HANDSHAKE_FAILURE);
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
kssl_err.reason);
goto err;
}
/*-
* 20010406 VRS - Earlier versions used KRB5 AP_REQ
* in place of RFC 2712 KerberosWrapper, as in:
*
* Send ticket (copy to *p, set n = length)
* n = krb5_ap_req.length;
* memcpy(p, krb5_ap_req.data, krb5_ap_req.length);
* if (krb5_ap_req.data)
* kssl_krb5_free_data_contents(NULL,&krb5_ap_req);
*
* Now using real RFC 2712 KerberosWrapper
* (Thanks to Simon Wilkinson <[email protected]>)
* Note: 2712 "opaque" types are here replaced
* with a 2-byte length followed by the value.
* Example:
* KerberosWrapper= xx xx asn1ticket 0 0 xx xx encpms
* Where "xx xx" = length bytes. Shown here with
* optional authenticator omitted.
*/
/* KerberosWrapper.Ticket */
s2n(enc_ticket->length,p);
memcpy(p, enc_ticket->data, enc_ticket->length);
p+= enc_ticket->length;
n = enc_ticket->length + 2;
/* KerberosWrapper.Authenticator */
if (authp && authp->length)
{
s2n(authp->length,p);
memcpy(p, authp->data, authp->length);
p+= authp->length;
n+= authp->length + 2;
free(authp->data);
authp->data = NULL;
authp->length = 0;
}
else
{
s2n(0,p);/* null authenticator length */
n+=2;
}
tmp_buf[0]=s->client_version>>8;
tmp_buf[1]=s->client_version&0xff;
if (RAND_bytes(&(tmp_buf[2]),sizeof tmp_buf-2) <= 0)
goto err;
/*-
* 20010420 VRS. Tried it this way; failed.
* EVP_EncryptInit_ex(&ciph_ctx,enc, NULL,NULL);
* EVP_CIPHER_CTX_set_key_length(&ciph_ctx,
* kssl_ctx->length);
* EVP_EncryptInit_ex(&ciph_ctx,NULL, key,iv);
*/
memset(iv, 0, sizeof iv); /* per RFC 1510 */
EVP_EncryptInit_ex(&ciph_ctx,enc, NULL,
kssl_ctx->key,iv);
EVP_EncryptUpdate(&ciph_ctx,epms,&outl,tmp_buf,
sizeof tmp_buf);
EVP_EncryptFinal_ex(&ciph_ctx,&(epms[outl]),&padl);
outl += padl;
if (outl > (int)sizeof epms)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR);
goto err;
}
EVP_CIPHER_CTX_cleanup(&ciph_ctx);
/* KerberosWrapper.EncryptedPreMasterSecret */
s2n(outl,p);
memcpy(p, epms, outl);
p+=outl;
n+=outl + 2;
s->session->master_key_length=
s->method->ssl3_enc->generate_master_secret(s,
s->session->master_key,
tmp_buf, sizeof tmp_buf);
OPENSSL_cleanse(tmp_buf, sizeof tmp_buf);
OPENSSL_cleanse(epms, outl);
}
#endif
#ifndef OPENSSL_NO_DH
else if (alg_k & (SSL_kDHE|SSL_kDHr|SSL_kDHd))
{
DH *dh_srvr,*dh_clnt;
SESS_CERT *scert = s->session->sess_cert;
if (scert == NULL)
{
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_UNEXPECTED_MESSAGE);
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,SSL_R_UNEXPECTED_MESSAGE);
goto err;
}
if (scert->peer_dh_tmp != NULL)
dh_srvr=scert->peer_dh_tmp;
else
{
/* we get them from the cert */
int idx = scert->peer_cert_type;
EVP_PKEY *spkey = NULL;
dh_srvr = NULL;
if (idx >= 0)
spkey = X509_get_pubkey(
scert->peer_pkeys[idx].x509);
if (spkey)
{
dh_srvr = EVP_PKEY_get1_DH(spkey);
EVP_PKEY_free(spkey);
}
if (dh_srvr == NULL)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto err;
}
}
if (s->s3->flags & TLS1_FLAGS_SKIP_CERT_VERIFY)
{
/* Use client certificate key */
EVP_PKEY *clkey = s->cert->key->privatekey;
dh_clnt = NULL;
if (clkey)
dh_clnt = EVP_PKEY_get1_DH(clkey);
if (dh_clnt == NULL)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto err;
}
}
else
{
/* generate a new random key */
if ((dh_clnt=DHparams_dup(dh_srvr)) == NULL)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_DH_LIB);
goto err;
}
if (!DH_generate_key(dh_clnt))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_DH_LIB);
DH_free(dh_clnt);
goto err;
}
}
/* use the 'p' output buffer for the DH key, but
* make sure to clear it out afterwards */
n=DH_compute_key(p,dh_srvr->pub_key,dh_clnt);
if (scert->peer_dh_tmp == NULL)
DH_free(dh_srvr);
if (n <= 0)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_DH_LIB);
DH_free(dh_clnt);
goto err;
}
/* generate master key from the result */
s->session->master_key_length=
s->method->ssl3_enc->generate_master_secret(s,
s->session->master_key,p,n);
/* clean up */
memset(p,0,n);
if (s->s3->flags & TLS1_FLAGS_SKIP_CERT_VERIFY)
n = 0;
else
{
/* send off the data */
n=BN_num_bytes(dh_clnt->pub_key);
s2n(n,p);
BN_bn2bin(dh_clnt->pub_key,p);
n+=2;
}
DH_free(dh_clnt);
/* perhaps clean things up a bit EAY EAY EAY EAY*/
}
#endif
#ifndef OPENSSL_NO_ECDH
else if (alg_k & (SSL_kECDHE|SSL_kECDHr|SSL_kECDHe))
{
const EC_GROUP *srvr_group = NULL;
EC_KEY *tkey;
int ecdh_clnt_cert = 0;
int field_size = 0;
if (s->session->sess_cert == NULL)
{
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_UNEXPECTED_MESSAGE);
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,SSL_R_UNEXPECTED_MESSAGE);
goto err;
}
/* Did we send out the client's
* ECDH share for use in premaster
* computation as part of client certificate?
* If so, set ecdh_clnt_cert to 1.
*/
if ((alg_k & (SSL_kECDHr|SSL_kECDHe)) && (s->cert != NULL))
{
/*-
* XXX: For now, we do not support client
* authentication using ECDH certificates.
* To add such support, one needs to add
* code that checks for appropriate
* conditions and sets ecdh_clnt_cert to 1.
* For example, the cert have an ECC
* key on the same curve as the server's
* and the key should be authorized for
* key agreement.
*
* One also needs to add code in ssl3_connect
* to skip sending the certificate verify
* message.
*
* if ((s->cert->key->privatekey != NULL) &&
* (s->cert->key->privatekey->type ==
* EVP_PKEY_EC) && ...)
* ecdh_clnt_cert = 1;
*/
}
if (s->session->sess_cert->peer_ecdh_tmp != NULL)
{
tkey = s->session->sess_cert->peer_ecdh_tmp;
}
else
{
/* Get the Server Public Key from Cert */
srvr_pub_pkey = X509_get_pubkey(s->session-> \
sess_cert->peer_pkeys[SSL_PKEY_ECC].x509);
if ((srvr_pub_pkey == NULL) ||
(srvr_pub_pkey->type != EVP_PKEY_EC) ||
(srvr_pub_pkey->pkey.ec == NULL))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto err;
}
tkey = srvr_pub_pkey->pkey.ec;
}
srvr_group = EC_KEY_get0_group(tkey);
srvr_ecpoint = EC_KEY_get0_public_key(tkey);
if ((srvr_group == NULL) || (srvr_ecpoint == NULL))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto err;
}
if ((clnt_ecdh=EC_KEY_new()) == NULL)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_MALLOC_FAILURE);
goto err;
}
if (!EC_KEY_set_group(clnt_ecdh, srvr_group))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_EC_LIB);
goto err;
}
if (ecdh_clnt_cert)
{
/* Reuse key info from our certificate
* We only need our private key to perform
* the ECDH computation.
*/
const BIGNUM *priv_key;
tkey = s->cert->key->privatekey->pkey.ec;
priv_key = EC_KEY_get0_private_key(tkey);
if (priv_key == NULL)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_MALLOC_FAILURE);
goto err;
}
if (!EC_KEY_set_private_key(clnt_ecdh, priv_key))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_EC_LIB);
goto err;
}
}
else
{
/* Generate a new ECDH key pair */
if (!(EC_KEY_generate_key(clnt_ecdh)))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_ECDH_LIB);
goto err;
}
}
/* use the 'p' output buffer for the ECDH key, but
* make sure to clear it out afterwards
*/
field_size = EC_GROUP_get_degree(srvr_group);
if (field_size <= 0)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_ECDH_LIB);
goto err;
}
n=ECDH_compute_key(p, (field_size+7)/8, srvr_ecpoint, clnt_ecdh, NULL);
if (n <= 0)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_ECDH_LIB);
goto err;
}
/* generate master key from the result */
s->session->master_key_length = s->method->ssl3_enc \
-> generate_master_secret(s,
s->session->master_key,
p, n);
memset(p, 0, n); /* clean up */
if (ecdh_clnt_cert)
{
/* Send empty client key exch message */
n = 0;
}
else
{
/* First check the size of encoding and
* allocate memory accordingly.
*/
encoded_pt_len =
EC_POINT_point2oct(srvr_group,
EC_KEY_get0_public_key(clnt_ecdh),
POINT_CONVERSION_UNCOMPRESSED,
NULL, 0, NULL);
encodedPoint = (unsigned char *)
OPENSSL_malloc(encoded_pt_len *
sizeof(unsigned char));
bn_ctx = BN_CTX_new();
if ((encodedPoint == NULL) ||
(bn_ctx == NULL))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_MALLOC_FAILURE);
goto err;
}
/* Encode the public key */
n = EC_POINT_point2oct(srvr_group,
EC_KEY_get0_public_key(clnt_ecdh),
POINT_CONVERSION_UNCOMPRESSED,
encodedPoint, encoded_pt_len, bn_ctx);
*p = n; /* length of encoded point */
/* Encoded point will be copied here */
p += 1;
/* copy the point */
memcpy((unsigned char *)p, encodedPoint, n);
/* increment n to account for length field */
n += 1;
}
/* Free allocated memory */
BN_CTX_free(bn_ctx);
if (encodedPoint != NULL) OPENSSL_free(encodedPoint);
if (clnt_ecdh != NULL)
EC_KEY_free(clnt_ecdh);
EVP_PKEY_free(srvr_pub_pkey);
}
#endif /* !OPENSSL_NO_ECDH */
else if (alg_k & SSL_kGOST)
{
/* GOST key exchange message creation */
EVP_PKEY_CTX *pkey_ctx;
X509 *peer_cert;
size_t msglen;
unsigned int md_len;
int keytype;
unsigned char premaster_secret[32],shared_ukm[32], tmp[256];
EVP_MD_CTX *ukm_hash;
EVP_PKEY *pub_key;
/* Get server certificate PKEY and create ctx from it */
peer_cert=s->session->sess_cert->peer_pkeys[(keytype=SSL_PKEY_GOST01)].x509;
if (!peer_cert)
peer_cert=s->session->sess_cert->peer_pkeys[(keytype=SSL_PKEY_GOST94)].x509;
if (!peer_cert) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,SSL_R_NO_GOST_CERTIFICATE_SENT_BY_PEER);
goto err;
}
pkey_ctx=EVP_PKEY_CTX_new(pub_key=X509_get_pubkey(peer_cert),NULL);
/* If we have send a certificate, and certificate key
* parameters match those of server certificate, use
* certificate key for key exchange
*/
/* Otherwise, generate ephemeral key pair */
EVP_PKEY_encrypt_init(pkey_ctx);
/* Generate session key */
RAND_bytes(premaster_secret,32);
/* If we have client certificate, use its secret as peer key */
if (s->s3->tmp.cert_req && s->cert->key->privatekey) {
if (EVP_PKEY_derive_set_peer(pkey_ctx,s->cert->key->privatekey) <=0) {
/* If there was an error - just ignore it. Ephemeral key
* would be used
*/
ERR_clear_error();
}
}
/* Compute shared IV and store it in algorithm-specific
* context data */
ukm_hash = EVP_MD_CTX_create();
EVP_DigestInit(ukm_hash,EVP_get_digestbynid(NID_id_GostR3411_94));
EVP_DigestUpdate(ukm_hash,s->s3->client_random,SSL3_RANDOM_SIZE);
EVP_DigestUpdate(ukm_hash,s->s3->server_random,SSL3_RANDOM_SIZE);
EVP_DigestFinal_ex(ukm_hash, shared_ukm, &md_len);
EVP_MD_CTX_destroy(ukm_hash);
if (EVP_PKEY_CTX_ctrl(pkey_ctx,-1,EVP_PKEY_OP_ENCRYPT,EVP_PKEY_CTRL_SET_IV,
8,shared_ukm)<0) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
SSL_R_LIBRARY_BUG);
goto err;
}
/* Make GOST keytransport blob message */
/*Encapsulate it into sequence */
*(p++)=V_ASN1_SEQUENCE | V_ASN1_CONSTRUCTED;
msglen=255;
if (EVP_PKEY_encrypt(pkey_ctx,tmp,&msglen,premaster_secret,32)<0) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
SSL_R_LIBRARY_BUG);
goto err;
}
if (msglen >= 0x80)
{
*(p++)=0x81;
*(p++)= msglen & 0xff;
n=msglen+3;
}
else
{
*(p++)= msglen & 0xff;
n=msglen+2;
}
memcpy(p, tmp, msglen);
/* Check if pubkey from client certificate was used */
if (EVP_PKEY_CTX_ctrl(pkey_ctx, -1, -1, EVP_PKEY_CTRL_PEER_KEY, 2, NULL) > 0)
{
/* Set flag "skip certificate verify" */
s->s3->flags |= TLS1_FLAGS_SKIP_CERT_VERIFY;
}
EVP_PKEY_CTX_free(pkey_ctx);
s->session->master_key_length=
s->method->ssl3_enc->generate_master_secret(s,
s->session->master_key,premaster_secret,32);
EVP_PKEY_free(pub_key);
}
#ifndef OPENSSL_NO_SRP
else if (alg_k & SSL_kSRP)
{
if (s->srp_ctx.A != NULL)
{
/* send off the data */
n=BN_num_bytes(s->srp_ctx.A);
s2n(n,p);
BN_bn2bin(s->srp_ctx.A,p);
n+=2;
}
else
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR);
goto err;
}
if (s->session->srp_username != NULL)
OPENSSL_free(s->session->srp_username);
s->session->srp_username = BUF_strdup(s->srp_ctx.login);
if (s->session->srp_username == NULL)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto err;
}
if ((s->session->master_key_length = SRP_generate_client_master_secret(s,s->session->master_key))<0)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR);
goto err;
}
}
#endif
#ifndef OPENSSL_NO_PSK
else if (alg_k & SSL_kPSK)
{
/* The callback needs PSK_MAX_IDENTITY_LEN + 1 bytes
 * to return a \0-terminated identity. The last byte
 * is ours, used to simulate strnlen. */
char identity[PSK_MAX_IDENTITY_LEN + 2];
size_t identity_len;
unsigned char *t = NULL;
unsigned char psk_or_pre_ms[PSK_MAX_PSK_LEN*2+4];
unsigned int pre_ms_len = 0, psk_len = 0;
int psk_err = 1;
n = 0;
if (s->psk_client_callback == NULL)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
SSL_R_PSK_NO_CLIENT_CB);
goto err;
}
memset(identity, 0, sizeof(identity));
psk_len = s->psk_client_callback(s, s->ctx->psk_identity_hint,
identity, sizeof(identity) - 1,
psk_or_pre_ms, sizeof(psk_or_pre_ms));
if (psk_len > PSK_MAX_PSK_LEN)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto psk_err;
}
else if (psk_len == 0)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
SSL_R_PSK_IDENTITY_NOT_FOUND);
goto psk_err;
}
identity[PSK_MAX_IDENTITY_LEN + 1] = '\0';
identity_len = strlen(identity);
if (identity_len > PSK_MAX_IDENTITY_LEN)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto psk_err;
}
/* create PSK pre_master_secret */
pre_ms_len = 2+psk_len+2+psk_len;
t = psk_or_pre_ms;
memmove(psk_or_pre_ms+psk_len+4, psk_or_pre_ms, psk_len);
s2n(psk_len, t);
memset(t, 0, psk_len);
t+=psk_len;
s2n(psk_len, t);
if (s->session->psk_identity_hint != NULL)
OPENSSL_free(s->session->psk_identity_hint);
s->session->psk_identity_hint = BUF_strdup(s->ctx->psk_identity_hint);
if (s->ctx->psk_identity_hint != NULL &&
s->session->psk_identity_hint == NULL)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto psk_err;
}
if (s->session->psk_identity != NULL)
OPENSSL_free(s->session->psk_identity);
s->session->psk_identity = BUF_strdup(identity);
if (s->session->psk_identity == NULL)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto psk_err;
}
s->session->master_key_length =
s->method->ssl3_enc->generate_master_secret(s,
s->session->master_key,
psk_or_pre_ms, pre_ms_len);
s2n(identity_len, p);
memcpy(p, identity, identity_len);
n = 2 + identity_len;
psk_err = 0;
psk_err:
OPENSSL_cleanse(identity, sizeof(identity));
OPENSSL_cleanse(psk_or_pre_ms, sizeof(psk_or_pre_ms));
if (psk_err != 0)
{
ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE);
goto err;
}
}
#endif
else
{
ssl3_send_alert(s, SSL3_AL_FATAL,
SSL_AD_HANDSHAKE_FAILURE);
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto err;
}
ssl_set_handshake_header(s, SSL3_MT_CLIENT_KEY_EXCHANGE, n);
s->state=SSL3_ST_CW_KEY_EXCH_B;
}
/* SSL3_ST_CW_KEY_EXCH_B */
return ssl_do_write(s);
err:
#ifndef OPENSSL_NO_ECDH
BN_CTX_free(bn_ctx);
if (encodedPoint != NULL) OPENSSL_free(encodedPoint);
if (clnt_ecdh != NULL)
EC_KEY_free(clnt_ecdh);
EVP_PKEY_free(srvr_pub_pkey);
#endif
return(-1);
}
|
int ssl3_send_client_key_exchange(SSL *s)
{
unsigned char *p;
int n;
unsigned long alg_k;
#ifndef OPENSSL_NO_RSA
unsigned char *q;
EVP_PKEY *pkey=NULL;
#endif
#ifndef OPENSSL_NO_KRB5
KSSL_ERR kssl_err;
#endif /* OPENSSL_NO_KRB5 */
#ifndef OPENSSL_NO_ECDH
EC_KEY *clnt_ecdh = NULL;
const EC_POINT *srvr_ecpoint = NULL;
EVP_PKEY *srvr_pub_pkey = NULL;
unsigned char *encodedPoint = NULL;
int encoded_pt_len = 0;
BN_CTX * bn_ctx = NULL;
#endif
if (s->state == SSL3_ST_CW_KEY_EXCH_A)
{
p = ssl_handshake_start(s);
alg_k=s->s3->tmp.new_cipher->algorithm_mkey;
/* Fool emacs indentation */
if (0) {}
#ifndef OPENSSL_NO_RSA
else if (alg_k & SSL_kRSA)
{
RSA *rsa;
unsigned char tmp_buf[SSL_MAX_MASTER_KEY_LENGTH];
if (s->session->sess_cert == NULL)
{
/* We should always have a server certificate with SSL_kRSA. */
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR);
goto err;
}
if (s->session->sess_cert->peer_rsa_tmp != NULL)
rsa=s->session->sess_cert->peer_rsa_tmp;
else
{
pkey=X509_get_pubkey(s->session->sess_cert->peer_pkeys[SSL_PKEY_RSA_ENC].x509);
if ((pkey == NULL) ||
(pkey->type != EVP_PKEY_RSA) ||
(pkey->pkey.rsa == NULL))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR);
goto err;
}
rsa=pkey->pkey.rsa;
EVP_PKEY_free(pkey);
}
tmp_buf[0]=s->client_version>>8;
tmp_buf[1]=s->client_version&0xff;
if (RAND_bytes(&(tmp_buf[2]),sizeof tmp_buf-2) <= 0)
goto err;
s->session->master_key_length=sizeof tmp_buf;
q=p;
/* Fix buf for TLS and beyond */
if (s->version > SSL3_VERSION)
p+=2;
n=RSA_public_encrypt(sizeof tmp_buf,
tmp_buf,p,rsa,RSA_PKCS1_PADDING);
#ifdef PKCS1_CHECK
if (s->options & SSL_OP_PKCS1_CHECK_1) p[1]++;
if (s->options & SSL_OP_PKCS1_CHECK_2) tmp_buf[0]=0x70;
#endif
if (n <= 0)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,SSL_R_BAD_RSA_ENCRYPT);
goto err;
}
/* Fix buf for TLS and beyond */
if (s->version > SSL3_VERSION)
{
s2n(n,q);
n+=2;
}
s->session->master_key_length=
s->method->ssl3_enc->generate_master_secret(s,
s->session->master_key,
tmp_buf,sizeof tmp_buf);
OPENSSL_cleanse(tmp_buf,sizeof tmp_buf);
}
#endif
#ifndef OPENSSL_NO_KRB5
else if (alg_k & SSL_kKRB5)
{
krb5_error_code krb5rc;
KSSL_CTX *kssl_ctx = s->kssl_ctx;
/* krb5_data krb5_ap_req; */
krb5_data *enc_ticket;
krb5_data authenticator, *authp = NULL;
EVP_CIPHER_CTX ciph_ctx;
const EVP_CIPHER *enc = NULL;
unsigned char iv[EVP_MAX_IV_LENGTH];
unsigned char tmp_buf[SSL_MAX_MASTER_KEY_LENGTH];
unsigned char epms[SSL_MAX_MASTER_KEY_LENGTH
+ EVP_MAX_IV_LENGTH];
int padl, outl = sizeof(epms);
EVP_CIPHER_CTX_init(&ciph_ctx);
#ifdef KSSL_DEBUG
fprintf(stderr,"ssl3_send_client_key_exchange(%lx & %lx)\n",
alg_k, SSL_kKRB5);
#endif /* KSSL_DEBUG */
authp = NULL;
#ifdef KRB5SENDAUTH
if (KRB5SENDAUTH) authp = &authenticator;
#endif /* KRB5SENDAUTH */
krb5rc = kssl_cget_tkt(kssl_ctx, &enc_ticket, authp,
&kssl_err);
enc = kssl_map_enc(kssl_ctx->enctype);
if (enc == NULL)
goto err;
#ifdef KSSL_DEBUG
{
fprintf(stderr,"kssl_cget_tkt rtn %d\n", krb5rc);
if (krb5rc && kssl_err.text)
fprintf(stderr,"kssl_cget_tkt kssl_err=%s\n", kssl_err.text);
}
#endif /* KSSL_DEBUG */
if (krb5rc)
{
ssl3_send_alert(s,SSL3_AL_FATAL,
SSL_AD_HANDSHAKE_FAILURE);
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
kssl_err.reason);
goto err;
}
/*-
* 20010406 VRS - Earlier versions used KRB5 AP_REQ
* in place of RFC 2712 KerberosWrapper, as in:
*
* Send ticket (copy to *p, set n = length)
* n = krb5_ap_req.length;
* memcpy(p, krb5_ap_req.data, krb5_ap_req.length);
* if (krb5_ap_req.data)
* kssl_krb5_free_data_contents(NULL,&krb5_ap_req);
*
* Now using real RFC 2712 KerberosWrapper
* (Thanks to Simon Wilkinson <[email protected]>)
* Note: 2712 "opaque" types are here replaced
* with a 2-byte length followed by the value.
* Example:
* KerberosWrapper= xx xx asn1ticket 0 0 xx xx encpms
* Where "xx xx" = length bytes. Shown here with
* optional authenticator omitted.
*/
/* KerberosWrapper.Ticket */
s2n(enc_ticket->length,p);
memcpy(p, enc_ticket->data, enc_ticket->length);
p+= enc_ticket->length;
n = enc_ticket->length + 2;
/* KerberosWrapper.Authenticator */
if (authp && authp->length)
{
s2n(authp->length,p);
memcpy(p, authp->data, authp->length);
p+= authp->length;
n+= authp->length + 2;
free(authp->data);
authp->data = NULL;
authp->length = 0;
}
else
{
s2n(0,p);/* null authenticator length */
n+=2;
}
tmp_buf[0]=s->client_version>>8;
tmp_buf[1]=s->client_version&0xff;
if (RAND_bytes(&(tmp_buf[2]),sizeof tmp_buf-2) <= 0)
goto err;
/*-
* 20010420 VRS. Tried it this way; failed.
* EVP_EncryptInit_ex(&ciph_ctx,enc, NULL,NULL);
* EVP_CIPHER_CTX_set_key_length(&ciph_ctx,
* kssl_ctx->length);
* EVP_EncryptInit_ex(&ciph_ctx,NULL, key,iv);
*/
memset(iv, 0, sizeof iv); /* per RFC 1510 */
EVP_EncryptInit_ex(&ciph_ctx,enc, NULL,
kssl_ctx->key,iv);
EVP_EncryptUpdate(&ciph_ctx,epms,&outl,tmp_buf,
sizeof tmp_buf);
EVP_EncryptFinal_ex(&ciph_ctx,&(epms[outl]),&padl);
outl += padl;
if (outl > (int)sizeof epms)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR);
goto err;
}
EVP_CIPHER_CTX_cleanup(&ciph_ctx);
/* KerberosWrapper.EncryptedPreMasterSecret */
s2n(outl,p);
memcpy(p, epms, outl);
p+=outl;
n+=outl + 2;
s->session->master_key_length=
s->method->ssl3_enc->generate_master_secret(s,
s->session->master_key,
tmp_buf, sizeof tmp_buf);
OPENSSL_cleanse(tmp_buf, sizeof tmp_buf);
OPENSSL_cleanse(epms, outl);
}
#endif
#ifndef OPENSSL_NO_DH
else if (alg_k & (SSL_kDHE|SSL_kDHr|SSL_kDHd))
{
DH *dh_srvr,*dh_clnt;
SESS_CERT *scert = s->session->sess_cert;
if (scert == NULL)
{
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_UNEXPECTED_MESSAGE);
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,SSL_R_UNEXPECTED_MESSAGE);
goto err;
}
if (scert->peer_dh_tmp != NULL)
dh_srvr=scert->peer_dh_tmp;
else
{
/* we get them from the cert */
int idx = scert->peer_cert_type;
EVP_PKEY *spkey = NULL;
dh_srvr = NULL;
if (idx >= 0)
spkey = X509_get_pubkey(
scert->peer_pkeys[idx].x509);
if (spkey)
{
dh_srvr = EVP_PKEY_get1_DH(spkey);
EVP_PKEY_free(spkey);
}
if (dh_srvr == NULL)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto err;
}
}
if (s->s3->flags & TLS1_FLAGS_SKIP_CERT_VERIFY)
{
/* Use client certificate key */
EVP_PKEY *clkey = s->cert->key->privatekey;
dh_clnt = NULL;
if (clkey)
dh_clnt = EVP_PKEY_get1_DH(clkey);
if (dh_clnt == NULL)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto err;
}
}
else
{
/* generate a new random key */
if ((dh_clnt=DHparams_dup(dh_srvr)) == NULL)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_DH_LIB);
goto err;
}
if (!DH_generate_key(dh_clnt))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_DH_LIB);
DH_free(dh_clnt);
goto err;
}
}
/* use the 'p' output buffer for the DH key, but
* make sure to clear it out afterwards */
n=DH_compute_key(p,dh_srvr->pub_key,dh_clnt);
if (scert->peer_dh_tmp == NULL)
DH_free(dh_srvr);
if (n <= 0)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_DH_LIB);
DH_free(dh_clnt);
goto err;
}
/* generate master key from the result */
s->session->master_key_length=
s->method->ssl3_enc->generate_master_secret(s,
s->session->master_key,p,n);
/* clean up */
memset(p,0,n);
if (s->s3->flags & TLS1_FLAGS_SKIP_CERT_VERIFY)
n = 0;
else
{
/* send off the data */
n=BN_num_bytes(dh_clnt->pub_key);
s2n(n,p);
BN_bn2bin(dh_clnt->pub_key,p);
n+=2;
}
DH_free(dh_clnt);
/* perhaps clean things up a bit EAY EAY EAY EAY*/
}
#endif
#ifndef OPENSSL_NO_ECDH
else if (alg_k & (SSL_kECDHE|SSL_kECDHr|SSL_kECDHe))
{
const EC_GROUP *srvr_group = NULL;
EC_KEY *tkey;
int ecdh_clnt_cert = 0;
int field_size = 0;
if (s->session->sess_cert == NULL)
{
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_UNEXPECTED_MESSAGE);
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,SSL_R_UNEXPECTED_MESSAGE);
goto err;
}
/* Did we send out the client's
* ECDH share for use in premaster
* computation as part of client certificate?
* If so, set ecdh_clnt_cert to 1.
*/
if ((alg_k & (SSL_kECDHr|SSL_kECDHe)) && (s->cert != NULL))
{
/*-
* XXX: For now, we do not support client
* authentication using ECDH certificates.
* To add such support, one needs to add
* code that checks for appropriate
* conditions and sets ecdh_clnt_cert to 1.
* For example, the cert have an ECC
* key on the same curve as the server's
* and the key should be authorized for
* key agreement.
*
* One also needs to add code in ssl3_connect
* to skip sending the certificate verify
* message.
*
* if ((s->cert->key->privatekey != NULL) &&
* (s->cert->key->privatekey->type ==
* EVP_PKEY_EC) && ...)
* ecdh_clnt_cert = 1;
*/
}
if (s->session->sess_cert->peer_ecdh_tmp != NULL)
{
tkey = s->session->sess_cert->peer_ecdh_tmp;
}
else
{
/* Get the Server Public Key from Cert */
srvr_pub_pkey = X509_get_pubkey(s->session-> \
sess_cert->peer_pkeys[SSL_PKEY_ECC].x509);
if ((srvr_pub_pkey == NULL) ||
(srvr_pub_pkey->type != EVP_PKEY_EC) ||
(srvr_pub_pkey->pkey.ec == NULL))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto err;
}
tkey = srvr_pub_pkey->pkey.ec;
}
srvr_group = EC_KEY_get0_group(tkey);
srvr_ecpoint = EC_KEY_get0_public_key(tkey);
if ((srvr_group == NULL) || (srvr_ecpoint == NULL))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto err;
}
if ((clnt_ecdh=EC_KEY_new()) == NULL)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_MALLOC_FAILURE);
goto err;
}
if (!EC_KEY_set_group(clnt_ecdh, srvr_group))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_EC_LIB);
goto err;
}
if (ecdh_clnt_cert)
{
/* Reuse key info from our certificate
* We only need our private key to perform
* the ECDH computation.
*/
const BIGNUM *priv_key;
tkey = s->cert->key->privatekey->pkey.ec;
priv_key = EC_KEY_get0_private_key(tkey);
if (priv_key == NULL)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_MALLOC_FAILURE);
goto err;
}
if (!EC_KEY_set_private_key(clnt_ecdh, priv_key))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_EC_LIB);
goto err;
}
}
else
{
/* Generate a new ECDH key pair */
if (!(EC_KEY_generate_key(clnt_ecdh)))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_ECDH_LIB);
goto err;
}
}
/* use the 'p' output buffer for the ECDH key, but
* make sure to clear it out afterwards
*/
field_size = EC_GROUP_get_degree(srvr_group);
if (field_size <= 0)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_ECDH_LIB);
goto err;
}
n=ECDH_compute_key(p, (field_size+7)/8, srvr_ecpoint, clnt_ecdh, NULL);
if (n <= 0)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_ECDH_LIB);
goto err;
}
/* generate master key from the result */
s->session->master_key_length = s->method->ssl3_enc \
-> generate_master_secret(s,
s->session->master_key,
p, n);
memset(p, 0, n); /* clean up */
if (ecdh_clnt_cert)
{
/* Send empty client key exch message */
n = 0;
}
else
{
/* First check the size of encoding and
* allocate memory accordingly.
*/
encoded_pt_len =
EC_POINT_point2oct(srvr_group,
EC_KEY_get0_public_key(clnt_ecdh),
POINT_CONVERSION_UNCOMPRESSED,
NULL, 0, NULL);
encodedPoint = (unsigned char *)
OPENSSL_malloc(encoded_pt_len *
sizeof(unsigned char));
bn_ctx = BN_CTX_new();
if ((encodedPoint == NULL) ||
(bn_ctx == NULL))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_MALLOC_FAILURE);
goto err;
}
/* Encode the public key */
n = EC_POINT_point2oct(srvr_group,
EC_KEY_get0_public_key(clnt_ecdh),
POINT_CONVERSION_UNCOMPRESSED,
encodedPoint, encoded_pt_len, bn_ctx);
*p = n; /* length of encoded point */
/* Encoded point will be copied here */
p += 1;
/* copy the point */
memcpy((unsigned char *)p, encodedPoint, n);
/* increment n to account for length field */
n += 1;
}
/* Free allocated memory */
BN_CTX_free(bn_ctx);
if (encodedPoint != NULL) OPENSSL_free(encodedPoint);
if (clnt_ecdh != NULL)
EC_KEY_free(clnt_ecdh);
EVP_PKEY_free(srvr_pub_pkey);
}
#endif /* !OPENSSL_NO_ECDH */
else if (alg_k & SSL_kGOST)
{
/* GOST key exchange message creation */
EVP_PKEY_CTX *pkey_ctx;
X509 *peer_cert;
size_t msglen;
unsigned int md_len;
int keytype;
unsigned char premaster_secret[32],shared_ukm[32], tmp[256];
EVP_MD_CTX *ukm_hash;
EVP_PKEY *pub_key;
/* Get server certificate PKEY and create ctx from it */
peer_cert=s->session->sess_cert->peer_pkeys[(keytype=SSL_PKEY_GOST01)].x509;
if (!peer_cert)
peer_cert=s->session->sess_cert->peer_pkeys[(keytype=SSL_PKEY_GOST94)].x509;
if (!peer_cert) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,SSL_R_NO_GOST_CERTIFICATE_SENT_BY_PEER);
goto err;
}
pkey_ctx=EVP_PKEY_CTX_new(pub_key=X509_get_pubkey(peer_cert),NULL);
/* If we have sent a certificate, and the certificate key
 * parameters match those of the server certificate, use the
 * certificate key for key exchange
 */
/* Otherwise, generate ephemeral key pair */
EVP_PKEY_encrypt_init(pkey_ctx);
/* Generate session key */
RAND_bytes(premaster_secret,32);
/* If we have client certificate, use its secret as peer key */
if (s->s3->tmp.cert_req && s->cert->key->privatekey) {
if (EVP_PKEY_derive_set_peer(pkey_ctx,s->cert->key->privatekey) <=0) {
/* If there was an error, just ignore it. The ephemeral
 * key will be used instead.
 */
ERR_clear_error();
}
}
/* Compute shared IV and store it in algorithm-specific
* context data */
ukm_hash = EVP_MD_CTX_create();
EVP_DigestInit(ukm_hash,EVP_get_digestbynid(NID_id_GostR3411_94));
EVP_DigestUpdate(ukm_hash,s->s3->client_random,SSL3_RANDOM_SIZE);
EVP_DigestUpdate(ukm_hash,s->s3->server_random,SSL3_RANDOM_SIZE);
EVP_DigestFinal_ex(ukm_hash, shared_ukm, &md_len);
EVP_MD_CTX_destroy(ukm_hash);
if (EVP_PKEY_CTX_ctrl(pkey_ctx,-1,EVP_PKEY_OP_ENCRYPT,EVP_PKEY_CTRL_SET_IV,
8,shared_ukm)<0) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
SSL_R_LIBRARY_BUG);
goto err;
}
/* Make GOST keytransport blob message */
/* Encapsulate it into a sequence */
*(p++)=V_ASN1_SEQUENCE | V_ASN1_CONSTRUCTED;
msglen=255;
if (EVP_PKEY_encrypt(pkey_ctx,tmp,&msglen,premaster_secret,32)<0) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
SSL_R_LIBRARY_BUG);
goto err;
}
if (msglen >= 0x80)
{
*(p++)=0x81;
*(p++)= msglen & 0xff;
n=msglen+3;
}
else
{
*(p++)= msglen & 0xff;
n=msglen+2;
}
memcpy(p, tmp, msglen);
/* Check if pubkey from client certificate was used */
if (EVP_PKEY_CTX_ctrl(pkey_ctx, -1, -1, EVP_PKEY_CTRL_PEER_KEY, 2, NULL) > 0)
{
/* Set flag "skip certificate verify" */
s->s3->flags |= TLS1_FLAGS_SKIP_CERT_VERIFY;
}
EVP_PKEY_CTX_free(pkey_ctx);
s->session->master_key_length=
s->method->ssl3_enc->generate_master_secret(s,
s->session->master_key,premaster_secret,32);
EVP_PKEY_free(pub_key);
}
#ifndef OPENSSL_NO_SRP
else if (alg_k & SSL_kSRP)
{
if (s->srp_ctx.A != NULL)
{
/* send off the data */
n=BN_num_bytes(s->srp_ctx.A);
s2n(n,p);
BN_bn2bin(s->srp_ctx.A,p);
n+=2;
}
else
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR);
goto err;
}
if (s->session->srp_username != NULL)
OPENSSL_free(s->session->srp_username);
s->session->srp_username = BUF_strdup(s->srp_ctx.login);
if (s->session->srp_username == NULL)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto err;
}
if ((s->session->master_key_length = SRP_generate_client_master_secret(s,s->session->master_key))<0)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR);
goto err;
}
}
#endif
#ifndef OPENSSL_NO_PSK
else if (alg_k & SSL_kPSK)
{
/* The callback needs PSK_MAX_IDENTITY_LEN + 1 bytes
 * to return a \0-terminated identity. The last byte
 * is ours, used to simulate strnlen. */
char identity[PSK_MAX_IDENTITY_LEN + 2];
size_t identity_len;
unsigned char *t = NULL;
unsigned char psk_or_pre_ms[PSK_MAX_PSK_LEN*2+4];
unsigned int pre_ms_len = 0, psk_len = 0;
int psk_err = 1;
n = 0;
if (s->psk_client_callback == NULL)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
SSL_R_PSK_NO_CLIENT_CB);
goto err;
}
memset(identity, 0, sizeof(identity));
psk_len = s->psk_client_callback(s, s->ctx->psk_identity_hint,
identity, sizeof(identity) - 1,
psk_or_pre_ms, sizeof(psk_or_pre_ms));
if (psk_len > PSK_MAX_PSK_LEN)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto psk_err;
}
else if (psk_len == 0)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
SSL_R_PSK_IDENTITY_NOT_FOUND);
goto psk_err;
}
identity[PSK_MAX_IDENTITY_LEN + 1] = '\0';
identity_len = strlen(identity);
if (identity_len > PSK_MAX_IDENTITY_LEN)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto psk_err;
}
/* create PSK pre_master_secret */
pre_ms_len = 2+psk_len+2+psk_len;
t = psk_or_pre_ms;
memmove(psk_or_pre_ms+psk_len+4, psk_or_pre_ms, psk_len);
s2n(psk_len, t);
memset(t, 0, psk_len);
t+=psk_len;
s2n(psk_len, t);
if (s->session->psk_identity_hint != NULL)
OPENSSL_free(s->session->psk_identity_hint);
s->session->psk_identity_hint = BUF_strdup(s->ctx->psk_identity_hint);
if (s->ctx->psk_identity_hint != NULL &&
s->session->psk_identity_hint == NULL)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto psk_err;
}
if (s->session->psk_identity != NULL)
OPENSSL_free(s->session->psk_identity);
s->session->psk_identity = BUF_strdup(identity);
if (s->session->psk_identity == NULL)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto psk_err;
}
s->session->master_key_length =
s->method->ssl3_enc->generate_master_secret(s,
s->session->master_key,
psk_or_pre_ms, pre_ms_len);
s2n(identity_len, p);
memcpy(p, identity, identity_len);
n = 2 + identity_len;
psk_err = 0;
psk_err:
OPENSSL_cleanse(identity, sizeof(identity));
OPENSSL_cleanse(psk_or_pre_ms, sizeof(psk_or_pre_ms));
if (psk_err != 0)
{
ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE);
goto err;
}
}
#endif
else
{
ssl3_send_alert(s, SSL3_AL_FATAL,
SSL_AD_HANDSHAKE_FAILURE);
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto err;
}
ssl_set_handshake_header(s, SSL3_MT_CLIENT_KEY_EXCHANGE, n);
s->state=SSL3_ST_CW_KEY_EXCH_B;
}
/* SSL3_ST_CW_KEY_EXCH_B */
return ssl_do_write(s);
err:
#ifndef OPENSSL_NO_ECDH
BN_CTX_free(bn_ctx);
if (encodedPoint != NULL) OPENSSL_free(encodedPoint);
if (clnt_ecdh != NULL)
EC_KEY_free(clnt_ecdh);
EVP_PKEY_free(srvr_pub_pkey);
#endif
return(-1);
}
|
C
|
openssl
| 0 |
CVE-2017-0587
|
https://www.cvedetails.com/cve/CVE-2017-0587/
|
CWE-119
|
https://android.googlesource.com/platform/external/libmpeg2/+/a86eb798d077b9b25c8f8c77e3c02c2f287c1ce7
|
a86eb798d077b9b25c8f8c77e3c02c2f287c1ce7
|
Fix handling of header decode errors
If header decode was unsuccessful, do not try decoding a frame
Also, initialize pic_wd, pic_ht for reinitialization when
decoder is created with smaller dimensions
Bug: 28886651
Bug: 35219737
Change-Id: I8c06d9052910e47fce2e6fe25ad318d4c83d2c50
(cherry picked from commit 2b9fa9ace2dbedfbac026fc9b6ab6cdac7f68c27)
(cherry picked from commit c2395cd7cc0c286a66de674032dd2ed26500aef4)
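A minimal sketch of the guard this message describes, assuming a decoder state with the u2_header_done, pic_wd, and pic_ht fields it mentions (illustrative only, not the project's exact patch):
/* Sketch: skip frame decode until the header has been parsed successfully. */
typedef struct {
    unsigned short u2_header_done; /* set to 1 only after a good header */
    unsigned short pic_wd;         /* picture width, initialized at create time */
    unsigned short pic_ht;         /* picture height, initialized at create time */
} dec_state_sketch_t;

static int decode_frame_guarded(dec_state_sketch_t *ps_dec)
{
    if (1 != ps_dec->u2_header_done)
        return -1; /* header decode failed or never ran: do not decode a frame */
    /* ... frame decode would proceed here ... */
    return 0;
}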
|
IV_API_CALL_STATUS_T impeg2d_api_get_buf_info(iv_obj_t *ps_dechdl,
void *pv_api_ip,
void *pv_api_op)
{
dec_state_t *ps_dec_state;
dec_state_multi_core_t *ps_dec_state_multi_core;
impeg2d_ctl_getbufinfo_ip_t *ps_ctl_bufinfo_ip =
(impeg2d_ctl_getbufinfo_ip_t *)pv_api_ip;
impeg2d_ctl_getbufinfo_op_t *ps_ctl_bufinfo_op =
(impeg2d_ctl_getbufinfo_op_t *)pv_api_op;
UWORD32 u4_i, u4_stride, u4_height;
UNUSED(ps_ctl_bufinfo_ip);
ps_dec_state_multi_core =
(dec_state_multi_core_t *)(ps_dechdl->pv_codec_handle);
ps_dec_state = ps_dec_state_multi_core->ps_dec_state[0];
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_num_in_bufs = 1;
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_num_out_bufs = 1;
if(ps_dec_state->i4_chromaFormat == IV_YUV_420P)
{
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_num_out_bufs =
MIN_OUT_BUFS_420;
}
else if((ps_dec_state->i4_chromaFormat == IV_YUV_420SP_UV)
|| (ps_dec_state->i4_chromaFormat == IV_YUV_420SP_VU))
{
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_num_out_bufs =
MIN_OUT_BUFS_420SP;
}
else if(ps_dec_state->i4_chromaFormat == IV_YUV_422ILE)
{
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_num_out_bufs =
MIN_OUT_BUFS_422ILE;
}
else if(ps_dec_state->i4_chromaFormat == IV_RGB_565)
{
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_num_out_bufs =
MIN_OUT_BUFS_RGB565;
}
else
{
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_error_code =
IVD_INIT_DEC_COL_FMT_NOT_SUPPORTED;
return IV_FAIL;
}
for(u4_i = 0; u4_i < IVD_VIDDEC_MAX_IO_BUFFERS; u4_i++)
{
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_in_buf_size[u4_i] =
0;
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_out_buf_size[u4_i] =
0;
}
for(u4_i = 0;
u4_i < ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_num_in_bufs;
u4_i++)
{
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_in_buf_size[u4_i] =
MAX_BITSTREAM_BUFFER_SIZE;
}
if (0 == ps_dec_state->u4_frm_buf_stride)
{
if (1 == ps_dec_state->u2_header_done)
{
u4_stride = ps_dec_state->u2_horizontal_size;
}
else
{
u4_stride = ps_dec_state->u2_create_max_width;
}
}
else
{
u4_stride = ps_dec_state->u4_frm_buf_stride;
}
u4_height = ((ps_dec_state->u2_frame_height + 15) >> 4) << 4;
if(ps_dec_state->i4_chromaFormat == IV_YUV_420P)
{
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_out_buf_size[0] =
(u4_stride * u4_height);
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_out_buf_size[1] =
(u4_stride * u4_height) >> 2;
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_out_buf_size[2] =
(u4_stride * u4_height) >> 2;
}
else if((ps_dec_state->i4_chromaFormat == IV_YUV_420SP_UV)
|| (ps_dec_state->i4_chromaFormat == IV_YUV_420SP_VU))
{
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_out_buf_size[0] =
(u4_stride * u4_height);
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_out_buf_size[1] =
(u4_stride * u4_height) >> 1;
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_out_buf_size[2] = 0;
}
else if(ps_dec_state->i4_chromaFormat == IV_YUV_422ILE)
{
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_out_buf_size[0] =
(u4_stride * u4_height) * 2;
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_out_buf_size[1] =
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_out_buf_size[2] =
0;
}
/* Adding initialization for 2 uninitialized values */
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_num_disp_bufs = 1;
if(ps_dec_state->u4_share_disp_buf)
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_num_disp_bufs =
NUM_INT_FRAME_BUFFERS;
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_size = MAX_FRM_SIZE;
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_error_code = IV_SUCCESS;
return (IV_SUCCESS);
}
|
IV_API_CALL_STATUS_T impeg2d_api_get_buf_info(iv_obj_t *ps_dechdl,
void *pv_api_ip,
void *pv_api_op)
{
dec_state_t *ps_dec_state;
dec_state_multi_core_t *ps_dec_state_multi_core;
impeg2d_ctl_getbufinfo_ip_t *ps_ctl_bufinfo_ip =
(impeg2d_ctl_getbufinfo_ip_t *)pv_api_ip;
impeg2d_ctl_getbufinfo_op_t *ps_ctl_bufinfo_op =
(impeg2d_ctl_getbufinfo_op_t *)pv_api_op;
UWORD32 u4_i, u4_stride, u4_height;
UNUSED(ps_ctl_bufinfo_ip);
ps_dec_state_multi_core =
(dec_state_multi_core_t *)(ps_dechdl->pv_codec_handle);
ps_dec_state = ps_dec_state_multi_core->ps_dec_state[0];
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_num_in_bufs = 1;
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_num_out_bufs = 1;
if(ps_dec_state->i4_chromaFormat == IV_YUV_420P)
{
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_num_out_bufs =
MIN_OUT_BUFS_420;
}
else if((ps_dec_state->i4_chromaFormat == IV_YUV_420SP_UV)
|| (ps_dec_state->i4_chromaFormat == IV_YUV_420SP_VU))
{
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_num_out_bufs =
MIN_OUT_BUFS_420SP;
}
else if(ps_dec_state->i4_chromaFormat == IV_YUV_422ILE)
{
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_num_out_bufs =
MIN_OUT_BUFS_422ILE;
}
else if(ps_dec_state->i4_chromaFormat == IV_RGB_565)
{
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_num_out_bufs =
MIN_OUT_BUFS_RGB565;
}
else
{
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_error_code =
IVD_INIT_DEC_COL_FMT_NOT_SUPPORTED;
return IV_FAIL;
}
for(u4_i = 0; u4_i < IVD_VIDDEC_MAX_IO_BUFFERS; u4_i++)
{
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_in_buf_size[u4_i] =
0;
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_out_buf_size[u4_i] =
0;
}
for(u4_i = 0;
u4_i < ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_num_in_bufs;
u4_i++)
{
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_in_buf_size[u4_i] =
MAX_BITSTREAM_BUFFER_SIZE;
}
if (0 == ps_dec_state->u4_frm_buf_stride)
{
if (1 == ps_dec_state->u2_header_done)
{
u4_stride = ps_dec_state->u2_horizontal_size;
}
else
{
u4_stride = ps_dec_state->u2_create_max_width;
}
}
else
{
u4_stride = ps_dec_state->u4_frm_buf_stride;
}
u4_height = ((ps_dec_state->u2_frame_height + 15) >> 4) << 4;
if(ps_dec_state->i4_chromaFormat == IV_YUV_420P)
{
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_out_buf_size[0] =
(u4_stride * u4_height);
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_out_buf_size[1] =
(u4_stride * u4_height) >> 2;
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_out_buf_size[2] =
(u4_stride * u4_height) >> 2;
}
else if((ps_dec_state->i4_chromaFormat == IV_YUV_420SP_UV)
|| (ps_dec_state->i4_chromaFormat == IV_YUV_420SP_VU))
{
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_out_buf_size[0] =
(u4_stride * u4_height);
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_out_buf_size[1] =
(u4_stride * u4_height) >> 1;
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_out_buf_size[2] = 0;
}
else if(ps_dec_state->i4_chromaFormat == IV_YUV_422ILE)
{
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_out_buf_size[0] =
(u4_stride * u4_height) * 2;
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_out_buf_size[1] =
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_min_out_buf_size[2] =
0;
}
/* Adding initialization for 2 uninitialized values */
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_num_disp_bufs = 1;
if(ps_dec_state->u4_share_disp_buf)
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_num_disp_bufs =
NUM_INT_FRAME_BUFFERS;
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_size = MAX_FRM_SIZE;
ps_ctl_bufinfo_op->s_ivd_ctl_getbufinfo_op_t.u4_error_code = IV_SUCCESS;
return (IV_SUCCESS);
}
|
C
|
Android
| 0 |
CVE-2013-4587
|
https://www.cvedetails.com/cve/CVE-2013-4587/
|
CWE-20
|
https://github.com/torvalds/linux/commit/338c7dbadd2671189cec7faf64c84d01071b3f96
|
338c7dbadd2671189cec7faf64c84d01071b3f96
|
KVM: Improve create VCPU parameter (CVE-2013-4587)
In multiple functions the vcpu_id is used as an offset into a bitfield. A
malicious user could specify a vcpu_id greater than 255 in order to set or
clear bits in kernel memory. This could be used to elevate privileges in the
kernel. This patch verifies that the vcpu_id provided is less than 255.
The api documentation already specifies that the vcpu_id must be less than
max_vcpus, but this is currently not checked.
Reported-by: Andrew Honig <[email protected]>
Cc: [email protected]
Signed-off-by: Andrew Honig <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
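A hedged sketch of the check the message describes, rejecting a vcpu_id at or above the supported maximum before it is ever used as a bitfield offset (KVM_MAX_VCPUS stands in for the real limit; not necessarily the exact patch):
#include <errno.h>

#define KVM_MAX_VCPUS 255

/* Sketch: validate the user-supplied vcpu id before any bitfield use. */
static int kvm_create_vcpu_checked(unsigned int id)
{
    if (id >= KVM_MAX_VCPUS)
        return -EINVAL; /* blocks out-of-bounds bit set/clear in kernel memory */
    /* ... normal vcpu creation would continue here ... */
    return 0;
}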
|
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
void *v)
{
int cpu = (long)v;
val &= ~CPU_TASKS_FROZEN;
switch (val) {
case CPU_DYING:
printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
cpu);
hardware_disable();
break;
case CPU_STARTING:
printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
cpu);
hardware_enable();
break;
}
return NOTIFY_OK;
}
|
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
void *v)
{
int cpu = (long)v;
val &= ~CPU_TASKS_FROZEN;
switch (val) {
case CPU_DYING:
printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
cpu);
hardware_disable();
break;
case CPU_STARTING:
printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
cpu);
hardware_enable();
break;
}
return NOTIFY_OK;
}
|
C
|
linux
| 0 |
CVE-2019-5755
|
https://www.cvedetails.com/cve/CVE-2019-5755/
|
CWE-189
|
https://github.com/chromium/chromium/commit/971548cdca2d4c0a6fedd3db0c94372c2a27eac3
|
971548cdca2d4c0a6fedd3db0c94372c2a27eac3
|
Make MediaStreamDispatcherHost per-request instead of per-frame.
Instead of having RenderFrameHost own a single MSDH to handle all
requests from a frame, MSDH objects will be owned by a strong binding.
A consequence of this is that an additional requester ID is added to
requests to MediaStreamManager, so that an MSDH is able to cancel only
requests generated by it.
In practice, MSDH will continue to be per frame in most cases since
each frame normally makes a single request for an MSDH object.
This fixes a lifetime issue caused by the IO thread executing tasks
after the RenderFrameHost dies.
Drive-by: Fix some minor lint issues.
Bug: 912520
Change-Id: I52742ffc98b9fc57ce8e6f5093a61aed86d3e516
Reviewed-on: https://chromium-review.googlesource.com/c/1369799
Reviewed-by: Emircan Uysaler <[email protected]>
Reviewed-by: Ken Buchanan <[email protected]>
Reviewed-by: Olga Sharonova <[email protected]>
Commit-Queue: Guido Urdaneta <[email protected]>
Cr-Commit-Position: refs/heads/master@{#616347}
|
void MediaStreamManager::OnStreamStarted(const std::string& label) {
DCHECK_CURRENTLY_ON(BrowserThread::IO);
DeviceRequest* const request = FindRequest(label);
if (!request)
return;
if (request->ui_proxy) {
request->ui_proxy->OnStarted(
base::BindOnce(&MediaStreamManager::StopMediaStreamFromBrowser,
base::Unretained(this), label),
base::BindOnce(&MediaStreamManager::OnMediaStreamUIWindowId,
base::Unretained(this), request->video_type(),
request->devices));
}
}
|
void MediaStreamManager::OnStreamStarted(const std::string& label) {
DCHECK_CURRENTLY_ON(BrowserThread::IO);
DeviceRequest* const request = FindRequest(label);
if (!request)
return;
if (request->ui_proxy) {
request->ui_proxy->OnStarted(
base::BindOnce(&MediaStreamManager::StopMediaStreamFromBrowser,
base::Unretained(this), label),
base::BindOnce(&MediaStreamManager::OnMediaStreamUIWindowId,
base::Unretained(this), request->video_type(),
request->devices));
}
}
|
C
|
Chrome
| 0 |
CVE-2018-18344
|
https://www.cvedetails.com/cve/CVE-2018-18344/
|
CWE-20
|
https://github.com/chromium/chromium/commit/c71d8045ce0592cf3f4290744ab57b23c1d1b4c6
|
c71d8045ce0592cf3f4290744ab57b23c1d1b4c6
|
[DevTools] Do not allow Page.setDownloadBehavior for extensions
Bug: 866426
Change-Id: I71b672978e1a8ec779ede49da16b21198567d3a4
Reviewed-on: https://chromium-review.googlesource.com/c/1270007
Commit-Queue: Dmitry Gozman <[email protected]>
Reviewed-by: Devlin <[email protected]>
Cr-Commit-Position: refs/heads/master@{#598004}
|
Response PageHandler::StopLoading() {
WebContentsImpl* web_contents = GetWebContents();
if (!web_contents)
return Response::InternalError();
web_contents->Stop();
return Response::OK();
}
|
Response PageHandler::StopLoading() {
WebContentsImpl* web_contents = GetWebContents();
if (!web_contents)
return Response::InternalError();
web_contents->Stop();
return Response::OK();
}
|
C
|
Chrome
| 0 |
CVE-2019-5822
|
https://www.cvedetails.com/cve/CVE-2019-5822/
|
CWE-284
|
https://github.com/chromium/chromium/commit/2f81d000fdb5331121cba7ff81dfaaec25b520a5
|
2f81d000fdb5331121cba7ff81dfaaec25b520a5
|
When turning a download into a navigation, navigate the right frame
Code changes from Nate Chapin <[email protected]>
Bug: 926105
Change-Id: I098599394e6ebe7d2fce5af838014297a337d294
Reviewed-on: https://chromium-review.googlesource.com/c/1454962
Reviewed-by: Camille Lamy <[email protected]>
Commit-Queue: Jochen Eisinger <[email protected]>
Cr-Commit-Position: refs/heads/master@{#629547}
|
void EnableFileChooser(bool enable) {
file_activity_observer_->EnableFileChooser(enable);
}
|
void EnableFileChooser(bool enable) {
file_activity_observer_->EnableFileChooser(enable);
}
|
C
|
Chrome
| 0 |
CVE-2015-4054
|
https://www.cvedetails.com/cve/CVE-2015-4054/
|
CWE-476
|
https://github.com/pgbouncer/pgbouncer/commit/edab5be6665b9e8de66c25ba527509b229468573
|
edab5be6665b9e8de66c25ba527509b229468573
|
Check if auth_user is set.
Fixes a crash if password packet appears before startup packet (#42).
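The fix itself is a null check before auth_user is consulted; a minimal sketch of that guard, using the PgSocket/PgDatabase fields visible in the surrounding code (placement is illustrative, not the project's exact diff):
#include <stdbool.h>

/* Sketch: auth_user is unset until the startup packet has arrived, so a
 * password packet sent first must not dereference it. */
static bool have_auth_user(PgSocket *client)
{
    return client != NULL
        && client->db != NULL
        && client->db->auth_user != NULL;
}
/* Callers would disconnect_client(client, true, "broken packet order")
 * instead of proceeding with a NULL auth_user. */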
|
static void start_auth_request(PgSocket *client, const char *username)
{
int res;
PktBuf *buf;
client->auth_user = client->db->auth_user;
/* have to fetch user info from db */
client->pool = get_pool(client->db, client->db->auth_user);
if (!find_server(client)) {
client->wait_for_user_conn = true;
return;
}
slog_noise(client, "Doing auth_conn query");
client->wait_for_user_conn = false;
client->wait_for_user = true;
if (!sbuf_pause(&client->sbuf)) {
release_server(client->link);
disconnect_client(client, true, "pause failed");
return;
}
client->link->ready = 0;
res = 0;
buf = pktbuf_dynamic(512);
if (buf) {
pktbuf_write_ExtQuery(buf, cf_auth_query, 1, username);
res = pktbuf_send_immediate(buf, client->link);
pktbuf_free(buf);
/*
* Should do instead:
* res = pktbuf_send_queued(buf, client->link);
* but that needs better integration with SBuf.
*/
}
if (!res)
disconnect_server(client->link, false, "unable to send login query");
}
|
static void start_auth_request(PgSocket *client, const char *username)
{
int res;
PktBuf *buf;
client->auth_user = client->db->auth_user;
/* have to fetch user info from db */
client->pool = get_pool(client->db, client->db->auth_user);
if (!find_server(client)) {
client->wait_for_user_conn = true;
return;
}
slog_noise(client, "Doing auth_conn query");
client->wait_for_user_conn = false;
client->wait_for_user = true;
if (!sbuf_pause(&client->sbuf)) {
release_server(client->link);
disconnect_client(client, true, "pause failed");
return;
}
client->link->ready = 0;
res = 0;
buf = pktbuf_dynamic(512);
if (buf) {
pktbuf_write_ExtQuery(buf, cf_auth_query, 1, username);
res = pktbuf_send_immediate(buf, client->link);
pktbuf_free(buf);
/*
* Should do instead:
* res = pktbuf_send_queued(buf, client->link);
* but that needs better integration with SBuf.
*/
}
if (!res)
disconnect_server(client->link, false, "unable to send login query");
}
|
C
|
pgbouncer
| 0 |
CVE-2011-3107
|
https://www.cvedetails.com/cve/CVE-2011-3107/
| null |
https://github.com/chromium/chromium/commit/89e4098439f73cb5c16996511cbfdb171a26e173
|
89e4098439f73cb5c16996511cbfdb171a26e173
|
[Qt][WK2] There's no way to test the gesture tap on WTR
https://bugs.webkit.org/show_bug.cgi?id=92895
Reviewed by Kenneth Rohde Christiansen.
Source/WebKit2:
Add an instance of QtViewportHandler to QQuickWebViewPrivate, so it's
now available on mobile and desktop modes, as a side effect gesture tap
events can now be created and sent to WebCore.
This is needed to test tap gestures and to get tap gestures working
when you have a WebView (in desktop mode) on notebooks equipped with
touch screens.
* UIProcess/API/qt/qquickwebview.cpp:
(QQuickWebViewPrivate::onComponentComplete):
(QQuickWebViewFlickablePrivate::onComponentComplete): Implementation
moved to QQuickWebViewPrivate::onComponentComplete.
* UIProcess/API/qt/qquickwebview_p_p.h:
(QQuickWebViewPrivate):
(QQuickWebViewFlickablePrivate):
Tools:
WTR doesn't create the QQuickItem from C++, not from QML, so a call
to componentComplete() was added to mimic the QML behaviour.
* WebKitTestRunner/qt/PlatformWebViewQt.cpp:
(WTR::PlatformWebView::PlatformWebView):
git-svn-id: svn://svn.chromium.org/blink/trunk@124625 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void QQuickWebView::handleFlickableMouseRelease(const QPointF& position, qint64 eventTimestampMillis)
{
Q_D(QQuickWebView);
QMouseEvent mouseEvent(QEvent::MouseButtonRelease, d->axisLocker.adjust(position), Qt::LeftButton, Qt::NoButton, Qt::NoModifier);
d->axisLocker.reset();
mouseEvent.setTimestamp(eventTimestampMillis);
QQuickFlickable::mouseReleaseEvent(&mouseEvent);
}
|
void QQuickWebView::handleFlickableMouseRelease(const QPointF& position, qint64 eventTimestampMillis)
{
Q_D(QQuickWebView);
QMouseEvent mouseEvent(QEvent::MouseButtonRelease, d->axisLocker.adjust(position), Qt::LeftButton, Qt::NoButton, Qt::NoModifier);
d->axisLocker.reset();
mouseEvent.setTimestamp(eventTimestampMillis);
QQuickFlickable::mouseReleaseEvent(&mouseEvent);
}
|
C
|
Chrome
| 0 |
CVE-2016-1621
|
https://www.cvedetails.com/cve/CVE-2016-1621/
|
CWE-119
|
https://android.googlesource.com/platform/external/libvpx/+/04839626ed859623901ebd3a5fd483982186b59d
|
04839626ed859623901ebd3a5fd483982186b59d
|
libwebm: Pull from upstream
Rolling mkvparser from upstream, primarily to fix a bug causing parsing
failures with certain Opus WebM files.
Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae
The diff is so large because there were some style cleanups upstream,
but it was verified that the style cleanups introduced no breaking changes.
Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039
|
long Track::GetFirst(const BlockEntry*& pBlockEntry) const
if (pCluster->EOS()) {
#if 0
if (m_pSegment->Unparsed() <= 0) { //all clusters have been loaded
pBlockEntry = GetEOS();
return 1;
}
#else
if (m_pSegment->DoneParsing()) {
pBlockEntry = GetEOS();
return 1;
}
#endif
pBlockEntry = 0;
return E_BUFFER_NOT_FULL;
}
long status = pCluster->GetFirst(pBlockEntry);
|
long Track::GetFirst(const BlockEntry*& pBlockEntry) const
{
const Cluster* pCluster = m_pSegment->GetFirst();
for (int i = 0; ; )
{
if (pCluster == NULL)
{
pBlockEntry = GetEOS();
return 1;
}
if (pCluster->EOS())
{
#if 0
if (m_pSegment->Unparsed() <= 0) //all clusters have been loaded
{
pBlockEntry = GetEOS();
return 1;
}
#else
if (m_pSegment->DoneParsing())
{
pBlockEntry = GetEOS();
return 1;
}
#endif
pBlockEntry = 0;
return E_BUFFER_NOT_FULL;
}
long status = pCluster->GetFirst(pBlockEntry);
if (status < 0) //error
return status;
if (pBlockEntry == 0) //empty cluster
{
pCluster = m_pSegment->GetNext(pCluster);
continue;
}
for (;;)
{
const Block* const pBlock = pBlockEntry->GetBlock();
assert(pBlock);
const long long tn = pBlock->GetTrackNumber();
if ((tn == m_info.number) && VetEntry(pBlockEntry))
return 0;
const BlockEntry* pNextEntry;
status = pCluster->GetNext(pBlockEntry, pNextEntry);
if (status < 0) //error
return status;
if (pNextEntry == 0)
break;
pBlockEntry = pNextEntry;
}
++i;
if (i >= 100)
break;
pCluster = m_pSegment->GetNext(pCluster);
}
pBlockEntry = GetEOS(); //so we can return a non-NULL value
return 1;
}
|
C
|
Android
| 1 |
CVE-2016-1696
|
https://www.cvedetails.com/cve/CVE-2016-1696/
|
CWE-284
|
https://github.com/chromium/chromium/commit/c0569cc04741cccf6548c2169fcc1609d958523f
|
c0569cc04741cccf6548c2169fcc1609d958523f
|
[Extensions] Expand bindings access checks
BUG=601149
BUG=601073
Review URL: https://codereview.chromium.org/1866103002
Cr-Commit-Position: refs/heads/master@{#387710}
|
void Dispatcher::OnUsingWebRequestAPI(bool webrequest_used) {
webrequest_used_ = webrequest_used;
}
|
void Dispatcher::OnUsingWebRequestAPI(bool webrequest_used) {
webrequest_used_ = webrequest_used;
}
|
C
|
Chrome
| 0 |
CVE-2015-1335
|
https://www.cvedetails.com/cve/CVE-2015-1335/
|
CWE-59
|
https://github.com/lxc/lxc/commit/592fd47a6245508b79fe6ac819fe6d3b2c1289be
|
592fd47a6245508b79fe6ac819fe6d3b2c1289be
|
CVE-2015-1335: Protect container mounts against symlinks
When a container starts up, lxc sets up the container's initial fstree
by doing a bunch of mounting, guided by the container configuration
file. The container config is owned by the admin or user on the host,
so we do not try to guard against bad entries. However, since the
mount target is in the container, it's possible that the container admin
could divert the mount with symbolic links. This could bypass proper
container startup (i.e. confinement of a root-owned container by the
restrictive apparmor policy, by diverting the required write to
/proc/self/attr/current), or bypass the (path-based) apparmor policy
by diverting, say, /proc to /mnt in the container.
To prevent this,
1. do not allow mounts to paths containing symbolic links
2. do not allow bind mounts from relative paths containing symbolic
links.
Details:
Define safe_mount which ensures that the container has not inserted any
symbolic links into any mount targets for mounts to be done during
container setup.
The host's mount path may contain symbolic links. As it is under the
control of the administrator, that's ok. So safe_mount begins the check
for symbolic links after the rootfs->mount, by opening that directory.
It opens each directory along the path using openat() relative to the
parent directory using O_NOFOLLOW. When the target is reached, it
mounts onto /proc/self/fd/<targetfd>.
Use safe_mount() in mount_entry(), when mounting container proc,
and when needed. In particular, safe_mount() need not be used in
any case where:
1. the mount is done in the container's namespace
2. the mount is for the container's rootfs
3. the mount is relative to a tmpfs or proc/sysfs which we have
just safe_mount()ed ourselves
Since we were using proc/net as a temporary placeholder for /proc/sys/net
during container startup, and proc/net is a symbolic link, use proc/tty
instead.
Update the lxc.container.conf manpage with details about the new
restrictions.
Finally, add a testcase to test some symbolic link possibilities.
Reported-by: Roman Fiedler
Signed-off-by: Serge Hallyn <[email protected]>
Acked-by: Stéphane Graber <[email protected]>
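A compact user-space sketch of the walk the message describes: open each path component relative to its parent with O_NOFOLLOW, so any symlink aborts with ELOOP, then mount onto the vetted descriptor via /proc/self/fd (simplified; the real safe_mount handles more cases):
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mount.h>
#include <unistd.h>

/* Sketch: 'target' is relative to rootfd and is modified in place. */
static int safe_mount_sketch(const char *src, int rootfd, char *target,
                             const char *fstype, unsigned long flags)
{
    char proc_path[64];
    char *comp, *saveptr = NULL;
    int ret, next, fd = rootfd;

    for (comp = strtok_r(target, "/", &saveptr); comp != NULL;
         comp = strtok_r(NULL, "/", &saveptr)) {
        next = openat(fd, comp, O_RDONLY | O_CLOEXEC | O_NOFOLLOW);
        if (fd != rootfd)
            close(fd);
        if (next < 0)
            return -1; /* symlink (ELOOP) or missing component: refuse */
        fd = next;
    }
    snprintf(proc_path, sizeof(proc_path), "/proc/self/fd/%d", fd);
    ret = mount(src, proc_path, fstype, flags, NULL); /* mount on vetted fd */
    if (fd != rootfd)
        close(fd);
    return ret;
}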
|
static int lxc_free_idmap(struct lxc_list *id_map) {
struct lxc_list *it, *next;
lxc_list_for_each_safe(it, id_map, next) {
lxc_list_del(it);
free(it->elem);
free(it);
}
return 0;
}
|
static int lxc_free_idmap(struct lxc_list *id_map) {
struct lxc_list *it, *next;
lxc_list_for_each_safe(it, id_map, next) {
lxc_list_del(it);
free(it->elem);
free(it);
}
return 0;
}
|
C
|
lxc
| 0 |
CVE-2013-6621
|
https://www.cvedetails.com/cve/CVE-2013-6621/
|
CWE-399
|
https://github.com/chromium/chromium/commit/4039d2fcaab746b6c20017ba9bb51c3a2403a76c
|
4039d2fcaab746b6c20017ba9bb51c3a2403a76c
|
Add logging to figure out which IPC we're failing to deserialize in RenderFrame.
BUG=369553
[email protected]
Review URL: https://codereview.chromium.org/263833020
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@268565 0039d316-1c4b-4281-b951-d872f2087c98
|
void RenderFrameImpl::AddObserver(RenderFrameObserver* observer) {
observers_.AddObserver(observer);
}
|
void RenderFrameImpl::AddObserver(RenderFrameObserver* observer) {
observers_.AddObserver(observer);
}
|
C
|
Chrome
| 0 |
CVE-2014-3690
|
https://www.cvedetails.com/cve/CVE-2014-3690/
|
CWE-399
|
https://github.com/torvalds/linux/commit/d974baa398f34393db76be45f7d4d04fbdbb4a0a
|
d974baa398f34393db76be45f7d4d04fbdbb4a0a
|
x86,kvm,vmx: Preserve CR4 across VM entry
CR4 isn't constant; at least the TSD and PCE bits can vary.
TBH, treating CR0 and CR3 as constant scares me a bit, too, but it looks
like it's correct.
This adds a branch and a read from cr4 to each vm entry. Because it is
extremely likely that consecutive entries into the same vcpu will have
the same host cr4 value, this fixes up the vmcs instead of restoring cr4
after the fact. A subsequent patch will add a kernel-wide cr4 shadow,
reducing the overhead in the common case to just two memory reads and a
branch.
Signed-off-by: Andy Lutomirski <[email protected]>
Acked-by: Paolo Bonzini <[email protected]>
Cc: [email protected]
Cc: Petr Matousek <[email protected]>
Cc: Gleb Natapov <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
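A hedged sketch of the "fix up the vmcs" approach the message describes: read host CR4 on each entry and rewrite HOST_CR4 only when it differs from the cached value. The vmcs_writel() helper and the HOST_CR4 field encoding are assumptions modeled on the surrounding VMX code:
#define HOST_CR4 0x00006c04UL

extern void vmcs_writel(unsigned long field, unsigned long value);

struct host_state_sketch {
    unsigned long vmcs_host_cr4; /* CR4 value currently in the VMCS */
};

static void vmx_refresh_host_cr4(struct host_state_sketch *hs,
                                 unsigned long live_cr4)
{
    if (live_cr4 != hs->vmcs_host_cr4) { /* TSD/PCE bits may have changed */
        vmcs_writel(HOST_CR4, live_cr4);
        hs->vmcs_host_cr4 = live_cr4;
    }
}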
|
static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
{
unsigned long *msr_bitmap;
if (irqchip_in_kernel(vcpu->kvm) && apic_x2apic_mode(vcpu->arch.apic)) {
if (is_long_mode(vcpu))
msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
else
msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
} else {
if (is_long_mode(vcpu))
msr_bitmap = vmx_msr_bitmap_longmode;
else
msr_bitmap = vmx_msr_bitmap_legacy;
}
vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
}
|
static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
{
unsigned long *msr_bitmap;
if (irqchip_in_kernel(vcpu->kvm) && apic_x2apic_mode(vcpu->arch.apic)) {
if (is_long_mode(vcpu))
msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
else
msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
} else {
if (is_long_mode(vcpu))
msr_bitmap = vmx_msr_bitmap_longmode;
else
msr_bitmap = vmx_msr_bitmap_legacy;
}
vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
}
|
C
|
linux
| 0 |
CVE-2016-8655
|
https://www.cvedetails.com/cve/CVE-2016-8655/
|
CWE-416
|
https://github.com/torvalds/linux/commit/84ac7260236a49c79eede91617700174c2c19b0c
|
84ac7260236a49c79eede91617700174c2c19b0c
|
packet: fix race condition in packet_set_ring
When packet_set_ring creates a ring buffer it will initialize a
struct timer_list if the packet version is TPACKET_V3. This value
can then be raced by a different thread calling setsockopt to
set the version to TPACKET_V1 before packet_set_ring has finished.
This leads to a use-after-free on a function pointer in the
struct timer_list when the socket is closed as the previously
initialized timer will not be deleted.
The bug is fixed by taking lock_sock(sk) in packet_setsockopt when
changing the packet version while also taking the lock at the start
of packet_set_ring.
Fixes: f6fb8f100b80 ("af-packet: TPACKET_V3 flexible buffer implementation.")
Signed-off-by: Philip Pettersson <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
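A sketch mirroring the fix described above: the version setsockopt takes lock_sock(sk), as does the start of packet_set_ring, so tp_version cannot change while a ring is being set up (struct fields follow the af_packet code; simplified):
static int packet_set_version_locked(struct sock *sk, struct packet_sock *po,
                                     int val)
{
    int ret = 0;

    lock_sock(sk);           /* serialize with packet_set_ring */
    if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
        ret = -EBUSY;        /* refuse to switch versions under a live ring */
    else
        po->tp_version = val;
    release_sock(sk);
    return ret;
}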
|
static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
unsigned int len)
{
struct bpf_prog *new;
u32 fd;
if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
return -EPERM;
if (len != sizeof(fd))
return -EINVAL;
if (copy_from_user(&fd, data, len))
return -EFAULT;
new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
if (IS_ERR(new))
return PTR_ERR(new);
__fanout_set_data_bpf(po->fanout, new);
return 0;
}
|
static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
unsigned int len)
{
struct bpf_prog *new;
u32 fd;
if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
return -EPERM;
if (len != sizeof(fd))
return -EINVAL;
if (copy_from_user(&fd, data, len))
return -EFAULT;
new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
if (IS_ERR(new))
return PTR_ERR(new);
__fanout_set_data_bpf(po->fanout, new);
return 0;
}
|
C
|
linux
| 0 |
CVE-2017-10671
|
https://www.cvedetails.com/cve/CVE-2017-10671/
|
CWE-119
|
https://github.com/blueness/sthttpd/commit/c0dc63a49d8605649f1d8e4a96c9b468b0bff660
|
c0dc63a49d8605649f1d8e4a96c9b468b0bff660
|
Fix heap buffer overflow in de_dotdot
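The one-line message gives no detail, so as a hedged illustration of the general hazard in this kind of path-normalization code: collapsing "/../" segments in place makes the source and destination overlap, so the copy must use memmove with a length covering the full remaining string including the NUL (illustrative only, not the project's actual patch):
#include <string.h>

/* Illustrative only: collapse one "/../" segment in place. */
static void collapse_dotdot_once(char *path, char *seg /* points at "/../" */)
{
    char *prev = seg;

    while (prev > path && *--prev != '/')
        ;                                        /* back up one component */
    memmove(prev, seg + 3, strlen(seg + 3) + 1); /* +1 copies the NUL */
}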
|
httpd_start_request( httpd_conn* hc, struct timeval* nowP )
{
int r;
/* Really start the request. */
r = really_start_request( hc, nowP );
/* And return the status. */
return r;
}
|
httpd_start_request( httpd_conn* hc, struct timeval* nowP )
{
int r;
/* Really start the request. */
r = really_start_request( hc, nowP );
/* And return the status. */
return r;
}
|
C
|
sthttpd
| 0 |
CVE-2019-5828
|
https://www.cvedetails.com/cve/CVE-2019-5828/
|
CWE-416
|
https://github.com/chromium/chromium/commit/761d65ebcac0cdb730fd27b87e207201ac38e3b4
|
761d65ebcac0cdb730fd27b87e207201ac38e3b4
|
[Payment Handler] Don't wait for response from closed payment app.
Before this patch, tapping the back button on top of the payment handler
window on desktop would not affect the |response_helper_|, which would
continue waiting for a response from the payment app. The service worker
of the closed payment app could time out after 5 minutes and invoke the
|response_helper_|. Depending on what else the user did afterwards, in
the best case scenario, the payment sheet would display a "Transaction
failed" error message. In the worst case scenario, the
|response_helper_| would be used after free.
This patch clears the |response_helper_| in the PaymentRequestState and
in the ServiceWorkerPaymentInstrument after the payment app is closed.
After this patch, the cancelled payment app does not show "Transaction
failed" and does not use memory after it was freed.
Bug: 956597
Change-Id: I64134b911a4f8c154cb56d537a8243a68a806394
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1588682
Reviewed-by: anthonyvd <[email protected]>
Commit-Queue: Rouslan Solomakhin <[email protected]>
Cr-Commit-Position: refs/heads/master@{#654995}
|
void ServiceWorkerPaymentInstrument::InvokePaymentApp(Delegate* delegate) {
delegate_ = delegate;
if (needs_installation_) {
content::PaymentAppProvider::GetInstance()->InstallAndInvokePaymentApp(
web_contents_, CreatePaymentRequestEventData(),
installable_web_app_info_->name,
installable_web_app_info_->icon == nullptr
? SkBitmap()
: *(installable_web_app_info_->icon),
installable_web_app_info_->sw_js_url,
installable_web_app_info_->sw_scope,
installable_web_app_info_->sw_use_cache, installable_enabled_method_,
base::BindOnce(&ServiceWorkerPaymentInstrument::OnPaymentAppInvoked,
weak_ptr_factory_.GetWeakPtr()));
} else {
content::PaymentAppProvider::GetInstance()->InvokePaymentApp(
browser_context_, stored_payment_app_info_->registration_id,
CreatePaymentRequestEventData(),
base::BindOnce(&ServiceWorkerPaymentInstrument::OnPaymentAppInvoked,
weak_ptr_factory_.GetWeakPtr()));
}
payment_request_delegate_->ShowProcessingSpinner();
}
|
void ServiceWorkerPaymentInstrument::InvokePaymentApp(Delegate* delegate) {
delegate_ = delegate;
if (needs_installation_) {
content::PaymentAppProvider::GetInstance()->InstallAndInvokePaymentApp(
web_contents_, CreatePaymentRequestEventData(),
installable_web_app_info_->name,
installable_web_app_info_->icon == nullptr
? SkBitmap()
: *(installable_web_app_info_->icon),
installable_web_app_info_->sw_js_url,
installable_web_app_info_->sw_scope,
installable_web_app_info_->sw_use_cache, installable_enabled_method_,
base::BindOnce(&ServiceWorkerPaymentInstrument::OnPaymentAppInvoked,
weak_ptr_factory_.GetWeakPtr()));
} else {
content::PaymentAppProvider::GetInstance()->InvokePaymentApp(
browser_context_, stored_payment_app_info_->registration_id,
CreatePaymentRequestEventData(),
base::BindOnce(&ServiceWorkerPaymentInstrument::OnPaymentAppInvoked,
weak_ptr_factory_.GetWeakPtr()));
}
payment_request_delegate_->ShowProcessingSpinner();
}
|
C
|
Chrome
| 0 |
CVE-2016-5189
|
https://www.cvedetails.com/cve/CVE-2016-5189/
|
CWE-284
|
https://github.com/chromium/chromium/commit/2440e872debd68ae7c2a8bf9ddb34df2cce378cd
|
2440e872debd68ae7c2a8bf9ddb34df2cce378cd
|
[GCPW] Disallow sign in of consumer accounts when mdm is enabled.
Unless the registry key "mdm_aca" is explicitly set to 1, always
fail sign in of consumer accounts when mdm enrollment is enabled.
Consumer accounts are defined as accounts with gmail.com or
googlemail.com domain.
Bug: 944049
Change-Id: Icb822f3737d90931de16a8d3317616dd2b159edd
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1532903
Commit-Queue: Tien Mai <[email protected]>
Reviewed-by: Roger Tawa <[email protected]>
Cr-Commit-Position: refs/heads/master@{#646278}
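A sketch of the policy described in this message, with hypothetical names: when MDM enrollment is on and the "mdm_aca" override is not set to 1, reject accounts whose domain is gmail.com or googlemail.com (not the project's actual C++ implementation):
#include <stdbool.h>
#include <string.h>

static bool is_consumer_account(const char *email)
{
    const char *at = strrchr(email, '@');
    if (!at)
        return false;
    return strcmp(at + 1, "gmail.com") == 0 ||
           strcmp(at + 1, "googlemail.com") == 0;
}

static bool sign_in_allowed(const char *email, bool mdm_enabled,
                            int mdm_aca_reg_value)
{
    if (mdm_enabled && mdm_aca_reg_value != 1 && is_consumer_account(email))
        return false; /* consumer sign-in blocked unless explicitly allowed */
    return true;
}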
|
HRESULT CGaiaCredentialBase::GetBitmapValueImpl(DWORD field_id,
HBITMAP* phbmp) {
HRESULT hr = E_INVALIDARG;
switch (field_id) {
case FID_PROVIDER_LOGO:
*phbmp = ::LoadBitmap(CURRENT_MODULE(),
MAKEINTRESOURCE(IDB_GOOGLE_LOGO_SMALL));
if (*phbmp)
hr = S_OK;
break;
default:
break;
}
return hr;
}
|
HRESULT CGaiaCredentialBase::GetBitmapValueImpl(DWORD field_id,
HBITMAP* phbmp) {
HRESULT hr = E_INVALIDARG;
switch (field_id) {
case FID_PROVIDER_LOGO:
*phbmp = ::LoadBitmap(CURRENT_MODULE(),
MAKEINTRESOURCE(IDB_GOOGLE_LOGO_SMALL));
if (*phbmp)
hr = S_OK;
break;
default:
break;
}
return hr;
}
|
C
|
Chrome
| 0 |
CVE-2013-7271
|
https://www.cvedetails.com/cve/CVE-2013-7271/
|
CWE-20
|
https://github.com/torvalds/linux/commit/f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
|
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
|
net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
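A sketch of the contract this patch establishes for handlers: msg_namelen now arrives as 0, and a recvmsg handler sets it only when it actually fills msg_name (simplified; kernel handlers use their own address types):
#include <string.h>
#include <sys/socket.h>

static void fill_msg_name_sketch(struct msghdr *msg,
                                 const struct sockaddr_storage *peer,
                                 socklen_t peer_len)
{
    if (msg->msg_name == NULL)
        return;                  /* plain read(): nothing to copy, len stays 0 */
    memcpy(msg->msg_name, peer, peer_len);
    msg->msg_namelen = peer_len; /* <= sizeof(struct sockaddr_storage) */
}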
|
static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size,
int flags)
{
struct sock_iocb *siocb = kiocb_to_siocb(iocb);
struct scm_cookie tmp_scm;
struct sock *sk = sock->sk;
struct unix_sock *u = unix_sk(sk);
int noblock = flags & MSG_DONTWAIT;
struct sk_buff *skb;
int err;
int peeked, skip;
err = -EOPNOTSUPP;
if (flags&MSG_OOB)
goto out;
err = mutex_lock_interruptible(&u->readlock);
if (err) {
err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
goto out;
}
skip = sk_peek_offset(sk, flags);
skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err);
if (!skb) {
unix_state_lock(sk);
/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
(sk->sk_shutdown & RCV_SHUTDOWN))
err = 0;
unix_state_unlock(sk);
goto out_unlock;
}
wake_up_interruptible_sync_poll(&u->peer_wait,
POLLOUT | POLLWRNORM | POLLWRBAND);
if (msg->msg_name)
unix_copy_addr(msg, skb->sk);
if (size > skb->len - skip)
size = skb->len - skip;
else if (size < skb->len - skip)
msg->msg_flags |= MSG_TRUNC;
err = skb_copy_datagram_iovec(skb, skip, msg->msg_iov, size);
if (err)
goto out_free;
if (sock_flag(sk, SOCK_RCVTSTAMP))
__sock_recv_timestamp(msg, sk, skb);
if (!siocb->scm) {
siocb->scm = &tmp_scm;
memset(&tmp_scm, 0, sizeof(tmp_scm));
}
scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
unix_set_secdata(siocb->scm, skb);
if (!(flags & MSG_PEEK)) {
if (UNIXCB(skb).fp)
unix_detach_fds(siocb->scm, skb);
sk_peek_offset_bwd(sk, skb->len);
} else {
/* It is questionable: on PEEK we could:
- do not return fds - good, but too simple 8)
- return fds, and do not return them on read (old strategy,
apparently wrong)
- clone fds (I chose it for now, it is the most universal
solution)
POSIX 1003.1g does not actually define this clearly
at all. POSIX 1003.1g doesn't define a lot of things
clearly however!
*/
sk_peek_offset_fwd(sk, size);
if (UNIXCB(skb).fp)
siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
}
err = (flags & MSG_TRUNC) ? skb->len - skip : size;
scm_recv(sock, msg, siocb->scm, flags);
out_free:
skb_free_datagram(sk, skb);
out_unlock:
mutex_unlock(&u->readlock);
out:
return err;
}
|
static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size,
int flags)
{
struct sock_iocb *siocb = kiocb_to_siocb(iocb);
struct scm_cookie tmp_scm;
struct sock *sk = sock->sk;
struct unix_sock *u = unix_sk(sk);
int noblock = flags & MSG_DONTWAIT;
struct sk_buff *skb;
int err;
int peeked, skip;
err = -EOPNOTSUPP;
if (flags&MSG_OOB)
goto out;
msg->msg_namelen = 0;
err = mutex_lock_interruptible(&u->readlock);
if (err) {
err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
goto out;
}
skip = sk_peek_offset(sk, flags);
skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err);
if (!skb) {
unix_state_lock(sk);
/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
(sk->sk_shutdown & RCV_SHUTDOWN))
err = 0;
unix_state_unlock(sk);
goto out_unlock;
}
wake_up_interruptible_sync_poll(&u->peer_wait,
POLLOUT | POLLWRNORM | POLLWRBAND);
if (msg->msg_name)
unix_copy_addr(msg, skb->sk);
if (size > skb->len - skip)
size = skb->len - skip;
else if (size < skb->len - skip)
msg->msg_flags |= MSG_TRUNC;
err = skb_copy_datagram_iovec(skb, skip, msg->msg_iov, size);
if (err)
goto out_free;
if (sock_flag(sk, SOCK_RCVTSTAMP))
__sock_recv_timestamp(msg, sk, skb);
if (!siocb->scm) {
siocb->scm = &tmp_scm;
memset(&tmp_scm, 0, sizeof(tmp_scm));
}
scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
unix_set_secdata(siocb->scm, skb);
if (!(flags & MSG_PEEK)) {
if (UNIXCB(skb).fp)
unix_detach_fds(siocb->scm, skb);
sk_peek_offset_bwd(sk, skb->len);
} else {
/* It is questionable: on PEEK we could:
- do not return fds - good, but too simple 8)
- return fds, and do not return them on read (old strategy,
apparently wrong)
- clone fds (I chose it for now, it is the most universal
solution)
POSIX 1003.1g does not actually define this clearly
at all. POSIX 1003.1g doesn't define a lot of things
clearly however!
*/
sk_peek_offset_fwd(sk, size);
if (UNIXCB(skb).fp)
siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
}
err = (flags & MSG_TRUNC) ? skb->len - skip : size;
scm_recv(sock, msg, siocb->scm, flags);
out_free:
skb_free_datagram(sk, skb);
out_unlock:
mutex_unlock(&u->readlock);
out:
return err;
}
|
C
|
linux
| 1 |
CVE-2015-2348
|
https://www.cvedetails.com/cve/CVE-2015-2348/
|
CWE-264
|
https://git.php.net/?p=php-src.git;a=commit;h=1291d6bbee93b6109eb07e8f7916ff1b7fcc13e1
|
1291d6bbee93b6109eb07e8f7916ff1b7fcc13e1
| null |
static int php_ini_get_option(zend_ini_entry *ini_entry TSRMLS_DC, int num_args, va_list args, zend_hash_key *hash_key) /* {{{ */
{
zval *ini_array = va_arg(args, zval *);
int module_number = va_arg(args, int);
int details = va_arg(args, int);
zval *option;
if (module_number != 0 && ini_entry->module_number != module_number) {
return 0;
}
if (hash_key->nKeyLength == 0 ||
hash_key->arKey[0] != 0
) {
if (details) {
MAKE_STD_ZVAL(option);
array_init(option);
if (ini_entry->orig_value) {
add_assoc_stringl(option, "global_value", ini_entry->orig_value, ini_entry->orig_value_length, 1);
} else if (ini_entry->value) {
add_assoc_stringl(option, "global_value", ini_entry->value, ini_entry->value_length, 1);
} else {
add_assoc_null(option, "global_value");
}
if (ini_entry->value) {
add_assoc_stringl(option, "local_value", ini_entry->value, ini_entry->value_length, 1);
} else {
add_assoc_null(option, "local_value");
}
add_assoc_long(option, "access", ini_entry->modifiable);
add_assoc_zval_ex(ini_array, ini_entry->name, ini_entry->name_length, option);
} else {
if (ini_entry->value) {
add_assoc_stringl(ini_array, ini_entry->name, ini_entry->value, ini_entry->value_length, 1);
} else {
add_assoc_null(ini_array, ini_entry->name);
}
}
}
return 0;
}
/* }}} */
|
static int php_ini_get_option(zend_ini_entry *ini_entry TSRMLS_DC, int num_args, va_list args, zend_hash_key *hash_key) /* {{{ */
{
zval *ini_array = va_arg(args, zval *);
int module_number = va_arg(args, int);
int details = va_arg(args, int);
zval *option;
if (module_number != 0 && ini_entry->module_number != module_number) {
return 0;
}
if (hash_key->nKeyLength == 0 ||
hash_key->arKey[0] != 0
) {
if (details) {
MAKE_STD_ZVAL(option);
array_init(option);
if (ini_entry->orig_value) {
add_assoc_stringl(option, "global_value", ini_entry->orig_value, ini_entry->orig_value_length, 1);
} else if (ini_entry->value) {
add_assoc_stringl(option, "global_value", ini_entry->value, ini_entry->value_length, 1);
} else {
add_assoc_null(option, "global_value");
}
if (ini_entry->value) {
add_assoc_stringl(option, "local_value", ini_entry->value, ini_entry->value_length, 1);
} else {
add_assoc_null(option, "local_value");
}
add_assoc_long(option, "access", ini_entry->modifiable);
add_assoc_zval_ex(ini_array, ini_entry->name, ini_entry->name_length, option);
} else {
if (ini_entry->value) {
add_assoc_stringl(ini_array, ini_entry->name, ini_entry->value, ini_entry->value_length, 1);
} else {
add_assoc_null(ini_array, ini_entry->name);
}
}
}
return 0;
}
/* }}} */
|
C
|
php
| 0 |
CVE-2015-8325
|
https://www.cvedetails.com/cve/CVE-2015-8325/
|
CWE-264
|
https://anongit.mindrot.org/openssh.git/commit/?id=85bdcd7c92fe7ff133bbc4e10a65c91810f88755
|
85bdcd7c92fe7ff133bbc4e10a65c91810f88755
| null |
session_tty_list(void)
{
static char buf[1024];
int i;
char *cp;
buf[0] = '\0';
for (i = 0; i < sessions_nalloc; i++) {
Session *s = &sessions[i];
if (s->used && s->ttyfd != -1) {
if (strncmp(s->tty, "/dev/", 5) != 0) {
cp = strrchr(s->tty, '/');
cp = (cp == NULL) ? s->tty : cp + 1;
} else
cp = s->tty + 5;
if (buf[0] != '\0')
strlcat(buf, ",", sizeof buf);
strlcat(buf, cp, sizeof buf);
}
}
if (buf[0] == '\0')
strlcpy(buf, "notty", sizeof buf);
return buf;
}
|
session_tty_list(void)
{
static char buf[1024];
int i;
char *cp;
buf[0] = '\0';
for (i = 0; i < sessions_nalloc; i++) {
Session *s = &sessions[i];
if (s->used && s->ttyfd != -1) {
if (strncmp(s->tty, "/dev/", 5) != 0) {
cp = strrchr(s->tty, '/');
cp = (cp == NULL) ? s->tty : cp + 1;
} else
cp = s->tty + 5;
if (buf[0] != '\0')
strlcat(buf, ",", sizeof buf);
strlcat(buf, cp, sizeof buf);
}
}
if (buf[0] == '\0')
strlcpy(buf, "notty", sizeof buf);
return buf;
}
|
C
|
mindrot
| 0 |
CVE-2017-9527
|
https://www.cvedetails.com/cve/CVE-2017-9527/
|
CWE-416
|
https://github.com/mruby/mruby/commit/5c114c91d4ff31859fcd84cf8bf349b737b90d99
|
5c114c91d4ff31859fcd84cf8bf349b737b90d99
|
Clear unused stack region that may refer to freed objects; fix #3596
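The fix idea can be sketched in illustrative C: once the VM stack logically shrinks, zero the now-unused slots so the GC cannot follow stale pointers to freed objects. The type and function names here are invented, not mruby's:

#include <string.h>
typedef struct { void *p; } mrb_value_like;   /* stand-in for mrb_value */
static void stack_clear_unused(mrb_value_like *stack,
                               size_t new_top, size_t old_top)
{
    /* Zero everything above the new top so no stale object pointer
     * survives to be scanned by a later GC pass. */
    if (old_top > new_top)
        memset(&stack[new_top], 0,
               (old_top - new_top) * sizeof(mrb_value_like));
}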
|
test_mrb_write_barrier(void)
{
mrb_state *mrb = mrb_open();
struct RBasic *obj;
mrb_gc *gc = &mrb->gc;
puts("test_mrb_write_barrier");
obj = mrb_basic_ptr(mrb_ary_new(mrb));
paint_black(obj);
puts(" in MRB_GC_STATE_MARK");
gc->state = MRB_GC_STATE_MARK;
mrb_write_barrier(mrb, obj);
mrb_assert(is_gray(obj));
mrb_assert(gc->atomic_gray_list == obj);
puts(" fail with gray");
paint_gray(obj);
mrb_write_barrier(mrb, obj);
mrb_assert(is_gray(obj));
mrb_close(mrb);
}
|
test_mrb_write_barrier(void)
{
mrb_state *mrb = mrb_open();
struct RBasic *obj;
mrb_gc *gc = &mrb->gc;
puts("test_mrb_write_barrier");
obj = mrb_basic_ptr(mrb_ary_new(mrb));
paint_black(obj);
puts(" in MRB_GC_STATE_MARK");
gc->state = MRB_GC_STATE_MARK;
mrb_write_barrier(mrb, obj);
mrb_assert(is_gray(obj));
mrb_assert(gc->atomic_gray_list == obj);
puts(" fail with gray");
paint_gray(obj);
mrb_write_barrier(mrb, obj);
mrb_assert(is_gray(obj));
mrb_close(mrb);
}
|
C
|
mruby
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/9d02cda7a634fbd6e53d98091f618057f0174387
|
9d02cda7a634fbd6e53d98091f618057f0174387
|
Coverity: Fixing pass by value.
CID=101462, 101458, 101437, 101471, 101467
BUG=NONE
TEST=NONE
Review URL: http://codereview.chromium.org/9006023
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@115257 0039d316-1c4b-4281-b951-d872f2087c98
|
ExtensionPrefs::~ExtensionPrefs() {
}
|
ExtensionPrefs::~ExtensionPrefs() {
}
|
C
|
Chrome
| 0 |
CVE-2019-11487
|
https://www.cvedetails.com/cve/CVE-2019-11487/
|
CWE-416
|
https://github.com/torvalds/linux/commit/6b3a707736301c2128ca85ce85fb13f60b5e350a
|
6b3a707736301c2128ca85ce85fb13f60b5e350a
|
Merge branch 'page-refs' (page ref overflow)
Merge page ref overflow branch.
Jann Horn reported that he can overflow the page ref count with
sufficient memory (and a filesystem that is intentionally extremely
slow).
Admittedly it's not exactly easy. To have more than four billion
references to a page requires a minimum of 32GB of kernel memory just
for the pointers to the pages, much less any metadata to keep track of
those pointers. Jann needed a total of 140GB of memory and a specially
crafted filesystem that leaves all reads pending (in order to not ever
free the page references and just keep adding more).
Still, we have a fairly straightforward way to limit the two obvious
user-controllable sources of page references: direct-IO like page
references gotten through get_user_pages(), and the splice pipe page
duplication. So let's just do that.
* branch page-refs:
fs: prevent page refcount overflow in pipe_buf_get
mm: prevent get_user_pages() from overflowing page refcount
mm: add 'try_get_page()' helper function
mm: make page ref count overflow check tighter and more explicit
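A minimal sketch of the "tighter overflow check" idea in illustrative C: refuse a new reference once the count is within half the positive range, so a burst of concurrent increments cannot wrap it past INT_MAX. The type and helper names are assumptions, not the kernel's:

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
struct page_like { atomic_int refcount; };
static bool try_get_page_like(struct page_like *p)
{
    int count = atomic_load(&p->refcount);
    do {
        /* Reject dead pages and counts close enough to INT_MAX that
         * concurrent increments could plausibly overflow them. */
        if (count <= 0 || count > INT_MAX / 2)
            return false;
    } while (!atomic_compare_exchange_weak(&p->refcount, &count, count + 1));
    return true;
}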
|
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
unsigned long address, unsigned int *flags, int *nonblocking)
{
unsigned int fault_flags = 0;
vm_fault_t ret;
/* mlock all present pages, but do not fault in new pages */
if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
return -ENOENT;
if (*flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
if (*flags & FOLL_REMOTE)
fault_flags |= FAULT_FLAG_REMOTE;
if (nonblocking)
fault_flags |= FAULT_FLAG_ALLOW_RETRY;
if (*flags & FOLL_NOWAIT)
fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
if (*flags & FOLL_TRIED) {
VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
fault_flags |= FAULT_FLAG_TRIED;
}
ret = handle_mm_fault(vma, address, fault_flags);
if (ret & VM_FAULT_ERROR) {
int err = vm_fault_to_errno(ret, *flags);
if (err)
return err;
BUG();
}
if (tsk) {
if (ret & VM_FAULT_MAJOR)
tsk->maj_flt++;
else
tsk->min_flt++;
}
if (ret & VM_FAULT_RETRY) {
if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
*nonblocking = 0;
return -EBUSY;
}
/*
* The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
* necessary, even if maybe_mkwrite decided not to set pte_write. We
* can thus safely do subsequent page lookups as if they were reads.
* But only do so when looping for pte_write is futile: in some cases
* userspace may also be wanting to write to the gotten user page,
* which a read fault here might prevent (a readonly page might get
* reCOWed by userspace write).
*/
if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
*flags |= FOLL_COW;
return 0;
}
|
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
unsigned long address, unsigned int *flags, int *nonblocking)
{
unsigned int fault_flags = 0;
vm_fault_t ret;
/* mlock all present pages, but do not fault in new pages */
if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
return -ENOENT;
if (*flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
if (*flags & FOLL_REMOTE)
fault_flags |= FAULT_FLAG_REMOTE;
if (nonblocking)
fault_flags |= FAULT_FLAG_ALLOW_RETRY;
if (*flags & FOLL_NOWAIT)
fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
if (*flags & FOLL_TRIED) {
VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
fault_flags |= FAULT_FLAG_TRIED;
}
ret = handle_mm_fault(vma, address, fault_flags);
if (ret & VM_FAULT_ERROR) {
int err = vm_fault_to_errno(ret, *flags);
if (err)
return err;
BUG();
}
if (tsk) {
if (ret & VM_FAULT_MAJOR)
tsk->maj_flt++;
else
tsk->min_flt++;
}
if (ret & VM_FAULT_RETRY) {
if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
*nonblocking = 0;
return -EBUSY;
}
/*
* The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
* necessary, even if maybe_mkwrite decided not to set pte_write. We
* can thus safely do subsequent page lookups as if they were reads.
* But only do so when looping for pte_write is futile: in some cases
* userspace may also be wanting to write to the gotten user page,
* which a read fault here might prevent (a readonly page might get
* reCOWed by userspace write).
*/
if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
*flags |= FOLL_COW;
return 0;
}
|
C
|
linux
| 0 |
CVE-2018-1000039
|
https://www.cvedetails.com/cve/CVE-2018-1000039/
|
CWE-416
|
http://git.ghostscript.com/?p=mupdf.git;a=commitdiff;h=f597300439e62f5e921f0d7b1e880b5c1a1f1607;hp=093fc3b098dc5fadef5d8ad4b225db9fb124758b
|
f597300439e62f5e921f0d7b1e880b5c1a1f1607
| null |
static void walk_splay(cmap_splay *tree, unsigned int node, void (*fn)(cmap_splay *, void *), void *arg)
{
int from = TOP;
while (node != EMPTY)
{
switch (from)
{
case TOP:
if (tree[node].left != EMPTY)
{
node = tree[node].left;
from = TOP;
break;
}
/* fallthrough */
case LEFT:
fn(&tree[node], arg);
if (tree[node].right != EMPTY)
{
node = tree[node].right;
from = TOP;
break;
}
/* fallthrough */
case RIGHT:
{
unsigned int parent = tree[node].parent;
if (parent == EMPTY)
return;
if (tree[parent].left == node)
from = LEFT;
else
{
assert(tree[parent].right == node);
from = RIGHT;
}
node = parent;
}
}
}
}
|
static void walk_splay(cmap_splay *tree, unsigned int node, void (*fn)(cmap_splay *, void *), void *arg)
{
int from = TOP;
while (node != EMPTY)
{
switch (from)
{
case TOP:
if (tree[node].left != EMPTY)
{
node = tree[node].left;
from = TOP;
break;
}
/* fallthrough */
case LEFT:
fn(&tree[node], arg);
if (tree[node].right != EMPTY)
{
node = tree[node].right;
from = TOP;
break;
}
/* fallthrough */
case RIGHT:
{
unsigned int parent = tree[node].parent;
if (parent == EMPTY)
return;
if (tree[parent].left == node)
from = LEFT;
else
{
assert(tree[parent].right == node);
from = RIGHT;
}
node = parent;
}
}
}
}
|
C
|
ghostscript
| 0 |
CVE-2015-6761
|
https://www.cvedetails.com/cve/CVE-2015-6761/
|
CWE-362
|
https://github.com/chromium/chromium/commit/fd506b0ac6c7846ae45b5034044fe85c28ee68ac
|
fd506b0ac6c7846ae45b5034044fe85c28ee68ac
|
Fix detach with open()ed document leaving parent loading indefinitely
Change-Id: I26c2a054b9f1e5eb076acd677e1223058825f6d6
Bug: 803416
Test: fast/loader/document-open-iframe-then-detach.html
Change-Id: I26c2a054b9f1e5eb076acd677e1223058825f6d6
Reviewed-on: https://chromium-review.googlesource.com/887298
Reviewed-by: Mike West <[email protected]>
Commit-Queue: Nate Chapin <[email protected]>
Cr-Commit-Position: refs/heads/master@{#532967}
|
bool FrameLoader::PrepareRequestForThisFrame(FrameLoadRequest& request) {
if (!request.OriginDocument())
return true;
KURL url = request.GetResourceRequest().Url();
if (frame_->GetScriptController().ExecuteScriptIfJavaScriptURL(url, nullptr))
return false;
if (!request.OriginDocument()->GetSecurityOrigin()->CanDisplay(url)) {
request.OriginDocument()->AddConsoleMessage(ConsoleMessage::Create(
kSecurityMessageSource, kErrorMessageLevel,
"Not allowed to load local resource: " + url.ElidedString()));
return false;
}
if (frame_->IsMainFrame() &&
!request.GetResourceRequest().IsSameDocumentNavigation() &&
!frame_->Client()->AllowContentInitiatedDataUrlNavigations(
request.OriginDocument()->Url()) &&
!request.GetResourceRequest().GetSuggestedFilename().has_value() &&
url.ProtocolIsData() && NetworkUtils::IsDataURLMimeTypeSupported(url)) {
frame_->GetDocument()->AddConsoleMessage(ConsoleMessage::Create(
kSecurityMessageSource, kErrorMessageLevel,
"Not allowed to navigate top frame to data URL: " +
url.ElidedString()));
return false;
}
if (!request.Form() && request.FrameName().IsEmpty())
request.SetFrameName(frame_->GetDocument()->BaseTarget());
return true;
}
|
bool FrameLoader::PrepareRequestForThisFrame(FrameLoadRequest& request) {
if (!request.OriginDocument())
return true;
KURL url = request.GetResourceRequest().Url();
if (frame_->GetScriptController().ExecuteScriptIfJavaScriptURL(url, nullptr))
return false;
if (!request.OriginDocument()->GetSecurityOrigin()->CanDisplay(url)) {
request.OriginDocument()->AddConsoleMessage(ConsoleMessage::Create(
kSecurityMessageSource, kErrorMessageLevel,
"Not allowed to load local resource: " + url.ElidedString()));
return false;
}
if (frame_->IsMainFrame() &&
!request.GetResourceRequest().IsSameDocumentNavigation() &&
!frame_->Client()->AllowContentInitiatedDataUrlNavigations(
request.OriginDocument()->Url()) &&
!request.GetResourceRequest().GetSuggestedFilename().has_value() &&
url.ProtocolIsData() && NetworkUtils::IsDataURLMimeTypeSupported(url)) {
frame_->GetDocument()->AddConsoleMessage(ConsoleMessage::Create(
kSecurityMessageSource, kErrorMessageLevel,
"Not allowed to navigate top frame to data URL: " +
url.ElidedString()));
return false;
}
if (!request.Form() && request.FrameName().IsEmpty())
request.SetFrameName(frame_->GetDocument()->BaseTarget());
return true;
}
|
C
|
Chrome
| 0 |
CVE-2017-5093
|
https://www.cvedetails.com/cve/CVE-2017-5093/
|
CWE-20
|
https://github.com/chromium/chromium/commit/0720b02e4f303ea6b114d4ae9453e3a7ff55f8dc
|
0720b02e4f303ea6b114d4ae9453e3a7ff55f8dc
|
If JavaScript shows a dialog, cause the page to lose fullscreen.
BUG=670135, 550017, 726761, 728276
Review-Url: https://codereview.chromium.org/2906133004
Cr-Commit-Position: refs/heads/master@{#478884}
|
void WebContentsImpl::OnPluginCrashed(RenderFrameHostImpl* source,
const base::FilePath& plugin_path,
base::ProcessId plugin_pid) {
for (auto& observer : observers_)
observer.PluginCrashed(plugin_path, plugin_pid);
}
|
void WebContentsImpl::OnPluginCrashed(RenderFrameHostImpl* source,
const base::FilePath& plugin_path,
base::ProcessId plugin_pid) {
for (auto& observer : observers_)
observer.PluginCrashed(plugin_path, plugin_pid);
}
|
C
|
Chrome
| 0 |
CVE-2018-10540
|
https://www.cvedetails.com/cve/CVE-2018-10540/
|
CWE-787
|
https://github.com/dbry/WavPack/commit/6f8bb34c2993a48ab9afbe353e6d0cff7c8d821d
|
6f8bb34c2993a48ab9afbe353e6d0cff7c8d821d
|
issue #33, sanitize size of unknown chunks before malloc()
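A hedged sketch of the hardening this one-line message describes: bound the attacker-controlled chunk size before calling malloc(). The limit constant is an assumed illustration, not WavPack's actual value:

#include <stdint.h>
#include <stdlib.h>
#define MAX_UNKNOWN_CHUNK_SIZE (1u << 24)   /* assumed sane upper bound */
static void *alloc_unknown_chunk(uint32_t ckSize)
{
    if (ckSize > MAX_UNKNOWN_CHUNK_SIZE)
        return NULL;    /* reject implausible sizes before allocating */
    return malloc(ckSize);
}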
|
int WriteWave64Header (FILE *outfile, WavpackContext *wpc, int64_t total_samples, int qmode)
{
Wave64ChunkHeader datahdr, fmthdr;
Wave64FileHeader filehdr;
WaveHeader wavhdr;
uint32_t bcount;
int64_t total_data_bytes, total_file_bytes;
int num_channels = WavpackGetNumChannels (wpc);
int32_t channel_mask = WavpackGetChannelMask (wpc);
int32_t sample_rate = WavpackGetSampleRate (wpc);
int bytes_per_sample = WavpackGetBytesPerSample (wpc);
int bits_per_sample = WavpackGetBitsPerSample (wpc);
int format = WavpackGetFloatNormExp (wpc) ? 3 : 1;
int wavhdrsize = 16;
if (format == 3 && WavpackGetFloatNormExp (wpc) != 127) {
error_line ("can't create valid Wave64 header for non-normalized floating data!");
return FALSE;
}
if (total_samples == -1)
total_samples = 0x7ffff000 / (bytes_per_sample * num_channels);
total_data_bytes = total_samples * bytes_per_sample * num_channels;
CLEAR (wavhdr);
wavhdr.FormatTag = format;
wavhdr.NumChannels = num_channels;
wavhdr.SampleRate = sample_rate;
wavhdr.BytesPerSecond = sample_rate * num_channels * bytes_per_sample;
wavhdr.BlockAlign = bytes_per_sample * num_channels;
wavhdr.BitsPerSample = bits_per_sample;
if (num_channels > 2 || channel_mask != 0x5 - num_channels) {
wavhdrsize = sizeof (wavhdr);
wavhdr.cbSize = 22;
wavhdr.ValidBitsPerSample = bits_per_sample;
wavhdr.SubFormat = format;
wavhdr.ChannelMask = channel_mask;
wavhdr.FormatTag = 0xfffe;
wavhdr.BitsPerSample = bytes_per_sample * 8;
wavhdr.GUID [4] = 0x10;
wavhdr.GUID [6] = 0x80;
wavhdr.GUID [9] = 0xaa;
wavhdr.GUID [11] = 0x38;
wavhdr.GUID [12] = 0x9b;
wavhdr.GUID [13] = 0x71;
}
total_file_bytes = sizeof (filehdr) + sizeof (fmthdr) + wavhdrsize + sizeof (datahdr) + ((total_data_bytes + 7) & ~(int64_t)7);
memcpy (filehdr.ckID, riff_guid, sizeof (riff_guid));
memcpy (filehdr.formType, wave_guid, sizeof (wave_guid));
filehdr.ckSize = total_file_bytes;
memcpy (fmthdr.ckID, fmt_guid, sizeof (fmt_guid));
fmthdr.ckSize = sizeof (fmthdr) + wavhdrsize;
memcpy (datahdr.ckID, data_guid, sizeof (data_guid));
datahdr.ckSize = total_data_bytes + sizeof (datahdr);
WavpackNativeToLittleEndian (&filehdr, Wave64ChunkHeaderFormat);
WavpackNativeToLittleEndian (&fmthdr, Wave64ChunkHeaderFormat);
WavpackNativeToLittleEndian (&wavhdr, WaveHeaderFormat);
WavpackNativeToLittleEndian (&datahdr, Wave64ChunkHeaderFormat);
if (!DoWriteFile (outfile, &filehdr, sizeof (filehdr), &bcount) || bcount != sizeof (filehdr) ||
!DoWriteFile (outfile, &fmthdr, sizeof (fmthdr), &bcount) || bcount != sizeof (fmthdr) ||
!DoWriteFile (outfile, &wavhdr, wavhdrsize, &bcount) || bcount != wavhdrsize ||
!DoWriteFile (outfile, &datahdr, sizeof (datahdr), &bcount) || bcount != sizeof (datahdr)) {
error_line ("can't write .W64 data, disk probably full!");
return FALSE;
}
return TRUE;
}
|
int WriteWave64Header (FILE *outfile, WavpackContext *wpc, int64_t total_samples, int qmode)
{
Wave64ChunkHeader datahdr, fmthdr;
Wave64FileHeader filehdr;
WaveHeader wavhdr;
uint32_t bcount;
int64_t total_data_bytes, total_file_bytes;
int num_channels = WavpackGetNumChannels (wpc);
int32_t channel_mask = WavpackGetChannelMask (wpc);
int32_t sample_rate = WavpackGetSampleRate (wpc);
int bytes_per_sample = WavpackGetBytesPerSample (wpc);
int bits_per_sample = WavpackGetBitsPerSample (wpc);
int format = WavpackGetFloatNormExp (wpc) ? 3 : 1;
int wavhdrsize = 16;
if (format == 3 && WavpackGetFloatNormExp (wpc) != 127) {
error_line ("can't create valid Wave64 header for non-normalized floating data!");
return FALSE;
}
if (total_samples == -1)
total_samples = 0x7ffff000 / (bytes_per_sample * num_channels);
total_data_bytes = total_samples * bytes_per_sample * num_channels;
CLEAR (wavhdr);
wavhdr.FormatTag = format;
wavhdr.NumChannels = num_channels;
wavhdr.SampleRate = sample_rate;
wavhdr.BytesPerSecond = sample_rate * num_channels * bytes_per_sample;
wavhdr.BlockAlign = bytes_per_sample * num_channels;
wavhdr.BitsPerSample = bits_per_sample;
if (num_channels > 2 || channel_mask != 0x5 - num_channels) {
wavhdrsize = sizeof (wavhdr);
wavhdr.cbSize = 22;
wavhdr.ValidBitsPerSample = bits_per_sample;
wavhdr.SubFormat = format;
wavhdr.ChannelMask = channel_mask;
wavhdr.FormatTag = 0xfffe;
wavhdr.BitsPerSample = bytes_per_sample * 8;
wavhdr.GUID [4] = 0x10;
wavhdr.GUID [6] = 0x80;
wavhdr.GUID [9] = 0xaa;
wavhdr.GUID [11] = 0x38;
wavhdr.GUID [12] = 0x9b;
wavhdr.GUID [13] = 0x71;
}
total_file_bytes = sizeof (filehdr) + sizeof (fmthdr) + wavhdrsize + sizeof (datahdr) + ((total_data_bytes + 7) & ~(int64_t)7);
memcpy (filehdr.ckID, riff_guid, sizeof (riff_guid));
memcpy (filehdr.formType, wave_guid, sizeof (wave_guid));
filehdr.ckSize = total_file_bytes;
memcpy (fmthdr.ckID, fmt_guid, sizeof (fmt_guid));
fmthdr.ckSize = sizeof (fmthdr) + wavhdrsize;
memcpy (datahdr.ckID, data_guid, sizeof (data_guid));
datahdr.ckSize = total_data_bytes + sizeof (datahdr);
WavpackNativeToLittleEndian (&filehdr, Wave64ChunkHeaderFormat);
WavpackNativeToLittleEndian (&fmthdr, Wave64ChunkHeaderFormat);
WavpackNativeToLittleEndian (&wavhdr, WaveHeaderFormat);
WavpackNativeToLittleEndian (&datahdr, Wave64ChunkHeaderFormat);
if (!DoWriteFile (outfile, &filehdr, sizeof (filehdr), &bcount) || bcount != sizeof (filehdr) ||
!DoWriteFile (outfile, &fmthdr, sizeof (fmthdr), &bcount) || bcount != sizeof (fmthdr) ||
!DoWriteFile (outfile, &wavhdr, wavhdrsize, &bcount) || bcount != wavhdrsize ||
!DoWriteFile (outfile, &datahdr, sizeof (datahdr), &bcount) || bcount != sizeof (datahdr)) {
error_line ("can't write .W64 data, disk probably full!");
return FALSE;
}
return TRUE;
}
|
C
|
WavPack
| 0 |
CVE-2013-6644
|
https://www.cvedetails.com/cve/CVE-2013-6644/
| null |
https://github.com/chromium/chromium/commit/db93178bcaaf7e99ebb18bd51fa99b2feaf47e1f
|
db93178bcaaf7e99ebb18bd51fa99b2feaf47e1f
|
[Extensions] Add GetInstalledExtension() method to ExtensionRegistry
This CL adds GetInstalledExtension() method to ExtensionRegistry and
uses it instead of deprecated ExtensionService::GetInstalledExtension()
in chrome/browser/ui/app_list/.
Part of removing the deprecated GetInstalledExtension() call
from the ExtensionService.
BUG=489687
Review URL: https://codereview.chromium.org/1130353010
Cr-Commit-Position: refs/heads/master@{#333036}
|
bool IsDriveAppSyncId(const std::string& sync_id) {
return StartsWithASCII(sync_id, kDriveAppSyncIdPrefix, true);
}
|
bool IsDriveAppSyncId(const std::string& sync_id) {
return StartsWithASCII(sync_id, kDriveAppSyncIdPrefix, true);
}
|
C
|
Chrome
| 0 |
CVE-2011-0716
|
https://www.cvedetails.com/cve/CVE-2011-0716/
|
CWE-399
|
https://github.com/torvalds/linux/commit/6b0d6a9b4296fa16a28d10d416db7a770fc03287
|
6b0d6a9b4296fa16a28d10d416db7a770fc03287
|
bridge: Fix mglist corruption that leads to memory corruption
The list mp->mglist is used to indicate whether a multicast group
is active on the bridge interface itself as opposed to one of the
constituent interfaces in the bridge.
Unfortunately the operation that adds the mp->mglist node to the
list neglected to check whether it has already been added. This
leads to list corruption in the form of nodes pointing to itself.
Normally this would be quite obvious as it would cause an infinite
loop when walking the list. However, as this list is never actually
walked (which means that we don't really need it, I'll get rid of
it in a subsequent patch), this instead is hidden until we perform
a delete operation on the affected nodes.
As the same node may now be pointed to by more than one node, the
delete operations can then cause modification of freed memory.
This was observed in practice to cause corruption in 512-byte slabs,
most commonly leading to crashes in jbd2.
Thanks to Josef Bacik for pointing me in the right direction.
Reported-by: Ian Page Hands <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
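A user-space sketch of this bug class and its guard, with invented names: adding a node to an intrusive list twice makes it point at itself, so membership is checked before adding. Nodes are assumed to start self-linked via list_init:

#include <stdbool.h>
struct list_node { struct list_node *next, *prev; };
static void list_init(struct list_node *n) { n->next = n->prev = n; }
static bool node_unlinked(const struct list_node *n) { return n->next == n; }
static void list_add_tail(struct list_node *n, struct list_node *head)
{
    n->prev = head->prev;
    n->next = head;
    head->prev->next = n;
    head->prev = n;
}
static void list_add_once(struct list_node *n, struct list_node *head)
{
    if (node_unlinked(n))   /* guard against the double-add described above */
        list_add_tail(n, head);
}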
|
static void br_multicast_port_query_expired(unsigned long data)
{
struct net_bridge_port *port = (void *)data;
struct net_bridge *br = port->br;
spin_lock(&br->multicast_lock);
if (port->state == BR_STATE_DISABLED ||
port->state == BR_STATE_BLOCKING)
goto out;
if (port->multicast_startup_queries_sent <
br->multicast_startup_query_count)
port->multicast_startup_queries_sent++;
br_multicast_send_query(port->br, port,
port->multicast_startup_queries_sent);
out:
spin_unlock(&br->multicast_lock);
}
|
static void br_multicast_port_query_expired(unsigned long data)
{
struct net_bridge_port *port = (void *)data;
struct net_bridge *br = port->br;
spin_lock(&br->multicast_lock);
if (port->state == BR_STATE_DISABLED ||
port->state == BR_STATE_BLOCKING)
goto out;
if (port->multicast_startup_queries_sent <
br->multicast_startup_query_count)
port->multicast_startup_queries_sent++;
br_multicast_send_query(port->br, port,
port->multicast_startup_queries_sent);
out:
spin_unlock(&br->multicast_lock);
}
|
C
|
linux
| 0 |
CVE-2013-0829
|
https://www.cvedetails.com/cve/CVE-2013-0829/
|
CWE-264
|
https://github.com/chromium/chromium/commit/d123966ec156cd2f92fdada36be39694641b479e
|
d123966ec156cd2f92fdada36be39694641b479e
|
File permission fix: now we selectively grant read permission for Sandboxed files
We also need to check the read permission and call GrantReadFile() for
sandboxed files for CreateSnapshotFile().
BUG=162114
TEST=manual
Review URL: https://codereview.chromium.org/11280231
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@170181 0039d316-1c4b-4281-b951-d872f2087c98
|
void FileAPIMessageFilter::OverrideThreadForMessage(
const IPC::Message& message,
BrowserThread::ID* thread) {
if (message.type() == FileSystemHostMsg_SyncGetPlatformPath::ID)
*thread = BrowserThread::FILE;
}
|
void FileAPIMessageFilter::OverrideThreadForMessage(
const IPC::Message& message,
BrowserThread::ID* thread) {
if (message.type() == FileSystemHostMsg_SyncGetPlatformPath::ID)
*thread = BrowserThread::FILE;
}
|
C
|
Chrome
| 0 |
CVE-2013-6644
|
https://www.cvedetails.com/cve/CVE-2013-6644/
| null |
https://github.com/chromium/chromium/commit/db93178bcaaf7e99ebb18bd51fa99b2feaf47e1f
|
db93178bcaaf7e99ebb18bd51fa99b2feaf47e1f
|
[Extensions] Add GetInstalledExtension() method to ExtensionRegistry
This CL adds GetInstalledExtension() method to ExtensionRegistry and
uses it instead of deprecated ExtensionService::GetInstalledExtension()
in chrome/browser/ui/app_list/.
Part of removing the deprecated GetInstalledExtension() call
from the ExtensionService.
BUG=489687
Review URL: https://codereview.chromium.org/1130353010
Cr-Commit-Position: refs/heads/master@{#333036}
|
syncer::SyncData GetSyncDataFromSyncItem(
const AppListSyncableService::SyncItem* item) {
sync_pb::EntitySpecifics specifics;
GetSyncSpecificsFromSyncItem(item, specifics.mutable_app_list());
return syncer::SyncData::CreateLocalData(item->item_id,
item->item_id,
specifics);
}
|
syncer::SyncData GetSyncDataFromSyncItem(
const AppListSyncableService::SyncItem* item) {
sync_pb::EntitySpecifics specifics;
GetSyncSpecificsFromSyncItem(item, specifics.mutable_app_list());
return syncer::SyncData::CreateLocalData(item->item_id,
item->item_id,
specifics);
}
|
C
|
Chrome
| 0 |
CVE-2014-9644
|
https://www.cvedetails.com/cve/CVE-2014-9644/
|
CWE-264
|
https://github.com/torvalds/linux/commit/4943ba16bbc2db05115707b3ff7b4874e9e3c560
|
4943ba16bbc2db05115707b3ff7b4874e9e3c560
|
crypto: include crypto- module prefix in template
This adds the module loading prefix "crypto-" to the template lookup
as well.
For example, attempting to load 'vfat(blowfish)' via AF_ALG now correctly
includes the "crypto-" prefix at every level, correctly rejecting "vfat":
net-pf-38
algif-hash
crypto-vfat(blowfish)
crypto-vfat(blowfish)-all
crypto-vfat
Reported-by: Mathias Krause <[email protected]>
Signed-off-by: Kees Cook <[email protected]>
Acked-by: Mathias Krause <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
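The lookup-name construction the message describes can be illustrated with a small, self-contained C program; the helper is an invented stand-in for the kernel's alias formatting:

#include <stdio.h>
static void build_crypto_alias(char *out, size_t outlen, const char *alg)
{
    /* Every alias derived from a user-supplied algorithm string gets a
     * fixed prefix, so lookups stay inside the crypto module namespace. */
    snprintf(out, outlen, "crypto-%s", alg);
}
int main(void)
{
    char alias[64];
    build_crypto_alias(alias, sizeof alias, "vfat(blowfish)");
    puts(alias);    /* prints: crypto-vfat(blowfish) */
    return 0;
}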
|
int crypto_unregister_alg(struct crypto_alg *alg)
{
int ret;
LIST_HEAD(list);
down_write(&crypto_alg_sem);
ret = crypto_remove_alg(alg, &list);
up_write(&crypto_alg_sem);
if (ret)
return ret;
BUG_ON(atomic_read(&alg->cra_refcnt) != 1);
if (alg->cra_destroy)
alg->cra_destroy(alg);
crypto_remove_final(&list);
return 0;
}
|
int crypto_unregister_alg(struct crypto_alg *alg)
{
int ret;
LIST_HEAD(list);
down_write(&crypto_alg_sem);
ret = crypto_remove_alg(alg, &list);
up_write(&crypto_alg_sem);
if (ret)
return ret;
BUG_ON(atomic_read(&alg->cra_refcnt) != 1);
if (alg->cra_destroy)
alg->cra_destroy(alg);
crypto_remove_final(&list);
return 0;
}
|
C
|
linux
| 0 |
CVE-2016-1667
|
https://www.cvedetails.com/cve/CVE-2016-1667/
|
CWE-284
|
https://github.com/chromium/chromium/commit/350f7d4b2c76950c8e7271284de84a9756b796e1
|
350f7d4b2c76950c8e7271284de84a9756b796e1
|
P2PQuicStream write functionality.
This adds the P2PQuicStream::WriteData function and adds tests. It also
adds the concept of a write buffered amount, enforcing this at the
P2PQuicStreamImpl.
Bug: 874296
Change-Id: Id02c8aa8d5368a87bb24a2e50dab5ef94bcae131
Reviewed-on: https://chromium-review.googlesource.com/c/1315534
Commit-Queue: Seth Hampson <[email protected]>
Reviewed-by: Henrik Boström <[email protected]>
Cr-Commit-Position: refs/heads/master@{#605766}
|
quic::QuicCryptoStream* P2PQuicTransportImpl::GetMutableCryptoStream() {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
return crypto_stream_.get();
}
|
quic::QuicCryptoStream* P2PQuicTransportImpl::GetMutableCryptoStream() {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
return crypto_stream_.get();
}
|
C
|
Chrome
| 0 |
CVE-2017-5120
|
https://www.cvedetails.com/cve/CVE-2017-5120/
| null |
https://github.com/chromium/chromium/commit/b7277af490d28ac7f802c015bb0ff31395768556
|
b7277af490d28ac7f802c015bb0ff31395768556
|
bindings: Support "attribute FrozenArray<T>?"
Adds a quick hack to support a case of "attribute FrozenArray<T>?".
Bug: 1028047
Change-Id: Ib3cecc4beb6bcc0fb0dbc667aca595454cc90c86
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1933866
Reviewed-by: Hitoshi Yoshida <[email protected]>
Commit-Queue: Yuki Shiino <[email protected]>
Cr-Commit-Position: refs/heads/master@{#718676}
|
static void HighEntropyReadonlyAttributeWithMeasureAsAttributeGetter(const v8::FunctionCallbackInfo<v8::Value>& info) {
v8::Local<v8::Object> holder = info.Holder();
TestObject* impl = V8TestObject::ToImpl(holder);
V8SetReturnValueString(info, impl->highEntropyReadonlyAttributeWithMeasureAs(), info.GetIsolate());
}
|
static void HighEntropyReadonlyAttributeWithMeasureAsAttributeGetter(const v8::FunctionCallbackInfo<v8::Value>& info) {
v8::Local<v8::Object> holder = info.Holder();
TestObject* impl = V8TestObject::ToImpl(holder);
V8SetReturnValueString(info, impl->highEntropyReadonlyAttributeWithMeasureAs(), info.GetIsolate());
}
|
C
|
Chrome
| 0 |
CVE-2013-4282
|
https://www.cvedetails.com/cve/CVE-2013-4282/
|
CWE-119
|
https://cgit.freedesktop.org/spice/spice/commit/?id=8af619009660b24e0b41ad26b30289eea288fcc2
|
8af619009660b24e0b41ad26b30289eea288fcc2
| null |
SPICE_GNUC_VISIBLE spice_compat_version_t spice_get_current_compat_version(void)
{
return SPICE_COMPAT_VERSION_CURRENT;
}
|
SPICE_GNUC_VISIBLE spice_compat_version_t spice_get_current_compat_version(void)
{
return SPICE_COMPAT_VERSION_CURRENT;
}
|
C
|
spice
| 0 |
CVE-2016-2476
|
https://www.cvedetails.com/cve/CVE-2016-2476/
|
CWE-119
|
https://android.googlesource.com/platform/frameworks/av/+/94d9e646454f6246bf823b6897bd6aea5f08eda3
|
94d9e646454f6246bf823b6897bd6aea5f08eda3
|
Fix initialization of AAC presentation struct
Otherwise the new size checks trip on this.
Bug: 27207275
Change-Id: I1f8f01097e3a88ff041b69279a6121be842f1766
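A sketch of the initialization pattern implied by the message, using an invented struct: zero the config struct and advertise its true size before handing it to a size-checking API:

#include <string.h>
typedef struct {
    unsigned int nSize;       /* checked against sizeof() by the receiver */
    unsigned int nPortIndex;
    int          nEncodeBitrate;
} OMXParamLike;
static void init_omx_params_like(OMXParamLike *p)
{
    memset(p, 0, sizeof(*p)); /* no stale stack bytes reach the size check */
    p->nSize = sizeof(*p);    /* advertise the true structure size */
}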
|
status_t ACodec::setParameters(const sp<AMessage> &params) {
int32_t videoBitrate;
if (params->findInt32("video-bitrate", &videoBitrate)) {
OMX_VIDEO_CONFIG_BITRATETYPE configParams;
InitOMXParams(&configParams);
configParams.nPortIndex = kPortIndexOutput;
configParams.nEncodeBitrate = videoBitrate;
status_t err = mOMX->setConfig(
mNode,
OMX_IndexConfigVideoBitrate,
&configParams,
sizeof(configParams));
if (err != OK) {
ALOGE("setConfig(OMX_IndexConfigVideoBitrate, %d) failed w/ err %d",
videoBitrate, err);
return err;
}
}
int64_t skipFramesBeforeUs;
if (params->findInt64("skip-frames-before", &skipFramesBeforeUs)) {
status_t err =
mOMX->setInternalOption(
mNode,
kPortIndexInput,
IOMX::INTERNAL_OPTION_START_TIME,
&skipFramesBeforeUs,
sizeof(skipFramesBeforeUs));
if (err != OK) {
ALOGE("Failed to set parameter 'skip-frames-before' (err %d)", err);
return err;
}
}
int32_t dropInputFrames;
if (params->findInt32("drop-input-frames", &dropInputFrames)) {
bool suspend = dropInputFrames != 0;
status_t err =
mOMX->setInternalOption(
mNode,
kPortIndexInput,
IOMX::INTERNAL_OPTION_SUSPEND,
&suspend,
sizeof(suspend));
if (err != OK) {
ALOGE("Failed to set parameter 'drop-input-frames' (err %d)", err);
return err;
}
}
int32_t dummy;
if (params->findInt32("request-sync", &dummy)) {
status_t err = requestIDRFrame();
if (err != OK) {
ALOGE("Requesting a sync frame failed w/ err %d", err);
return err;
}
}
float rate;
if (params->findFloat("operating-rate", &rate) && rate > 0) {
status_t err = setOperatingRate(rate, mIsVideo);
if (err != OK) {
ALOGE("Failed to set parameter 'operating-rate' (err %d)", err);
return err;
}
}
return OK;
}
|
status_t ACodec::setParameters(const sp<AMessage> &params) {
int32_t videoBitrate;
if (params->findInt32("video-bitrate", &videoBitrate)) {
OMX_VIDEO_CONFIG_BITRATETYPE configParams;
InitOMXParams(&configParams);
configParams.nPortIndex = kPortIndexOutput;
configParams.nEncodeBitrate = videoBitrate;
status_t err = mOMX->setConfig(
mNode,
OMX_IndexConfigVideoBitrate,
&configParams,
sizeof(configParams));
if (err != OK) {
ALOGE("setConfig(OMX_IndexConfigVideoBitrate, %d) failed w/ err %d",
videoBitrate, err);
return err;
}
}
int64_t skipFramesBeforeUs;
if (params->findInt64("skip-frames-before", &skipFramesBeforeUs)) {
status_t err =
mOMX->setInternalOption(
mNode,
kPortIndexInput,
IOMX::INTERNAL_OPTION_START_TIME,
&skipFramesBeforeUs,
sizeof(skipFramesBeforeUs));
if (err != OK) {
ALOGE("Failed to set parameter 'skip-frames-before' (err %d)", err);
return err;
}
}
int32_t dropInputFrames;
if (params->findInt32("drop-input-frames", &dropInputFrames)) {
bool suspend = dropInputFrames != 0;
status_t err =
mOMX->setInternalOption(
mNode,
kPortIndexInput,
IOMX::INTERNAL_OPTION_SUSPEND,
&suspend,
sizeof(suspend));
if (err != OK) {
ALOGE("Failed to set parameter 'drop-input-frames' (err %d)", err);
return err;
}
}
int32_t dummy;
if (params->findInt32("request-sync", &dummy)) {
status_t err = requestIDRFrame();
if (err != OK) {
ALOGE("Requesting a sync frame failed w/ err %d", err);
return err;
}
}
float rate;
if (params->findFloat("operating-rate", &rate) && rate > 0) {
status_t err = setOperatingRate(rate, mIsVideo);
if (err != OK) {
ALOGE("Failed to set parameter 'operating-rate' (err %d)", err);
return err;
}
}
return OK;
}
|
C
|
Android
| 0 |
CVE-2011-3104
|
https://www.cvedetails.com/cve/CVE-2011-3104/
|
CWE-119
|
https://github.com/chromium/chromium/commit/6b5f83842b5edb5d4bd6684b196b3630c6769731
|
6b5f83842b5edb5d4bd6684b196b3630c6769731
|
[i18n-fixlet] Make strings branding specific in extension code.
IDS_EXTENSIONS_UNINSTALL
IDS_EXTENSIONS_INCOGNITO_WARNING
IDS_EXTENSION_INSTALLED_HEADING
IDS_EXTENSION_ALERT_ITEM_EXTERNAL And fix a $1 $1 bug.
IDS_EXTENSION_INLINE_INSTALL_PROMPT_TITLE
BUG=NONE
TEST=NONE
Review URL: http://codereview.chromium.org/9107061
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@118018 0039d316-1c4b-4281-b951-d872f2087c98
|
std::string SkColorToRGBComponents(SkColor color) {
return base::StringPrintf(
"%d,%d,%d",
SkColorGetR(color),
SkColorGetG(color),
SkColorGetB(color));
}
|
std::string SkColorToRGBComponents(SkColor color) {
return base::StringPrintf(
"%d,%d,%d",
SkColorGetR(color),
SkColorGetG(color),
SkColorGetB(color));
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/610f904d8215075c4681be4eb413f4348860bf9f
|
610f904d8215075c4681be4eb413f4348860bf9f
|
Retrieve per host storage usage from QuotaManager.
[email protected]
BUG=none
TEST=QuotaManagerTest.GetUsage
Review URL: http://codereview.chromium.org/8079004
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@103921 0039d316-1c4b-4281-b951-d872f2087c98
|
void QuotaManager::DidInitializeTemporaryGlobalQuota(int64 quota) {
temporary_global_quota_ = quota;
temporary_global_quota_callbacks_.Run(
db_disabled_ ? kQuotaErrorInvalidAccess : kQuotaStatusOk,
kStorageTypeTemporary, quota);
if (db_disabled_ || eviction_disabled_)
return;
if (!need_initialize_origins_) {
StartEviction();
return;
}
temporary_usage_tracker_->GetGlobalUsage(callback_factory_.NewCallback(
&QuotaManager::DidRunInitialGetTemporaryGlobalUsage));
}
|
void QuotaManager::DidInitializeTemporaryGlobalQuota(int64 quota) {
temporary_global_quota_ = quota;
temporary_global_quota_callbacks_.Run(
db_disabled_ ? kQuotaErrorInvalidAccess : kQuotaStatusOk,
kStorageTypeTemporary, quota);
if (db_disabled_ || eviction_disabled_)
return;
if (!need_initialize_origins_) {
StartEviction();
return;
}
temporary_usage_tracker_->GetGlobalUsage(callback_factory_.NewCallback(
&QuotaManager::DidRunInitialGetTemporaryGlobalUsage));
}
|
C
|
Chrome
| 0 |
CVE-2011-2918
|
https://www.cvedetails.com/cve/CVE-2011-2918/
|
CWE-399
|
https://github.com/torvalds/linux/commit/a8b0ca17b80e92faab46ee7179ba9e99ccb61233
|
a8b0ca17b80e92faab46ee7179ba9e99ccb61233
|
perf: Remove the nmi parameter from the swevent and overflow interface
The nmi parameter indicated whether we could do wakeups from the current
context; if not, we would set some state, self-IPI, and let the
resulting interrupt do the wakeup.
For the various event classes:
- hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from
the PMI-tail (ARM etc.)
- tracepoint: nmi=0; since tracepoint could be from NMI context.
- software: nmi=[0,1]; some, like the schedule thing cannot
perform wakeups, and hence need 0.
As one can see, there is very little nmi=1 usage, and the down-side of
not using it is that on some platforms some software events can have a
jiffy delay in wakeup (when arch_irq_work_raise isn't implemented).
The up-side however is that we can remove the nmi parameter and save a
bunch of conditionals in fast paths.
Signed-off-by: Peter Zijlstra <[email protected]>
Cc: Michael Cree <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Deng-Cheng Zhu <[email protected]>
Cc: Anton Blanchard <[email protected]>
Cc: Eric B Munson <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: Paul Mundt <[email protected]>
Cc: David S. Miller <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Cc: Jason Wessel <[email protected]>
Cc: Don Zickus <[email protected]>
Link: http://lkml.kernel.org/n/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
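At the interface level the change amounts to dropping one parameter; a signature-only sketch (the names are illustrative, not the exact kernel prototypes):

/* Old shape: every caller had to say whether it ran in NMI context. */
void perf_sw_event_old(unsigned int id, unsigned long val, int nmi);
/* New shape: the flag is gone; deferral (e.g. via irq_work) is decided
 * internally, saving a conditional on every fast-path call site. */
void perf_sw_event_new(unsigned int id, unsigned long val);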
|
void perf_event_update_userpage(struct perf_event *event)
{
struct perf_event_mmap_page *userpg;
struct ring_buffer *rb;
u64 enabled, running;
rcu_read_lock();
/*
* compute total_time_enabled, total_time_running
* based on snapshot values taken when the event
* was last scheduled in.
*
* we cannot simply call update_context_time()
* because of locking issues as we can be called in
* NMI context
*/
calc_timer_values(event, &enabled, &running);
rb = rcu_dereference(event->rb);
if (!rb)
goto unlock;
userpg = rb->user_page;
/*
* Disable preemption so as to not let the corresponding user-space
* spin too long if we get preempted.
*/
preempt_disable();
++userpg->lock;
barrier();
userpg->index = perf_event_index(event);
userpg->offset = perf_event_count(event);
if (event->state == PERF_EVENT_STATE_ACTIVE)
userpg->offset -= local64_read(&event->hw.prev_count);
userpg->time_enabled = enabled +
atomic64_read(&event->child_total_time_enabled);
userpg->time_running = running +
atomic64_read(&event->child_total_time_running);
barrier();
++userpg->lock;
preempt_enable();
unlock:
rcu_read_unlock();
}
|
void perf_event_update_userpage(struct perf_event *event)
{
struct perf_event_mmap_page *userpg;
struct ring_buffer *rb;
u64 enabled, running;
rcu_read_lock();
/*
* compute total_time_enabled, total_time_running
* based on snapshot values taken when the event
* was last scheduled in.
*
* we cannot simply call update_context_time()
* because of locking issues as we can be called in
* NMI context
*/
calc_timer_values(event, &enabled, &running);
rb = rcu_dereference(event->rb);
if (!rb)
goto unlock;
userpg = rb->user_page;
/*
* Disable preemption so as to not let the corresponding user-space
* spin too long if we get preempted.
*/
preempt_disable();
++userpg->lock;
barrier();
userpg->index = perf_event_index(event);
userpg->offset = perf_event_count(event);
if (event->state == PERF_EVENT_STATE_ACTIVE)
userpg->offset -= local64_read(&event->hw.prev_count);
userpg->time_enabled = enabled +
atomic64_read(&event->child_total_time_enabled);
userpg->time_running = running +
atomic64_read(&event->child_total_time_running);
barrier();
++userpg->lock;
preempt_enable();
unlock:
rcu_read_unlock();
}
|
C
|
linux
| 0 |
CVE-2019-6974
|
https://www.cvedetails.com/cve/CVE-2019-6974/
|
CWE-362
|
https://github.com/torvalds/linux/commit/cfa39381173d5f969daf43582c95ad679189cbc9
|
cfa39381173d5f969daf43582c95ad679189cbc9
|
kvm: fix kvm_ioctl_create_device() reference counting (CVE-2019-6974)
kvm_ioctl_create_device() does the following:
1. creates a device that holds a reference to the VM object (with a borrowed
reference, the VM's refcount has not been bumped yet)
2. initializes the device
3. transfers the reference to the device to the caller's file descriptor table
4. calls kvm_get_kvm() to turn the borrowed reference to the VM into a real
reference
The ownership transfer in step 3 must not happen before the reference to the VM
becomes a proper, non-borrowed reference, which only happens in step 4.
After step 3, an attacker can close the file descriptor and drop the borrowed
reference, which can cause the refcount of the kvm object to drop to zero.
This means that we need to grab a reference for the device before
anon_inode_getfd(), otherwise the VM can disappear from under us.
Fixes: 852b6d57dc7f ("kvm: add device control API")
Cc: [email protected]
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
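The ordering fix can be sketched with invented stand-ins: take the real reference on the owning object before installing the file descriptor, because installing the fd publishes the object to userspace, which may close it immediately:

struct vm_like  { int refcount; };
struct dev_like { struct vm_like *vm; };
static void vm_get(struct vm_like *vm) { vm->refcount++; } /* kvm_get_kvm() stand-in */
static void vm_put(struct vm_like *vm) { vm->refcount--; } /* kvm_put_kvm() stand-in */
/* Stand-in for anon_inode_getfd(): once the fd exists, userspace can
 * close it (and drop the reference) at any moment. */
static int install_fd(struct dev_like *dev) { (void)dev; return 3; }
static int create_device_like(struct vm_like *vm, struct dev_like *dev)
{
    dev->vm = vm;
    vm_get(vm);                 /* 1. take the real reference first */
    int fd = install_fd(dev);   /* 2. only now publish to userspace */
    if (fd < 0) {
        vm_put(vm);             /* undo on failure */
        return fd;
    }
    return fd;
}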
|
static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
const struct mmu_notifier_range *range)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
int need_tlb_flush = 0, idx;
int ret;
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
/*
* The count increase must become visible at unlock time as no
* spte can be established without taking the mmu_lock and
* count is also read inside the mmu_lock critical section.
*/
kvm->mmu_notifier_count++;
need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end);
need_tlb_flush |= kvm->tlbs_dirty;
/* we've to flush the tlb before the pages can be freed */
if (need_tlb_flush)
kvm_flush_remote_tlbs(kvm);
spin_unlock(&kvm->mmu_lock);
ret = kvm_arch_mmu_notifier_invalidate_range(kvm, range->start,
range->end, range->blockable);
srcu_read_unlock(&kvm->srcu, idx);
return ret;
}
|
static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
const struct mmu_notifier_range *range)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
int need_tlb_flush = 0, idx;
int ret;
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
/*
* The count increase must become visible at unlock time as no
* spte can be established without taking the mmu_lock and
* count is also read inside the mmu_lock critical section.
*/
kvm->mmu_notifier_count++;
need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end);
need_tlb_flush |= kvm->tlbs_dirty;
/* we've to flush the tlb before the pages can be freed */
if (need_tlb_flush)
kvm_flush_remote_tlbs(kvm);
spin_unlock(&kvm->mmu_lock);
ret = kvm_arch_mmu_notifier_invalidate_range(kvm, range->start,
range->end, range->blockable);
srcu_read_unlock(&kvm->srcu, idx);
return ret;
}
|
C
|
linux
| 0 |
CVE-2016-2185
|
https://www.cvedetails.com/cve/CVE-2016-2185/
| null |
https://github.com/torvalds/linux/commit/950336ba3e4a1ffd2ca60d29f6ef386dd2c7351d
|
950336ba3e4a1ffd2ca60d29f6ef386dd2c7351d
|
Input: ati_remote2 - fix crashes on detecting device with invalid descriptor
The ati_remote2 driver expects at least two interfaces with one
endpoint each. If given a malicious descriptor that specifies one
interface or no endpoints, it will crash in the probe function.
Ensure there are at least two interfaces and one endpoint for each
interface before using it.
The full disclosure: http://seclists.org/bugtraq/2016/Mar/90
Reported-by: Ralf Spenneberg <[email protected]>
Signed-off-by: Vladis Dronov <[email protected]>
Cc: [email protected]
Signed-off-by: Dmitry Torokhov <[email protected]>
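A hedged sketch of the probe-time validation, with an invented descriptor layout: verify the advertised topology before any array access:

#include <stdbool.h>
struct ep_like   { int addr; };
struct intf_like { int num_endpoints; struct ep_like *endpoints; };
struct dev_like  { int num_interfaces; struct intf_like *interfaces; };
static bool descriptor_ok(const struct dev_like *dev)
{
    if (dev->num_interfaces < 2)
        return false;               /* driver expects two interfaces */
    for (int i = 0; i < 2; i++)
        if (dev->interfaces[i].num_endpoints < 1)
            return false;           /* ...each with at least one endpoint */
    return true;
}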
|
static int ati_remote2_lookup(unsigned int hw_code)
{
int i;
for (i = 0; i < ARRAY_SIZE(ati_remote2_key_table); i++)
if (ati_remote2_key_table[i].hw_code == hw_code)
return i;
return -1;
}
|
static int ati_remote2_lookup(unsigned int hw_code)
{
int i;
for (i = 0; i < ARRAY_SIZE(ati_remote2_key_table); i++)
if (ati_remote2_key_table[i].hw_code == hw_code)
return i;
return -1;
}
|
C
|
linux
| 0 |
CVE-2016-3751
|
https://www.cvedetails.com/cve/CVE-2016-3751/
| null |
https://android.googlesource.com/platform/external/libpng/+/9d4853418ab2f754c2b63e091c29c5529b8b86ca
|
9d4853418ab2f754c2b63e091c29c5529b8b86ca
|
DO NOT MERGE Update libpng to 1.6.20
BUG:23265085
Change-Id: I85199805636d771f3597b691b63bc0bf46084833
(cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82)
|
gamma_image_validate(gamma_display *dp, png_const_structp pp,
png_infop pi)
{
/* Get some constants derived from the input and output file formats: */
const png_store* const ps = dp->this.ps;
const png_byte in_ct = dp->this.colour_type;
const png_byte in_bd = dp->this.bit_depth;
const png_uint_32 w = dp->this.w;
const png_uint_32 h = dp->this.h;
const size_t cbRow = dp->this.cbRow;
const png_byte out_ct = png_get_color_type(pp, pi);
const png_byte out_bd = png_get_bit_depth(pp, pi);
/* There are three sources of error, firstly the quantization in the
* file encoding, determined by sbit and/or the file depth, secondly
* the output (screen) gamma and thirdly the output file encoding.
*
* Since this API receives the screen and file gamma in double
* precision it is possible to calculate an exact answer given an input
* pixel value. Therefore we assume that the *input* value is exact -
* sample/maxsample - calculate the corresponding gamma corrected
* output to the limits of double precision arithmetic and compare with
* what libpng returns.
*
* Since the library must quantize the output to 8 or 16 bits there is
* a fundamental limit on the accuracy of the output of +/-.5 - this
* quantization limit is included in addition to the other limits
* specified by the parameters to the API. (Effectively, add .5
* everywhere.)
*
* The behavior of the 'sbit' parameter is defined by section 12.5
* (sample depth scaling) of the PNG spec. That section forces the
* decoder to assume that the PNG values have been scaled if sBIT is
* present:
*
* png-sample = floor( input-sample * (max-out/max-in) + .5);
*
* This means that only a subset of the possible PNG values should
* appear in the input. However, the spec allows the encoder to use a
* variety of approximations to the above and doesn't require any
* restriction of the values produced.
*
* Nevertheless the spec requires that the upper 'sBIT' bits of the
* value stored in a PNG file be the original sample bits.
* Consequently the code below simply scales the top sbit bits by
* (1<<sbit)-1 to obtain an original sample value.
*
* Because there is limited precision in the input it is arguable that
* an acceptable result is any valid result from input-.5 to input+.5.
* The basic tests below do not do this, however if 'use_input_precision'
* is set a subsequent test is performed above.
*/
const unsigned int samples_per_pixel = (out_ct & 2U) ? 3U : 1U;
int processing;
png_uint_32 y;
const store_palette_entry *in_palette = dp->this.palette;
const int in_is_transparent = dp->this.is_transparent;
int process_tRNS;
int out_npalette = -1;
int out_is_transparent = 0; /* Just refers to the palette case */
store_palette out_palette;
validate_info vi;
/* Check for row overwrite errors */
store_image_check(dp->this.ps, pp, 0);
/* Supply the input and output sample depths here - 8 for an indexed image,
* otherwise the bit depth.
*/
init_validate_info(&vi, dp, pp, in_ct==3?8:in_bd, out_ct==3?8:out_bd);
processing = (vi.gamma_correction > 0 && !dp->threshold_test)
|| in_bd != out_bd || in_ct != out_ct || vi.do_background;
process_tRNS = dp->this.has_tRNS && vi.do_background;
/* TODO: FIX THIS: MAJOR BUG! If the transformations all happen inside
* the palette there is no way of finding out, because libpng fails to
* update the palette on png_read_update_info. Indeed, libpng doesn't
* even do the required work until much later, when it doesn't have any
* info pointer. Oops. For the moment 'processing' is turned off if
* out_ct is palette.
*/
if (in_ct == 3 && out_ct == 3)
processing = 0;
if (processing && out_ct == 3)
out_is_transparent = read_palette(out_palette, &out_npalette, pp, pi);
for (y=0; y<h; ++y)
{
png_const_bytep pRow = store_image_row(ps, pp, 0, y);
png_byte std[STANDARD_ROWMAX];
transform_row(pp, std, in_ct, in_bd, y);
if (processing)
{
unsigned int x;
for (x=0; x<w; ++x)
{
double alpha = 1; /* serves as a flag value */
/* Record the palette index for index images. */
const unsigned int in_index =
in_ct == 3 ? sample(std, 3, in_bd, x, 0, 0, 0) : 256;
const unsigned int out_index =
out_ct == 3 ? sample(std, 3, out_bd, x, 0, 0, 0) : 256;
/* Handle input alpha - png_set_background will cause the output
* alpha to disappear so there is nothing to check.
*/
if ((in_ct & PNG_COLOR_MASK_ALPHA) != 0 ||
(in_ct == 3 && in_is_transparent))
{
const unsigned int input_alpha = in_ct == 3 ?
dp->this.palette[in_index].alpha :
sample(std, in_ct, in_bd, x, samples_per_pixel, 0, 0);
unsigned int output_alpha = 65536 /* as a flag value */;
if (out_ct == 3)
{
if (out_is_transparent)
output_alpha = out_palette[out_index].alpha;
}
else if ((out_ct & PNG_COLOR_MASK_ALPHA) != 0)
output_alpha = sample(pRow, out_ct, out_bd, x,
samples_per_pixel, 0, 0);
if (output_alpha != 65536)
alpha = gamma_component_validate("alpha", &vi, input_alpha,
output_alpha, -1/*alpha*/, 0/*background*/);
else /* no alpha in output */
{
/* This is a copy of the calculation of 'i' above in order to
* have the alpha value to use in the background calculation.
*/
alpha = input_alpha >> vi.isbit_shift;
alpha /= vi.sbit_max;
}
}
else if (process_tRNS)
{
/* alpha needs to be set appropriately for this pixel, it is
* currently 1 and needs to be 0 for an input pixel which matches
* the values in tRNS.
*/
switch (in_ct)
{
case 0: /* gray */
if (sample(std, in_ct, in_bd, x, 0, 0, 0) ==
dp->this.transparent.red)
alpha = 0;
break;
case 2: /* RGB */
if (sample(std, in_ct, in_bd, x, 0, 0, 0) ==
dp->this.transparent.red &&
sample(std, in_ct, in_bd, x, 1, 0, 0) ==
dp->this.transparent.green &&
sample(std, in_ct, in_bd, x, 2, 0, 0) ==
dp->this.transparent.blue)
alpha = 0;
break;
default:
break;
}
}
/* Handle grayscale or RGB components. */
if ((in_ct & PNG_COLOR_MASK_COLOR) == 0) /* grayscale */
(void)gamma_component_validate("gray", &vi,
sample(std, in_ct, in_bd, x, 0, 0, 0),
sample(pRow, out_ct, out_bd, x, 0, 0, 0),
alpha/*component*/, vi.background_red);
else /* RGB or palette */
{
(void)gamma_component_validate("red", &vi,
in_ct == 3 ? in_palette[in_index].red :
sample(std, in_ct, in_bd, x, 0, 0, 0),
out_ct == 3 ? out_palette[out_index].red :
sample(pRow, out_ct, out_bd, x, 0, 0, 0),
alpha/*component*/, vi.background_red);
(void)gamma_component_validate("green", &vi,
in_ct == 3 ? in_palette[in_index].green :
sample(std, in_ct, in_bd, x, 1, 0, 0),
out_ct == 3 ? out_palette[out_index].green :
sample(pRow, out_ct, out_bd, x, 1, 0, 0),
alpha/*component*/, vi.background_green);
(void)gamma_component_validate("blue", &vi,
in_ct == 3 ? in_palette[in_index].blue :
sample(std, in_ct, in_bd, x, 2, 0, 0),
out_ct == 3 ? out_palette[out_index].blue :
sample(pRow, out_ct, out_bd, x, 2, 0, 0),
alpha/*component*/, vi.background_blue);
}
}
}
else if (memcmp(std, pRow, cbRow) != 0)
{
char msg[64];
/* No transform is expected on the threshold tests. */
sprintf(msg, "gamma: below threshold row %lu changed",
(unsigned long)y);
png_error(pp, msg);
}
} /* row (y) loop */
dp->this.ps->validated = 1;
}
|
gamma_image_validate(gamma_display *dp, png_const_structp pp,
png_infop pi)
{
/* Get some constants derived from the input and output file formats: */
PNG_CONST png_store* PNG_CONST ps = dp->this.ps;
PNG_CONST png_byte in_ct = dp->this.colour_type;
PNG_CONST png_byte in_bd = dp->this.bit_depth;
PNG_CONST png_uint_32 w = dp->this.w;
PNG_CONST png_uint_32 h = dp->this.h;
PNG_CONST size_t cbRow = dp->this.cbRow;
PNG_CONST png_byte out_ct = png_get_color_type(pp, pi);
PNG_CONST png_byte out_bd = png_get_bit_depth(pp, pi);
/* There are three sources of error, firstly the quantization in the
* file encoding, determined by sbit and/or the file depth, secondly
* the output (screen) gamma and thirdly the output file encoding.
*
* Since this API receives the screen and file gamma in double
* precision it is possible to calculate an exact answer given an input
* pixel value. Therefore we assume that the *input* value is exact -
* sample/maxsample - calculate the corresponding gamma corrected
* output to the limits of double precision arithmetic and compare with
* what libpng returns.
*
* Since the library must quantize the output to 8 or 16 bits there is
* a fundamental limit on the accuracy of the output of +/-.5 - this
* quantization limit is included in addition to the other limits
* specified by the parameters to the API. (Effectively, add .5
* everywhere.)
*
* The behavior of the 'sbit' parameter is defined by section 12.5
* (sample depth scaling) of the PNG spec. That section forces the
* decoder to assume that the PNG values have been scaled if sBIT is
* present:
*
* png-sample = floor( input-sample * (max-out/max-in) + .5);
*
* This means that only a subset of the possible PNG values should
* appear in the input. However, the spec allows the encoder to use a
* variety of approximations to the above and doesn't require any
* restriction of the values produced.
*
* Nevertheless the spec requires that the upper 'sBIT' bits of the
* value stored in a PNG file be the original sample bits.
* Consequently the code below simply scales the top sbit bits by
* (1<<sbit)-1 to obtain an original sample value.
*
* Because there is limited precision in the input it is arguable that
* an acceptable result is any valid result from input-.5 to input+.5.
* The basic tests below do not do this, however if 'use_input_precision'
* is set a subsequent test is performed above.
*/
PNG_CONST unsigned int samples_per_pixel = (out_ct & 2U) ? 3U : 1U;
int processing;
png_uint_32 y;
PNG_CONST store_palette_entry *in_palette = dp->this.palette;
PNG_CONST int in_is_transparent = dp->this.is_transparent;
int out_npalette = -1;
int out_is_transparent = 0; /* Just refers to the palette case */
store_palette out_palette;
validate_info vi;
/* Check for row overwrite errors */
store_image_check(dp->this.ps, pp, 0);
/* Supply the input and output sample depths here - 8 for an indexed image,
* otherwise the bit depth.
*/
init_validate_info(&vi, dp, pp, in_ct==3?8:in_bd, out_ct==3?8:out_bd);
processing = (vi.gamma_correction > 0 && !dp->threshold_test)
|| in_bd != out_bd || in_ct != out_ct || vi.do_background;
/* TODO: FIX THIS: MAJOR BUG! If the transformations all happen inside
* the palette there is no way of finding out, because libpng fails to
* update the palette on png_read_update_info. Indeed, libpng doesn't
* even do the required work until much later, when it doesn't have any
* info pointer. Oops. For the moment 'processing' is turned off if
* out_ct is palette.
*/
if (in_ct == 3 && out_ct == 3)
processing = 0;
if (processing && out_ct == 3)
out_is_transparent = read_palette(out_palette, &out_npalette, pp, pi);
for (y=0; y<h; ++y)
{
png_const_bytep pRow = store_image_row(ps, pp, 0, y);
png_byte std[STANDARD_ROWMAX];
transform_row(pp, std, in_ct, in_bd, y);
if (processing)
{
unsigned int x;
for (x=0; x<w; ++x)
{
double alpha = 1; /* serves as a flag value */
/* Record the palette index for index images. */
PNG_CONST unsigned int in_index =
in_ct == 3 ? sample(std, 3, in_bd, x, 0) : 256;
PNG_CONST unsigned int out_index =
out_ct == 3 ? sample(std, 3, out_bd, x, 0) : 256;
/* Handle input alpha - png_set_background will cause the output
* alpha to disappear so there is nothing to check.
*/
if ((in_ct & PNG_COLOR_MASK_ALPHA) != 0 || (in_ct == 3 &&
in_is_transparent))
{
PNG_CONST unsigned int input_alpha = in_ct == 3 ?
dp->this.palette[in_index].alpha :
sample(std, in_ct, in_bd, x, samples_per_pixel);
unsigned int output_alpha = 65536 /* as a flag value */;
if (out_ct == 3)
{
if (out_is_transparent)
output_alpha = out_palette[out_index].alpha;
}
else if ((out_ct & PNG_COLOR_MASK_ALPHA) != 0)
output_alpha = sample(pRow, out_ct, out_bd, x,
samples_per_pixel);
if (output_alpha != 65536)
alpha = gamma_component_validate("alpha", &vi, input_alpha,
output_alpha, -1/*alpha*/, 0/*background*/);
else /* no alpha in output */
{
/* This is a copy of the calculation of 'i' above in order to
* have the alpha value to use in the background calculation.
*/
alpha = input_alpha >> vi.isbit_shift;
alpha /= vi.sbit_max;
}
}
/* Handle grayscale or RGB components. */
if ((in_ct & PNG_COLOR_MASK_COLOR) == 0) /* grayscale */
(void)gamma_component_validate("gray", &vi,
sample(std, in_ct, in_bd, x, 0),
sample(pRow, out_ct, out_bd, x, 0), alpha/*component*/,
vi.background_red);
else /* RGB or palette */
{
(void)gamma_component_validate("red", &vi,
in_ct == 3 ? in_palette[in_index].red :
sample(std, in_ct, in_bd, x, 0),
out_ct == 3 ? out_palette[out_index].red :
sample(pRow, out_ct, out_bd, x, 0),
alpha/*component*/, vi.background_red);
(void)gamma_component_validate("green", &vi,
in_ct == 3 ? in_palette[in_index].green :
sample(std, in_ct, in_bd, x, 1),
out_ct == 3 ? out_palette[out_index].green :
sample(pRow, out_ct, out_bd, x, 1),
alpha/*component*/, vi.background_green);
(void)gamma_component_validate("blue", &vi,
in_ct == 3 ? in_palette[in_index].blue :
sample(std, in_ct, in_bd, x, 2),
out_ct == 3 ? out_palette[out_index].blue :
sample(pRow, out_ct, out_bd, x, 2),
alpha/*component*/, vi.background_blue);
}
}
}
else if (memcmp(std, pRow, cbRow) != 0)
{
char msg[64];
/* No transform is expected on the threshold tests. */
sprintf(msg, "gamma: below threshold row %lu changed",
(unsigned long)y);
png_error(pp, msg);
}
} /* row (y) loop */
dp->this.ps->validated = 1;
}
|
C
|
Android
| 1 |
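The sample-depth scaling rule quoted in the comment block above — png-sample = floor(input-sample * (max-out/max-in) + .5), from section 12.5 of the PNG spec — can be illustrated with a small standalone sketch. The helper name scale_sample and its parameters are illustrative and not part of pngvalid.c.

#include <math.h>

/* Illustrative only: rescale a sample from in_depth significant bits to
 * out_depth bits using the rounding rule from PNG spec section 12.5.
 */
static unsigned int scale_sample(unsigned int s, int in_depth, int out_depth)
{
   unsigned int max_in = (1U << in_depth) - 1;
   unsigned int max_out = (1U << out_depth) - 1;

   return (unsigned int)floor(s * ((double)max_out / max_in) + .5);
}

For example, scale_sample(5, 3, 8) gives 182, which mirrors the (1<<sbit)-1 scaling the validation code applies when undoing sBIT quantization.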
CVE-2018-12714
|
https://www.cvedetails.com/cve/CVE-2018-12714/
|
CWE-787
|
https://github.com/torvalds/linux/commit/81f9c4e4177d31ced6f52a89bb70e93bfb77ca03
|
81f9c4e4177d31ced6f52a89bb70e93bfb77ca03
|
Merge tag 'trace-v4.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing fixes from Steven Rostedt:
"This contains a few fixes and a clean up.
- a bad merge caused an "endif" to go in the wrong place in
scripts/Makefile.build
- softirq tracing fix for tracing that corrupts lockdep and causes a
false splat
- histogram documentation typo fixes
- fix a bad memory reference when passing in no filter to the filter
code
- simplify code by using the swap macro instead of open coding the
swap"
* tag 'trace-v4.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
tracing: Fix SKIP_STACK_VALIDATION=1 build due to bad merge with -mrecord-mcount
tracing: Fix some errors in histogram documentation
tracing: Use swap macro in update_max_tr
softirq: Reorder trace_softirqs_on to prevent lockdep splat
tracing: Check for no filter when processing event filters
|
static int __init set_buf_size(char *str)
{
unsigned long buf_size;
if (!str)
return 0;
buf_size = memparse(str, &str);
/* nr_entries can not be zero */
if (buf_size == 0)
return 0;
trace_buf_size = buf_size;
return 1;
}
|
static int __init set_buf_size(char *str)
{
unsigned long buf_size;
if (!str)
return 0;
buf_size = memparse(str, &str);
/* nr_entries can not be zero */
if (buf_size == 0)
return 0;
trace_buf_size = buf_size;
return 1;
}
|
C
|
linux
| 0 |
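The "use the swap macro instead of open coding the swap" item in the merge message above refers to the kernel's generic swap() helper. A minimal sketch of the pattern; the update_max_tr field names are from that era of kernel/trace/trace.c and should be treated as illustrative:

/* The kernel's swap() macro: exchange two values of the same type. */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

/* Open-coded form replaced by the cleanup:
 *   buf = tr->trace_buffer.buffer;
 *   tr->trace_buffer.buffer = tr->max_buffer.buffer;
 *   tr->max_buffer.buffer = buf;
 * After:
 *   swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
 */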
CVE-2014-1713
|
https://www.cvedetails.com/cve/CVE-2014-1713/
|
CWE-399
|
https://github.com/chromium/chromium/commit/f85a87ec670ad0fce9d98d90c9a705b72a288154
|
f85a87ec670ad0fce9d98d90c9a705b72a288154
|
document.location bindings fix
BUG=352374
[email protected]
Review URL: https://codereview.chromium.org/196343011
git-svn-id: svn://svn.chromium.org/blink/trunk@169176 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
static void limitedWithInvalidMissingDefaultAttributeAttributeSetter(v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info)
{
TestObjectPython* imp = V8TestObjectPython::toNative(info.Holder());
V8TRYCATCH_FOR_V8STRINGRESOURCE_VOID(V8StringResource<>, cppValue, jsValue);
CustomElementCallbackDispatcher::CallbackDeliveryScope deliveryScope;
imp->setAttribute(HTMLNames::limitedwithinvalidmissingdefaultattributeAttr, cppValue);
}
|
static void limitedWithInvalidMissingDefaultAttributeAttributeSetter(v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info)
{
TestObjectPython* imp = V8TestObjectPython::toNative(info.Holder());
V8TRYCATCH_FOR_V8STRINGRESOURCE_VOID(V8StringResource<>, cppValue, jsValue);
CustomElementCallbackDispatcher::CallbackDeliveryScope deliveryScope;
imp->setAttribute(HTMLNames::limitedwithinvalidmissingdefaultattributeAttr, cppValue);
}
|
C
|
Chrome
| 0 |
CVE-2016-1705
|
https://www.cvedetails.com/cve/CVE-2016-1705/
| null |
https://github.com/chromium/chromium/commit/4afb628e068367d5b73440537555902cd12416f8
|
4afb628e068367d5b73440537555902cd12416f8
|
gpu/android : Add support for partial swap with surface control.
Add support for PostSubBuffer to GLSurfaceEGLSurfaceControl. This should
allow the display compositor to draw the minimum sub-rect necessary from
the damage tracking in BufferQueue on the client-side, and also to pass
this damage rect to the framework.
[email protected]
Bug: 926020
Change-Id: I73d3320cab68250d4c6865bf21c5531682d8bf61
Reviewed-on: https://chromium-review.googlesource.com/c/1457467
Commit-Queue: Khushal <[email protected]>
Commit-Queue: Antoine Labour <[email protected]>
Reviewed-by: Antoine Labour <[email protected]>
Auto-Submit: Khushal <[email protected]>
Cr-Commit-Position: refs/heads/master@{#629852}
|
bool GLSurfaceEGLSurfaceControl::Initialize(GLSurfaceFormat format) {
format_ = format;
return true;
}
|
bool GLSurfaceEGLSurfaceControl::Initialize(GLSurfaceFormat format) {
format_ = format;
return true;
}
|
C
|
Chrome
| 0 |
CVE-2013-1798
|
https://www.cvedetails.com/cve/CVE-2013-1798/
|
CWE-20
|
https://github.com/torvalds/linux/commit/a2c118bfab8bc6b8bb213abfc35201e441693d55
|
a2c118bfab8bc6b8bb213abfc35201e441693d55
|
KVM: Fix bounds checking in ioapic indirect register reads (CVE-2013-1798)
If the guest specifies a IOAPIC_REG_SELECT with an invalid value and follows
that with a read of the IOAPIC_REG_WINDOW KVM does not properly validate
that request. ioapic_read_indirect contains an
ASSERT(redir_index < IOAPIC_NUM_PINS), but the ASSERT has no effect in
non-debug builds. In recent kernels this allows a guest to cause a kernel
oops by reading invalid memory. In older kernels (pre-3.3) this allows a
guest to read from large ranges of host memory.
Tested: tested against apic unit tests.
Signed-off-by: Andrew Honig <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]>
|
bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector)
{
struct kvm_ioapic *ioapic = kvm->arch.vioapic;
smp_rmb();
return test_bit(vector, ioapic->handled_vectors);
}
|
bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector)
{
struct kvm_ioapic *ioapic = kvm->arch.vioapic;
smp_rmb();
return test_bit(vector, ioapic->handled_vectors);
}
|
C
|
linux
| 0 |
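The fix described above replaces a debug-only ASSERT with an unconditional range check before the redirection table is read. A standalone model of that shape — not the verbatim patch; the struct and helper names are illustrative:

#include <stdint.h>

#define IOAPIC_NUM_PINS 24

struct ioapic_model {
    uint32_t ioregsel;                    /* guest-chosen IOAPIC_REG_SELECT */
    uint64_t redirtbl[IOAPIC_NUM_PINS];
};

/* The index derived from the guest-controlled register select must be
 * validated unconditionally: an ASSERT() compiles away under NDEBUG,
 * which is exactly why the original check had no effect.
 */
static uint64_t read_redir_entry(const struct ioapic_model *io)
{
    uint32_t redir_index = (io->ioregsel - 0x10) >> 1;

    if (redir_index >= IOAPIC_NUM_PINS)
        return ~0ULL;                     /* reject out-of-range selects */
    return io->redirtbl[redir_index];
}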
CVE-2016-2464
|
https://www.cvedetails.com/cve/CVE-2016-2464/
|
CWE-20
|
https://android.googlesource.com/platform/external/libvpx/+/cc274e2abe8b2a6698a5c47d8aa4bb45f1f9538d
|
cc274e2abe8b2a6698a5c47d8aa4bb45f1f9538d
|
external/libvpx/libwebm: Update snapshot
Update libwebm snapshot. This update contains security fixes from upstream.
Upstream git hash: 229f49347d19b0ca0941e072b199a242ef6c5f2b
BUG=23167726
Change-Id: Id3e140e7b31ae11294724b1ecfe2e9c83b4d4207
(cherry picked from commit d0281a15b3c6bd91756e453cc9398c5ef412d99a)
|
BlockEntry::BlockEntry(Cluster* p, long idx) : m_pCluster(p), m_index(idx) {}
|
BlockEntry::BlockEntry(Cluster* p, long idx) : m_pCluster(p), m_index(idx) {}
|
C
|
Android
| 0 |
CVE-2015-9016
|
https://www.cvedetails.com/cve/CVE-2015-9016/
|
CWE-362
|
https://github.com/torvalds/linux/commit/0048b4837affd153897ed1222283492070027aa9
|
0048b4837affd153897ed1222283492070027aa9
|
blk-mq: fix race between timeout and freeing request
Inside timeout handler, blk_mq_tag_to_rq() is called
to retrieve the request from one tag. This way is obviously
wrong because the request can be freed any time and some
fiedds of the request can't be trusted, then kernel oops
might be triggered[1].
Currently wrt. blk_mq_tag_to_rq(), the only special case is
that the flush request can share same tag with the request
cloned from, and the two requests can't be active at the same
time, so this patch fixes the above issue by updating tags->rqs[tag]
with the active request(either flush rq or the request cloned
from) of the tag.
Also blk_mq_tag_to_rq() gets much simplified with this patch.
Given blk_mq_tag_to_rq() is mainly for drivers and the caller must
make sure the request can't be freed, so in bt_for_each() this
helper is replaced with tags->rqs[tag].
[1] kernel oops log
[ 439.696220] BUG: unable to handle kernel NULL pointer dereference at 0000000000000158^M
[ 439.697162] IP: [<ffffffff812d89ba>] blk_mq_tag_to_rq+0x21/0x6e^M
[ 439.700653] PGD 7ef765067 PUD 7ef764067 PMD 0 ^M
[ 439.700653] Oops: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC ^M
[ 439.700653] Dumping ftrace buffer:^M
[ 439.700653] (ftrace buffer empty)^M
[ 439.700653] Modules linked in: nbd ipv6 kvm_intel kvm serio_raw^M
[ 439.700653] CPU: 6 PID: 2779 Comm: stress-ng-sigfd Not tainted 4.2.0-rc5-next-20150805+ #265^M
[ 439.730500] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011^M
[ 439.730500] task: ffff880605308000 ti: ffff88060530c000 task.ti: ffff88060530c000^M
[ 439.730500] RIP: 0010:[<ffffffff812d89ba>] [<ffffffff812d89ba>] blk_mq_tag_to_rq+0x21/0x6e^M
[ 439.730500] RSP: 0018:ffff880819203da0 EFLAGS: 00010283^M
[ 439.730500] RAX: ffff880811b0e000 RBX: ffff8800bb465f00 RCX: 0000000000000002^M
[ 439.730500] RDX: 0000000000000000 RSI: 0000000000000202 RDI: 0000000000000000^M
[ 439.730500] RBP: ffff880819203db0 R08: 0000000000000002 R09: 0000000000000000^M
[ 439.730500] R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000202^M
[ 439.730500] R13: ffff880814104800 R14: 0000000000000002 R15: ffff880811a2ea00^M
[ 439.730500] FS: 00007f165b3f5740(0000) GS:ffff880819200000(0000) knlGS:0000000000000000^M
[ 439.730500] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b^M
[ 439.730500] CR2: 0000000000000158 CR3: 00000007ef766000 CR4: 00000000000006e0^M
[ 439.730500] Stack:^M
[ 439.730500] 0000000000000008 ffff8808114eed90 ffff880819203e00 ffffffff812dc104^M
[ 439.755663] ffff880819203e40 ffffffff812d9f5e 0000020000000000 ffff8808114eed80^M
[ 439.755663] Call Trace:^M
[ 439.755663] <IRQ> ^M
[ 439.755663] [<ffffffff812dc104>] bt_for_each+0x6e/0xc8^M
[ 439.755663] [<ffffffff812d9f5e>] ? blk_mq_rq_timed_out+0x6a/0x6a^M
[ 439.755663] [<ffffffff812d9f5e>] ? blk_mq_rq_timed_out+0x6a/0x6a^M
[ 439.755663] [<ffffffff812dc1b3>] blk_mq_tag_busy_iter+0x55/0x5e^M
[ 439.755663] [<ffffffff812d88b4>] ? blk_mq_bio_to_request+0x38/0x38^M
[ 439.755663] [<ffffffff812d8911>] blk_mq_rq_timer+0x5d/0xd4^M
[ 439.755663] [<ffffffff810a3e10>] call_timer_fn+0xf7/0x284^M
[ 439.755663] [<ffffffff810a3d1e>] ? call_timer_fn+0x5/0x284^M
[ 439.755663] [<ffffffff812d88b4>] ? blk_mq_bio_to_request+0x38/0x38^M
[ 439.755663] [<ffffffff810a46d6>] run_timer_softirq+0x1ce/0x1f8^M
[ 439.755663] [<ffffffff8104c367>] __do_softirq+0x181/0x3a4^M
[ 439.755663] [<ffffffff8104c76e>] irq_exit+0x40/0x94^M
[ 439.755663] [<ffffffff81031482>] smp_apic_timer_interrupt+0x33/0x3e^M
[ 439.755663] [<ffffffff815559a4>] apic_timer_interrupt+0x84/0x90^M
[ 439.755663] <EOI> ^M
[ 439.755663] [<ffffffff81554350>] ? _raw_spin_unlock_irq+0x32/0x4a^M
[ 439.755663] [<ffffffff8106a98b>] finish_task_switch+0xe0/0x163^M
[ 439.755663] [<ffffffff8106a94d>] ? finish_task_switch+0xa2/0x163^M
[ 439.755663] [<ffffffff81550066>] __schedule+0x469/0x6cd^M
[ 439.755663] [<ffffffff8155039b>] schedule+0x82/0x9a^M
[ 439.789267] [<ffffffff8119b28b>] signalfd_read+0x186/0x49a^M
[ 439.790911] [<ffffffff8106d86a>] ? wake_up_q+0x47/0x47^M
[ 439.790911] [<ffffffff811618c2>] __vfs_read+0x28/0x9f^M
[ 439.790911] [<ffffffff8117a289>] ? __fget_light+0x4d/0x74^M
[ 439.790911] [<ffffffff811620a7>] vfs_read+0x7a/0xc6^M
[ 439.790911] [<ffffffff8116292b>] SyS_read+0x49/0x7f^M
[ 439.790911] [<ffffffff81554c17>] entry_SYSCALL_64_fastpath+0x12/0x6f^M
[ 439.790911] Code: 48 89 e5 e8 a9 b8 e7 ff 5d c3 0f 1f 44 00 00 55 89
f2 48 89 e5 41 54 41 89 f4 53 48 8b 47 60 48 8b 1c d0 48 8b 7b 30 48 8b
53 38 <48> 8b 87 58 01 00 00 48 85 c0 75 09 48 8b 97 88 0c 00 00 eb 10
^M
[ 439.790911] RIP [<ffffffff812d89ba>] blk_mq_tag_to_rq+0x21/0x6e^M
[ 439.790911] RSP <ffff880819203da0>^M
[ 439.790911] CR2: 0000000000000158^M
[ 439.790911] ---[ end trace d40af58949325661 ]---^M
Cc: <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
|
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
char *orig_page = page;
unsigned int free, res;
if (!tags)
return 0;
page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
"bits_per_word=%u\n",
tags->nr_tags, tags->nr_reserved_tags,
tags->bitmap_tags.bits_per_word);
free = bt_unused_tags(&tags->bitmap_tags);
res = bt_unused_tags(&tags->breserved_tags);
page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));
return page - orig_page;
}
|
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
char *orig_page = page;
unsigned int free, res;
if (!tags)
return 0;
page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
"bits_per_word=%u\n",
tags->nr_tags, tags->nr_reserved_tags,
tags->bitmap_tags.bits_per_word);
free = bt_unused_tags(&tags->bitmap_tags);
res = bt_unused_tags(&tags->breserved_tags);
page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));
return page - orig_page;
}
|
C
|
linux
| 0 |
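With tags->rqs[tag] now kept pointing at the active request (flush request or the request it was cloned from), the commit message notes that blk_mq_tag_to_rq() reduces to a plain table lookup — essentially the following, assuming kernel context for struct request and struct blk_mq_tags:

struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
	return tags->rqs[tag];
}

As the message stresses, callers must guarantee the request cannot be freed while they hold the pointer; the timeout iteration path was converted to read tags->rqs[tag] directly for the same reason.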
CVE-2017-15395
|
https://www.cvedetails.com/cve/CVE-2017-15395/
|
CWE-416
|
https://github.com/chromium/chromium/commit/84ca1ee18bbc32f3cb035d071e8271e064dfd4d7
|
84ca1ee18bbc32f3cb035d071e8271e064dfd4d7
|
Convert MediaTrackConstraints to a ScriptValue
IDLDictionaries such as MediaTrackConstraints should not be stored on
the heap which would happen when binding one as a parameter to a
callback. This change converts the object to a ScriptValue ahead of
time. This is fine because the value will be passed to a
ScriptPromiseResolver that will convert it to a V8 value if it
isn't already.
Bug: 759457
Change-Id: I3009a0f7711cc264aeaae07a36c18a6db8c915c8
Reviewed-on: https://chromium-review.googlesource.com/701358
Reviewed-by: Kentaro Hara <[email protected]>
Commit-Queue: Reilly Grant <[email protected]>
Cr-Commit-Position: refs/heads/master@{#507177}
|
const MediaTrackConstraintSet& ImageCapture::GetMediaTrackConstraints() const {
return current_constraints_;
}
|
const MediaTrackConstraintSet& ImageCapture::GetMediaTrackConstraints() const {
return current_constraints_;
}
|
C
|
Chrome
| 0 |
CVE-2017-18174
|
https://www.cvedetails.com/cve/CVE-2017-18174/
|
CWE-415
|
https://github.com/torvalds/linux/commit/8dca4a41f1ad65043a78c2338d9725f859c8d2c3
|
8dca4a41f1ad65043a78c2338d9725f859c8d2c3
|
pinctrl/amd: Drop pinctrl_unregister for devm_ registered device
It's not necessary to unregister pin controller device registered
with devm_pinctrl_register() and using pinctrl_unregister() leads
to a double free.
Fixes: 3bfd44306c65 ("pinctrl: amd: Add support for additional GPIO")
Signed-off-by: Wei Yongjun <[email protected]>
Signed-off-by: Linus Walleij <[email protected]>
|
static int amd_gpio_remove(struct platform_device *pdev)
{
struct amd_gpio *gpio_dev;
gpio_dev = platform_get_drvdata(pdev);
gpiochip_remove(&gpio_dev->gc);
return 0;
}
|
static int amd_gpio_remove(struct platform_device *pdev)
{
struct amd_gpio *gpio_dev;
gpio_dev = platform_get_drvdata(pdev);
gpiochip_remove(&gpio_dev->gc);
pinctrl_unregister(gpio_dev->pctrl);
return 0;
}
|
C
|
linux
| 1 |
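The rule this fix enforces generalizes to all devm_* helpers: a resource registered through the managed API is released automatically when the driver detaches, so pairing it with a manual unregister causes a double free. A hedged sketch of the two correct pairings; devm_pinctrl_register(), pinctrl_register() and pinctrl_unregister() are the real pinctrl API, while the surrounding driver fields are illustrative:

/* Managed: registered in probe(), no unregister call in remove(). */
gpio_dev->pctrl = devm_pinctrl_register(&pdev->dev, &amd_pinctrl_desc,
					gpio_dev);

/* Unmanaged: would instead require the explicit pair.
 *   gpio_dev->pctrl = pinctrl_register(&amd_pinctrl_desc, &pdev->dev,
 *                                      gpio_dev);          in probe()
 *   pinctrl_unregister(gpio_dev->pctrl);                   in remove()
 */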
CVE-2017-16932
|
https://www.cvedetails.com/cve/CVE-2017-16932/
|
CWE-835
|
https://github.com/GNOME/libxml2/commit/899a5d9f0ed13b8e32449a08a361e0de127dd961
|
899a5d9f0ed13b8e32449a08a361e0de127dd961
|
Detect infinite recursion in parameter entities
When expanding a parameter entity in a DTD, infinite recursion could
lead to an infinite loop or memory exhaustion.
Thanks to Wei Lei for the first of many reports.
Fixes bug 759579.
|
xmlRecoverDoc(const xmlChar *cur) {
return(xmlSAXParseDoc(NULL, cur, 1));
}
|
xmlRecoverDoc(const xmlChar *cur) {
return(xmlSAXParseDoc(NULL, cur, 1));
}
|
C
|
libxml2
| 0 |
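The commit detects parameter entities that directly or indirectly reference themselves during DTD expansion. A standalone sketch of the general guard — not the libxml2 patch itself: mark an entity while it is being expanded and refuse to re-enter it. The flag name and struct are illustrative.

#include <stdio.h>

#define ENT_EXPANDING 1   /* illustrative flag, not libxml2's value */

struct entity_model {
    const char *name;
    int flags;
    struct entity_model *body_ref;  /* entity referenced from the body */
};

/* Returns 0 on success, -1 if expansion would recurse into itself. */
static int expand_entity(struct entity_model *ent)
{
    int ret;

    if (ent == NULL)
        return 0;
    if (ent->flags & ENT_EXPANDING) {
        fprintf(stderr, "entity %s: recursion detected\n", ent->name);
        return -1;                      /* would loop forever otherwise */
    }
    ent->flags |= ENT_EXPANDING;        /* mark while on the stack */
    ret = expand_entity(ent->body_ref);
    ent->flags &= ~ENT_EXPANDING;       /* unmark on the way out */
    return ret;
}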
CVE-2017-5120
|
https://www.cvedetails.com/cve/CVE-2017-5120/
| null |
https://github.com/chromium/chromium/commit/b7277af490d28ac7f802c015bb0ff31395768556
|
b7277af490d28ac7f802c015bb0ff31395768556
|
bindings: Support "attribute FrozenArray<T>?"
Adds a quick hack to support a case of "attribute FrozenArray<T>?".
Bug: 1028047
Change-Id: Ib3cecc4beb6bcc0fb0dbc667aca595454cc90c86
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1933866
Reviewed-by: Hitoshi Yoshida <[email protected]>
Commit-Queue: Yuki Shiino <[email protected]>
Cr-Commit-Position: refs/heads/master@{#718676}
|
static void UnforgeableVoidMethodMethod(const v8::FunctionCallbackInfo<v8::Value>& info) {
TestObject* impl = V8TestObject::ToImpl(info.Holder());
impl->unforgeableVoidMethod();
}
|
static void UnforgeableVoidMethodMethod(const v8::FunctionCallbackInfo<v8::Value>& info) {
TestObject* impl = V8TestObject::ToImpl(info.Holder());
impl->unforgeableVoidMethod();
}
|
C
|
Chrome
| 0 |
CVE-2014-3176, CVE-2014-3177
| null | null |
https://github.com/chromium/chromium/commit/2f663de43634c1197a7a2ed8afc12cb6dc565bd0
|
2f663de43634c1197a7a2ed8afc12cb6dc565bd0
|
ozone: fix crash when running video decode unittests.
In GLSurfaceOzoneSurfacelessSurfaceImpl::Destroy, if the previous
context was NULL, do not make it current.
BUG=chromium:602921
TEST=Run vda unittests on oak.
Review URL: https://codereview.chromium.org/1887563002
Cr-Commit-Position: refs/heads/master@{#386988}
|
void GLSurfaceOzoneSurfaceless::PostSubBufferAsync(
int x,
int y,
int width,
int height,
const SwapCompletionCallback& callback) {
SwapBuffersAsync(callback);
}
|
void GLSurfaceOzoneSurfaceless::PostSubBufferAsync(
int x,
int y,
int width,
int height,
const SwapCompletionCallback& callback) {
SwapBuffersAsync(callback);
}
|
C
|
Chrome
| 0 |
CVE-2017-13050
|
https://www.cvedetails.com/cve/CVE-2017-13050/
|
CWE-125
|
https://github.com/the-tcpdump-group/tcpdump/commit/83c64fce3a5226b080e535f5131a8a318f30e79b
|
83c64fce3a5226b080e535f5131a8a318f30e79b
|
CVE-2017-13050/RPKI-Router: fix a few bugs
The decoder didn't properly check that the PDU length stored in the PDU
header is correct. The only check in place was in rpki_rtr_print() and it
tested whether the length is zero but that is not sufficient. Make all
necessary length and bounds checks, both generic and type-specific, in
rpki_rtr_pdu_print() and reduce rpki_rtr_print() to a simple loop.
This also fixes a minor bug and PDU type 0 (Serial Notify from RFC 6810
Section 5.2) is valid again.
In rpki_rtr_pdu_print() any protocol version was considered version 0,
fix it to skip the rest of input if the PDU protocol version is unknown.
Ibid, the PDU type 10 (Error Report from RFC 6810 Section 5.10) case
block didn't consider the "Length of Error Text" data element mandatory,
put it right.
Ibid, when printing an encapsulated PDU, give itself (via recursion)
respective buffer length to make it possible to tell whether the
encapsulated PDU fits. Do not recurse deeper than 2nd level.
Update prior RPKI-Router test cases that now stop decoding earlier
because of the stricter checks.
This fixes a buffer over-read discovered by Bhargava Shastry,
SecT/TU Berlin.
Add a test using the capture file supplied by the reporter(s).
|
rpki_rtr_print(netdissect_options *ndo, register const u_char *pptr, register u_int len)
{
if (!ndo->ndo_vflag) {
ND_PRINT((ndo, ", RPKI-RTR"));
return;
}
while (len) {
u_int pdu_len = rpki_rtr_pdu_print(ndo, pptr, len, 1, 8);
len -= pdu_len;
pptr += pdu_len;
}
}
|
rpki_rtr_print(netdissect_options *ndo, register const u_char *pptr, register u_int len)
{
u_int tlen, pdu_type, pdu_len;
const u_char *tptr;
const rpki_rtr_pdu *pdu_header;
tptr = pptr;
tlen = len;
if (!ndo->ndo_vflag) {
ND_PRINT((ndo, ", RPKI-RTR"));
return;
}
while (tlen >= sizeof(rpki_rtr_pdu)) {
ND_TCHECK2(*tptr, sizeof(rpki_rtr_pdu));
pdu_header = (const rpki_rtr_pdu *)tptr;
pdu_type = pdu_header->pdu_type;
pdu_len = EXTRACT_32BITS(pdu_header->length);
ND_TCHECK2(*tptr, pdu_len);
/* infinite loop check */
if (!pdu_type || !pdu_len) {
break;
}
if (tlen < pdu_len) {
goto trunc;
}
/*
* Print the PDU.
*/
if (rpki_rtr_pdu_print(ndo, tptr, 8))
goto trunc;
tlen -= pdu_len;
tptr += pdu_len;
}
return;
trunc:
ND_PRINT((ndo, "\n\t%s", tstr));
}
|
C
|
tcpdump
| 1 |
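In the fixed version above, rpki_rtr_pdu_print() owns all the validation the old loop did piecemeal and returns the number of bytes it safely consumed. A standalone sketch of the invariant it has to establish before touching the PDU body; the struct layout and names are illustrative stand-ins, not tcpdump's:

#include <stdint.h>

struct pdu_header_model {
    uint8_t  version;
    uint8_t  pdu_type;
    uint16_t session_id;
    uint32_t length;        /* total PDU length, header included */
};

/* Returns the number of bytes safely consumable, or 0 on malformed input. */
static unsigned int checked_pdu_len(unsigned int caplen, uint32_t hdr_len)
{
    if (caplen < sizeof(struct pdu_header_model))
        return 0;           /* can't even read the header */
    if (hdr_len < sizeof(struct pdu_header_model) || hdr_len > caplen)
        return 0;           /* header lies about its own size */
    return hdr_len;         /* body may now be parsed */
}

Checking only for a zero length, as the old rpki_rtr_print() did, misses both failure modes rejected here.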
CVE-2017-12168
|
https://www.cvedetails.com/cve/CVE-2017-12168/
|
CWE-617
|
https://github.com/torvalds/linux/commit/9e3f7a29694049edd728e2400ab57ad7553e5aa9
|
9e3f7a29694049edd728e2400ab57ad7553e5aa9
|
arm64: KVM: pmu: Fix AArch32 cycle counter access
We're missing the handling code for the cycle counter accessed
from a 32bit guest, leading to unexpected results.
Cc: [email protected] # 4.6+
Signed-off-by: Wei Huang <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
|
static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
return -EFAULT;
return 0;
}
|
static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
return -EFAULT;
return 0;
}
|
C
|
linux
| 0 |
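The missing case was the AArch32 view of the cycle counter (PMCCNTR, CP15 c9_c13_0), which has to map onto the same counter slot as the 64-bit PMCCNTR_EL0. A standalone model of the mapping idea — the encoding fields and the decode helper are illustrative, not the verbatim patch:

#include <stdint.h>
#include <stdbool.h>

#define ARMV8_PMU_CYCLE_IDX 31   /* cycle counter slot in the kernel PMU code */

/* c9_c13_0 is PMCCNTR (the cycle counter); other PMU event accesses go
 * through the counter selected by PMSELR.  Missing the first case meant
 * a 32-bit guest read the wrong counter entirely.
 */
static bool is_cycle_counter(uint8_t crn, uint8_t crm, uint8_t op2)
{
    return crn == 9 && crm == 13 && op2 == 0;
}

static unsigned int counter_index(uint8_t crn, uint8_t crm, uint8_t op2,
                                  unsigned int pmselr)
{
    if (is_cycle_counter(crn, crm, op2))
        return ARMV8_PMU_CYCLE_IDX;
    return pmselr;               /* selected event counter */
}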
CVE-2015-0253
|
https://www.cvedetails.com/cve/CVE-2015-0253/
| null |
https://github.com/apache/httpd/commit/6a974059190b8a0c7e499f4ab12fe108127099cb
|
6a974059190b8a0c7e499f4ab12fe108127099cb
|
*) SECURITY: CVE-2015-0253 (cve.mitre.org)
core: Fix a crash introduced in with ErrorDocument 400 pointing
to a local URL-path with the INCLUDES filter active, introduced
in 2.4.11. PR 57531. [Yann Ylavic]
Submitted By: ylavic
Committed By: covener
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1664205 13f79535-47bb-0310-9956-ffa450edef68
|
AP_DECLARE(const char *)ap_make_content_type(request_rec *r, const char *type)
{
const apr_strmatch_pattern **pcset;
core_dir_config *conf =
(core_dir_config *)ap_get_core_module_config(r->per_dir_config);
core_request_config *request_conf;
apr_size_t type_len;
if (!type || *type == '\0') {
return NULL;
}
if (conf->add_default_charset != ADD_DEFAULT_CHARSET_ON) {
return type;
}
request_conf = ap_get_core_module_config(r->request_config);
if (request_conf->suppress_charset) {
return type;
}
type_len = strlen(type);
if (apr_strmatch(charset_pattern, type, type_len) != NULL) {
/* already has parameter, do nothing */
/* XXX we don't check the validity */
;
}
else {
/* see if it makes sense to add the charset. At present,
* we only add it if the Content-type is one of needcset[]
*/
for (pcset = needcset_patterns; *pcset ; pcset++) {
if (apr_strmatch(*pcset, type, type_len) != NULL) {
struct iovec concat[3];
concat[0].iov_base = (void *)type;
concat[0].iov_len = type_len;
concat[1].iov_base = (void *)"; charset=";
concat[1].iov_len = sizeof("; charset=") - 1;
concat[2].iov_base = (void *)(conf->add_default_charset_name);
concat[2].iov_len = strlen(conf->add_default_charset_name);
type = apr_pstrcatv(r->pool, concat, 3, NULL);
break;
}
}
}
return type;
}
|
AP_DECLARE(const char *)ap_make_content_type(request_rec *r, const char *type)
{
const apr_strmatch_pattern **pcset;
core_dir_config *conf =
(core_dir_config *)ap_get_core_module_config(r->per_dir_config);
core_request_config *request_conf;
apr_size_t type_len;
if (!type || *type == '\0') {
return NULL;
}
if (conf->add_default_charset != ADD_DEFAULT_CHARSET_ON) {
return type;
}
request_conf = ap_get_core_module_config(r->request_config);
if (request_conf->suppress_charset) {
return type;
}
type_len = strlen(type);
if (apr_strmatch(charset_pattern, type, type_len) != NULL) {
/* already has parameter, do nothing */
/* XXX we don't check the validity */
;
}
else {
/* see if it makes sense to add the charset. At present,
* we only add it if the Content-type is one of needcset[]
*/
for (pcset = needcset_patterns; *pcset ; pcset++) {
if (apr_strmatch(*pcset, type, type_len) != NULL) {
struct iovec concat[3];
concat[0].iov_base = (void *)type;
concat[0].iov_len = type_len;
concat[1].iov_base = (void *)"; charset=";
concat[1].iov_len = sizeof("; charset=") - 1;
concat[2].iov_base = (void *)(conf->add_default_charset_name);
concat[2].iov_len = strlen(conf->add_default_charset_name);
type = apr_pstrcatv(r->pool, concat, 3, NULL);
break;
}
}
}
return type;
}
|
C
|
httpd
| 0 |
CVE-2012-2673
|
https://www.cvedetails.com/cve/CVE-2012-2673/
|
CWE-189
|
https://github.com/ivmai/bdwgc/commit/6a93f8e5bcad22137f41b6c60a1c7384baaec2b3
|
6a93f8e5bcad22137f41b6c60a1c7384baaec2b3
|
Fix calloc-related code to prevent SIZE_MAX redefinition in sys headers
* malloc.c: Include limits.h for SIZE_MAX.
* malloc.c (SIZE_MAX, calloc): Define GC_SIZE_MAX instead of SIZE_MAX.
|
GC_API void * GC_CALL GC_generic_malloc(size_t lb, int k)
{
void * result;
DCL_LOCK_STATE;
if (EXPECT(GC_have_errors, FALSE))
GC_print_all_errors();
GC_INVOKE_FINALIZERS();
if (SMALL_OBJ(lb)) {
LOCK();
result = GC_generic_malloc_inner((word)lb, k);
UNLOCK();
} else {
size_t lg;
size_t lb_rounded;
word n_blocks;
GC_bool init;
lg = ROUNDED_UP_GRANULES(lb);
lb_rounded = GRANULES_TO_BYTES(lg);
if (lb_rounded < lb)
return((*GC_get_oom_fn())(lb));
n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
init = GC_obj_kinds[k].ok_init;
LOCK();
result = (ptr_t)GC_alloc_large(lb_rounded, k, 0);
if (0 != result) {
if (GC_debugging_started) {
BZERO(result, n_blocks * HBLKSIZE);
} else {
# ifdef THREADS
/* Clear any memory that might be used for GC descriptors */
/* before we release the lock. */
((word *)result)[0] = 0;
((word *)result)[1] = 0;
((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
# endif
}
}
GC_bytes_allocd += lb_rounded;
UNLOCK();
if (init && !GC_debugging_started && 0 != result) {
BZERO(result, n_blocks * HBLKSIZE);
}
}
if (0 == result) {
return((*GC_get_oom_fn())(lb));
} else {
return(result);
}
}
|
GC_API void * GC_CALL GC_generic_malloc(size_t lb, int k)
{
void * result;
DCL_LOCK_STATE;
if (EXPECT(GC_have_errors, FALSE))
GC_print_all_errors();
GC_INVOKE_FINALIZERS();
if (SMALL_OBJ(lb)) {
LOCK();
result = GC_generic_malloc_inner((word)lb, k);
UNLOCK();
} else {
size_t lg;
size_t lb_rounded;
word n_blocks;
GC_bool init;
lg = ROUNDED_UP_GRANULES(lb);
lb_rounded = GRANULES_TO_BYTES(lg);
if (lb_rounded < lb)
return((*GC_get_oom_fn())(lb));
n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
init = GC_obj_kinds[k].ok_init;
LOCK();
result = (ptr_t)GC_alloc_large(lb_rounded, k, 0);
if (0 != result) {
if (GC_debugging_started) {
BZERO(result, n_blocks * HBLKSIZE);
} else {
# ifdef THREADS
/* Clear any memory that might be used for GC descriptors */
/* before we release the lock. */
((word *)result)[0] = 0;
((word *)result)[1] = 0;
((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
# endif
}
}
GC_bytes_allocd += lb_rounded;
UNLOCK();
if (init && !GC_debugging_started && 0 != result) {
BZERO(result, n_blocks * HBLKSIZE);
}
}
if (0 == result) {
return((*GC_get_oom_fn())(lb));
} else {
return(result);
}
}
|
C
|
bdwgc
| 0 |
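The ChangeLog above renames the local fallback so that a system header defining SIZE_MAX cannot collide with it, while keeping the calloc overflow check. A sketch of the resulting pattern, following the commit description rather than quoting malloc.c; GC_calloc_model is an illustrative name:

#include <limits.h>
#include <stddef.h>

/* Prefer the system SIZE_MAX; otherwise fall back without claiming the
 * reserved name, which is what caused the redefinition problem.
 */
#ifdef SIZE_MAX
# define GC_SIZE_MAX SIZE_MAX
#else
# define GC_SIZE_MAX (~(size_t)0)
#endif

/* calloc must reject n * lb wrapping around size_t before allocating. */
void *GC_calloc_model(size_t n, size_t lb)
{
    extern void *GC_malloc(size_t lb);

    if (lb != 0 && n > GC_SIZE_MAX / lb)
        return NULL;             /* product would overflow */
    return GC_malloc(n * lb);    /* collectable memory is zero-filled */
}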
CVE-2011-3209
|
https://www.cvedetails.com/cve/CVE-2011-3209/
|
CWE-189
|
https://github.com/torvalds/linux/commit/f8bd2258e2d520dff28c855658bd24bdafb5102d
|
f8bd2258e2d520dff28c855658bd24bdafb5102d
|
remove div_long_long_rem
x86 is the only arch right now which provides an optimized version of
div_long_long_rem, and it has the downside that one has to be very careful that
the divide doesn't overflow.
The API is a little awkward, as the arguments for the unsigned divide are
signed. The signed version also doesn't handle a negative divisor and
produces worse code on 64bit archs.
There is little incentive to keep this API alive, so this converts the few
users to the new API.
Signed-off-by: Roman Zippel <[email protected]>
Cc: Ralf Baechle <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: john stultz <[email protected]>
Cc: Christoph Lameter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static int list_locations(struct kmem_cache *s, char *buf,
enum track_item alloc)
{
int len = 0;
unsigned long i;
struct loc_track t = { 0, 0, NULL };
int node;
if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
GFP_TEMPORARY))
return sprintf(buf, "Out of memory\n");
/* Push back cpu slabs */
flush_all(s);
for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n = get_node(s, node);
unsigned long flags;
struct page *page;
if (!atomic_long_read(&n->nr_slabs))
continue;
spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, lru)
process_slab(&t, s, page, alloc);
list_for_each_entry(page, &n->full, lru)
process_slab(&t, s, page, alloc);
spin_unlock_irqrestore(&n->list_lock, flags);
}
for (i = 0; i < t.count; i++) {
struct location *l = &t.loc[i];
if (len > PAGE_SIZE - 100)
break;
len += sprintf(buf + len, "%7ld ", l->count);
if (l->addr)
len += sprint_symbol(buf + len, (unsigned long)l->addr);
else
len += sprintf(buf + len, "<not-available>");
if (l->sum_time != l->min_time) {
len += sprintf(buf + len, " age=%ld/%ld/%ld",
l->min_time,
(long)div_u64(l->sum_time, l->count),
l->max_time);
} else
len += sprintf(buf + len, " age=%ld",
l->min_time);
if (l->min_pid != l->max_pid)
len += sprintf(buf + len, " pid=%ld-%ld",
l->min_pid, l->max_pid);
else
len += sprintf(buf + len, " pid=%ld",
l->min_pid);
if (num_online_cpus() > 1 && !cpus_empty(l->cpus) &&
len < PAGE_SIZE - 60) {
len += sprintf(buf + len, " cpus=");
len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
l->cpus);
}
if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
len < PAGE_SIZE - 60) {
len += sprintf(buf + len, " nodes=");
len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
l->nodes);
}
len += sprintf(buf + len, "\n");
}
free_loc_track(&t);
if (!t.count)
len += sprintf(buf, "No data\n");
return len;
}
|
static int list_locations(struct kmem_cache *s, char *buf,
enum track_item alloc)
{
int len = 0;
unsigned long i;
struct loc_track t = { 0, 0, NULL };
int node;
if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
GFP_TEMPORARY))
return sprintf(buf, "Out of memory\n");
/* Push back cpu slabs */
flush_all(s);
for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n = get_node(s, node);
unsigned long flags;
struct page *page;
if (!atomic_long_read(&n->nr_slabs))
continue;
spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, lru)
process_slab(&t, s, page, alloc);
list_for_each_entry(page, &n->full, lru)
process_slab(&t, s, page, alloc);
spin_unlock_irqrestore(&n->list_lock, flags);
}
for (i = 0; i < t.count; i++) {
struct location *l = &t.loc[i];
if (len > PAGE_SIZE - 100)
break;
len += sprintf(buf + len, "%7ld ", l->count);
if (l->addr)
len += sprint_symbol(buf + len, (unsigned long)l->addr);
else
len += sprintf(buf + len, "<not-available>");
if (l->sum_time != l->min_time) {
unsigned long remainder;
len += sprintf(buf + len, " age=%ld/%ld/%ld",
l->min_time,
div_long_long_rem(l->sum_time, l->count, &remainder),
l->max_time);
} else
len += sprintf(buf + len, " age=%ld",
l->min_time);
if (l->min_pid != l->max_pid)
len += sprintf(buf + len, " pid=%ld-%ld",
l->min_pid, l->max_pid);
else
len += sprintf(buf + len, " pid=%ld",
l->min_pid);
if (num_online_cpus() > 1 && !cpus_empty(l->cpus) &&
len < PAGE_SIZE - 60) {
len += sprintf(buf + len, " cpus=");
len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
l->cpus);
}
if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
len < PAGE_SIZE - 60) {
len += sprintf(buf + len, " nodes=");
len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
l->nodes);
}
len += sprintf(buf + len, "\n");
}
free_loc_track(&t);
if (!t.count)
len += sprintf(buf, "No data\n");
return len;
}
|
C
|
linux
| 1 |
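The conversion in the diff above swaps div_long_long_rem(), whose signed arguments invited overflow, for the unsigned div_u64() family from linux/math64.h. Portable stand-ins with the same shapes, plus a usage line mirroring list_locations():

#include <stdint.h>

typedef uint64_t u64;
typedef uint32_t u32;

/* Stand-ins for the linux/math64.h helpers (same signatures). */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
    *remainder = (u32)(dividend % divisor);
    return dividend / divisor;
}

static inline u64 div_u64(u64 dividend, u32 divisor)
{
    u32 rem;
    return div_u64_rem(dividend, divisor, &rem);
}

/* Usage as in the fixed list_locations():
 *   (long)div_u64(l->sum_time, l->count)   -- average age, remainder unused.
 */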
CVE-2017-12154
|
https://www.cvedetails.com/cve/CVE-2017-12154/
| null |
https://github.com/torvalds/linux/commit/51aa68e7d57e3217192d88ce90fd5b8ef29ec94f
|
51aa68e7d57e3217192d88ce90fd5b8ef29ec94f
|
kvm: nVMX: Don't allow L2 to access the hardware CR8
If L1 does not specify the "use TPR shadow" VM-execution control in
vmcs12, then L0 must specify the "CR8-load exiting" and "CR8-store
exiting" VM-execution controls in vmcs02. Failure to do so will give
the L2 VM unrestricted read/write access to the hardware CR8.
This fixes CVE-2017-12154.
Signed-off-by: Jim Mattson <[email protected]>
Reviewed-by: David Hildenbrand <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
{
if (enable_ept)
vmx_flush_tlb(vcpu);
}
|
static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
{
if (enable_ept)
vmx_flush_tlb(vcpu);
}
|
C
|
linux
| 0 |
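The control-bit rule the commit describes can be sketched as a pure function; the constants are the standard VMX execution-control bit names (Intel SDM), but the helper itself is an illustrative model, not the patch:

#include <stdint.h>

#define CPU_BASED_CR8_LOAD_EXITING  (1u << 19)
#define CPU_BASED_CR8_STORE_EXITING (1u << 20)
#define CPU_BASED_TPR_SHADOW        (1u << 21)

/* If L1 does not use the TPR shadow, L0 must make vmcs02 trap L2's CR8
 * accesses; otherwise L2 gets raw read/write access to the hardware CR8.
 */
static uint32_t vmcs02_cpu_based_controls(uint32_t vmcs12_controls)
{
    uint32_t exec_control = vmcs12_controls;

    if (!(exec_control & CPU_BASED_TPR_SHADOW))
        exec_control |= CPU_BASED_CR8_LOAD_EXITING |
                        CPU_BASED_CR8_STORE_EXITING;
    return exec_control;
}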
CVE-2013-4205
|
https://www.cvedetails.com/cve/CVE-2013-4205/
|
CWE-399
|
https://github.com/torvalds/linux/commit/6160968cee8b90a5dd95318d716e31d7775c4ef3
|
6160968cee8b90a5dd95318d716e31d7775c4ef3
|
userns: unshare_userns(&cred) should not populate cred on failure
unshare_userns(new_cred) does *new_cred = prepare_creds() before
create_user_ns() which can fail. However, the caller expects that
it doesn't need to take care of new_cred if unshare_userns() fails.
We could change the single caller, sys_unshare(), but I think it
would be more clean to avoid the side effects on failure, so with
this patch unshare_userns() does put_cred() itself and initializes
*new_cred only if create_user_ns() succeeds.
Cc: [email protected]
Signed-off-by: Oleg Nesterov <[email protected]>
Reviewed-by: Andy Lutomirski <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
ssize_t proc_uid_map_write(struct file *file, const char __user *buf, size_t size, loff_t *ppos)
{
struct seq_file *seq = file->private_data;
struct user_namespace *ns = seq->private;
struct user_namespace *seq_ns = seq_user_ns(seq);
if (!ns->parent)
return -EPERM;
if ((seq_ns != ns) && (seq_ns != ns->parent))
return -EPERM;
return map_write(file, buf, size, ppos, CAP_SETUID,
&ns->uid_map, &ns->parent->uid_map);
}
|
ssize_t proc_uid_map_write(struct file *file, const char __user *buf, size_t size, loff_t *ppos)
{
struct seq_file *seq = file->private_data;
struct user_namespace *ns = seq->private;
struct user_namespace *seq_ns = seq_user_ns(seq);
if (!ns->parent)
return -EPERM;
if ((seq_ns != ns) && (seq_ns != ns->parent))
return -EPERM;
return map_write(file, buf, size, ppos, CAP_SETUID,
&ns->uid_map, &ns->parent->uid_map);
}
|
C
|
linux
| 0 |
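The shape of the fix described above: prepare the credentials, but publish them through *new_cred only after create_user_ns() succeeds, dropping them locally on failure. A hedged sketch assuming kernel context (prepare_creds, put_cred, create_user_ns, CLONE_NEWUSER); it follows the commit text, not the verbatim patch:

int unshare_userns_model(unsigned long unshare_flags, struct cred **new_cred)
{
	struct cred *cred;
	int err;

	if (!(unshare_flags & CLONE_NEWUSER))
		return 0;

	cred = prepare_creds();
	if (!cred)
		return -ENOMEM;

	err = create_user_ns(cred);
	if (err)
		put_cred(cred);    /* don't leak, and don't touch *new_cred */
	else
		*new_cred = cred;  /* publish only on success */
	return err;
}

This keeps the caller's contract: sys_unshare() never has to clean up new_cred when unshare_userns() fails.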
CVE-2016-7914
|
https://www.cvedetails.com/cve/CVE-2016-7914/
|
CWE-125
|
https://github.com/torvalds/linux/commit/8d4a2ec1e0b41b0cf9a0c5cd4511da7f8e4f3de2
|
8d4a2ec1e0b41b0cf9a0c5cd4511da7f8e4f3de2
|
assoc_array: don't call compare_object() on a node
Changes since V1: fixed the description and added KASan warning.
In assoc_array_insert_into_terminal_node(), we call the
compare_object() method on all non-empty slots, even when they're
not leaves, passing a pointer to an unexpected structure to
compare_object(). Currently it causes an out-of-bounds read access
in keyring_compare_object detected by KASan (see below). The issue
is easily reproduced with the keyutils testsuite.
Only call compare_object() when the slot is a leaf.
KASan warning:
==================================================================
BUG: KASAN: slab-out-of-bounds in keyring_compare_object+0x213/0x240 at addr ffff880060a6f838
Read of size 8 by task keyctl/1655
=============================================================================
BUG kmalloc-192 (Not tainted): kasan: bad access detected
-----------------------------------------------------------------------------
Disabling lock debugging due to kernel taint
INFO: Allocated in assoc_array_insert+0xfd0/0x3a60 age=69 cpu=1 pid=1647
___slab_alloc+0x563/0x5c0
__slab_alloc+0x51/0x90
kmem_cache_alloc_trace+0x263/0x300
assoc_array_insert+0xfd0/0x3a60
__key_link_begin+0xfc/0x270
key_create_or_update+0x459/0xaf0
SyS_add_key+0x1ba/0x350
entry_SYSCALL_64_fastpath+0x12/0x76
INFO: Slab 0xffffea0001829b80 objects=16 used=8 fp=0xffff880060a6f550 flags=0x3fff8000004080
INFO: Object 0xffff880060a6f740 @offset=5952 fp=0xffff880060a6e5d1
Bytes b4 ffff880060a6f730: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
Object ffff880060a6f740: d1 e5 a6 60 00 88 ff ff 0e 00 00 00 00 00 00 00 ...`............
Object ffff880060a6f750: 02 cf 8e 60 00 88 ff ff 02 c0 8e 60 00 88 ff ff ...`.......`....
Object ffff880060a6f760: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
Object ffff880060a6f770: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
Object ffff880060a6f780: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
Object ffff880060a6f790: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
Object ffff880060a6f7a0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
Object ffff880060a6f7b0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
Object ffff880060a6f7c0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
Object ffff880060a6f7d0: 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
Object ffff880060a6f7e0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
Object ffff880060a6f7f0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
CPU: 0 PID: 1655 Comm: keyctl Tainted: G B 4.5.0-rc4-kasan+ #291
Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
0000000000000000 000000001b2800b4 ffff880060a179e0 ffffffff81b60491
ffff88006c802900 ffff880060a6f740 ffff880060a17a10 ffffffff815e2969
ffff88006c802900 ffffea0001829b80 ffff880060a6f740 ffff880060a6e650
Call Trace:
[<ffffffff81b60491>] dump_stack+0x85/0xc4
[<ffffffff815e2969>] print_trailer+0xf9/0x150
[<ffffffff815e9454>] object_err+0x34/0x40
[<ffffffff815ebe50>] kasan_report_error+0x230/0x550
[<ffffffff819949be>] ? keyring_get_key_chunk+0x13e/0x210
[<ffffffff815ec62d>] __asan_report_load_n_noabort+0x5d/0x70
[<ffffffff81994cc3>] ? keyring_compare_object+0x213/0x240
[<ffffffff81994cc3>] keyring_compare_object+0x213/0x240
[<ffffffff81bc238c>] assoc_array_insert+0x86c/0x3a60
[<ffffffff81bc1b20>] ? assoc_array_cancel_edit+0x70/0x70
[<ffffffff8199797d>] ? __key_link_begin+0x20d/0x270
[<ffffffff8199786c>] __key_link_begin+0xfc/0x270
[<ffffffff81993389>] key_create_or_update+0x459/0xaf0
[<ffffffff8128ce0d>] ? trace_hardirqs_on+0xd/0x10
[<ffffffff81992f30>] ? key_type_lookup+0xc0/0xc0
[<ffffffff8199e19d>] ? lookup_user_key+0x13d/0xcd0
[<ffffffff81534763>] ? memdup_user+0x53/0x80
[<ffffffff819983ea>] SyS_add_key+0x1ba/0x350
[<ffffffff81998230>] ? key_get_type_from_user.constprop.6+0xa0/0xa0
[<ffffffff828bcf4e>] ? retint_user+0x18/0x23
[<ffffffff8128cc7e>] ? trace_hardirqs_on_caller+0x3fe/0x580
[<ffffffff81004017>] ? trace_hardirqs_on_thunk+0x17/0x19
[<ffffffff828bc432>] entry_SYSCALL_64_fastpath+0x12/0x76
Memory state around the buggy address:
ffff880060a6f700: fc fc fc fc fc fc fc fc 00 00 00 00 00 00 00 00
ffff880060a6f780: 00 00 00 00 00 00 00 00 00 00 00 fc fc fc fc fc
>ffff880060a6f800: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
^
ffff880060a6f880: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
ffff880060a6f900: fc fc fc fc fc fc 00 00 00 00 00 00 00 00 00 00
==================================================================
Signed-off-by: Jerome Marchand <[email protected]>
Signed-off-by: David Howells <[email protected]>
cc: [email protected]
|
static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
const struct assoc_array_ops *ops,
const void *index_key,
struct assoc_array_walk_result *result)
{
struct assoc_array_shortcut *shortcut, *new_s0;
struct assoc_array_node *node, *new_n0, *new_n1, *side;
struct assoc_array_ptr *ptr;
unsigned long dissimilarity, base_seg, blank;
size_t keylen;
bool have_meta;
int level, diff;
int slot, next_slot, free_slot, i, j;
node = result->terminal_node.node;
level = result->terminal_node.level;
edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = result->terminal_node.slot;
pr_devel("-->%s()\n", __func__);
/* We arrived at a node which doesn't have an onward node or shortcut
* pointer that we have to follow. This means that (a) the leaf we
* want must go here (either by insertion or replacement) or (b) we
* need to split this node and insert in one of the fragments.
*/
free_slot = -1;
/* Firstly, we have to check the leaves in this node to see if there's
* a matching one we should replace in place.
*/
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
ptr = node->slots[i];
if (!ptr) {
free_slot = i;
continue;
}
if (assoc_array_ptr_is_leaf(ptr) &&
ops->compare_object(assoc_array_ptr_to_leaf(ptr),
index_key)) {
pr_devel("replace in slot %d\n", i);
edit->leaf_p = &node->slots[i];
edit->dead_leaf = node->slots[i];
pr_devel("<--%s() = ok [replace]\n", __func__);
return true;
}
}
/* If there is a free slot in this node then we can just insert the
* leaf here.
*/
if (free_slot >= 0) {
pr_devel("insert in free slot %d\n", free_slot);
edit->leaf_p = &node->slots[free_slot];
edit->adjust_count_on = node;
pr_devel("<--%s() = ok [insert]\n", __func__);
return true;
}
/* The node has no spare slots - so we're either going to have to split
* it or insert another node before it.
*
* Whatever, we're going to need at least two new nodes - so allocate
* those now. We may also need a new shortcut, but we deal with that
* when we need it.
*/
new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
if (!new_n0)
return false;
edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
new_n1 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
if (!new_n1)
return false;
edit->new_meta[1] = assoc_array_node_to_ptr(new_n1);
/* We need to find out how similar the leaves are. */
pr_devel("no spare slots\n");
have_meta = false;
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
ptr = node->slots[i];
if (assoc_array_ptr_is_meta(ptr)) {
edit->segment_cache[i] = 0xff;
have_meta = true;
continue;
}
base_seg = ops->get_object_key_chunk(
assoc_array_ptr_to_leaf(ptr), level);
base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK;
}
if (have_meta) {
pr_devel("have meta\n");
goto split_node;
}
/* The node contains only leaves */
dissimilarity = 0;
base_seg = edit->segment_cache[0];
for (i = 1; i < ASSOC_ARRAY_FAN_OUT; i++)
dissimilarity |= edit->segment_cache[i] ^ base_seg;
pr_devel("only leaves; dissimilarity=%lx\n", dissimilarity);
if ((dissimilarity & ASSOC_ARRAY_FAN_MASK) == 0) {
/* The old leaves all cluster in the same slot. We will need
* to insert a shortcut if the new node wants to cluster with them.
*/
if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
goto all_leaves_cluster_together;
/* Otherwise we can just insert a new node ahead of the old
* one.
*/
goto present_leaves_cluster_but_not_new_leaf;
}
split_node:
pr_devel("split node\n");
/* We need to split the current node; we know that the node doesn't
* simply contain a full set of leaves that cluster together (it
* contains meta pointers and/or non-clustering leaves).
*
* We need to expel at least two leaves out of a set consisting of the
* leaves in the node and the new leaf.
*
* We need a new node (n0) to replace the current one and a new node to
* take the expelled nodes (n1).
*/
edit->set[0].to = assoc_array_node_to_ptr(new_n0);
new_n0->back_pointer = node->back_pointer;
new_n0->parent_slot = node->parent_slot;
new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
new_n1->parent_slot = -1; /* Need to calculate this */
do_split_node:
pr_devel("do_split_node\n");
new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
new_n1->nr_leaves_on_branch = 0;
/* Begin by finding two matching leaves. There have to be at least two
* that match - even if there are meta pointers - because any leaf that
* would match a slot with a meta pointer in it must be somewhere
* behind that meta pointer and cannot be here. Further, given N
* remaining leaf slots, we now have N+1 leaves to go in them.
*/
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
slot = edit->segment_cache[i];
if (slot != 0xff)
for (j = i + 1; j < ASSOC_ARRAY_FAN_OUT + 1; j++)
if (edit->segment_cache[j] == slot)
goto found_slot_for_multiple_occupancy;
}
found_slot_for_multiple_occupancy:
pr_devel("same slot: %x %x [%02x]\n", i, j, slot);
BUG_ON(i >= ASSOC_ARRAY_FAN_OUT);
BUG_ON(j >= ASSOC_ARRAY_FAN_OUT + 1);
BUG_ON(slot >= ASSOC_ARRAY_FAN_OUT);
new_n1->parent_slot = slot;
/* Metadata pointers cannot change slot */
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
if (assoc_array_ptr_is_meta(node->slots[i]))
new_n0->slots[i] = node->slots[i];
else
new_n0->slots[i] = NULL;
BUG_ON(new_n0->slots[slot] != NULL);
new_n0->slots[slot] = assoc_array_node_to_ptr(new_n1);
/* Filter the leaf pointers between the new nodes */
free_slot = -1;
next_slot = 0;
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
if (assoc_array_ptr_is_meta(node->slots[i]))
continue;
if (edit->segment_cache[i] == slot) {
new_n1->slots[next_slot++] = node->slots[i];
new_n1->nr_leaves_on_branch++;
} else {
do {
free_slot++;
} while (new_n0->slots[free_slot] != NULL);
new_n0->slots[free_slot] = node->slots[i];
}
}
pr_devel("filtered: f=%x n=%x\n", free_slot, next_slot);
if (edit->segment_cache[ASSOC_ARRAY_FAN_OUT] != slot) {
do {
free_slot++;
} while (new_n0->slots[free_slot] != NULL);
edit->leaf_p = &new_n0->slots[free_slot];
edit->adjust_count_on = new_n0;
} else {
edit->leaf_p = &new_n1->slots[next_slot++];
edit->adjust_count_on = new_n1;
}
BUG_ON(next_slot <= 1);
edit->set_backpointers_to = assoc_array_node_to_ptr(new_n0);
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
if (edit->segment_cache[i] == 0xff) {
ptr = node->slots[i];
BUG_ON(assoc_array_ptr_is_leaf(ptr));
if (assoc_array_ptr_is_node(ptr)) {
side = assoc_array_ptr_to_node(ptr);
edit->set_backpointers[i] = &side->back_pointer;
} else {
shortcut = assoc_array_ptr_to_shortcut(ptr);
edit->set_backpointers[i] = &shortcut->back_pointer;
}
}
}
ptr = node->back_pointer;
if (!ptr)
edit->set[0].ptr = &edit->array->root;
else if (assoc_array_ptr_is_node(ptr))
edit->set[0].ptr = &assoc_array_ptr_to_node(ptr)->slots[node->parent_slot];
else
edit->set[0].ptr = &assoc_array_ptr_to_shortcut(ptr)->next_node;
edit->excised_meta[0] = assoc_array_node_to_ptr(node);
pr_devel("<--%s() = ok [split node]\n", __func__);
return true;
present_leaves_cluster_but_not_new_leaf:
/* All the old leaves cluster in the same slot, but the new leaf wants
* to go into a different slot, so we create a new node to hold the new
* leaf and a pointer to a new node holding all the old leaves.
*/
pr_devel("present leaves cluster but not new leaf\n");
new_n0->back_pointer = node->back_pointer;
new_n0->parent_slot = node->parent_slot;
new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
new_n1->parent_slot = edit->segment_cache[0];
new_n1->nr_leaves_on_branch = node->nr_leaves_on_branch;
edit->adjust_count_on = new_n0;
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
new_n1->slots[i] = node->slots[i];
new_n0->slots[edit->segment_cache[0]] = assoc_array_node_to_ptr(new_n0);
edit->leaf_p = &new_n0->slots[edit->segment_cache[ASSOC_ARRAY_FAN_OUT]];
edit->set[0].ptr = &assoc_array_ptr_to_node(node->back_pointer)->slots[node->parent_slot];
edit->set[0].to = assoc_array_node_to_ptr(new_n0);
edit->excised_meta[0] = assoc_array_node_to_ptr(node);
pr_devel("<--%s() = ok [insert node before]\n", __func__);
return true;
all_leaves_cluster_together:
/* All the leaves, new and old, want to cluster together in this node
* in the same slot, so we have to replace this node with a shortcut to
* skip over the identical parts of the key and then place a pair of
* nodes, one inside the other, at the end of the shortcut and
* distribute the keys between them.
*
* Firstly we need to work out where the leaves start diverging as a
* bit position into their keys so that we know how big the shortcut
* needs to be.
*
* We only need to make a single pass of N of the N+1 leaves because if
* any keys differ between themselves at bit X then at least one of
* them must also differ with the base key at bit X or before.
*/
pr_devel("all leaves cluster together\n");
diff = INT_MAX;
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
int x = ops->diff_objects(assoc_array_ptr_to_leaf(node->slots[i]),
index_key);
if (x < diff) {
BUG_ON(x < 0);
diff = x;
}
}
BUG_ON(diff == INT_MAX);
BUG_ON(diff < level + ASSOC_ARRAY_LEVEL_STEP);
keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
new_s0 = kzalloc(sizeof(struct assoc_array_shortcut) +
keylen * sizeof(unsigned long), GFP_KERNEL);
if (!new_s0)
return false;
edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s0);
edit->set[0].to = assoc_array_shortcut_to_ptr(new_s0);
new_s0->back_pointer = node->back_pointer;
new_s0->parent_slot = node->parent_slot;
new_s0->next_node = assoc_array_node_to_ptr(new_n0);
new_n0->back_pointer = assoc_array_shortcut_to_ptr(new_s0);
new_n0->parent_slot = 0;
new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
new_n1->parent_slot = -1; /* Need to calculate this */
new_s0->skip_to_level = level = diff & ~ASSOC_ARRAY_LEVEL_STEP_MASK;
pr_devel("skip_to_level = %d [diff %d]\n", level, diff);
BUG_ON(level <= 0);
for (i = 0; i < keylen; i++)
new_s0->index_key[i] =
ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE);
blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
new_s0->index_key[keylen - 1] &= ~blank;
/* This now reduces to a node splitting exercise for which we'll need
* to regenerate the disparity table.
*/
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
ptr = node->slots[i];
base_seg = ops->get_object_key_chunk(assoc_array_ptr_to_leaf(ptr),
level);
base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK;
}
base_seg = ops->get_key_chunk(index_key, level);
base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = base_seg & ASSOC_ARRAY_FAN_MASK;
goto do_split_node;
}
|
static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
const struct assoc_array_ops *ops,
const void *index_key,
struct assoc_array_walk_result *result)
{
struct assoc_array_shortcut *shortcut, *new_s0;
struct assoc_array_node *node, *new_n0, *new_n1, *side;
struct assoc_array_ptr *ptr;
unsigned long dissimilarity, base_seg, blank;
size_t keylen;
bool have_meta;
int level, diff;
int slot, next_slot, free_slot, i, j;
node = result->terminal_node.node;
level = result->terminal_node.level;
edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = result->terminal_node.slot;
pr_devel("-->%s()\n", __func__);
/* We arrived at a node which doesn't have an onward node or shortcut
* pointer that we have to follow. This means that (a) the leaf we
* want must go here (either by insertion or replacement) or (b) we
* need to split this node and insert in one of the fragments.
*/
free_slot = -1;
/* Firstly, we have to check the leaves in this node to see if there's
* a matching one we should replace in place.
*/
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
ptr = node->slots[i];
if (!ptr) {
free_slot = i;
continue;
}
if (ops->compare_object(assoc_array_ptr_to_leaf(ptr), index_key)) {
pr_devel("replace in slot %d\n", i);
edit->leaf_p = &node->slots[i];
edit->dead_leaf = node->slots[i];
pr_devel("<--%s() = ok [replace]\n", __func__);
return true;
}
}
/* If there is a free slot in this node then we can just insert the
* leaf here.
*/
if (free_slot >= 0) {
pr_devel("insert in free slot %d\n", free_slot);
edit->leaf_p = &node->slots[free_slot];
edit->adjust_count_on = node;
pr_devel("<--%s() = ok [insert]\n", __func__);
return true;
}
/* The node has no spare slots - so we're either going to have to split
* it or insert another node before it.
*
* Whatever, we're going to need at least two new nodes - so allocate
* those now. We may also need a new shortcut, but we deal with that
* when we need it.
*/
new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
if (!new_n0)
return false;
edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
new_n1 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
if (!new_n1)
return false;
edit->new_meta[1] = assoc_array_node_to_ptr(new_n1);
/* We need to find out how similar the leaves are. */
pr_devel("no spare slots\n");
have_meta = false;
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
ptr = node->slots[i];
if (assoc_array_ptr_is_meta(ptr)) {
edit->segment_cache[i] = 0xff;
have_meta = true;
continue;
}
base_seg = ops->get_object_key_chunk(
assoc_array_ptr_to_leaf(ptr), level);
base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK;
}
if (have_meta) {
pr_devel("have meta\n");
goto split_node;
}
/* The node contains only leaves */
dissimilarity = 0;
base_seg = edit->segment_cache[0];
for (i = 1; i < ASSOC_ARRAY_FAN_OUT; i++)
dissimilarity |= edit->segment_cache[i] ^ base_seg;
pr_devel("only leaves; dissimilarity=%lx\n", dissimilarity);
if ((dissimilarity & ASSOC_ARRAY_FAN_MASK) == 0) {
/* The old leaves all cluster in the same slot. We will need
* to insert a shortcut if the new node wants to cluster with them.
*/
if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
goto all_leaves_cluster_together;
/* Otherwise we can just insert a new node ahead of the old
* one.
*/
goto present_leaves_cluster_but_not_new_leaf;
}
split_node:
pr_devel("split node\n");
/* We need to split the current node; we know that the node doesn't
* simply contain a full set of leaves that cluster together (it
* contains meta pointers and/or non-clustering leaves).
*
* We need to expel at least two leaves out of a set consisting of the
* leaves in the node and the new leaf.
*
* We need a new node (n0) to replace the current one and a new node to
* take the expelled nodes (n1).
*/
edit->set[0].to = assoc_array_node_to_ptr(new_n0);
new_n0->back_pointer = node->back_pointer;
new_n0->parent_slot = node->parent_slot;
new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
new_n1->parent_slot = -1; /* Need to calculate this */
do_split_node:
pr_devel("do_split_node\n");
new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
new_n1->nr_leaves_on_branch = 0;
/* Begin by finding two matching leaves. There have to be at least two
* that match - even if there are meta pointers - because any leaf that
* would match a slot with a meta pointer in it must be somewhere
* behind that meta pointer and cannot be here. Further, given N
* remaining leaf slots, we now have N+1 leaves to go in them.
*/
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
slot = edit->segment_cache[i];
if (slot != 0xff)
for (j = i + 1; j < ASSOC_ARRAY_FAN_OUT + 1; j++)
if (edit->segment_cache[j] == slot)
goto found_slot_for_multiple_occupancy;
}
found_slot_for_multiple_occupancy:
pr_devel("same slot: %x %x [%02x]\n", i, j, slot);
BUG_ON(i >= ASSOC_ARRAY_FAN_OUT);
BUG_ON(j >= ASSOC_ARRAY_FAN_OUT + 1);
BUG_ON(slot >= ASSOC_ARRAY_FAN_OUT);
new_n1->parent_slot = slot;
/* Metadata pointers cannot change slot */
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
if (assoc_array_ptr_is_meta(node->slots[i]))
new_n0->slots[i] = node->slots[i];
else
new_n0->slots[i] = NULL;
BUG_ON(new_n0->slots[slot] != NULL);
new_n0->slots[slot] = assoc_array_node_to_ptr(new_n1);
/* Filter the leaf pointers between the new nodes */
free_slot = -1;
next_slot = 0;
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
if (assoc_array_ptr_is_meta(node->slots[i]))
continue;
if (edit->segment_cache[i] == slot) {
new_n1->slots[next_slot++] = node->slots[i];
new_n1->nr_leaves_on_branch++;
} else {
do {
free_slot++;
} while (new_n0->slots[free_slot] != NULL);
new_n0->slots[free_slot] = node->slots[i];
}
}
pr_devel("filtered: f=%x n=%x\n", free_slot, next_slot);
if (edit->segment_cache[ASSOC_ARRAY_FAN_OUT] != slot) {
do {
free_slot++;
} while (new_n0->slots[free_slot] != NULL);
edit->leaf_p = &new_n0->slots[free_slot];
edit->adjust_count_on = new_n0;
} else {
edit->leaf_p = &new_n1->slots[next_slot++];
edit->adjust_count_on = new_n1;
}
BUG_ON(next_slot <= 1);
edit->set_backpointers_to = assoc_array_node_to_ptr(new_n0);
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
if (edit->segment_cache[i] == 0xff) {
ptr = node->slots[i];
BUG_ON(assoc_array_ptr_is_leaf(ptr));
if (assoc_array_ptr_is_node(ptr)) {
side = assoc_array_ptr_to_node(ptr);
edit->set_backpointers[i] = &side->back_pointer;
} else {
shortcut = assoc_array_ptr_to_shortcut(ptr);
edit->set_backpointers[i] = &shortcut->back_pointer;
}
}
}
ptr = node->back_pointer;
if (!ptr)
edit->set[0].ptr = &edit->array->root;
else if (assoc_array_ptr_is_node(ptr))
edit->set[0].ptr = &assoc_array_ptr_to_node(ptr)->slots[node->parent_slot];
else
edit->set[0].ptr = &assoc_array_ptr_to_shortcut(ptr)->next_node;
edit->excised_meta[0] = assoc_array_node_to_ptr(node);
pr_devel("<--%s() = ok [split node]\n", __func__);
return true;
present_leaves_cluster_but_not_new_leaf:
/* All the old leaves cluster in the same slot, but the new leaf wants
* to go into a different slot, so we create a new node to hold the new
* leaf and a pointer to a new node holding all the old leaves.
*/
pr_devel("present leaves cluster but not new leaf\n");
new_n0->back_pointer = node->back_pointer;
new_n0->parent_slot = node->parent_slot;
new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
new_n1->parent_slot = edit->segment_cache[0];
new_n1->nr_leaves_on_branch = node->nr_leaves_on_branch;
edit->adjust_count_on = new_n0;
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
new_n1->slots[i] = node->slots[i];
new_n0->slots[edit->segment_cache[0]] = assoc_array_node_to_ptr(new_n0);
edit->leaf_p = &new_n0->slots[edit->segment_cache[ASSOC_ARRAY_FAN_OUT]];
edit->set[0].ptr = &assoc_array_ptr_to_node(node->back_pointer)->slots[node->parent_slot];
edit->set[0].to = assoc_array_node_to_ptr(new_n0);
edit->excised_meta[0] = assoc_array_node_to_ptr(node);
pr_devel("<--%s() = ok [insert node before]\n", __func__);
return true;
all_leaves_cluster_together:
/* All the leaves, new and old, want to cluster together in this node
* in the same slot, so we have to replace this node with a shortcut to
* skip over the identical parts of the key and then place a pair of
* nodes, one inside the other, at the end of the shortcut and
* distribute the keys between them.
*
* Firstly we need to work out where the leaves start diverging as a
* bit position into their keys so that we know how big the shortcut
* needs to be.
*
* We only need to make a single pass of N of the N+1 leaves because if
* any keys differ between themselves at bit X then at least one of
* them must also differ with the base key at bit X or before.
*/
pr_devel("all leaves cluster together\n");
diff = INT_MAX;
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
int x = ops->diff_objects(assoc_array_ptr_to_leaf(node->slots[i]),
index_key);
if (x < diff) {
BUG_ON(x < 0);
diff = x;
}
}
BUG_ON(diff == INT_MAX);
BUG_ON(diff < level + ASSOC_ARRAY_LEVEL_STEP);
keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
new_s0 = kzalloc(sizeof(struct assoc_array_shortcut) +
keylen * sizeof(unsigned long), GFP_KERNEL);
if (!new_s0)
return false;
edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s0);
edit->set[0].to = assoc_array_shortcut_to_ptr(new_s0);
new_s0->back_pointer = node->back_pointer;
new_s0->parent_slot = node->parent_slot;
new_s0->next_node = assoc_array_node_to_ptr(new_n0);
new_n0->back_pointer = assoc_array_shortcut_to_ptr(new_s0);
new_n0->parent_slot = 0;
new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
new_n1->parent_slot = -1; /* Need to calculate this */
new_s0->skip_to_level = level = diff & ~ASSOC_ARRAY_LEVEL_STEP_MASK;
pr_devel("skip_to_level = %d [diff %d]\n", level, diff);
BUG_ON(level <= 0);
for (i = 0; i < keylen; i++)
new_s0->index_key[i] =
ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE);
blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
new_s0->index_key[keylen - 1] &= ~blank;
/* This now reduces to a node splitting exercise for which we'll need
* to regenerate the disparity table.
*/
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
ptr = node->slots[i];
base_seg = ops->get_object_key_chunk(assoc_array_ptr_to_leaf(ptr),
level);
base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK;
}
base_seg = ops->get_key_chunk(index_key, level);
base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = base_seg & ASSOC_ARRAY_FAN_MASK;
goto do_split_node;
}
|
C
|
linux
| 1 |
CVE-2016-7035
|
https://www.cvedetails.com/cve/CVE-2016-7035/
|
CWE-285
|
https://github.com/ClusterLabs/pacemaker/commit/5d71e65049
|
5d71e65049
|
High: libcrmcommon: fix CVE-2016-7035 (improper IPC guarding)
It was discovered that under some not so uncommon circumstances, some
pacemaker daemons could be talked to, via libqb-facilitated IPC, by
unprivileged clients due to a flawed authorization decision. Depending
on the capabilities of the affected daemons, this might equip an
unauthorized user with local privilege escalation or even cluster-wide
remote execution of possibly arbitrary commands when such a user
happens to reside on a standard or remote/guest cluster node,
respectively.
The original vulnerability was introduced in an attempt to allow
unprivileged IPC clients to clean up file-system-materialized
leftovers in case the server (otherwise responsible for the lifecycle
of these files) crashes. While the intended part of such behavior is
now effectively voided (along with the unintended one), a best-effort
fix to address this corner case systemically at libqb is coming along
(https://github.com/ClusterLabs/libqb/pull/231).
Affected versions: 1.1.10-rc1 (2013-04-17) - 1.1.15 (2016-06-21)
Impact: Important
CVSSv3 ranking: 8.8 : AV:L/AC:L/PR:L/UI:N/S:C/C:H/I:H/A:H
Credits for independent findings, in chronological order:
Jan "poki" Pokorný, of Red Hat
Alain Moulle, of ATOS/BULL
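For illustration only: the flawed decision described above boils down to
an IPC connection-acceptance path that trusts any local client. A minimal
sketch of the kind of guard such a fix restores might look as follows;
every name in it is assumed, none is taken from the pacemaker sources.

#include <stdbool.h>
#include <sys/types.h>
#include <unistd.h>

/* Sketch only: admit an IPC client when it is root or runs with the
 * daemon's own uid/gid; reject everything else. */
static bool ipc_client_allowed_sketch(uid_t client_uid, gid_t client_gid)
{
    return client_uid == 0 ||        /* root is always trusted    */
           client_uid == getuid() || /* same user as the daemon   */
           client_gid == getgid();   /* or the daemon's own group */
}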
|
crm_ipcs_send_ack(crm_client_t * c, uint32_t request, uint32_t flags, const char *tag, const char *function,
int line)
{
if (flags & crm_ipc_client_response) {
xmlNode *ack = create_xml_node(NULL, tag);
crm_trace("Ack'ing msg from %s (%p)", crm_client_name(c), c);
c->request_id = 0;
crm_xml_add(ack, "function", function);
crm_xml_add_int(ack, "line", line);
crm_ipcs_send(c, request, ack, flags);
free_xml(ack);
}
}
|
crm_ipcs_send_ack(crm_client_t * c, uint32_t request, uint32_t flags, const char *tag, const char *function,
int line)
{
if (flags & crm_ipc_client_response) {
xmlNode *ack = create_xml_node(NULL, tag);
crm_trace("Ack'ing msg from %s (%p)", crm_client_name(c), c);
c->request_id = 0;
crm_xml_add(ack, "function", function);
crm_xml_add_int(ack, "line", line);
crm_ipcs_send(c, request, ack, flags);
free_xml(ack);
}
}
|
C
|
pacemaker
| 0 |
CVE-2016-4303
|
https://www.cvedetails.com/cve/CVE-2016-4303/
|
CWE-119
|
https://github.com/esnet/iperf/commit/91f2fa59e8ed80dfbf400add0164ee0e508e412a
|
91f2fa59e8ed80dfbf400add0164ee0e508e412a
|
Fix a buffer overflow / heap corruption issue that could occur if a
malformed JSON string was passed on the control channel. This issue,
present in the cJSON library, was already fixed upstream, so was
addressed here in iperf3 by importing a newer version of cJSON (plus
local ESnet modifications).
Discovered and reported by Dave McDaniel, Cisco Talos.
Based on a patch by @dopheide-esnet, with input from @DaveGamble.
Cross-references: TALOS-CAN-0164, ESNET-SECADV-2016-0001,
CVE-2016-4303
(cherry picked from commit ed94082be27d971a5e1b08b666e2c217cf470a40)
Signed-off-by: Bruce A. Mah <[email protected]>
|
void cJSON_AddItemToObject( cJSON *object, const char *string, cJSON *item )
|
void cJSON_AddItemToObject( cJSON *object, const char *string, cJSON *item )
{
if ( ! item )
return;
if ( item->string )
cJSON_free( item->string );
item->string = cJSON_strdup( string );
cJSON_AddItemToArray( object, item );
}
|
C
|
iperf
| 1 |
null | null | null |
https://github.com/chromium/chromium/commit/76f36a8362a3e817cc3ec721d591f2f8878dc0c7
|
76f36a8362a3e817cc3ec721d591f2f8878dc0c7
|
Scheduler/child/TimeSource could be replaced with base/time/DefaultTickClock.
They both are totally same and TimeSource is removed.
BUG=494892
[email protected], [email protected]
Review URL: https://codereview.chromium.org/1163143002
Cr-Commit-Position: refs/heads/master@{#333035}
|
bool TaskQueue::ShouldAutoPumpQueueLocked(
bool should_trigger_wakeup,
const base::PendingTask* previous_task) {
lock_.AssertAcquired();
if (pump_policy_ == TaskQueueManager::PumpPolicy::MANUAL)
return false;
if (pump_policy_ == TaskQueueManager::PumpPolicy::AFTER_WAKEUP &&
(!should_trigger_wakeup || TaskIsOlderThanQueuedTasks(previous_task)))
return false;
if (incoming_queue_.empty())
return false;
return true;
}
|
bool TaskQueue::ShouldAutoPumpQueueLocked(
bool should_trigger_wakeup,
const base::PendingTask* previous_task) {
lock_.AssertAcquired();
if (pump_policy_ == TaskQueueManager::PumpPolicy::MANUAL)
return false;
if (pump_policy_ == TaskQueueManager::PumpPolicy::AFTER_WAKEUP &&
(!should_trigger_wakeup || TaskIsOlderThanQueuedTasks(previous_task)))
return false;
if (incoming_queue_.empty())
return false;
return true;
}
|
C
|
Chrome
| 0 |
CVE-2019-6978
|
https://www.cvedetails.com/cve/CVE-2019-6978/
|
CWE-415
|
https://github.com/php/php-src/commit/089f7c0bc28d399b0420aa6ef058e4c1c120b2ae
|
089f7c0bc28d399b0420aa6ef058e4c1c120b2ae
|
Sync with upstream
Even though libgd/libgd#492 is not a relevant bug fix for PHP, since
the binding doesn't use the `gdImage*Ptr()` functions at all, we're
porting the fix to stay in sync here.
|
void skip_input_data (j_decompress_ptr cinfo, long num_bytes)
{
my_src_ptr src = (my_src_ptr) cinfo->src;
/* Just a dumb implementation for now. Not clear that being smart is worth
* any trouble anyway --- large skips are infrequent.
*/
if (num_bytes > 0) {
while (num_bytes > (long) src->pub.bytes_in_buffer) {
num_bytes -= (long) src->pub.bytes_in_buffer;
(void) fill_input_buffer (cinfo);
/* note we assume that fill_input_buffer will never return FALSE,
* so suspension need not be handled.
*/
}
src->pub.next_input_byte += (size_t) num_bytes;
src->pub.bytes_in_buffer -= (size_t) num_bytes;
}
}
|
void skip_input_data (j_decompress_ptr cinfo, long num_bytes)
{
my_src_ptr src = (my_src_ptr) cinfo->src;
/* Just a dumb implementation for now. Not clear that being smart is worth
* any trouble anyway --- large skips are infrequent.
*/
if (num_bytes > 0) {
while (num_bytes > (long) src->pub.bytes_in_buffer) {
num_bytes -= (long) src->pub.bytes_in_buffer;
(void) fill_input_buffer (cinfo);
/* note we assume that fill_input_buffer will never return FALSE,
* so suspension need not be handled.
*/
}
src->pub.next_input_byte += (size_t) num_bytes;
src->pub.bytes_in_buffer -= (size_t) num_bytes;
}
}
|
C
|
php-src
| 0 |
CVE-2016-5225
|
https://www.cvedetails.com/cve/CVE-2016-5225/
|
CWE-19
|
https://github.com/chromium/chromium/commit/4ac4aff49c4c539bce6d8a0d8800c01324bb6bc0
|
4ac4aff49c4c539bce6d8a0d8800c01324bb6bc0
|
Enforce form-action CSP even when form.target is present.
BUG=630332
Review-Url: https://codereview.chromium.org/2464123004
Cr-Commit-Position: refs/heads/master@{#429922}
|
void HTMLFormElement::submit(Event* event,
HTMLFormControlElement* submitButton) {
FrameView* view = document().view();
LocalFrame* frame = document().frame();
if (!view || !frame || !frame->page())
return;
if (!isConnected()) {
document().addConsoleMessage(ConsoleMessage::create(
JSMessageSource, WarningMessageLevel,
"Form submission canceled because the form is not connected"));
return;
}
if (m_isSubmitting)
return;
EventQueueScope scopeForDialogClose;
AutoReset<bool> submitScope(&m_isSubmitting, true);
if (event && !submitButton) {
for (const auto& associatedElement : associatedElements()) {
if (!associatedElement->isFormControlElement())
continue;
HTMLFormControlElement* control =
toHTMLFormControlElement(associatedElement);
DCHECK(!control->isActivatedSubmit());
if (control->isSuccessfulSubmitButton()) {
submitButton = control;
break;
}
}
}
FormSubmission* formSubmission =
FormSubmission::create(this, m_attributes, event, submitButton);
if (formSubmission->method() == FormSubmission::DialogMethod) {
submitDialog(formSubmission);
} else if (m_inUserJSSubmitEvent) {
m_plannedNavigation = formSubmission;
} else {
scheduleFormSubmission(formSubmission);
}
}
|
void HTMLFormElement::submit(Event* event,
HTMLFormControlElement* submitButton) {
FrameView* view = document().view();
LocalFrame* frame = document().frame();
if (!view || !frame || !frame->page())
return;
if (!isConnected()) {
document().addConsoleMessage(ConsoleMessage::create(
JSMessageSource, WarningMessageLevel,
"Form submission canceled because the form is not connected"));
return;
}
if (m_isSubmitting)
return;
EventQueueScope scopeForDialogClose;
AutoReset<bool> submitScope(&m_isSubmitting, true);
if (event && !submitButton) {
for (const auto& associatedElement : associatedElements()) {
if (!associatedElement->isFormControlElement())
continue;
HTMLFormControlElement* control =
toHTMLFormControlElement(associatedElement);
DCHECK(!control->isActivatedSubmit());
if (control->isSuccessfulSubmitButton()) {
submitButton = control;
break;
}
}
}
FormSubmission* formSubmission =
FormSubmission::create(this, m_attributes, event, submitButton);
if (formSubmission->method() == FormSubmission::DialogMethod) {
submitDialog(formSubmission);
} else if (m_inUserJSSubmitEvent) {
m_plannedNavigation = formSubmission;
} else {
scheduleFormSubmission(formSubmission);
}
}
|
C
|
Chrome
| 0 |
CVE-2017-9526
|
https://www.cvedetails.com/cve/CVE-2017-9526/
|
CWE-200
|
https://git.gnupg.org/cgi-bin/gitweb.cgi?p=libgcrypt.git;a=commit;h=5a22de904a0a366ae79f03ff1e13a1232a89e26b
|
5a22de904a0a366ae79f03ff1e13a1232a89e26b
| null |
eddsa_encode_x_y (gcry_mpi_t x, gcry_mpi_t y, unsigned int minlen,
int with_prefix,
unsigned char **r_buffer, unsigned int *r_buflen)
{
unsigned char *rawmpi;
unsigned int rawmpilen;
int off = with_prefix? 1:0;
rawmpi = _gcry_mpi_get_buffer_extra (y, minlen, off?-1:0, &rawmpilen, NULL);
if (!rawmpi)
return gpg_err_code_from_syserror ();
if (mpi_test_bit (x, 0) && rawmpilen)
rawmpi[off + rawmpilen - 1] |= 0x80; /* Set sign bit. */
if (off)
rawmpi[0] = 0x40;
*r_buffer = rawmpi;
*r_buflen = rawmpilen + off;
return 0;
}
|
eddsa_encode_x_y (gcry_mpi_t x, gcry_mpi_t y, unsigned int minlen,
int with_prefix,
unsigned char **r_buffer, unsigned int *r_buflen)
{
unsigned char *rawmpi;
unsigned int rawmpilen;
int off = with_prefix? 1:0;
rawmpi = _gcry_mpi_get_buffer_extra (y, minlen, off?-1:0, &rawmpilen, NULL);
if (!rawmpi)
return gpg_err_code_from_syserror ();
if (mpi_test_bit (x, 0) && rawmpilen)
rawmpi[off + rawmpilen - 1] |= 0x80; /* Set sign bit. */
if (off)
rawmpi[0] = 0x40;
*r_buffer = rawmpi;
*r_buflen = rawmpilen + off;
return 0;
}
|
C
|
gnupg
| 0 |
CVE-2016-9537
|
https://www.cvedetails.com/cve/CVE-2016-9537/
|
CWE-787
|
https://github.com/vadz/libtiff/commit/83a4b92815ea04969d494416eaae3d4c6b338e4a#diff-c8b4b355f9b5c06d585b23138e1c185f
|
83a4b92815ea04969d494416eaae3d4c6b338e4a#diff-c8b4b355f9b5c06d585b23138e1c185f
|
* tools/tiffcrop.c: fix various out-of-bounds write vulnerabilities
in heap or stack allocated buffers. Reported as MSVR 35093,
MSVR 35096 and MSVR 35097. Discovered by Axel Souchet and Vishal
Chauhan from the MSRC Vulnerabilities & Mitigations team.
* tools/tiff2pdf.c: fix out-of-bounds write vulnerabilities in
heap allocate buffer in t2p_process_jpeg_strip(). Reported as MSVR
35098. Discovered by Axel Souchet and Vishal Chauhan from the MSRC
Vulnerabilities & Mitigations team.
* libtiff/tif_pixarlog.c: fix out-of-bounds write vulnerabilities
in heap allocated buffers. Reported as MSVR 35094. Discovered by
Axel Souchet and Vishal Chauhan from the MSRC Vulnerabilities &
Mitigations team.
* libtiff/tif_write.c: fix issue in error code path of TIFFFlushData1()
that didn't reset the tif_rawcc and tif_rawcp members. I'm not
completely sure if that could happen in practice outside of the odd
behaviour of t2p_seekproc() of tiff2pdf. The report points out that a
better fix could be to check the return value of TIFFFlushData1() in
places where it isn't done currently, but it seems this patch is enough.
Reported as MSVR 35095. Discovered by Axel Souchet & Vishal Chauhan &
Suha Can from the MSRC Vulnerabilities & Mitigations team.
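As a sketch of the TIFFFlushData1() item above (simplified; it would
live inside tif_write.c next to the original, and it is not the verbatim
patch), the essential point is that the error path must clear the
raw-buffer bookkeeping instead of leaving stale values behind:

#include "tiffiop.h" /* sketch assumes libtiff's internal declarations */

/* Sketch only: when the underlying write fails, reset tif_rawcc and
 * tif_rawcp so the stale byte count cannot be replayed later. */
static int flush_data_sketch(TIFF *tif)
{
    if (tif->tif_rawcc > 0) {
        int ok = TIFFAppendToStrip(tif, tif->tif_curstrip,
                                   tif->tif_rawdata, tif->tif_rawcc);
        tif->tif_rawcc = 0;                 /* the fix: always reset */
        tif->tif_rawcp = tif->tif_rawdata;  /* ditto                 */
        if (!ok)
            return 0;
    }
    return 1;
}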
|
tsize_t t2p_write_pdf_trailer(T2P* t2p, TIFF* output)
{
tsize_t written = 0;
char buffer[32];
int buflen = 0;
size_t i = 0;
for (i = 0; i < sizeof(t2p->pdf_fileid) - 8; i += 8)
snprintf(t2p->pdf_fileid + i, 9, "%.8X", rand());
written += t2pWriteFile(output, (tdata_t) "trailer\n<<\n/Size ", 17);
buflen = snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)(t2p->pdf_xrefcount+1));
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "\n/Root ", 7);
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)t2p->pdf_catalog);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R \n/Info ", 12);
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)t2p->pdf_info);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R \n/ID[<", 11);
written += t2pWriteFile(output, (tdata_t) t2p->pdf_fileid,
sizeof(t2p->pdf_fileid) - 1);
written += t2pWriteFile(output, (tdata_t) "><", 2);
written += t2pWriteFile(output, (tdata_t) t2p->pdf_fileid,
sizeof(t2p->pdf_fileid) - 1);
written += t2pWriteFile(output, (tdata_t) ">]\n>>\nstartxref\n", 16);
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)t2p->pdf_startxref);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "\n%%EOF\n", 7);
return(written);
}
|
tsize_t t2p_write_pdf_trailer(T2P* t2p, TIFF* output)
{
tsize_t written = 0;
char buffer[32];
int buflen = 0;
size_t i = 0;
for (i = 0; i < sizeof(t2p->pdf_fileid) - 8; i += 8)
snprintf(t2p->pdf_fileid + i, 9, "%.8X", rand());
written += t2pWriteFile(output, (tdata_t) "trailer\n<<\n/Size ", 17);
buflen = snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)(t2p->pdf_xrefcount+1));
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "\n/Root ", 7);
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)t2p->pdf_catalog);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R \n/Info ", 12);
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)t2p->pdf_info);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R \n/ID[<", 11);
written += t2pWriteFile(output, (tdata_t) t2p->pdf_fileid,
sizeof(t2p->pdf_fileid) - 1);
written += t2pWriteFile(output, (tdata_t) "><", 2);
written += t2pWriteFile(output, (tdata_t) t2p->pdf_fileid,
sizeof(t2p->pdf_fileid) - 1);
written += t2pWriteFile(output, (tdata_t) ">]\n>>\nstartxref\n", 16);
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)t2p->pdf_startxref);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "\n%%EOF\n", 7);
return(written);
}
|
C
|
libtiff
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/fc790462b4f248712bbc8c3734664dd6b05f80f2
|
fc790462b4f248712bbc8c3734664dd6b05f80f2
|
Set the job name for the print job on the Mac.
BUG=http://crbug.com/29188
TEST=as in bug
Review URL: http://codereview.chromium.org/1997016
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@47056 0039d316-1c4b-4281-b951-d872f2087c98
|
void PrintJobWorker::OnFailure() {
DCHECK_EQ(message_loop(), MessageLoop::current());
scoped_refptr<PrintJobWorkerOwner> handle(owner_);
NotificationTask* task = new NotificationTask();
task->Init(owner_,
JobEventDetails::FAILED,
document_.get(),
NULL);
owner_->message_loop()->PostTask(FROM_HERE, task);
Cancel();
document_ = NULL;
page_number_ = PageNumber::npos();
}
|
void PrintJobWorker::OnFailure() {
DCHECK_EQ(message_loop(), MessageLoop::current());
scoped_refptr<PrintJobWorkerOwner> handle(owner_);
NotificationTask* task = new NotificationTask();
task->Init(owner_,
JobEventDetails::FAILED,
document_.get(),
NULL);
owner_->message_loop()->PostTask(FROM_HERE, task);
Cancel();
document_ = NULL;
page_number_ = PageNumber::npos();
}
|
C
|
Chrome
| 0 |
CVE-2016-5200
|
https://www.cvedetails.com/cve/CVE-2016-5200/
|
CWE-119
|
https://github.com/chromium/chromium/commit/2f19869af13bbfdcfd682a55c0d2c61c6e102475
|
2f19869af13bbfdcfd682a55c0d2c61c6e102475
|
chrome/browser/ui/webauthn: long domains may cause a line break.
As requested by UX in [1], allow long host names to split a title into
two lines. This allows us to show more of the name before eliding,
although sufficiently long names will still trigger elision.
Screenshot at
https://drive.google.com/open?id=1_V6t2CeZDAVazy3Px-OET2LnB__aEW1r.
[1] https://docs.google.com/presentation/d/1TtxkPUchyVZulqgdMcfui-68B0W-DWaFFVJEffGIbLA/edit#slide=id.g5913c4105f_1_12
Change-Id: I70f6541e0db3e9942239304de43b487a7561ca34
Bug: 870892
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1601812
Auto-Submit: Adam Langley <[email protected]>
Commit-Queue: Nina Satragno <[email protected]>
Reviewed-by: Nina Satragno <[email protected]>
Cr-Commit-Position: refs/heads/master@{#658114}
|
AuthenticatorNotRegisteredErrorModel::GetStepIllustration(
ImageColorScheme color_scheme) const {
return color_scheme == ImageColorScheme::kDark ? kWebauthnErrorDarkIcon
: kWebauthnErrorIcon;
}
|
AuthenticatorNotRegisteredErrorModel::GetStepIllustration(
ImageColorScheme color_scheme) const {
return color_scheme == ImageColorScheme::kDark ? kWebauthnErrorDarkIcon
: kWebauthnErrorIcon;
}
|
C
|
Chrome
| 0 |
CVE-2013-6371
|
https://www.cvedetails.com/cve/CVE-2013-6371/
|
CWE-310
|
https://github.com/json-c/json-c/commit/64e36901a0614bf64a19bc3396469c66dcd0b015
|
64e36901a0614bf64a19bc3396469c66dcd0b015
|
Patch to address the following issues:
* CVE-2013-6371: hash collision denial of service
* CVE-2013-6370: buffer overflow if size_t is larger than int
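For the second item, the defect class is buffer-size arithmetic carried
out in int; a sketch of the guard such a fix needs (illustrative, not
the verbatim json-c patch):

#include <limits.h>
#include <stddef.h>

/* Sketch only: on platforms where size_t is wider than int, any length
 * that does not fit in int must be rejected before it feeds int-typed
 * buffer arithmetic. */
static int size_fits_int_sketch(size_t want)
{
    return want <= (size_t)INT_MAX;
}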
|
int lh_table_delete(struct lh_table *t, const void *k)
{
struct lh_entry *e = lh_table_lookup_entry(t, k);
if(!e) return -1;
return lh_table_delete_entry(t, e);
}
|
int lh_table_delete(struct lh_table *t, const void *k)
{
struct lh_entry *e = lh_table_lookup_entry(t, k);
if(!e) return -1;
return lh_table_delete_entry(t, e);
}
|
C
|
json-c
| 0 |
CVE-2015-1285
|
https://www.cvedetails.com/cve/CVE-2015-1285/
|
CWE-200
|
https://github.com/chromium/chromium/commit/39595f8d4dffcb644d438106dcb64a30c139ff0e
|
39595f8d4dffcb644d438106dcb64a30c139ff0e
|
[reland] Do not set default wallpaper unless it should do so.
[email protected], [email protected]
Bug: 751382
Change-Id: Id0793dfe467f737526a95b1e66ed01fbb8860bda
Reviewed-on: https://chromium-review.googlesource.com/619754
Commit-Queue: Xiaoqian Dai <[email protected]>
Reviewed-by: Alexander Alekseev <[email protected]>
Reviewed-by: Biao She <[email protected]>
Cr-Original-Commit-Position: refs/heads/master@{#498325}
Reviewed-on: https://chromium-review.googlesource.com/646430
Cr-Commit-Position: refs/heads/master@{#498982}
|
void WallpaperManager::OnWindowDestroying(aura::Window* window) {
window_observer_.Remove(window);
chromeos::WallpaperWindowStateManager::RestoreWindows(
user_manager::UserManager::Get()->GetActiveUser()->username_hash());
}
|
void WallpaperManager::OnWindowDestroying(aura::Window* window) {
window_observer_.Remove(window);
chromeos::WallpaperWindowStateManager::RestoreWindows(
user_manager::UserManager::Get()->GetActiveUser()->username_hash());
}
|
C
|
Chrome
| 0 |
CVE-2013-7271
|
https://www.cvedetails.com/cve/CVE-2013-7271/
|
CWE-20
|
https://github.com/torvalds/linux/commit/f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
|
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
|
net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case where recvfrom is called with NULL as the address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
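A minimal sketch of the contract this change imposes on recvmsg handlers,
for no particular protocol (kernel-style C; the peer-address source is
assumed): write the address only when msg_name is non-NULL, and set
msg_namelen to the exact size written, at most
sizeof(struct sockaddr_storage).

#include <linux/socket.h> /* sketch assumes kernel context */
#include <linux/in.h>
#include <linux/string.h>

/* Sketch only: the post-patch msg_name handling in a recvmsg handler. */
static void fill_msg_name_sketch(struct msghdr *msg,
                                 const struct sockaddr_in *peer)
{
    if (msg->msg_name) {
        memcpy(msg->msg_name, peer, sizeof(*peer));
        msg->msg_namelen = sizeof(*peer); /* exact size actually written */
    }
    /* when msg_name is NULL, msg_namelen stays 0 as preset by the core */
}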
|
static int nr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
if (event != NETDEV_DOWN)
return NOTIFY_DONE;
nr_kill_by_device(dev);
nr_rt_device_down(dev);
return NOTIFY_DONE;
}
|
static int nr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
if (event != NETDEV_DOWN)
return NOTIFY_DONE;
nr_kill_by_device(dev);
nr_rt_device_down(dev);
return NOTIFY_DONE;
}
|
C
|
linux
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/181c7400b2bf50ba02ac77149749fb419b4d4797
|
181c7400b2bf50ba02ac77149749fb419b4d4797
|
gpu: Use GetUniformSetup computed result size.
[email protected]
BUG=468936
Review URL: https://codereview.chromium.org/1016193003
Cr-Commit-Position: refs/heads/master@{#321489}
|
void GLES2DecoderImpl::FinishReadPixels(
const cmds::ReadPixels& c,
GLuint buffer) {
TRACE_EVENT0("gpu", "GLES2DecoderImpl::FinishReadPixels");
GLsizei width = c.width;
GLsizei height = c.height;
GLenum format = c.format;
GLenum type = c.type;
typedef cmds::ReadPixels::Result Result;
uint32 pixels_size;
Result* result = NULL;
if (c.result_shm_id != 0) {
result = GetSharedMemoryAs<Result*>(
c.result_shm_id, c.result_shm_offset, sizeof(*result));
if (!result) {
if (buffer != 0) {
glDeleteBuffersARB(1, &buffer);
}
return;
}
}
GLES2Util::ComputeImageDataSizes(
width, height, 1, format, type, state_.pack_alignment, &pixels_size,
NULL, NULL);
void* pixels = GetSharedMemoryAs<void*>(
c.pixels_shm_id, c.pixels_shm_offset, pixels_size);
if (!pixels) {
if (buffer != 0) {
glDeleteBuffersARB(1, &buffer);
}
return;
}
if (buffer != 0) {
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, buffer);
void* data;
if (features().map_buffer_range) {
data = glMapBufferRange(
GL_PIXEL_PACK_BUFFER_ARB, 0, pixels_size, GL_MAP_READ_BIT);
} else {
data = glMapBuffer(GL_PIXEL_PACK_BUFFER_ARB, GL_READ_ONLY);
}
if (!data) {
LOCAL_SET_GL_ERROR(GL_OUT_OF_MEMORY, "glMapBuffer",
"Unable to map memory for readback.");
return;
}
memcpy(pixels, data, pixels_size);
glUnmapBuffer(GL_PIXEL_PACK_BUFFER_ARB);
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
glDeleteBuffersARB(1, &buffer);
}
if (result != NULL) {
*result = true;
}
GLenum read_format = GetBoundReadFrameBufferInternalFormat();
uint32 channels_exist = GLES2Util::GetChannelsForFormat(read_format);
if ((channels_exist & 0x0008) == 0 &&
workarounds().clear_alpha_in_readpixels) {
uint32 temp_size;
uint32 unpadded_row_size;
uint32 padded_row_size;
if (!GLES2Util::ComputeImageDataSizes(
width, 2, 1, format, type, state_.pack_alignment, &temp_size,
&unpadded_row_size, &padded_row_size)) {
return;
}
uint32 channel_count = 0;
uint32 alpha_channel = 0;
switch (format) {
case GL_RGBA:
case GL_BGRA_EXT:
channel_count = 4;
alpha_channel = 3;
break;
case GL_ALPHA:
channel_count = 1;
alpha_channel = 0;
break;
}
if (channel_count > 0) {
switch (type) {
case GL_UNSIGNED_BYTE:
WriteAlphaData<uint8>(
pixels, height, channel_count, alpha_channel, unpadded_row_size,
padded_row_size, 0xFF);
break;
case GL_FLOAT:
WriteAlphaData<float>(
pixels, height, channel_count, alpha_channel, unpadded_row_size,
padded_row_size, 1.0f);
break;
case GL_HALF_FLOAT:
WriteAlphaData<uint16>(
pixels, height, channel_count, alpha_channel, unpadded_row_size,
padded_row_size, 0x3C00);
break;
}
}
}
}
|
void GLES2DecoderImpl::FinishReadPixels(
const cmds::ReadPixels& c,
GLuint buffer) {
TRACE_EVENT0("gpu", "GLES2DecoderImpl::FinishReadPixels");
GLsizei width = c.width;
GLsizei height = c.height;
GLenum format = c.format;
GLenum type = c.type;
typedef cmds::ReadPixels::Result Result;
uint32 pixels_size;
Result* result = NULL;
if (c.result_shm_id != 0) {
result = GetSharedMemoryAs<Result*>(
c.result_shm_id, c.result_shm_offset, sizeof(*result));
if (!result) {
if (buffer != 0) {
glDeleteBuffersARB(1, &buffer);
}
return;
}
}
GLES2Util::ComputeImageDataSizes(
width, height, 1, format, type, state_.pack_alignment, &pixels_size,
NULL, NULL);
void* pixels = GetSharedMemoryAs<void*>(
c.pixels_shm_id, c.pixels_shm_offset, pixels_size);
if (!pixels) {
if (buffer != 0) {
glDeleteBuffersARB(1, &buffer);
}
return;
}
if (buffer != 0) {
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, buffer);
void* data;
if (features().map_buffer_range) {
data = glMapBufferRange(
GL_PIXEL_PACK_BUFFER_ARB, 0, pixels_size, GL_MAP_READ_BIT);
} else {
data = glMapBuffer(GL_PIXEL_PACK_BUFFER_ARB, GL_READ_ONLY);
}
if (!data) {
LOCAL_SET_GL_ERROR(GL_OUT_OF_MEMORY, "glMapBuffer",
"Unable to map memory for readback.");
return;
}
memcpy(pixels, data, pixels_size);
glUnmapBuffer(GL_PIXEL_PACK_BUFFER_ARB);
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
glDeleteBuffersARB(1, &buffer);
}
if (result != NULL) {
*result = true;
}
GLenum read_format = GetBoundReadFrameBufferInternalFormat();
uint32 channels_exist = GLES2Util::GetChannelsForFormat(read_format);
if ((channels_exist & 0x0008) == 0 &&
workarounds().clear_alpha_in_readpixels) {
uint32 temp_size;
uint32 unpadded_row_size;
uint32 padded_row_size;
if (!GLES2Util::ComputeImageDataSizes(
width, 2, 1, format, type, state_.pack_alignment, &temp_size,
&unpadded_row_size, &padded_row_size)) {
return;
}
uint32 channel_count = 0;
uint32 alpha_channel = 0;
switch (format) {
case GL_RGBA:
case GL_BGRA_EXT:
channel_count = 4;
alpha_channel = 3;
break;
case GL_ALPHA:
channel_count = 1;
alpha_channel = 0;
break;
}
if (channel_count > 0) {
switch (type) {
case GL_UNSIGNED_BYTE:
WriteAlphaData<uint8>(
pixels, height, channel_count, alpha_channel, unpadded_row_size,
padded_row_size, 0xFF);
break;
case GL_FLOAT:
WriteAlphaData<float>(
pixels, height, channel_count, alpha_channel, unpadded_row_size,
padded_row_size, 1.0f);
break;
case GL_HALF_FLOAT:
WriteAlphaData<uint16>(
pixels, height, channel_count, alpha_channel, unpadded_row_size,
padded_row_size, 0x3C00);
break;
}
}
}
}
|
C
|
Chrome
| 0 |
CVE-2014-3145
|
https://www.cvedetails.com/cve/CVE-2014-3145/
|
CWE-189
|
https://github.com/torvalds/linux/commit/05ab8f2647e4221cbdb3856dd7d32bd5407316b3
|
05ab8f2647e4221cbdb3856dd7d32bd5407316b3
|
filter: prevent nla extensions to peek beyond the end of the message
The BPF_S_ANC_NLATTR and BPF_S_ANC_NLATTR_NEST extensions fail to check
for a minimal message length before testing the supplied offset to be
within the bounds of the message. This allows the subtraction of the nla
header to underflow and therefore -- as the data type is unsigned --
allows far too big offset and length values for the search of the
netlink attribute.
The remainder calculation for the BPF_S_ANC_NLATTR_NEST extension is
also wrong. It has the minuend and subtrahend mixed up, and therefore
calculates a huge length value, allowing the search to overrun the end
of the message while looking for the netlink attribute.
The following three BPF snippets will trigger the bugs when attached to
a UNIX datagram socket and parsing a message with length 1, 2 or 3.
,-[ PoC for missing size check in BPF_S_ANC_NLATTR ]--
| ld #0x87654321
| ldx #42
| ld #nla
| ret a
`---
,-[ PoC for the same bug in BPF_S_ANC_NLATTR_NEST ]--
| ld #0x87654321
| ldx #42
| ld #nlan
| ret a
`---
,-[ PoC for wrong remainder calculation in BPF_S_ANC_NLATTR_NEST ]--
| ; (needs a fake netlink header at offset 0)
| ld #0
| ldx #42
| ld #nlan
| ret a
`---
Fix the first issue by ensuring the message length fulfills the minimal
size constraints of an nla header. Fix the second bug by getting the math
for the remainder calculation right.
Fixes: 4738c1db15 ("[SKFILTER]: Add SKF_ADF_NLATTR instruction")
Fixes: d214c7537b ("filter: add SKF_AD_NLATTR_NEST to look for nested..")
Cc: Patrick McHardy <[email protected]>
Cc: Pablo Neira Ayuso <[email protected]>
Signed-off-by: Mathias Krause <[email protected]>
Acked-by: Daniel Borkmann <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
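For the NEST remainder bug, "minuend and subtrahend mixed up" means the
bound was computed as A - skb->len (which underflows) rather than
skb->len - A. A sketch of the corrected comparison, reusing the variable
names from the function below (it assumes the caller already verified
A <= skb->len - sizeof(struct nlattr)):

#include <linux/skbuff.h>
#include <linux/netlink.h>

/* Sketch only: bound the attribute by the bytes genuinely remaining
 * after offset A, i.e. skb->len - A, never A - skb->len. */
static int nla_len_fits_sketch(const struct sk_buff *skb, unsigned int A,
                               const struct nlattr *nla)
{
    return nla->nla_len <= skb->len - A;
}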
|
static u64 __skb_get_nlattr(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
{
struct sk_buff *skb = (struct sk_buff *)(long) ctx;
struct nlattr *nla;
if (skb_is_nonlinear(skb))
return 0;
if (skb->len < sizeof(struct nlattr))
return 0;
if (A > skb->len - sizeof(struct nlattr))
return 0;
nla = nla_find((struct nlattr *) &skb->data[A], skb->len - A, X);
if (nla)
return (void *) nla - (void *) skb->data;
return 0;
}
|
static u64 __skb_get_nlattr(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
{
struct sk_buff *skb = (struct sk_buff *)(long) ctx;
struct nlattr *nla;
if (skb_is_nonlinear(skb))
return 0;
if (A > skb->len - sizeof(struct nlattr))
return 0;
nla = nla_find((struct nlattr *) &skb->data[A], skb->len - A, X);
if (nla)
return (void *) nla - (void *) skb->data;
return 0;
}
|
C
|
linux
| 1 |
CVE-2011-3084
|
https://www.cvedetails.com/cve/CVE-2011-3084/
|
CWE-264
|
https://github.com/chromium/chromium/commit/744c2a2d90c3c9a33c818e1ea4b7ccb5010663a0
|
744c2a2d90c3c9a33c818e1ea4b7ccb5010663a0
|
Allow browser to handle all WebUI navigations.
BUG=113496
TEST="Google Dashboard" link in Sync settings loads in new process.
Review URL: http://codereview.chromium.org/9663045
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@126949 0039d316-1c4b-4281-b951-d872f2087c98
|
bool ShellContentClient::SandboxPlugin(CommandLine* command_line,
sandbox::TargetPolicy* policy) {
return false;
}
|
bool ShellContentClient::SandboxPlugin(CommandLine* command_line,
sandbox::TargetPolicy* policy) {
return false;
}
|
C
|
Chrome
| 0 |
CVE-2017-6307
|
https://www.cvedetails.com/cve/CVE-2017-6307/
|
CWE-125
|
https://github.com/verdammelt/tnef/commit/1a17af1ed0c791aec44dbdc9eab91218cc1e335a
|
1a17af1ed0c791aec44dbdc9eab91218cc1e335a
|
Use asserts on lengths to prevent invalid reads/writes.
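A sketch of the pattern that one-line message describes (the function
and its parameters are illustrative, not the actual tnef patch):

#include <assert.h>
#include <string.h>

/* Sketch only: refuse any copy whose declared length exceeds the space
 * actually available, instead of reading or writing past the end. */
static void bounded_copy_sketch(char *dst, size_t dst_size,
                                const char *src, size_t len)
{
    assert(len <= dst_size);
    memcpy(dst, src, len);
}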
|
mapi_attr_free (MAPI_Attr* attr)
{
if (attr)
{
size_t i;
for (i = 0; i < attr->num_values; i++)
{
if ((attr->type == szMAPI_STRING)
|| (attr->type == szMAPI_UNICODE_STRING)
|| (attr->type == szMAPI_BINARY))
{
XFREE (attr->values[i].data.buf);
}
}
if (attr->num_names > 0) {
for (i = 0; i < attr->num_names; i++)
{
XFREE(attr->names[i].data);
}
XFREE(attr->names);
}
XFREE (attr->values);
XFREE (attr->guid);
memset (attr, '\0', sizeof (MAPI_Attr));
}
}
|
mapi_attr_free (MAPI_Attr* attr)
{
if (attr)
{
size_t i;
for (i = 0; i < attr->num_values; i++)
{
if ((attr->type == szMAPI_STRING)
|| (attr->type == szMAPI_UNICODE_STRING)
|| (attr->type == szMAPI_BINARY))
{
XFREE (attr->values[i].data.buf);
}
}
if (attr->num_names > 0) {
for (i = 0; i < attr->num_names; i++)
{
XFREE(attr->names[i].data);
}
XFREE(attr->names);
}
XFREE (attr->values);
XFREE (attr->guid);
memset (attr, '\0', sizeof (MAPI_Attr));
}
}
|
C
|
tnef
| 0 |
CVE-2011-2880
|
https://www.cvedetails.com/cve/CVE-2011-2880/
|
CWE-399
|
https://github.com/chromium/chromium/commit/244c78b3f737f2cacab2d212801b0524cbcc3a7b
|
244c78b3f737f2cacab2d212801b0524cbcc3a7b
|
Reset the device policy machinery upon retrying enrollment.
BUG=chromium-os:18208
TEST=See bug description
Review URL: http://codereview.chromium.org/7676005
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@97615 0039d316-1c4b-4281-b951-d872f2087c98
|
void EnterpriseEnrollmentScreen::PrepareToShow() {
actor_->PrepareToShow();
}
|
void EnterpriseEnrollmentScreen::PrepareToShow() {
actor_->PrepareToShow();
}
|
C
|
Chrome
| 0 |
CVE-2016-7513
|
https://www.cvedetails.com/cve/CVE-2016-7513/
|
CWE-189
|
https://github.com/ImageMagick/ImageMagick/commit/a54fe0e8600eaf3dc6fe717d3c0398001507f723
|
a54fe0e8600eaf3dc6fe717d3c0398001507f723
| null |
static MagickBooleanType ClipPixelCacheNexus(Image *image,
NexusInfo *nexus_info,ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
MagickSizeType
number_pixels;
NexusInfo
**restrict clip_nexus,
**restrict image_nexus;
register const PixelPacket
*restrict r;
register IndexPacket
*restrict nexus_indexes,
*restrict indexes;
register PixelPacket
*restrict p,
*restrict q;
register ssize_t
i;
/*
Apply clip mask.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->clip_mask == (Image *) NULL) ||
(image->storage_class == PseudoClass))
return(MagickTrue);
cache_info=(CacheInfo *) image->cache;
if (cache_info == (Cache) NULL)
return(MagickFalse);
image_nexus=AcquirePixelCacheNexus(1);
clip_nexus=AcquirePixelCacheNexus(1);
if ((image_nexus == (NexusInfo **) NULL) ||
(clip_nexus == (NexusInfo **) NULL))
ThrowBinaryException(CacheError,"UnableToGetCacheNexus",image->filename);
p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
nexus_info->region.width,nexus_info->region.height,image_nexus[0],
exception);
indexes=image_nexus[0]->indexes;
q=nexus_info->pixels;
nexus_indexes=nexus_info->indexes;
r=GetVirtualPixelsFromNexus(image->clip_mask,MaskVirtualPixelMethod,
nexus_info->region.x,nexus_info->region.y,nexus_info->region.width,
nexus_info->region.height,clip_nexus[0],exception);
number_pixels=(MagickSizeType) nexus_info->region.width*
nexus_info->region.height;
for (i=0; i < (ssize_t) number_pixels; i++)
{
if ((p == (PixelPacket *) NULL) || (r == (const PixelPacket *) NULL))
break;
if (GetPixelIntensity(image,r) > (QuantumRange/2))
{
SetPixelRed(q,GetPixelRed(p));
SetPixelGreen(q,GetPixelGreen(p));
SetPixelBlue(q,GetPixelBlue(p));
SetPixelOpacity(q,GetPixelOpacity(p));
if (cache_info->active_index_channel != MagickFalse)
SetPixelIndex(nexus_indexes+i,GetPixelIndex(indexes+i));
}
p++;
q++;
r++;
}
clip_nexus=DestroyPixelCacheNexus(clip_nexus,1);
image_nexus=DestroyPixelCacheNexus(image_nexus,1);
if (i < (ssize_t) number_pixels)
return(MagickFalse);
return(MagickTrue);
}
|
static MagickBooleanType ClipPixelCacheNexus(Image *image,
NexusInfo *nexus_info,ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
MagickSizeType
number_pixels;
NexusInfo
**restrict clip_nexus,
**restrict image_nexus;
register const PixelPacket
*restrict r;
register IndexPacket
*restrict nexus_indexes,
*restrict indexes;
register PixelPacket
*restrict p,
*restrict q;
register ssize_t
i;
/*
Apply clip mask.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->clip_mask == (Image *) NULL) ||
(image->storage_class == PseudoClass))
return(MagickTrue);
cache_info=(CacheInfo *) image->cache;
if (cache_info == (Cache) NULL)
return(MagickFalse);
image_nexus=AcquirePixelCacheNexus(1);
clip_nexus=AcquirePixelCacheNexus(1);
if ((image_nexus == (NexusInfo **) NULL) ||
(clip_nexus == (NexusInfo **) NULL))
ThrowBinaryException(CacheError,"UnableToGetCacheNexus",image->filename);
p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
nexus_info->region.width,nexus_info->region.height,image_nexus[0],
exception);
indexes=image_nexus[0]->indexes;
q=nexus_info->pixels;
nexus_indexes=nexus_info->indexes;
r=GetVirtualPixelsFromNexus(image->clip_mask,MaskVirtualPixelMethod,
nexus_info->region.x,nexus_info->region.y,nexus_info->region.width,
nexus_info->region.height,clip_nexus[0],exception);
number_pixels=(MagickSizeType) nexus_info->region.width*
nexus_info->region.height;
for (i=0; i < (ssize_t) number_pixels; i++)
{
if ((p == (PixelPacket *) NULL) || (r == (const PixelPacket *) NULL))
break;
if (GetPixelIntensity(image,r) > (QuantumRange/2))
{
SetPixelRed(q,GetPixelRed(p));
SetPixelGreen(q,GetPixelGreen(p));
SetPixelBlue(q,GetPixelBlue(p));
SetPixelOpacity(q,GetPixelOpacity(p));
if (cache_info->active_index_channel != MagickFalse)
SetPixelIndex(nexus_indexes+i,GetPixelIndex(indexes+i));
}
p++;
q++;
r++;
}
clip_nexus=DestroyPixelCacheNexus(clip_nexus,1);
image_nexus=DestroyPixelCacheNexus(image_nexus,1);
if (i < (ssize_t) number_pixels)
return(MagickFalse);
return(MagickTrue);
}
|
C
|
ImageMagick
| 0 |
CVE-2014-1743
|
https://www.cvedetails.com/cve/CVE-2014-1743/
|
CWE-399
|
https://github.com/chromium/chromium/commit/6d9425ec7badda912555d46ea7abcfab81fdd9b9
|
6d9425ec7badda912555d46ea7abcfab81fdd9b9
|
sync compositor: pass simple gfx types by const ref
See bug for reasoning
BUG=159273
Review URL: https://codereview.chromium.org/1417893006
Cr-Commit-Position: refs/heads/master@{#356653}
|
SoftwareDevice(SynchronousCompositorOutputSurface* surface)
: surface_(surface) {
}
|
SoftwareDevice(SynchronousCompositorOutputSurface* surface)
: surface_(surface) {
}
|
C
|
Chrome
| 0 |
CVE-2016-7917
|
https://www.cvedetails.com/cve/CVE-2016-7917/
|
CWE-125
|
https://github.com/torvalds/linux/commit/c58d6c93680f28ac58984af61d0a7ebf4319c241
|
c58d6c93680f28ac58984af61d0a7ebf4319c241
|
netfilter: nfnetlink: correctly validate length of batch messages
If nlh->nlmsg_len is zero then an infinite loop is triggered because
'skb_pull(skb, msglen);' pulls zero bytes.
The calculation in nlmsg_len() underflows if 'nlh->nlmsg_len <
NLMSG_HDRLEN', which bypasses the length validation and will later
trigger an out-of-bounds read.
If the length validation does fail then the malformed batch message is
copied back to userspace. However, we cannot do this because the
nlh->nlmsg_len can be invalid. This leads to an out-of-bounds read in
netlink_ack:
[ 41.455421] ==================================================================
[ 41.456431] BUG: KASAN: slab-out-of-bounds in memcpy+0x1d/0x40 at addr ffff880119e79340
[ 41.456431] Read of size 4294967280 by task a.out/987
[ 41.456431] =============================================================================
[ 41.456431] BUG kmalloc-512 (Not tainted): kasan: bad access detected
[ 41.456431] -----------------------------------------------------------------------------
...
[ 41.456431] Bytes b4 ffff880119e79310: 00 00 00 00 d5 03 00 00 b0 fb fe ff 00 00 00 00 ................
[ 41.456431] Object ffff880119e79320: 20 00 00 00 10 00 05 00 00 00 00 00 00 00 00 00 ...............
[ 41.456431] Object ffff880119e79330: 14 00 0a 00 01 03 fc 40 45 56 11 22 33 10 00 05 .......@EV."3...
[ 41.456431] Object ffff880119e79340: f0 ff ff ff 88 99 aa bb 00 14 00 0a 00 06 fe fb ................
^^ start of batch nlmsg with
nlmsg_len=4294967280
...
[ 41.456431] Memory state around the buggy address:
[ 41.456431] ffff880119e79400: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
[ 41.456431] ffff880119e79480: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
[ 41.456431] >ffff880119e79500: 00 00 00 00 fc fc fc fc fc fc fc fc fc fc fc fc
[ 41.456431] ^
[ 41.456431] ffff880119e79580: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
[ 41.456431] ffff880119e79600: fc fc fc fc fc fc fc fc fc fc fb fb fb fb fb fb
[ 41.456431] ==================================================================
Fix this with better validation of nlh->nlmsg_len and by setting
NFNL_BATCH_FAILURE if any batch message fails length validation.
CAP_NET_ADMIN is required to trigger the bugs.
Fixes: 9ea2aa8b7dba ("netfilter: nfnetlink: validate nfnetlink header from batch")
Signed-off-by: Phil Turnbull <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>
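A sketch of the length validation the closing paragraph describes,
expressed as a standalone predicate (the real patch performs the check
inside the batch loop and raises NFNL_BATCH_FAILURE when it trips):

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>

/* Sketch only: a batch member is malformed when nlmsg_len is shorter
 * than a netlink header (the nlmsg_len() underflow) or longer than the
 * bytes left in the skb (the out-of-bounds read). */
static bool nfnl_batch_msg_bad_sketch(const struct sk_buff *skb,
                                      const struct nlmsghdr *nlh)
{
    return nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len;
}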
|
int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n)
{
nfnl_lock(n->subsys_id);
table[n->subsys_id].subsys = NULL;
nfnl_unlock(n->subsys_id);
synchronize_rcu();
return 0;
}
|
int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n)
{
nfnl_lock(n->subsys_id);
table[n->subsys_id].subsys = NULL;
nfnl_unlock(n->subsys_id);
synchronize_rcu();
return 0;
}
|
C
|
linux
| 0 |
CVE-2018-10017
|
https://www.cvedetails.com/cve/CVE-2018-10017/
|
CWE-125
|
https://github.com/OpenMPT/openmpt/commit/492022c7297ede682161d9c0ec2de15526424e76
|
492022c7297ede682161d9c0ec2de15526424e76
|
[Fix] Possible out-of-bounds read when computing length of some IT files with pattern loops (OpenMPT: formats that are converted to IT, libopenmpt: IT/ITP/MO3), caught with afl-fuzz.
git-svn-id: https://source.openmpt.org/svn/openmpt/trunk/OpenMPT@10027 56274372-70c3-4bfc-bfc3-4c3a0b034d27
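A generic sketch of the guard this class of fix adds before a
pattern-loop target is used as a row index (names are illustrative; the
actual change sits in OpenMPT's length-computation code):

/* Sketch only: clamp a loop-target row into [0, num_rows) before use. */
static int clamp_loop_row_sketch(int target_row, int num_rows)
{
    if (target_row < 0 || target_row >= num_rows)
        return 0; /* fall back to the first row rather than read OOB */
    return target_row;
}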
|
static uint32 GetFineLinearSlideDownTable(const CSoundFile *sndFile, uint32 i) { MPT_ASSERT(i < CountOf(FineLinearSlideDownTable)); return sndFile->m_playBehaviour[kHertzInLinearMode] ? FineLinearSlideDownTable[i] : FineLinearSlideUpTable[i]; }
|
static uint32 GetFineLinearSlideDownTable(const CSoundFile *sndFile, uint32 i) { MPT_ASSERT(i < CountOf(FineLinearSlideDownTable)); return sndFile->m_playBehaviour[kHertzInLinearMode] ? FineLinearSlideDownTable[i] : FineLinearSlideUpTable[i]; }
|
C
|
openmpt
| 0 |
CVE-2011-2494
|
https://www.cvedetails.com/cve/CVE-2011-2494/
|
CWE-200
|
https://github.com/torvalds/linux/commit/1a51410abe7d0ee4b1d112780f46df87d3621043
|
1a51410abe7d0ee4b1d112780f46df87d3621043
|
Make TASKSTATS require root access
Ok, this isn't optimal, since it means that 'iotop' needs admin
capabilities, and we may have to work on this some more. But at the
same time it is very much not acceptable to let anybody just read
anybody else's IO statistics quite at this level.
Use of the GENL_ADMIN_PERM suggested by Johannes Berg as an alternative
to checking the capabilities by hand.
Reported-by: Vasiliy Kulikov <[email protected]>
Cc: Johannes Berg <[email protected]>
Acked-by: Balbir Singh <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
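Mechanically, per the GENL_ADMIN_PERM suggestion credited above, the fix
is a flag on the generic-netlink operation; a sketch follows, with the
handler reduced to a stub and the command id taken from the taskstats
UAPI header:

#include <net/genetlink.h>
#include <linux/taskstats.h>

/* Stub handler so the sketch is self-contained. */
static int taskstats_user_cmd_sketch(struct sk_buff *skb,
                                     struct genl_info *info)
{
    return 0;
}

/* Sketch only: GENL_ADMIN_PERM makes the netlink core require
 * CAP_NET_ADMIN before the doit handler ever runs. */
static const struct genl_ops taskstats_ops_sketch = {
    .cmd   = TASKSTATS_CMD_GET,
    .doit  = taskstats_user_cmd_sketch,
    .flags = GENL_ADMIN_PERM, /* the actual fix */
};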
|
void __init taskstats_init_early(void)
{
unsigned int i;
taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
for_each_possible_cpu(i) {
INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
init_rwsem(&(per_cpu(listener_array, i).sem));
}
}
|
void __init taskstats_init_early(void)
{
unsigned int i;
taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
for_each_possible_cpu(i) {
INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
init_rwsem(&(per_cpu(listener_array, i).sem));
}
}
|
C
|
linux
| 0 |
CVE-2018-14017
|
https://www.cvedetails.com/cve/CVE-2018-14017/
|
CWE-125
|
https://github.com/radare/radare2/commit/eb0fb72b3c5307ec8e33effb6bf947e38cfdffe8
|
eb0fb72b3c5307ec8e33effb6bf947e38cfdffe8
|
Fix #10498 - Crash in fuzzed java file
|
R_API void r_bin_java_bootstrap_method_free(void /*/RBinJavaBootStrapMethod*/ *b) {
RBinJavaBootStrapMethod *bsm = b;
RListIter *iter, *iter_tmp;
RBinJavaBootStrapArgument *obj = NULL;
if (bsm) {
if (bsm->bootstrap_arguments) {
r_list_foreach_safe (bsm->bootstrap_arguments, iter, iter_tmp, obj) {
if (obj) {
r_bin_java_bootstrap_method_argument_free (obj);
}
}
r_list_free (bsm->bootstrap_arguments);
bsm->bootstrap_arguments = NULL;
}
free (bsm);
}
}
|
R_API void r_bin_java_bootstrap_method_free(void /*/RBinJavaBootStrapMethod*/ *b) {
RBinJavaBootStrapMethod *bsm = b;
RListIter *iter, *iter_tmp;
RBinJavaBootStrapArgument *obj = NULL;
if (bsm) {
if (bsm->bootstrap_arguments) {
r_list_foreach_safe (bsm->bootstrap_arguments, iter, iter_tmp, obj) {
if (obj) {
r_bin_java_bootstrap_method_argument_free (obj);
}
}
r_list_free (bsm->bootstrap_arguments);
bsm->bootstrap_arguments = NULL;
}
free (bsm);
}
}
|
C
|
radare2
| 0 |