CVE ID | CVE Page | CWE ID | codeLink | commit_id | commit_message | func_after | func_before | lang | project | vul
---|---|---|---|---|---|---|---|---|---|---|
CVE-2016-5158
|
https://www.cvedetails.com/cve/CVE-2016-5158/
|
CWE-190
|
https://github.com/chromium/chromium/commit/6a310d99a741f9ba5e4e537c5ec49d3adbe5876f
|
6a310d99a741f9ba5e4e537c5ec49d3adbe5876f
|
Position info (item n of m) incorrect if hidden focusable items in list
Bug: 836997
Change-Id: I971fa7076f72d51829b36af8e379260d48ca25ec
Reviewed-on: https://chromium-review.googlesource.com/c/1450235
Commit-Queue: Aaron Leventhal <[email protected]>
Reviewed-by: Nektarios Paisios <[email protected]>
Cr-Commit-Position: refs/heads/master@{#628890}
|
void DumpAccessibilityTreeTest::AddDefaultFilters(
std::vector<Filter>* filters) {
AddFilter(filters, "ALERT*");
AddFilter(filters, "ANIMATED*");
AddFilter(filters, "BUSY");
AddFilter(filters, "CHECKED");
AddFilter(filters, "COLLAPSED");
AddFilter(filters, "EXPANDED");
AddFilter(filters, "FLOATING");
AddFilter(filters, "FOCUSABLE");
AddFilter(filters, "HASPOPUP");
AddFilter(filters, "INVISIBLE");
AddFilter(filters, "MARQUEED");
AddFilter(filters, "MIXED");
AddFilter(filters, "MOVEABLE");
AddFilter(filters, "MULTISELECTABLE");
AddFilter(filters, "PRESSED");
AddFilter(filters, "PROTECTED");
AddFilter(filters, "READONLY");
AddFilter(filters, "SELECTED");
AddFilter(filters, "SIZEABLE");
AddFilter(filters, "TRAVERSED");
AddFilter(filters, "UNAVAILABLE");
AddFilter(filters, "IA2_STATE_ACTIVE");
AddFilter(filters, "IA2_STATE_ARMED");
AddFilter(filters, "IA2_STATE_CHECKABLE");
AddFilter(filters, "IA2_STATE_DEFUNCT");
AddFilter(filters, "IA2_STATE_HORIZONTAL");
AddFilter(filters, "IA2_STATE_ICONIFIED");
AddFilter(filters, "IA2_STATE_INVALID_ENTRY");
AddFilter(filters, "IA2_STATE_MODAL");
AddFilter(filters, "IA2_STATE_MULTI_LINE");
AddFilter(filters, "IA2_STATE_PINNED");
AddFilter(filters, "IA2_STATE_REQUIRED");
AddFilter(filters, "IA2_STATE_STALE");
AddFilter(filters, "IA2_STATE_TRANSIENT");
AddFilter(filters, "FOCUSED", Filter::DENY);
AddFilter(filters, "HOTTRACKED", Filter::DENY);
AddFilter(filters, "OFFSCREEN", Filter::DENY);
AddFilter(filters, "value='*'");
AddFilter(filters, "value='http*'", Filter::DENY);
AddFilter(filters, "layout-guess:*", Filter::ALLOW);
AddFilter(filters, "check*");
AddFilter(filters, "descript*");
AddFilter(filters, "collapsed");
AddFilter(filters, "haspopup");
AddFilter(filters, "horizontal");
AddFilter(filters, "invisible");
AddFilter(filters, "multiline");
AddFilter(filters, "multiselectable");
AddFilter(filters, "protected");
AddFilter(filters, "required");
AddFilter(filters, "select*");
AddFilter(filters, "visited");
AddFilter(filters, "busy=true");
AddFilter(filters, "valueForRange*");
AddFilter(filters, "minValueForRange*");
AddFilter(filters, "maxValueForRange*");
AddFilter(filters, "hierarchicalLevel*");
AddFilter(filters, "autoComplete*");
AddFilter(filters, "restriction*");
AddFilter(filters, "keyShortcuts*");
AddFilter(filters, "activedescendantId*");
AddFilter(filters, "controlsIds*");
AddFilter(filters, "flowtoIds*");
AddFilter(filters, "detailsIds*");
AddFilter(filters, "invalidState=*");
AddFilter(filters, "invalidState=false",
Filter::DENY); // Don't show false value
AddFilter(filters, "roleDescription=*");
AddFilter(filters, "errormessageId=*");
AddFilter(filters, "AXValueAutofill*");
AddFilter(filters, "AXAutocomplete*");
AddFilter(filters, "hint=*");
AddFilter(filters, "interesting", Filter::DENY);
AddFilter(filters, "has_character_locations", Filter::DENY);
AddFilter(filters, "has_image", Filter::DENY);
AddFilter(filters, "*=''", Filter::DENY);
AddFilter(filters, "name=*", Filter::ALLOW_EMPTY);
}
|
void DumpAccessibilityTreeTest::AddDefaultFilters(
std::vector<Filter>* filters) {
AddFilter(filters, "ALERT*");
AddFilter(filters, "ANIMATED*");
AddFilter(filters, "BUSY");
AddFilter(filters, "CHECKED");
AddFilter(filters, "COLLAPSED");
AddFilter(filters, "EXPANDED");
AddFilter(filters, "FLOATING");
AddFilter(filters, "FOCUSABLE");
AddFilter(filters, "HASPOPUP");
AddFilter(filters, "INVISIBLE");
AddFilter(filters, "MARQUEED");
AddFilter(filters, "MIXED");
AddFilter(filters, "MOVEABLE");
AddFilter(filters, "MULTISELECTABLE");
AddFilter(filters, "PRESSED");
AddFilter(filters, "PROTECTED");
AddFilter(filters, "READONLY");
AddFilter(filters, "SELECTED");
AddFilter(filters, "SIZEABLE");
AddFilter(filters, "TRAVERSED");
AddFilter(filters, "UNAVAILABLE");
AddFilter(filters, "IA2_STATE_ACTIVE");
AddFilter(filters, "IA2_STATE_ARMED");
AddFilter(filters, "IA2_STATE_CHECKABLE");
AddFilter(filters, "IA2_STATE_DEFUNCT");
AddFilter(filters, "IA2_STATE_HORIZONTAL");
AddFilter(filters, "IA2_STATE_ICONIFIED");
AddFilter(filters, "IA2_STATE_INVALID_ENTRY");
AddFilter(filters, "IA2_STATE_MODAL");
AddFilter(filters, "IA2_STATE_MULTI_LINE");
AddFilter(filters, "IA2_STATE_PINNED");
AddFilter(filters, "IA2_STATE_REQUIRED");
AddFilter(filters, "IA2_STATE_STALE");
AddFilter(filters, "IA2_STATE_TRANSIENT");
AddFilter(filters, "FOCUSED", Filter::DENY);
AddFilter(filters, "HOTTRACKED", Filter::DENY);
AddFilter(filters, "OFFSCREEN", Filter::DENY);
AddFilter(filters, "value='*'");
AddFilter(filters, "value='http*'", Filter::DENY);
AddFilter(filters, "layout-guess:*", Filter::ALLOW);
AddFilter(filters, "check*");
AddFilter(filters, "descript*");
AddFilter(filters, "collapsed");
AddFilter(filters, "haspopup");
AddFilter(filters, "horizontal");
AddFilter(filters, "invisible");
AddFilter(filters, "multiline");
AddFilter(filters, "multiselectable");
AddFilter(filters, "protected");
AddFilter(filters, "required");
AddFilter(filters, "select*");
AddFilter(filters, "visited");
AddFilter(filters, "busy=true");
AddFilter(filters, "valueForRange*");
AddFilter(filters, "minValueForRange*");
AddFilter(filters, "maxValueForRange*");
AddFilter(filters, "hierarchicalLevel*");
AddFilter(filters, "autoComplete*");
AddFilter(filters, "restriction*");
AddFilter(filters, "keyShortcuts*");
AddFilter(filters, "activedescendantId*");
AddFilter(filters, "controlsIds*");
AddFilter(filters, "flowtoIds*");
AddFilter(filters, "detailsIds*");
AddFilter(filters, "invalidState=*");
AddFilter(filters, "invalidState=false",
Filter::DENY); // Don't show false value
AddFilter(filters, "roleDescription=*");
AddFilter(filters, "errormessageId=*");
AddFilter(filters, "AXValueAutofill*");
AddFilter(filters, "AXAutocomplete*");
AddFilter(filters, "hint=*");
AddFilter(filters, "interesting", Filter::DENY);
AddFilter(filters, "has_character_locations", Filter::DENY);
AddFilter(filters, "has_image", Filter::DENY);
AddFilter(filters, "*=''", Filter::DENY);
AddFilter(filters, "name=*", Filter::ALLOW_EMPTY);
}
|
C
|
Chrome
| 0 |
CVE-2012-2875
|
https://www.cvedetails.com/cve/CVE-2012-2875/
| null |
https://github.com/chromium/chromium/commit/1266ba494530a267ec8a21442ea1b5cae94da4fb
|
1266ba494530a267ec8a21442ea1b5cae94da4fb
|
Introduce XGetImage() for GrabWindowSnapshot() in ChromeOS.
BUG=119492
TEST=manually done
Review URL: https://chromiumcodereview.appspot.com/10386124
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@137556 0039d316-1c4b-4281-b951-d872f2087c98
|
void RootWindow::OnHostResized(const gfx::Size& size_in_pixel) {
DispatchHeldMouseMove();
compositor_->SetScaleAndSize(GetDeviceScaleFactorFromMonitor(this),
size_in_pixel);
gfx::Size old(bounds().size());
gfx::Rect bounds(ui::ConvertSizeToDIP(layer(), size_in_pixel));
layer()->transform().TransformRect(&bounds);
SetBounds(bounds);
FOR_EACH_OBSERVER(RootWindowObserver, observers_,
OnRootWindowResized(this, old));
}
|
void RootWindow::OnHostResized(const gfx::Size& size_in_pixel) {
DispatchHeldMouseMove();
compositor_->SetScaleAndSize(GetDeviceScaleFactorFromMonitor(this),
size_in_pixel);
gfx::Size old(bounds().size());
gfx::Rect bounds(ui::ConvertSizeToDIP(layer(), size_in_pixel));
layer()->transform().TransformRect(&bounds);
SetBounds(bounds);
FOR_EACH_OBSERVER(RootWindowObserver, observers_,
OnRootWindowResized(this, old));
}
|
C
|
Chrome
| 0 |
CVE-2018-1000039
|
https://www.cvedetails.com/cve/CVE-2018-1000039/
|
CWE-416
|
http://git.ghostscript.com/?p=mupdf.git;a=commitdiff;h=4dcc6affe04368461310a21238f7e1871a752a05;hp=8ec561d1bccc46e9db40a9f61310cd8b3763914e
|
4dcc6affe04368461310a21238f7e1871a752a05
| null |
static void pdf_run_Td(fz_context *ctx, pdf_processor *proc, float tx, float ty)
{
pdf_run_processor *pr = (pdf_run_processor *)proc;
pdf_tos_translate(&pr->tos, tx, ty);
}
|
static void pdf_run_Td(fz_context *ctx, pdf_processor *proc, float tx, float ty)
{
pdf_run_processor *pr = (pdf_run_processor *)proc;
pdf_tos_translate(&pr->tos, tx, ty);
}
|
C
|
ghostscript
| 0 |
CVE-2012-2895
|
https://www.cvedetails.com/cve/CVE-2012-2895/
|
CWE-119
|
https://github.com/chromium/chromium/commit/16dcd30c215801941d9890859fd79a234128fc3e
|
16dcd30c215801941d9890859fd79a234128fc3e
|
Refactors to simplify rename pathway in DownloadFileManager.
This is https://chromiumcodereview.appspot.com/10668004 / r144817 (reverted
due to CrOS failure) with the completion logic moved to after the
auto-opening. The tests that test the auto-opening (for web store install)
were waiting for download completion to check install, and hence were
failing when completion was moved earlier.
Doing this right would probably require another state (OPENED).
BUG=123998
BUG-134930
[email protected]
Review URL: https://chromiumcodereview.appspot.com/10701040
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@145157 0039d316-1c4b-4281-b951-d872f2087c98
|
void ChromeDownloadManagerDelegate::GetSaveDir(WebContents* web_contents,
FilePath* website_save_dir,
FilePath* download_save_dir,
bool* skip_dir_check) {
Profile* profile =
Profile::FromBrowserContext(web_contents->GetBrowserContext());
PrefService* prefs = profile->GetPrefs();
if (!prefs->FindPreference(prefs::kSaveFileDefaultDirectory)) {
DCHECK(prefs->FindPreference(prefs::kDownloadDefaultDirectory));
FilePath default_save_path = prefs->GetFilePath(
prefs::kDownloadDefaultDirectory);
prefs->RegisterFilePathPref(prefs::kSaveFileDefaultDirectory,
default_save_path,
PrefService::UNSYNCABLE_PREF);
}
*website_save_dir = prefs->GetFilePath(prefs::kSaveFileDefaultDirectory);
DCHECK(!website_save_dir->empty());
*download_save_dir = prefs->GetFilePath(prefs::kDownloadDefaultDirectory);
*skip_dir_check = false;
#if defined(OS_CHROMEOS)
*skip_dir_check = gdata::util::IsUnderGDataMountPoint(*website_save_dir);
#endif
}
|
void ChromeDownloadManagerDelegate::GetSaveDir(WebContents* web_contents,
FilePath* website_save_dir,
FilePath* download_save_dir,
bool* skip_dir_check) {
Profile* profile =
Profile::FromBrowserContext(web_contents->GetBrowserContext());
PrefService* prefs = profile->GetPrefs();
if (!prefs->FindPreference(prefs::kSaveFileDefaultDirectory)) {
DCHECK(prefs->FindPreference(prefs::kDownloadDefaultDirectory));
FilePath default_save_path = prefs->GetFilePath(
prefs::kDownloadDefaultDirectory);
prefs->RegisterFilePathPref(prefs::kSaveFileDefaultDirectory,
default_save_path,
PrefService::UNSYNCABLE_PREF);
}
*website_save_dir = prefs->GetFilePath(prefs::kSaveFileDefaultDirectory);
DCHECK(!website_save_dir->empty());
*download_save_dir = prefs->GetFilePath(prefs::kDownloadDefaultDirectory);
*skip_dir_check = false;
#if defined(OS_CHROMEOS)
*skip_dir_check = gdata::util::IsUnderGDataMountPoint(*website_save_dir);
#endif
}
|
C
|
Chrome
| 0 |
CVE-2018-19489
|
https://www.cvedetails.com/cve/CVE-2018-19489/
|
CWE-362
|
https://git.qemu.org/?p=qemu.git;a=commit;h=1d20398694a3b67a388d955b7a945ba4aa90a8a8
|
1d20398694a3b67a388d955b7a945ba4aa90a8a8
| null |
static void coroutine_fn v9fs_read(void *opaque)
{
int32_t fid;
uint64_t off;
ssize_t err = 0;
int32_t count = 0;
size_t offset = 7;
uint32_t max_count;
V9fsFidState *fidp;
V9fsPDU *pdu = opaque;
V9fsState *s = pdu->s;
err = pdu_unmarshal(pdu, offset, "dqd", &fid, &off, &max_count);
if (err < 0) {
goto out_nofid;
}
trace_v9fs_read(pdu->tag, pdu->id, fid, off, max_count);
fidp = get_fid(pdu, fid);
if (fidp == NULL) {
err = -EINVAL;
goto out_nofid;
}
if (fidp->fid_type == P9_FID_DIR) {
if (off == 0) {
v9fs_co_rewinddir(pdu, fidp);
}
count = v9fs_do_readdir_with_stat(pdu, fidp, max_count);
if (count < 0) {
err = count;
goto out;
}
err = pdu_marshal(pdu, offset, "d", count);
if (err < 0) {
goto out;
}
err += offset + count;
} else if (fidp->fid_type == P9_FID_FILE) {
QEMUIOVector qiov_full;
QEMUIOVector qiov;
int32_t len;
v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4, max_count, false);
qemu_iovec_init(&qiov, qiov_full.niov);
do {
qemu_iovec_reset(&qiov);
qemu_iovec_concat(&qiov, &qiov_full, count, qiov_full.size - count);
if (0) {
print_sg(qiov.iov, qiov.niov);
}
/* Loop in case of EINTR */
do {
len = v9fs_co_preadv(pdu, fidp, qiov.iov, qiov.niov, off);
if (len >= 0) {
off += len;
count += len;
}
} while (len == -EINTR && !pdu->cancelled);
if (len < 0) {
/* IO error return the error */
err = len;
goto out_free_iovec;
}
} while (count < max_count && len > 0);
err = pdu_marshal(pdu, offset, "d", count);
if (err < 0) {
goto out_free_iovec;
}
err += offset + count;
out_free_iovec:
qemu_iovec_destroy(&qiov);
qemu_iovec_destroy(&qiov_full);
} else if (fidp->fid_type == P9_FID_XATTR) {
err = v9fs_xattr_read(s, pdu, fidp, off, max_count);
} else {
err = -EINVAL;
}
trace_v9fs_read_return(pdu->tag, pdu->id, count, err);
out:
put_fid(pdu, fidp);
out_nofid:
pdu_complete(pdu, err);
}
|
static void coroutine_fn v9fs_read(void *opaque)
{
int32_t fid;
uint64_t off;
ssize_t err = 0;
int32_t count = 0;
size_t offset = 7;
uint32_t max_count;
V9fsFidState *fidp;
V9fsPDU *pdu = opaque;
V9fsState *s = pdu->s;
err = pdu_unmarshal(pdu, offset, "dqd", &fid, &off, &max_count);
if (err < 0) {
goto out_nofid;
}
trace_v9fs_read(pdu->tag, pdu->id, fid, off, max_count);
fidp = get_fid(pdu, fid);
if (fidp == NULL) {
err = -EINVAL;
goto out_nofid;
}
if (fidp->fid_type == P9_FID_DIR) {
if (off == 0) {
v9fs_co_rewinddir(pdu, fidp);
}
count = v9fs_do_readdir_with_stat(pdu, fidp, max_count);
if (count < 0) {
err = count;
goto out;
}
err = pdu_marshal(pdu, offset, "d", count);
if (err < 0) {
goto out;
}
err += offset + count;
} else if (fidp->fid_type == P9_FID_FILE) {
QEMUIOVector qiov_full;
QEMUIOVector qiov;
int32_t len;
v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4, max_count, false);
qemu_iovec_init(&qiov, qiov_full.niov);
do {
qemu_iovec_reset(&qiov);
qemu_iovec_concat(&qiov, &qiov_full, count, qiov_full.size - count);
if (0) {
print_sg(qiov.iov, qiov.niov);
}
/* Loop in case of EINTR */
do {
len = v9fs_co_preadv(pdu, fidp, qiov.iov, qiov.niov, off);
if (len >= 0) {
off += len;
count += len;
}
} while (len == -EINTR && !pdu->cancelled);
if (len < 0) {
/* IO error return the error */
err = len;
goto out_free_iovec;
}
} while (count < max_count && len > 0);
err = pdu_marshal(pdu, offset, "d", count);
if (err < 0) {
goto out_free_iovec;
}
err += offset + count;
out_free_iovec:
qemu_iovec_destroy(&qiov);
qemu_iovec_destroy(&qiov_full);
} else if (fidp->fid_type == P9_FID_XATTR) {
err = v9fs_xattr_read(s, pdu, fidp, off, max_count);
} else {
err = -EINVAL;
}
trace_v9fs_read_return(pdu->tag, pdu->id, count, err);
out:
put_fid(pdu, fidp);
out_nofid:
pdu_complete(pdu, err);
}
|
C
|
qemu
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/a03d4448faf2c40f4ef444a88cb9aace5b98e8c4
|
a03d4448faf2c40f4ef444a88cb9aace5b98e8c4
|
Introduce background.scripts feature for extension manifests.
This optimizes for the common use case where background pages
just include a reference to one or more script files and no
additional HTML.
BUG=107791
Review URL: http://codereview.chromium.org/9150008
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@117110 0039d316-1c4b-4281-b951-d872f2087c98
|
void TestingAutomationProvider::CloseBrowser(int browser_handle,
IPC::Message* reply_message) {
if (!browser_tracker_->ContainsHandle(browser_handle))
return;
Browser* browser = browser_tracker_->GetResource(browser_handle);
new BrowserClosedNotificationObserver(browser, this, reply_message);
browser->window()->Close();
}
|
void TestingAutomationProvider::CloseBrowser(int browser_handle,
IPC::Message* reply_message) {
if (!browser_tracker_->ContainsHandle(browser_handle))
return;
Browser* browser = browser_tracker_->GetResource(browser_handle);
new BrowserClosedNotificationObserver(browser, this, reply_message);
browser->window()->Close();
}
|
C
|
Chrome
| 0 |
CVE-2014-3610
|
https://www.cvedetails.com/cve/CVE-2014-3610/
|
CWE-264
|
https://github.com/torvalds/linux/commit/854e8bb1aa06c578c2c9145fa6bfe3680ef63b23
|
854e8bb1aa06c578c2c9145fa6bfe3680ef63b23
|
KVM: x86: Check non-canonical addresses upon WRMSR
Upon WRMSR, the CPU should inject #GP if a non-canonical value (address) is
written to certain MSRs. The behavior is "almost" identical for AMD and Intel
(ignoring MSRs that are not implemented in either architecture since they would
anyhow #GP). However, IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
non-canonical address is written on Intel but not on AMD (which ignores the top
32-bits).
Accordingly, this patch injects a #GP on the MSRs which behave identically on
Intel and AMD. To eliminate the differences between the architecutres, the
value which is written to IA32_SYSENTER_ESP and IA32_SYSENTER_EIP is turned to
canonical value before writing instead of injecting a #GP.
Some references from Intel and AMD manuals:
According to Intel SDM description of WRMSR instruction #GP is expected on
WRMSR "If the source register contains a non-canonical address and ECX
specifies one of the following MSRs: IA32_DS_AREA, IA32_FS_BASE, IA32_GS_BASE,
IA32_KERNEL_GS_BASE, IA32_LSTAR, IA32_SYSENTER_EIP, IA32_SYSENTER_ESP."
According to AMD manual instruction manual:
LSTAR/CSTAR (SYSCALL): "The WRMSR instruction loads the target RIP into the
LSTAR and CSTAR registers. If an RIP written by WRMSR is not in canonical
form, a general-protection exception (#GP) occurs."
IA32_GS_BASE and IA32_FS_BASE (WRFSBASE/WRGSBASE): "The address written to the
base field must be in canonical form or a #GP fault will occur."
IA32_KERNEL_GS_BASE (SWAPGS): "The address stored in the KernelGSbase MSR must
be in canonical form."
This patch fixes CVE-2014-3610.
Cc: [email protected]
Signed-off-by: Nadav Amit <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
static void svm_clear_vintr(struct vcpu_svm *svm)
{
clr_intercept(svm, INTERCEPT_VINTR);
}
|
static void svm_clear_vintr(struct vcpu_svm *svm)
{
clr_intercept(svm, INTERCEPT_VINTR);
}
|
C
|
linux
| 0 |
CVE-2019-9162
|
https://www.cvedetails.com/cve/CVE-2019-9162/
|
CWE-129
|
https://github.com/torvalds/linux/commit/c4c07b4d6fa1f11880eab8e076d3d060ef3f55fc
|
c4c07b4d6fa1f11880eab8e076d3d060ef3f55fc
|
netfilter: nf_nat_snmp_basic: add missing length checks in ASN.1 cbs
The generic ASN.1 decoder infrastructure doesn't guarantee that callbacks
will get as much data as they expect; callbacks have to check the `datalen`
parameter before looking at `data`. Make sure that snmp_version() and
snmp_helper() don't read/write beyond the end of the packet data.
(Also move the assignment to `pdata` down below the check to make it clear
that it isn't necessarily a pointer we can use before the `datalen` check.)
Fixes: cc2d58634e0f ("netfilter: nf_nat_snmp_basic: use asn1 decoder library")
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>
|
int snmp_helper(void *context, size_t hdrlen, unsigned char tag,
const void *data, size_t datalen)
{
struct snmp_ctx *ctx = (struct snmp_ctx *)context;
__be32 *pdata;
if (datalen != 4)
return -EINVAL;
pdata = (__be32 *)data;
if (*pdata == ctx->from) {
pr_debug("%s: %pI4 to %pI4\n", __func__,
(void *)&ctx->from, (void *)&ctx->to);
if (*ctx->check)
fast_csum(ctx, (unsigned char *)data - ctx->begin);
*pdata = ctx->to;
}
return 1;
}
|
int snmp_helper(void *context, size_t hdrlen, unsigned char tag,
const void *data, size_t datalen)
{
struct snmp_ctx *ctx = (struct snmp_ctx *)context;
__be32 *pdata = (__be32 *)data;
if (*pdata == ctx->from) {
pr_debug("%s: %pI4 to %pI4\n", __func__,
(void *)&ctx->from, (void *)&ctx->to);
if (*ctx->check)
fast_csum(ctx, (unsigned char *)data - ctx->begin);
*pdata = ctx->to;
}
return 1;
}
|
C
|
linux
| 1 |
null | null | null |
https://github.com/chromium/chromium/commit/df831400bcb63db4259b5858281b1727ba972a2a
|
df831400bcb63db4259b5858281b1727ba972a2a
|
WebKit2: Support window bounce when panning.
https://bugs.webkit.org/show_bug.cgi?id=58065
<rdar://problem/9244367>
Reviewed by Adam Roben.
Make gestureDidScroll synchronous, as once we scroll, we need to know
whether or not we are at the beginning or end of the scrollable document.
If we are at either end of the scrollable document, we call the Windows 7
API to bounce the window to give an indication that you are past an end
of the document.
* UIProcess/WebPageProxy.cpp:
(WebKit::WebPageProxy::gestureDidScroll): Pass a boolean for the reply, and return it.
* UIProcess/WebPageProxy.h:
* UIProcess/win/WebView.cpp:
(WebKit::WebView::WebView): Inititalize a new variable.
(WebKit::WebView::onGesture): Once we send the message to scroll, check if have gone to
an end of the document, and if we have, bounce the window.
* UIProcess/win/WebView.h:
* WebProcess/WebPage/WebPage.h:
* WebProcess/WebPage/WebPage.messages.in: GestureDidScroll is now sync.
* WebProcess/WebPage/win/WebPageWin.cpp:
(WebKit::WebPage::gestureDidScroll): When we are done scrolling, check if we have a vertical
scrollbar and if we are at the beginning or the end of the scrollable document.
git-svn-id: svn://svn.chromium.org/blink/trunk@83197 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void WebPageProxy::initializeFormClient(const WKPageFormClient* formClient)
{
m_formClient.initialize(formClient);
}
|
void WebPageProxy::initializeFormClient(const WKPageFormClient* formClient)
{
m_formClient.initialize(formClient);
}
|
C
|
Chrome
| 0 |
CVE-2016-4301
|
https://www.cvedetails.com/cve/CVE-2016-4301/
|
CWE-119
|
https://github.com/libarchive/libarchive/commit/a550daeecf6bc689ade371349892ea17b5b97c77
|
a550daeecf6bc689ade371349892ea17b5b97c77
|
Fix libarchive/archive_read_support_format_mtree.c:1388:11: error: array subscript is above array bounds
|
free_options(struct mtree_option *head)
{
struct mtree_option *next;
for (; head != NULL; head = next) {
next = head->next;
free(head->value);
free(head);
}
}
|
free_options(struct mtree_option *head)
{
struct mtree_option *next;
for (; head != NULL; head = next) {
next = head->next;
free(head->value);
free(head);
}
}
|
C
|
libarchive
| 0 |
CVE-2016-10066
|
https://www.cvedetails.com/cve/CVE-2016-10066/
|
CWE-119
|
https://github.com/ImageMagick/ImageMagick/commit/f6e9d0d9955e85bdd7540b251cd50d598dacc5e6
|
f6e9d0d9955e85bdd7540b251cd50d598dacc5e6
| null |
static MagickBooleanType IsPFA(const unsigned char *magick,const size_t length)
{
if (length < 14)
return(MagickFalse);
if (LocaleNCompare((char *) magick,"%!PS-AdobeFont",14) == 0)
return(MagickTrue);
return(MagickFalse);
}
|
static MagickBooleanType IsPFA(const unsigned char *magick,const size_t length)
{
if (length < 14)
return(MagickFalse);
if (LocaleNCompare((char *) magick,"%!PS-AdobeFont",14) == 0)
return(MagickTrue);
return(MagickFalse);
}
|
C
|
ImageMagick
| 0 |
CVE-2018-17205
|
https://www.cvedetails.com/cve/CVE-2018-17205/
|
CWE-617
|
https://github.com/openvswitch/ovs/commit/0befd1f3745055c32940f5faf9559be6a14395e6
|
0befd1f3745055c32940f5faf9559be6a14395e6
|
ofproto: Fix OVS crash when reverting old flows in bundle commit
During bundle commit flows which are added in bundle are applied
to ofproto in-order. In case if a flow cannot be added (e.g. flow
action is go-to group id which does not exist), OVS tries to
revert back all previous flows which were successfully applied
from the same bundle. This is possible since OVS maintains list
of old flows which were replaced by flows from the bundle.
While reinserting old flows ovs asserts due to check on rule
state != RULE_INITIALIZED. This will work only for new flows, but
for old flow the rule state will be RULE_REMOVED. This is causing
an assert and OVS crash.
The ovs assert check should be modified to != RULE_INSERTED to prevent
any existing rule being re-inserted and allow new rules and old rules
(in case of revert) to get inserted.
Here is an example to trigger the assert:
$ ovs-vsctl add-br br-test -- set Bridge br-test datapath_type=netdev
$ cat flows.txt
flow add table=1,priority=0,in_port=2,actions=NORMAL
flow add table=1,priority=0,in_port=3,actions=NORMAL
$ ovs-ofctl dump-flows -OOpenflow13 br-test
cookie=0x0, duration=2.465s, table=1, n_packets=0, n_bytes=0, priority=0,in_port=2 actions=NORMAL
cookie=0x0, duration=2.465s, table=1, n_packets=0, n_bytes=0, priority=0,in_port=3 actions=NORMAL
$ cat flow-modify.txt
flow modify table=1,priority=0,in_port=2,actions=drop
flow modify table=1,priority=0,in_port=3,actions=group:10
$ ovs-ofctl bundle br-test flow-modify.txt -OOpenflow13
First flow rule will be modified since it is a valid rule. However second
rule is invalid since no group with id 10 exists. Bundle commit tries to
revert (insert) the first rule to old flow which results in ovs_assert at
ofproto_rule_insert__() since old rule->state = RULE_REMOVED.
Signed-off-by: Vishal Deep Ajmera <[email protected]>
Signed-off-by: Ben Pfaff <[email protected]>
|
OVS_REQUIRES(ofproto_mutex)
{
enum ofperr error;
ogm->new_group = NULL;
group_collection_init(&ogm->old_groups);
switch (ogm->gm.command) {
case OFPGC11_ADD:
error = add_group_start(ofproto, ogm);
break;
case OFPGC11_MODIFY:
error = modify_group_start(ofproto, ogm);
break;
case OFPGC11_ADD_OR_MOD:
error = add_or_modify_group_start(ofproto, ogm);
break;
case OFPGC11_DELETE:
delete_groups_start(ofproto, ogm);
error = 0;
break;
case OFPGC15_INSERT_BUCKET:
error = modify_group_start(ofproto, ogm);
break;
case OFPGC15_REMOVE_BUCKET:
error = modify_group_start(ofproto, ogm);
break;
default:
if (ogm->gm.command > OFPGC11_DELETE) {
VLOG_INFO_RL(&rl, "%s: Invalid group_mod command type %d",
ofproto->name, ogm->gm.command);
}
error = OFPERR_OFPGMFC_BAD_COMMAND;
break;
}
return error;
}
|
OVS_REQUIRES(ofproto_mutex)
{
enum ofperr error;
ogm->new_group = NULL;
group_collection_init(&ogm->old_groups);
switch (ogm->gm.command) {
case OFPGC11_ADD:
error = add_group_start(ofproto, ogm);
break;
case OFPGC11_MODIFY:
error = modify_group_start(ofproto, ogm);
break;
case OFPGC11_ADD_OR_MOD:
error = add_or_modify_group_start(ofproto, ogm);
break;
case OFPGC11_DELETE:
delete_groups_start(ofproto, ogm);
error = 0;
break;
case OFPGC15_INSERT_BUCKET:
error = modify_group_start(ofproto, ogm);
break;
case OFPGC15_REMOVE_BUCKET:
error = modify_group_start(ofproto, ogm);
break;
default:
if (ogm->gm.command > OFPGC11_DELETE) {
VLOG_INFO_RL(&rl, "%s: Invalid group_mod command type %d",
ofproto->name, ogm->gm.command);
}
error = OFPERR_OFPGMFC_BAD_COMMAND;
break;
}
return error;
}
|
C
|
ovs
| 0 |
CVE-2017-5009
|
https://www.cvedetails.com/cve/CVE-2017-5009/
|
CWE-119
|
https://github.com/chromium/chromium/commit/1c40f9042ae2d6ee7483d72998aabb5e73b2ff60
|
1c40f9042ae2d6ee7483d72998aabb5e73b2ff60
|
DevTools: send proper resource type in Network.RequestWillBeSent
This patch plumbs resoure type into the DispatchWillSendRequest
instrumenation. This allows us to report accurate type in
Network.RequestWillBeSent event, instead of "Other", that we report
today.
BUG=765501
R=dgozman
Change-Id: I0134c08b841e8dd247fdc8ff208bfd51e462709c
Reviewed-on: https://chromium-review.googlesource.com/667504
Reviewed-by: Pavel Feldman <[email protected]>
Reviewed-by: Dmitry Gozman <[email protected]>
Commit-Queue: Andrey Lushnikov <[email protected]>
Cr-Commit-Position: refs/heads/master@{#507936}
|
void ProvideWorkerFetchContextToWorker(
WorkerClients* clients,
std::unique_ptr<WebWorkerFetchContext> web_context) {
DCHECK(clients);
WorkerFetchContextHolder::ProvideTo(
*clients, WorkerFetchContextHolder::SupplementName(),
new WorkerFetchContextHolder(std::move(web_context)));
}
|
void ProvideWorkerFetchContextToWorker(
WorkerClients* clients,
std::unique_ptr<WebWorkerFetchContext> web_context) {
DCHECK(clients);
WorkerFetchContextHolder::ProvideTo(
*clients, WorkerFetchContextHolder::SupplementName(),
new WorkerFetchContextHolder(std::move(web_context)));
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/610f904d8215075c4681be4eb413f4348860bf9f
|
610f904d8215075c4681be4eb413f4348860bf9f
|
Retrieve per host storage usage from QuotaManager.
[email protected]
BUG=none
TEST=QuotaManagerTest.GetUsage
Review URL: http://codereview.chromium.org/8079004
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@103921 0039d316-1c4b-4281-b951-d872f2087c98
|
QuotaCallback* NewWaitableGlobalQuotaCallback() {
++waiting_callbacks_;
return callback_factory_.NewCallback(
&UsageAndQuotaDispatcherTask::DidGetGlobalQuota);
}
|
QuotaCallback* NewWaitableGlobalQuotaCallback() {
++waiting_callbacks_;
return callback_factory_.NewCallback(
&UsageAndQuotaDispatcherTask::DidGetGlobalQuota);
}
|
C
|
Chrome
| 0 |
CVE-2016-3839
|
https://www.cvedetails.com/cve/CVE-2016-3839/
|
CWE-284
|
https://android.googlesource.com/platform/system/bt/+/472271b153c5dc53c28beac55480a8d8434b2d5c
|
472271b153c5dc53c28beac55480a8d8434b2d5c
|
DO NOT MERGE Fix potential DoS caused by delivering signal to BT process
Bug: 28885210
Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360
Conflicts:
btif/co/bta_hh_co.c
btif/src/btif_core.c
Merge conflict resolution of ag/1161415 (referencing ag/1164670)
- Directly into mnc-mr2-release
|
void btif_hh_disconnect(bt_bdaddr_t *bd_addr)
{
btif_hh_device_t *p_dev;
p_dev = btif_hh_find_connected_dev_by_bda(bd_addr);
if (p_dev != NULL)
{
BTA_HhClose(p_dev->dev_handle);
}
else
BTIF_TRACE_DEBUG("%s-- Error: device not connected:",__FUNCTION__);
}
|
void btif_hh_disconnect(bt_bdaddr_t *bd_addr)
{
btif_hh_device_t *p_dev;
p_dev = btif_hh_find_connected_dev_by_bda(bd_addr);
if (p_dev != NULL)
{
BTA_HhClose(p_dev->dev_handle);
}
else
BTIF_TRACE_DEBUG("%s-- Error: device not connected:",__FUNCTION__);
}
|
C
|
Android
| 0 |
CVE-2010-1149
|
https://www.cvedetails.com/cve/CVE-2010-1149/
|
CWE-200
|
https://cgit.freedesktop.org/udisks/commit/?id=0fcc7cb3b66f23fac53ae08647aa0007a2bd56c4
|
0fcc7cb3b66f23fac53ae08647aa0007a2bd56c4
| null |
lvm2_lv_create_filesystem_create_hook (DBusGMethodInvocation *context,
Device *device,
gboolean filesystem_create_succeeded,
gpointer user_data)
{
if (!filesystem_create_succeeded)
{
/* dang.. FilesystemCreate already reported an error */
}
else
{
/* it worked.. */
dbus_g_method_return (context, device->priv->object_path);
}
}
|
lvm2_lv_create_filesystem_create_hook (DBusGMethodInvocation *context,
Device *device,
gboolean filesystem_create_succeeded,
gpointer user_data)
{
if (!filesystem_create_succeeded)
{
/* dang.. FilesystemCreate already reported an error */
}
else
{
/* it worked.. */
dbus_g_method_return (context, device->priv->object_path);
}
}
|
C
|
udisks
| 0 |
CVE-2016-1670
|
https://www.cvedetails.com/cve/CVE-2016-1670/
|
CWE-362
|
https://github.com/chromium/chromium/commit/1af4fada49c4f3890f16daac31d38379a9d782b2
|
1af4fada49c4f3890f16daac31d38379a9d782b2
|
Block a compromised renderer from reusing request ids.
BUG=578882
Review URL: https://codereview.chromium.org/1608573002
Cr-Commit-Position: refs/heads/master@{#372547}
|
int GetCertID(net::URLRequest* request, int child_id) {
if (request->ssl_info().cert.get()) {
return CertStore::GetInstance()->StoreCert(request->ssl_info().cert.get(),
child_id);
}
return 0;
}
|
int GetCertID(net::URLRequest* request, int child_id) {
if (request->ssl_info().cert.get()) {
return CertStore::GetInstance()->StoreCert(request->ssl_info().cert.get(),
child_id);
}
return 0;
}
|
C
|
Chrome
| 0 |
CVE-2015-5283
|
https://www.cvedetails.com/cve/CVE-2015-5283/
|
CWE-119
|
https://github.com/torvalds/linux/commit/8e2d61e0aed2b7c4ecb35844fe07e0b2b762dee4
|
8e2d61e0aed2b7c4ecb35844fe07e0b2b762dee4
|
sctp: fix race on protocol/netns initialization
Consider sctp module is unloaded and is being requested because an user
is creating a sctp socket.
During initialization, sctp will add the new protocol type and then
initialize pernet subsys:
status = sctp_v4_protosw_init();
if (status)
goto err_protosw_init;
status = sctp_v6_protosw_init();
if (status)
goto err_v6_protosw_init;
status = register_pernet_subsys(&sctp_net_ops);
The problem is that after those calls to sctp_v{4,6}_protosw_init(), it
is possible for userspace to create SCTP sockets like if the module is
already fully loaded. If that happens, one of the possible effects is
that we will have readers for net->sctp.local_addr_list list earlier
than expected and sctp_net_init() does not take precautions while
dealing with that list, leading to a potential panic but not limited to
that, as sctp_sock_init() will copy a bunch of blank/partially
initialized values from net->sctp.
The race happens like this:
CPU 0 | CPU 1
socket() |
__sock_create | socket()
inet_create | __sock_create
list_for_each_entry_rcu( |
answer, &inetsw[sock->type], |
list) { | inet_create
/* no hits */ |
if (unlikely(err)) { |
... |
request_module() |
/* socket creation is blocked |
* the module is fully loaded |
*/ |
sctp_init |
sctp_v4_protosw_init |
inet_register_protosw |
list_add_rcu(&p->list, |
last_perm); |
| list_for_each_entry_rcu(
| answer, &inetsw[sock->type],
sctp_v6_protosw_init | list) {
| /* hit, so assumes protocol
| * is already loaded
| */
| /* socket creation continues
| * before netns is initialized
| */
register_pernet_subsys |
Simply inverting the initialization order between
register_pernet_subsys() and sctp_v4_protosw_init() is not possible
because register_pernet_subsys() will create a control sctp socket, so
the protocol must be already visible by then. Deferring the socket
creation to a work-queue is not good specially because we loose the
ability to handle its errors.
So, as suggested by Vlad, the fix is to split netns initialization in
two moments: defaults and control socket, so that the defaults are
already loaded by when we register the protocol, while control socket
initialization is kept at the same moment it is today.
Fixes: 4db67e808640 ("sctp: Make the address lists per network namespace")
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: Marcelo Ricardo Leitner <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static void sctp_v4_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
{
inet_sk(sk)->inet_rcv_saddr = addr->v4.sin_addr.s_addr;
}
|
static void sctp_v4_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
{
inet_sk(sk)->inet_rcv_saddr = addr->v4.sin_addr.s_addr;
}
|
C
|
linux
| 0 |
CVE-2014-3690
|
https://www.cvedetails.com/cve/CVE-2014-3690/
|
CWE-399
|
https://github.com/torvalds/linux/commit/d974baa398f34393db76be45f7d4d04fbdbb4a0a
|
d974baa398f34393db76be45f7d4d04fbdbb4a0a
|
x86,kvm,vmx: Preserve CR4 across VM entry
CR4 isn't constant; at least the TSD and PCE bits can vary.
TBH, treating CR0 and CR3 as constant scares me a bit, too, but it looks
like it's correct.
This adds a branch and a read from cr4 to each vm entry. Because it is
extremely likely that consecutive entries into the same vcpu will have
the same host cr4 value, this fixes up the vmcs instead of restoring cr4
after the fact. A subsequent patch will add a kernel-wide cr4 shadow,
reducing the overhead in the common case to just two memory reads and a
branch.
Signed-off-by: Andy Lutomirski <[email protected]>
Acked-by: Paolo Bonzini <[email protected]>
Cc: [email protected]
Cc: Petr Matousek <[email protected]>
Cc: Gleb Natapov <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static __always_inline u32 vmcs_read32(unsigned long field)
{
return vmcs_readl(field);
}
|
static __always_inline u32 vmcs_read32(unsigned long field)
{
return vmcs_readl(field);
}
|
C
|
linux
| 0 |
CVE-2011-4621
|
https://www.cvedetails.com/cve/CVE-2011-4621/
| null |
https://github.com/torvalds/linux/commit/f26f9aff6aaf67e9a430d16c266f91b13a5bff64
|
f26f9aff6aaf67e9a430d16c266f91b13a5bff64
|
Sched: fix skip_clock_update optimization
idle_balance() drops/retakes rq->lock, leaving the previous task
vulnerable to set_tsk_need_resched(). Clear it after we return
from balancing instead, and in setup_thread_stack() as well, so
no successfully descheduled or never scheduled task has it set.
Need resched confused the skip_clock_update logic, which assumes
that the next call to update_rq_clock() will come nearly immediately
after being set. Make the optimization robust against the waking
a sleeper before it sucessfully deschedules case by checking that
the current task has not been dequeued before setting the flag,
since it is that useless clock update we're trying to save, and
clear unconditionally in schedule() proper instead of conditionally
in put_prev_task().
Signed-off-by: Mike Galbraith <[email protected]>
Reported-by: Bjoern B. Brandenburg <[email protected]>
Tested-by: Yong Zhang <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Cc: [email protected]
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
|
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
{
__wake_up_common(q, mode, 1, 0, key);
}
|
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
{
__wake_up_common(q, mode, 1, 0, key);
}
|
C
|
linux
| 0 |
CVE-2018-20762
|
https://www.cvedetails.com/cve/CVE-2018-20762/
|
CWE-119
|
https://github.com/gpac/gpac/commit/35ab4475a7df9b2a4bcab235e379c0c3ec543658
|
35ab4475a7df9b2a4bcab235e379c0c3ec543658
|
fix some overflows due to strcpy
fixes #1184, #1186, #1187 among other things
|
void gf_sm_update_bitwrapper_buffer(GF_Node *node, const char *fileName)
{
u32 data_size = 0;
char *data = NULL;
char *buffer;
M_BitWrapper *bw = (M_BitWrapper *)node;
if (!bw->buffer.buffer) return;
buffer = bw->buffer.buffer;
if (!strnicmp(buffer, "file://", 7)) {
char *url = gf_url_concatenate(fileName, buffer+7);
if (url) {
FILE *f = gf_fopen(url, "rb");
if (f) {
fseek(f, 0, SEEK_END);
data_size = (u32) ftell(f);
fseek(f, 0, SEEK_SET);
data = gf_malloc(sizeof(char)*data_size);
if (data) {
if (fread(data, 1, data_size, f) != data_size) {
GF_LOG(GF_LOG_ERROR, GF_LOG_SCENE, ("[Scene Manager] error reading bitwrapper file %s\n", url));
}
}
gf_fclose(f);
}
gf_free(url);
}
} else {
Bool base_64 = 0;
if (!strnicmp(buffer, "data:application/octet-string", 29)) {
char *sep = strchr(bw->buffer.buffer, ',');
base_64 = strstr(bw->buffer.buffer, ";base64") ? 1 : 0;
if (sep) buffer = sep+1;
}
if (base_64) {
data_size = 2 * (u32) strlen(buffer);
data = (char*)gf_malloc(sizeof(char)*data_size);
if (data)
data_size = gf_base64_decode(buffer, (u32) strlen(buffer), data, data_size);
} else {
u32 i, c;
char s[3];
data_size = (u32) strlen(buffer) / 3;
data = (char*)gf_malloc(sizeof(char) * data_size);
if (data) {
s[2] = 0;
for (i=0; i<data_size; i++) {
s[0] = buffer[3*i+1];
s[1] = buffer[3*i+2];
sscanf(s, "%02X", &c);
data[i] = (unsigned char) c;
}
}
}
}
gf_free(bw->buffer.buffer);
bw->buffer.buffer = NULL;
bw->buffer_len = 0;
if (data) {
bw->buffer.buffer = data;
bw->buffer_len = data_size;
}
}
|
void gf_sm_update_bitwrapper_buffer(GF_Node *node, const char *fileName)
{
u32 data_size = 0;
char *data = NULL;
char *buffer;
M_BitWrapper *bw = (M_BitWrapper *)node;
if (!bw->buffer.buffer) return;
buffer = bw->buffer.buffer;
if (!strnicmp(buffer, "file://", 7)) {
char *url = gf_url_concatenate(fileName, buffer+7);
if (url) {
FILE *f = gf_fopen(url, "rb");
if (f) {
fseek(f, 0, SEEK_END);
data_size = (u32) ftell(f);
fseek(f, 0, SEEK_SET);
data = gf_malloc(sizeof(char)*data_size);
if (data) {
if (fread(data, 1, data_size, f) != data_size) {
GF_LOG(GF_LOG_ERROR, GF_LOG_SCENE, ("[Scene Manager] error reading bitwrapper file %s\n", url));
}
}
gf_fclose(f);
}
gf_free(url);
}
} else {
Bool base_64 = 0;
if (!strnicmp(buffer, "data:application/octet-string", 29)) {
char *sep = strchr(bw->buffer.buffer, ',');
base_64 = strstr(bw->buffer.buffer, ";base64") ? 1 : 0;
if (sep) buffer = sep+1;
}
if (base_64) {
data_size = 2 * (u32) strlen(buffer);
data = (char*)gf_malloc(sizeof(char)*data_size);
if (data)
data_size = gf_base64_decode(buffer, (u32) strlen(buffer), data, data_size);
} else {
u32 i, c;
char s[3];
data_size = (u32) strlen(buffer) / 3;
data = (char*)gf_malloc(sizeof(char) * data_size);
if (data) {
s[2] = 0;
for (i=0; i<data_size; i++) {
s[0] = buffer[3*i+1];
s[1] = buffer[3*i+2];
sscanf(s, "%02X", &c);
data[i] = (unsigned char) c;
}
}
}
}
gf_free(bw->buffer.buffer);
bw->buffer.buffer = NULL;
bw->buffer_len = 0;
if (data) {
bw->buffer.buffer = data;
bw->buffer_len = data_size;
}
}
|
C
|
gpac
| 0 |
CVE-2015-8812
|
https://www.cvedetails.com/cve/CVE-2015-8812/
| null |
https://github.com/torvalds/linux/commit/67f1aee6f45059fd6b0f5b0ecb2c97ad0451f6b3
|
67f1aee6f45059fd6b0f5b0ecb2c97ad0451f6b3
|
iw_cxgb3: Fix incorrectly returning error on success
The cxgb3_*_send() functions return NET_XMIT_ values, which are
positive integers values. So don't treat positive return values
as an error.
Signed-off-by: Steve Wise <[email protected]>
Signed-off-by: Hariprasad Shenai <[email protected]>
Signed-off-by: Doug Ledford <[email protected]>
|
void __free_ep(struct kref *kref)
{
struct iwch_ep *ep;
ep = container_of(container_of(kref, struct iwch_ep_common, kref),
struct iwch_ep, com);
PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
dst_release(ep->dst);
l2t_release(ep->com.tdev, ep->l2t);
}
kfree(ep);
}
|
void __free_ep(struct kref *kref)
{
struct iwch_ep *ep;
ep = container_of(container_of(kref, struct iwch_ep_common, kref),
struct iwch_ep, com);
PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
dst_release(ep->dst);
l2t_release(ep->com.tdev, ep->l2t);
}
kfree(ep);
}
|
C
|
linux
| 0 |
CVE-2018-10017
|
https://www.cvedetails.com/cve/CVE-2018-10017/
|
CWE-125
|
https://github.com/OpenMPT/openmpt/commit/492022c7297ede682161d9c0ec2de15526424e76
|
492022c7297ede682161d9c0ec2de15526424e76
|
[Fix] Possible out-of-bounds read when computing length of some IT files with pattern loops (OpenMPT: formats that are converted to IT, libopenmpt: IT/ITP/MO3), caught with afl-fuzz.
git-svn-id: https://source.openmpt.org/svn/openmpt/trunk/OpenMPT@10027 56274372-70c3-4bfc-bfc3-4c3a0b034d27
|
void CSoundFile::ApplyInstrumentPanning(ModChannel *pChn, const ModInstrument *instr, const ModSample *smp) const
{
int32 newPan = int32_min;
if(instr != nullptr && instr->dwFlags[INS_SETPANNING])
newPan = instr->nPan;
if(smp != nullptr && smp->uFlags[CHN_PANNING])
newPan = smp->nPan;
if(newPan != int32_min)
{
pChn->nPan = newPan;
if(m_playBehaviour[kPanOverride] && !m_SongFlags[SONG_SURROUNDPAN])
{
pChn->dwFlags.reset(CHN_SURROUND);
}
}
}
|
void CSoundFile::ApplyInstrumentPanning(ModChannel *pChn, const ModInstrument *instr, const ModSample *smp) const
{
int32 newPan = int32_min;
if(instr != nullptr && instr->dwFlags[INS_SETPANNING])
newPan = instr->nPan;
if(smp != nullptr && smp->uFlags[CHN_PANNING])
newPan = smp->nPan;
if(newPan != int32_min)
{
pChn->nPan = newPan;
if(m_playBehaviour[kPanOverride] && !m_SongFlags[SONG_SURROUNDPAN])
{
pChn->dwFlags.reset(CHN_SURROUND);
}
}
}
|
C
|
openmpt
| 0 |
CVE-2018-17206
|
https://www.cvedetails.com/cve/CVE-2018-17206/
| null |
https://github.com/openvswitch/ovs/commit/9237a63c47bd314b807cda0bd2216264e82edbe8
|
9237a63c47bd314b807cda0bd2216264e82edbe8
|
ofp-actions: Avoid buffer overread in BUNDLE action decoding.
Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9052
Signed-off-by: Ben Pfaff <[email protected]>
Acked-by: Justin Pettit <[email protected]>
|
parse_MULTIPATH(const char *arg, struct ofpbuf *ofpacts,
enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
return multipath_parse(ofpact_put_MULTIPATH(ofpacts), arg);
}
|
parse_MULTIPATH(const char *arg, struct ofpbuf *ofpacts,
enum ofputil_protocol *usable_protocols OVS_UNUSED)
{
return multipath_parse(ofpact_put_MULTIPATH(ofpacts), arg);
}
|
C
|
ovs
| 0 |
CVE-2012-0879
|
https://www.cvedetails.com/cve/CVE-2012-0879/
|
CWE-20
|
https://github.com/torvalds/linux/commit/61cc74fbb87af6aa551a06a370590c9bc07e29d9
|
61cc74fbb87af6aa551a06a370590c9bc07e29d9
|
block: Fix io_context leak after clone with CLONE_IO
With CLONE_IO, copy_io() increments both ioc->refcount and ioc->nr_tasks.
However exit_io_context() only decrements ioc->refcount if ioc->nr_tasks
reaches 0.
Always call put_io_context() in exit_io_context().
Signed-off-by: Louis Rilling <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
|
void copy_io_context(struct io_context **pdst, struct io_context **psrc)
{
struct io_context *src = *psrc;
struct io_context *dst = *pdst;
if (src) {
BUG_ON(atomic_long_read(&src->refcount) == 0);
atomic_long_inc(&src->refcount);
put_io_context(dst);
*pdst = src;
}
}
|
void copy_io_context(struct io_context **pdst, struct io_context **psrc)
{
struct io_context *src = *psrc;
struct io_context *dst = *pdst;
if (src) {
BUG_ON(atomic_long_read(&src->refcount) == 0);
atomic_long_inc(&src->refcount);
put_io_context(dst);
*pdst = src;
}
}
|
C
|
linux
| 0 |
CVE-2013-7421
|
https://www.cvedetails.com/cve/CVE-2013-7421/
|
CWE-264
|
https://github.com/torvalds/linux/commit/5d26a105b5a73e5635eae0629b42fa0a90e07b7b
|
5d26a105b5a73e5635eae0629b42fa0a90e07b7b
|
crypto: prefix module autoloading with "crypto-"
This prefixes all crypto module loading with "crypto-" so we never run
the risk of exposing module auto-loading to userspace via a crypto API,
as demonstrated by Mathias Krause:
https://lkml.org/lkml/2013/3/4/70
Signed-off-by: Kees Cook <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
|
static int cryp_blk_encrypt(struct ablkcipher_request *areq)
{
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
pr_debug(DEV_DBG_NAME " [%s]", __func__);
ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
/*
* DMA does not work for DES due to a hw bug */
if (cryp_mode == CRYP_MODE_DMA && mode_is_aes(ctx->config.algomode))
return ablk_dma_crypt(areq);
/* For everything except DMA, we run the non DMA version. */
return ablk_crypt(areq);
}
|
static int cryp_blk_encrypt(struct ablkcipher_request *areq)
{
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
pr_debug(DEV_DBG_NAME " [%s]", __func__);
ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
/*
* DMA does not work for DES due to a hw bug */
if (cryp_mode == CRYP_MODE_DMA && mode_is_aes(ctx->config.algomode))
return ablk_dma_crypt(areq);
/* For everything except DMA, we run the non DMA version. */
return ablk_crypt(areq);
}
|
C
|
linux
| 0 |
CVE-2018-12232
|
https://www.cvedetails.com/cve/CVE-2018-12232/
|
CWE-362
|
https://github.com/torvalds/linux/commit/6d8c50dcb029872b298eea68cc6209c866fd3e14
|
6d8c50dcb029872b298eea68cc6209c866fd3e14
|
socket: close race condition between sock_close() and sockfs_setattr()
fchownat() doesn't even hold refcnt of fd until it figures out
fd is really needed (otherwise is ignored) and releases it after
it resolves the path. This means sock_close() could race with
sockfs_setattr(), which leads to a NULL pointer dereference
since typically we set sock->sk to NULL in ->release().
As pointed out by Al, this is unique to sockfs. So we can fix this
in socket layer by acquiring inode_lock in sock_close() and
checking against NULL in sockfs_setattr().
sock_release() is called in many places, only the sock_close()
path matters here. And fortunately, this should not affect normal
sock_close() as it is only called when the last fd refcnt is gone.
It only affects sock_close() with a parallel sockfs_setattr() in
progress, which is not common.
Fixes: 86741ec25462 ("net: core: Add a UID field to struct sock.")
Reported-by: shankarapailoor <[email protected]>
Cc: Tetsuo Handa <[email protected]>
Cc: Lorenzo Colitti <[email protected]>
Cc: Al Viro <[email protected]>
Signed-off-by: Cong Wang <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
{
struct qstr name = { .name = "" };
struct path path;
struct file *file;
if (dname) {
name.name = dname;
name.len = strlen(name.name);
} else if (sock->sk) {
name.name = sock->sk->sk_prot_creator->name;
name.len = strlen(name.name);
}
path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name);
if (unlikely(!path.dentry)) {
sock_release(sock);
return ERR_PTR(-ENOMEM);
}
path.mnt = mntget(sock_mnt);
d_instantiate(path.dentry, SOCK_INODE(sock));
file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
&socket_file_ops);
if (IS_ERR(file)) {
/* drop dentry, keep inode for a bit */
ihold(d_inode(path.dentry));
path_put(&path);
/* ... and now kill it properly */
sock_release(sock);
return file;
}
sock->file = file;
file->f_flags = O_RDWR | (flags & O_NONBLOCK);
file->private_data = sock;
return file;
}
|
struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
{
struct qstr name = { .name = "" };
struct path path;
struct file *file;
if (dname) {
name.name = dname;
name.len = strlen(name.name);
} else if (sock->sk) {
name.name = sock->sk->sk_prot_creator->name;
name.len = strlen(name.name);
}
path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name);
if (unlikely(!path.dentry)) {
sock_release(sock);
return ERR_PTR(-ENOMEM);
}
path.mnt = mntget(sock_mnt);
d_instantiate(path.dentry, SOCK_INODE(sock));
file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
&socket_file_ops);
if (IS_ERR(file)) {
/* drop dentry, keep inode for a bit */
ihold(d_inode(path.dentry));
path_put(&path);
/* ... and now kill it properly */
sock_release(sock);
return file;
}
sock->file = file;
file->f_flags = O_RDWR | (flags & O_NONBLOCK);
file->private_data = sock;
return file;
}
|
C
|
linux
| 0 |
CVE-2011-2918
|
https://www.cvedetails.com/cve/CVE-2011-2918/
|
CWE-399
|
https://github.com/torvalds/linux/commit/a8b0ca17b80e92faab46ee7179ba9e99ccb61233
|
a8b0ca17b80e92faab46ee7179ba9e99ccb61233
|
perf: Remove the nmi parameter from the swevent and overflow interface
The nmi parameter indicated if we could do wakeups from the current
context, if not, we would set some state and self-IPI and let the
resulting interrupt do the wakeup.
For the various event classes:
- hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from
the PMI-tail (ARM etc.)
- tracepoint: nmi=0; since tracepoint could be from NMI context.
- software: nmi=[0,1]; some, like the schedule thing cannot
perform wakeups, and hence need 0.
As one can see, there is very little nmi=1 usage, and the down-side of
not using it is that on some platforms some software events can have a
jiffy delay in wakeup (when arch_irq_work_raise isn't implemented).
The up-side however is that we can remove the nmi parameter and save a
bunch of conditionals in fast paths.
Signed-off-by: Peter Zijlstra <[email protected]>
Cc: Michael Cree <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Deng-Cheng Zhu <[email protected]>
Cc: Anton Blanchard <[email protected]>
Cc: Eric B Munson <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: Paul Mundt <[email protected]>
Cc: David S. Miller <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Cc: Jason Wessel <[email protected]>
Cc: Don Zickus <[email protected]>
Link: http://lkml.kernel.org/n/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
|
unsigned long compute_effective_address(struct pt_regs *regs,
unsigned int insn, unsigned int rd)
{
unsigned int rs1 = (insn >> 14) & 0x1f;
unsigned int rs2 = insn & 0x1f;
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
if (insn & 0x2000) {
maybe_flush_windows(rs1, 0, rd, from_kernel);
return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
} else {
maybe_flush_windows(rs1, rs2, rd, from_kernel);
return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
}
}
|
unsigned long compute_effective_address(struct pt_regs *regs,
unsigned int insn, unsigned int rd)
{
unsigned int rs1 = (insn >> 14) & 0x1f;
unsigned int rs2 = insn & 0x1f;
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
if (insn & 0x2000) {
maybe_flush_windows(rs1, 0, rd, from_kernel);
return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
} else {
maybe_flush_windows(rs1, rs2, rd, from_kernel);
return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
}
}
|
C
|
linux
| 0 |
CVE-2017-17858
|
https://www.cvedetails.com/cve/CVE-2017-17858/
|
CWE-119
|
http://git.ghostscript.com/?p=mupdf.git;a=commit;h=55c3f68d638ac1263a386e0aaa004bb6e8bde731
|
55c3f68d638ac1263a386e0aaa004bb6e8bde731
| null |
pdf_xref_size_from_old_trailer(fz_context *ctx, pdf_document *doc, pdf_lexbuf *buf)
{
int64_t len;
char *s;
int64_t t;
pdf_token tok;
int c;
int size = 0;
int64_t ofs;
pdf_obj *trailer = NULL;
size_t n;
fz_var(trailer);
/* Record the current file read offset so that we can reinstate it */
ofs = fz_tell(ctx, doc->file);
fz_skip_space(ctx, doc->file);
if (fz_skip_string(ctx, doc->file, "xref"))
fz_throw(ctx, FZ_ERROR_GENERIC, "cannot find xref marker");
fz_skip_space(ctx, doc->file);
while (1)
{
c = fz_peek_byte(ctx, doc->file);
if (!(c >= '0' && c <= '9'))
break;
fz_read_line(ctx, doc->file, buf->scratch, buf->size);
s = buf->scratch;
fz_strsep(&s, " "); /* ignore ofs */
if (!s)
fz_throw(ctx, FZ_ERROR_GENERIC, "invalid range marker in xref");
len = fz_atoi64(fz_strsep(&s, " "));
if (len < 0)
fz_throw(ctx, FZ_ERROR_GENERIC, "xref range marker must be positive");
/* broken pdfs where the section is not on a separate line */
if (s && *s != '\0')
fz_seek(ctx, doc->file, -(2 + (int)strlen(s)), SEEK_CUR);
t = fz_tell(ctx, doc->file);
if (t < 0)
fz_throw(ctx, FZ_ERROR_GENERIC, "cannot tell in file");
/* Spec says xref entries should be 20 bytes, but it's not infrequent
* to see 19, in particular for some PCLm drivers. Cope. */
if (len > 0)
{
n = fz_read(ctx, doc->file, (unsigned char *)buf->scratch, 20);
if (n < 19)
fz_throw(ctx, FZ_ERROR_GENERIC, "malformed xref table");
if (n == 20 && buf->scratch[19] > 32)
n = 19;
}
else
n = 20;
if (len > (int64_t)((INT64_MAX - t) / n))
fz_throw(ctx, FZ_ERROR_GENERIC, "xref has too many entries");
fz_seek(ctx, doc->file, (int64_t)(t + n * len), SEEK_SET);
}
fz_try(ctx)
{
tok = pdf_lex(ctx, doc->file, buf);
if (tok != PDF_TOK_TRAILER)
fz_throw(ctx, FZ_ERROR_GENERIC, "expected trailer marker");
tok = pdf_lex(ctx, doc->file, buf);
if (tok != PDF_TOK_OPEN_DICT)
fz_throw(ctx, FZ_ERROR_GENERIC, "expected trailer dictionary");
trailer = pdf_parse_dict(ctx, doc, doc->file, buf);
size = pdf_to_int(ctx, pdf_dict_get(ctx, trailer, PDF_NAME_Size));
if (!size)
fz_throw(ctx, FZ_ERROR_GENERIC, "trailer missing Size entry");
}
fz_always(ctx)
{
pdf_drop_obj(ctx, trailer);
}
fz_catch(ctx)
{
fz_rethrow(ctx);
}
fz_seek(ctx, doc->file, ofs, SEEK_SET);
return size;
}
|
pdf_xref_size_from_old_trailer(fz_context *ctx, pdf_document *doc, pdf_lexbuf *buf)
{
int64_t len;
char *s;
int64_t t;
pdf_token tok;
int c;
int size = 0;
int64_t ofs;
pdf_obj *trailer = NULL;
size_t n;
fz_var(trailer);
/* Record the current file read offset so that we can reinstate it */
ofs = fz_tell(ctx, doc->file);
fz_skip_space(ctx, doc->file);
if (fz_skip_string(ctx, doc->file, "xref"))
fz_throw(ctx, FZ_ERROR_GENERIC, "cannot find xref marker");
fz_skip_space(ctx, doc->file);
while (1)
{
c = fz_peek_byte(ctx, doc->file);
if (!(c >= '0' && c <= '9'))
break;
fz_read_line(ctx, doc->file, buf->scratch, buf->size);
s = buf->scratch;
fz_strsep(&s, " "); /* ignore ofs */
if (!s)
fz_throw(ctx, FZ_ERROR_GENERIC, "invalid range marker in xref");
len = fz_atoi64(fz_strsep(&s, " "));
if (len < 0)
fz_throw(ctx, FZ_ERROR_GENERIC, "xref range marker must be positive");
/* broken pdfs where the section is not on a separate line */
if (s && *s != '\0')
fz_seek(ctx, doc->file, -(2 + (int)strlen(s)), SEEK_CUR);
t = fz_tell(ctx, doc->file);
if (t < 0)
fz_throw(ctx, FZ_ERROR_GENERIC, "cannot tell in file");
/* Spec says xref entries should be 20 bytes, but it's not infrequent
* to see 19, in particular for some PCLm drivers. Cope. */
if (len > 0)
{
n = fz_read(ctx, doc->file, (unsigned char *)buf->scratch, 20);
if (n < 19)
fz_throw(ctx, FZ_ERROR_GENERIC, "malformed xref table");
if (n == 20 && buf->scratch[19] > 32)
n = 19;
}
else
n = 20;
if (len > (int64_t)((INT64_MAX - t) / n))
fz_throw(ctx, FZ_ERROR_GENERIC, "xref has too many entries");
fz_seek(ctx, doc->file, (int64_t)(t + n * len), SEEK_SET);
}
fz_try(ctx)
{
tok = pdf_lex(ctx, doc->file, buf);
if (tok != PDF_TOK_TRAILER)
fz_throw(ctx, FZ_ERROR_GENERIC, "expected trailer marker");
tok = pdf_lex(ctx, doc->file, buf);
if (tok != PDF_TOK_OPEN_DICT)
fz_throw(ctx, FZ_ERROR_GENERIC, "expected trailer dictionary");
trailer = pdf_parse_dict(ctx, doc, doc->file, buf);
size = pdf_to_int(ctx, pdf_dict_get(ctx, trailer, PDF_NAME_Size));
if (!size)
fz_throw(ctx, FZ_ERROR_GENERIC, "trailer missing Size entry");
}
fz_always(ctx)
{
pdf_drop_obj(ctx, trailer);
}
fz_catch(ctx)
{
fz_rethrow(ctx);
}
fz_seek(ctx, doc->file, ofs, SEEK_SET);
return size;
}
|
C
|
ghostscript
| 0 |
CVE-2014-1743
|
https://www.cvedetails.com/cve/CVE-2014-1743/
|
CWE-399
|
https://github.com/chromium/chromium/commit/6d9425ec7badda912555d46ea7abcfab81fdd9b9
|
6d9425ec7badda912555d46ea7abcfab81fdd9b9
|
sync compositor: pass simple gfx types by const ref
See bug for reasoning
BUG=159273
Review URL: https://codereview.chromium.org/1417893006
Cr-Commit-Position: refs/heads/master@{#356653}
|
void BrowserViewRenderer::ReleaseHardware() {
DCHECK(hardware_enabled_);
ReturnUnusedResource(shared_renderer_state_.PassUncommittedFrameOnUI());
ReturnResourceFromParent();
DCHECK(shared_renderer_state_.ReturnedResourcesEmptyOnUI());
if (compositor_) {
compositor_->SetMemoryPolicy(0u);
}
hardware_enabled_ = false;
}
|
void BrowserViewRenderer::ReleaseHardware() {
DCHECK(hardware_enabled_);
ReturnUnusedResource(shared_renderer_state_.PassUncommittedFrameOnUI());
ReturnResourceFromParent();
DCHECK(shared_renderer_state_.ReturnedResourcesEmptyOnUI());
if (compositor_) {
compositor_->SetMemoryPolicy(0u);
}
hardware_enabled_ = false;
}
|
C
|
Chrome
| 0 |
CVE-2012-2880
|
https://www.cvedetails.com/cve/CVE-2012-2880/
|
CWE-362
|
https://github.com/chromium/chromium/commit/fcd3a7a671ecf2d5f46ea34787d27507a914d2f5
|
fcd3a7a671ecf2d5f46ea34787d27507a914d2f5
|
[Sync] Cleanup all tab sync enabling logic now that its on by default.
BUG=none
TEST=
Review URL: https://chromiumcodereview.appspot.com/10443046
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@139462 0039d316-1c4b-4281-b951-d872f2087c98
|
void SyncManager::SyncInternal::SetDecryptionPassphrase(
const std::string& passphrase) {
if (passphrase.empty()) {
NOTREACHED() << "Cannot decrypt with an empty passphrase.";
return;
}
WriteTransaction trans(FROM_HERE, GetUserShare());
Cryptographer* cryptographer = trans.GetCryptographer();
KeyParams key_params = {"localhost", "dummy", passphrase};
WriteNode node(&trans);
if (node.InitByTagLookup(kNigoriTag) != sync_api::BaseNode::INIT_OK) {
NOTREACHED();
return;
}
if (!cryptographer->has_pending_keys()) {
NOTREACHED() << "Attempt to set decryption passphrase failed because there "
<< "were no pending keys.";
return;
}
bool nigori_has_explicit_passphrase =
node.GetNigoriSpecifics().using_explicit_passphrase();
std::string bootstrap_token;
sync_pb::EncryptedData pending_keys;
pending_keys = cryptographer->GetPendingKeys();
bool success = false;
if (!nigori_has_explicit_passphrase) {
if (cryptographer->is_initialized()) {
Cryptographer temp_cryptographer(encryptor_);
temp_cryptographer.SetPendingKeys(cryptographer->GetPendingKeys());
if (temp_cryptographer.DecryptPendingKeys(key_params)) {
sync_pb::EncryptedData encrypted;
cryptographer->GetKeys(&encrypted);
if (temp_cryptographer.CanDecrypt(encrypted)) {
DVLOG(1) << "Implicit user provided passphrase accepted for "
<< "decryption, overwriting default.";
cryptographer->DecryptPendingKeys(key_params);
cryptographer->GetBootstrapToken(&bootstrap_token);
success = true;
} else {
DVLOG(1) << "Implicit user provided passphrase accepted for "
<< "decryption, restoring implicit internal passphrase "
<< "as default.";
std::string bootstrap_token_from_current_key;
cryptographer->GetBootstrapToken(
&bootstrap_token_from_current_key);
cryptographer->DecryptPendingKeys(key_params);
cryptographer->AddKeyFromBootstrapToken(
bootstrap_token_from_current_key);
success = true;
}
} else { // !temp_cryptographer.DecryptPendingKeys(..)
DVLOG(1) << "Implicit user provided passphrase failed to decrypt.";
success = false;
} // temp_cryptographer.DecryptPendingKeys(...)
} else { // cryptographer->is_initialized() == false
if (cryptographer->DecryptPendingKeys(key_params)) {
cryptographer->GetBootstrapToken(&bootstrap_token);
DVLOG(1) << "Implicit user provided passphrase accepted, initializing"
<< " cryptographer.";
success = true;
} else {
DVLOG(1) << "Implicit user provided passphrase failed to decrypt.";
success = false;
}
} // cryptographer->is_initialized()
} else { // nigori_has_explicit_passphrase == true
if (cryptographer->DecryptPendingKeys(key_params)) {
DVLOG(1) << "Explicit passphrase accepted for decryption.";
cryptographer->GetBootstrapToken(&bootstrap_token);
success = true;
} else {
DVLOG(1) << "Explicit passphrase failed to decrypt.";
success = false;
}
} // nigori_has_explicit_passphrase
DVLOG_IF(1, !success)
<< "Failure in SetDecryptionPassphrase; notifying and returning.";
DVLOG_IF(1, success)
<< "Successfully set decryption passphrase; updating nigori and "
"reencrypting.";
FinishSetPassphrase(success,
bootstrap_token,
nigori_has_explicit_passphrase,
&trans,
&node);
}
|
void SyncManager::SyncInternal::SetDecryptionPassphrase(
const std::string& passphrase) {
if (passphrase.empty()) {
NOTREACHED() << "Cannot decrypt with an empty passphrase.";
return;
}
WriteTransaction trans(FROM_HERE, GetUserShare());
Cryptographer* cryptographer = trans.GetCryptographer();
KeyParams key_params = {"localhost", "dummy", passphrase};
WriteNode node(&trans);
if (node.InitByTagLookup(kNigoriTag) != sync_api::BaseNode::INIT_OK) {
NOTREACHED();
return;
}
if (!cryptographer->has_pending_keys()) {
NOTREACHED() << "Attempt to set decryption passphrase failed because there "
<< "were no pending keys.";
return;
}
bool nigori_has_explicit_passphrase =
node.GetNigoriSpecifics().using_explicit_passphrase();
std::string bootstrap_token;
sync_pb::EncryptedData pending_keys;
pending_keys = cryptographer->GetPendingKeys();
bool success = false;
if (!nigori_has_explicit_passphrase) {
if (cryptographer->is_initialized()) {
Cryptographer temp_cryptographer(encryptor_);
temp_cryptographer.SetPendingKeys(cryptographer->GetPendingKeys());
if (temp_cryptographer.DecryptPendingKeys(key_params)) {
sync_pb::EncryptedData encrypted;
cryptographer->GetKeys(&encrypted);
if (temp_cryptographer.CanDecrypt(encrypted)) {
DVLOG(1) << "Implicit user provided passphrase accepted for "
<< "decryption, overwriting default.";
cryptographer->DecryptPendingKeys(key_params);
cryptographer->GetBootstrapToken(&bootstrap_token);
success = true;
} else {
DVLOG(1) << "Implicit user provided passphrase accepted for "
<< "decryption, restoring implicit internal passphrase "
<< "as default.";
std::string bootstrap_token_from_current_key;
cryptographer->GetBootstrapToken(
&bootstrap_token_from_current_key);
cryptographer->DecryptPendingKeys(key_params);
cryptographer->AddKeyFromBootstrapToken(
bootstrap_token_from_current_key);
success = true;
}
} else { // !temp_cryptographer.DecryptPendingKeys(..)
DVLOG(1) << "Implicit user provided passphrase failed to decrypt.";
success = false;
} // temp_cryptographer.DecryptPendingKeys(...)
} else { // cryptographer->is_initialized() == false
if (cryptographer->DecryptPendingKeys(key_params)) {
cryptographer->GetBootstrapToken(&bootstrap_token);
DVLOG(1) << "Implicit user provided passphrase accepted, initializing"
<< " cryptographer.";
success = true;
} else {
DVLOG(1) << "Implicit user provided passphrase failed to decrypt.";
success = false;
}
} // cryptographer->is_initialized()
} else { // nigori_has_explicit_passphrase == true
if (cryptographer->DecryptPendingKeys(key_params)) {
DVLOG(1) << "Explicit passphrase accepted for decryption.";
cryptographer->GetBootstrapToken(&bootstrap_token);
success = true;
} else {
DVLOG(1) << "Explicit passphrase failed to decrypt.";
success = false;
}
} // nigori_has_explicit_passphrase
DVLOG_IF(1, !success)
<< "Failure in SetDecryptionPassphrase; notifying and returning.";
DVLOG_IF(1, success)
<< "Successfully set decryption passphrase; updating nigori and "
"reencrypting.";
FinishSetPassphrase(success,
bootstrap_token,
nigori_has_explicit_passphrase,
&trans,
&node);
}
|
C
|
Chrome
| 0 |
CVE-2016-3741
|
https://www.cvedetails.com/cve/CVE-2016-3741/
|
CWE-20
|
https://android.googlesource.com/platform/external/libavc/+/e629194c62a9a129ce378e08cb1059a8a53f1795
|
e629194c62a9a129ce378e08cb1059a8a53f1795
|
Decoder: Initialize slice parameters before concealing error MBs
Also memset ps_dec_op structure to zero.
For error input, this ensures dimensions are initialized to zero
Bug: 28165661
Change-Id: I66eb2ddc5e02e74b7ff04da5f749443920f37141
|
WORD32 ih264d_video_decode(iv_obj_t *dec_hdl, void *pv_api_ip, void *pv_api_op)
{
/* ! */
dec_struct_t * ps_dec = (dec_struct_t *)(dec_hdl->pv_codec_handle);
WORD32 i4_err_status = 0;
UWORD8 *pu1_buf = NULL;
WORD32 buflen;
UWORD32 u4_max_ofst, u4_length_of_start_code = 0;
UWORD32 bytes_consumed = 0;
UWORD32 cur_slice_is_nonref = 0;
UWORD32 u4_next_is_aud;
UWORD32 u4_first_start_code_found = 0;
WORD32 ret = 0,api_ret_value = IV_SUCCESS;
WORD32 header_data_left = 0,frame_data_left = 0;
UWORD8 *pu1_bitstrm_buf;
ivd_video_decode_ip_t *ps_dec_ip;
ivd_video_decode_op_t *ps_dec_op;
ithread_set_name((void*)"Parse_thread");
ps_dec_ip = (ivd_video_decode_ip_t *)pv_api_ip;
ps_dec_op = (ivd_video_decode_op_t *)pv_api_op;
{
UWORD32 u4_size;
u4_size = ps_dec_op->u4_size;
memset(ps_dec_op, 0, sizeof(ivd_video_decode_op_t));
ps_dec_op->u4_size = u4_size;
}
ps_dec->pv_dec_out = ps_dec_op;
if(ps_dec->init_done != 1)
{
return IV_FAIL;
}
/*Data memory barries instruction,so that bitstream write by the application is complete*/
DATA_SYNC();
if(0 == ps_dec->u1_flushfrm)
{
if(ps_dec_ip->pv_stream_buffer == NULL)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DEC_FRM_BS_BUF_NULL;
return IV_FAIL;
}
if(ps_dec_ip->u4_num_Bytes <= 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DEC_NUMBYTES_INV;
return IV_FAIL;
}
}
ps_dec->u1_pic_decode_done = 0;
ps_dec_op->u4_num_bytes_consumed = 0;
ps_dec->ps_out_buffer = NULL;
if(ps_dec_ip->u4_size
>= offsetof(ivd_video_decode_ip_t, s_out_buffer))
ps_dec->ps_out_buffer = &ps_dec_ip->s_out_buffer;
ps_dec->u4_fmt_conv_cur_row = 0;
ps_dec->u4_output_present = 0;
ps_dec->s_disp_op.u4_error_code = 1;
ps_dec->u4_fmt_conv_num_rows = FMT_CONV_NUM_ROWS;
if(0 == ps_dec->u4_share_disp_buf
&& ps_dec->i4_decode_header == 0)
{
UWORD32 i;
if(ps_dec->ps_out_buffer->u4_num_bufs == 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DISP_FRM_ZERO_OP_BUFS;
return IV_FAIL;
}
for(i = 0; i < ps_dec->ps_out_buffer->u4_num_bufs; i++)
{
if(ps_dec->ps_out_buffer->pu1_bufs[i] == NULL)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DISP_FRM_OP_BUF_NULL;
return IV_FAIL;
}
if(ps_dec->ps_out_buffer->u4_min_out_buf_size[i] == 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |=
IVD_DISP_FRM_ZERO_OP_BUF_SIZE;
return IV_FAIL;
}
}
}
if(ps_dec->u4_total_frames_decoded >= NUM_FRAMES_LIMIT)
{
ps_dec_op->u4_error_code = ERROR_FRAME_LIMIT_OVER;
return IV_FAIL;
}
/* ! */
ps_dec->u4_ts = ps_dec_ip->u4_ts;
ps_dec_op->u4_error_code = 0;
ps_dec_op->e_pic_type = -1;
ps_dec_op->u4_output_present = 0;
ps_dec_op->u4_frame_decoded_flag = 0;
ps_dec->i4_frametype = -1;
ps_dec->i4_content_type = -1;
/*
* For field pictures, set the bottom and top picture decoded u4_flag correctly.
*/
{
if((TOP_FIELD_ONLY | BOT_FIELD_ONLY) == ps_dec->u1_top_bottom_decoded)
{
ps_dec->u1_top_bottom_decoded = 0;
}
}
ps_dec->u4_slice_start_code_found = 0;
/* In case the deocder is not in flush mode(in shared mode),
then decoder has to pick up a buffer to write current frame.
Check if a frame is available in such cases */
if(ps_dec->u1_init_dec_flag == 1 && ps_dec->u4_share_disp_buf == 1
&& ps_dec->u1_flushfrm == 0)
{
UWORD32 i;
WORD32 disp_avail = 0, free_id;
/* Check if at least one buffer is available with the codec */
/* If not then return to application with error */
for(i = 0; i < ps_dec->u1_pic_bufs; i++)
{
if(0 == ps_dec->u4_disp_buf_mapping[i]
|| 1 == ps_dec->u4_disp_buf_to_be_freed[i])
{
disp_avail = 1;
break;
}
}
if(0 == disp_avail)
{
/* If something is queued for display wait for that buffer to be returned */
ps_dec_op->u4_error_code = IVD_DEC_REF_BUF_NULL;
ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM);
return (IV_FAIL);
}
while(1)
{
pic_buffer_t *ps_pic_buf;
ps_pic_buf = (pic_buffer_t *)ih264_buf_mgr_get_next_free(
(buf_mgr_t *)ps_dec->pv_pic_buf_mgr, &free_id);
if(ps_pic_buf == NULL)
{
UWORD32 i, display_queued = 0;
/* check if any buffer was given for display which is not returned yet */
for(i = 0; i < (MAX_DISP_BUFS_NEW); i++)
{
if(0 != ps_dec->u4_disp_buf_mapping[i])
{
display_queued = 1;
break;
}
}
/* If some buffer is queued for display, then codec has to singal an error and wait
for that buffer to be returned.
If nothing is queued for display then codec has ownership of all display buffers
and it can reuse any of the existing buffers and continue decoding */
if(1 == display_queued)
{
/* If something is queued for display wait for that buffer to be returned */
ps_dec_op->u4_error_code = IVD_DEC_REF_BUF_NULL;
ps_dec_op->u4_error_code |= (1
<< IVD_UNSUPPORTEDPARAM);
return (IV_FAIL);
}
}
else
{
/* If the buffer is with display, then mark it as in use and then look for a buffer again */
if(1 == ps_dec->u4_disp_buf_mapping[free_id])
{
ih264_buf_mgr_set_status(
(buf_mgr_t *)ps_dec->pv_pic_buf_mgr,
free_id,
BUF_MGR_IO);
}
else
{
/**
* Found a free buffer for present call. Release it now.
* Will be again obtained later.
*/
ih264_buf_mgr_release((buf_mgr_t *)ps_dec->pv_pic_buf_mgr,
free_id,
BUF_MGR_IO);
break;
}
}
}
}
if(ps_dec->u1_flushfrm && ps_dec->u1_init_dec_flag)
{
ih264d_get_next_display_field(ps_dec, ps_dec->ps_out_buffer,
&(ps_dec->s_disp_op));
if(0 == ps_dec->s_disp_op.u4_error_code)
{
ps_dec->u4_fmt_conv_cur_row = 0;
ps_dec->u4_fmt_conv_num_rows = ps_dec->s_disp_frame_info.u4_y_ht;
ih264d_format_convert(ps_dec, &(ps_dec->s_disp_op),
ps_dec->u4_fmt_conv_cur_row,
ps_dec->u4_fmt_conv_num_rows);
ps_dec->u4_fmt_conv_cur_row += ps_dec->u4_fmt_conv_num_rows;
ps_dec->u4_output_present = 1;
}
ih264d_release_display_field(ps_dec, &(ps_dec->s_disp_op));
ps_dec_op->u4_pic_wd = (UWORD32)ps_dec->u2_disp_width;
ps_dec_op->u4_pic_ht = (UWORD32)ps_dec->u2_disp_height;
ps_dec_op->u4_new_seq = 0;
ps_dec_op->u4_output_present = ps_dec->u4_output_present;
ps_dec_op->u4_progressive_frame_flag =
ps_dec->s_disp_op.u4_progressive_frame_flag;
ps_dec_op->e_output_format =
ps_dec->s_disp_op.e_output_format;
ps_dec_op->s_disp_frm_buf = ps_dec->s_disp_op.s_disp_frm_buf;
ps_dec_op->e4_fld_type = ps_dec->s_disp_op.e4_fld_type;
ps_dec_op->u4_ts = ps_dec->s_disp_op.u4_ts;
ps_dec_op->u4_disp_buf_id = ps_dec->s_disp_op.u4_disp_buf_id;
/*In the case of flush ,since no frame is decoded set pic type as invalid*/
ps_dec_op->u4_is_ref_flag = -1;
ps_dec_op->e_pic_type = IV_NA_FRAME;
ps_dec_op->u4_frame_decoded_flag = 0;
if(0 == ps_dec->s_disp_op.u4_error_code)
{
return (IV_SUCCESS);
}
else
return (IV_FAIL);
}
if(ps_dec->u1_res_changed == 1)
{
/*if resolution has changed and all buffers have been flushed, reset decoder*/
ih264d_init_decoder(ps_dec);
}
ps_dec->u4_prev_nal_skipped = 0;
ps_dec->u2_cur_mb_addr = 0;
ps_dec->u2_total_mbs_coded = 0;
ps_dec->u2_cur_slice_num = 0;
ps_dec->cur_dec_mb_num = 0;
ps_dec->cur_recon_mb_num = 0;
ps_dec->u4_first_slice_in_pic = 2;
ps_dec->u1_slice_header_done = 0;
ps_dec->u1_dangling_field = 0;
ps_dec->u4_dec_thread_created = 0;
ps_dec->u4_bs_deblk_thread_created = 0;
ps_dec->u4_cur_bs_mb_num = 0;
DEBUG_THREADS_PRINTF(" Starting process call\n");
ps_dec->u4_pic_buf_got = 0;
do
{
WORD32 buf_size;
pu1_buf = (UWORD8*)ps_dec_ip->pv_stream_buffer
+ ps_dec_op->u4_num_bytes_consumed;
u4_max_ofst = ps_dec_ip->u4_num_Bytes
- ps_dec_op->u4_num_bytes_consumed;
/* If dynamic bitstream buffer is not allocated and
* header decode is done, then allocate dynamic bitstream buffer
*/
if((NULL == ps_dec->pu1_bits_buf_dynamic) &&
(ps_dec->i4_header_decoded & 1))
{
WORD32 size;
void *pv_buf;
void *pv_mem_ctxt = ps_dec->pv_mem_ctxt;
size = MAX(256000, ps_dec->u2_pic_wd * ps_dec->u2_pic_ht * 3 / 2);
pv_buf = ps_dec->pf_aligned_alloc(pv_mem_ctxt, 128, size);
RETURN_IF((NULL == pv_buf), IV_FAIL);
ps_dec->pu1_bits_buf_dynamic = pv_buf;
ps_dec->u4_dynamic_bits_buf_size = size;
}
if(ps_dec->pu1_bits_buf_dynamic)
{
pu1_bitstrm_buf = ps_dec->pu1_bits_buf_dynamic;
buf_size = ps_dec->u4_dynamic_bits_buf_size;
}
else
{
pu1_bitstrm_buf = ps_dec->pu1_bits_buf_static;
buf_size = ps_dec->u4_static_bits_buf_size;
}
u4_next_is_aud = 0;
buflen = ih264d_find_start_code(pu1_buf, 0, u4_max_ofst,
&u4_length_of_start_code,
&u4_next_is_aud);
if(buflen == -1)
buflen = 0;
/* Ignore bytes beyond the allocated size of intermediate buffer */
buflen = MIN(buflen, buf_size);
bytes_consumed = buflen + u4_length_of_start_code;
ps_dec_op->u4_num_bytes_consumed += bytes_consumed;
{
UWORD8 u1_firstbyte, u1_nal_ref_idc;
if(ps_dec->i4_app_skip_mode == IVD_SKIP_B)
{
u1_firstbyte = *(pu1_buf + u4_length_of_start_code);
u1_nal_ref_idc = (UWORD8)(NAL_REF_IDC(u1_firstbyte));
if(u1_nal_ref_idc == 0)
{
/*skip non reference frames*/
cur_slice_is_nonref = 1;
continue;
}
else
{
if(1 == cur_slice_is_nonref)
{
/*We have encountered a referenced frame,return to app*/
ps_dec_op->u4_num_bytes_consumed -=
bytes_consumed;
ps_dec_op->e_pic_type = IV_B_FRAME;
ps_dec_op->u4_error_code =
IVD_DEC_FRM_SKIPPED;
ps_dec_op->u4_error_code |= (1
<< IVD_UNSUPPORTEDPARAM);
ps_dec_op->u4_frame_decoded_flag = 0;
ps_dec_op->u4_size =
sizeof(ivd_video_decode_op_t);
/*signal the decode thread*/
ih264d_signal_decode_thread(ps_dec);
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
return (IV_FAIL);
}
}
}
}
if(buflen)
{
memcpy(pu1_bitstrm_buf, pu1_buf + u4_length_of_start_code,
buflen);
/* Decoder may read extra 8 bytes near end of the frame */
if((buflen + 8) < buf_size)
{
memset(pu1_bitstrm_buf + buflen, 0, 8);
}
u4_first_start_code_found = 1;
}
else
{
/*start code not found*/
if(u4_first_start_code_found == 0)
{
/*no start codes found in current process call*/
ps_dec->i4_error_code = ERROR_START_CODE_NOT_FOUND;
ps_dec_op->u4_error_code |= 1 << IVD_INSUFFICIENTDATA;
if(ps_dec->u4_pic_buf_got == 0)
{
ih264d_fill_output_struct_from_context(ps_dec,
ps_dec_op);
ps_dec_op->u4_error_code = ps_dec->i4_error_code;
ps_dec_op->u4_frame_decoded_flag = 0;
return (IV_FAIL);
}
else
{
ps_dec->u1_pic_decode_done = 1;
continue;
}
}
else
{
/* a start code has already been found earlier in the same process call*/
frame_data_left = 0;
continue;
}
}
ps_dec->u4_return_to_app = 0;
ret = ih264d_parse_nal_unit(dec_hdl, ps_dec_op,
pu1_bitstrm_buf, buflen);
if(ret != OK)
{
UWORD32 error = ih264d_map_error(ret);
ps_dec_op->u4_error_code = error | ret;
api_ret_value = IV_FAIL;
if((ret == IVD_RES_CHANGED)
|| (ret == IVD_MEM_ALLOC_FAILED)
|| (ret == ERROR_UNAVAIL_PICBUF_T)
|| (ret == ERROR_UNAVAIL_MVBUF_T))
{
break;
}
if((ret == ERROR_INCOMPLETE_FRAME) || (ret == ERROR_DANGLING_FIELD_IN_PIC))
{
ps_dec_op->u4_num_bytes_consumed -= bytes_consumed;
api_ret_value = IV_FAIL;
break;
}
if(ret == ERROR_IN_LAST_SLICE_OF_PIC)
{
api_ret_value = IV_FAIL;
break;
}
}
if(ps_dec->u4_return_to_app)
{
/*We have encountered a referenced frame,return to app*/
ps_dec_op->u4_num_bytes_consumed -= bytes_consumed;
ps_dec_op->u4_error_code = IVD_DEC_FRM_SKIPPED;
ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM);
ps_dec_op->u4_frame_decoded_flag = 0;
ps_dec_op->u4_size = sizeof(ivd_video_decode_op_t);
/*signal the decode thread*/
ih264d_signal_decode_thread(ps_dec);
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
return (IV_FAIL);
}
header_data_left = ((ps_dec->i4_decode_header == 1)
&& (ps_dec->i4_header_decoded != 3)
&& (ps_dec_op->u4_num_bytes_consumed
< ps_dec_ip->u4_num_Bytes));
frame_data_left = (((ps_dec->i4_decode_header == 0)
&& ((ps_dec->u1_pic_decode_done == 0)
|| (u4_next_is_aud == 1)))
&& (ps_dec_op->u4_num_bytes_consumed
< ps_dec_ip->u4_num_Bytes));
}
while(( header_data_left == 1)||(frame_data_left == 1));
if((ps_dec->u4_slice_start_code_found == 1)
&& (ret != IVD_MEM_ALLOC_FAILED)
&& ps_dec->u2_total_mbs_coded < ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs)
{
WORD32 num_mb_skipped;
WORD32 prev_slice_err;
pocstruct_t temp_poc;
WORD32 ret1;
num_mb_skipped = (ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs)
- ps_dec->u2_total_mbs_coded;
if(ps_dec->u4_first_slice_in_pic && (ps_dec->u4_pic_buf_got == 0))
prev_slice_err = 1;
else
prev_slice_err = 2;
ret1 = ih264d_mark_err_slice_skip(ps_dec, num_mb_skipped, ps_dec->u1_nal_unit_type == IDR_SLICE_NAL, ps_dec->ps_cur_slice->u2_frame_num,
&temp_poc, prev_slice_err);
if((ret1 == ERROR_UNAVAIL_PICBUF_T) || (ret1 == ERROR_UNAVAIL_MVBUF_T))
{
return IV_FAIL;
}
}
if((ret == IVD_RES_CHANGED)
|| (ret == IVD_MEM_ALLOC_FAILED)
|| (ret == ERROR_UNAVAIL_PICBUF_T)
|| (ret == ERROR_UNAVAIL_MVBUF_T))
{
/* signal the decode thread */
ih264d_signal_decode_thread(ps_dec);
/* close deblock thread if it is not closed yet */
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
/* dont consume bitstream for change in resolution case */
if(ret == IVD_RES_CHANGED)
{
ps_dec_op->u4_num_bytes_consumed -= bytes_consumed;
}
return IV_FAIL;
}
if(ps_dec->u1_separate_parse)
{
/* If Format conversion is not complete,
complete it here */
if(ps_dec->u4_num_cores == 2)
{
/*do deblocking of all mbs*/
if((ps_dec->u4_nmb_deblk == 0) &&(ps_dec->u4_start_recon_deblk == 1) && (ps_dec->ps_cur_sps->u1_mb_aff_flag == 0))
{
UWORD32 u4_num_mbs,u4_max_addr;
tfr_ctxt_t s_tfr_ctxt;
tfr_ctxt_t *ps_tfr_cxt = &s_tfr_ctxt;
pad_mgr_t *ps_pad_mgr = &ps_dec->s_pad_mgr;
/*BS is done for all mbs while parsing*/
u4_max_addr = (ps_dec->u2_frm_wd_in_mbs * ps_dec->u2_frm_ht_in_mbs) - 1;
ps_dec->u4_cur_bs_mb_num = u4_max_addr + 1;
ih264d_init_deblk_tfr_ctxt(ps_dec, ps_pad_mgr, ps_tfr_cxt,
ps_dec->u2_frm_wd_in_mbs, 0);
u4_num_mbs = u4_max_addr
- ps_dec->u4_cur_deblk_mb_num + 1;
DEBUG_PERF_PRINTF("mbs left for deblocking= %d \n",u4_num_mbs);
if(u4_num_mbs != 0)
ih264d_check_mb_map_deblk(ps_dec, u4_num_mbs,
ps_tfr_cxt,1);
ps_dec->u4_start_recon_deblk = 0;
}
}
/*signal the decode thread*/
ih264d_signal_decode_thread(ps_dec);
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
}
DATA_SYNC();
if((ps_dec_op->u4_error_code & 0xff)
!= ERROR_DYNAMIC_RESOLUTION_NOT_SUPPORTED)
{
ps_dec_op->u4_pic_wd = (UWORD32)ps_dec->u2_disp_width;
ps_dec_op->u4_pic_ht = (UWORD32)ps_dec->u2_disp_height;
}
if(ps_dec->i4_header_decoded != 3)
{
ps_dec_op->u4_error_code |= (1 << IVD_INSUFFICIENTDATA);
}
if(ps_dec->i4_decode_header == 1 && ps_dec->i4_header_decoded != 3)
{
ps_dec_op->u4_error_code |= (1 << IVD_INSUFFICIENTDATA);
}
if(ps_dec->u4_prev_nal_skipped)
{
/*We have encountered a referenced frame,return to app*/
ps_dec_op->u4_error_code = IVD_DEC_FRM_SKIPPED;
ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM);
ps_dec_op->u4_frame_decoded_flag = 0;
ps_dec_op->u4_size = sizeof(ivd_video_decode_op_t);
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
return (IV_FAIL);
}
if((ps_dec->u4_slice_start_code_found == 1)
&& (ERROR_DANGLING_FIELD_IN_PIC != i4_err_status))
{
/*
* For field pictures, set the bottom and top picture decoded u4_flag correctly.
*/
if(ps_dec->ps_cur_slice->u1_field_pic_flag)
{
if(1 == ps_dec->ps_cur_slice->u1_bottom_field_flag)
{
ps_dec->u1_top_bottom_decoded |= BOT_FIELD_ONLY;
}
else
{
ps_dec->u1_top_bottom_decoded |= TOP_FIELD_ONLY;
}
}
/* if new frame in not found (if we are still getting slices from previous frame)
* ih264d_deblock_display is not called. Such frames will not be added to reference /display
*/
if((ps_dec->ps_dec_err_status->u1_err_flag & REJECT_CUR_PIC) == 0)
{
/* Calling Function to deblock Picture and Display */
ret = ih264d_deblock_display(ps_dec);
if(ret != 0)
{
return IV_FAIL;
}
}
/*set to complete ,as we dont support partial frame decode*/
if(ps_dec->i4_header_decoded == 3)
{
ps_dec->u2_total_mbs_coded = ps_dec->ps_cur_sps->u2_max_mb_addr + 1;
}
/*Update the i4_frametype at the end of picture*/
if(ps_dec->ps_cur_slice->u1_nal_unit_type == IDR_SLICE_NAL)
{
ps_dec->i4_frametype = IV_IDR_FRAME;
}
else if(ps_dec->i4_pic_type == B_SLICE)
{
ps_dec->i4_frametype = IV_B_FRAME;
}
else if(ps_dec->i4_pic_type == P_SLICE)
{
ps_dec->i4_frametype = IV_P_FRAME;
}
else if(ps_dec->i4_pic_type == I_SLICE)
{
ps_dec->i4_frametype = IV_I_FRAME;
}
else
{
H264_DEC_DEBUG_PRINT("Shouldn't come here\n");
}
ps_dec->i4_content_type = ps_dec->ps_cur_slice->u1_field_pic_flag;
ps_dec->u4_total_frames_decoded = ps_dec->u4_total_frames_decoded + 2;
ps_dec->u4_total_frames_decoded = ps_dec->u4_total_frames_decoded
- ps_dec->ps_cur_slice->u1_field_pic_flag;
}
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
{
/* In case the decoder is configured to run in low delay mode,
* then get display buffer and then format convert.
* Note in this mode, format conversion does not run paralelly in a thread and adds to the codec cycles
*/
if((IVD_DECODE_FRAME_OUT == ps_dec->e_frm_out_mode)
&& ps_dec->u1_init_dec_flag)
{
ih264d_get_next_display_field(ps_dec, ps_dec->ps_out_buffer,
&(ps_dec->s_disp_op));
if(0 == ps_dec->s_disp_op.u4_error_code)
{
ps_dec->u4_fmt_conv_cur_row = 0;
ps_dec->u4_output_present = 1;
}
}
ih264d_fill_output_struct_from_context(ps_dec, ps_dec_op);
/* If Format conversion is not complete,
complete it here */
if(ps_dec->u4_output_present &&
(ps_dec->u4_fmt_conv_cur_row < ps_dec->s_disp_frame_info.u4_y_ht))
{
ps_dec->u4_fmt_conv_num_rows = ps_dec->s_disp_frame_info.u4_y_ht
- ps_dec->u4_fmt_conv_cur_row;
ih264d_format_convert(ps_dec, &(ps_dec->s_disp_op),
ps_dec->u4_fmt_conv_cur_row,
ps_dec->u4_fmt_conv_num_rows);
ps_dec->u4_fmt_conv_cur_row += ps_dec->u4_fmt_conv_num_rows;
}
ih264d_release_display_field(ps_dec, &(ps_dec->s_disp_op));
}
if(ps_dec->i4_decode_header == 1 && (ps_dec->i4_header_decoded & 1) == 1)
{
ps_dec_op->u4_progressive_frame_flag = 1;
if((NULL != ps_dec->ps_cur_sps) && (1 == (ps_dec->ps_cur_sps->u1_is_valid)))
{
if((0 == ps_dec->ps_sps->u1_frame_mbs_only_flag)
&& (0 == ps_dec->ps_sps->u1_mb_aff_flag))
ps_dec_op->u4_progressive_frame_flag = 0;
}
}
/*Data memory barrier instruction,so that yuv write by the library is complete*/
DATA_SYNC();
H264_DEC_DEBUG_PRINT("The num bytes consumed: %d\n",
ps_dec_op->u4_num_bytes_consumed);
return api_ret_value;
}
|
WORD32 ih264d_video_decode(iv_obj_t *dec_hdl, void *pv_api_ip, void *pv_api_op)
{
/* ! */
dec_struct_t * ps_dec = (dec_struct_t *)(dec_hdl->pv_codec_handle);
WORD32 i4_err_status = 0;
UWORD8 *pu1_buf = NULL;
WORD32 buflen;
UWORD32 u4_max_ofst, u4_length_of_start_code = 0;
UWORD32 bytes_consumed = 0;
UWORD32 cur_slice_is_nonref = 0;
UWORD32 u4_next_is_aud;
UWORD32 u4_first_start_code_found = 0;
WORD32 ret = 0,api_ret_value = IV_SUCCESS;
WORD32 header_data_left = 0,frame_data_left = 0;
UWORD8 *pu1_bitstrm_buf;
ivd_video_decode_ip_t *ps_dec_ip;
ivd_video_decode_op_t *ps_dec_op;
ithread_set_name((void*)"Parse_thread");
ps_dec_ip = (ivd_video_decode_ip_t *)pv_api_ip;
ps_dec_op = (ivd_video_decode_op_t *)pv_api_op;
ps_dec->pv_dec_out = ps_dec_op;
if(ps_dec->init_done != 1)
{
return IV_FAIL;
}
/*Data memory barries instruction,so that bitstream write by the application is complete*/
DATA_SYNC();
if(0 == ps_dec->u1_flushfrm)
{
if(ps_dec_ip->pv_stream_buffer == NULL)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DEC_FRM_BS_BUF_NULL;
return IV_FAIL;
}
if(ps_dec_ip->u4_num_Bytes <= 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DEC_NUMBYTES_INV;
return IV_FAIL;
}
}
ps_dec->u1_pic_decode_done = 0;
ps_dec_op->u4_num_bytes_consumed = 0;
ps_dec->ps_out_buffer = NULL;
if(ps_dec_ip->u4_size
>= offsetof(ivd_video_decode_ip_t, s_out_buffer))
ps_dec->ps_out_buffer = &ps_dec_ip->s_out_buffer;
ps_dec->u4_fmt_conv_cur_row = 0;
ps_dec->u4_output_present = 0;
ps_dec->s_disp_op.u4_error_code = 1;
ps_dec->u4_fmt_conv_num_rows = FMT_CONV_NUM_ROWS;
if(0 == ps_dec->u4_share_disp_buf
&& ps_dec->i4_decode_header == 0)
{
UWORD32 i;
if(ps_dec->ps_out_buffer->u4_num_bufs == 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DISP_FRM_ZERO_OP_BUFS;
return IV_FAIL;
}
for(i = 0; i < ps_dec->ps_out_buffer->u4_num_bufs; i++)
{
if(ps_dec->ps_out_buffer->pu1_bufs[i] == NULL)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DISP_FRM_OP_BUF_NULL;
return IV_FAIL;
}
if(ps_dec->ps_out_buffer->u4_min_out_buf_size[i] == 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |=
IVD_DISP_FRM_ZERO_OP_BUF_SIZE;
return IV_FAIL;
}
}
}
if(ps_dec->u4_total_frames_decoded >= NUM_FRAMES_LIMIT)
{
ps_dec_op->u4_error_code = ERROR_FRAME_LIMIT_OVER;
return IV_FAIL;
}
/* ! */
ps_dec->u4_ts = ps_dec_ip->u4_ts;
ps_dec_op->u4_error_code = 0;
ps_dec_op->e_pic_type = -1;
ps_dec_op->u4_output_present = 0;
ps_dec_op->u4_frame_decoded_flag = 0;
ps_dec->i4_frametype = -1;
ps_dec->i4_content_type = -1;
/*
* For field pictures, set the bottom and top picture decoded u4_flag correctly.
*/
{
if((TOP_FIELD_ONLY | BOT_FIELD_ONLY) == ps_dec->u1_top_bottom_decoded)
{
ps_dec->u1_top_bottom_decoded = 0;
}
}
ps_dec->u4_slice_start_code_found = 0;
/* In case the deocder is not in flush mode(in shared mode),
then decoder has to pick up a buffer to write current frame.
Check if a frame is available in such cases */
if(ps_dec->u1_init_dec_flag == 1 && ps_dec->u4_share_disp_buf == 1
&& ps_dec->u1_flushfrm == 0)
{
UWORD32 i;
WORD32 disp_avail = 0, free_id;
/* Check if at least one buffer is available with the codec */
/* If not then return to application with error */
for(i = 0; i < ps_dec->u1_pic_bufs; i++)
{
if(0 == ps_dec->u4_disp_buf_mapping[i]
|| 1 == ps_dec->u4_disp_buf_to_be_freed[i])
{
disp_avail = 1;
break;
}
}
if(0 == disp_avail)
{
/* If something is queued for display wait for that buffer to be returned */
ps_dec_op->u4_error_code = IVD_DEC_REF_BUF_NULL;
ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM);
return (IV_FAIL);
}
while(1)
{
pic_buffer_t *ps_pic_buf;
ps_pic_buf = (pic_buffer_t *)ih264_buf_mgr_get_next_free(
(buf_mgr_t *)ps_dec->pv_pic_buf_mgr, &free_id);
if(ps_pic_buf == NULL)
{
UWORD32 i, display_queued = 0;
/* check if any buffer was given for display which is not returned yet */
for(i = 0; i < (MAX_DISP_BUFS_NEW); i++)
{
if(0 != ps_dec->u4_disp_buf_mapping[i])
{
display_queued = 1;
break;
}
}
/* If some buffer is queued for display, then codec has to singal an error and wait
for that buffer to be returned.
If nothing is queued for display then codec has ownership of all display buffers
and it can reuse any of the existing buffers and continue decoding */
if(1 == display_queued)
{
/* If something is queued for display wait for that buffer to be returned */
ps_dec_op->u4_error_code = IVD_DEC_REF_BUF_NULL;
ps_dec_op->u4_error_code |= (1
<< IVD_UNSUPPORTEDPARAM);
return (IV_FAIL);
}
}
else
{
/* If the buffer is with display, then mark it as in use and then look for a buffer again */
if(1 == ps_dec->u4_disp_buf_mapping[free_id])
{
ih264_buf_mgr_set_status(
(buf_mgr_t *)ps_dec->pv_pic_buf_mgr,
free_id,
BUF_MGR_IO);
}
else
{
/**
* Found a free buffer for present call. Release it now.
* Will be again obtained later.
*/
ih264_buf_mgr_release((buf_mgr_t *)ps_dec->pv_pic_buf_mgr,
free_id,
BUF_MGR_IO);
break;
}
}
}
}
if(ps_dec->u1_flushfrm && ps_dec->u1_init_dec_flag)
{
ih264d_get_next_display_field(ps_dec, ps_dec->ps_out_buffer,
&(ps_dec->s_disp_op));
if(0 == ps_dec->s_disp_op.u4_error_code)
{
ps_dec->u4_fmt_conv_cur_row = 0;
ps_dec->u4_fmt_conv_num_rows = ps_dec->s_disp_frame_info.u4_y_ht;
ih264d_format_convert(ps_dec, &(ps_dec->s_disp_op),
ps_dec->u4_fmt_conv_cur_row,
ps_dec->u4_fmt_conv_num_rows);
ps_dec->u4_fmt_conv_cur_row += ps_dec->u4_fmt_conv_num_rows;
ps_dec->u4_output_present = 1;
}
ih264d_release_display_field(ps_dec, &(ps_dec->s_disp_op));
ps_dec_op->u4_pic_wd = (UWORD32)ps_dec->u2_disp_width;
ps_dec_op->u4_pic_ht = (UWORD32)ps_dec->u2_disp_height;
ps_dec_op->u4_new_seq = 0;
ps_dec_op->u4_output_present = ps_dec->u4_output_present;
ps_dec_op->u4_progressive_frame_flag =
ps_dec->s_disp_op.u4_progressive_frame_flag;
ps_dec_op->e_output_format =
ps_dec->s_disp_op.e_output_format;
ps_dec_op->s_disp_frm_buf = ps_dec->s_disp_op.s_disp_frm_buf;
ps_dec_op->e4_fld_type = ps_dec->s_disp_op.e4_fld_type;
ps_dec_op->u4_ts = ps_dec->s_disp_op.u4_ts;
ps_dec_op->u4_disp_buf_id = ps_dec->s_disp_op.u4_disp_buf_id;
/*In the case of flush ,since no frame is decoded set pic type as invalid*/
ps_dec_op->u4_is_ref_flag = -1;
ps_dec_op->e_pic_type = IV_NA_FRAME;
ps_dec_op->u4_frame_decoded_flag = 0;
if(0 == ps_dec->s_disp_op.u4_error_code)
{
return (IV_SUCCESS);
}
else
return (IV_FAIL);
}
if(ps_dec->u1_res_changed == 1)
{
/*if resolution has changed and all buffers have been flushed, reset decoder*/
ih264d_init_decoder(ps_dec);
}
ps_dec->u4_prev_nal_skipped = 0;
ps_dec->u2_cur_mb_addr = 0;
ps_dec->u2_total_mbs_coded = 0;
ps_dec->u2_cur_slice_num = 0;
ps_dec->cur_dec_mb_num = 0;
ps_dec->cur_recon_mb_num = 0;
ps_dec->u4_first_slice_in_pic = 2;
ps_dec->u1_slice_header_done = 0;
ps_dec->u1_dangling_field = 0;
ps_dec->u4_dec_thread_created = 0;
ps_dec->u4_bs_deblk_thread_created = 0;
ps_dec->u4_cur_bs_mb_num = 0;
DEBUG_THREADS_PRINTF(" Starting process call\n");
ps_dec->u4_pic_buf_got = 0;
do
{
WORD32 buf_size;
pu1_buf = (UWORD8*)ps_dec_ip->pv_stream_buffer
+ ps_dec_op->u4_num_bytes_consumed;
u4_max_ofst = ps_dec_ip->u4_num_Bytes
- ps_dec_op->u4_num_bytes_consumed;
/* If dynamic bitstream buffer is not allocated and
* header decode is done, then allocate dynamic bitstream buffer
*/
if((NULL == ps_dec->pu1_bits_buf_dynamic) &&
(ps_dec->i4_header_decoded & 1))
{
WORD32 size;
void *pv_buf;
void *pv_mem_ctxt = ps_dec->pv_mem_ctxt;
size = MAX(256000, ps_dec->u2_pic_wd * ps_dec->u2_pic_ht * 3 / 2);
pv_buf = ps_dec->pf_aligned_alloc(pv_mem_ctxt, 128, size);
RETURN_IF((NULL == pv_buf), IV_FAIL);
ps_dec->pu1_bits_buf_dynamic = pv_buf;
ps_dec->u4_dynamic_bits_buf_size = size;
}
if(ps_dec->pu1_bits_buf_dynamic)
{
pu1_bitstrm_buf = ps_dec->pu1_bits_buf_dynamic;
buf_size = ps_dec->u4_dynamic_bits_buf_size;
}
else
{
pu1_bitstrm_buf = ps_dec->pu1_bits_buf_static;
buf_size = ps_dec->u4_static_bits_buf_size;
}
u4_next_is_aud = 0;
buflen = ih264d_find_start_code(pu1_buf, 0, u4_max_ofst,
&u4_length_of_start_code,
&u4_next_is_aud);
if(buflen == -1)
buflen = 0;
/* Ignore bytes beyond the allocated size of intermediate buffer */
buflen = MIN(buflen, buf_size);
bytes_consumed = buflen + u4_length_of_start_code;
ps_dec_op->u4_num_bytes_consumed += bytes_consumed;
{
UWORD8 u1_firstbyte, u1_nal_ref_idc;
if(ps_dec->i4_app_skip_mode == IVD_SKIP_B)
{
u1_firstbyte = *(pu1_buf + u4_length_of_start_code);
u1_nal_ref_idc = (UWORD8)(NAL_REF_IDC(u1_firstbyte));
if(u1_nal_ref_idc == 0)
{
/*skip non reference frames*/
cur_slice_is_nonref = 1;
continue;
}
else
{
if(1 == cur_slice_is_nonref)
{
/*We have encountered a referenced frame,return to app*/
ps_dec_op->u4_num_bytes_consumed -=
bytes_consumed;
ps_dec_op->e_pic_type = IV_B_FRAME;
ps_dec_op->u4_error_code =
IVD_DEC_FRM_SKIPPED;
ps_dec_op->u4_error_code |= (1
<< IVD_UNSUPPORTEDPARAM);
ps_dec_op->u4_frame_decoded_flag = 0;
ps_dec_op->u4_size =
sizeof(ivd_video_decode_op_t);
/*signal the decode thread*/
ih264d_signal_decode_thread(ps_dec);
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
return (IV_FAIL);
}
}
}
}
if(buflen)
{
memcpy(pu1_bitstrm_buf, pu1_buf + u4_length_of_start_code,
buflen);
/* Decoder may read extra 8 bytes near end of the frame */
if((buflen + 8) < buf_size)
{
memset(pu1_bitstrm_buf + buflen, 0, 8);
}
u4_first_start_code_found = 1;
}
else
{
/*start code not found*/
if(u4_first_start_code_found == 0)
{
/*no start codes found in current process call*/
ps_dec->i4_error_code = ERROR_START_CODE_NOT_FOUND;
ps_dec_op->u4_error_code |= 1 << IVD_INSUFFICIENTDATA;
if(ps_dec->u4_pic_buf_got == 0)
{
ih264d_fill_output_struct_from_context(ps_dec,
ps_dec_op);
ps_dec_op->u4_error_code = ps_dec->i4_error_code;
ps_dec_op->u4_frame_decoded_flag = 0;
return (IV_FAIL);
}
else
{
ps_dec->u1_pic_decode_done = 1;
continue;
}
}
else
{
/* a start code has already been found earlier in the same process call*/
frame_data_left = 0;
continue;
}
}
ps_dec->u4_return_to_app = 0;
ret = ih264d_parse_nal_unit(dec_hdl, ps_dec_op,
pu1_bitstrm_buf, buflen);
if(ret != OK)
{
UWORD32 error = ih264d_map_error(ret);
ps_dec_op->u4_error_code = error | ret;
api_ret_value = IV_FAIL;
if((ret == IVD_RES_CHANGED)
|| (ret == IVD_MEM_ALLOC_FAILED)
|| (ret == ERROR_UNAVAIL_PICBUF_T)
|| (ret == ERROR_UNAVAIL_MVBUF_T))
{
break;
}
if((ret == ERROR_INCOMPLETE_FRAME) || (ret == ERROR_DANGLING_FIELD_IN_PIC))
{
ps_dec_op->u4_num_bytes_consumed -= bytes_consumed;
api_ret_value = IV_FAIL;
break;
}
if(ret == ERROR_IN_LAST_SLICE_OF_PIC)
{
api_ret_value = IV_FAIL;
break;
}
}
if(ps_dec->u4_return_to_app)
{
/*We have encountered a referenced frame,return to app*/
ps_dec_op->u4_num_bytes_consumed -= bytes_consumed;
ps_dec_op->u4_error_code = IVD_DEC_FRM_SKIPPED;
ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM);
ps_dec_op->u4_frame_decoded_flag = 0;
ps_dec_op->u4_size = sizeof(ivd_video_decode_op_t);
/*signal the decode thread*/
ih264d_signal_decode_thread(ps_dec);
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
return (IV_FAIL);
}
header_data_left = ((ps_dec->i4_decode_header == 1)
&& (ps_dec->i4_header_decoded != 3)
&& (ps_dec_op->u4_num_bytes_consumed
< ps_dec_ip->u4_num_Bytes));
frame_data_left = (((ps_dec->i4_decode_header == 0)
&& ((ps_dec->u1_pic_decode_done == 0)
|| (u4_next_is_aud == 1)))
&& (ps_dec_op->u4_num_bytes_consumed
< ps_dec_ip->u4_num_Bytes));
}
while(( header_data_left == 1)||(frame_data_left == 1));
if((ps_dec->u4_slice_start_code_found == 1)
&& (ret != IVD_MEM_ALLOC_FAILED)
&& ps_dec->u2_total_mbs_coded < ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs)
{
WORD32 num_mb_skipped;
WORD32 prev_slice_err;
pocstruct_t temp_poc;
WORD32 ret1;
num_mb_skipped = (ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs)
- ps_dec->u2_total_mbs_coded;
if(ps_dec->u4_first_slice_in_pic && (ps_dec->u4_pic_buf_got == 0))
prev_slice_err = 1;
else
prev_slice_err = 2;
ret1 = ih264d_mark_err_slice_skip(ps_dec, num_mb_skipped, ps_dec->u1_nal_unit_type == IDR_SLICE_NAL, ps_dec->ps_cur_slice->u2_frame_num,
&temp_poc, prev_slice_err);
if((ret1 == ERROR_UNAVAIL_PICBUF_T) || (ret1 == ERROR_UNAVAIL_MVBUF_T))
{
return IV_FAIL;
}
}
if((ret == IVD_RES_CHANGED)
|| (ret == IVD_MEM_ALLOC_FAILED)
|| (ret == ERROR_UNAVAIL_PICBUF_T)
|| (ret == ERROR_UNAVAIL_MVBUF_T))
{
/* signal the decode thread */
ih264d_signal_decode_thread(ps_dec);
/* close deblock thread if it is not closed yet */
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
/* dont consume bitstream for change in resolution case */
if(ret == IVD_RES_CHANGED)
{
ps_dec_op->u4_num_bytes_consumed -= bytes_consumed;
}
return IV_FAIL;
}
if(ps_dec->u1_separate_parse)
{
/* If Format conversion is not complete,
complete it here */
if(ps_dec->u4_num_cores == 2)
{
/*do deblocking of all mbs*/
if((ps_dec->u4_nmb_deblk == 0) &&(ps_dec->u4_start_recon_deblk == 1) && (ps_dec->ps_cur_sps->u1_mb_aff_flag == 0))
{
UWORD32 u4_num_mbs,u4_max_addr;
tfr_ctxt_t s_tfr_ctxt;
tfr_ctxt_t *ps_tfr_cxt = &s_tfr_ctxt;
pad_mgr_t *ps_pad_mgr = &ps_dec->s_pad_mgr;
/*BS is done for all mbs while parsing*/
u4_max_addr = (ps_dec->u2_frm_wd_in_mbs * ps_dec->u2_frm_ht_in_mbs) - 1;
ps_dec->u4_cur_bs_mb_num = u4_max_addr + 1;
ih264d_init_deblk_tfr_ctxt(ps_dec, ps_pad_mgr, ps_tfr_cxt,
ps_dec->u2_frm_wd_in_mbs, 0);
u4_num_mbs = u4_max_addr
- ps_dec->u4_cur_deblk_mb_num + 1;
DEBUG_PERF_PRINTF("mbs left for deblocking= %d \n",u4_num_mbs);
if(u4_num_mbs != 0)
ih264d_check_mb_map_deblk(ps_dec, u4_num_mbs,
ps_tfr_cxt,1);
ps_dec->u4_start_recon_deblk = 0;
}
}
/*signal the decode thread*/
ih264d_signal_decode_thread(ps_dec);
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
}
DATA_SYNC();
if((ps_dec_op->u4_error_code & 0xff)
!= ERROR_DYNAMIC_RESOLUTION_NOT_SUPPORTED)
{
ps_dec_op->u4_pic_wd = (UWORD32)ps_dec->u2_disp_width;
ps_dec_op->u4_pic_ht = (UWORD32)ps_dec->u2_disp_height;
}
if(ps_dec->i4_header_decoded != 3)
{
ps_dec_op->u4_error_code |= (1 << IVD_INSUFFICIENTDATA);
}
if(ps_dec->i4_decode_header == 1 && ps_dec->i4_header_decoded != 3)
{
ps_dec_op->u4_error_code |= (1 << IVD_INSUFFICIENTDATA);
}
if(ps_dec->u4_prev_nal_skipped)
{
/*We have encountered a referenced frame,return to app*/
ps_dec_op->u4_error_code = IVD_DEC_FRM_SKIPPED;
ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM);
ps_dec_op->u4_frame_decoded_flag = 0;
ps_dec_op->u4_size = sizeof(ivd_video_decode_op_t);
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
return (IV_FAIL);
}
if((ps_dec->u4_slice_start_code_found == 1)
&& (ERROR_DANGLING_FIELD_IN_PIC != i4_err_status))
{
/*
* For field pictures, set the bottom and top picture decoded u4_flag correctly.
*/
if(ps_dec->ps_cur_slice->u1_field_pic_flag)
{
if(1 == ps_dec->ps_cur_slice->u1_bottom_field_flag)
{
ps_dec->u1_top_bottom_decoded |= BOT_FIELD_ONLY;
}
else
{
ps_dec->u1_top_bottom_decoded |= TOP_FIELD_ONLY;
}
}
/* if new frame in not found (if we are still getting slices from previous frame)
* ih264d_deblock_display is not called. Such frames will not be added to reference /display
*/
if((ps_dec->ps_dec_err_status->u1_err_flag & REJECT_CUR_PIC) == 0)
{
/* Calling Function to deblock Picture and Display */
ret = ih264d_deblock_display(ps_dec);
if(ret != 0)
{
return IV_FAIL;
}
}
/*set to complete ,as we dont support partial frame decode*/
if(ps_dec->i4_header_decoded == 3)
{
ps_dec->u2_total_mbs_coded = ps_dec->ps_cur_sps->u2_max_mb_addr + 1;
}
/*Update the i4_frametype at the end of picture*/
if(ps_dec->ps_cur_slice->u1_nal_unit_type == IDR_SLICE_NAL)
{
ps_dec->i4_frametype = IV_IDR_FRAME;
}
else if(ps_dec->i4_pic_type == B_SLICE)
{
ps_dec->i4_frametype = IV_B_FRAME;
}
else if(ps_dec->i4_pic_type == P_SLICE)
{
ps_dec->i4_frametype = IV_P_FRAME;
}
else if(ps_dec->i4_pic_type == I_SLICE)
{
ps_dec->i4_frametype = IV_I_FRAME;
}
else
{
H264_DEC_DEBUG_PRINT("Shouldn't come here\n");
}
ps_dec->i4_content_type = ps_dec->ps_cur_slice->u1_field_pic_flag;
ps_dec->u4_total_frames_decoded = ps_dec->u4_total_frames_decoded + 2;
ps_dec->u4_total_frames_decoded = ps_dec->u4_total_frames_decoded
- ps_dec->ps_cur_slice->u1_field_pic_flag;
}
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
{
/* In case the decoder is configured to run in low delay mode,
* then get display buffer and then format convert.
* Note in this mode, format conversion does not run paralelly in a thread and adds to the codec cycles
*/
if((IVD_DECODE_FRAME_OUT == ps_dec->e_frm_out_mode)
&& ps_dec->u1_init_dec_flag)
{
ih264d_get_next_display_field(ps_dec, ps_dec->ps_out_buffer,
&(ps_dec->s_disp_op));
if(0 == ps_dec->s_disp_op.u4_error_code)
{
ps_dec->u4_fmt_conv_cur_row = 0;
ps_dec->u4_output_present = 1;
}
}
ih264d_fill_output_struct_from_context(ps_dec, ps_dec_op);
/* If Format conversion is not complete,
complete it here */
if(ps_dec->u4_output_present &&
(ps_dec->u4_fmt_conv_cur_row < ps_dec->s_disp_frame_info.u4_y_ht))
{
ps_dec->u4_fmt_conv_num_rows = ps_dec->s_disp_frame_info.u4_y_ht
- ps_dec->u4_fmt_conv_cur_row;
ih264d_format_convert(ps_dec, &(ps_dec->s_disp_op),
ps_dec->u4_fmt_conv_cur_row,
ps_dec->u4_fmt_conv_num_rows);
ps_dec->u4_fmt_conv_cur_row += ps_dec->u4_fmt_conv_num_rows;
}
ih264d_release_display_field(ps_dec, &(ps_dec->s_disp_op));
}
if(ps_dec->i4_decode_header == 1 && (ps_dec->i4_header_decoded & 1) == 1)
{
ps_dec_op->u4_progressive_frame_flag = 1;
if((NULL != ps_dec->ps_cur_sps) && (1 == (ps_dec->ps_cur_sps->u1_is_valid)))
{
if((0 == ps_dec->ps_sps->u1_frame_mbs_only_flag)
&& (0 == ps_dec->ps_sps->u1_mb_aff_flag))
ps_dec_op->u4_progressive_frame_flag = 0;
}
}
/*Data memory barrier instruction,so that yuv write by the library is complete*/
DATA_SYNC();
H264_DEC_DEBUG_PRINT("The num bytes consumed: %d\n",
ps_dec_op->u4_num_bytes_consumed);
return api_ret_value;
}
|
C
|
Android
| 1 |
CVE-2016-5767
|
https://www.cvedetails.com/cve/CVE-2016-5767/
|
CWE-190
|
https://github.com/php/php-src/commit/c395c6e5d7e8df37a21265ff76e48fe75ceb5ae6?w=1
|
c395c6e5d7e8df37a21265ff76e48fe75ceb5ae6
|
Fixed bug #72446 - Integer Overflow in gdImagePaletteToTrueColor() resulting in heap overflow
|
void gdImageSetAntiAliased (gdImagePtr im, int c)
{
im->AA = 1;
im->AA_color = c;
im->AA_dont_blend = -1;
}
|
void gdImageSetAntiAliased (gdImagePtr im, int c)
{
im->AA = 1;
im->AA_color = c;
im->AA_dont_blend = -1;
}
|
C
|
php-src
| 0 |
CVE-2012-5148
|
https://www.cvedetails.com/cve/CVE-2012-5148/
|
CWE-20
|
https://github.com/chromium/chromium/commit/e89cfcb9090e8c98129ae9160c513f504db74599
|
e89cfcb9090e8c98129ae9160c513f504db74599
|
Remove TabContents from TabStripModelObserver::TabDetachedAt.
BUG=107201
TEST=no visible change
Review URL: https://chromiumcodereview.appspot.com/11293205
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@167122 0039d316-1c4b-4281-b951-d872f2087c98
|
void TabStripModelObserver::TabClosingAt(TabStripModel* tab_strip_model,
WebContents* contents,
int index) {
}
|
void TabStripModelObserver::TabClosingAt(TabStripModel* tab_strip_model,
WebContents* contents,
int index) {
}
|
C
|
Chrome
| 0 |
CVE-2011-4324
|
https://www.cvedetails.com/cve/CVE-2011-4324/
| null |
https://github.com/torvalds/linux/commit/dc0b027dfadfcb8a5504f7d8052754bf8d501ab9
|
dc0b027dfadfcb8a5504f7d8052754bf8d501ab9
|
NFSv4: Convert the open and close ops to use fmode
Signed-off-by: Trond Myklebust <[email protected]>
|
static int decode_statfs(struct xdr_stream *xdr, struct nfs_fsstat *fsstat)
{
__be32 *savep;
uint32_t attrlen,
bitmap[2] = {0};
int status;
if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
goto xdr_error;
if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
goto xdr_error;
if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
goto xdr_error;
if ((status = decode_attr_files_avail(xdr, bitmap, &fsstat->afiles)) != 0)
goto xdr_error;
if ((status = decode_attr_files_free(xdr, bitmap, &fsstat->ffiles)) != 0)
goto xdr_error;
if ((status = decode_attr_files_total(xdr, bitmap, &fsstat->tfiles)) != 0)
goto xdr_error;
if ((status = decode_attr_space_avail(xdr, bitmap, &fsstat->abytes)) != 0)
goto xdr_error;
if ((status = decode_attr_space_free(xdr, bitmap, &fsstat->fbytes)) != 0)
goto xdr_error;
if ((status = decode_attr_space_total(xdr, bitmap, &fsstat->tbytes)) != 0)
goto xdr_error;
status = verify_attr_len(xdr, savep, attrlen);
xdr_error:
dprintk("%s: xdr returned %d!\n", __func__, -status);
return status;
}
|
static int decode_statfs(struct xdr_stream *xdr, struct nfs_fsstat *fsstat)
{
__be32 *savep;
uint32_t attrlen,
bitmap[2] = {0};
int status;
if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
goto xdr_error;
if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
goto xdr_error;
if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
goto xdr_error;
if ((status = decode_attr_files_avail(xdr, bitmap, &fsstat->afiles)) != 0)
goto xdr_error;
if ((status = decode_attr_files_free(xdr, bitmap, &fsstat->ffiles)) != 0)
goto xdr_error;
if ((status = decode_attr_files_total(xdr, bitmap, &fsstat->tfiles)) != 0)
goto xdr_error;
if ((status = decode_attr_space_avail(xdr, bitmap, &fsstat->abytes)) != 0)
goto xdr_error;
if ((status = decode_attr_space_free(xdr, bitmap, &fsstat->fbytes)) != 0)
goto xdr_error;
if ((status = decode_attr_space_total(xdr, bitmap, &fsstat->tbytes)) != 0)
goto xdr_error;
status = verify_attr_len(xdr, savep, attrlen);
xdr_error:
dprintk("%s: xdr returned %d!\n", __func__, -status);
return status;
}
|
C
|
linux
| 0 |
CVE-2011-3353
|
https://www.cvedetails.com/cve/CVE-2011-3353/
|
CWE-119
|
https://github.com/torvalds/linux/commit/c2183d1e9b3f313dd8ba2b1b0197c8d9fb86a7ae
|
c2183d1e9b3f313dd8ba2b1b0197c8d9fb86a7ae
|
fuse: check size of FUSE_NOTIFY_INVAL_ENTRY message
FUSE_NOTIFY_INVAL_ENTRY didn't check the length of the write so the
message processing could overrun and result in a "kernel BUG at
fs/fuse/dev.c:629!"
Reported-by: Han-Wen Nienhuys <[email protected]>
Signed-off-by: Miklos Szeredi <[email protected]>
CC: [email protected]
|
void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
u64 nodeid, u64 nlookup)
{
forget->forget_one.nodeid = nodeid;
forget->forget_one.nlookup = nlookup;
spin_lock(&fc->lock);
fc->forget_list_tail->next = forget;
fc->forget_list_tail = forget;
wake_up(&fc->waitq);
kill_fasync(&fc->fasync, SIGIO, POLL_IN);
spin_unlock(&fc->lock);
}
|
void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
u64 nodeid, u64 nlookup)
{
forget->forget_one.nodeid = nodeid;
forget->forget_one.nlookup = nlookup;
spin_lock(&fc->lock);
fc->forget_list_tail->next = forget;
fc->forget_list_tail = forget;
wake_up(&fc->waitq);
kill_fasync(&fc->fasync, SIGIO, POLL_IN);
spin_unlock(&fc->lock);
}
|
C
|
linux
| 0 |
CVE-2011-2804
|
https://www.cvedetails.com/cve/CVE-2011-2804/
|
CWE-399
|
https://github.com/chromium/chromium/commit/dc7b094a338c6c521f918f478e993f0f74bbea0d
|
dc7b094a338c6c521f918f478e993f0f74bbea0d
|
Remove use of libcros from InputMethodLibrary.
BUG=chromium-os:16238
TEST==confirm that input methods work as before on the netbook. Also confirm that the chrome builds and works on the desktop as before.
Review URL: http://codereview.chromium.org/7003086
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@89142 0039d316-1c4b-4281-b951-d872f2087c98
|
std::map<std::string, std::string>* GetKeyboardOverlayMapForTesting() {
KeyboardOverlayMap* keyboard_overlay_map =
new KeyboardOverlayMap;
(*keyboard_overlay_map)["xkb:nl::nld"] = "nl";
(*keyboard_overlay_map)["xkb:be::nld"] = "nl";
(*keyboard_overlay_map)["xkb:fr::fra"] = "fr";
(*keyboard_overlay_map)["xkb:be::fra"] = "fr";
(*keyboard_overlay_map)["xkb:ca::fra"] = "fr_CA";
(*keyboard_overlay_map)["xkb:ch:fr:fra"] = "fr";
(*keyboard_overlay_map)["xkb:de::ger"] = "de";
(*keyboard_overlay_map)["xkb:be::ger"] = "de";
(*keyboard_overlay_map)["xkb:ch::ger"] = "de";
(*keyboard_overlay_map)["mozc"] = "en_US";
(*keyboard_overlay_map)["mozc-jp"] = "ja";
(*keyboard_overlay_map)["mozc-dv"] = "en_US_dvorak";
(*keyboard_overlay_map)["xkb:jp::jpn"] = "ja";
(*keyboard_overlay_map)["xkb:ru::rus"] = "ru";
(*keyboard_overlay_map)["xkb:ru:phonetic:rus"] = "ru";
(*keyboard_overlay_map)["m17n:th:kesmanee"] = "th";
(*keyboard_overlay_map)["m17n:th:pattachote"] = "th";
(*keyboard_overlay_map)["m17n:th:tis820"] = "th";
(*keyboard_overlay_map)["mozc-chewing"] = "zh_TW";
(*keyboard_overlay_map)["m17n:zh:cangjie"] = "zh_TW";
(*keyboard_overlay_map)["m17n:zh:quick"] = "zh_TW";
(*keyboard_overlay_map)["m17n:vi:tcvn"] = "vi";
(*keyboard_overlay_map)["m17n:vi:telex"] = "vi";
(*keyboard_overlay_map)["m17n:vi:viqr"] = "vi";
(*keyboard_overlay_map)["m17n:vi:vni"] = "vi";
(*keyboard_overlay_map)["xkb:us::eng"] = "en_US";
(*keyboard_overlay_map)["xkb:us:intl:eng"] = "en_US";
(*keyboard_overlay_map)["xkb:us:altgr-intl:eng"] = "en_US";
(*keyboard_overlay_map)["xkb:us:dvorak:eng"] =
"en_US_dvorak";
(*keyboard_overlay_map)["xkb:us:colemak:eng"] =
"en_US";
(*keyboard_overlay_map)["hangul"] = "ko";
(*keyboard_overlay_map)["pinyin"] = "zh_CN";
(*keyboard_overlay_map)["m17n:ar:kbd"] = "ar";
(*keyboard_overlay_map)["m17n:hi:itrans"] = "hi";
(*keyboard_overlay_map)["m17n:fa:isiri"] = "ar";
(*keyboard_overlay_map)["xkb:br::por"] = "pt_BR";
(*keyboard_overlay_map)["xkb:bg::bul"] = "bg";
(*keyboard_overlay_map)["xkb:bg:phonetic:bul"] = "bg";
(*keyboard_overlay_map)["xkb:ca:eng:eng"] = "ca";
(*keyboard_overlay_map)["xkb:cz::cze"] = "cs";
(*keyboard_overlay_map)["xkb:ee::est"] = "et";
(*keyboard_overlay_map)["xkb:es::spa"] = "es";
(*keyboard_overlay_map)["xkb:es:cat:cat"] = "ca";
(*keyboard_overlay_map)["xkb:dk::dan"] = "da";
(*keyboard_overlay_map)["xkb:gr::gre"] = "el";
(*keyboard_overlay_map)["xkb:il::heb"] = "iw";
(*keyboard_overlay_map)["xkb:kr:kr104:kor"] = "ko";
(*keyboard_overlay_map)["xkb:latam::spa"] = "es_419";
(*keyboard_overlay_map)["xkb:lt::lit"] = "lt";
(*keyboard_overlay_map)["xkb:lv:apostrophe:lav"] = "lv";
(*keyboard_overlay_map)["xkb:hr::scr"] = "hr";
(*keyboard_overlay_map)["xkb:gb:extd:eng"] = "en_GB";
(*keyboard_overlay_map)["xkb:gb:dvorak:eng"] = "en_GB_dvorak";
(*keyboard_overlay_map)["xkb:fi::fin"] = "fi";
(*keyboard_overlay_map)["xkb:hu::hun"] = "hu";
(*keyboard_overlay_map)["xkb:it::ita"] = "it";
(*keyboard_overlay_map)["xkb:no::nob"] = "no";
(*keyboard_overlay_map)["xkb:pl::pol"] = "pl";
(*keyboard_overlay_map)["xkb:pt::por"] = "pt_PT";
(*keyboard_overlay_map)["xkb:ro::rum"] = "ro";
(*keyboard_overlay_map)["xkb:se::swe"] = "sv";
(*keyboard_overlay_map)["xkb:sk::slo"] = "sk";
(*keyboard_overlay_map)["xkb:si::slv"] = "sl";
(*keyboard_overlay_map)["xkb:rs::srp"] = "sr";
(*keyboard_overlay_map)["xkb:tr::tur"] = "tr";
(*keyboard_overlay_map)["xkb:ua::ukr"] = "uk";
return keyboard_overlay_map;
}
|
std::map<std::string, std::string>* GetKeyboardOverlayMapForTesting() {
KeyboardOverlayMap* keyboard_overlay_map =
new KeyboardOverlayMap;
(*keyboard_overlay_map)["xkb:nl::nld"] = "nl";
(*keyboard_overlay_map)["xkb:be::nld"] = "nl";
(*keyboard_overlay_map)["xkb:fr::fra"] = "fr";
(*keyboard_overlay_map)["xkb:be::fra"] = "fr";
(*keyboard_overlay_map)["xkb:ca::fra"] = "fr_CA";
(*keyboard_overlay_map)["xkb:ch:fr:fra"] = "fr";
(*keyboard_overlay_map)["xkb:de::ger"] = "de";
(*keyboard_overlay_map)["xkb:be::ger"] = "de";
(*keyboard_overlay_map)["xkb:ch::ger"] = "de";
(*keyboard_overlay_map)["mozc"] = "en_US";
(*keyboard_overlay_map)["mozc-jp"] = "ja";
(*keyboard_overlay_map)["mozc-dv"] = "en_US_dvorak";
(*keyboard_overlay_map)["xkb:jp::jpn"] = "ja";
(*keyboard_overlay_map)["xkb:ru::rus"] = "ru";
(*keyboard_overlay_map)["xkb:ru:phonetic:rus"] = "ru";
(*keyboard_overlay_map)["m17n:th:kesmanee"] = "th";
(*keyboard_overlay_map)["m17n:th:pattachote"] = "th";
(*keyboard_overlay_map)["m17n:th:tis820"] = "th";
(*keyboard_overlay_map)["mozc-chewing"] = "zh_TW";
(*keyboard_overlay_map)["m17n:zh:cangjie"] = "zh_TW";
(*keyboard_overlay_map)["m17n:zh:quick"] = "zh_TW";
(*keyboard_overlay_map)["m17n:vi:tcvn"] = "vi";
(*keyboard_overlay_map)["m17n:vi:telex"] = "vi";
(*keyboard_overlay_map)["m17n:vi:viqr"] = "vi";
(*keyboard_overlay_map)["m17n:vi:vni"] = "vi";
(*keyboard_overlay_map)["xkb:us::eng"] = "en_US";
(*keyboard_overlay_map)["xkb:us:intl:eng"] = "en_US";
(*keyboard_overlay_map)["xkb:us:altgr-intl:eng"] = "en_US";
(*keyboard_overlay_map)["xkb:us:dvorak:eng"] =
"en_US_dvorak";
(*keyboard_overlay_map)["xkb:us:colemak:eng"] =
"en_US";
(*keyboard_overlay_map)["hangul"] = "ko";
(*keyboard_overlay_map)["pinyin"] = "zh_CN";
(*keyboard_overlay_map)["m17n:ar:kbd"] = "ar";
(*keyboard_overlay_map)["m17n:hi:itrans"] = "hi";
(*keyboard_overlay_map)["m17n:fa:isiri"] = "ar";
(*keyboard_overlay_map)["xkb:br::por"] = "pt_BR";
(*keyboard_overlay_map)["xkb:bg::bul"] = "bg";
(*keyboard_overlay_map)["xkb:bg:phonetic:bul"] = "bg";
(*keyboard_overlay_map)["xkb:ca:eng:eng"] = "ca";
(*keyboard_overlay_map)["xkb:cz::cze"] = "cs";
(*keyboard_overlay_map)["xkb:ee::est"] = "et";
(*keyboard_overlay_map)["xkb:es::spa"] = "es";
(*keyboard_overlay_map)["xkb:es:cat:cat"] = "ca";
(*keyboard_overlay_map)["xkb:dk::dan"] = "da";
(*keyboard_overlay_map)["xkb:gr::gre"] = "el";
(*keyboard_overlay_map)["xkb:il::heb"] = "iw";
(*keyboard_overlay_map)["xkb:kr:kr104:kor"] = "ko";
(*keyboard_overlay_map)["xkb:latam::spa"] = "es_419";
(*keyboard_overlay_map)["xkb:lt::lit"] = "lt";
(*keyboard_overlay_map)["xkb:lv:apostrophe:lav"] = "lv";
(*keyboard_overlay_map)["xkb:hr::scr"] = "hr";
(*keyboard_overlay_map)["xkb:gb:extd:eng"] = "en_GB";
(*keyboard_overlay_map)["xkb:gb:dvorak:eng"] = "en_GB_dvorak";
(*keyboard_overlay_map)["xkb:fi::fin"] = "fi";
(*keyboard_overlay_map)["xkb:hu::hun"] = "hu";
(*keyboard_overlay_map)["xkb:it::ita"] = "it";
(*keyboard_overlay_map)["xkb:no::nob"] = "no";
(*keyboard_overlay_map)["xkb:pl::pol"] = "pl";
(*keyboard_overlay_map)["xkb:pt::por"] = "pt_PT";
(*keyboard_overlay_map)["xkb:ro::rum"] = "ro";
(*keyboard_overlay_map)["xkb:se::swe"] = "sv";
(*keyboard_overlay_map)["xkb:sk::slo"] = "sk";
(*keyboard_overlay_map)["xkb:si::slv"] = "sl";
(*keyboard_overlay_map)["xkb:rs::srp"] = "sr";
(*keyboard_overlay_map)["xkb:tr::tur"] = "tr";
(*keyboard_overlay_map)["xkb:ua::ukr"] = "uk";
return keyboard_overlay_map;
}
|
C
|
Chrome
| 0 |
CVE-2013-6420
|
https://www.cvedetails.com/cve/CVE-2013-6420/
|
CWE-119
|
https://git.php.net/?p=php-src.git;a=commit;h=c1224573c773b6845e83505f717fbf820fc18415
|
c1224573c773b6845e83505f717fbf820fc18415
| null |
PHP_FUNCTION(openssl_csr_get_public_key)
{
zval ** zcsr;
zend_bool use_shortnames = 1;
long csr_resource;
X509_REQ * csr;
EVP_PKEY *tpubkey;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "Z|b", &zcsr, &use_shortnames) == FAILURE) {
return;
}
csr = php_openssl_csr_from_zval(zcsr, 0, &csr_resource TSRMLS_CC);
if (csr == NULL) {
RETURN_FALSE;
}
tpubkey=X509_REQ_get_pubkey(csr);
RETVAL_RESOURCE(zend_list_insert(tpubkey, le_key));
return;
}
|
PHP_FUNCTION(openssl_csr_get_public_key)
{
zval ** zcsr;
zend_bool use_shortnames = 1;
long csr_resource;
X509_REQ * csr;
EVP_PKEY *tpubkey;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "Z|b", &zcsr, &use_shortnames) == FAILURE) {
return;
}
csr = php_openssl_csr_from_zval(zcsr, 0, &csr_resource TSRMLS_CC);
if (csr == NULL) {
RETURN_FALSE;
}
tpubkey=X509_REQ_get_pubkey(csr);
RETVAL_RESOURCE(zend_list_insert(tpubkey, le_key));
return;
}
|
C
|
php
| 0 |
CVE-2017-5118
|
https://www.cvedetails.com/cve/CVE-2017-5118/
|
CWE-732
|
https://github.com/chromium/chromium/commit/0ab2412a104d2f235d7b9fe19d30ef605a410832
|
0ab2412a104d2f235d7b9fe19d30ef605a410832
|
Inherit CSP when we inherit the security origin
This prevents attacks that use main window navigation to get out of the
existing csp constraints such as the related bug
Bug: 747847
Change-Id: I1e57b50da17f65d38088205b0a3c7c49ef2ae4d8
Reviewed-on: https://chromium-review.googlesource.com/592027
Reviewed-by: Mike West <[email protected]>
Commit-Queue: Andy Paicu <[email protected]>
Cr-Commit-Position: refs/heads/master@{#492333}
|
size_t WebLocalFrameImpl::CharacterIndexForPoint(
const WebPoint& point_in_viewport) const {
if (!GetFrame())
return kNotFound;
IntPoint point = GetFrame()->View()->ViewportToContents(point_in_viewport);
HitTestResult result = GetFrame()->GetEventHandler().HitTestResultAtPoint(
point, HitTestRequest::kReadOnly | HitTestRequest::kActive);
const EphemeralRange range =
GetFrame()->RangeForPoint(result.RoundedPointInInnerNodeFrame());
if (range.IsNull())
return kNotFound;
Element* editable =
GetFrame()->Selection().RootEditableElementOrDocumentElement();
DCHECK(editable);
return PlainTextRange::Create(*editable, range).Start();
}
|
size_t WebLocalFrameImpl::CharacterIndexForPoint(
const WebPoint& point_in_viewport) const {
if (!GetFrame())
return kNotFound;
IntPoint point = GetFrame()->View()->ViewportToContents(point_in_viewport);
HitTestResult result = GetFrame()->GetEventHandler().HitTestResultAtPoint(
point, HitTestRequest::kReadOnly | HitTestRequest::kActive);
const EphemeralRange range =
GetFrame()->RangeForPoint(result.RoundedPointInInnerNodeFrame());
if (range.IsNull())
return kNotFound;
Element* editable =
GetFrame()->Selection().RootEditableElementOrDocumentElement();
DCHECK(editable);
return PlainTextRange::Create(*editable, range).Start();
}
|
C
|
Chrome
| 0 |
CVE-2018-13095
|
https://www.cvedetails.com/cve/CVE-2018-13095/
|
CWE-476
|
https://github.com/torvalds/linux/commit/23fcb3340d033d9f081e21e6c12c2db7eaa541d3
|
23fcb3340d033d9f081e21e6c12c2db7eaa541d3
|
xfs: More robust inode extent count validation
When the inode is in extent format, it can't have more extents than
fit in the inode fork. We don't currently check this, and so this
corruption goes unnoticed by the inode verifiers. This can lead to
crashes operating on invalid in-memory structures.
Attempts to access such an inode will now error out in the verifier
rather than allowing modification operations to proceed.
Reported-by: Wen Xu <[email protected]>
Signed-off-by: Dave Chinner <[email protected]>
Reviewed-by: Darrick J. Wong <[email protected]>
[darrick: fix a typedef, add some braces and breaks to shut up compiler warnings]
Signed-off-by: Darrick J. Wong <[email protected]>
|
xfs_iread(
xfs_mount_t *mp,
xfs_trans_t *tp,
xfs_inode_t *ip,
uint iget_flags)
{
xfs_buf_t *bp;
xfs_dinode_t *dip;
xfs_failaddr_t fa;
int error;
/*
* Fill in the location information in the in-core inode.
*/
error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
if (error)
return error;
/* shortcut IO on inode allocation if possible */
if ((iget_flags & XFS_IGET_CREATE) &&
xfs_sb_version_hascrc(&mp->m_sb) &&
!(mp->m_flags & XFS_MOUNT_IKEEP)) {
/* initialise the on-disk inode core */
memset(&ip->i_d, 0, sizeof(ip->i_d));
VFS_I(ip)->i_generation = prandom_u32();
ip->i_d.di_version = 3;
return 0;
}
/*
* Get pointers to the on-disk inode and the buffer containing it.
*/
error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
if (error)
return error;
/* even unallocated inodes are verified */
fa = xfs_dinode_verify(mp, ip->i_ino, dip);
if (fa) {
xfs_inode_verifier_error(ip, -EFSCORRUPTED, "dinode", dip,
sizeof(*dip), fa);
error = -EFSCORRUPTED;
goto out_brelse;
}
/*
* If the on-disk inode is already linked to a directory
* entry, copy all of the inode into the in-core inode.
* xfs_iformat_fork() handles copying in the inode format
* specific information.
* Otherwise, just get the truly permanent information.
*/
if (dip->di_mode) {
xfs_inode_from_disk(ip, dip);
error = xfs_iformat_fork(ip, dip);
if (error) {
#ifdef DEBUG
xfs_alert(mp, "%s: xfs_iformat() returned error %d",
__func__, error);
#endif /* DEBUG */
goto out_brelse;
}
} else {
/*
* Partial initialisation of the in-core inode. Just the bits
* that xfs_ialloc won't overwrite or relies on being correct.
*/
ip->i_d.di_version = dip->di_version;
VFS_I(ip)->i_generation = be32_to_cpu(dip->di_gen);
ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
/*
* Make sure to pull in the mode here as well in
* case the inode is released without being used.
* This ensures that xfs_inactive() will see that
* the inode is already free and not try to mess
* with the uninitialized part of it.
*/
VFS_I(ip)->i_mode = 0;
}
ASSERT(ip->i_d.di_version >= 2);
ip->i_delayed_blks = 0;
/*
* Mark the buffer containing the inode as something to keep
* around for a while. This helps to keep recently accessed
* meta-data in-core longer.
*/
xfs_buf_set_ref(bp, XFS_INO_REF);
/*
* Use xfs_trans_brelse() to release the buffer containing the on-disk
* inode, because it was acquired with xfs_trans_read_buf() in
* xfs_imap_to_bp() above. If tp is NULL, this is just a normal
* brelse(). If we're within a transaction, then xfs_trans_brelse()
* will only release the buffer if it is not dirty within the
* transaction. It will be OK to release the buffer in this case,
* because inodes on disk are never destroyed and we will be locking the
* new in-core inode before putting it in the cache where other
* processes can find it. Thus we don't have to worry about the inode
* being changed just because we released the buffer.
*/
out_brelse:
xfs_trans_brelse(tp, bp);
return error;
}
|
xfs_iread(
xfs_mount_t *mp,
xfs_trans_t *tp,
xfs_inode_t *ip,
uint iget_flags)
{
xfs_buf_t *bp;
xfs_dinode_t *dip;
xfs_failaddr_t fa;
int error;
/*
* Fill in the location information in the in-core inode.
*/
error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
if (error)
return error;
/* shortcut IO on inode allocation if possible */
if ((iget_flags & XFS_IGET_CREATE) &&
xfs_sb_version_hascrc(&mp->m_sb) &&
!(mp->m_flags & XFS_MOUNT_IKEEP)) {
/* initialise the on-disk inode core */
memset(&ip->i_d, 0, sizeof(ip->i_d));
VFS_I(ip)->i_generation = prandom_u32();
ip->i_d.di_version = 3;
return 0;
}
/*
* Get pointers to the on-disk inode and the buffer containing it.
*/
error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
if (error)
return error;
/* even unallocated inodes are verified */
fa = xfs_dinode_verify(mp, ip->i_ino, dip);
if (fa) {
xfs_inode_verifier_error(ip, -EFSCORRUPTED, "dinode", dip,
sizeof(*dip), fa);
error = -EFSCORRUPTED;
goto out_brelse;
}
/*
* If the on-disk inode is already linked to a directory
* entry, copy all of the inode into the in-core inode.
* xfs_iformat_fork() handles copying in the inode format
* specific information.
* Otherwise, just get the truly permanent information.
*/
if (dip->di_mode) {
xfs_inode_from_disk(ip, dip);
error = xfs_iformat_fork(ip, dip);
if (error) {
#ifdef DEBUG
xfs_alert(mp, "%s: xfs_iformat() returned error %d",
__func__, error);
#endif /* DEBUG */
goto out_brelse;
}
} else {
/*
* Partial initialisation of the in-core inode. Just the bits
* that xfs_ialloc won't overwrite or relies on being correct.
*/
ip->i_d.di_version = dip->di_version;
VFS_I(ip)->i_generation = be32_to_cpu(dip->di_gen);
ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
/*
* Make sure to pull in the mode here as well in
* case the inode is released without being used.
* This ensures that xfs_inactive() will see that
* the inode is already free and not try to mess
* with the uninitialized part of it.
*/
VFS_I(ip)->i_mode = 0;
}
ASSERT(ip->i_d.di_version >= 2);
ip->i_delayed_blks = 0;
/*
* Mark the buffer containing the inode as something to keep
* around for a while. This helps to keep recently accessed
* meta-data in-core longer.
*/
xfs_buf_set_ref(bp, XFS_INO_REF);
/*
* Use xfs_trans_brelse() to release the buffer containing the on-disk
* inode, because it was acquired with xfs_trans_read_buf() in
* xfs_imap_to_bp() above. If tp is NULL, this is just a normal
* brelse(). If we're within a transaction, then xfs_trans_brelse()
* will only release the buffer if it is not dirty within the
* transaction. It will be OK to release the buffer in this case,
* because inodes on disk are never destroyed and we will be locking the
* new in-core inode before putting it in the cache where other
* processes can find it. Thus we don't have to worry about the inode
* being changed just because we released the buffer.
*/
out_brelse:
xfs_trans_brelse(tp, bp);
return error;
}
|
C
|
linux
| 0 |
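A rough standalone sketch of the bound described in the xfs commit message in the record above: an extent-format inode cannot claim more extent records than fit in its inode fork, so a verifier can reject any count whose total size exceeds the fork. The model below is only an illustration; model_extent_count_ok, MODEL_EXTENT_REC_SIZE and the 16-byte record size are assumptions made for the example, not the real XFS on-disk layout or verifier code.

/* Minimal userspace model of the extent-count sanity check; all names and
 * sizes are illustrative assumptions, not the real on-disk format. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_EXTENT_REC_SIZE 16u   /* assumed size of one packed extent record */

/* Could an inode in "extent" format really hold 'nextents' records in a
 * literal area of 'fork_bytes' bytes? */
static bool model_extent_count_ok(uint32_t nextents, uint32_t fork_bytes)
{
    /* 64-bit math so a huge corrupt count cannot overflow the product. */
    return (uint64_t)nextents * MODEL_EXTENT_REC_SIZE <= fork_bytes;
}

int main(void)
{
    printf("64 extents in a 1024-byte fork: %s\n",
           model_extent_count_ok(64, 1024) ? "ok" : "corrupt");
    printf("65 extents in a 1024-byte fork: %s\n",
           model_extent_count_ok(65, 1024) ? "ok" : "corrupt");
    return 0;
}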
CVE-2012-1179
|
https://www.cvedetails.com/cve/CVE-2012-1179/
|
CWE-264
|
https://github.com/torvalds/linux/commit/4a1d704194a441bf83c636004a479e01360ec850
|
4a1d704194a441bf83c636004a479e01360ec850
|
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
commit 1a5a9906d4e8d1976b701f889d8f35d54b928f25 upstream.
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem hold in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables from going away from under them; during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(&current->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[[email protected]: checkpatch fixes]
Reported-by: Ulrich Obergfell <[email protected]>
Signed-off-by: Andrea Arcangeli <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Dave Jones <[email protected]>
Acked-by: Larry Woodman <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: Mark Salter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
unsigned long long val)
{
int retry_count;
u64 memswlimit, memlimit;
int ret = 0;
int children = mem_cgroup_count_children(memcg);
u64 curusage, oldusage;
int enlarge;
/*
* For keeping hierarchical_reclaim simple, how long we should retry
* is depends on callers. We set our retry-count to be function
* of # of children which we should visit in this loop.
*/
retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
enlarge = 0;
while (retry_count) {
if (signal_pending(current)) {
ret = -EINTR;
break;
}
/*
* Rather than hide all in some function, I do this in
* open coded manner. You see what this really does.
* We have to guarantee memcg->res.limit < memcg->memsw.limit.
*/
mutex_lock(&set_limit_mutex);
memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
if (memswlimit < val) {
ret = -EINVAL;
mutex_unlock(&set_limit_mutex);
break;
}
memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
if (memlimit < val)
enlarge = 1;
ret = res_counter_set_limit(&memcg->res, val);
if (!ret) {
if (memswlimit == val)
memcg->memsw_is_minimum = true;
else
memcg->memsw_is_minimum = false;
}
mutex_unlock(&set_limit_mutex);
if (!ret)
break;
mem_cgroup_reclaim(memcg, GFP_KERNEL,
MEM_CGROUP_RECLAIM_SHRINK);
curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
/* Usage is reduced ? */
if (curusage >= oldusage)
retry_count--;
else
oldusage = curusage;
}
if (!ret && enlarge)
memcg_oom_recover(memcg);
return ret;
}
|
static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
unsigned long long val)
{
int retry_count;
u64 memswlimit, memlimit;
int ret = 0;
int children = mem_cgroup_count_children(memcg);
u64 curusage, oldusage;
int enlarge;
/*
* For keeping hierarchical_reclaim simple, how long we should retry
* is depends on callers. We set our retry-count to be function
* of # of children which we should visit in this loop.
*/
retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
enlarge = 0;
while (retry_count) {
if (signal_pending(current)) {
ret = -EINTR;
break;
}
/*
* Rather than hide all in some function, I do this in
* open coded manner. You see what this really does.
* We have to guarantee memcg->res.limit < memcg->memsw.limit.
*/
mutex_lock(&set_limit_mutex);
memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
if (memswlimit < val) {
ret = -EINVAL;
mutex_unlock(&set_limit_mutex);
break;
}
memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
if (memlimit < val)
enlarge = 1;
ret = res_counter_set_limit(&memcg->res, val);
if (!ret) {
if (memswlimit == val)
memcg->memsw_is_minimum = true;
else
memcg->memsw_is_minimum = false;
}
mutex_unlock(&set_limit_mutex);
if (!ret)
break;
mem_cgroup_reclaim(memcg, GFP_KERNEL,
MEM_CGROUP_RECLAIM_SHRINK);
curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
/* Usage is reduced ? */
if (curusage >= oldusage)
retry_count--;
else
oldusage = curusage;
}
if (!ret && enlarge)
memcg_oom_recover(memcg);
return ret;
}
|
C
|
linux
| 0 |
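The fix described at length in the commit message of the record above boils down to reading the pmd once into a local stack variable, with only a compiler barrier, so that the none/bad/huge tests all see the same snapshot even if a concurrent huge-page fault rewrites the real entry. A hedged userspace model of that pattern follows; model_pmd_t, the flag bits and the classify helper are invented stand-ins, not kernel code, and the barrier macro assumes a gcc/clang-style compiler.

/* Userspace model: snapshot the entry, then test only the snapshot. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t model_pmd_t;

#define barrier() __asm__ __volatile__("" ::: "memory")

static bool model_pmd_none(model_pmd_t v)       { return v == 0; }
static bool model_pmd_trans_huge(model_pmd_t v) { return v & 0x80; } /* assumed flag bit */
static bool model_pmd_bad(model_pmd_t v)        { return v & 0x01; } /* assumed flag bit */

/* Decide what a zap_pmd_range()-like walker should do, from one snapshot. */
static const char *model_classify(volatile model_pmd_t *pmd)
{
    model_pmd_t val = *pmd;  /* single read into a stack copy */
    barrier();               /* keep the compiler from re-reading *pmd below */

    if (model_pmd_none(val))
        return "skip: not mapped";
    if (model_pmd_trans_huge(val))
        return "handle as huge pmd";
    if (model_pmd_bad(val))
        return "clear bad pmd";
    return "walk the page table";
}

int main(void)
{
    volatile model_pmd_t pmd = 0;
    printf("%s\n", model_classify(&pmd));
    pmd = 0x80;              /* pretend a huge-page fault materialized it */
    printf("%s\n", model_classify(&pmd));
    return 0;
}

Because every check reads val rather than *pmd, the walker can no longer see "none" in one test and "bad" in the next, which is the inconsistency the commit message describes.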
CVE-2015-0273
|
https://www.cvedetails.com/cve/CVE-2015-0273/
| null |
https://git.php.net/?p=php-src.git;a=commit;h=71335e6ebabc1b12c057d8017fd811892ecdfd24
|
71335e6ebabc1b12c057d8017fd811892ecdfd24
| null |
PHP_FUNCTION(mktime)
{
php_mktime(INTERNAL_FUNCTION_PARAM_PASSTHRU, 0);
}
|
PHP_FUNCTION(mktime)
{
php_mktime(INTERNAL_FUNCTION_PARAM_PASSTHRU, 0);
}
|
C
|
php
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/9ad7483d8e7c20e9f1a5a08d00150fb51899f14c
|
9ad7483d8e7c20e9f1a5a08d00150fb51899f14c
|
Shutdown Timebomb - In canary, get a callstack if it takes longer than
10 minutes. In Dev, get a callstack if it takes longer than 20 minutes.
In Beta (50 minutes) and Stable (100 minutes) it is the same as before.
BUG=519321
[email protected]
Review URL: https://codereview.chromium.org/1409333005
Cr-Commit-Position: refs/heads/master@{#355586}
|
base::StackSamplingProfiler::SamplingParams GetJankTimeBombSamplingParams() {
base::StackSamplingProfiler::SamplingParams params;
params.initial_delay = base::TimeDelta::FromMilliseconds(0);
params.bursts = 1;
params.samples_per_burst = 50;
params.sampling_interval = base::TimeDelta::FromMilliseconds(100);
return params;
}
|
base::StackSamplingProfiler::SamplingParams GetJankTimeBombSamplingParams() {
base::StackSamplingProfiler::SamplingParams params;
params.initial_delay = base::TimeDelta::FromMilliseconds(0);
params.bursts = 1;
params.samples_per_burst = 50;
params.sampling_interval = base::TimeDelta::FromMilliseconds(100);
return params;
}
|
C
|
Chrome
| 0 |
CVE-2017-6903
|
https://www.cvedetails.com/cve/CVE-2017-6903/
|
CWE-269
|
https://github.com/iortcw/iortcw/commit/b6ff2bcb1e4e6976d61e316175c6d7c99860fe20
|
b6ff2bcb1e4e6976d61e316175c6d7c99860fe20
|
All: Don't load .pk3s as .dlls, and don't load user config files from .pk3s
|
void CL_Configstrings_f( void ) {
int i;
int ofs;
if ( clc.state != CA_ACTIVE ) {
Com_Printf( "Not connected to a server.\n" );
return;
}
for ( i = 0 ; i < MAX_CONFIGSTRINGS ; i++ ) {
ofs = cl.gameState.stringOffsets[ i ];
if ( !ofs ) {
continue;
}
Com_Printf( "%4i: %s\n", i, cl.gameState.stringData + ofs );
}
}
|
void CL_Configstrings_f( void ) {
int i;
int ofs;
if ( clc.state != CA_ACTIVE ) {
Com_Printf( "Not connected to a server.\n" );
return;
}
for ( i = 0 ; i < MAX_CONFIGSTRINGS ; i++ ) {
ofs = cl.gameState.stringOffsets[ i ];
if ( !ofs ) {
continue;
}
Com_Printf( "%4i: %s\n", i, cl.gameState.stringData + ofs );
}
}
|
C
|
OpenJK
| 0 |
CVE-2018-16427
|
https://www.cvedetails.com/cve/CVE-2018-16427/
|
CWE-125
|
https://github.com/OpenSC/OpenSC/pull/1447/commits/8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
|
8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
|
fixed out of bounds reads
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting and suggesting security fixes.
|
gnuk_write_certificate(sc_card_t *card, const u8 *buf, size_t length)
{
size_t i = 0;
sc_apdu_t apdu;
int r = SC_SUCCESS;
LOG_FUNC_CALLED(card->ctx);
/* If null data is passed, delete certificate */
if (buf == NULL || length == 0) {
sc_format_apdu(card, &apdu, SC_APDU_CASE_1, 0xD6, 0x85, 0);
r = sc_transmit_apdu(card, &apdu);
LOG_TEST_RET(card->ctx, r, "APDU transmit failed");
/* Check response */
LOG_TEST_RET(card->ctx, sc_check_sw(card, apdu.sw1, apdu.sw2), "Certificate writing failed");
}
/* Ref: gnuk_put_binary_libusb.py and gnuk_token.py in Gnuk source tree */
/* Split data to segments of 256 bytes. Send each segment via command chaining,
* with particular P1 byte for each segment */
for (i = 0; i*256 < length; i++) {
u8 *part = (u8 *)buf + i*256;
size_t plen = MIN(length - i*256, 256);
u8 roundbuf[256]; /* space to build APDU data with even length for Gnuk */
sc_log(card->ctx,
"Write part %"SC_FORMAT_LEN_SIZE_T"u from offset 0x%"SC_FORMAT_LEN_SIZE_T"X, len %"SC_FORMAT_LEN_SIZE_T"u",
i+1, i*256, plen);
/* 1st chunk: P1 = 0x85, further chunks: P1 = chunk no */
sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0xD6, (i == 0) ? 0x85 : i, 0);
apdu.flags |= SC_APDU_FLAGS_CHAINING;
apdu.data = part;
apdu.datalen = apdu.lc = plen;
/* If the last part has odd length, we add zero padding to make it even.
* Gnuk does not allow data with odd length */
if (plen < 256 && (plen % 2) != 0) {
memcpy(roundbuf, part, plen);
roundbuf[plen++] = 0;
apdu.data = roundbuf;
apdu.datalen = apdu.lc = plen;
}
r = sc_transmit_apdu(card, &apdu);
LOG_TEST_RET(card->ctx, r, "APDU transmit failed");
/* Check response */
LOG_TEST_RET(card->ctx, sc_check_sw(card, apdu.sw1, apdu.sw2), "UPDATE BINARY returned error");
}
LOG_FUNC_RETURN(card->ctx, (int)length);
}
|
gnuk_write_certificate(sc_card_t *card, const u8 *buf, size_t length)
{
size_t i = 0;
sc_apdu_t apdu;
int r = SC_SUCCESS;
LOG_FUNC_CALLED(card->ctx);
/* If null data is passed, delete certificate */
if (buf == NULL || length == 0) {
sc_format_apdu(card, &apdu, SC_APDU_CASE_1, 0xD6, 0x85, 0);
r = sc_transmit_apdu(card, &apdu);
LOG_TEST_RET(card->ctx, r, "APDU transmit failed");
/* Check response */
LOG_TEST_RET(card->ctx, sc_check_sw(card, apdu.sw1, apdu.sw2), "Certificate writing failed");
}
/* Ref: gnuk_put_binary_libusb.py and gnuk_token.py in Gnuk source tree */
/* Split data to segments of 256 bytes. Send each segment via command chaining,
* with particular P1 byte for each segment */
for (i = 0; i*256 < length; i++) {
u8 *part = (u8 *)buf + i*256;
size_t plen = MIN(length - i*256, 256);
u8 roundbuf[256]; /* space to build APDU data with even length for Gnuk */
sc_log(card->ctx,
"Write part %"SC_FORMAT_LEN_SIZE_T"u from offset 0x%"SC_FORMAT_LEN_SIZE_T"X, len %"SC_FORMAT_LEN_SIZE_T"u",
i+1, i*256, plen);
/* 1st chunk: P1 = 0x85, further chunks: P1 = chunk no */
sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0xD6, (i == 0) ? 0x85 : i, 0);
apdu.flags |= SC_APDU_FLAGS_CHAINING;
apdu.data = part;
apdu.datalen = apdu.lc = plen;
/* If the last part has odd length, we add zero padding to make it even.
* Gnuk does not allow data with odd length */
if (plen < 256 && (plen % 2) != 0) {
memcpy(roundbuf, part, plen);
roundbuf[plen++] = 0;
apdu.data = roundbuf;
apdu.datalen = apdu.lc = plen;
}
r = sc_transmit_apdu(card, &apdu);
LOG_TEST_RET(card->ctx, r, "APDU transmit failed");
/* Check response */
LOG_TEST_RET(card->ctx, sc_check_sw(card, apdu.sw1, apdu.sw2), "UPDATE BINARY returned error");
}
LOG_FUNC_RETURN(card->ctx, (int)length);
}
|
C
|
OpenSC
| 0 |
CVE-2014-1738
|
https://www.cvedetails.com/cve/CVE-2014-1738/
|
CWE-264
|
https://github.com/torvalds/linux/commit/2145e15e0557a01b9195d1c7199a1b92cb9be81f
|
2145e15e0557a01b9195d1c7199a1b92cb9be81f
|
floppy: don't write kernel-only members to FDRAWCMD ioctl output
Do not leak kernel-only floppy_raw_cmd structure members to userspace.
This includes the linked-list pointer and the pointer to the allocated
DMA space.
Signed-off-by: Matthew Daley <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static void recal_interrupt(void)
{
debugt(__func__, "");
if (inr != 2)
FDCS->reset = 1;
else if (ST0 & ST0_ECE) {
switch (DRS->track) {
case NEED_1_RECAL:
debugt(__func__, "need 1 recal");
/* after a second recalibrate, we still haven't
* reached track 0. Probably no drive. Raise an
* error, as failing immediately might upset
* computers possessed by the Devil :-) */
cont->error();
cont->redo();
return;
case NEED_2_RECAL:
debugt(__func__, "need 2 recal");
/* If we already did a recalibrate,
* and we are not at track 0, this
* means we have moved. (The only way
* not to move at recalibration is to
* be already at track 0.) Clear the
* new change flag */
debug_dcl(DP->flags,
"clearing NEWCHANGE flag because of second recalibrate\n");
clear_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags);
DRS->select_date = jiffies;
/* fall through */
default:
debugt(__func__, "default");
/* Recalibrate moves the head by at
* most 80 steps. If after one
* recalibrate we don't have reached
* track 0, this might mean that we
* started beyond track 80. Try
* again. */
DRS->track = NEED_1_RECAL;
break;
}
} else
DRS->track = ST1;
floppy_ready();
}
|
static void recal_interrupt(void)
{
debugt(__func__, "");
if (inr != 2)
FDCS->reset = 1;
else if (ST0 & ST0_ECE) {
switch (DRS->track) {
case NEED_1_RECAL:
debugt(__func__, "need 1 recal");
/* after a second recalibrate, we still haven't
* reached track 0. Probably no drive. Raise an
* error, as failing immediately might upset
* computers possessed by the Devil :-) */
cont->error();
cont->redo();
return;
case NEED_2_RECAL:
debugt(__func__, "need 2 recal");
/* If we already did a recalibrate,
* and we are not at track 0, this
* means we have moved. (The only way
* not to move at recalibration is to
* be already at track 0.) Clear the
* new change flag */
debug_dcl(DP->flags,
"clearing NEWCHANGE flag because of second recalibrate\n");
clear_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags);
DRS->select_date = jiffies;
/* fall through */
default:
debugt(__func__, "default");
/* Recalibrate moves the head by at
* most 80 steps. If after one
* recalibrate we don't have reached
* track 0, this might mean that we
* started beyond track 80. Try
* again. */
DRS->track = NEED_1_RECAL;
break;
}
} else
DRS->track = ST1;
floppy_ready();
}
|
C
|
linux
| 0 |
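The floppy commit message in the record above is about not copying kernel-only struct members back to userspace through an ioctl. A hedged sketch of the safe copy-out pattern follows; struct model_raw_cmd and its fields are invented stand-ins for struct floppy_raw_cmd, chosen only to show the idea of handing out a scrubbed, field-by-field copy instead of the whole in-kernel object.

/* Userspace model of the leak fix: build a scrubbed copy containing only the
 * user-visible fields. The struct layout is an invented stand-in. */
#include <stdio.h>
#include <string.h>

struct model_raw_cmd {
    /* user-visible result fields */
    unsigned int flags;
    unsigned char reply_count;
    unsigned char reply[16];
    /* kernel-only bookkeeping that must never reach userspace */
    void *next;        /* linked-list pointer */
    void *dma_buffer;  /* kernel DMA allocation */
};

/* memcpy(dst, src, sizeof(*src)) would leak the two pointers; instead copy
 * each user-visible field explicitly and leave everything else zeroed. */
static void model_copyout(struct model_raw_cmd *dst,
                          const struct model_raw_cmd *src)
{
    memset(dst, 0, sizeof(*dst));   /* no stale data in padding either */
    dst->flags = src->flags;
    dst->reply_count = src->reply_count;
    memcpy(dst->reply, src->reply, sizeof(dst->reply));
    /* next and dma_buffer intentionally stay NULL */
}

int main(void)
{
    struct model_raw_cmd in = { .flags = 3, .reply_count = 2 };
    struct model_raw_cmd out;

    in.next = (void *)&in;          /* pretend these hold kernel pointers */
    in.dma_buffer = (void *)&in;
    model_copyout(&out, &in);
    printf("flags=%u next=%p dma=%p\n", out.flags, out.next, out.dma_buffer);
    return 0;
}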
null | null | null |
https://github.com/chromium/chromium/commit/5c9d37f8055700c36b4c9006b0d4d81f4f961a06
|
5c9d37f8055700c36b4c9006b0d4d81f4f961a06
|
2010-07-26 Tony Gentilcore <[email protected]>
Reviewed by Darin Fisher.
Move DocumentLoadTiming struct to a new file
https://bugs.webkit.org/show_bug.cgi?id=42917
Also makes DocumentLoadTiming Noncopyable.
No new tests because no new functionality.
* GNUmakefile.am:
* WebCore.gypi:
* WebCore.vcproj/WebCore.vcproj:
* WebCore.xcodeproj/project.pbxproj:
* loader/DocumentLoadTiming.h: Added.
(WebCore::DocumentLoadTiming::DocumentLoadTiming):
* loader/DocumentLoader.h:
* loader/FrameLoader.cpp:
* loader/FrameLoaderTypes.h:
* loader/MainResourceLoader.cpp:
* page/Timing.cpp:
git-svn-id: svn://svn.chromium.org/blink/trunk@64051 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void FrameLoader::receivedFirstData()
{
writer()->begin(m_workingURL, false);
dispatchDidCommitLoad();
dispatchDidClearWindowObjectsInAllWorlds();
if (m_documentLoader) {
String ptitle = m_documentLoader->title();
if (!ptitle.isNull())
m_client->dispatchDidReceiveTitle(ptitle);
}
m_workingURL = KURL();
double delay;
String url;
if (!m_documentLoader)
return;
if (m_frame->inViewSourceMode())
return;
if (!parseHTTPRefresh(m_documentLoader->response().httpHeaderField("Refresh"), false, delay, url))
return;
if (url.isEmpty())
url = m_URL.string();
else
url = m_frame->document()->completeURL(url).string();
m_frame->redirectScheduler()->scheduleRedirect(delay, url);
}
|
void FrameLoader::receivedFirstData()
{
writer()->begin(m_workingURL, false);
dispatchDidCommitLoad();
dispatchDidClearWindowObjectsInAllWorlds();
if (m_documentLoader) {
String ptitle = m_documentLoader->title();
if (!ptitle.isNull())
m_client->dispatchDidReceiveTitle(ptitle);
}
m_workingURL = KURL();
double delay;
String url;
if (!m_documentLoader)
return;
if (m_frame->inViewSourceMode())
return;
if (!parseHTTPRefresh(m_documentLoader->response().httpHeaderField("Refresh"), false, delay, url))
return;
if (url.isEmpty())
url = m_URL.string();
else
url = m_frame->document()->completeURL(url).string();
m_frame->redirectScheduler()->scheduleRedirect(delay, url);
}
|
C
|
Chrome
| 0 |
CVE-2017-9211
|
https://www.cvedetails.com/cve/CVE-2017-9211/
|
CWE-476
|
https://github.com/torvalds/linux/commit/9933e113c2e87a9f46a40fde8dafbf801dca1ab9
|
9933e113c2e87a9f46a40fde8dafbf801dca1ab9
|
crypto: skcipher - Add missing API setkey checks
The API setkey checks for key sizes and alignment went AWOL during the
skcipher conversion. This patch restores them.
Cc: <[email protected]>
Fixes: 4e6c3df4d729 ("crypto: skcipher - Add low-level skcipher...")
Reported-by: Baozeng <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
|
static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}
|
static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}
|
C
|
linux
| 0 |
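The skcipher commit message in the record above restores setkey-time validation of key size and alignment. The standalone C sketch below models that kind of front-end check; struct model_alg, its alignmask field and the reject-on-misalignment behaviour are assumptions for the example (a real implementation might instead copy a misaligned key into an aligned buffer), so treat it as an illustration of the checks, not as the kernel crypto API.

/* Userspace model of setkey-time validation; all structures are illustrative. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct model_alg {
    unsigned int min_keysize;
    unsigned int max_keysize;
    unsigned int alignmask;                      /* e.g. 0x3 -> 4-byte alignment */
    int (*setkey)(const uint8_t *key, unsigned int keylen);
};

static int model_setkey_checked(const struct model_alg *alg,
                                const uint8_t *key, unsigned int keylen)
{
    if (keylen < alg->min_keysize || keylen > alg->max_keysize)
        return -EINVAL;                          /* key size out of range */
    if ((uintptr_t)key & alg->alignmask)
        return -EINVAL;                          /* misaligned key buffer */
    return alg->setkey(key, keylen);
}

static int dummy_setkey(const uint8_t *key, unsigned int keylen)
{
    (void)key;
    printf("low-level setkey called with a %u-byte key\n", keylen);
    return 0;
}

int main(void)
{
    static const uint64_t key_storage[4];              /* 8-byte aligned backing */
    const uint8_t *key = (const uint8_t *)key_storage; /* 32 bytes of key material */
    struct model_alg alg = { .min_keysize = 16, .max_keysize = 32,
                             .alignmask = 0x3, .setkey = dummy_setkey };

    printf("32-byte key: %d\n", model_setkey_checked(&alg, key, 32));
    printf("8-byte key:  %d\n", model_setkey_checked(&alg, key, 8));
    printf("misaligned:  %d\n", model_setkey_checked(&alg, key + 1, 16));
    return 0;
}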
CVE-2016-1683
|
https://www.cvedetails.com/cve/CVE-2016-1683/
|
CWE-119
|
https://github.com/chromium/chromium/commit/96dbafe288dbe2f0cc45fa3c39daf6d0c37acbab
|
96dbafe288dbe2f0cc45fa3c39daf6d0c37acbab
|
Roll libxslt to 891681e3e948f31732229f53cb6db7215f740fc7
BUG=583156,583171
Review URL: https://codereview.chromium.org/1853083002
Cr-Commit-Position: refs/heads/master@{#385338}
|
xsltCopyOf(xsltTransformContextPtr ctxt, xmlNodePtr node,
xmlNodePtr inst, xsltStylePreCompPtr castedComp) {
#ifdef XSLT_REFACTORED
xsltStyleItemCopyOfPtr comp = (xsltStyleItemCopyOfPtr) castedComp;
#else
xsltStylePreCompPtr comp = castedComp;
#endif
xmlXPathObjectPtr res = NULL;
xmlNodeSetPtr list = NULL;
int i;
if ((ctxt == NULL) || (node == NULL) || (inst == NULL))
return;
if ((comp == NULL) || (comp->select == NULL) || (comp->comp == NULL)) {
xsltTransformError(ctxt, NULL, inst,
"xsl:copy-of : compilation failed\n");
return;
}
/*
* SPEC XSLT 1.0:
* "The xsl:copy-of element can be used to insert a result tree
* fragment into the result tree, without first converting it to
* a string as xsl:value-of does (see [7.6.1 Generating Text with
* xsl:value-of]). The required select attribute contains an
* expression. When the result of evaluating the expression is a
* result tree fragment, the complete fragment is copied into the
* result tree. When the result is a node-set, all the nodes in the
* set are copied in document order into the result tree; copying
* an element node copies the attribute nodes, namespace nodes and
* children of the element node as well as the element node itself;
* a root node is copied by copying its children. When the result
* is neither a node-set nor a result tree fragment, the result is
* converted to a string and then inserted into the result tree,
* as with xsl:value-of.
*/
#ifdef WITH_XSLT_DEBUG_PROCESS
XSLT_TRACE(ctxt,XSLT_TRACE_COPY_OF,xsltGenericDebug(xsltGenericDebugContext,
"xsltCopyOf: select %s\n", comp->select));
#endif
/*
* Evaluate the "select" expression.
*/
res = xsltPreCompEval(ctxt, node, comp);
if (res != NULL) {
if (res->type == XPATH_NODESET) {
/*
* Node-set
* --------
*/
#ifdef WITH_XSLT_DEBUG_PROCESS
XSLT_TRACE(ctxt,XSLT_TRACE_COPY_OF,xsltGenericDebug(xsltGenericDebugContext,
"xsltCopyOf: result is a node set\n"));
#endif
list = res->nodesetval;
if (list != NULL) {
xmlNodePtr cur;
/*
* The list is already sorted in document order by XPath.
* Append everything in this order under ctxt->insert.
*/
for (i = 0;i < list->nodeNr;i++) {
cur = list->nodeTab[i];
if (cur == NULL)
continue;
if ((cur->type == XML_DOCUMENT_NODE) ||
(cur->type == XML_HTML_DOCUMENT_NODE))
{
xsltCopyTreeList(ctxt, inst,
cur->children, ctxt->insert, 0, 0);
} else if (cur->type == XML_ATTRIBUTE_NODE) {
xsltShallowCopyAttr(ctxt, inst,
ctxt->insert, (xmlAttrPtr) cur);
} else {
xsltCopyTreeInternal(ctxt, inst,
cur, ctxt->insert, 0, 0);
}
}
}
} else if (res->type == XPATH_XSLT_TREE) {
/*
* Result tree fragment
* --------------------
* E.g. via <xsl:variable ...><foo/></xsl:variable>
* Note that the root node of such trees is an xmlDocPtr in Libxslt.
*/
#ifdef WITH_XSLT_DEBUG_PROCESS
XSLT_TRACE(ctxt,XSLT_TRACE_COPY_OF,xsltGenericDebug(xsltGenericDebugContext,
"xsltCopyOf: result is a result tree fragment\n"));
#endif
list = res->nodesetval;
if ((list != NULL) && (list->nodeTab != NULL) &&
(list->nodeTab[0] != NULL) &&
(IS_XSLT_REAL_NODE(list->nodeTab[0])))
{
xsltCopyTreeList(ctxt, inst,
list->nodeTab[0]->children, ctxt->insert, 0, 0);
}
} else {
xmlChar *value = NULL;
/*
* Convert to a string.
*/
value = xmlXPathCastToString(res);
if (value == NULL) {
xsltTransformError(ctxt, NULL, inst,
"Internal error in xsltCopyOf(): "
"failed to cast an XPath object to string.\n");
ctxt->state = XSLT_STATE_STOPPED;
} else {
if (value[0] != 0) {
/*
* Append content as text node.
*/
xsltCopyTextString(ctxt, ctxt->insert, value, 0);
}
xmlFree(value);
#ifdef WITH_XSLT_DEBUG_PROCESS
XSLT_TRACE(ctxt,XSLT_TRACE_COPY_OF,xsltGenericDebug(xsltGenericDebugContext,
"xsltCopyOf: result %s\n", res->stringval));
#endif
}
}
} else {
ctxt->state = XSLT_STATE_STOPPED;
}
if (res != NULL)
xmlXPathFreeObject(res);
}
|
xsltCopyOf(xsltTransformContextPtr ctxt, xmlNodePtr node,
xmlNodePtr inst, xsltStylePreCompPtr castedComp) {
#ifdef XSLT_REFACTORED
xsltStyleItemCopyOfPtr comp = (xsltStyleItemCopyOfPtr) castedComp;
#else
xsltStylePreCompPtr comp = castedComp;
#endif
xmlXPathObjectPtr res = NULL;
xmlNodeSetPtr list = NULL;
int i;
xmlDocPtr oldXPContextDoc;
xmlNsPtr *oldXPNamespaces;
xmlNodePtr oldXPContextNode;
int oldXPProximityPosition, oldXPContextSize, oldXPNsNr;
xmlXPathContextPtr xpctxt;
if ((ctxt == NULL) || (node == NULL) || (inst == NULL))
return;
if ((comp == NULL) || (comp->select == NULL) || (comp->comp == NULL)) {
xsltTransformError(ctxt, NULL, inst,
"xsl:copy-of : compilation failed\n");
return;
}
/*
* SPEC XSLT 1.0:
* "The xsl:copy-of element can be used to insert a result tree
* fragment into the result tree, without first converting it to
* a string as xsl:value-of does (see [7.6.1 Generating Text with
* xsl:value-of]). The required select attribute contains an
* expression. When the result of evaluating the expression is a
* result tree fragment, the complete fragment is copied into the
* result tree. When the result is a node-set, all the nodes in the
* set are copied in document order into the result tree; copying
* an element node copies the attribute nodes, namespace nodes and
* children of the element node as well as the element node itself;
* a root node is copied by copying its children. When the result
* is neither a node-set nor a result tree fragment, the result is
* converted to a string and then inserted into the result tree,
* as with xsl:value-of.
*/
#ifdef WITH_XSLT_DEBUG_PROCESS
XSLT_TRACE(ctxt,XSLT_TRACE_COPY_OF,xsltGenericDebug(xsltGenericDebugContext,
"xsltCopyOf: select %s\n", comp->select));
#endif
/*
* Evaluate the "select" expression.
*/
xpctxt = ctxt->xpathCtxt;
oldXPContextDoc = xpctxt->doc;
oldXPContextNode = xpctxt->node;
oldXPProximityPosition = xpctxt->proximityPosition;
oldXPContextSize = xpctxt->contextSize;
oldXPNsNr = xpctxt->nsNr;
oldXPNamespaces = xpctxt->namespaces;
xpctxt->node = node;
if (comp != NULL) {
#ifdef XSLT_REFACTORED
if (comp->inScopeNs != NULL) {
xpctxt->namespaces = comp->inScopeNs->list;
xpctxt->nsNr = comp->inScopeNs->xpathNumber;
} else {
xpctxt->namespaces = NULL;
xpctxt->nsNr = 0;
}
#else
xpctxt->namespaces = comp->nsList;
xpctxt->nsNr = comp->nsNr;
#endif
} else {
xpctxt->namespaces = NULL;
xpctxt->nsNr = 0;
}
res = xmlXPathCompiledEval(comp->comp, xpctxt);
xpctxt->doc = oldXPContextDoc;
xpctxt->node = oldXPContextNode;
xpctxt->contextSize = oldXPContextSize;
xpctxt->proximityPosition = oldXPProximityPosition;
xpctxt->nsNr = oldXPNsNr;
xpctxt->namespaces = oldXPNamespaces;
if (res != NULL) {
if (res->type == XPATH_NODESET) {
/*
* Node-set
* --------
*/
#ifdef WITH_XSLT_DEBUG_PROCESS
XSLT_TRACE(ctxt,XSLT_TRACE_COPY_OF,xsltGenericDebug(xsltGenericDebugContext,
"xsltCopyOf: result is a node set\n"));
#endif
list = res->nodesetval;
if (list != NULL) {
xmlNodePtr cur;
/*
* The list is already sorted in document order by XPath.
* Append everything in this order under ctxt->insert.
*/
for (i = 0;i < list->nodeNr;i++) {
cur = list->nodeTab[i];
if (cur == NULL)
continue;
if ((cur->type == XML_DOCUMENT_NODE) ||
(cur->type == XML_HTML_DOCUMENT_NODE))
{
xsltCopyTreeList(ctxt, inst,
cur->children, ctxt->insert, 0, 0);
} else if (cur->type == XML_ATTRIBUTE_NODE) {
xsltShallowCopyAttr(ctxt, inst,
ctxt->insert, (xmlAttrPtr) cur);
} else {
xsltCopyTreeInternal(ctxt, inst,
cur, ctxt->insert, 0, 0);
}
}
}
} else if (res->type == XPATH_XSLT_TREE) {
/*
* Result tree fragment
* --------------------
* E.g. via <xsl:variable ...><foo/></xsl:variable>
* Note that the root node of such trees is an xmlDocPtr in Libxslt.
*/
#ifdef WITH_XSLT_DEBUG_PROCESS
XSLT_TRACE(ctxt,XSLT_TRACE_COPY_OF,xsltGenericDebug(xsltGenericDebugContext,
"xsltCopyOf: result is a result tree fragment\n"));
#endif
list = res->nodesetval;
if ((list != NULL) && (list->nodeTab != NULL) &&
(list->nodeTab[0] != NULL) &&
(IS_XSLT_REAL_NODE(list->nodeTab[0])))
{
xsltCopyTreeList(ctxt, inst,
list->nodeTab[0]->children, ctxt->insert, 0, 0);
}
} else {
xmlChar *value = NULL;
/*
* Convert to a string.
*/
value = xmlXPathCastToString(res);
if (value == NULL) {
xsltTransformError(ctxt, NULL, inst,
"Internal error in xsltCopyOf(): "
"failed to cast an XPath object to string.\n");
ctxt->state = XSLT_STATE_STOPPED;
} else {
if (value[0] != 0) {
/*
* Append content as text node.
*/
xsltCopyTextString(ctxt, ctxt->insert, value, 0);
}
xmlFree(value);
#ifdef WITH_XSLT_DEBUG_PROCESS
XSLT_TRACE(ctxt,XSLT_TRACE_COPY_OF,xsltGenericDebug(xsltGenericDebugContext,
"xsltCopyOf: result %s\n", res->stringval));
#endif
}
}
} else {
ctxt->state = XSLT_STATE_STOPPED;
}
if (res != NULL)
xmlXPathFreeObject(res);
}
|
C
|
Chrome
| 1 |
null | null | null |
https://github.com/chromium/chromium/commit/432eb007ad1d67d12d2a9d69a0f6e78b9efee9b1
|
432eb007ad1d67d12d2a9d69a0f6e78b9efee9b1
|
Hide the "Open PDF in Reader" bubble on navigations.
BUG=444957
Review URL: https://codereview.chromium.org/831283002
Cr-Commit-Position: refs/heads/master@{#310167}
|
bool OpenPDFInReaderView::OnKeyPressed(const ui::KeyEvent& event) {
if (event.key_code() != ui::VKEY_SPACE &&
event.key_code() != ui::VKEY_RETURN) {
return false;
}
ShowBubble();
return true;
}
|
bool OpenPDFInReaderView::OnKeyPressed(const ui::KeyEvent& event) {
if (event.key_code() != ui::VKEY_SPACE &&
event.key_code() != ui::VKEY_RETURN) {
return false;
}
ShowBubble();
return true;
}
|
C
|
Chrome
| 0 |
CVE-2013-0886
|
https://www.cvedetails.com/cve/CVE-2013-0886/
| null |
https://github.com/chromium/chromium/commit/18d67244984a574ba2dd8779faabc0e3e34f4b76
|
18d67244984a574ba2dd8779faabc0e3e34f4b76
|
Implement TextureImageTransportSurface using texture mailbox
This has a couple of advantages:
- allow tearing down and recreating the UI parent context without
losing the renderer contexts
- do not require a context to be able to generate textures when
creating the GLSurfaceHandle
- clearer ownership semantics that potentially allows for more
robust and easier lost context handling/thumbnailing/etc., since a texture is at
any given time owned by either: UI parent, mailbox, or
TextureImageTransportSurface
- simplify frontbuffer protection logic;
the frontbuffer textures are now owned by RWHV where they are refcounted
The TextureImageTransportSurface informs RenderWidgetHostView of the
mailbox names for the front- and backbuffer textures by
associating them with a surface_handle (1 or 2) in the AcceleratedSurfaceNew message.
During SwapBuffers() or PostSubBuffer() cycles, it then uses
produceTextureCHROMIUM() and consumeTextureCHROMIUM()
to transfer ownership between renderer and browser compositor.
RWHV sends back the surface_handle of the buffer being returned with the Swap ACK
(or 0 if no buffer is being returned in which case TextureImageTransportSurface will
allocate a new texture - note that this could be used to
simply keep textures for thumbnailing).
BUG=154815,139616
[email protected]
Review URL: https://chromiumcodereview.appspot.com/11194042
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@171569 0039d316-1c4b-4281-b951-d872f2087c98
|
void RenderWidgetHostImpl::AddKeyboardListener(KeyboardListener* listener) {
keyboard_listeners_.push_back(listener);
}
|
void RenderWidgetHostImpl::AddKeyboardListener(KeyboardListener* listener) {
keyboard_listeners_.push_back(listener);
}
|
C
|
Chrome
| 0 |
CVE-2014-9922
|
https://www.cvedetails.com/cve/CVE-2014-9922/
|
CWE-264
|
https://github.com/torvalds/linux/commit/69c433ed2ecd2d3264efd7afec4439524b319121
|
69c433ed2ecd2d3264efd7afec4439524b319121
|
fs: limit filesystem stacking depth
Add a simple read-only counter to super_block that indicates how deep this
is in the stack of filesystems. Previously ecryptfs was the only stackable
filesystem and it explicitly disallowed multiple layers of itself.
Overlayfs, however, can be stacked recursively and also may be stacked
on top of ecryptfs or vice versa.
To limit the kernel stack usage we must limit the depth of the
filesystem stack. Initially the limit is set to 2.
Signed-off-by: Miklos Szeredi <[email protected]>
|
struct dentry *ovl_dentry_real(struct dentry *dentry)
{
struct ovl_entry *oe = dentry->d_fsdata;
struct dentry *realdentry;
realdentry = ovl_upperdentry_dereference(oe);
if (!realdentry)
realdentry = oe->lowerdentry;
return realdentry;
}
|
struct dentry *ovl_dentry_real(struct dentry *dentry)
{
struct ovl_entry *oe = dentry->d_fsdata;
struct dentry *realdentry;
realdentry = ovl_upperdentry_dereference(oe);
if (!realdentry)
realdentry = oe->lowerdentry;
return realdentry;
}
|
C
|
linux
| 0 |
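The overlayfs commit message in the record above introduces a per-superblock stacking-depth counter with a hard limit. The sketch below models that bookkeeping in plain C; model_super_block, stack_depth and MODEL_MAX_STACK_DEPTH are illustrative names, with only the limit of 2 taken from the commit message.

/* Userspace model of the stacking-depth accounting; names are illustrative. */
#include <errno.h>
#include <stdio.h>

#define MODEL_MAX_STACK_DEPTH 2

struct model_super_block {
    int stack_depth;   /* 0 for an ordinary on-disk filesystem */
};

static int model_stacked_mount(struct model_super_block *new_sb,
                               const struct model_super_block *upper,
                               const struct model_super_block *lower)
{
    int deepest = upper->stack_depth > lower->stack_depth
                      ? upper->stack_depth : lower->stack_depth;

    new_sb->stack_depth = deepest + 1;          /* one level above the deepest layer */
    if (new_sb->stack_depth > MODEL_MAX_STACK_DEPTH) {
        fprintf(stderr, "maximum fs stacking depth exceeded\n");
        return -EINVAL;
    }
    return 0;
}

int main(void)
{
    struct model_super_block disk = { 0 }, ovl1, ovl2, ovl3;

    printf("first overlay on disk fs: %d\n", model_stacked_mount(&ovl1, &disk, &disk));
    printf("second layer on top:      %d\n", model_stacked_mount(&ovl2, &ovl1, &disk));
    printf("third layer (rejected):   %d\n", model_stacked_mount(&ovl3, &ovl2, &ovl1));
    return 0;
}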
CVE-2016-9120
|
https://www.cvedetails.com/cve/CVE-2016-9120/
|
CWE-416
|
https://github.com/torvalds/linux/commit/9590232bb4f4cc824f3425a6e1349afbe6d6d2b7
|
9590232bb4f4cc824f3425a6e1349afbe6d6d2b7
|
staging/android/ion : fix a race condition in the ion driver
There is a use-after-free problem in the ion driver.
This is caused by a race condition in the ion_ioctl()
function.
A handle has ref count of 1 and two tasks on different
cpus calls ION_IOC_FREE simultaneously.
cpu 0 cpu 1
-------------------------------------------------------
ion_handle_get_by_id()
(ref == 2)
ion_handle_get_by_id()
(ref == 3)
ion_free()
(ref == 2)
ion_handle_put()
(ref == 1)
ion_free()
(ref == 0 so ion_handle_destroy() is
called
and the handle is freed.)
ion_handle_put() is called and it
decreases the slub's next free pointer
The problem is detected as an unaligned access in the
spin lock functions since it uses load exclusive
instruction. In some cases it corrupts the slub's
free pointer which causes a mis-aligned access to the
next free pointer.(kmalloc returns a pointer like
ffffc0745b4580aa). And it causes lots of other
hard-to-debug problems.
This symptom is caused since the first member in the
ion_handle structure is the reference count and the
ion driver decrements the reference after it has been
freed.
To fix this problem client->lock mutex is extended
to protect all the codes that uses the handle.
Signed-off-by: Eun Taik Lee <[email protected]>
Reviewed-by: Laura Abbott <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
return (buffer->flags & ION_FLAG_CACHED) &&
!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}
|
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
return (buffer->flags & ION_FLAG_CACHED) &&
!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}
|
C
|
linux
| 0 |
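The ion commit message in the record above fixes the use-after-free by holding client->lock across every use of the handle, so lookup, reference taking and the final put can no longer interleave with a concurrent free. The pthread-based model below sketches that "one lock around the whole handle lifetime" idea; model_client, model_handle and model_ioc_free are invented names, and the toy table stands in for the real id-to-handle lookup.

/* Pthread model: the whole "find, reference, drop" sequence runs under one
 * client-wide lock, mirroring the extended-lock fix. Structures are toys. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct model_handle {
    int refcount;
    int id;
};

struct model_client {
    pthread_mutex_t lock;
    struct model_handle *table[4];    /* toy id -> handle table */
};

/* Caller must hold c->lock. */
static void model_handle_put_locked(struct model_client *c, struct model_handle *h)
{
    if (--h->refcount == 0) {
        c->table[h->id] = NULL;       /* unpublish, then free */
        free(h);
    }
}

static void model_ioc_free(struct model_client *c, int id)
{
    pthread_mutex_lock(&c->lock);
    struct model_handle *h = (id >= 0 && id < 4) ? c->table[id] : NULL;
    if (h) {
        h->refcount++;                 /* reference held for this call */
        model_handle_put_locked(c, h); /* drop the userspace reference */
        model_handle_put_locked(c, h); /* drop this call's reference */
    }
    pthread_mutex_unlock(&c->lock);
}

int main(void)
{
    struct model_client c = { .lock = PTHREAD_MUTEX_INITIALIZER };
    struct model_handle *h = malloc(sizeof(*h));

    h->refcount = 1;                  /* the userspace reference */
    h->id = 0;
    c.table[0] = h;

    model_ioc_free(&c, 0);            /* frees the handle exactly once */
    model_ioc_free(&c, 0);            /* finds nothing: slot already NULL */
    printf("table[0] = %p\n", (void *)c.table[0]);
    return 0;
}

Because the whole sequence runs under one mutex, a second caller issuing the same free simply finds the table slot empty instead of decrementing a freed object.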
CVE-2011-2836
|
https://www.cvedetails.com/cve/CVE-2011-2836/
|
CWE-264
|
https://github.com/chromium/chromium/commit/d662b905d30cec7899bbb15140dcfacd73506167
|
d662b905d30cec7899bbb15140dcfacd73506167
|
Infobar Windows Media Player plug-in by default.
BUG=51464
Review URL: http://codereview.chromium.org/7080048
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@87500 0039d316-1c4b-4281-b951-d872f2087c98
|
PluginInfoBarDelegate::PluginInfoBarDelegate(TabContents* tab_contents,
const string16& name)
: ConfirmInfoBarDelegate(tab_contents),
name_(name),
tab_contents_(tab_contents) {
}
|
PluginInfoBarDelegate::PluginInfoBarDelegate(TabContents* tab_contents,
const string16& name)
: ConfirmInfoBarDelegate(tab_contents),
name_(name),
tab_contents_(tab_contents) {
}
|
C
|
Chrome
| 0 |
CVE-2014-3610
|
https://www.cvedetails.com/cve/CVE-2014-3610/
|
CWE-264
|
https://github.com/torvalds/linux/commit/854e8bb1aa06c578c2c9145fa6bfe3680ef63b23
|
854e8bb1aa06c578c2c9145fa6bfe3680ef63b23
|
KVM: x86: Check non-canonical addresses upon WRMSR
Upon WRMSR, the CPU should inject #GP if a non-canonical value (address) is
written to certain MSRs. The behavior is "almost" identical for AMD and Intel
(ignoring MSRs that are not implemented in either architecture since they would
anyhow #GP). However, IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
non-canonical address is written on Intel but not on AMD (which ignores the top
32-bits).
Accordingly, this patch injects a #GP on the MSRs which behave identically on
Intel and AMD. To eliminate the differences between the architecutres, the
value which is written to IA32_SYSENTER_ESP and IA32_SYSENTER_EIP is turned to
canonical value before writing instead of injecting a #GP.
Some references from Intel and AMD manuals:
According to Intel SDM description of WRMSR instruction #GP is expected on
WRMSR "If the source register contains a non-canonical address and ECX
specifies one of the following MSRs: IA32_DS_AREA, IA32_FS_BASE, IA32_GS_BASE,
IA32_KERNEL_GS_BASE, IA32_LSTAR, IA32_SYSENTER_EIP, IA32_SYSENTER_ESP."
According to the AMD instruction manual:
LSTAR/CSTAR (SYSCALL): "The WRMSR instruction loads the target RIP into the
LSTAR and CSTAR registers. If an RIP written by WRMSR is not in canonical
form, a general-protection exception (#GP) occurs."
IA32_GS_BASE and IA32_FS_BASE (WRFSBASE/WRGSBASE): "The address written to the
base field must be in canonical form or a #GP fault will occur."
IA32_KERNEL_GS_BASE (SWAPGS): "The address stored in the KernelGSbase MSR must
be in canonical form."
This patch fixes CVE-2014-3610.
Cc: [email protected]
Signed-off-by: Nadav Amit <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
if (!kvm_mtrr_valid(vcpu, msr, data))
return 1;
if (msr == MSR_MTRRdefType) {
vcpu->arch.mtrr_state.def_type = data;
vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
} else if (msr == MSR_MTRRfix64K_00000)
p[0] = data;
else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
p[1 + msr - MSR_MTRRfix16K_80000] = data;
else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
p[3 + msr - MSR_MTRRfix4K_C0000] = data;
else if (msr == MSR_IA32_CR_PAT)
vcpu->arch.pat = data;
else { /* Variable MTRRs */
int idx, is_mtrr_mask;
u64 *pt;
idx = (msr - 0x200) / 2;
is_mtrr_mask = msr - 0x200 - 2 * idx;
if (!is_mtrr_mask)
pt =
(u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
else
pt =
(u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
*pt = data;
}
kvm_mmu_reset_context(vcpu);
return 0;
}
|
static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
if (!kvm_mtrr_valid(vcpu, msr, data))
return 1;
if (msr == MSR_MTRRdefType) {
vcpu->arch.mtrr_state.def_type = data;
vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
} else if (msr == MSR_MTRRfix64K_00000)
p[0] = data;
else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
p[1 + msr - MSR_MTRRfix16K_80000] = data;
else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
p[3 + msr - MSR_MTRRfix4K_C0000] = data;
else if (msr == MSR_IA32_CR_PAT)
vcpu->arch.pat = data;
else { /* Variable MTRRs */
int idx, is_mtrr_mask;
u64 *pt;
idx = (msr - 0x200) / 2;
is_mtrr_mask = msr - 0x200 - 2 * idx;
if (!is_mtrr_mask)
pt =
(u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
else
pt =
(u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
*pt = data;
}
kvm_mmu_reset_context(vcpu);
return 0;
}
|
C
|
linux
| 0 |
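The KVM commit message in the record above describes two behaviours: inject #GP when a non-canonical address is written to MSRs that trap it on both vendors, and silently sign-extend the value for IA32_SYSENTER_ESP/EIP. The sketch below models the 48-bit canonical check and that split in plain C; the MSR numbers, helper names and the 48-bit virtual-address width are assumptions for the example, not KVM code.

/* Userspace model of the canonical-address rule: bits 63..47 must all equal
 * bit 47 on a CPU with 48-bit virtual addresses. Constants are illustrative. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_VA_BITS 48

/* Sign-extend bit 47 upward. The unsigned shift is well defined; the signed
 * conversion and right shift assume the usual two's-complement behaviour. */
static uint64_t model_sign_extend_va(uint64_t addr)
{
    return (uint64_t)((int64_t)(addr << (64 - MODEL_VA_BITS)) >> (64 - MODEL_VA_BITS));
}

static bool model_is_noncanonical(uint64_t addr)
{
    return model_sign_extend_va(addr) != addr;
}

/* Returns 0 on success, 1 to signal "inject #GP into the guest". */
static int model_wrmsr(uint32_t msr, uint64_t *data)
{
    enum { MODEL_MSR_LSTAR = 1, MODEL_MSR_FS_BASE, MODEL_MSR_SYSENTER_EIP };

    switch (msr) {
    case MODEL_MSR_LSTAR:
    case MODEL_MSR_FS_BASE:
        if (model_is_noncanonical(*data))
            return 1;                            /* #GP on both Intel and AMD */
        break;
    case MODEL_MSR_SYSENTER_EIP:
        *data = model_sign_extend_va(*data);     /* store a canonical value instead */
        break;
    }
    return 0;
}

int main(void)
{
    uint64_t ok  = 0x00007fffffffffffULL;
    uint64_t bad = 0x0000800000000000ULL;        /* bit 47 set, bits 63..48 clear */

    printf("canonical LSTAR write:     %d\n", model_wrmsr(1, &ok));
    printf("non-canonical LSTAR write: %d\n", model_wrmsr(1, &bad));
    model_wrmsr(3, &bad);
    printf("SYSENTER_EIP stored as:    0x%016llx\n", (unsigned long long)bad);
    return 0;
}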
CVE-2019-5827
|
https://www.cvedetails.com/cve/CVE-2019-5827/
|
CWE-190
|
https://github.com/chromium/chromium/commit/517ac71c9ee27f856f9becde8abea7d1604af9d4
|
517ac71c9ee27f856f9becde8abea7d1604af9d4
|
sqlite: backport bugfixes for dbfuzz2
Bug: 952406
Change-Id: Icbec429742048d6674828726c96d8e265c41b595
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1568152
Reviewed-by: Chris Mumford <[email protected]>
Commit-Queue: Darwin Huang <[email protected]>
Cr-Commit-Position: refs/heads/master@{#651030}
|
static SQLITE_NOINLINE int btreeNext(BtCursor *pCur){
int rc;
int idx;
MemPage *pPage;
assert( cursorOwnsBtShared(pCur) );
if( pCur->eState!=CURSOR_VALID ){
assert( (pCur->curFlags & BTCF_ValidOvfl)==0 );
rc = restoreCursorPosition(pCur);
if( rc!=SQLITE_OK ){
return rc;
}
if( CURSOR_INVALID==pCur->eState ){
return SQLITE_DONE;
}
if( pCur->eState==CURSOR_SKIPNEXT ){
pCur->eState = CURSOR_VALID;
if( pCur->skipNext>0 ) return SQLITE_OK;
}
}
pPage = pCur->pPage;
idx = ++pCur->ix;
if( !pPage->isInit ){
/* The only known way for this to happen is for there to be a
** recursive SQL function that does a DELETE operation as part of a
** SELECT which deletes content out from under an active cursor
** in a corrupt database file where the table being DELETE-ed from
** has pages in common with the table being queried. See TH3
** module cov1/btree78.test testcase 220 (2018-06-08) for an
** example. */
return SQLITE_CORRUPT_BKPT;
}
/* If the database file is corrupt, it is possible for the value of idx
** to be invalid here. This can only occur if a second cursor modifies
** the page while cursor pCur is holding a reference to it. Which can
** only happen if the database is corrupt in such a way as to link the
** page into more than one b-tree structure. */
testcase( idx>pPage->nCell );
if( idx>=pPage->nCell ){
if( !pPage->leaf ){
rc = moveToChild(pCur, get4byte(&pPage->aData[pPage->hdrOffset+8]));
if( rc ) return rc;
return moveToLeftmost(pCur);
}
do{
if( pCur->iPage==0 ){
pCur->eState = CURSOR_INVALID;
return SQLITE_DONE;
}
moveToParent(pCur);
pPage = pCur->pPage;
}while( pCur->ix>=pPage->nCell );
if( pPage->intKey ){
return sqlite3BtreeNext(pCur, 0);
}else{
return SQLITE_OK;
}
}
if( pPage->leaf ){
return SQLITE_OK;
}else{
return moveToLeftmost(pCur);
}
}
|
static SQLITE_NOINLINE int btreeNext(BtCursor *pCur){
int rc;
int idx;
MemPage *pPage;
assert( cursorOwnsBtShared(pCur) );
if( pCur->eState!=CURSOR_VALID ){
assert( (pCur->curFlags & BTCF_ValidOvfl)==0 );
rc = restoreCursorPosition(pCur);
if( rc!=SQLITE_OK ){
return rc;
}
if( CURSOR_INVALID==pCur->eState ){
return SQLITE_DONE;
}
if( pCur->eState==CURSOR_SKIPNEXT ){
pCur->eState = CURSOR_VALID;
if( pCur->skipNext>0 ) return SQLITE_OK;
}
}
pPage = pCur->pPage;
idx = ++pCur->ix;
if( !pPage->isInit ){
/* The only known way for this to happen is for there to be a
** recursive SQL function that does a DELETE operation as part of a
** SELECT which deletes content out from under an active cursor
** in a corrupt database file where the table being DELETE-ed from
** has pages in common with the table being queried. See TH3
** module cov1/btree78.test testcase 220 (2018-06-08) for an
** example. */
return SQLITE_CORRUPT_BKPT;
}
/* If the database file is corrupt, it is possible for the value of idx
** to be invalid here. This can only occur if a second cursor modifies
** the page while cursor pCur is holding a reference to it. Which can
** only happen if the database is corrupt in such a way as to link the
** page into more than one b-tree structure. */
testcase( idx>pPage->nCell );
if( idx>=pPage->nCell ){
if( !pPage->leaf ){
rc = moveToChild(pCur, get4byte(&pPage->aData[pPage->hdrOffset+8]));
if( rc ) return rc;
return moveToLeftmost(pCur);
}
do{
if( pCur->iPage==0 ){
pCur->eState = CURSOR_INVALID;
return SQLITE_DONE;
}
moveToParent(pCur);
pPage = pCur->pPage;
}while( pCur->ix>=pPage->nCell );
if( pPage->intKey ){
return sqlite3BtreeNext(pCur, 0);
}else{
return SQLITE_OK;
}
}
if( pPage->leaf ){
return SQLITE_OK;
}else{
return moveToLeftmost(pCur);
}
}
|
C
|
Chrome
| 0 |
CVE-2016-4558
|
https://www.cvedetails.com/cve/CVE-2016-4558/
| null |
https://github.com/torvalds/linux/commit/92117d8443bc5afacc8d5ba82e541946310f106e
|
92117d8443bc5afacc8d5ba82e541946310f106e
|
bpf: fix refcnt overflow
On a system with >32Gbyte of physical memory and infinite RLIMIT_MEMLOCK,
the malicious application may overflow 32-bit bpf program refcnt.
It's also possible to overflow map refcnt on 1Tb system.
Impose 32k hard limit which means that the same bpf program or
map cannot be shared by more than 32k processes.
Fixes: 1be7f75d1668 ("bpf: enable non-root eBPF programs")
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Acked-by: Daniel Borkmann <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
union bpf_attr attr = {};
int err;
if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
return -EPERM;
if (!access_ok(VERIFY_READ, uattr, 1))
return -EFAULT;
if (size > PAGE_SIZE) /* silly large */
return -E2BIG;
/* If we're handed a bigger struct than we know of,
* ensure all the unknown bits are 0 - i.e. new
* user-space does not rely on any kernel feature
* extensions we dont know about yet.
*/
if (size > sizeof(attr)) {
unsigned char __user *addr;
unsigned char __user *end;
unsigned char val;
addr = (void __user *)uattr + sizeof(attr);
end = (void __user *)uattr + size;
for (; addr < end; addr++) {
err = get_user(val, addr);
if (err)
return err;
if (val)
return -E2BIG;
}
size = sizeof(attr);
}
/* copy attributes from user space, may be less than sizeof(bpf_attr) */
if (copy_from_user(&attr, uattr, size) != 0)
return -EFAULT;
switch (cmd) {
case BPF_MAP_CREATE:
err = map_create(&attr);
break;
case BPF_MAP_LOOKUP_ELEM:
err = map_lookup_elem(&attr);
break;
case BPF_MAP_UPDATE_ELEM:
err = map_update_elem(&attr);
break;
case BPF_MAP_DELETE_ELEM:
err = map_delete_elem(&attr);
break;
case BPF_MAP_GET_NEXT_KEY:
err = map_get_next_key(&attr);
break;
case BPF_PROG_LOAD:
err = bpf_prog_load(&attr);
break;
case BPF_OBJ_PIN:
err = bpf_obj_pin(&attr);
break;
case BPF_OBJ_GET:
err = bpf_obj_get(&attr);
break;
default:
err = -EINVAL;
break;
}
return err;
}
|
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
union bpf_attr attr = {};
int err;
if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
return -EPERM;
if (!access_ok(VERIFY_READ, uattr, 1))
return -EFAULT;
if (size > PAGE_SIZE) /* silly large */
return -E2BIG;
/* If we're handed a bigger struct than we know of,
* ensure all the unknown bits are 0 - i.e. new
* user-space does not rely on any kernel feature
* extensions we dont know about yet.
*/
if (size > sizeof(attr)) {
unsigned char __user *addr;
unsigned char __user *end;
unsigned char val;
addr = (void __user *)uattr + sizeof(attr);
end = (void __user *)uattr + size;
for (; addr < end; addr++) {
err = get_user(val, addr);
if (err)
return err;
if (val)
return -E2BIG;
}
size = sizeof(attr);
}
/* copy attributes from user space, may be less than sizeof(bpf_attr) */
if (copy_from_user(&attr, uattr, size) != 0)
return -EFAULT;
switch (cmd) {
case BPF_MAP_CREATE:
err = map_create(&attr);
break;
case BPF_MAP_LOOKUP_ELEM:
err = map_lookup_elem(&attr);
break;
case BPF_MAP_UPDATE_ELEM:
err = map_update_elem(&attr);
break;
case BPF_MAP_DELETE_ELEM:
err = map_delete_elem(&attr);
break;
case BPF_MAP_GET_NEXT_KEY:
err = map_get_next_key(&attr);
break;
case BPF_PROG_LOAD:
err = bpf_prog_load(&attr);
break;
case BPF_OBJ_PIN:
err = bpf_obj_pin(&attr);
break;
case BPF_OBJ_GET:
err = bpf_obj_get(&attr);
break;
default:
err = -EINVAL;
break;
}
return err;
}
|
C
|
linux
| 0 |
CVE-2017-15420
|
https://www.cvedetails.com/cve/CVE-2017-15420/
|
CWE-20
|
https://github.com/chromium/chromium/commit/56a84aa67bb071a33a48ac1481b555c48e0a9a59
|
56a84aa67bb071a33a48ac1481b555c48e0a9a59
|
Do not use NavigationEntry to block history navigations.
This is no longer necessary after r477371.
BUG=777419
TEST=See bug for repro steps.
Cq-Include-Trybots: master.tryserver.chromium.linux:linux_site_isolation
Change-Id: I701e4d4853858281b43e3743b12274dbeadfbf18
Reviewed-on: https://chromium-review.googlesource.com/733959
Reviewed-by: Devlin <[email protected]>
Reviewed-by: Nasko Oskov <[email protected]>
Commit-Queue: Charlie Reis <[email protected]>
Cr-Commit-Position: refs/heads/master@{#511942}
|
int NavigationControllerImpl::GetPendingEntryIndex() const {
DCHECK_LT(pending_entry_index_, GetEntryCount());
DCHECK(GetEntryCount() != 0 || pending_entry_index_ == -1);
return pending_entry_index_;
}
|
int NavigationControllerImpl::GetPendingEntryIndex() const {
DCHECK_LT(pending_entry_index_, GetEntryCount());
DCHECK(GetEntryCount() != 0 || pending_entry_index_ == -1);
return pending_entry_index_;
}
|
C
|
Chrome
| 0 |
CVE-2017-8070
|
https://www.cvedetails.com/cve/CVE-2017-8070/
|
CWE-119
|
https://github.com/torvalds/linux/commit/2d6a0e9de03ee658a9adc3bfb2f0ca55dff1e478
|
2d6a0e9de03ee658a9adc3bfb2f0ca55dff1e478
|
catc: Use heap buffer for memory size test
Allocating USB buffers on the stack is not portable, and no longer
works on x86_64 (with VMAP_STACK enabled as per default).
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Signed-off-by: Ben Hutchings <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static void catc_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct catc *catc = netdev_priv(dev);
strlcpy(info->driver, driver_name, sizeof(info->driver));
strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
usb_make_path(catc->usbdev, info->bus_info, sizeof(info->bus_info));
}
|
static void catc_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct catc *catc = netdev_priv(dev);
strlcpy(info->driver, driver_name, sizeof(info->driver));
strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
usb_make_path(catc->usbdev, info->bus_info, sizeof(info->bus_info));
}
|
C
|
linux
| 0 |
CVE-2018-17467
|
https://www.cvedetails.com/cve/CVE-2018-17467/
|
CWE-20
|
https://github.com/chromium/chromium/commit/7da6c3419fd172405bcece1ae4ec6ec8316cd345
|
7da6c3419fd172405bcece1ae4ec6ec8316cd345
|
Start rendering timer after first navigation
Currently the new content rendering timer in the browser process,
which clears an old page's contents 4 seconds after a navigation if the
new page doesn't draw in that time, is not set on the first navigation
for a top-level frame.
This is problematic because content can exist before the first
navigation, for instance if it was created by a javascript: URL.
This CL removes the code that skips the timer activation on the first
navigation.
Bug: 844881
Change-Id: I19b3ad1ff62c69ded3a5f7b1c0afde191aaf4584
Reviewed-on: https://chromium-review.googlesource.com/1188589
Reviewed-by: Fady Samuel <[email protected]>
Reviewed-by: ccameron <[email protected]>
Commit-Queue: Ken Buchanan <[email protected]>
Cr-Commit-Position: refs/heads/master@{#586913}
|
void RenderWidgetHostImpl::FilterDropData(DropData* drop_data) {
#if DCHECK_IS_ON()
drop_data->view_id = GetRoutingID();
#endif // DCHECK_IS_ON()
GetProcess()->FilterURL(true, &drop_data->url);
if (drop_data->did_originate_from_renderer) {
drop_data->filenames.clear();
}
}
|
void RenderWidgetHostImpl::FilterDropData(DropData* drop_data) {
#if DCHECK_IS_ON()
drop_data->view_id = GetRoutingID();
#endif // DCHECK_IS_ON()
GetProcess()->FilterURL(true, &drop_data->url);
if (drop_data->did_originate_from_renderer) {
drop_data->filenames.clear();
}
}
|
C
|
Chrome
| 0 |
CVE-2012-3552
|
https://www.cvedetails.com/cve/CVE-2012-3552/
|
CWE-362
|
https://github.com/torvalds/linux/commit/f6d8bd051c391c1c0458a30b2a7abcd939329259
|
f6d8bd051c391c1c0458a30b2a7abcd939329259
|
inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We cant insert an rcu_head in struct ip_options since its included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static int do_raw_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (optname == ICMP_FILTER) {
if (inet_sk(sk)->inet_num != IPPROTO_ICMP)
return -EOPNOTSUPP;
else
return raw_geticmpfilter(sk, optval, optlen);
}
return -ENOPROTOOPT;
}
|
static int do_raw_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (optname == ICMP_FILTER) {
if (inet_sk(sk)->inet_num != IPPROTO_ICMP)
return -EOPNOTSUPP;
else
return raw_geticmpfilter(sk, optval, optlen);
}
return -ENOPROTOOPT;
}
|
C
|
linux
| 0 |
CVE-2012-2817
|
https://www.cvedetails.com/cve/CVE-2012-2817/
|
CWE-399
|
https://github.com/chromium/chromium/commit/9b9a9f33f0a26f40d083be85a539dd7963adfc9b
|
9b9a9f33f0a26f40d083be85a539dd7963adfc9b
|
Explicitly stopping thread in MediaStreamImpl dtor to avoid any racing issues.
This may solve the below bugs.
BUG=112408,111202
TEST=content_unittests
Review URL: https://chromiumcodereview.appspot.com/9307058
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@120222 0039d316-1c4b-4281-b951-d872f2087c98
|
MediaStreamImpl::MediaStreamImpl(
MediaStreamDispatcher* media_stream_dispatcher,
content::P2PSocketDispatcher* p2p_socket_dispatcher,
VideoCaptureImplManager* vc_manager,
MediaStreamDependencyFactory* dependency_factory)
: dependency_factory_(dependency_factory),
media_stream_dispatcher_(media_stream_dispatcher),
p2p_socket_dispatcher_(p2p_socket_dispatcher),
network_manager_(NULL),
vc_manager_(vc_manager),
peer_connection_handler_(NULL),
message_loop_proxy_(base::MessageLoopProxy::current()),
signaling_thread_(NULL),
worker_thread_(NULL),
chrome_worker_thread_("Chrome_libJingle_WorkerThread") {
}
|
MediaStreamImpl::MediaStreamImpl(
MediaStreamDispatcher* media_stream_dispatcher,
content::P2PSocketDispatcher* p2p_socket_dispatcher,
VideoCaptureImplManager* vc_manager,
MediaStreamDependencyFactory* dependency_factory)
: dependency_factory_(dependency_factory),
media_stream_dispatcher_(media_stream_dispatcher),
p2p_socket_dispatcher_(p2p_socket_dispatcher),
network_manager_(NULL),
vc_manager_(vc_manager),
peer_connection_handler_(NULL),
message_loop_proxy_(base::MessageLoopProxy::current()),
signaling_thread_(NULL),
worker_thread_(NULL),
chrome_worker_thread_("Chrome_libJingle_WorkerThread") {
}
|
C
|
Chrome
| 0 |
CVE-2017-9059
|
https://www.cvedetails.com/cve/CVE-2017-9059/
|
CWE-404
|
https://github.com/torvalds/linux/commit/c70422f760c120480fee4de6c38804c72aa26bc1
|
c70422f760c120480fee4de6c38804c72aa26bc1
|
Merge tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux
Pull nfsd updates from Bruce Fields:
"Another RDMA update from Chuck Lever, and a bunch of miscellaneous
bugfixes"
* tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux: (26 commits)
nfsd: Fix up the "supattr_exclcreat" attributes
nfsd: encoders mustn't use uninitialized values in error cases
nfsd: fix undefined behavior in nfsd4_layout_verify
lockd: fix lockd shutdown race
NFSv4: Fix callback server shutdown
SUNRPC: Refactor svc_set_num_threads()
NFSv4.x/callback: Create the callback service through svc_create_pooled
lockd: remove redundant check on block
svcrdma: Clean out old XDR encoders
svcrdma: Remove the req_map cache
svcrdma: Remove unused RDMA Write completion handler
svcrdma: Reduce size of sge array in struct svc_rdma_op_ctxt
svcrdma: Clean up RPC-over-RDMA backchannel reply processing
svcrdma: Report Write/Reply chunk overruns
svcrdma: Clean up RDMA_ERROR path
svcrdma: Use rdma_rw API in RPC reply path
svcrdma: Introduce local rdma_rw API helpers
svcrdma: Clean up svc_rdma_get_inv_rkey()
svcrdma: Add helper to save pages under I/O
svcrdma: Eliminate RPCRDMA_SQ_DEPTH_MULT
...
|
nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclientid *setclientid)
{
DECODE_HEAD;
if (argp->minorversion >= 1)
return nfserr_notsupp;
READ_BUF(NFS4_VERIFIER_SIZE);
COPYMEM(setclientid->se_verf.data, NFS4_VERIFIER_SIZE);
status = nfsd4_decode_opaque(argp, &setclientid->se_name);
if (status)
return nfserr_bad_xdr;
READ_BUF(8);
setclientid->se_callback_prog = be32_to_cpup(p++);
setclientid->se_callback_netid_len = be32_to_cpup(p++);
READ_BUF(setclientid->se_callback_netid_len);
SAVEMEM(setclientid->se_callback_netid_val, setclientid->se_callback_netid_len);
READ_BUF(4);
setclientid->se_callback_addr_len = be32_to_cpup(p++);
READ_BUF(setclientid->se_callback_addr_len);
SAVEMEM(setclientid->se_callback_addr_val, setclientid->se_callback_addr_len);
READ_BUF(4);
setclientid->se_callback_ident = be32_to_cpup(p++);
DECODE_TAIL;
}
|
nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclientid *setclientid)
{
DECODE_HEAD;
if (argp->minorversion >= 1)
return nfserr_notsupp;
READ_BUF(NFS4_VERIFIER_SIZE);
COPYMEM(setclientid->se_verf.data, NFS4_VERIFIER_SIZE);
status = nfsd4_decode_opaque(argp, &setclientid->se_name);
if (status)
return nfserr_bad_xdr;
READ_BUF(8);
setclientid->se_callback_prog = be32_to_cpup(p++);
setclientid->se_callback_netid_len = be32_to_cpup(p++);
READ_BUF(setclientid->se_callback_netid_len);
SAVEMEM(setclientid->se_callback_netid_val, setclientid->se_callback_netid_len);
READ_BUF(4);
setclientid->se_callback_addr_len = be32_to_cpup(p++);
READ_BUF(setclientid->se_callback_addr_len);
SAVEMEM(setclientid->se_callback_addr_val, setclientid->se_callback_addr_len);
READ_BUF(4);
setclientid->se_callback_ident = be32_to_cpup(p++);
DECODE_TAIL;
}
|
C
|
linux
| 0 |
CVE-2017-5093
|
https://www.cvedetails.com/cve/CVE-2017-5093/
|
CWE-20
|
https://github.com/chromium/chromium/commit/0720b02e4f303ea6b114d4ae9453e3a7ff55f8dc
|
0720b02e4f303ea6b114d4ae9453e3a7ff55f8dc
|
If JavaScript shows a dialog, cause the page to lose fullscreen.
BUG=670135, 550017, 726761, 728276
Review-Url: https://codereview.chromium.org/2906133004
Cr-Commit-Position: refs/heads/master@{#478884}
|
RenderWidgetHostImpl* WebContentsImpl::GetRenderWidgetHostWithPageFocus() {
WebContentsImpl* focused_web_contents = GetFocusedWebContents();
if (focused_web_contents->ShowingInterstitialPage()) {
return static_cast<RenderFrameHostImpl*>(
focused_web_contents->GetRenderManager()
->interstitial_page()
->GetMainFrame())
->GetRenderWidgetHost();
}
return focused_web_contents->GetMainFrame()->GetRenderWidgetHost();
}
|
RenderWidgetHostImpl* WebContentsImpl::GetRenderWidgetHostWithPageFocus() {
WebContentsImpl* focused_web_contents = GetFocusedWebContents();
if (focused_web_contents->ShowingInterstitialPage()) {
return static_cast<RenderFrameHostImpl*>(
focused_web_contents->GetRenderManager()
->interstitial_page()
->GetMainFrame())
->GetRenderWidgetHost();
}
return focused_web_contents->GetMainFrame()->GetRenderWidgetHost();
}
|
C
|
Chrome
| 0 |
CVE-2017-5101
|
https://www.cvedetails.com/cve/CVE-2017-5101/
|
CWE-20
|
https://github.com/chromium/chromium/commit/29734f46c6dc9362783091180c2ee279ad53637f
|
29734f46c6dc9362783091180c2ee279ad53637f
|
media: remove base::SharedMemoryHandle usage in v4l2 encoder
This replaces a use of the legacy UnalignedSharedMemory ctor
taking a SharedMemoryHandle with the current ctor taking a
PlatformSharedMemoryRegion.
Bug: 849207
Change-Id: Iea24ebdcd941cf2fa97e19cf2aeac1a18f9773d9
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1697602
Commit-Queue: Matthew Cary (CET) <[email protected]>
Reviewed-by: Ricky Liang <[email protected]>
Cr-Commit-Position: refs/heads/master@{#681740}
|
V4L2JpegEncodeAccelerator::EncodedInstanceDmaBuf::~EncodedInstanceDmaBuf() {}
|
V4L2JpegEncodeAccelerator::EncodedInstanceDmaBuf::~EncodedInstanceDmaBuf() {}
|
C
|
Chrome
| 0 |
CVE-2018-12896
|
https://www.cvedetails.com/cve/CVE-2018-12896/
|
CWE-190
|
https://github.com/torvalds/linux/commit/78c9c4dfbf8c04883941445a195276bb4bb92c76
|
78c9c4dfbf8c04883941445a195276bb4bb92c76
|
posix-timers: Sanitize overrun handling
The posix timer overrun handling is broken because the forwarding functions
can return a huge number of overruns which does not fit in an int. As a
consequence timer_getoverrun(2) and siginfo::si_overrun can turn into
random number generators.
The k_clock::timer_forward() callbacks return a 64 bit value now. Make
k_itimer::ti_overrun[_last] 64bit as well, so the kernel internal
accounting is correct. Remove the temporary (int) casts.
Add a helper function which clamps the overrun value returned to user space
via timer_getoverrun(2) or siginfo::si_overrun limited to a positive value
between 0 and INT_MAX. INT_MAX is an indicator for user space that the
overrun value has been clamped.
Reported-by: Team OWL337 <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Acked-by: John Stultz <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Michael Kerrisk <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
|
static inline int fastpath_timer_check(struct task_struct *tsk)
{
struct signal_struct *sig;
if (!task_cputime_zero(&tsk->cputime_expires)) {
struct task_cputime task_sample;
task_cputime(tsk, &task_sample.utime, &task_sample.stime);
task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
return 1;
}
sig = tsk->signal;
/*
* Check if thread group timers expired when the cputimer is
* running and no other thread in the group is already checking
* for thread group cputimers. These fields are read without the
* sighand lock. However, this is fine because this is meant to
* be a fastpath heuristic to determine whether we should try to
* acquire the sighand lock to check/handle timers.
*
* In the worst case scenario, if 'running' or 'checking_timer' gets
* set but the current thread doesn't see the change yet, we'll wait
* until the next thread in the group gets a scheduler interrupt to
* handle the timer. This isn't an issue in practice because these
* types of delays with signals actually getting sent are expected.
*/
if (READ_ONCE(sig->cputimer.running) &&
!READ_ONCE(sig->cputimer.checking_timer)) {
struct task_cputime group_sample;
sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);
if (task_cputime_expired(&group_sample, &sig->cputime_expires))
return 1;
}
if (dl_task(tsk) && tsk->dl.dl_overrun)
return 1;
return 0;
}
|
static inline int fastpath_timer_check(struct task_struct *tsk)
{
struct signal_struct *sig;
if (!task_cputime_zero(&tsk->cputime_expires)) {
struct task_cputime task_sample;
task_cputime(tsk, &task_sample.utime, &task_sample.stime);
task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
return 1;
}
sig = tsk->signal;
/*
* Check if thread group timers expired when the cputimer is
* running and no other thread in the group is already checking
* for thread group cputimers. These fields are read without the
* sighand lock. However, this is fine because this is meant to
* be a fastpath heuristic to determine whether we should try to
* acquire the sighand lock to check/handle timers.
*
* In the worst case scenario, if 'running' or 'checking_timer' gets
* set but the current thread doesn't see the change yet, we'll wait
* until the next thread in the group gets a scheduler interrupt to
* handle the timer. This isn't an issue in practice because these
* types of delays with signals actually getting sent are expected.
*/
if (READ_ONCE(sig->cputimer.running) &&
!READ_ONCE(sig->cputimer.checking_timer)) {
struct task_cputime group_sample;
sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);
if (task_cputime_expired(&group_sample, &sig->cputime_expires))
return 1;
}
if (dl_task(tsk) && tsk->dl.dl_overrun)
return 1;
return 0;
}
|
C
|
linux
| 0 |
CVE-2014-8172
|
https://www.cvedetails.com/cve/CVE-2014-8172/
|
CWE-17
|
https://github.com/torvalds/linux/commit/eee5cc2702929fd41cce28058dc6d6717f723f87
|
eee5cc2702929fd41cce28058dc6d6717f723f87
|
get rid of s_files and files_lock
The only thing we need it for is alt-sysrq-r (emergency remount r/o)
and these days we can do just as well without going through the
list of files.
Signed-off-by: Al Viro <[email protected]>
|
void mark_files_ro(struct super_block *sb)
|
void mark_files_ro(struct super_block *sb)
{
struct file *f;
lg_global_lock(&files_lglock);
do_file_list_for_each_entry(sb, f) {
if (!file_count(f))
continue;
if (!(f->f_mode & FMODE_WRITE))
continue;
spin_lock(&f->f_lock);
f->f_mode &= ~FMODE_WRITE;
spin_unlock(&f->f_lock);
if (file_check_writeable(f) != 0)
continue;
__mnt_drop_write(f->f_path.mnt);
file_release_write(f);
} while_file_list_for_each_entry;
lg_global_unlock(&files_lglock);
}
|
C
|
linux
| 1 |
CVE-2018-18349
|
https://www.cvedetails.com/cve/CVE-2018-18349/
|
CWE-732
|
https://github.com/chromium/chromium/commit/5f8671e7667b8b133bd3664100012a3906e92d65
|
5f8671e7667b8b133bd3664100012a3906e92d65
|
Add a check for disallowing remote frame navigations to local resources.
Previously, RemoteFrame navigations did not perform any renderer-side
checks and relied solely on the browser-side logic to block disallowed
navigations via mechanisms like FilterURL. This means that blocked
remote frame navigations were silently navigated to about:blank
without any console error message.
This CL adds a CanDisplay check to the remote navigation path to match
an equivalent check done for local frame navigations. This way, the
renderer can consistently block disallowed navigations in both cases
and output an error message.
Bug: 894399
Change-Id: I172f68f77c1676f6ca0172d2a6c78f7edc0e3b7a
Reviewed-on: https://chromium-review.googlesource.com/c/1282390
Reviewed-by: Charlie Reis <[email protected]>
Reviewed-by: Nate Chapin <[email protected]>
Commit-Queue: Alex Moshchuk <[email protected]>
Cr-Commit-Position: refs/heads/master@{#601022}
|
RenderWidgetHostViewAndroid* root_rwhv() { return root_rwhv_; }
|
RenderWidgetHostViewAndroid* root_rwhv() { return root_rwhv_; }
|
C
|
Chrome
| 0 |
CVE-2017-9310
|
https://www.cvedetails.com/cve/CVE-2017-9310/
|
CWE-835
|
https://git.qemu.org/?p=qemu.git;a=commitdiff;h=4154c7e03fa55b4cf52509a83d50d6c09d743b77
|
4154c7e03fa55b4cf52509a83d50d6c09d743b77
| null |
e1000e_ring_enabled(E1000ECore *core, const E1000E_RingInfo *r)
{
return core->mac[r->dlen] > 0;
}
|
e1000e_ring_enabled(E1000ECore *core, const E1000E_RingInfo *r)
{
return core->mac[r->dlen] > 0;
}
|
C
|
qemu
| 0 |
CVE-2012-2890
|
https://www.cvedetails.com/cve/CVE-2012-2890/
|
CWE-399
|
https://github.com/chromium/chromium/commit/eb4bcacd683a68534bbe2e4d8d6eeafafc7f57ba
|
eb4bcacd683a68534bbe2e4d8d6eeafafc7f57ba
|
Make chrome.appWindow.create() provide access to the child window at a predictable time.
When you first create a window with chrome.appWindow.create(), it won't have
loaded any resources. So, at create time, you are guaranteed that:
child_window.location.href == 'about:blank'
child_window.document.documentElement.outerHTML ==
'<html><head></head><body></body></html>'
This is in line with the behaviour of window.open().
BUG=131735
TEST=browser_tests:PlatformAppBrowserTest.WindowsApi
Committed: http://src.chromium.org/viewvc/chrome?view=rev&revision=144072
Review URL: https://chromiumcodereview.appspot.com/10644006
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@144356 0039d316-1c4b-4281-b951-d872f2087c98
|
void ResourceDispatcherHostImpl::OnDataReceivedACK(int request_id) {
ResourceLoader* loader = GetLoader(filter_->child_id(), request_id);
if (!loader)
return;
ResourceRequestInfoImpl* info = loader->GetRequestInfo();
if (info->async_handler())
info->async_handler()->OnDataReceivedACK();
}
|
void ResourceDispatcherHostImpl::OnDataReceivedACK(int request_id) {
ResourceLoader* loader = GetLoader(filter_->child_id(), request_id);
if (!loader)
return;
ResourceRequestInfoImpl* info = loader->GetRequestInfo();
if (info->async_handler())
info->async_handler()->OnDataReceivedACK();
}
|
C
|
Chrome
| 0 |
CVE-2019-16161
|
https://www.cvedetails.com/cve/CVE-2019-16161/
|
CWE-476
|
https://github.com/k-takata/Onigmo/commit/9827d5a0298ee766f6041db9c0080166ff6cdce8
|
9827d5a0298ee766f6041db9c0080166ff6cdce8
|
Merge pull request #134 from k-takata/fix-segv-in-error-str
Fix SEGV in onig_error_code_to_str() (Fix #132)
|
add_code_range(BBuf** pbuf, ScanEnv* env, OnigCodePoint from, OnigCodePoint to)
{
return add_code_range0(pbuf, env, from, to, 1);
}
|
add_code_range(BBuf** pbuf, ScanEnv* env, OnigCodePoint from, OnigCodePoint to)
{
return add_code_range0(pbuf, env, from, to, 1);
}
|
C
|
Onigmo
| 0 |
CVE-2014-9870
|
https://www.cvedetails.com/cve/CVE-2014-9870/
|
CWE-264
|
https://github.com/torvalds/linux/commit/a4780adeefd042482f624f5e0d577bf9cdcbb760
|
a4780adeefd042482f624f5e0d577bf9cdcbb760
|
ARM: 7735/2: Preserve the user r/w register TPIDRURW on context switch and fork
Since commit 6a1c53124aa1 the user writeable TLS register was zeroed to
prevent it from being used as a covert channel between two tasks.
There are more and more applications coming to Windows RT,
Wine could support them, but mostly they expect to have
the thread environment block (TEB) in TPIDRURW.
This patch preserves that register per thread instead of clearing it.
Unlike the TPIDRURO, which is already switched, the TPIDRURW
can be updated from userspace so needs careful treatment in the case that we
modify TPIDRURW and call fork(). To avoid this we must always read
TPIDRURW in copy_thread.
Signed-off-by: André Hentschel <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
Signed-off-by: Jonathan Austin <[email protected]>
Signed-off-by: Russell King <[email protected]>
|
void arm_notify_die(const char *str, struct pt_regs *regs,
struct siginfo *info, unsigned long err, unsigned long trap)
{
if (user_mode(regs)) {
current->thread.error_code = err;
current->thread.trap_no = trap;
force_sig_info(info->si_signo, info, current);
} else {
die(str, regs, err);
}
}
|
void arm_notify_die(const char *str, struct pt_regs *regs,
struct siginfo *info, unsigned long err, unsigned long trap)
{
if (user_mode(regs)) {
current->thread.error_code = err;
current->thread.trap_no = trap;
force_sig_info(info->si_signo, info, current);
} else {
die(str, regs, err);
}
}
|
C
|
linux
| 0 |
CVE-2019-12111
|
https://www.cvedetails.com/cve/CVE-2019-12111/
|
CWE-476
|
https://github.com/miniupnp/miniupnp/commit/cb8a02af7a5677cf608e86d57ab04241cf34e24f
|
cb8a02af7a5677cf608e86d57ab04241cf34e24f
|
pcpserver.c: copyIPv6IfDifferent() check for NULL src argument
|
static int parseSADSCP(const uint8_t *buf, pcp_info_t *pcp_msg_info)
{
pcp_msg_info->delay_tolerance = (buf[12]>>6)&3;
pcp_msg_info->loss_tolerance = (buf[12]>>4)&3;
pcp_msg_info->jitter_tolerance = (buf[12]>>2)&3;
if (pcp_msg_info->delay_tolerance == 3 ||
pcp_msg_info->loss_tolerance == 3 ||
pcp_msg_info->jitter_tolerance == 3 ) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
return 1;
}
pcp_msg_info->app_name = (const char *)(buf + 14);
pcp_msg_info->app_name_len = buf[13];
return 0;
}
|
static int parseSADSCP(const uint8_t *buf, pcp_info_t *pcp_msg_info)
{
pcp_msg_info->delay_tolerance = (buf[12]>>6)&3;
pcp_msg_info->loss_tolerance = (buf[12]>>4)&3;
pcp_msg_info->jitter_tolerance = (buf[12]>>2)&3;
if (pcp_msg_info->delay_tolerance == 3 ||
pcp_msg_info->loss_tolerance == 3 ||
pcp_msg_info->jitter_tolerance == 3 ) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
return 1;
}
pcp_msg_info->app_name = (const char *)(buf + 14);
pcp_msg_info->app_name_len = buf[13];
return 0;
}
|
C
|
miniupnp
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/27c68f543e5eba779902447445dfb05ec3f5bf75
|
27c68f543e5eba779902447445dfb05ec3f5bf75
|
Revert of Add accelerated VP9 decode infrastructure and an implementation for VA-API. (patchset #7 id:260001 of https://codereview.chromium.org/1318863003/ )
Reason for revert:
I think this patch broke compile step for Chromium Linux ChromeOS MSan Builder.
First failing build:
http://build.chromium.org/p/chromium.memory.fyi/builders/Chromium%20Linux%20ChromeOS%20MSan%20Builder/builds/8310
All recent builds:
http://build.chromium.org/p/chromium.memory.fyi/builders/Chromium%20Linux%20ChromeOS%20MSan%20Builder?numbuilds=200
Sorry for the revert. I'll re-revert if I'm wrong.
Cheers,
Tommy
Original issue's description:
> Add accelerated VP9 decode infrastructure and an implementation for VA-API.
>
> - Add a hardware/platform-independent VP9Decoder class and related
> infrastructure, implementing AcceleratedVideoDecoder interface. VP9Decoder
> performs the initial stages of the decode process, which are to be done
> on host/in software, such as stream parsing and reference frame management.
>
> - Add a VP9Accelerator interface, used by the VP9Decoder to offload the
> remaining stages of the decode process to hardware. VP9Accelerator
> implementations are platform-specific.
>
> - Add the first implementation of VP9Accelerator - VaapiVP9Accelerator - and
> integrate it with VaapiVideoDecodeAccelerator, for devices which provide
> hardware VP9 acceleration through VA-API. Hook it up to the new
> infrastructure and VP9Decoder.
>
> - Extend Vp9Parser to provide functionality required by VP9Decoder and
> VP9Accelerator, including superframe parsing, handling of loop filter
> and segmentation initialization, state persistence across frames and
> resetting when needed. Also add code calculating segmentation dequants
> and loop filter levels.
>
> - Update vp9_parser_unittest to the new Vp9Parser interface and flow.
>
> TEST=vp9_parser_unittest,vda_unittest,Chrome VP9 playback
> BUG=chrome-os-partner:41469,chrome-os-partner:41470,chromium:525331
> [email protected]
>
> Committed: https://crrev.com/e3cc0a661b8abfdc74f569940949bc1f336ece40
> Cr-Commit-Position: refs/heads/master@{#349312}
[email protected],[email protected],[email protected],[email protected],[email protected]
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chrome-os-partner:41469,chrome-os-partner:41470,chromium:525331
Review URL: https://codereview.chromium.org/1357513002
Cr-Commit-Position: refs/heads/master@{#349443}
|
void VaapiWrapper::PreSandboxInitialization() {
#if defined(USE_OZONE)
const char* kDriRenderNode0Path = "/dev/dri/renderD128";
base::File drm_file = base::File(
base::FilePath::FromUTF8Unsafe(kDriRenderNode0Path),
base::File::FLAG_OPEN | base::File::FLAG_READ | base::File::FLAG_WRITE);
if (drm_file.IsValid())
va_display_state_.Get().SetDrmFd(drm_file.GetPlatformFile());
#endif
}
|
void VaapiWrapper::PreSandboxInitialization() {
#if defined(USE_OZONE)
const char* kDriRenderNode0Path = "/dev/dri/renderD128";
base::File drm_file = base::File(
base::FilePath::FromUTF8Unsafe(kDriRenderNode0Path),
base::File::FLAG_OPEN | base::File::FLAG_READ | base::File::FLAG_WRITE);
if (drm_file.IsValid())
va_display_state_.Get().SetDrmFd(drm_file.GetPlatformFile());
#endif
}
|
C
|
Chrome
| 0 |
CVE-2018-17204
|
https://www.cvedetails.com/cve/CVE-2018-17204/
|
CWE-617
|
https://github.com/openvswitch/ovs/commit/4af6da3b275b764b1afe194df6499b33d2bf4cde
|
4af6da3b275b764b1afe194df6499b33d2bf4cde
|
ofp-group: Don't assert-fail decoding bad OF1.5 group mod type or command.
When decoding a group mod, the current code validates the group type and
command after the whole group mod has been decoded. The OF1.5 decoder,
however, tries to use the type and command earlier, when it might still be
invalid. This caused an assertion failure (via OVS_NOT_REACHED). This
commit fixes the problem.
ovs-vswitchd does not enable support for OpenFlow 1.5 by default.
Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9249
Signed-off-by: Ben Pfaff <[email protected]>
Reviewed-by: Yifeng Sun <[email protected]>
|
ofputil_decode_hello(const struct ofp_header *oh, uint32_t *allowed_versions)
{
struct ofpbuf msg = ofpbuf_const_initializer(oh, ntohs(oh->length));
ofpbuf_pull(&msg, sizeof *oh);
*allowed_versions = version_bitmap_from_version(oh->version);
bool ok = true;
while (msg.size) {
const struct ofp_hello_elem_header *oheh;
unsigned int len;
if (msg.size < sizeof *oheh) {
return false;
}
oheh = msg.data;
len = ntohs(oheh->length);
if (len < sizeof *oheh || !ofpbuf_try_pull(&msg, ROUND_UP(len, 8))) {
return false;
}
if (oheh->type != htons(OFPHET_VERSIONBITMAP)
|| !ofputil_decode_hello_bitmap(oheh, allowed_versions)) {
ok = false;
}
}
return ok;
}
|
ofputil_decode_hello(const struct ofp_header *oh, uint32_t *allowed_versions)
{
struct ofpbuf msg = ofpbuf_const_initializer(oh, ntohs(oh->length));
ofpbuf_pull(&msg, sizeof *oh);
*allowed_versions = version_bitmap_from_version(oh->version);
bool ok = true;
while (msg.size) {
const struct ofp_hello_elem_header *oheh;
unsigned int len;
if (msg.size < sizeof *oheh) {
return false;
}
oheh = msg.data;
len = ntohs(oheh->length);
if (len < sizeof *oheh || !ofpbuf_try_pull(&msg, ROUND_UP(len, 8))) {
return false;
}
if (oheh->type != htons(OFPHET_VERSIONBITMAP)
|| !ofputil_decode_hello_bitmap(oheh, allowed_versions)) {
ok = false;
}
}
return ok;
}
|
C
|
ovs
| 0 |
CVE-2015-7884
|
https://www.cvedetails.com/cve/CVE-2015-7884/
|
CWE-200
|
https://github.com/torvalds/linux/commit/eda98796aff0d9bf41094b06811f5def3b4c333c
|
eda98796aff0d9bf41094b06811f5def3b4c333c
|
[media] media/vivid-osd: fix info leak in ioctl
The vivid_fb_ioctl() code fails to initialize the 16 _reserved bytes of
struct fb_vblank after the ->hcount member. Add an explicit
memset(0) before filling the structure to avoid the info leak.
Signed-off-by: Salva Peiró <[email protected]>
Signed-off-by: Hans Verkuil <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]>
|
static int vivid_fb_blank(int blank_mode, struct fb_info *info)
{
struct vivid_dev *dev = (struct vivid_dev *)info->par;
dprintk(dev, 1, "Set blanking mode : %d\n", blank_mode);
switch (blank_mode) {
case FB_BLANK_UNBLANK:
break;
case FB_BLANK_NORMAL:
case FB_BLANK_HSYNC_SUSPEND:
case FB_BLANK_VSYNC_SUSPEND:
case FB_BLANK_POWERDOWN:
break;
}
return 0;
}
|
static int vivid_fb_blank(int blank_mode, struct fb_info *info)
{
struct vivid_dev *dev = (struct vivid_dev *)info->par;
dprintk(dev, 1, "Set blanking mode : %d\n", blank_mode);
switch (blank_mode) {
case FB_BLANK_UNBLANK:
break;
case FB_BLANK_NORMAL:
case FB_BLANK_HSYNC_SUSPEND:
case FB_BLANK_VSYNC_SUSPEND:
case FB_BLANK_POWERDOWN:
break;
}
return 0;
}
|
C
|
linux
| 0 |
CVE-2015-8324
|
https://www.cvedetails.com/cve/CVE-2015-8324/
| null |
https://github.com/torvalds/linux/commit/744692dc059845b2a3022119871846e74d4f6e11
|
744692dc059845b2a3022119871846e74d4f6e11
|
ext4: use ext4_get_block_write in buffer write
Allocate uninitialized extent before ext4 buffer write and
convert the extent to initialized after io completes.
The purpose is to make sure an extent can only be marked
initialized after it has been written with new data so
we can safely drop the i_mutex lock in ext4 DIO read without
exposing stale data. This helps to improve multi-thread DIO
read performance on high-speed disks.
Skip the nobh and data=journal mount cases to make things simple for now.
Signed-off-by: Jiaying Zhang <[email protected]>
Signed-off-by: "Theodore Ts'o" <[email protected]>
|
int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
ssize_t len)
{
handle_t *handle;
ext4_lblk_t block;
unsigned int max_blocks;
int ret = 0;
int ret2 = 0;
struct buffer_head map_bh;
unsigned int credits, blkbits = inode->i_blkbits;
block = offset >> blkbits;
/*
* We can't just convert len to max_blocks because
* If blocksize = 4096 offset = 3072 and len = 2048
*/
max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
- block;
/*
* credits to insert 1 extent into extent tree
*/
credits = ext4_chunk_trans_blocks(inode, max_blocks);
while (ret >= 0 && ret < max_blocks) {
block = block + ret;
max_blocks = max_blocks - ret;
handle = ext4_journal_start(inode, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
break;
}
map_bh.b_state = 0;
ret = ext4_get_blocks(handle, inode, block,
max_blocks, &map_bh,
EXT4_GET_BLOCKS_IO_CONVERT_EXT);
if (ret <= 0) {
WARN_ON(ret <= 0);
printk(KERN_ERR "%s: ext4_ext_get_blocks "
"returned error inode#%lu, block=%u, "
"max_blocks=%u", __func__,
inode->i_ino, block, max_blocks);
}
ext4_mark_inode_dirty(handle, inode);
ret2 = ext4_journal_stop(handle);
if (ret <= 0 || ret2 )
break;
}
return ret > 0 ? ret2 : ret;
}
|
int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
ssize_t len)
{
handle_t *handle;
ext4_lblk_t block;
unsigned int max_blocks;
int ret = 0;
int ret2 = 0;
struct buffer_head map_bh;
unsigned int credits, blkbits = inode->i_blkbits;
block = offset >> blkbits;
/*
* We can't just convert len to max_blocks because
* If blocksize = 4096 offset = 3072 and len = 2048
*/
max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
- block;
/*
* credits to insert 1 extent into extent tree
*/
credits = ext4_chunk_trans_blocks(inode, max_blocks);
while (ret >= 0 && ret < max_blocks) {
block = block + ret;
max_blocks = max_blocks - ret;
handle = ext4_journal_start(inode, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
break;
}
map_bh.b_state = 0;
ret = ext4_get_blocks(handle, inode, block,
max_blocks, &map_bh,
EXT4_GET_BLOCKS_IO_CONVERT_EXT);
if (ret <= 0) {
WARN_ON(ret <= 0);
printk(KERN_ERR "%s: ext4_ext_get_blocks "
"returned error inode#%lu, block=%u, "
"max_blocks=%u", __func__,
inode->i_ino, block, max_blocks);
}
ext4_mark_inode_dirty(handle, inode);
ret2 = ext4_journal_stop(handle);
if (ret <= 0 || ret2 )
break;
}
return ret > 0 ? ret2 : ret;
}
|
C
|
linux
| 0 |
CVE-2011-1292
|
https://www.cvedetails.com/cve/CVE-2011-1292/
|
CWE-399
|
https://github.com/chromium/chromium/commit/5f372f899b8709dac700710b5f0f90959dcf9ecb
|
5f372f899b8709dac700710b5f0f90959dcf9ecb
|
Add support for autofill server experiments
BUG=none
TEST=unit_tests --gtest_filter=AutoFillMetricsTest.QualityMetricsWithExperimentId:AutoFillQueryXmlParserTest.ParseExperimentId
Review URL: http://codereview.chromium.org/6260027
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@73216 0039d316-1c4b-4281-b951-d872f2087c98
|
void AutoFillManager::LogMetricsAboutSubmittedForm(
const FormData& form,
const FormStructure* submitted_form) {
FormStructure* cached_submitted_form;
if (!FindCachedForm(form, &cached_submitted_form)) {
NOTREACHED();
return;
}
std::map<std::string, const AutoFillField*> cached_fields;
for (size_t i = 0; i < cached_submitted_form->field_count(); ++i) {
const AutoFillField* field = cached_submitted_form->field(i);
cached_fields[field->FieldSignature()] = field;
}
std::string experiment_id = cached_submitted_form->server_experiment_id();
for (size_t i = 0; i < submitted_form->field_count(); ++i) {
const AutoFillField* field = submitted_form->field(i);
FieldTypeSet field_types;
personal_data_->GetPossibleFieldTypes(field->value(), &field_types);
DCHECK(!field_types.empty());
if (field->form_control_type() == ASCIIToUTF16("select-one")) {
continue;
}
metric_logger_->Log(AutoFillMetrics::FIELD_SUBMITTED, experiment_id);
if (field_types.find(EMPTY_TYPE) == field_types.end() &&
field_types.find(UNKNOWN_TYPE) == field_types.end()) {
if (field->is_autofilled()) {
metric_logger_->Log(AutoFillMetrics::FIELD_AUTOFILLED, experiment_id);
} else {
metric_logger_->Log(AutoFillMetrics::FIELD_AUTOFILL_FAILED,
experiment_id);
AutoFillFieldType heuristic_type = UNKNOWN_TYPE;
AutoFillFieldType server_type = NO_SERVER_DATA;
std::map<std::string, const AutoFillField*>::const_iterator
cached_field = cached_fields.find(field->FieldSignature());
if (cached_field != cached_fields.end()) {
heuristic_type = cached_field->second->heuristic_type();
server_type = cached_field->second->server_type();
}
if (heuristic_type == UNKNOWN_TYPE) {
metric_logger_->Log(AutoFillMetrics::FIELD_HEURISTIC_TYPE_UNKNOWN,
experiment_id);
} else if (field_types.count(heuristic_type)) {
metric_logger_->Log(AutoFillMetrics::FIELD_HEURISTIC_TYPE_MATCH,
experiment_id);
} else {
metric_logger_->Log(AutoFillMetrics::FIELD_HEURISTIC_TYPE_MISMATCH,
experiment_id);
}
if (server_type == NO_SERVER_DATA) {
metric_logger_->Log(AutoFillMetrics::FIELD_SERVER_TYPE_UNKNOWN,
experiment_id);
} else if (field_types.count(server_type)) {
metric_logger_->Log(AutoFillMetrics::FIELD_SERVER_TYPE_MATCH,
experiment_id);
} else {
metric_logger_->Log(AutoFillMetrics::FIELD_SERVER_TYPE_MISMATCH,
experiment_id);
}
}
}
}
}
|
void AutoFillManager::LogMetricsAboutSubmittedForm(
const FormData& form,
const FormStructure* submitted_form) {
FormStructure* cached_submitted_form;
if (!FindCachedForm(form, &cached_submitted_form)) {
NOTREACHED();
return;
}
std::map<std::string, const AutoFillField*> cached_fields;
for (size_t i = 0; i < cached_submitted_form->field_count(); ++i) {
const AutoFillField* field = cached_submitted_form->field(i);
cached_fields[field->FieldSignature()] = field;
}
for (size_t i = 0; i < submitted_form->field_count(); ++i) {
const AutoFillField* field = submitted_form->field(i);
FieldTypeSet field_types;
personal_data_->GetPossibleFieldTypes(field->value(), &field_types);
DCHECK(!field_types.empty());
if (field->form_control_type() == ASCIIToUTF16("select-one")) {
continue;
}
metric_logger_->Log(AutoFillMetrics::FIELD_SUBMITTED);
if (field_types.find(EMPTY_TYPE) == field_types.end() &&
field_types.find(UNKNOWN_TYPE) == field_types.end()) {
if (field->is_autofilled()) {
metric_logger_->Log(AutoFillMetrics::FIELD_AUTOFILLED);
} else {
metric_logger_->Log(AutoFillMetrics::FIELD_AUTOFILL_FAILED);
AutoFillFieldType heuristic_type = UNKNOWN_TYPE;
AutoFillFieldType server_type = NO_SERVER_DATA;
std::map<std::string, const AutoFillField*>::const_iterator
cached_field = cached_fields.find(field->FieldSignature());
if (cached_field != cached_fields.end()) {
heuristic_type = cached_field->second->heuristic_type();
server_type = cached_field->second->server_type();
}
if (heuristic_type == UNKNOWN_TYPE)
metric_logger_->Log(AutoFillMetrics::FIELD_HEURISTIC_TYPE_UNKNOWN);
else if (field_types.count(heuristic_type))
metric_logger_->Log(AutoFillMetrics::FIELD_HEURISTIC_TYPE_MATCH);
else
metric_logger_->Log(AutoFillMetrics::FIELD_HEURISTIC_TYPE_MISMATCH);
if (server_type == NO_SERVER_DATA)
metric_logger_->Log(AutoFillMetrics::FIELD_SERVER_TYPE_UNKNOWN);
else if (field_types.count(server_type))
metric_logger_->Log(AutoFillMetrics::FIELD_SERVER_TYPE_MATCH);
else
metric_logger_->Log(AutoFillMetrics::FIELD_SERVER_TYPE_MISMATCH);
}
}
}
}
|
C
|
Chrome
| 1 |
CVE-2018-6927
|
https://www.cvedetails.com/cve/CVE-2018-6927/
|
CWE-190
|
https://github.com/torvalds/linux/commit/fbe0e839d1e22d88810f3ee3e2f1479be4c0aa4a
|
fbe0e839d1e22d88810f3ee3e2f1479be4c0aa4a
|
futex: Prevent overflow by strengthen input validation
UBSAN reports signed integer overflow in kernel/futex.c:
UBSAN: Undefined behaviour in kernel/futex.c:2041:18
signed integer overflow:
0 - -2147483648 cannot be represented in type 'int'
Add a sanity check to catch negative values of nr_wake and nr_requeue.
Signed-off-by: Li Jinyue <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
|
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
struct futex_hash_bucket *hb;
struct futex_q *this, *next;
union futex_key key = FUTEX_KEY_INIT;
int ret;
DEFINE_WAKE_Q(wake_q);
if (!bitset)
return -EINVAL;
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
hb = hash_futex(&key);
/* Make sure we really have tasks to wakeup */
if (!hb_waiters_pending(hb))
goto out_put_key;
spin_lock(&hb->lock);
plist_for_each_entry_safe(this, next, &hb->chain, list) {
if (match_futex (&this->key, &key)) {
if (this->pi_state || this->rt_waiter) {
ret = -EINVAL;
break;
}
/* Check if one of the bits is set in both bitsets */
if (!(this->bitset & bitset))
continue;
mark_wake_futex(&wake_q, this);
if (++ret >= nr_wake)
break;
}
}
spin_unlock(&hb->lock);
wake_up_q(&wake_q);
out_put_key:
put_futex_key(&key);
out:
return ret;
}
|
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
struct futex_hash_bucket *hb;
struct futex_q *this, *next;
union futex_key key = FUTEX_KEY_INIT;
int ret;
DEFINE_WAKE_Q(wake_q);
if (!bitset)
return -EINVAL;
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
hb = hash_futex(&key);
/* Make sure we really have tasks to wakeup */
if (!hb_waiters_pending(hb))
goto out_put_key;
spin_lock(&hb->lock);
plist_for_each_entry_safe(this, next, &hb->chain, list) {
if (match_futex (&this->key, &key)) {
if (this->pi_state || this->rt_waiter) {
ret = -EINVAL;
break;
}
/* Check if one of the bits is set in both bitsets */
if (!(this->bitset & bitset))
continue;
mark_wake_futex(&wake_q, this);
if (++ret >= nr_wake)
break;
}
}
spin_unlock(&hb->lock);
wake_up_q(&wake_q);
out_put_key:
put_futex_key(&key);
out:
return ret;
}
|
C
|
linux
| 0 |
CVE-2016-10270
|
https://www.cvedetails.com/cve/CVE-2016-10270/
|
CWE-125
|
https://github.com/vadz/libtiff/commit/9a72a69e035ee70ff5c41541c8c61cd97990d018
|
9a72a69e035ee70ff5c41541c8c61cd97990d018
|
* libtiff/tif_dirread.c: modify ChopUpSingleUncompressedStrip() to
instanciate compute ntrips as TIFFhowmany_32(td->td_imagelength, rowsperstrip),
instead of a logic based on the total size of data. Which is faulty is
the total size of data is not sufficient to fill the whole image, and thus
results in reading outside of the StripByCounts/StripOffsets arrays when
using TIFFReadScanline().
Reported by Agostino Sarubbo.
Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2608.
* libtiff/tif_strip.c: revert the change in TIFFNumberOfStrips() done
for http://bugzilla.maptools.org/show_bug.cgi?id=2587 / CVE-2016-9273 since
the above change is a better fix that makes it unnecessary.
|
static enum TIFFReadDirEntryErr TIFFReadDirEntryIfd8(TIFF* tif, TIFFDirEntry* direntry, uint64* value)
{
enum TIFFReadDirEntryErr err;
if (direntry->tdir_count!=1)
return(TIFFReadDirEntryErrCount);
switch (direntry->tdir_type)
{
case TIFF_LONG:
case TIFF_IFD:
{
uint32 m;
TIFFReadDirEntryCheckedLong(tif,direntry,&m);
*value=(uint64)m;
return(TIFFReadDirEntryErrOk);
}
case TIFF_LONG8:
case TIFF_IFD8:
err=TIFFReadDirEntryCheckedLong8(tif,direntry,value);
return(err);
default:
return(TIFFReadDirEntryErrType);
}
}
|
static enum TIFFReadDirEntryErr TIFFReadDirEntryIfd8(TIFF* tif, TIFFDirEntry* direntry, uint64* value)
{
enum TIFFReadDirEntryErr err;
if (direntry->tdir_count!=1)
return(TIFFReadDirEntryErrCount);
switch (direntry->tdir_type)
{
case TIFF_LONG:
case TIFF_IFD:
{
uint32 m;
TIFFReadDirEntryCheckedLong(tif,direntry,&m);
*value=(uint64)m;
return(TIFFReadDirEntryErrOk);
}
case TIFF_LONG8:
case TIFF_IFD8:
err=TIFFReadDirEntryCheckedLong8(tif,direntry,value);
return(err);
default:
return(TIFFReadDirEntryErrType);
}
}
|
C
|
libtiff
| 0 |
CVE-2017-5104
|
https://www.cvedetails.com/cve/CVE-2017-5104/
|
CWE-20
|
https://github.com/chromium/chromium/commit/adca986a53b31b6da4cb22f8e755f6856daea89a
|
adca986a53b31b6da4cb22f8e755f6856daea89a
|
Don't show current RenderWidgetHostView while interstitial is showing.
Also moves interstitial page tracking from RenderFrameHostManager to
WebContents, since interstitial pages are not frame-specific. This was
necessary for subframes to detect if an interstitial page is showing.
BUG=729105
TEST=See comment 13 of bug for repro steps
CQ_INCLUDE_TRYBOTS=master.tryserver.chromium.linux:linux_site_isolation
Review-Url: https://codereview.chromium.org/2938313002
Cr-Commit-Position: refs/heads/master@{#480117}
|
bool CreateInputAndSetText(const std::string& text) {
return ExecuteScript(interstitial_->GetMainFrame(),
"create_input_and_set_text('" + text + "')");
}
|
bool CreateInputAndSetText(const std::string& text) {
return ExecuteScript(interstitial_->GetMainFrame(),
"create_input_and_set_text('" + text + "')");
}
|
C
|
Chrome
| 0 |
CVE-2016-4425
|
https://www.cvedetails.com/cve/CVE-2016-4425/
|
CWE-20
|
https://github.com/akheron/jansson/pull/284/commits/64ce0ad3731ebd77e02897b07920eadd0e2cc318
|
64ce0ad3731ebd77e02897b07920eadd0e2cc318
|
Fix for issue #282
The fix limits recursion depths when parsing arrays and objects.
The limit is configurable via the `JSON_PARSER_MAX_DEPTH` setting
within `jansson_config.h` and is set by default to 2048.
Update the RFC conformance document to note the limit; the RFC
allows limits to be set by the implementation so nothing has
actually changed w.r.t. conformance state.
Reported by Gustavo Grieco.
|
json_t *json_loadb(const char *buffer, size_t buflen, size_t flags, json_error_t *error)
{
lex_t lex;
json_t *result;
buffer_data_t stream_data;
jsonp_error_init(error, "<buffer>");
if (buffer == NULL) {
error_set(error, NULL, "wrong arguments");
return NULL;
}
stream_data.data = buffer;
stream_data.pos = 0;
stream_data.len = buflen;
if(lex_init(&lex, buffer_get, flags, (void *)&stream_data))
return NULL;
result = parse_json(&lex, flags, error);
lex_close(&lex);
return result;
}
|
json_t *json_loadb(const char *buffer, size_t buflen, size_t flags, json_error_t *error)
{
lex_t lex;
json_t *result;
buffer_data_t stream_data;
jsonp_error_init(error, "<buffer>");
if (buffer == NULL) {
error_set(error, NULL, "wrong arguments");
return NULL;
}
stream_data.data = buffer;
stream_data.pos = 0;
stream_data.len = buflen;
if(lex_init(&lex, buffer_get, flags, (void *)&stream_data))
return NULL;
result = parse_json(&lex, flags, error);
lex_close(&lex);
return result;
}
|
C
|
jansson
| 0 |
CVE-2013-4282
|
https://www.cvedetails.com/cve/CVE-2013-4282/
|
CWE-119
|
https://cgit.freedesktop.org/spice/spice/commit/?id=8af619009660b24e0b41ad26b30289eea288fcc2
|
8af619009660b24e0b41ad26b30289eea288fcc2
| null |
void reds_handle_agent_mouse_event(const VDAgentMouseState *mouse_state)
{
SpiceCharDeviceWriteBuffer *char_dev_buf;
VDInternalBuf *internal_buf;
uint32_t total_msg_size;
if (!inputs_inited() || !reds->agent_state.base) {
return;
}
total_msg_size = sizeof(VDIChunkHeader) + sizeof(VDAgentMessage) +
sizeof(VDAgentMouseState);
char_dev_buf = spice_char_device_write_buffer_get(reds->agent_state.base,
NULL,
total_msg_size);
if (!char_dev_buf) {
reds->pending_mouse_event = TRUE;
return;
}
reds->pending_mouse_event = FALSE;
internal_buf = (VDInternalBuf *)char_dev_buf->buf;
internal_buf->chunk_header.port = VDP_SERVER_PORT;
internal_buf->chunk_header.size = sizeof(VDAgentMessage) + sizeof(VDAgentMouseState);
internal_buf->header.protocol = VD_AGENT_PROTOCOL;
internal_buf->header.type = VD_AGENT_MOUSE_STATE;
internal_buf->header.opaque = 0;
internal_buf->header.size = sizeof(VDAgentMouseState);
internal_buf->u.mouse_state = *mouse_state;
char_dev_buf->buf_used = total_msg_size;
spice_char_device_write_buffer_add(reds->agent_state.base, char_dev_buf);
}
|
void reds_handle_agent_mouse_event(const VDAgentMouseState *mouse_state)
{
SpiceCharDeviceWriteBuffer *char_dev_buf;
VDInternalBuf *internal_buf;
uint32_t total_msg_size;
if (!inputs_inited() || !reds->agent_state.base) {
return;
}
total_msg_size = sizeof(VDIChunkHeader) + sizeof(VDAgentMessage) +
sizeof(VDAgentMouseState);
char_dev_buf = spice_char_device_write_buffer_get(reds->agent_state.base,
NULL,
total_msg_size);
if (!char_dev_buf) {
reds->pending_mouse_event = TRUE;
return;
}
reds->pending_mouse_event = FALSE;
internal_buf = (VDInternalBuf *)char_dev_buf->buf;
internal_buf->chunk_header.port = VDP_SERVER_PORT;
internal_buf->chunk_header.size = sizeof(VDAgentMessage) + sizeof(VDAgentMouseState);
internal_buf->header.protocol = VD_AGENT_PROTOCOL;
internal_buf->header.type = VD_AGENT_MOUSE_STATE;
internal_buf->header.opaque = 0;
internal_buf->header.size = sizeof(VDAgentMouseState);
internal_buf->u.mouse_state = *mouse_state;
char_dev_buf->buf_used = total_msg_size;
spice_char_device_write_buffer_add(reds->agent_state.base, char_dev_buf);
}
|
C
|
spice
| 0 |
CVE-2013-7009
|
https://www.cvedetails.com/cve/CVE-2013-7009/
|
CWE-119
|
https://github.com/FFmpeg/FFmpeg/commit/3819db745da2ac7fb3faacb116788c32f4753f34
|
3819db745da2ac7fb3faacb116788c32f4753f34
|
avcodec/rpza: Perform pointer advance and checks before using the pointers
Fixes out of array accesses
Fixes Ticket2850
Signed-off-by: Michael Niedermayer <[email protected]>
|
static void rpza_decode_stream(RpzaContext *s)
{
int width = s->avctx->width;
int stride = s->frame.linesize[0] / 2;
int row_inc = stride - 4;
int stream_ptr = 0;
int chunk_size;
unsigned char opcode;
int n_blocks;
unsigned short colorA = 0, colorB;
unsigned short color4[4];
unsigned char index, idx;
unsigned short ta, tb;
unsigned short *pixels = (unsigned short *)s->frame.data[0];
int row_ptr = 0;
int pixel_ptr = -4;
int block_ptr;
int pixel_x, pixel_y;
int total_blocks;
/* First byte is always 0xe1. Warn if it's different */
if (s->buf[stream_ptr] != 0xe1)
av_log(s->avctx, AV_LOG_ERROR, "First chunk byte is 0x%02x instead of 0xe1\n",
s->buf[stream_ptr]);
/* Get chunk size, ingnoring first byte */
chunk_size = AV_RB32(&s->buf[stream_ptr]) & 0x00FFFFFF;
stream_ptr += 4;
/* If length mismatch use size from MOV file and try to decode anyway */
if (chunk_size != s->size)
av_log(s->avctx, AV_LOG_ERROR, "MOV chunk size != encoded chunk size; using MOV chunk size\n");
chunk_size = s->size;
/* Number of 4x4 blocks in frame. */
total_blocks = ((s->avctx->width + 3) / 4) * ((s->avctx->height + 3) / 4);
/* Process chunk data */
while (stream_ptr < chunk_size) {
opcode = s->buf[stream_ptr++]; /* Get opcode */
n_blocks = (opcode & 0x1f) + 1; /* Extract block counter from opcode */
/* If opcode MSbit is 0, we need more data to decide what to do */
if ((opcode & 0x80) == 0) {
colorA = (opcode << 8) | (s->buf[stream_ptr++]);
opcode = 0;
if ((s->buf[stream_ptr] & 0x80) != 0) {
/* Must behave as opcode 110xxxxx, using colorA computed
* above. Use fake opcode 0x20 to enter switch block at
* the right place */
opcode = 0x20;
n_blocks = 1;
}
}
switch (opcode & 0xe0) {
/* Skip blocks */
case 0x80:
while (n_blocks--) {
ADVANCE_BLOCK();
}
break;
/* Fill blocks with one color */
case 0xa0:
colorA = AV_RB16 (&s->buf[stream_ptr]);
stream_ptr += 2;
while (n_blocks--) {
ADVANCE_BLOCK()
block_ptr = row_ptr + pixel_ptr;
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
for (pixel_x = 0; pixel_x < 4; pixel_x++){
pixels[block_ptr] = colorA;
block_ptr++;
}
block_ptr += row_inc;
}
}
break;
/* Fill blocks with 4 colors */
case 0xc0:
colorA = AV_RB16 (&s->buf[stream_ptr]);
stream_ptr += 2;
case 0x20:
colorB = AV_RB16 (&s->buf[stream_ptr]);
stream_ptr += 2;
/* sort out the colors */
color4[0] = colorB;
color4[1] = 0;
color4[2] = 0;
color4[3] = colorA;
/* red components */
ta = (colorA >> 10) & 0x1F;
tb = (colorB >> 10) & 0x1F;
color4[1] |= ((11 * ta + 21 * tb) >> 5) << 10;
color4[2] |= ((21 * ta + 11 * tb) >> 5) << 10;
/* green components */
ta = (colorA >> 5) & 0x1F;
tb = (colorB >> 5) & 0x1F;
color4[1] |= ((11 * ta + 21 * tb) >> 5) << 5;
color4[2] |= ((21 * ta + 11 * tb) >> 5) << 5;
/* blue components */
ta = colorA & 0x1F;
tb = colorB & 0x1F;
color4[1] |= ((11 * ta + 21 * tb) >> 5);
color4[2] |= ((21 * ta + 11 * tb) >> 5);
if (s->size - stream_ptr < n_blocks * 4)
return;
while (n_blocks--) {
ADVANCE_BLOCK();
block_ptr = row_ptr + pixel_ptr;
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
index = s->buf[stream_ptr++];
for (pixel_x = 0; pixel_x < 4; pixel_x++){
idx = (index >> (2 * (3 - pixel_x))) & 0x03;
pixels[block_ptr] = color4[idx];
block_ptr++;
}
block_ptr += row_inc;
}
}
break;
/* Fill block with 16 colors */
case 0x00:
if (s->size - stream_ptr < 16)
return;
ADVANCE_BLOCK();
block_ptr = row_ptr + pixel_ptr;
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
for (pixel_x = 0; pixel_x < 4; pixel_x++){
/* We already have color of upper left pixel */
if ((pixel_y != 0) || (pixel_x !=0)) {
colorA = AV_RB16 (&s->buf[stream_ptr]);
stream_ptr += 2;
}
pixels[block_ptr] = colorA;
block_ptr++;
}
block_ptr += row_inc;
}
break;
/* Unknown opcode */
default:
av_log(s->avctx, AV_LOG_ERROR, "Unknown opcode %d in rpza chunk."
" Skip remaining %d bytes of chunk data.\n", opcode,
chunk_size - stream_ptr);
return;
} /* Opcode switch */
}
}
|
static void rpza_decode_stream(RpzaContext *s)
{
int width = s->avctx->width;
int stride = s->frame.linesize[0] / 2;
int row_inc = stride - 4;
int stream_ptr = 0;
int chunk_size;
unsigned char opcode;
int n_blocks;
unsigned short colorA = 0, colorB;
unsigned short color4[4];
unsigned char index, idx;
unsigned short ta, tb;
unsigned short *pixels = (unsigned short *)s->frame.data[0];
int row_ptr = 0;
int pixel_ptr = 0;
int block_ptr;
int pixel_x, pixel_y;
int total_blocks;
/* First byte is always 0xe1. Warn if it's different */
if (s->buf[stream_ptr] != 0xe1)
av_log(s->avctx, AV_LOG_ERROR, "First chunk byte is 0x%02x instead of 0xe1\n",
s->buf[stream_ptr]);
/* Get chunk size, ignoring first byte */
chunk_size = AV_RB32(&s->buf[stream_ptr]) & 0x00FFFFFF;
stream_ptr += 4;
/* If length mismatch use size from MOV file and try to decode anyway */
if (chunk_size != s->size)
av_log(s->avctx, AV_LOG_ERROR, "MOV chunk size != encoded chunk size; using MOV chunk size\n");
chunk_size = s->size;
/* Number of 4x4 blocks in frame. */
total_blocks = ((s->avctx->width + 3) / 4) * ((s->avctx->height + 3) / 4);
/* Process chunk data */
while (stream_ptr < chunk_size) {
opcode = s->buf[stream_ptr++]; /* Get opcode */
n_blocks = (opcode & 0x1f) + 1; /* Extract block counter from opcode */
/* If opcode MSbit is 0, we need more data to decide what to do */
if ((opcode & 0x80) == 0) {
colorA = (opcode << 8) | (s->buf[stream_ptr++]);
opcode = 0;
if ((s->buf[stream_ptr] & 0x80) != 0) {
/* Must behave as opcode 110xxxxx, using colorA computed
* above. Use fake opcode 0x20 to enter switch block at
* the right place */
opcode = 0x20;
n_blocks = 1;
}
}
switch (opcode & 0xe0) {
/* Skip blocks */
case 0x80:
while (n_blocks--) {
ADVANCE_BLOCK();
}
break;
/* Fill blocks with one color */
case 0xa0:
colorA = AV_RB16 (&s->buf[stream_ptr]);
stream_ptr += 2;
while (n_blocks--) {
block_ptr = row_ptr + pixel_ptr;
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
for (pixel_x = 0; pixel_x < 4; pixel_x++){
pixels[block_ptr] = colorA;
block_ptr++;
}
block_ptr += row_inc;
}
ADVANCE_BLOCK();
}
break;
/* Fill blocks with 4 colors */
case 0xc0:
colorA = AV_RB16 (&s->buf[stream_ptr]);
stream_ptr += 2;
case 0x20:
colorB = AV_RB16 (&s->buf[stream_ptr]);
stream_ptr += 2;
/* sort out the colors */
color4[0] = colorB;
color4[1] = 0;
color4[2] = 0;
color4[3] = colorA;
/* red components */
ta = (colorA >> 10) & 0x1F;
tb = (colorB >> 10) & 0x1F;
color4[1] |= ((11 * ta + 21 * tb) >> 5) << 10;
color4[2] |= ((21 * ta + 11 * tb) >> 5) << 10;
/* green components */
ta = (colorA >> 5) & 0x1F;
tb = (colorB >> 5) & 0x1F;
color4[1] |= ((11 * ta + 21 * tb) >> 5) << 5;
color4[2] |= ((21 * ta + 11 * tb) >> 5) << 5;
/* blue components */
ta = colorA & 0x1F;
tb = colorB & 0x1F;
color4[1] |= ((11 * ta + 21 * tb) >> 5);
color4[2] |= ((21 * ta + 11 * tb) >> 5);
if (s->size - stream_ptr < n_blocks * 4)
return;
while (n_blocks--) {
block_ptr = row_ptr + pixel_ptr;
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
index = s->buf[stream_ptr++];
for (pixel_x = 0; pixel_x < 4; pixel_x++){
idx = (index >> (2 * (3 - pixel_x))) & 0x03;
pixels[block_ptr] = color4[idx];
block_ptr++;
}
block_ptr += row_inc;
}
ADVANCE_BLOCK();
}
break;
/* Fill block with 16 colors */
case 0x00:
if (s->size - stream_ptr < 16)
return;
block_ptr = row_ptr + pixel_ptr;
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
for (pixel_x = 0; pixel_x < 4; pixel_x++){
/* We already have color of upper left pixel */
if ((pixel_y != 0) || (pixel_x !=0)) {
colorA = AV_RB16 (&s->buf[stream_ptr]);
stream_ptr += 2;
}
pixels[block_ptr] = colorA;
block_ptr++;
}
block_ptr += row_inc;
}
ADVANCE_BLOCK();
break;
/* Unknown opcode */
default:
av_log(s->avctx, AV_LOG_ERROR, "Unknown opcode %d in rpza chunk."
" Skip remaining %d bytes of chunk data.\n", opcode,
chunk_size - stream_ptr);
return;
} /* Opcode switch */
}
}
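The two rpza_decode_stream() listings above differ only in where the safety work happens: the first one calls ADVANCE_BLOCK() and verifies the remaining chunk bytes (s->size - stream_ptr < n_blocks * 4, or < 16 for the 16-color case) before it writes any pixels, while the second one writes first and advances afterwards. As a minimal standalone sketch of that "check the remaining input before consuming it" pattern — with a hypothetical reader struct standing in for the decoder's buf/size/stream_ptr state, not the FFmpeg code itself — something like the following illustrates the guard:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical reader state standing in for the decoder's size/stream_ptr. */
struct reader {
    size_t size;   /* total bytes available  */
    size_t pos;    /* bytes already consumed */
};

/* Consume n_blocks * 4 index bytes only if they are actually present,
 * mirroring the "s->size - stream_ptr < n_blocks * 4" guard above. */
static int consume_block_indices(struct reader *r, size_t n_blocks)
{
    if (r->size - r->pos < n_blocks * 4)
        return -1;              /* truncated chunk: stop decoding */
    r->pos += n_blocks * 4;     /* safe to read these bytes now   */
    return 0;
}

int main(void)
{
    struct reader r = { 8, 0 };

    printf("%d\n", consume_block_indices(&r, 2));  /* 0: exactly enough bytes */
    printf("%d\n", consume_block_indices(&r, 1));  /* -1: would overrun       */
    return 0;
}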
|
C
|
FFmpeg
| 1 |
CVE-2015-1285
|
https://www.cvedetails.com/cve/CVE-2015-1285/
|
CWE-200
|
https://github.com/chromium/chromium/commit/39595f8d4dffcb644d438106dcb64a30c139ff0e
|
39595f8d4dffcb644d438106dcb64a30c139ff0e
|
[reland] Do not set default wallpaper unless it should do so.
[email protected], [email protected]
Bug: 751382
Change-Id: Id0793dfe467f737526a95b1e66ed01fbb8860bda
Reviewed-on: https://chromium-review.googlesource.com/619754
Commit-Queue: Xiaoqian Dai <[email protected]>
Reviewed-by: Alexander Alekseev <[email protected]>
Reviewed-by: Biao She <[email protected]>
Cr-Original-Commit-Position: refs/heads/master@{#498325}
Reviewed-on: https://chromium-review.googlesource.com/646430
Cr-Commit-Position: refs/heads/master@{#498982}
|
bool WallpaperManagerBase::GetWallpaperFromCache(const AccountId& account_id,
gfx::ImageSkia* image) {
DCHECK(thread_checker_.CalledOnValidThread());
CustomWallpaperMap::const_iterator it = wallpaper_cache_.find(account_id);
if (it != wallpaper_cache_.end() && !(*it).second.second.isNull()) {
*image = (*it).second.second;
return true;
}
return false;
}
|
bool WallpaperManagerBase::GetWallpaperFromCache(const AccountId& account_id,
gfx::ImageSkia* image) {
DCHECK(thread_checker_.CalledOnValidThread());
CustomWallpaperMap::const_iterator it = wallpaper_cache_.find(account_id);
if (it != wallpaper_cache_.end() && !(*it).second.second.isNull()) {
*image = (*it).second.second;
return true;
}
return false;
}
|
C
|
Chrome
| 0 |
CVE-2011-3896
|
https://www.cvedetails.com/cve/CVE-2011-3896/
|
CWE-119
|
https://github.com/chromium/chromium/commit/5925dff83699508b5e2735afb0297dfb310e159d
|
5925dff83699508b5e2735afb0297dfb310e159d
|
Implement a bubble that appears at the top of the screen when a tab enters
fullscreen mode via webkitRequestFullScreen(), telling the user how to exit
fullscreen.
This is implemented as an NSView rather than an NSWindow because the floating
chrome that appears in presentation mode should overlap the bubble.
Content-initiated fullscreen mode makes use of 'presentation mode' on the Mac:
the mode in which the UI is hidden, accessible by moving the cursor to the top
of the screen. On Snow Leopard, this mode is synonymous with fullscreen mode.
On Lion, however, fullscreen mode does not imply presentation mode: in
non-presentation fullscreen mode, the chrome is permanently shown. It is
possible to switch between presentation mode and fullscreen mode using the
presentation mode UI control.
When a tab initiates fullscreen mode on Lion, we enter presentation mode if not
in presentation mode already. When the user exits fullscreen mode using Chrome
UI (i.e. keyboard shortcuts, menu items, buttons, switching tabs, etc.) we
return the user to the mode they were in before the tab entered fullscreen.
BUG=14471
TEST=Enter fullscreen mode using webkitRequestFullScreen. You should see a bubble pop down from the top of the screen.
Need to test the Lion logic somehow, with no Lion trybots.
BUG=96883
Original review http://codereview.chromium.org/7890056/
TBR=thakis
Review URL: http://codereview.chromium.org/7920024
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@101624 0039d316-1c4b-4281-b951-d872f2087c98
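The mode bookkeeping described above — remember which mode the browser was in before the tab requested fullscreen, then restore it when fullscreen is exited through the Chrome UI — can be illustrated with a small hedged sketch; the enum and function names below are invented for illustration and do not appear in the Chromium sources:

#include <stdio.h>

/* Hypothetical mode bookkeeping for "return the user to the mode they were
 * in before the tab entered fullscreen"; not Chromium code. */
enum ui_mode { MODE_NORMAL, MODE_FULLSCREEN, MODE_PRESENTATION };

static enum ui_mode current_mode = MODE_NORMAL;
static enum ui_mode mode_before_tab_fullscreen = MODE_NORMAL;

static void enter_tab_fullscreen(void)
{
    mode_before_tab_fullscreen = current_mode;   /* remember where we came from */
    current_mode = MODE_PRESENTATION;            /* tab fullscreen hides the UI */
}

static void exit_tab_fullscreen(void)
{
    current_mode = mode_before_tab_fullscreen;   /* restore the previous mode */
}

int main(void)
{
    enter_tab_fullscreen();
    exit_tab_fullscreen();
    printf("back to mode %d\n", current_mode);   /* prints 0 (MODE_NORMAL) */
    return 0;
}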
|
void Browser::CheckDownloadsInProgress(bool* normal_downloads_are_present,
bool* incognito_downloads_are_present) {
*normal_downloads_are_present = false;
*incognito_downloads_are_present = false;
DownloadManager* download_manager = NULL;
if (profile()->HasCreatedDownloadManager())
download_manager = profile()->GetDownloadManager();
if (profile()->IsOffTheRecord()) {
*incognito_downloads_are_present =
(download_manager && download_manager->in_progress_count() != 0);
if (profile()->GetOriginalProfile()->HasCreatedDownloadManager())
download_manager = profile()->GetOriginalProfile()->GetDownloadManager();
}
*normal_downloads_are_present =
(download_manager && download_manager->in_progress_count() != 0);
}
|
void Browser::CheckDownloadsInProgress(bool* normal_downloads_are_present,
bool* incognito_downloads_are_present) {
*normal_downloads_are_present = false;
*incognito_downloads_are_present = false;
DownloadManager* download_manager = NULL;
if (profile()->HasCreatedDownloadManager())
download_manager = profile()->GetDownloadManager();
if (profile()->IsOffTheRecord()) {
*incognito_downloads_are_present =
(download_manager && download_manager->in_progress_count() != 0);
if (profile()->GetOriginalProfile()->HasCreatedDownloadManager())
download_manager = profile()->GetOriginalProfile()->GetDownloadManager();
}
*normal_downloads_are_present =
(download_manager && download_manager->in_progress_count() != 0);
}
|
C
|
Chrome
| 0 |
CVE-2019-13311
|
https://www.cvedetails.com/cve/CVE-2019-13311/
|
CWE-399
|
https://github.com/ImageMagick/ImageMagick6/commit/bb812022d0bc12107db215c981cab0b1ccd73d91
|
bb812022d0bc12107db215c981cab0b1ccd73d91
|
https://github.com/ImageMagick/ImageMagick/issues/1623
|
WandExport MagickBooleanType MogrifyImageInfo(ImageInfo *image_info,
const int argc,const char **argv,ExceptionInfo *exception)
{
const char
*option;
GeometryInfo
geometry_info;
ssize_t
count;
register ssize_t
i;
/*
Initialize method variables.
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
if (argc < 0)
return(MagickTrue);
/*
Set the image settings.
*/
for (i=0; i < (ssize_t) argc; i++)
{
option=argv[i];
if (IsCommandOption(option) == MagickFalse)
continue;
count=ParseCommandOption(MagickCommandOptions,MagickFalse,option);
count=MagickMax(count,0L);
if ((i+count) >= (ssize_t) argc)
break;
switch (*(option+1))
{
case 'a':
{
if (LocaleCompare("adjoin",option+1) == 0)
{
image_info->adjoin=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("antialias",option+1) == 0)
{
image_info->antialias=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("attenuate",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("authenticate",option+1) == 0)
{
if (*option == '+')
(void) CloneString(&image_info->authenticate,(char *) NULL);
else
(void) CloneString(&image_info->authenticate,argv[i+1]);
break;
}
break;
}
case 'b':
{
if (LocaleCompare("background",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
(void) QueryColorDatabase(MogrifyBackgroundColor,
&image_info->background_color,exception);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
(void) QueryColorDatabase(argv[i+1],&image_info->background_color,
exception);
break;
}
if (LocaleCompare("bias",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0.0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("black-point-compensation",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"false");
break;
}
(void) SetImageOption(image_info,option+1,"true");
break;
}
if (LocaleCompare("blue-primary",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0.0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("bordercolor",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
(void) QueryColorDatabase(MogrifyBorderColor,
&image_info->border_color,exception);
break;
}
(void) QueryColorDatabase(argv[i+1],&image_info->border_color,
exception);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("box",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,"undercolor","none");
break;
}
(void) SetImageOption(image_info,"undercolor",argv[i+1]);
break;
}
break;
}
case 'c':
{
if (LocaleCompare("cache",option+1) == 0)
{
MagickSizeType
limit;
limit=MagickResourceInfinity;
if (LocaleCompare("unlimited",argv[i+1]) != 0)
limit=(MagickSizeType) SiPrefixToDoubleInterval(argv[i+1],100.0);
(void) SetMagickResourceLimit(MemoryResource,limit);
(void) SetMagickResourceLimit(MapResource,2*limit);
break;
}
if (LocaleCompare("caption",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("channel",option+1) == 0)
{
if (*option == '+')
{
image_info->channel=DefaultChannels;
break;
}
image_info->channel=(ChannelType) ParseChannelOption(argv[i+1]);
break;
}
if (LocaleCompare("colors",option+1) == 0)
{
image_info->colors=StringToUnsignedLong(argv[i+1]);
break;
}
if (LocaleCompare("colorspace",option+1) == 0)
{
if (*option == '+')
{
image_info->colorspace=UndefinedColorspace;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->colorspace=(ColorspaceType) ParseCommandOption(
MagickColorspaceOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("comment",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("compose",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("compress",option+1) == 0)
{
if (*option == '+')
{
image_info->compression=UndefinedCompression;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->compression=(CompressionType) ParseCommandOption(
MagickCompressOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'd':
{
if (LocaleCompare("debug",option+1) == 0)
{
if (*option == '+')
(void) SetLogEventMask("none");
else
(void) SetLogEventMask(argv[i+1]);
image_info->debug=IsEventLogging();
break;
}
if (LocaleCompare("define",option+1) == 0)
{
if (*option == '+')
{
if (LocaleNCompare(argv[i+1],"registry:",9) == 0)
(void) DeleteImageRegistry(argv[i+1]+9);
else
(void) DeleteImageOption(image_info,argv[i+1]);
break;
}
if (LocaleNCompare(argv[i+1],"registry:",9) == 0)
{
(void) DefineImageRegistry(StringRegistryType,argv[i+1]+9,
exception);
break;
}
(void) DefineImageOption(image_info,argv[i+1]);
break;
}
if (LocaleCompare("delay",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("density",option+1) == 0)
{
/*
Set image density.
*/
if (*option == '+')
{
if (image_info->density != (char *) NULL)
image_info->density=DestroyString(image_info->density);
(void) SetImageOption(image_info,option+1,"72");
break;
}
(void) CloneString(&image_info->density,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("depth",option+1) == 0)
{
if (*option == '+')
{
image_info->depth=MAGICKCORE_QUANTUM_DEPTH;
break;
}
image_info->depth=StringToUnsignedLong(argv[i+1]);
break;
}
if (LocaleCompare("direction",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("display",option+1) == 0)
{
if (*option == '+')
{
if (image_info->server_name != (char *) NULL)
image_info->server_name=DestroyString(
image_info->server_name);
break;
}
(void) CloneString(&image_info->server_name,argv[i+1]);
break;
}
if (LocaleCompare("dispose",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("dither",option+1) == 0)
{
if (*option == '+')
{
image_info->dither=MagickFalse;
(void) SetImageOption(image_info,option+1,"none");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
image_info->dither=MagickTrue;
break;
}
break;
}
case 'e':
{
if (LocaleCompare("encoding",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("endian",option+1) == 0)
{
if (*option == '+')
{
image_info->endian=UndefinedEndian;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->endian=(EndianType) ParseCommandOption(
MagickEndianOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("extract",option+1) == 0)
{
/*
Set image extract geometry.
*/
if (*option == '+')
{
if (image_info->extract != (char *) NULL)
image_info->extract=DestroyString(image_info->extract);
break;
}
(void) CloneString(&image_info->extract,argv[i+1]);
break;
}
break;
}
case 'f':
{
if (LocaleCompare("family",option+1) == 0)
{
if (*option != '+')
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("fill",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"none");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("filter",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("font",option+1) == 0)
{
if (*option == '+')
{
if (image_info->font != (char *) NULL)
image_info->font=DestroyString(image_info->font);
break;
}
(void) CloneString(&image_info->font,argv[i+1]);
break;
}
if (LocaleCompare("format",option+1) == 0)
{
register const char
*q;
for (q=strchr(argv[i+1],'%'); q != (char *) NULL; q=strchr(q+1,'%'))
if (strchr("Agkrz@[#",*(q+1)) != (char *) NULL)
image_info->ping=MagickFalse;
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("fuzz",option+1) == 0)
{
if (*option == '+')
{
image_info->fuzz=0.0;
(void) SetImageOption(image_info,option+1,"0");
break;
}
image_info->fuzz=StringToDoubleInterval(argv[i+1],(double)
QuantumRange+1.0);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'g':
{
if (LocaleCompare("gravity",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("green-primary",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0.0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'i':
{
if (LocaleCompare("intensity",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("intent",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("interlace",option+1) == 0)
{
if (*option == '+')
{
image_info->interlace=UndefinedInterlace;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->interlace=(InterlaceType) ParseCommandOption(
MagickInterlaceOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("interline-spacing",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("interpolate",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("interword-spacing",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'k':
{
if (LocaleCompare("kerning",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'l':
{
if (LocaleCompare("label",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("limit",option+1) == 0)
{
MagickSizeType
limit;
ResourceType
type;
if (*option == '+')
break;
type=(ResourceType) ParseCommandOption(MagickResourceOptions,
MagickFalse,argv[i+1]);
limit=MagickResourceInfinity;
if (LocaleCompare("unlimited",argv[i+2]) != 0)
limit=(MagickSizeType) SiPrefixToDoubleInterval(argv[i+2],100.0);
(void) SetMagickResourceLimit(type,limit);
break;
}
if (LocaleCompare("list",option+1) == 0)
{
ssize_t
list;
/*
Display configuration list.
*/
list=ParseCommandOption(MagickListOptions,MagickFalse,argv[i+1]);
switch (list)
{
case MagickCoderOptions:
{
(void) ListCoderInfo((FILE *) NULL,exception);
break;
}
case MagickColorOptions:
{
(void) ListColorInfo((FILE *) NULL,exception);
break;
}
case MagickConfigureOptions:
{
(void) ListConfigureInfo((FILE *) NULL,exception);
break;
}
case MagickDelegateOptions:
{
(void) ListDelegateInfo((FILE *) NULL,exception);
break;
}
case MagickFontOptions:
{
(void) ListTypeInfo((FILE *) NULL,exception);
break;
}
case MagickFormatOptions:
{
(void) ListMagickInfo((FILE *) NULL,exception);
break;
}
case MagickLocaleOptions:
{
(void) ListLocaleInfo((FILE *) NULL,exception);
break;
}
case MagickLogOptions:
{
(void) ListLogInfo((FILE *) NULL,exception);
break;
}
case MagickMagicOptions:
{
(void) ListMagicInfo((FILE *) NULL,exception);
break;
}
case MagickMimeOptions:
{
(void) ListMimeInfo((FILE *) NULL,exception);
break;
}
case MagickModuleOptions:
{
(void) ListModuleInfo((FILE *) NULL,exception);
break;
}
case MagickPolicyOptions:
{
(void) ListPolicyInfo((FILE *) NULL,exception);
break;
}
case MagickResourceOptions:
{
(void) ListMagickResourceInfo((FILE *) NULL,exception);
break;
}
case MagickThresholdOptions:
{
(void) ListThresholdMaps((FILE *) NULL,exception);
break;
}
default:
{
(void) ListCommandOptions((FILE *) NULL,(CommandOption) list,
exception);
break;
}
}
break;
}
if (LocaleCompare("log",option+1) == 0)
{
if (*option == '+')
break;
(void) SetLogFormat(argv[i+1]);
break;
}
if (LocaleCompare("loop",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'm':
{
if (LocaleCompare("matte",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"false");
break;
}
(void) SetImageOption(image_info,option+1,"true");
break;
}
if (LocaleCompare("mattecolor",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,argv[i+1]);
(void) QueryColorDatabase(MogrifyMatteColor,
&image_info->matte_color,exception);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
(void) QueryColorDatabase(argv[i+1],&image_info->matte_color,
exception);
break;
}
if (LocaleCompare("metric",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("monitor",option+1) == 0)
{
(void) SetImageInfoProgressMonitor(image_info,MonitorProgress,
(void *) NULL);
break;
}
if (LocaleCompare("monochrome",option+1) == 0)
{
image_info->monochrome=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
break;
}
case 'o':
{
if (LocaleCompare("orient",option+1) == 0)
{
if (*option == '+')
{
image_info->orientation=UndefinedOrientation;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->orientation=(OrientationType) ParseCommandOption(
MagickOrientationOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
}
case 'p':
{
if (LocaleCompare("page",option+1) == 0)
{
char
*canonical_page,
page[MaxTextExtent];
const char
*image_option;
MagickStatusType
flags;
RectangleInfo
geometry;
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
(void) CloneString(&image_info->page,(char *) NULL);
break;
}
(void) memset(&geometry,0,sizeof(geometry));
image_option=GetImageOption(image_info,"page");
if (image_option != (const char *) NULL)
(void) ParseAbsoluteGeometry(image_option,&geometry);
canonical_page=GetPageGeometry(argv[i+1]);
flags=ParseAbsoluteGeometry(canonical_page,&geometry);
canonical_page=DestroyString(canonical_page);
(void) FormatLocaleString(page,MaxTextExtent,"%lux%lu",
(unsigned long) geometry.width,(unsigned long) geometry.height);
if (((flags & XValue) != 0) || ((flags & YValue) != 0))
(void) FormatLocaleString(page,MaxTextExtent,"%lux%lu%+ld%+ld",
(unsigned long) geometry.width,(unsigned long) geometry.height,
(long) geometry.x,(long) geometry.y);
(void) SetImageOption(image_info,option+1,page);
(void) CloneString(&image_info->page,page);
break;
}
if (LocaleCompare("pen",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"none");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("ping",option+1) == 0)
{
image_info->ping=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("pointsize",option+1) == 0)
{
if (*option == '+')
geometry_info.rho=0.0;
else
(void) ParseGeometry(argv[i+1],&geometry_info);
image_info->pointsize=geometry_info.rho;
break;
}
if (LocaleCompare("precision",option+1) == 0)
{
(void) SetMagickPrecision(StringToInteger(argv[i+1]));
break;
}
if (LocaleCompare("preview",option+1) == 0)
{
/*
Preview image.
*/
if (*option == '+')
{
image_info->preview_type=UndefinedPreview;
break;
}
image_info->preview_type=(PreviewType) ParseCommandOption(
MagickPreviewOptions,MagickFalse,argv[i+1]);
break;
}
break;
}
case 'q':
{
if (LocaleCompare("quality",option+1) == 0)
{
/*
Set image compression quality.
*/
if (*option == '+')
{
image_info->quality=UndefinedCompressionQuality;
(void) SetImageOption(image_info,option+1,"0");
break;
}
image_info->quality=StringToUnsignedLong(argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("quiet",option+1) == 0)
{
static WarningHandler
warning_handler = (WarningHandler) NULL;
if (*option == '+')
{
/*
Restore error or warning messages.
*/
warning_handler=SetWarningHandler(warning_handler);
break;
}
/*
Suppress error or warning messages.
*/
warning_handler=SetWarningHandler((WarningHandler) NULL);
break;
}
break;
}
case 'r':
{
if (LocaleCompare("red-primary",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0.0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 's':
{
if (LocaleCompare("sampling-factor",option+1) == 0)
{
/*
Set image sampling factor.
*/
if (*option == '+')
{
if (image_info->sampling_factor != (char *) NULL)
image_info->sampling_factor=DestroyString(
image_info->sampling_factor);
break;
}
(void) CloneString(&image_info->sampling_factor,argv[i+1]);
break;
}
if (LocaleCompare("scene",option+1) == 0)
{
/*
Set image scene.
*/
if (*option == '+')
{
image_info->scene=0;
(void) SetImageOption(image_info,option+1,"0");
break;
}
image_info->scene=StringToUnsignedLong(argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("seed",option+1) == 0)
{
unsigned long
seed;
if (*option == '+')
{
seed=(unsigned long) time((time_t *) NULL);
SetRandomSecretKey(seed);
break;
}
seed=StringToUnsignedLong(argv[i+1]);
SetRandomSecretKey(seed);
break;
}
if (LocaleCompare("size",option+1) == 0)
{
if (*option == '+')
{
if (image_info->size != (char *) NULL)
image_info->size=DestroyString(image_info->size);
break;
}
(void) CloneString(&image_info->size,argv[i+1]);
break;
}
if (LocaleCompare("stroke",option+1) == 0)
{
if (*option == '+')
(void) SetImageOption(image_info,option+1,"none");
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("strokewidth",option+1) == 0)
{
if (*option == '+')
(void) SetImageOption(image_info,option+1,"0");
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("style",option+1) == 0)
{
if (*option == '+')
(void) SetImageOption(image_info,option+1,"none");
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("synchronize",option+1) == 0)
{
if (*option == '+')
{
image_info->synchronize=MagickFalse;
break;
}
image_info->synchronize=MagickTrue;
break;
}
break;
}
case 't':
{
if (LocaleCompare("taint",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"false");
break;
}
(void) SetImageOption(image_info,option+1,"true");
break;
}
if (LocaleCompare("texture",option+1) == 0)
{
if (*option == '+')
{
if (image_info->texture != (char *) NULL)
image_info->texture=DestroyString(image_info->texture);
break;
}
(void) CloneString(&image_info->texture,argv[i+1]);
break;
}
if (LocaleCompare("tile-offset",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("transparent-color",option+1) == 0)
{
if (*option == '+')
{
(void) QueryColorDatabase("none",&image_info->transparent_color, exception);
(void) SetImageOption(image_info,option+1,"none");
break;
}
(void) QueryColorDatabase(argv[i+1],&image_info->transparent_color,
exception);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("type",option+1) == 0)
{
if (*option == '+')
{
image_info->type=UndefinedType;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->type=(ImageType) ParseCommandOption(MagickTypeOptions,
MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'u':
{
if (LocaleCompare("undercolor",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("units",option+1) == 0)
{
if (*option == '+')
{
image_info->units=UndefinedResolution;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->units=(ResolutionType) ParseCommandOption(
MagickResolutionOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'v':
{
if (LocaleCompare("verbose",option+1) == 0)
{
if (*option == '+')
{
image_info->verbose=MagickFalse;
break;
}
image_info->verbose=MagickTrue;
image_info->ping=MagickFalse;
break;
}
if (LocaleCompare("view",option+1) == 0)
{
if (*option == '+')
{
if (image_info->view != (char *) NULL)
image_info->view=DestroyString(image_info->view);
break;
}
(void) CloneString(&image_info->view,argv[i+1]);
break;
}
if (LocaleCompare("virtual-pixel",option+1) == 0)
{
if (*option == '+')
{
image_info->virtual_pixel_method=UndefinedVirtualPixelMethod;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->virtual_pixel_method=(VirtualPixelMethod)
ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,
argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'w':
{
if (LocaleCompare("weight",option+1) == 0)
{
if (*option == '+')
(void) SetImageOption(image_info,option+1,"0");
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("white-point",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0.0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
default:
break;
}
i+=count;
}
return(MagickTrue);
}
|
WandExport MagickBooleanType MogrifyImageInfo(ImageInfo *image_info,
const int argc,const char **argv,ExceptionInfo *exception)
{
const char
*option;
GeometryInfo
geometry_info;
ssize_t
count;
register ssize_t
i;
/*
Initialize method variables.
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
if (argc < 0)
return(MagickTrue);
/*
Set the image settings.
*/
for (i=0; i < (ssize_t) argc; i++)
{
option=argv[i];
if (IsCommandOption(option) == MagickFalse)
continue;
count=ParseCommandOption(MagickCommandOptions,MagickFalse,option);
count=MagickMax(count,0L);
if ((i+count) >= (ssize_t) argc)
break;
switch (*(option+1))
{
case 'a':
{
if (LocaleCompare("adjoin",option+1) == 0)
{
image_info->adjoin=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("antialias",option+1) == 0)
{
image_info->antialias=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("attenuate",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("authenticate",option+1) == 0)
{
if (*option == '+')
(void) CloneString(&image_info->authenticate,(char *) NULL);
else
(void) CloneString(&image_info->authenticate,argv[i+1]);
break;
}
break;
}
case 'b':
{
if (LocaleCompare("background",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
(void) QueryColorDatabase(MogrifyBackgroundColor,
&image_info->background_color,exception);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
(void) QueryColorDatabase(argv[i+1],&image_info->background_color,
exception);
break;
}
if (LocaleCompare("bias",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0.0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("black-point-compensation",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"false");
break;
}
(void) SetImageOption(image_info,option+1,"true");
break;
}
if (LocaleCompare("blue-primary",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0.0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("bordercolor",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
(void) QueryColorDatabase(MogrifyBorderColor,
&image_info->border_color,exception);
break;
}
(void) QueryColorDatabase(argv[i+1],&image_info->border_color,
exception);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("box",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,"undercolor","none");
break;
}
(void) SetImageOption(image_info,"undercolor",argv[i+1]);
break;
}
break;
}
case 'c':
{
if (LocaleCompare("cache",option+1) == 0)
{
MagickSizeType
limit;
limit=MagickResourceInfinity;
if (LocaleCompare("unlimited",argv[i+1]) != 0)
limit=(MagickSizeType) SiPrefixToDoubleInterval(argv[i+1],100.0);
(void) SetMagickResourceLimit(MemoryResource,limit);
(void) SetMagickResourceLimit(MapResource,2*limit);
break;
}
if (LocaleCompare("caption",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("channel",option+1) == 0)
{
if (*option == '+')
{
image_info->channel=DefaultChannels;
break;
}
image_info->channel=(ChannelType) ParseChannelOption(argv[i+1]);
break;
}
if (LocaleCompare("colors",option+1) == 0)
{
image_info->colors=StringToUnsignedLong(argv[i+1]);
break;
}
if (LocaleCompare("colorspace",option+1) == 0)
{
if (*option == '+')
{
image_info->colorspace=UndefinedColorspace;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->colorspace=(ColorspaceType) ParseCommandOption(
MagickColorspaceOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("comment",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("compose",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("compress",option+1) == 0)
{
if (*option == '+')
{
image_info->compression=UndefinedCompression;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->compression=(CompressionType) ParseCommandOption(
MagickCompressOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'd':
{
if (LocaleCompare("debug",option+1) == 0)
{
if (*option == '+')
(void) SetLogEventMask("none");
else
(void) SetLogEventMask(argv[i+1]);
image_info->debug=IsEventLogging();
break;
}
if (LocaleCompare("define",option+1) == 0)
{
if (*option == '+')
{
if (LocaleNCompare(argv[i+1],"registry:",9) == 0)
(void) DeleteImageRegistry(argv[i+1]+9);
else
(void) DeleteImageOption(image_info,argv[i+1]);
break;
}
if (LocaleNCompare(argv[i+1],"registry:",9) == 0)
{
(void) DefineImageRegistry(StringRegistryType,argv[i+1]+9,
exception);
break;
}
(void) DefineImageOption(image_info,argv[i+1]);
break;
}
if (LocaleCompare("delay",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("density",option+1) == 0)
{
/*
Set image density.
*/
if (*option == '+')
{
if (image_info->density != (char *) NULL)
image_info->density=DestroyString(image_info->density);
(void) SetImageOption(image_info,option+1,"72");
break;
}
(void) CloneString(&image_info->density,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("depth",option+1) == 0)
{
if (*option == '+')
{
image_info->depth=MAGICKCORE_QUANTUM_DEPTH;
break;
}
image_info->depth=StringToUnsignedLong(argv[i+1]);
break;
}
if (LocaleCompare("direction",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("display",option+1) == 0)
{
if (*option == '+')
{
if (image_info->server_name != (char *) NULL)
image_info->server_name=DestroyString(
image_info->server_name);
break;
}
(void) CloneString(&image_info->server_name,argv[i+1]);
break;
}
if (LocaleCompare("dispose",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("dither",option+1) == 0)
{
if (*option == '+')
{
image_info->dither=MagickFalse;
(void) SetImageOption(image_info,option+1,"none");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
image_info->dither=MagickTrue;
break;
}
break;
}
case 'e':
{
if (LocaleCompare("encoding",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("endian",option+1) == 0)
{
if (*option == '+')
{
image_info->endian=UndefinedEndian;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->endian=(EndianType) ParseCommandOption(
MagickEndianOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("extract",option+1) == 0)
{
/*
Set image extract geometry.
*/
if (*option == '+')
{
if (image_info->extract != (char *) NULL)
image_info->extract=DestroyString(image_info->extract);
break;
}
(void) CloneString(&image_info->extract,argv[i+1]);
break;
}
break;
}
case 'f':
{
if (LocaleCompare("family",option+1) == 0)
{
if (*option != '+')
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("fill",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"none");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("filter",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("font",option+1) == 0)
{
if (*option == '+')
{
if (image_info->font != (char *) NULL)
image_info->font=DestroyString(image_info->font);
break;
}
(void) CloneString(&image_info->font,argv[i+1]);
break;
}
if (LocaleCompare("format",option+1) == 0)
{
register const char
*q;
for (q=strchr(argv[i+1],'%'); q != (char *) NULL; q=strchr(q+1,'%'))
if (strchr("Agkrz@[#",*(q+1)) != (char *) NULL)
image_info->ping=MagickFalse;
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("fuzz",option+1) == 0)
{
if (*option == '+')
{
image_info->fuzz=0.0;
(void) SetImageOption(image_info,option+1,"0");
break;
}
image_info->fuzz=StringToDoubleInterval(argv[i+1],(double)
QuantumRange+1.0);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'g':
{
if (LocaleCompare("gravity",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("green-primary",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0.0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'i':
{
if (LocaleCompare("intensity",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("intent",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("interlace",option+1) == 0)
{
if (*option == '+')
{
image_info->interlace=UndefinedInterlace;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->interlace=(InterlaceType) ParseCommandOption(
MagickInterlaceOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("interline-spacing",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("interpolate",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("interword-spacing",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'k':
{
if (LocaleCompare("kerning",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'l':
{
if (LocaleCompare("label",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("limit",option+1) == 0)
{
MagickSizeType
limit;
ResourceType
type;
if (*option == '+')
break;
type=(ResourceType) ParseCommandOption(MagickResourceOptions,
MagickFalse,argv[i+1]);
limit=MagickResourceInfinity;
if (LocaleCompare("unlimited",argv[i+2]) != 0)
limit=(MagickSizeType) SiPrefixToDoubleInterval(argv[i+2],100.0);
(void) SetMagickResourceLimit(type,limit);
break;
}
if (LocaleCompare("list",option+1) == 0)
{
ssize_t
list;
/*
Display configuration list.
*/
list=ParseCommandOption(MagickListOptions,MagickFalse,argv[i+1]);
switch (list)
{
case MagickCoderOptions:
{
(void) ListCoderInfo((FILE *) NULL,exception);
break;
}
case MagickColorOptions:
{
(void) ListColorInfo((FILE *) NULL,exception);
break;
}
case MagickConfigureOptions:
{
(void) ListConfigureInfo((FILE *) NULL,exception);
break;
}
case MagickDelegateOptions:
{
(void) ListDelegateInfo((FILE *) NULL,exception);
break;
}
case MagickFontOptions:
{
(void) ListTypeInfo((FILE *) NULL,exception);
break;
}
case MagickFormatOptions:
{
(void) ListMagickInfo((FILE *) NULL,exception);
break;
}
case MagickLocaleOptions:
{
(void) ListLocaleInfo((FILE *) NULL,exception);
break;
}
case MagickLogOptions:
{
(void) ListLogInfo((FILE *) NULL,exception);
break;
}
case MagickMagicOptions:
{
(void) ListMagicInfo((FILE *) NULL,exception);
break;
}
case MagickMimeOptions:
{
(void) ListMimeInfo((FILE *) NULL,exception);
break;
}
case MagickModuleOptions:
{
(void) ListModuleInfo((FILE *) NULL,exception);
break;
}
case MagickPolicyOptions:
{
(void) ListPolicyInfo((FILE *) NULL,exception);
break;
}
case MagickResourceOptions:
{
(void) ListMagickResourceInfo((FILE *) NULL,exception);
break;
}
case MagickThresholdOptions:
{
(void) ListThresholdMaps((FILE *) NULL,exception);
break;
}
default:
{
(void) ListCommandOptions((FILE *) NULL,(CommandOption) list,
exception);
break;
}
}
break;
}
if (LocaleCompare("log",option+1) == 0)
{
if (*option == '+')
break;
(void) SetLogFormat(argv[i+1]);
break;
}
if (LocaleCompare("loop",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'm':
{
if (LocaleCompare("matte",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"false");
break;
}
(void) SetImageOption(image_info,option+1,"true");
break;
}
if (LocaleCompare("mattecolor",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,argv[i+1]);
(void) QueryColorDatabase(MogrifyMatteColor,
&image_info->matte_color,exception);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
(void) QueryColorDatabase(argv[i+1],&image_info->matte_color,
exception);
break;
}
if (LocaleCompare("metric",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("monitor",option+1) == 0)
{
(void) SetImageInfoProgressMonitor(image_info,MonitorProgress,
(void *) NULL);
break;
}
if (LocaleCompare("monochrome",option+1) == 0)
{
image_info->monochrome=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
break;
}
case 'o':
{
if (LocaleCompare("orient",option+1) == 0)
{
if (*option == '+')
{
image_info->orientation=UndefinedOrientation;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->orientation=(OrientationType) ParseCommandOption(
MagickOrientationOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
}
case 'p':
{
if (LocaleCompare("page",option+1) == 0)
{
char
*canonical_page,
page[MaxTextExtent];
const char
*image_option;
MagickStatusType
flags;
RectangleInfo
geometry;
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
(void) CloneString(&image_info->page,(char *) NULL);
break;
}
(void) memset(&geometry,0,sizeof(geometry));
image_option=GetImageOption(image_info,"page");
if (image_option != (const char *) NULL)
(void) ParseAbsoluteGeometry(image_option,&geometry);
canonical_page=GetPageGeometry(argv[i+1]);
flags=ParseAbsoluteGeometry(canonical_page,&geometry);
canonical_page=DestroyString(canonical_page);
(void) FormatLocaleString(page,MaxTextExtent,"%lux%lu",
(unsigned long) geometry.width,(unsigned long) geometry.height);
if (((flags & XValue) != 0) || ((flags & YValue) != 0))
(void) FormatLocaleString(page,MaxTextExtent,"%lux%lu%+ld%+ld",
(unsigned long) geometry.width,(unsigned long) geometry.height,
(long) geometry.x,(long) geometry.y);
(void) SetImageOption(image_info,option+1,page);
(void) CloneString(&image_info->page,page);
break;
}
if (LocaleCompare("pen",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"none");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("ping",option+1) == 0)
{
image_info->ping=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("pointsize",option+1) == 0)
{
if (*option == '+')
geometry_info.rho=0.0;
else
(void) ParseGeometry(argv[i+1],&geometry_info);
image_info->pointsize=geometry_info.rho;
break;
}
if (LocaleCompare("precision",option+1) == 0)
{
(void) SetMagickPrecision(StringToInteger(argv[i+1]));
break;
}
if (LocaleCompare("preview",option+1) == 0)
{
/*
Preview image.
*/
if (*option == '+')
{
image_info->preview_type=UndefinedPreview;
break;
}
image_info->preview_type=(PreviewType) ParseCommandOption(
MagickPreviewOptions,MagickFalse,argv[i+1]);
break;
}
break;
}
case 'q':
{
if (LocaleCompare("quality",option+1) == 0)
{
/*
Set image compression quality.
*/
if (*option == '+')
{
image_info->quality=UndefinedCompressionQuality;
(void) SetImageOption(image_info,option+1,"0");
break;
}
image_info->quality=StringToUnsignedLong(argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("quiet",option+1) == 0)
{
static WarningHandler
warning_handler = (WarningHandler) NULL;
if (*option == '+')
{
/*
Restore error or warning messages.
*/
warning_handler=SetWarningHandler(warning_handler);
break;
}
/*
Suppress error or warning messages.
*/
warning_handler=SetWarningHandler((WarningHandler) NULL);
break;
}
break;
}
case 'r':
{
if (LocaleCompare("red-primary",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0.0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 's':
{
if (LocaleCompare("sampling-factor",option+1) == 0)
{
/*
Set image sampling factor.
*/
if (*option == '+')
{
if (image_info->sampling_factor != (char *) NULL)
image_info->sampling_factor=DestroyString(
image_info->sampling_factor);
break;
}
(void) CloneString(&image_info->sampling_factor,argv[i+1]);
break;
}
if (LocaleCompare("scene",option+1) == 0)
{
/*
Set image scene.
*/
if (*option == '+')
{
image_info->scene=0;
(void) SetImageOption(image_info,option+1,"0");
break;
}
image_info->scene=StringToUnsignedLong(argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("seed",option+1) == 0)
{
unsigned long
seed;
if (*option == '+')
{
seed=(unsigned long) time((time_t *) NULL);
SetRandomSecretKey(seed);
break;
}
seed=StringToUnsignedLong(argv[i+1]);
SetRandomSecretKey(seed);
break;
}
if (LocaleCompare("size",option+1) == 0)
{
if (*option == '+')
{
if (image_info->size != (char *) NULL)
image_info->size=DestroyString(image_info->size);
break;
}
(void) CloneString(&image_info->size,argv[i+1]);
break;
}
if (LocaleCompare("stroke",option+1) == 0)
{
if (*option == '+')
(void) SetImageOption(image_info,option+1,"none");
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("strokewidth",option+1) == 0)
{
if (*option == '+')
(void) SetImageOption(image_info,option+1,"0");
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("style",option+1) == 0)
{
if (*option == '+')
(void) SetImageOption(image_info,option+1,"none");
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("synchronize",option+1) == 0)
{
if (*option == '+')
{
image_info->synchronize=MagickFalse;
break;
}
image_info->synchronize=MagickTrue;
break;
}
break;
}
case 't':
{
if (LocaleCompare("taint",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"false");
break;
}
(void) SetImageOption(image_info,option+1,"true");
break;
}
if (LocaleCompare("texture",option+1) == 0)
{
if (*option == '+')
{
if (image_info->texture != (char *) NULL)
image_info->texture=DestroyString(image_info->texture);
break;
}
(void) CloneString(&image_info->texture,argv[i+1]);
break;
}
if (LocaleCompare("tile-offset",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("transparent-color",option+1) == 0)
{
if (*option == '+')
{
(void) QueryColorDatabase("none",&image_info->transparent_color, exception);
(void) SetImageOption(image_info,option+1,"none");
break;
}
(void) QueryColorDatabase(argv[i+1],&image_info->transparent_color,
exception);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("type",option+1) == 0)
{
if (*option == '+')
{
image_info->type=UndefinedType;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->type=(ImageType) ParseCommandOption(MagickTypeOptions,
MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'u':
{
if (LocaleCompare("undercolor",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("units",option+1) == 0)
{
if (*option == '+')
{
image_info->units=UndefinedResolution;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->units=(ResolutionType) ParseCommandOption(
MagickResolutionOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'v':
{
if (LocaleCompare("verbose",option+1) == 0)
{
if (*option == '+')
{
image_info->verbose=MagickFalse;
break;
}
image_info->verbose=MagickTrue;
image_info->ping=MagickFalse;
break;
}
if (LocaleCompare("view",option+1) == 0)
{
if (*option == '+')
{
if (image_info->view != (char *) NULL)
image_info->view=DestroyString(image_info->view);
break;
}
(void) CloneString(&image_info->view,argv[i+1]);
break;
}
if (LocaleCompare("virtual-pixel",option+1) == 0)
{
if (*option == '+')
{
image_info->virtual_pixel_method=UndefinedVirtualPixelMethod;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->virtual_pixel_method=(VirtualPixelMethod)
ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,
argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'w':
{
if (LocaleCompare("weight",option+1) == 0)
{
if (*option == '+')
(void) SetImageOption(image_info,option+1,"0");
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("white-point",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0.0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
default:
break;
}
i+=count;
}
return(MagickTrue);
}
|
C
|
ImageMagick6
| 0 |
CVE-2015-6773
|
https://www.cvedetails.com/cve/CVE-2015-6773/
|
CWE-119
|
https://github.com/chromium/chromium/commit/33827275411b33371e7bb750cce20f11de85002d
|
33827275411b33371e7bb750cce20f11de85002d
|
Move SelectionTemplate::is_handle_visible_ to FrameSelection
This patch moves |is_handle_visible_| to |FrameSelection| from |SelectionTemplate|
since handle visibility is used only for setting |FrameSelection|, hence it is
a redundant member variable of |SelectionTemplate|.
Bug: 742093
Change-Id: I3add4da3844fb40be34dcb4d4b46b5fa6fed1d7e
Reviewed-on: https://chromium-review.googlesource.com/595389
Commit-Queue: Yoshifumi Inoue <[email protected]>
Reviewed-by: Xiaocheng Hu <[email protected]>
Reviewed-by: Kent Tamura <[email protected]>
Cr-Commit-Position: refs/heads/master@{#491660}
|
bool Editor::CanDHTMLCopy() {
GetFrame().GetDocument()->UpdateStyleAndLayoutIgnorePendingStylesheets();
return !IsInPasswordField(GetFrame()
.Selection()
.ComputeVisibleSelectionInDOMTree()
.Start()) &&
!DispatchCPPEvent(EventTypeNames::beforecopy, kDataTransferNumb);
}
|
bool Editor::CanDHTMLCopy() {
GetFrame().GetDocument()->UpdateStyleAndLayoutIgnorePendingStylesheets();
return !IsInPasswordField(GetFrame()
.Selection()
.ComputeVisibleSelectionInDOMTree()
.Start()) &&
!DispatchCPPEvent(EventTypeNames::beforecopy, kDataTransferNumb);
}
|
C
|
Chrome
| 0 |
CVE-2014-7841
|
https://www.cvedetails.com/cve/CVE-2014-7841/
|
CWE-399
|
https://github.com/torvalds/linux/commit/e40607cbe270a9e8360907cb1e62ddf0736e4864
|
e40607cbe270a9e8360907cb1e62ddf0736e4864
|
net: sctp: fix NULL pointer dereference in af->from_addr_param on malformed packet
An SCTP server doing ASCONF will panic on malformed INIT ping-of-death
in the form of:
------------ INIT[PARAM: SET_PRIMARY_IP] ------------>
While the INIT chunk parameter verification dissects through many things
in order to detect malformed input, it misses to actually check parameters
inside of parameters. E.g. RFC5061, section 4.2.4 proposes a 'set primary
IP address' parameter in ASCONF, which has as a subparameter an address
parameter.
So an attacker may send a parameter type other than SCTP_PARAM_IPV4_ADDRESS
or SCTP_PARAM_IPV6_ADDRESS, param_type2af() will subsequently return 0
and thus sctp_get_af_specific() returns NULL, too, which we then happily
dereference unconditionally through af->from_addr_param().
The trace for the log:
BUG: unable to handle kernel NULL pointer dereference at 0000000000000078
IP: [<ffffffffa01e9c62>] sctp_process_init+0x492/0x990 [sctp]
PGD 0
Oops: 0000 [#1] SMP
[...]
Pid: 0, comm: swapper Not tainted 2.6.32-504.el6.x86_64 #1 Bochs Bochs
RIP: 0010:[<ffffffffa01e9c62>] [<ffffffffa01e9c62>] sctp_process_init+0x492/0x990 [sctp]
[...]
Call Trace:
<IRQ>
[<ffffffffa01f2add>] ? sctp_bind_addr_copy+0x5d/0xe0 [sctp]
[<ffffffffa01e1fcb>] sctp_sf_do_5_1B_init+0x21b/0x340 [sctp]
[<ffffffffa01e3751>] sctp_do_sm+0x71/0x1210 [sctp]
[<ffffffffa01e5c09>] ? sctp_endpoint_lookup_assoc+0xc9/0xf0 [sctp]
[<ffffffffa01e61f6>] sctp_endpoint_bh_rcv+0x116/0x230 [sctp]
[<ffffffffa01ee986>] sctp_inq_push+0x56/0x80 [sctp]
[<ffffffffa01fcc42>] sctp_rcv+0x982/0xa10 [sctp]
[<ffffffffa01d5123>] ? ipt_local_in_hook+0x23/0x28 [iptable_filter]
[<ffffffff8148bdc9>] ? nf_iterate+0x69/0xb0
[<ffffffff81496d10>] ? ip_local_deliver_finish+0x0/0x2d0
[<ffffffff8148bf86>] ? nf_hook_slow+0x76/0x120
[<ffffffff81496d10>] ? ip_local_deliver_finish+0x0/0x2d0
[...]
A minimal way to address this is to check for NULL as we do on all
other such occasions where we know sctp_get_af_specific() could
possibly return with NULL.
Fixes: d6de3097592b ("[SCTP]: Add the handling of "Set Primary IP Address" parameter to INIT")
Signed-off-by: Daniel Borkmann <[email protected]>
Cc: Vlad Yasevich <[email protected]>
Acked-by: Neil Horman <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
const struct sctp_sndrcvinfo *sinfo,
int data_len, __u8 flags, __u16 ssn)
{
struct sctp_chunk *retval;
struct sctp_datahdr dp;
int chunk_len;
/* We assign the TSN as LATE as possible, not here when
* creating the chunk.
*/
dp.tsn = 0;
dp.stream = htons(sinfo->sinfo_stream);
dp.ppid = sinfo->sinfo_ppid;
/* Set the flags for an unordered send. */
if (sinfo->sinfo_flags & SCTP_UNORDERED) {
flags |= SCTP_DATA_UNORDERED;
dp.ssn = 0;
} else
dp.ssn = htons(ssn);
chunk_len = sizeof(dp) + data_len;
retval = sctp_make_data(asoc, flags, chunk_len);
if (!retval)
goto nodata;
retval->subh.data_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
nodata:
return retval;
}
|
struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
const struct sctp_sndrcvinfo *sinfo,
int data_len, __u8 flags, __u16 ssn)
{
struct sctp_chunk *retval;
struct sctp_datahdr dp;
int chunk_len;
/* We assign the TSN as LATE as possible, not here when
* creating the chunk.
*/
dp.tsn = 0;
dp.stream = htons(sinfo->sinfo_stream);
dp.ppid = sinfo->sinfo_ppid;
/* Set the flags for an unordered send. */
if (sinfo->sinfo_flags & SCTP_UNORDERED) {
flags |= SCTP_DATA_UNORDERED;
dp.ssn = 0;
} else
dp.ssn = htons(ssn);
chunk_len = sizeof(dp) + data_len;
retval = sctp_make_data(asoc, flags, chunk_len);
if (!retval)
goto nodata;
retval->subh.data_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
nodata:
return retval;
}
|
C
|
linux
| 0 |
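The kernel commit message in the record above describes the fix as adding a NULL check before dereferencing the address-family table returned by sctp_get_af_specific(). A minimal, self-contained C sketch of that defensive pattern follows; the names and types here (af_ops, lookup_af, process_set_primary) are invented stand-ins for illustration, not the kernel patch itself.
#include <stdio.h>

/* Hypothetical address-family operations table, standing in for the
 * kernel's struct sctp_af returned by sctp_get_af_specific(). */
struct af_ops {
    const char *name;
    void (*from_addr_param)(const void *param);
};

static void ipv4_from_addr_param(const void *param) { (void)param; puts("parsed IPv4 param"); }
static void ipv6_from_addr_param(const void *param) { (void)param; puts("parsed IPv6 param"); }

/* Lookup that returns NULL for unknown parameter types, just as
 * sctp_get_af_specific() does when param_type2af() yields 0. */
static const struct af_ops *lookup_af(int param_type)
{
    static const struct af_ops v4 = { "ipv4", ipv4_from_addr_param };
    static const struct af_ops v6 = { "ipv6", ipv6_from_addr_param };
    switch (param_type) {
    case 5:  return &v4;   /* stand-in for SCTP_PARAM_IPV4_ADDRESS */
    case 6:  return &v6;   /* stand-in for SCTP_PARAM_IPV6_ADDRESS */
    default: return NULL;  /* attacker-supplied unknown type */
    }
}

static int process_set_primary(int embedded_param_type)
{
    const struct af_ops *af = lookup_af(embedded_param_type);
    if (!af)                 /* the added guard: reject instead of crashing */
        return -1;
    af->from_addr_param(NULL);
    return 0;
}

int main(void)
{
    printf("known type -> %d\n", process_set_primary(6));   /* 0  */
    printf("bogus type -> %d\n", process_set_primary(42));  /* -1 */
    return 0;
}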
CVE-2016-5219
|
https://www.cvedetails.com/cve/CVE-2016-5219/
|
CWE-416
|
https://github.com/chromium/chromium/commit/a4150b688a754d3d10d2ca385155b1c95d77d6ae
|
a4150b688a754d3d10d2ca385155b1c95d77d6ae
|
Add GL_PROGRAM_COMPLETION_QUERY_CHROMIUM
This makes the query of GL_COMPLETION_STATUS_KHR to programs much
cheaper by minimizing the round-trip to the GPU thread.
Bug: 881152, 957001
Change-Id: Iadfa798af29225e752c710ca5c25f50b3dd3101a
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1586630
Commit-Queue: Kenneth Russell <[email protected]>
Reviewed-by: Kentaro Hara <[email protected]>
Reviewed-by: Geoff Lang <[email protected]>
Reviewed-by: Kenneth Russell <[email protected]>
Cr-Commit-Position: refs/heads/master@{#657568}
|
error::Error GLES2DecoderPassthroughImpl::DoDestroyGpuFenceCHROMIUM(
GLuint gpu_fence_id) {
if (!feature_info_->feature_flags().chromium_gpu_fence)
return error::kUnknownCommand;
if (!GetGpuFenceManager()->RemoveGpuFence(gpu_fence_id))
return error::kInvalidArguments;
return error::kNoError;
}
|
error::Error GLES2DecoderPassthroughImpl::DoDestroyGpuFenceCHROMIUM(
GLuint gpu_fence_id) {
if (!feature_info_->feature_flags().chromium_gpu_fence)
return error::kUnknownCommand;
if (!GetGpuFenceManager()->RemoveGpuFence(gpu_fence_id))
return error::kInvalidArguments;
return error::kNoError;
}
|
C
|
Chrome
| 0 |
CVE-2018-17204
|
https://www.cvedetails.com/cve/CVE-2018-17204/
|
CWE-617
|
https://github.com/openvswitch/ovs/commit/4af6da3b275b764b1afe194df6499b33d2bf4cde
|
4af6da3b275b764b1afe194df6499b33d2bf4cde
|
ofp-group: Don't assert-fail decoding bad OF1.5 group mod type or command.
When decoding a group mod, the current code validates the group type and
command after the whole group mod has been decoded. The OF1.5 decoder,
however, tries to use the type and command earlier, when it might still be
invalid. This caused an assertion failure (via OVS_NOT_REACHED). This
commit fixes the problem.
ovs-vswitchd does not enable support for OpenFlow 1.5 by default.
Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9249
Signed-off-by: Ben Pfaff <[email protected]>
Reviewed-by: Yifeng Sun <[email protected]>
|
parse_oxms(struct ofpbuf *payload, bool loose,
struct mf_bitmap *exactp, struct mf_bitmap *maskedp)
{
struct mf_bitmap exact = MF_BITMAP_INITIALIZER;
struct mf_bitmap masked = MF_BITMAP_INITIALIZER;
while (payload->size > 0) {
const struct mf_field *field;
enum ofperr error;
bool hasmask;
error = nx_pull_header(payload, NULL, &field, &hasmask);
if (!error) {
bitmap_set1(hasmask ? masked.bm : exact.bm, field->id);
} else if (error != OFPERR_OFPBMC_BAD_FIELD || !loose) {
return error;
}
}
if (exactp) {
*exactp = exact;
} else if (!bitmap_is_all_zeros(exact.bm, MFF_N_IDS)) {
return OFPERR_OFPBMC_BAD_MASK;
}
if (maskedp) {
*maskedp = masked;
} else if (!bitmap_is_all_zeros(masked.bm, MFF_N_IDS)) {
return OFPERR_OFPBMC_BAD_MASK;
}
return 0;
}
|
parse_oxms(struct ofpbuf *payload, bool loose,
struct mf_bitmap *exactp, struct mf_bitmap *maskedp)
{
struct mf_bitmap exact = MF_BITMAP_INITIALIZER;
struct mf_bitmap masked = MF_BITMAP_INITIALIZER;
while (payload->size > 0) {
const struct mf_field *field;
enum ofperr error;
bool hasmask;
error = nx_pull_header(payload, NULL, &field, &hasmask);
if (!error) {
bitmap_set1(hasmask ? masked.bm : exact.bm, field->id);
} else if (error != OFPERR_OFPBMC_BAD_FIELD || !loose) {
return error;
}
}
if (exactp) {
*exactp = exact;
} else if (!bitmap_is_all_zeros(exact.bm, MFF_N_IDS)) {
return OFPERR_OFPBMC_BAD_MASK;
}
if (maskedp) {
*maskedp = masked;
} else if (!bitmap_is_all_zeros(masked.bm, MFF_N_IDS)) {
return OFPERR_OFPBMC_BAD_MASK;
}
return 0;
}
|
C
|
ovs
| 0 |
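The ovs commit message above replaces an assertion (OVS_NOT_REACHED) reachable from attacker-controlled input with ordinary validation of the group type and command before they are used. A hedged, self-contained sketch of that validate-before-dispatch pattern; the enum values and error code are invented for illustration and do not mirror ofp-group.c.
#include <stdio.h>

/* Stand-ins for OpenFlow group-mod fields; the real decoder works on
 * ofputil structures and returns enum ofperr values. */
enum group_type { GT_ALL, GT_SELECT, GT_INDIRECT, GT_FF, GT_N_TYPES };

#define ERR_BAD_TYPE 1

/* Instead of asserting on an unexpected value, validate the wire field
 * and report a decode error for malformed messages. */
static int decode_group_type(unsigned int wire_type, enum group_type *out)
{
    if (wire_type >= GT_N_TYPES)
        return ERR_BAD_TYPE;      /* malformed input, not a program bug */
    *out = (enum group_type) wire_type;
    return 0;
}

int main(void)
{
    enum group_type t;
    printf("type 1  -> %d\n", decode_group_type(1, &t));    /* 0 */
    printf("type 99 -> %d\n", decode_group_type(99, &t));   /* 1 */
    return 0;
}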
CVE-2009-3605
|
https://www.cvedetails.com/cve/CVE-2009-3605/
|
CWE-189
|
https://cgit.freedesktop.org/poppler/poppler/commit/?id=7b2d314a61fd0e12f47c62996cb49ec0d1ba747a
|
7b2d314a61fd0e12f47c62996cb49ec0d1ba747a
| null |
void ArthurOutputDev::drawImageMask(GfxState *state, Object *ref, Stream *str,
int width, int height, GBool invert,
GBool inlineImg)
{
qDebug() << "drawImageMask";
#if 0
unsigned char *buffer;
unsigned char *dest;
cairo_surface_t *image;
cairo_pattern_t *pattern;
int x, y;
ImageStream *imgStr;
Guchar *pix;
double *ctm;
cairo_matrix_t matrix;
int invert_bit;
int row_stride;
row_stride = (width + 3) & ~3;
buffer = (unsigned char *) malloc (height * row_stride);
if (buffer == NULL) {
error(-1, "Unable to allocate memory for image.");
return;
}
/* TODO: Do we want to cache these? */
imgStr = new ImageStream(str, width, 1, 1);
imgStr->reset();
invert_bit = invert ? 1 : 0;
for (y = 0; y < height; y++) {
pix = imgStr->getLine();
dest = buffer + y * row_stride;
for (x = 0; x < width; x++) {
if (pix[x] ^ invert_bit)
*dest++ = 0;
else
*dest++ = 255;
}
}
image = cairo_image_surface_create_for_data (buffer, CAIRO_FORMAT_A8,
width, height, row_stride);
if (image == NULL)
return;
pattern = cairo_pattern_create_for_surface (image);
if (pattern == NULL)
return;
ctm = state->getCTM();
LOG (printf ("drawImageMask %dx%d, matrix: %f, %f, %f, %f, %f, %f\n",
width, height, ctm[0], ctm[1], ctm[2], ctm[3], ctm[4], ctm[5]));
matrix.xx = ctm[0] / width;
matrix.xy = -ctm[2] / height;
matrix.yx = ctm[1] / width;
matrix.yy = -ctm[3] / height;
matrix.x0 = ctm[2] + ctm[4];
matrix.y0 = ctm[3] + ctm[5];
cairo_matrix_invert (&matrix);
cairo_pattern_set_matrix (pattern, &matrix);
cairo_pattern_set_filter (pattern, CAIRO_FILTER_BEST);
/* FIXME: Doesn't the image mask support any colorspace? */
cairo_set_source_rgb (cairo, fill_color.r, fill_color.g, fill_color.b);
cairo_mask (cairo, pattern);
cairo_pattern_destroy (pattern);
cairo_surface_destroy (image);
free (buffer);
delete imgStr;
#endif
}
|
void ArthurOutputDev::drawImageMask(GfxState *state, Object *ref, Stream *str,
int width, int height, GBool invert,
GBool inlineImg)
{
qDebug() << "drawImageMask";
#if 0
unsigned char *buffer;
unsigned char *dest;
cairo_surface_t *image;
cairo_pattern_t *pattern;
int x, y;
ImageStream *imgStr;
Guchar *pix;
double *ctm;
cairo_matrix_t matrix;
int invert_bit;
int row_stride;
row_stride = (width + 3) & ~3;
buffer = (unsigned char *) malloc (height * row_stride);
if (buffer == NULL) {
error(-1, "Unable to allocate memory for image.");
return;
}
/* TODO: Do we want to cache these? */
imgStr = new ImageStream(str, width, 1, 1);
imgStr->reset();
invert_bit = invert ? 1 : 0;
for (y = 0; y < height; y++) {
pix = imgStr->getLine();
dest = buffer + y * row_stride;
for (x = 0; x < width; x++) {
if (pix[x] ^ invert_bit)
*dest++ = 0;
else
*dest++ = 255;
}
}
image = cairo_image_surface_create_for_data (buffer, CAIRO_FORMAT_A8,
width, height, row_stride);
if (image == NULL)
return;
pattern = cairo_pattern_create_for_surface (image);
if (pattern == NULL)
return;
ctm = state->getCTM();
LOG (printf ("drawImageMask %dx%d, matrix: %f, %f, %f, %f, %f, %f\n",
width, height, ctm[0], ctm[1], ctm[2], ctm[3], ctm[4], ctm[5]));
matrix.xx = ctm[0] / width;
matrix.xy = -ctm[2] / height;
matrix.yx = ctm[1] / width;
matrix.yy = -ctm[3] / height;
matrix.x0 = ctm[2] + ctm[4];
matrix.y0 = ctm[3] + ctm[5];
cairo_matrix_invert (&matrix);
cairo_pattern_set_matrix (pattern, &matrix);
cairo_pattern_set_filter (pattern, CAIRO_FILTER_BEST);
/* FIXME: Doesn't the image mask support any colorspace? */
cairo_set_source_rgb (cairo, fill_color.r, fill_color.g, fill_color.b);
cairo_mask (cairo, pattern);
cairo_pattern_destroy (pattern);
cairo_surface_destroy (image);
free (buffer);
delete imgStr;
#endif
}
|
CPP
|
poppler
| 0 |
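CVE-2009-3605 is classified as CWE-189 (numeric errors); the drawImageMask() listing above computes height * row_stride and passes the product straight to malloc(), the classic shape of an integer overflow leading to an undersized allocation. A small sketch of an overflow-checked allocation helper, assuming nothing about poppler's actual fix in the referenced commit:
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

/* Allocate rows * row_stride bytes, failing cleanly if the product
 * would overflow size_t.  Illustrative only; the poppler fix in the
 * linked commit takes its own approach inside the image code. */
static void *checked_image_alloc(size_t rows, size_t row_stride)
{
    if (row_stride != 0 && rows > SIZE_MAX / row_stride)
        return NULL;                     /* product would overflow */
    return malloc(rows * row_stride);
}

int main(void)
{
    unsigned char *ok  = checked_image_alloc(100, 400);
    unsigned char *bad = checked_image_alloc(SIZE_MAX / 2, 4);
    printf("ok=%p bad=%p\n", (void *) ok, (void *) bad);
    free(ok);
    return 0;
}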
CVE-2016-2464
|
https://www.cvedetails.com/cve/CVE-2016-2464/
|
CWE-20
|
https://android.googlesource.com/platform/external/libvpx/+/65c49d5b382de4085ee5668732bcb0f6ecaf7148
|
65c49d5b382de4085ee5668732bcb0f6ecaf7148
|
Fix ParseElementHeader to support 0 payload elements
Cherry-pick'ing Change 5c83bbec9a5f6f00a349674ddad85b753d2ea219
from upstream. This fixes regression in some edge cases for mkv
playback.
BUG=26499283
Change-Id: I88de03219a3d941b6b2f251d384e29c36bdd4d9b
|
const Block* BlockGroup::GetBlock() const { return &m_block; }
|
const Block* BlockGroup::GetBlock() const { return &m_block; }
|
C
|
Android
| 0 |
CVE-2016-9537
|
https://www.cvedetails.com/cve/CVE-2016-9537/
|
CWE-787
|
https://github.com/vadz/libtiff/commit/83a4b92815ea04969d494416eaae3d4c6b338e4a#diff-c8b4b355f9b5c06d585b23138e1c185f
|
83a4b92815ea04969d494416eaae3d4c6b338e4a#diff-c8b4b355f9b5c06d585b23138e1c185f
|
* tools/tiffcrop.c: fix various out-of-bounds write vulnerabilities
in heap or stack allocated buffers. Reported as MSVR 35093,
MSVR 35096 and MSVR 35097. Discovered by Axel Souchet and Vishal
Chauhan from the MSRC Vulnerabilities & Mitigations team.
* tools/tiff2pdf.c: fix out-of-bounds write vulnerabilities in
heap allocate buffer in t2p_process_jpeg_strip(). Reported as MSVR
35098. Discovered by Axel Souchet and Vishal Chauhan from the MSRC
Vulnerabilities & Mitigations team.
* libtiff/tif_pixarlog.c: fix out-of-bounds write vulnerabilities
in heap allocated buffers. Reported as MSVR 35094. Discovered by
Axel Souchet and Vishal Chauhan from the MSRC Vulnerabilities &
Mitigations team.
* libtiff/tif_write.c: fix issue in error code path of TIFFFlushData1()
that didn't reset the tif_rawcc and tif_rawcp members (I'm not
completely sure if that could happen in practice outside of the odd
behaviour of t2p_seekproc() of tiff2pdf). The report points out that a
better fix could be to check the return value of TIFFFlushData1() in
places where it isn't done currently, but it seems this patch is enough.
Reported as MSVR 35095. Discovered by Axel Souchet & Vishal Chauhan &
Suha Can from the MSRC Vulnerabilities & Mitigations team.
|
tsize_t t2p_write_pdf_pages(T2P* t2p, TIFF* output)
{
tsize_t written=0;
tdir_t i=0;
char buffer[32];
int buflen=0;
int page=0;
written += t2pWriteFile(output,
(tdata_t) "<< \n/Type /Pages \n/Kids [ ", 26);
page = t2p->pdf_pages+1;
for (i=0;i<t2p->tiff_pagecount;i++){
buflen=snprintf(buffer, sizeof(buffer), "%d", page);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R ", 5);
if ( ((i+1)%8)==0 ) {
written += t2pWriteFile(output, (tdata_t) "\n", 1);
}
page +=3;
page += t2p->tiff_pages[i].page_extra;
if(t2p->tiff_pages[i].page_tilecount>0){
page += (2 * t2p->tiff_pages[i].page_tilecount);
} else {
page +=2;
}
}
written += t2pWriteFile(output, (tdata_t) "] \n/Count ", 10);
buflen=snprintf(buffer, sizeof(buffer), "%d", t2p->tiff_pagecount);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " \n>> \n", 6);
return(written);
}
|
tsize_t t2p_write_pdf_pages(T2P* t2p, TIFF* output)
{
tsize_t written=0;
tdir_t i=0;
char buffer[32];
int buflen=0;
int page=0;
written += t2pWriteFile(output,
(tdata_t) "<< \n/Type /Pages \n/Kids [ ", 26);
page = t2p->pdf_pages+1;
for (i=0;i<t2p->tiff_pagecount;i++){
buflen=snprintf(buffer, sizeof(buffer), "%d", page);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R ", 5);
if ( ((i+1)%8)==0 ) {
written += t2pWriteFile(output, (tdata_t) "\n", 1);
}
page +=3;
page += t2p->tiff_pages[i].page_extra;
if(t2p->tiff_pages[i].page_tilecount>0){
page += (2 * t2p->tiff_pages[i].page_tilecount);
} else {
page +=2;
}
}
written += t2pWriteFile(output, (tdata_t) "] \n/Count ", 10);
buflen=snprintf(buffer, sizeof(buffer), "%d", t2p->tiff_pagecount);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " \n>> \n", 6);
return(written);
}
|
C
|
libtiff
| 0 |
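t2p_write_pdf_pages() above leans on check_snprintf_ret() to make sure the formatted length is sane before handing it to t2pWriteFile(). The general pattern, sketched here without copying libtiff's macro, is to treat snprintf's return value as untrusted until it is confirmed non-negative and smaller than the destination buffer:
#include <stdio.h>

/* Format an integer into buf and return its length, or -1 if the result
 * was truncated or snprintf failed.  Mirrors the intent of libtiff's
 * check_snprintf_ret() without reproducing its implementation. */
static int format_checked(char *buf, size_t bufsize, int value)
{
    int n = snprintf(buf, bufsize, "%d", value);
    if (n < 0 || (size_t) n >= bufsize)
        return -1;             /* never pass a bogus length to the writer */
    return n;
}

int main(void)
{
    char buffer[32];
    int len = format_checked(buffer, sizeof(buffer), 123456);
    if (len > 0)
        fwrite(buffer, 1, (size_t) len, stdout);
    putchar('\n');
    return 0;
}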
CVE-2015-6780
|
https://www.cvedetails.com/cve/CVE-2015-6780/
| null |
https://github.com/chromium/chromium/commit/f2cba0d13b3a6d76dedede66731e5ca253d3b2af
|
f2cba0d13b3a6d76dedede66731e5ca253d3b2af
|
Fix UAF in Origin Info Bubble and permission settings UI.
In addition to fixing the UAF, this will also fix the problem of the bubble
showing over the previous tab (if the bubble is open when the tab it was opened
for closes).
BUG=490492
TBR=tedchoc
Review URL: https://codereview.chromium.org/1317443002
Cr-Commit-Position: refs/heads/master@{#346023}
|
void WebsiteSettings::PresentSitePermissions() {
PermissionInfoList permission_info_list;
WebsiteSettingsUI::PermissionInfo permission_info;
for (size_t i = 0; i < arraysize(kPermissionType); ++i) {
permission_info.type = kPermissionType[i];
content_settings::SettingInfo info;
scoped_ptr<base::Value> value =
content_settings_->GetWebsiteSetting(
site_url_, site_url_, permission_info.type, std::string(), &info);
DCHECK(value.get());
if (value->GetType() == base::Value::TYPE_INTEGER) {
permission_info.setting =
content_settings::ValueToContentSetting(value.get());
} else {
NOTREACHED();
}
permission_info.source = info.source;
if (info.primary_pattern == ContentSettingsPattern::Wildcard() &&
info.secondary_pattern == ContentSettingsPattern::Wildcard()) {
permission_info.default_setting = permission_info.setting;
permission_info.setting = CONTENT_SETTING_DEFAULT;
} else {
permission_info.default_setting =
content_settings_->GetDefaultContentSetting(permission_info.type,
NULL);
}
if (permission_info.setting != CONTENT_SETTING_DEFAULT &&
permission_info.setting != permission_info.default_setting) {
permission_info_list.push_back(permission_info);
}
}
ui_->SetPermissionInfo(permission_info_list);
}
|
void WebsiteSettings::PresentSitePermissions() {
PermissionInfoList permission_info_list;
WebsiteSettingsUI::PermissionInfo permission_info;
for (size_t i = 0; i < arraysize(kPermissionType); ++i) {
permission_info.type = kPermissionType[i];
content_settings::SettingInfo info;
scoped_ptr<base::Value> value =
content_settings_->GetWebsiteSetting(
site_url_, site_url_, permission_info.type, std::string(), &info);
DCHECK(value.get());
if (value->GetType() == base::Value::TYPE_INTEGER) {
permission_info.setting =
content_settings::ValueToContentSetting(value.get());
} else {
NOTREACHED();
}
permission_info.source = info.source;
if (info.primary_pattern == ContentSettingsPattern::Wildcard() &&
info.secondary_pattern == ContentSettingsPattern::Wildcard()) {
permission_info.default_setting = permission_info.setting;
permission_info.setting = CONTENT_SETTING_DEFAULT;
} else {
permission_info.default_setting =
content_settings_->GetDefaultContentSetting(permission_info.type,
NULL);
}
if (permission_info.setting != CONTENT_SETTING_DEFAULT &&
permission_info.setting != permission_info.default_setting) {
permission_info_list.push_back(permission_info);
}
}
ui_->SetPermissionInfo(permission_info_list);
}
|
C
|
Chrome
| 0 |
CVE-2013-2905
|
https://www.cvedetails.com/cve/CVE-2013-2905/
|
CWE-264
|
https://github.com/chromium/chromium/commit/afb848acb43ba316097ab4fddfa38dbd80bc6a71
|
afb848acb43ba316097ab4fddfa38dbd80bc6a71
|
Posix: fix named SHM mappings permissions.
Make sure that named mappings in /dev/shm/ aren't created with
broad permissions.
BUG=254159
[email protected], [email protected]
Review URL: https://codereview.chromium.org/17779002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@209814 0039d316-1c4b-4281-b951-d872f2087c98
|
explicit MultipleLockThread(int id) : id_(id) {}
|
explicit MultipleLockThread(int id) : id_(id) {}
|
C
|
Chrome
| 0 |
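The commit message above is about creating named shared-memory mappings in /dev/shm with owner-only permissions. The plain POSIX version of that idea looks roughly like the following; this is a generic shm_open() sketch with a hypothetical object name, not Chromium's SharedMemory code (may need -lrt on older glibc):
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
    /* Create the object readable and writable only by the owner (0600)
     * and refuse to reuse an existing name (O_EXCL). */
    const char *name = "/demo_shm_restrictive";   /* hypothetical name */
    int fd = shm_open(name, O_CREAT | O_EXCL | O_RDWR, S_IRUSR | S_IWUSR);
    if (fd < 0) {
        perror("shm_open");
        return 1;
    }
    if (ftruncate(fd, 4096) < 0)      /* size the mapping */
        perror("ftruncate");
    close(fd);
    shm_unlink(name);                 /* remove the /dev/shm entry */
    return 0;
}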
null | null | null |
https://github.com/chromium/chromium/commit/a0af50481db56aa780942e8595a20c36b2c34f5c
|
a0af50481db56aa780942e8595a20c36b2c34f5c
|
Build fix following bug #30696.
Patch by Gavin Barraclough <[email protected]> on 2009-10-22
Reviewed by NOBODY (build fix).
* WebCoreSupport/FrameLoaderClientGtk.cpp:
(WebKit::FrameLoaderClient::windowObjectCleared):
* webkit/webkitwebframe.cpp:
(webkit_web_frame_get_global_context):
git-svn-id: svn://svn.chromium.org/blink/trunk@49964 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void FrameLoaderClient::dispatchWillSubmitForm(FramePolicyFunction policyFunction, PassRefPtr<FormState>)
{
ASSERT(policyFunction);
if (!policyFunction)
return;
(core(m_frame)->loader()->policyChecker()->*policyFunction)(PolicyUse);
}
|
void FrameLoaderClient::dispatchWillSubmitForm(FramePolicyFunction policyFunction, PassRefPtr<FormState>)
{
ASSERT(policyFunction);
if (!policyFunction)
return;
(core(m_frame)->loader()->policyChecker()->*policyFunction)(PolicyUse);
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/1161a49d663dd395bd639549c2dfe7324f847938
|
1161a49d663dd395bd639549c2dfe7324f847938
|
Don't populate URL data in WebDropData when dragging files.
This is considered a potential security issue as well, since it leaks
filesystem paths.
BUG=332579
Review URL: https://codereview.chromium.org/135633002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@244538 0039d316-1c4b-4281-b951-d872f2087c98
|
void OSExchangeDataProviderAura::SetString(const base::string16& data) {
string_ = data;
formats_ |= OSExchangeData::STRING;
}
|
void OSExchangeDataProviderAura::SetString(const base::string16& data) {
string_ = data;
formats_ |= OSExchangeData::STRING;
}
|
C
|
Chrome
| 0 |
CVE-2017-6903
|
https://www.cvedetails.com/cve/CVE-2017-6903/
|
CWE-269
|
https://github.com/iortcw/iortcw/commit/11a83410153756ae350a82ed41b08d128ff7f998
|
11a83410153756ae350a82ed41b08d128ff7f998
|
All: Merge some file writing extension checks
|
int Com_Filter( char *filter, char *name, int casesensitive ) {
char buf[MAX_TOKEN_CHARS];
char *ptr;
int i, found;
while ( *filter ) {
if ( *filter == '*' ) {
filter++;
for ( i = 0; *filter; i++ ) {
if ( *filter == '*' || *filter == '?' ) {
break;
}
buf[i] = *filter;
filter++;
}
buf[i] = '\0';
if ( strlen( buf ) ) {
ptr = Com_StringContains( name, buf, casesensitive );
if ( !ptr ) {
return qfalse;
}
name = ptr + strlen( buf );
}
} else if ( *filter == '?' ) {
filter++;
name++;
} else if ( *filter == '[' && *( filter + 1 ) == '[' ) {
filter++;
} else if ( *filter == '[' ) {
filter++;
found = qfalse;
while ( *filter && !found ) {
if ( *filter == ']' && *( filter + 1 ) != ']' ) {
break;
}
if ( *( filter + 1 ) == '-' && *( filter + 2 ) && ( *( filter + 2 ) != ']' || *( filter + 3 ) == ']' ) ) {
if ( casesensitive ) {
if ( *name >= *filter && *name <= *( filter + 2 ) ) {
found = qtrue;
}
} else {
if ( toupper( *name ) >= toupper( *filter ) &&
toupper( *name ) <= toupper( *( filter + 2 ) ) ) {
found = qtrue;
}
}
filter += 3;
} else {
if ( casesensitive ) {
if ( *filter == *name ) {
found = qtrue;
}
} else {
if ( toupper( *filter ) == toupper( *name ) ) {
found = qtrue;
}
}
filter++;
}
}
if ( !found ) {
return qfalse;
}
while ( *filter ) {
if ( *filter == ']' && *( filter + 1 ) != ']' ) {
break;
}
filter++;
}
filter++;
name++;
} else {
if ( casesensitive ) {
if ( *filter != *name ) {
return qfalse;
}
} else {
if ( toupper( *filter ) != toupper( *name ) ) {
return qfalse;
}
}
filter++;
name++;
}
}
return qtrue;
}
|
int Com_Filter( char *filter, char *name, int casesensitive ) {
char buf[MAX_TOKEN_CHARS];
char *ptr;
int i, found;
while ( *filter ) {
if ( *filter == '*' ) {
filter++;
for ( i = 0; *filter; i++ ) {
if ( *filter == '*' || *filter == '?' ) {
break;
}
buf[i] = *filter;
filter++;
}
buf[i] = '\0';
if ( strlen( buf ) ) {
ptr = Com_StringContains( name, buf, casesensitive );
if ( !ptr ) {
return qfalse;
}
name = ptr + strlen( buf );
}
} else if ( *filter == '?' ) {
filter++;
name++;
} else if ( *filter == '[' && *( filter + 1 ) == '[' ) {
filter++;
} else if ( *filter == '[' ) {
filter++;
found = qfalse;
while ( *filter && !found ) {
if ( *filter == ']' && *( filter + 1 ) != ']' ) {
break;
}
if ( *( filter + 1 ) == '-' && *( filter + 2 ) && ( *( filter + 2 ) != ']' || *( filter + 3 ) == ']' ) ) {
if ( casesensitive ) {
if ( *name >= *filter && *name <= *( filter + 2 ) ) {
found = qtrue;
}
} else {
if ( toupper( *name ) >= toupper( *filter ) &&
toupper( *name ) <= toupper( *( filter + 2 ) ) ) {
found = qtrue;
}
}
filter += 3;
} else {
if ( casesensitive ) {
if ( *filter == *name ) {
found = qtrue;
}
} else {
if ( toupper( *filter ) == toupper( *name ) ) {
found = qtrue;
}
}
filter++;
}
}
if ( !found ) {
return qfalse;
}
while ( *filter ) {
if ( *filter == ']' && *( filter + 1 ) != ']' ) {
break;
}
filter++;
}
filter++;
name++;
} else {
if ( casesensitive ) {
if ( *filter != *name ) {
return qfalse;
}
} else {
if ( toupper( *filter ) != toupper( *name ) ) {
return qfalse;
}
}
filter++;
name++;
}
}
return qtrue;
}
|
C
|
OpenJK
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/5041f984669fe3a989a84c348eb838c8f7233f6b
|
5041f984669fe3a989a84c348eb838c8f7233f6b
|
AutoFill: Release the cached frame when we receive the frameDestroyed() message
from WebKit.
BUG=48857
TEST=none
Review URL: http://codereview.chromium.org/3173005
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@55789 0039d316-1c4b-4281-b951-d872f2087c98
|
void RenderView::didDestroyScriptContext(WebFrame* frame) {
EventBindings::HandleContextDestroyed(frame);
}
|
void RenderView::didDestroyScriptContext(WebFrame* frame) {
EventBindings::HandleContextDestroyed(frame);
}
|
C
|
Chrome
| 0 |