CVE ID | CVE Page | CWE ID | codeLink | commit_id | commit_message | func_after | func_before | lang | project | vul
---|---|---|---|---|---|---|---|---|---|---|
CVE-2014-3645
|
https://www.cvedetails.com/cve/CVE-2014-3645/
|
CWE-20
|
https://github.com/torvalds/linux/commit/bfd0a56b90005f8c8a004baf407ad90045c2b11e
|
bfd0a56b90005f8c8a004baf407ad90045c2b11e
|
nEPT: Nested INVEPT
If we let L1 use EPT, we should probably also support the INVEPT instruction.
In our current nested EPT implementation, when L1 changes its EPT table
for L2 (i.e., EPT12), L0 modifies the shadow EPT table (EPT02) and
already calls INVEPT in the course of this modification. But if the last
level of a shadow page is unsync, not all of L1's changes to EPT12 are
intercepted, which means the roots need to be synced when L1 calls
INVEPT. Global INVEPT should be no different, since the roots are synced
by kvm_mmu_load() each time EPTP02 changes.
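A minimal sketch of the exit handler this motivates, assuming the kvm helper names of that era; illustrative only, not the verbatim patch:
```c
/* Sketch only: sync shadow (EPT02) roots on any INVEPT from L1, since
 * unsync last-level shadow pages may hide L1's EPT12 changes. */
static int handle_invept_sketch(struct kvm_vcpu *vcpu)
{
	kvm_mmu_sync_roots(vcpu);        /* pick up unsynced EPT12 changes */
	kvm_mmu_flush_tlb(vcpu);         /* drop stale EPT02 translations */
	nested_vmx_succeed(vcpu);        /* report success to L1 */
	skip_emulated_instruction(vcpu);
	return 1;
}
```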
Reviewed-by: Xiao Guangrong <[email protected]>
Signed-off-by: Nadav Har'El <[email protected]>
Signed-off-by: Jun Nakajima <[email protected]>
Signed-off-by: Xinhao Xu <[email protected]>
Signed-off-by: Yang Zhang <[email protected]>
Signed-off-by: Gleb Natapov <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
struct kvm_mmu_page *sp;
unsigned long *rmapp;
sp = page_header(__pa(spte));
kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
return pte_list_add(vcpu, spte, rmapp);
}
|
static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
struct kvm_mmu_page *sp;
unsigned long *rmapp;
sp = page_header(__pa(spte));
kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
return pte_list_add(vcpu, spte, rmapp);
}
|
C
|
linux
| 0 |
CVE-2013-6626
|
https://www.cvedetails.com/cve/CVE-2013-6626/
| null |
https://github.com/chromium/chromium/commit/90fb08ed0146c9beacfd4dde98a20fc45419fff3
|
90fb08ed0146c9beacfd4dde98a20fc45419fff3
|
Cancel JavaScript dialogs when an interstitial appears.
BUG=295695
TEST=See bug for repro steps.
Review URL: https://chromiumcodereview.appspot.com/24360011
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@225026 0039d316-1c4b-4281-b951-d872f2087c98
|
void WebContentsImpl::Init(const WebContents::CreateParams& params) {
render_manager_.Init(
params.browser_context, params.site_instance, params.routing_id,
params.main_frame_routing_id);
view_.reset(GetContentClient()->browser()->
OverrideCreateWebContentsView(this, &render_view_host_delegate_view_));
if (view_) {
CHECK(render_view_host_delegate_view_);
} else {
WebContentsViewDelegate* delegate =
GetContentClient()->browser()->GetWebContentsViewDelegate(this);
if (browser_plugin_guest_) {
scoped_ptr<WebContentsViewPort> platform_view(CreateWebContentsView(
this, delegate, &render_view_host_delegate_view_));
WebContentsViewGuest* rv = new WebContentsViewGuest(
this, browser_plugin_guest_.get(), platform_view.Pass(),
render_view_host_delegate_view_);
render_view_host_delegate_view_ = rv;
view_.reset(rv);
} else {
view_.reset(CreateWebContentsView(
this, delegate, &render_view_host_delegate_view_));
}
CHECK(render_view_host_delegate_view_);
}
CHECK(view_.get());
gfx::Size initial_size = params.initial_size;
view_->CreateView(initial_size, params.context);
if (opener_)
AddDestructionObserver(opener_);
registrar_.Add(this,
NOTIFICATION_RENDER_WIDGET_HOST_DESTROYED,
NotificationService::AllBrowserContextsAndSources());
#if defined(OS_ANDROID)
java_bridge_dispatcher_host_manager_.reset(
new JavaBridgeDispatcherHostManager(this));
#endif
#if defined(OS_ANDROID)
date_time_chooser_.reset(new DateTimeChooserAndroid());
#endif
}
|
void WebContentsImpl::Init(const WebContents::CreateParams& params) {
render_manager_.Init(
params.browser_context, params.site_instance, params.routing_id,
params.main_frame_routing_id);
view_.reset(GetContentClient()->browser()->
OverrideCreateWebContentsView(this, &render_view_host_delegate_view_));
if (view_) {
CHECK(render_view_host_delegate_view_);
} else {
WebContentsViewDelegate* delegate =
GetContentClient()->browser()->GetWebContentsViewDelegate(this);
if (browser_plugin_guest_) {
scoped_ptr<WebContentsViewPort> platform_view(CreateWebContentsView(
this, delegate, &render_view_host_delegate_view_));
WebContentsViewGuest* rv = new WebContentsViewGuest(
this, browser_plugin_guest_.get(), platform_view.Pass(),
render_view_host_delegate_view_);
render_view_host_delegate_view_ = rv;
view_.reset(rv);
} else {
view_.reset(CreateWebContentsView(
this, delegate, &render_view_host_delegate_view_));
}
CHECK(render_view_host_delegate_view_);
}
CHECK(view_.get());
gfx::Size initial_size = params.initial_size;
view_->CreateView(initial_size, params.context);
if (opener_)
AddDestructionObserver(opener_);
registrar_.Add(this,
NOTIFICATION_RENDER_WIDGET_HOST_DESTROYED,
NotificationService::AllBrowserContextsAndSources());
#if defined(OS_ANDROID)
java_bridge_dispatcher_host_manager_.reset(
new JavaBridgeDispatcherHostManager(this));
#endif
#if defined(OS_ANDROID)
date_time_chooser_.reset(new DateTimeChooserAndroid());
#endif
}
|
C
|
Chrome
| 0 |
CVE-2016-5688
|
https://www.cvedetails.com/cve/CVE-2016-5688/
|
CWE-119
|
https://github.com/ImageMagick/ImageMagick/commit/aecd0ada163a4d6c769cec178955d5f3e9316f2f
|
aecd0ada163a4d6c769cec178955d5f3e9316f2f
|
Set pixel cache to undefined if any resource limit is exceeded
|
MagickExport MagickBooleanType ClipImage(Image *image,ExceptionInfo *exception)
{
return(ClipImagePath(image,"#1",MagickTrue,exception));
}
|
MagickExport MagickBooleanType ClipImage(Image *image,ExceptionInfo *exception)
{
return(ClipImagePath(image,"#1",MagickTrue,exception));
}
|
C
|
ImageMagick
| 0 |
CVE-2017-7277
|
https://www.cvedetails.com/cve/CVE-2017-7277/
|
CWE-125
|
https://github.com/torvalds/linux/commit/8605330aac5a5785630aec8f64378a54891937cc
|
8605330aac5a5785630aec8f64378a54891937cc
|
tcp: fix SCM_TIMESTAMPING_OPT_STATS for normal skbs
__sock_recv_timestamp can be called for both normal skbs (for
receive timestamps) and for skbs on the error queue (for transmit
timestamps).
Commit 1c885808e456
(tcp: SOF_TIMESTAMPING_OPT_STATS option for SO_TIMESTAMPING)
assumes that any skb passed to __sock_recv_timestamp is from
the error queue, containing OPT_STATS in the content of the skb.
This results in accessing invalid memory or generating junk
data.
To fix this, set skb->pkt_type to PACKET_OUTGOING for packets
on the error queue. This is safe because on the receive path
on local sockets skb->pkt_type is never set to PACKET_OUTGOING.
With that, copy OPT_STATS from a packet only if its pkt_type
is PACKET_OUTGOING.
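A minimal sketch of the two-part fix as described; helper calls follow kernel conventions, and the cmsg arguments are illustrative:
```c
/* Sketch only. 1) Tag skbs queued on the transmit error queue: */
skb->pkt_type = PACKET_OUTGOING;   /* never set on the local receive path */

/* 2) When filling timestamping cmsgs, copy OPT_STATS only from such skbs: */
if (skb->pkt_type == PACKET_OUTGOING)
	put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING_OPT_STATS,
		 stats_len, stats);        /* arguments illustrative */
```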
Fixes: 1c885808e456 ("tcp: SOF_TIMESTAMPING_OPT_STATS option for SO_TIMESTAMPING")
Reported-by: JongHwan Kim <[email protected]>
Signed-off-by: Soheil Hassas Yeganeh <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: Willem de Bruijn <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
new->tstamp = old->tstamp;
/* We do not copy old->sk */
new->dev = old->dev;
memcpy(new->cb, old->cb, sizeof(old->cb));
skb_dst_copy(new, old);
#ifdef CONFIG_XFRM
new->sp = secpath_get(old->sp);
#endif
__nf_copy(new, old, false);
/* Note : this field could be in headers_start/headers_end section
* It is not yet because we do not want to have a 16 bit hole
*/
new->queue_mapping = old->queue_mapping;
memcpy(&new->headers_start, &old->headers_start,
offsetof(struct sk_buff, headers_end) -
offsetof(struct sk_buff, headers_start));
CHECK_SKB_FIELD(protocol);
CHECK_SKB_FIELD(csum);
CHECK_SKB_FIELD(hash);
CHECK_SKB_FIELD(priority);
CHECK_SKB_FIELD(skb_iif);
CHECK_SKB_FIELD(vlan_proto);
CHECK_SKB_FIELD(vlan_tci);
CHECK_SKB_FIELD(transport_header);
CHECK_SKB_FIELD(network_header);
CHECK_SKB_FIELD(mac_header);
CHECK_SKB_FIELD(inner_protocol);
CHECK_SKB_FIELD(inner_transport_header);
CHECK_SKB_FIELD(inner_network_header);
CHECK_SKB_FIELD(inner_mac_header);
CHECK_SKB_FIELD(mark);
#ifdef CONFIG_NETWORK_SECMARK
CHECK_SKB_FIELD(secmark);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
CHECK_SKB_FIELD(napi_id);
#endif
#ifdef CONFIG_XPS
CHECK_SKB_FIELD(sender_cpu);
#endif
#ifdef CONFIG_NET_SCHED
CHECK_SKB_FIELD(tc_index);
#endif
}
|
static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
new->tstamp = old->tstamp;
/* We do not copy old->sk */
new->dev = old->dev;
memcpy(new->cb, old->cb, sizeof(old->cb));
skb_dst_copy(new, old);
#ifdef CONFIG_XFRM
new->sp = secpath_get(old->sp);
#endif
__nf_copy(new, old, false);
/* Note : this field could be in headers_start/headers_end section
* It is not yet because we do not want to have a 16 bit hole
*/
new->queue_mapping = old->queue_mapping;
memcpy(&new->headers_start, &old->headers_start,
offsetof(struct sk_buff, headers_end) -
offsetof(struct sk_buff, headers_start));
CHECK_SKB_FIELD(protocol);
CHECK_SKB_FIELD(csum);
CHECK_SKB_FIELD(hash);
CHECK_SKB_FIELD(priority);
CHECK_SKB_FIELD(skb_iif);
CHECK_SKB_FIELD(vlan_proto);
CHECK_SKB_FIELD(vlan_tci);
CHECK_SKB_FIELD(transport_header);
CHECK_SKB_FIELD(network_header);
CHECK_SKB_FIELD(mac_header);
CHECK_SKB_FIELD(inner_protocol);
CHECK_SKB_FIELD(inner_transport_header);
CHECK_SKB_FIELD(inner_network_header);
CHECK_SKB_FIELD(inner_mac_header);
CHECK_SKB_FIELD(mark);
#ifdef CONFIG_NETWORK_SECMARK
CHECK_SKB_FIELD(secmark);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
CHECK_SKB_FIELD(napi_id);
#endif
#ifdef CONFIG_XPS
CHECK_SKB_FIELD(sender_cpu);
#endif
#ifdef CONFIG_NET_SCHED
CHECK_SKB_FIELD(tc_index);
#endif
}
|
C
|
linux
| 0 |
CVE-2016-3839
|
https://www.cvedetails.com/cve/CVE-2016-3839/
|
CWE-284
|
https://android.googlesource.com/platform/system/bt/+/472271b153c5dc53c28beac55480a8d8434b2d5c
|
472271b153c5dc53c28beac55480a8d8434b2d5c
|
DO NOT MERGE Fix potential DoS caused by delivering signal to BT process
Bug: 28885210
Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360
Conflicts:
btif/co/bta_hh_co.c
btif/src/btif_core.c
Merge conflict resolution of ag/1161415 (referencing ag/1164670)
- Directly into mnc-mr2-release
|
static void bte_scan_filt_param_cfg_evt(UINT8 action_type,
tBTA_DM_BLE_PF_AVBL_SPACE avbl_space,
tBTA_DM_BLE_REF_VALUE ref_value, tBTA_STATUS status)
{
/* This event occurs on calling BTA_DmBleCfgFilterCondition internally,
** and that is why there is no HAL callback
*/
if(BTA_SUCCESS != status)
{
BTIF_TRACE_ERROR("%s, %d", __FUNCTION__, status);
}
else
{
BTIF_TRACE_DEBUG("%s", __FUNCTION__);
}
}
|
static void bte_scan_filt_param_cfg_evt(UINT8 action_type,
tBTA_DM_BLE_PF_AVBL_SPACE avbl_space,
tBTA_DM_BLE_REF_VALUE ref_value, tBTA_STATUS status)
{
/* This event occurs on calling BTA_DmBleCfgFilterCondition internally,
** and that is why there is no HAL callback
*/
if(BTA_SUCCESS != status)
{
BTIF_TRACE_ERROR("%s, %d", __FUNCTION__, status);
}
else
{
BTIF_TRACE_DEBUG("%s", __FUNCTION__);
}
}
|
C
|
Android
| 0 |
CVE-2018-12248
|
https://www.cvedetails.com/cve/CVE-2018-12248/
|
CWE-125
|
https://github.com/mruby/mruby/commit/778500563a9f7ceba996937dc886bd8cde29b42b
|
778500563a9f7ceba996937dc886bd8cde29b42b
|
Extend stack when pushing arguments that do not fit; fix #4038
|
fiber_eq(mrb_state *mrb, mrb_value self)
{
mrb_value other;
mrb_get_args(mrb, "o", &other);
if (mrb_type(other) != MRB_TT_FIBER) {
return mrb_false_value();
}
return mrb_bool_value(fiber_ptr(self) == fiber_ptr(other));
}
|
fiber_eq(mrb_state *mrb, mrb_value self)
{
mrb_value other;
mrb_get_args(mrb, "o", &other);
if (mrb_type(other) != MRB_TT_FIBER) {
return mrb_false_value();
}
return mrb_bool_value(fiber_ptr(self) == fiber_ptr(other));
}
|
C
|
mruby
| 0 |
CVE-2015-6765
|
https://www.cvedetails.com/cve/CVE-2015-6765/
| null |
https://github.com/chromium/chromium/commit/e5c298b780737c53fa9aae44d6fef522931d88b0
|
e5c298b780737c53fa9aae44d6fef522931d88b0
|
AppCache: fix a browser crashing bug that can happen during updates.
BUG=558589
Review URL: https://codereview.chromium.org/1463463003
Cr-Commit-Position: refs/heads/master@{#360967}
|
void AppCacheUpdateJob::LoadFromNewestCacheFailed(
const GURL& url, AppCacheResponseInfo* response_info) {
if (internal_state_ == CACHE_FAILURE)
return;
urls_to_fetch_.push_front(UrlToFetch(url, true, response_info));
FetchUrls();
}
|
void AppCacheUpdateJob::LoadFromNewestCacheFailed(
const GURL& url, AppCacheResponseInfo* response_info) {
if (internal_state_ == CACHE_FAILURE)
return;
urls_to_fetch_.push_front(UrlToFetch(url, true, response_info));
FetchUrls();
}
|
C
|
Chrome
| 0 |
CVE-2018-11381
|
https://www.cvedetails.com/cve/CVE-2018-11381/
|
CWE-125
|
https://github.com/radare/radare2/commit/3fcf41ed96ffa25b38029449520c8d0a198745f3
|
3fcf41ed96ffa25b38029449520c8d0a198745f3
|
Fix #9902 - Fix oobread in RBin.string_scan_range
|
static char *swiftField(const char *dn, const char *cn) {
char *p = strstr (dn, ".getter_");
if (!p) {
p = strstr (dn, ".setter_");
if (!p) {
p = strstr (dn, ".method_");
}
}
if (p) {
char *q = strstr (dn, cn);
if (q && q[strlen (cn)] == '.') {
q = strdup (q + strlen (cn) + 1);
char *r = strchr (q, '.');
if (r) {
*r = 0;
}
return q;
}
}
return NULL;
}
|
static char *swiftField(const char *dn, const char *cn) {
char *p = strstr (dn, ".getter_");
if (!p) {
p = strstr (dn, ".setter_");
if (!p) {
p = strstr (dn, ".method_");
}
}
if (p) {
char *q = strstr (dn, cn);
if (q && q[strlen (cn)] == '.') {
q = strdup (q + strlen (cn) + 1);
char *r = strchr (q, '.');
if (r) {
*r = 0;
}
return q;
}
}
return NULL;
}
|
C
|
radare2
| 0 |
CVE-2011-2829
|
https://www.cvedetails.com/cve/CVE-2011-2829/
|
CWE-189
|
https://github.com/chromium/chromium/commit/a4b20ed4917f1f6fc83b6375a48e2c3895d43a8a
|
a4b20ed4917f1f6fc83b6375a48e2c3895d43a8a
|
Add chromium_code: 1 to surface.gyp and gl.gyp to pick up -Werror.
It looks like this was dropped accidentally in http://codereview.chromium.org/6718027 (surface.gyp) and http://codereview.chromium.org/6722026 (gl.gyp)
Remove now-redundant code that's implied by chromium_code: 1.
Fix the warnings that have crept in since chromium_code: 1 was removed.
BUG=none
TEST=none
Committed: http://src.chromium.org/viewvc/chrome?view=rev&revision=91598
Review URL: http://codereview.chromium.org/7227009
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@91813 0039d316-1c4b-4281-b951-d872f2087c98
|
GLvoid StubGLGenBuffers(GLsizei n, GLuint* buffers) {
glGenBuffersARB(n, buffers);
}
|
GLvoid StubGLGenBuffers(GLsizei n, GLuint* buffers) {
glGenBuffersARB(n, buffers);
}
|
C
|
Chrome
| 0 |
CVE-2017-15423
|
https://www.cvedetails.com/cve/CVE-2017-15423/
|
CWE-310
|
https://github.com/chromium/chromium/commit/a263d1cf62a9c75be6aaafdec88aacfcef1e8fd2
|
a263d1cf62a9c75be6aaafdec88aacfcef1e8fd2
|
Roll src/third_party/boringssl/src 664e99a64..696c13bd6
https://boringssl.googlesource.com/boringssl/+log/664e99a6486c293728097c661332f92bf2d847c6..696c13bd6ab78011adfe7b775519c8b7cc82b604
BUG=778101
Change-Id: I8dda4f3db952597148e3c7937319584698d00e1c
Reviewed-on: https://chromium-review.googlesource.com/747941
Reviewed-by: Avi Drissman <[email protected]>
Reviewed-by: David Benjamin <[email protected]>
Commit-Queue: Steven Valdez <[email protected]>
Cr-Commit-Position: refs/heads/master@{#513774}
|
void BrowserMainLoop::InitializeMainThread() {
TRACE_EVENT0("startup", "BrowserMainLoop::InitializeMainThread");
base::PlatformThread::SetName("CrBrowserMain");
main_thread_.reset(
new BrowserThreadImpl(BrowserThread::UI, base::MessageLoop::current()));
}
|
void BrowserMainLoop::InitializeMainThread() {
TRACE_EVENT0("startup", "BrowserMainLoop::InitializeMainThread");
base::PlatformThread::SetName("CrBrowserMain");
main_thread_.reset(
new BrowserThreadImpl(BrowserThread::UI, base::MessageLoop::current()));
}
|
C
|
Chrome
| 0 |
CVE-2014-3173
|
https://www.cvedetails.com/cve/CVE-2014-3173/
|
CWE-119
|
https://github.com/chromium/chromium/commit/ee7579229ff7e9e5ae28bf53aea069251499d7da
|
ee7579229ff7e9e5ae28bf53aea069251499d7da
|
Framebuffer clear() needs to consider the situation some draw buffers are disabled.
This is when we expose DrawBuffers extension.
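A rough sketch of the idea in plain GL calls; the bookkeeping (which buffers the app enabled, via a hypothetical app_enabled()) is illustrative, not the decoder's actual state tracking:
```c
/* Sketch only: mask the clear so disabled draw buffers stay untouched. */
GLenum bufs[8];
for (int i = 0; i < count; ++i)
	bufs[i] = app_enabled(i) ? GL_COLOR_ATTACHMENT0 + i : GL_NONE;
glDrawBuffers(count, bufs);   /* GL_NONE entries are skipped by the clear */
glClear(GL_COLOR_BUFFER_BIT);
```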
BUG=376951
TEST=the attached test case, webgl conformance
[email protected],[email protected]
Review URL: https://codereview.chromium.org/315283002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@275338 0039d316-1c4b-4281-b951-d872f2087c98
|
error::Error GLES2DecoderImpl::HandleGetRequestableExtensionsCHROMIUM(
uint32 immediate_data_size,
const cmds::GetRequestableExtensionsCHROMIUM& c) {
Bucket* bucket = CreateBucket(c.bucket_id);
scoped_refptr<FeatureInfo> info(new FeatureInfo());
info->Initialize(disallowed_features_);
bucket->SetFromString(info->extensions().c_str());
return error::kNoError;
}
|
error::Error GLES2DecoderImpl::HandleGetRequestableExtensionsCHROMIUM(
uint32 immediate_data_size,
const cmds::GetRequestableExtensionsCHROMIUM& c) {
Bucket* bucket = CreateBucket(c.bucket_id);
scoped_refptr<FeatureInfo> info(new FeatureInfo());
info->Initialize(disallowed_features_);
bucket->SetFromString(info->extensions().c_str());
return error::kNoError;
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/123e68f88fd0ed4f7447ba81148f9b619b947c47
|
123e68f88fd0ed4f7447ba81148f9b619b947c47
|
Clipboard: Opt out of PNG Encoding filters.
Set the PNG encoder's FilterFlag to kNone from the default kAll.
The clipboard should prefer faster encode time over encode size for image/png,
so set all clipboard image encoding to skip testing of different PNG encoding
filters, which takes a lot of time for not too much compression ratio benefit
in the common case.
Benchmarking with a random-pixel 8k by 4k px image
(https://www.photopea.com/clipboard_img.html), and fZLibLevel = 1, here's some
encode times (seconds) varying flags:
* kNone: 2.98 (trials: 3.00814, 2.98265, 2.99636, 2.9877, 2.96517, 2.99467)
* kSub: 3.03 (trials: 3.02345, 3.04085, 3.00886, 3.0587, 3.03992, 3.02549)
* kAll: 4.12 (trials: 4.12813, 4.12552, 4.08524, 4.13283, 4.15013, 4.11719)
Using kNone would save ~28% encode time over the current kAll.
This will be most visible for pasting of extremely large photos.
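The commit itself flips Skia's SkPngEncoder filter flag; as a C illustration of the same knob, libpng exposes the equivalent setting (an assumption of equivalence, not the code this commit touches):
```c
/* Sketch only: disable per-row filter search, keeping just "no filter". */
png_set_filter(png_ptr, 0, PNG_FILTER_NONE);
png_set_compression_level(png_ptr, 1);   /* analogous to fZLibLevel = 1 */
```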
Bug: 1004867
Change-Id: I37a848498da425249e57171ae2ca3f0595c6b793
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1827953
Commit-Queue: Victor Costan <[email protected]>
Reviewed-by: Victor Costan <[email protected]>
Cr-Commit-Position: refs/heads/master@{#700598}
|
DataObjectItem* DataObjectItem::CreateFromURL(const String& url,
const String& title) {
DataObjectItem* item =
MakeGarbageCollected<DataObjectItem>(kStringKind, kMimeTypeTextURIList);
item->data_ = url;
item->title_ = title;
return item;
}
|
DataObjectItem* DataObjectItem::CreateFromURL(const String& url,
const String& title) {
DataObjectItem* item =
MakeGarbageCollected<DataObjectItem>(kStringKind, kMimeTypeTextURIList);
item->data_ = url;
item->title_ = title;
return item;
}
|
C
|
Chrome
| 0 |
CVE-2016-5189
|
https://www.cvedetails.com/cve/CVE-2016-5189/
|
CWE-284
|
https://github.com/chromium/chromium/commit/2440e872debd68ae7c2a8bf9ddb34df2cce378cd
|
2440e872debd68ae7c2a8bf9ddb34df2cce378cd
|
[GCPW] Disallow sign in of consumer accounts when mdm is enabled.
Unless the registry key "mdm_aca" is explicitly set to 1, always
fail sign in of consumer accounts when mdm enrollment is enabled.
Consumer accounts are defined as accounts with gmail.com or
googlemail.com domain.
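A plain-C sketch of the stated policy; the real code is Chromium C++, and these helper names are made up for illustration:
```c
#include <string.h>

/* Sketch only: consumer accounts are gmail.com / googlemail.com. */
static int is_consumer_account(const char *email)
{
	const char *at = strrchr(email, '@');
	if (!at)
		return 0;
	return strcmp(at + 1, "gmail.com") == 0 ||
	       strcmp(at + 1, "googlemail.com") == 0;
}

/* With mdm enrollment enabled, sign-in fails for such accounts unless
 * the "mdm_aca" registry key is explicitly set to 1. */
```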
Bug: 944049
Change-Id: Icb822f3737d90931de16a8d3317616dd2b159edd
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1532903
Commit-Queue: Tien Mai <[email protected]>
Reviewed-by: Roger Tawa <[email protected]>
Cr-Commit-Position: refs/heads/master@{#646278}
|
HRESULT CGaiaCredentialBase::ForkSaveAccountInfoStub(
const std::unique_ptr<base::DictionaryValue>& dict,
BSTR* status_text) {
LOGFN(INFO);
DCHECK(status_text);
ScopedStartupInfo startupinfo;
StdParentHandles parent_handles;
HRESULT hr =
InitializeStdHandles(CommDirection::kParentToChildOnly, kAllStdHandles,
&startupinfo, &parent_handles);
if (FAILED(hr)) {
LOGFN(ERROR) << "InitializeStdHandles hr=" << putHR(hr);
*status_text = AllocErrorString(IDS_INTERNAL_ERROR_BASE);
return hr;
}
base::CommandLine command_line(base::CommandLine::NO_PROGRAM);
hr = GetCommandLineForEntrypoint(CURRENT_MODULE(), L"SaveAccountInfo",
&command_line);
if (hr == S_FALSE) {
LOGFN(INFO) << "Not running SAIS";
return S_OK;
} else if (FAILED(hr)) {
LOGFN(ERROR) << "GetCommandLineForEntryPoint hr=" << putHR(hr);
*status_text = AllocErrorString(IDS_INTERNAL_ERROR_BASE);
return hr;
}
command_line.AppendSwitchASCII(switches::kProcessType,
"gcpw-save-account-info");
base::win::ScopedProcessInformation procinfo;
hr = OSProcessManager::Get()->CreateRunningProcess(
command_line, startupinfo.GetInfo(), &procinfo);
if (FAILED(hr)) {
LOGFN(ERROR) << "OSProcessManager::CreateRunningProcess hr=" << putHR(hr);
*status_text = AllocErrorString(IDS_INTERNAL_ERROR_BASE);
return hr;
}
std::string json;
if (base::JSONWriter::Write(*dict, &json)) {
DWORD written;
if (!::WriteFile(parent_handles.hstdin_write.Get(), json.c_str(),
json.length() + 1, &written, /*lpOverlapped=*/nullptr)) {
HRESULT hrWrite = HRESULT_FROM_WIN32(::GetLastError());
LOGFN(ERROR) << "WriteFile hr=" << putHR(hrWrite);
}
} else {
LOGFN(ERROR) << "base::JSONWriter::Write failed";
}
return S_OK;
}
|
HRESULT CGaiaCredentialBase::ForkSaveAccountInfoStub(
const std::unique_ptr<base::DictionaryValue>& dict,
BSTR* status_text) {
LOGFN(INFO);
DCHECK(status_text);
ScopedStartupInfo startupinfo;
StdParentHandles parent_handles;
HRESULT hr =
InitializeStdHandles(CommDirection::kParentToChildOnly, kAllStdHandles,
&startupinfo, &parent_handles);
if (FAILED(hr)) {
LOGFN(ERROR) << "InitializeStdHandles hr=" << putHR(hr);
*status_text = AllocErrorString(IDS_INTERNAL_ERROR_BASE);
return hr;
}
base::CommandLine command_line(base::CommandLine::NO_PROGRAM);
hr = GetCommandLineForEntrypoint(CURRENT_MODULE(), L"SaveAccountInfo",
&command_line);
if (hr == S_FALSE) {
LOGFN(INFO) << "Not running SAIS";
return S_OK;
} else if (FAILED(hr)) {
LOGFN(ERROR) << "GetCommandLineForEntryPoint hr=" << putHR(hr);
*status_text = AllocErrorString(IDS_INTERNAL_ERROR_BASE);
return hr;
}
command_line.AppendSwitchASCII(switches::kProcessType,
"gcpw-save-account-info");
base::win::ScopedProcessInformation procinfo;
hr = OSProcessManager::Get()->CreateRunningProcess(
command_line, startupinfo.GetInfo(), &procinfo);
if (FAILED(hr)) {
LOGFN(ERROR) << "OSProcessManager::CreateRunningProcess hr=" << putHR(hr);
*status_text = AllocErrorString(IDS_INTERNAL_ERROR_BASE);
return hr;
}
std::string json;
if (base::JSONWriter::Write(*dict, &json)) {
DWORD written;
if (!::WriteFile(parent_handles.hstdin_write.Get(), json.c_str(),
json.length() + 1, &written, /*lpOverlapped=*/nullptr)) {
HRESULT hrWrite = HRESULT_FROM_WIN32(::GetLastError());
LOGFN(ERROR) << "WriteFile hr=" << putHR(hrWrite);
}
} else {
LOGFN(ERROR) << "base::JSONWriter::Write failed";
}
return S_OK;
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/dcd10462fb49c72544719c490238f3a35edf3fc6
|
dcd10462fb49c72544719c490238f3a35edf3fc6
|
Open distiller UI setting through JavaScript
This change adds support for opening the android view distiller
settings from JavaScript via "distiller.openSettings". To avoid
passing numerous objects into the DomDistillerViewerSource, the
ExternalFeedbackReporter has been refactored into a generic UI
handle that is responsible for any interaction between Chrome and
Android specific controls and the distiller component.
The actual UI in the page is not yet implemented.
BUG=
Review URL: https://codereview.chromium.org/1386043002
Cr-Commit-Position: refs/heads/master@{#354123}
|
void DistillerNativeJavaScript::AddJavaScriptObjectToFrame(
v8::Local<v8::Context> context) {
v8::Isolate* isolate = blink::mainThreadIsolate();
v8::HandleScope handle_scope(isolate);
if (context.IsEmpty())
return;
v8::Context::Scope context_scope(context);
v8::Local<v8::Object> distiller_obj =
GetOrCreateDistillerObject(isolate, context->Global());
EnsureServiceConnected();
// Some of the JavaScript functions require extra work to be done when it is
// called, so they have wrapper functions maintained in this class.
BindFunctionToObject(
distiller_obj,
"echo",
base::Bind(
&DistillerNativeJavaScript::DistillerEcho, base::Unretained(this)));
// Many functions can simply call the Mojo interface directly and have no
// wrapper function for binding. Note that calling distiller_js_service.get()
// does not transfer ownership of the interface.
BindFunctionToObject(
distiller_obj,
"sendFeedback",
base::Bind(
&DistillerJavaScriptService::HandleDistillerFeedbackCall,
base::Unretained(distiller_js_service_.get())));
BindFunctionToObject(
distiller_obj,
"closePanel",
base::Bind(
&DistillerJavaScriptService::HandleDistillerClosePanelCall,
base::Unretained(distiller_js_service_.get())));
BindFunctionToObject(
distiller_obj,
"openSettings",
base::Bind(
&DistillerJavaScriptService::HandleDistillerOpenSettingsCall,
base::Unretained(distiller_js_service_.get())));
}
|
void DistillerNativeJavaScript::AddJavaScriptObjectToFrame(
v8::Local<v8::Context> context) {
v8::Isolate* isolate = blink::mainThreadIsolate();
v8::HandleScope handle_scope(isolate);
if (context.IsEmpty())
return;
v8::Context::Scope context_scope(context);
v8::Local<v8::Object> distiller_obj =
GetOrCreateDistillerObject(isolate, context->Global());
BindFunctionToObject(
distiller_obj,
"echo",
base::Bind(
&DistillerNativeJavaScript::DistillerEcho, base::Unretained(this)));
BindFunctionToObject(
distiller_obj,
"sendFeedback",
base::Bind(
&DistillerNativeJavaScript::DistillerSendFeedback,
base::Unretained(this)));
BindFunctionToObject(
distiller_obj,
"closePanel",
base::Bind(
&DistillerNativeJavaScript::DistillerClosePanel,
base::Unretained(this)));
}
|
C
|
Chrome
| 1 |
CVE-2017-12154
|
https://www.cvedetails.com/cve/CVE-2017-12154/
| null |
https://github.com/torvalds/linux/commit/51aa68e7d57e3217192d88ce90fd5b8ef29ec94f
|
51aa68e7d57e3217192d88ce90fd5b8ef29ec94f
|
kvm: nVMX: Don't allow L2 to access the hardware CR8
If L1 does not specify the "use TPR shadow" VM-execution control in
vmcs12, then L0 must specify the "CR8-load exiting" and "CR8-store
exiting" VM-execution controls in vmcs02. Failure to do so will give
the L2 VM unrestricted read/write access to the hardware CR8.
This fixes CVE-2017-12154.
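A minimal sketch of the vmcs02 adjustment this describes, using the kernel's VMX control bit names; its exact placement in the vmcs02 setup path is assumed:
```c
/* Sketch only: without a TPR shadow, force CR8 accesses in L2 to exit. */
if (!(exec_control & CPU_BASED_TPR_SHADOW))
	exec_control |= CPU_BASED_CR8_LOAD_EXITING |
			CPU_BASED_CR8_STORE_EXITING;
```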
Signed-off-by: Jim Mattson <[email protected]>
Reviewed-by: David Hildenbrand <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
static int handle_vmfunc(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12;
u32 function = vcpu->arch.regs[VCPU_REGS_RAX];
/*
* VMFUNC is only supported for nested guests, but we always enable the
* secondary control for simplicity; for non-nested mode, fake that we
* didn't by injecting #UD.
*/
if (!is_guest_mode(vcpu)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
vmcs12 = get_vmcs12(vcpu);
if ((vmcs12->vm_function_control & (1 << function)) == 0)
goto fail;
switch (function) {
case 0:
if (nested_vmx_eptp_switching(vcpu, vmcs12))
goto fail;
break;
default:
goto fail;
}
return kvm_skip_emulated_instruction(vcpu);
fail:
nested_vmx_vmexit(vcpu, vmx->exit_reason,
vmcs_read32(VM_EXIT_INTR_INFO),
vmcs_readl(EXIT_QUALIFICATION));
return 1;
}
|
static int handle_vmfunc(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12;
u32 function = vcpu->arch.regs[VCPU_REGS_RAX];
/*
* VMFUNC is only supported for nested guests, but we always enable the
* secondary control for simplicity; for non-nested mode, fake that we
* didn't by injecting #UD.
*/
if (!is_guest_mode(vcpu)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
vmcs12 = get_vmcs12(vcpu);
if ((vmcs12->vm_function_control & (1 << function)) == 0)
goto fail;
switch (function) {
case 0:
if (nested_vmx_eptp_switching(vcpu, vmcs12))
goto fail;
break;
default:
goto fail;
}
return kvm_skip_emulated_instruction(vcpu);
fail:
nested_vmx_vmexit(vcpu, vmx->exit_reason,
vmcs_read32(VM_EXIT_INTR_INFO),
vmcs_readl(EXIT_QUALIFICATION));
return 1;
}
|
C
|
linux
| 0 |
CVE-2018-20182
|
https://www.cvedetails.com/cve/CVE-2018-20182/
|
CWE-119
|
https://github.com/rdesktop/rdesktop/commit/4dca546d04321a610c1835010b5dad85163b65e1
|
4dca546d04321a610c1835010b5dad85163b65e1
|
Malicious RDP server security fixes
This commit includes fixes for a set of 21 vulnerabilities in
rdesktop when a malicious RDP server is used.
All vulnerabilities were identified and reported by Eyal Itkin.
* Add rdp_protocol_error function that is used in several fixes
* Refactor of process_bitmap_updates
* Fix possible integer overflow in s_check_rem() on 32bit arch (see the sketch after this list)
* Fix memory corruption in process_bitmap_data - CVE-2018-8794
* Fix remote code execution in process_bitmap_data - CVE-2018-8795
* Fix remote code execution in process_plane - CVE-2018-8797
* Fix Denial of Service in mcs_recv_connect_response - CVE-2018-20175
* Fix Denial of Service in mcs_parse_domain_params - CVE-2018-20175
* Fix Denial of Service in sec_parse_crypt_info - CVE-2018-20176
* Fix Denial of Service in sec_recv - CVE-2018-20176
* Fix minor information leak in rdpdr_process - CVE-2018-8791
* Fix Denial of Service in cssp_read_tsrequest - CVE-2018-8792
* Fix remote code execution in cssp_read_tsrequest - CVE-2018-8793
* Fix Denial of Service in process_bitmap_data - CVE-2018-8796
* Fix minor information leak in rdpsnd_process_ping - CVE-2018-8798
* Fix Denial of Service in process_secondary_order - CVE-2018-8799
* Fix remote code execution in ui_clip_handle_data - CVE-2018-8800
* Fix major information leak in ui_clip_handle_data - CVE-2018-20174
* Fix memory corruption in rdp_in_unistr - CVE-2018-20177
* Fix Denial of Service in process_demand_active - CVE-2018-20178
* Fix remote code execution in lspci_process - CVE-2018-20179
* Fix remote code execution in rdpsnddbg_process - CVE-2018-20180
* Fix remote code execution in seamless_process - CVE-2018-20181
* Fix remote code execution in seamless_process_line - CVE-2018-20182
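As a sketch of the s_check_rem() item above (STREAM field names assumed): on 32-bit, "s->p + n" can wrap past the end pointer, so compare against the remaining byte count instead:
```c
/* Sketch only: overflow-safe remaining-bytes check for a parse stream. */
#define s_check_rem(s, n) \
	((s)->p <= (s)->end && (size_t)(n) <= (size_t)((s)->end - (s)->p))
```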
|
rdpdr_abort_io(uint32 fd, uint32 major, RD_NTSTATUS status)
{
uint32 result;
struct async_iorequest *iorq;
struct async_iorequest *prev;
iorq = g_iorequest;
prev = NULL;
while (iorq != NULL)
{
/* Only remove from table when major is not set, or when correct major is supplied.
Abort read should not abort a write io request. */
if ((iorq->fd == fd) && (major == 0 || iorq->major == major))
{
result = 0;
rdpdr_send_completion(iorq->device, iorq->id, status, result, (uint8 *) "",
1);
iorq = rdpdr_remove_iorequest(prev, iorq);
return True;
}
prev = iorq;
iorq = iorq->next;
}
return False;
}
|
rdpdr_abort_io(uint32 fd, uint32 major, RD_NTSTATUS status)
{
uint32 result;
struct async_iorequest *iorq;
struct async_iorequest *prev;
iorq = g_iorequest;
prev = NULL;
while (iorq != NULL)
{
/* Only remove from table when major is not set, or when correct major is supplied.
Abort read should not abort a write io request. */
if ((iorq->fd == fd) && (major == 0 || iorq->major == major))
{
result = 0;
rdpdr_send_completion(iorq->device, iorq->id, status, result, (uint8 *) "",
1);
iorq = rdpdr_remove_iorequest(prev, iorq);
return True;
}
prev = iorq;
iorq = iorq->next;
}
return False;
}
|
C
|
rdesktop
| 0 |
CVE-2017-7539
|
https://www.cvedetails.com/cve/CVE-2017-7539/
|
CWE-20
|
https://git.qemu.org/?p=qemu.git;a=commitdiff;h=ff82911cd3f69f028f2537825c9720ff78bc3f19
|
ff82911cd3f69f028f2537825c9720ff78bc3f19
| null |
int nbd_init(int fd, QIOChannelSocket *sioc, uint16_t flags, off_t size)
{
unsigned long sectors = size / BDRV_SECTOR_SIZE;
if (size / BDRV_SECTOR_SIZE != sectors) {
LOG("Export size %lld too large for 32-bit kernel", (long long) size);
return -E2BIG;
}
TRACE("Setting NBD socket");
if (ioctl(fd, NBD_SET_SOCK, (unsigned long) sioc->fd) < 0) {
int serrno = errno;
LOG("Failed to set NBD socket");
return -serrno;
}
TRACE("Setting block size to %lu", (unsigned long)BDRV_SECTOR_SIZE);
if (ioctl(fd, NBD_SET_BLKSIZE, (unsigned long)BDRV_SECTOR_SIZE) < 0) {
int serrno = errno;
LOG("Failed setting NBD block size");
return -serrno;
}
TRACE("Setting size to %lu block(s)", sectors);
if (size % BDRV_SECTOR_SIZE) {
TRACE("Ignoring trailing %d bytes of export",
(int) (size % BDRV_SECTOR_SIZE));
}
if (ioctl(fd, NBD_SET_SIZE_BLOCKS, sectors) < 0) {
int serrno = errno;
LOG("Failed setting size (in blocks)");
return -serrno;
}
if (ioctl(fd, NBD_SET_FLAGS, (unsigned long) flags) < 0) {
if (errno == ENOTTY) {
int read_only = (flags & NBD_FLAG_READ_ONLY) != 0;
TRACE("Setting readonly attribute");
if (ioctl(fd, BLKROSET, (unsigned long) &read_only) < 0) {
int serrno = errno;
LOG("Failed setting read-only attribute");
return -serrno;
}
} else {
int serrno = errno;
LOG("Failed setting flags");
return -serrno;
}
}
TRACE("Negotiation ended");
return 0;
}
|
int nbd_init(int fd, QIOChannelSocket *sioc, uint16_t flags, off_t size)
{
unsigned long sectors = size / BDRV_SECTOR_SIZE;
if (size / BDRV_SECTOR_SIZE != sectors) {
LOG("Export size %lld too large for 32-bit kernel", (long long) size);
return -E2BIG;
}
TRACE("Setting NBD socket");
if (ioctl(fd, NBD_SET_SOCK, (unsigned long) sioc->fd) < 0) {
int serrno = errno;
LOG("Failed to set NBD socket");
return -serrno;
}
TRACE("Setting block size to %lu", (unsigned long)BDRV_SECTOR_SIZE);
if (ioctl(fd, NBD_SET_BLKSIZE, (unsigned long)BDRV_SECTOR_SIZE) < 0) {
int serrno = errno;
LOG("Failed setting NBD block size");
return -serrno;
}
TRACE("Setting size to %lu block(s)", sectors);
if (size % BDRV_SECTOR_SIZE) {
TRACE("Ignoring trailing %d bytes of export",
(int) (size % BDRV_SECTOR_SIZE));
}
if (ioctl(fd, NBD_SET_SIZE_BLOCKS, sectors) < 0) {
int serrno = errno;
LOG("Failed setting size (in blocks)");
return -serrno;
}
if (ioctl(fd, NBD_SET_FLAGS, (unsigned long) flags) < 0) {
if (errno == ENOTTY) {
int read_only = (flags & NBD_FLAG_READ_ONLY) != 0;
TRACE("Setting readonly attribute");
if (ioctl(fd, BLKROSET, (unsigned long) &read_only) < 0) {
int serrno = errno;
LOG("Failed setting read-only attribute");
return -serrno;
}
} else {
int serrno = errno;
LOG("Failed setting flags");
return -serrno;
}
}
TRACE("Negotiation ended");
return 0;
}
|
C
|
qemu
| 0 |
CVE-2017-16994
|
https://www.cvedetails.com/cve/CVE-2017-16994/
|
CWE-200
|
https://github.com/torvalds/linux/commit/373c4557d2aa362702c4c2d41288fb1e54990b7c
|
373c4557d2aa362702c4c2d41288fb1e54990b7c
|
mm/pagewalk.c: report holes in hugetlb ranges
This matters at least for the mincore syscall, which will otherwise copy
uninitialized memory from the page allocator to userspace. It is
probably also a correctness error for /proc/$pid/pagemap, but I haven't
tested that.
Removing the `walk->hugetlb_entry` condition in walk_hugetlb_range() has
no effect because the caller already checks for that.
This only reports holes in hugetlb ranges to callers who have specified
a hugetlb_entry callback.
This issue was found using an AFL-based fuzzer.
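The resulting shape of walk_hugetlb_range()'s loop body, trimmed to the relevant branch (surrounding code assumed):
```c
/* Sketch only: report unmapped hugetlb ranges instead of skipping them. */
pte = huge_pte_offset(walk->mm, addr & hmask);
if (pte)
	err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
else if (walk->pte_hole)           /* tolerate ->pte_hole == NULL (v2) */
	err = walk->pte_hole(addr, next, walk);
```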
v2:
- don't crash on ->pte_hole==NULL (Andrew Morton)
- add Cc stable (Andrew Morton)
Fixes: 1e25a271c8ac ("mincore: apply page table walker on do_mincore()")
Signed-off-by: Jann Horn <[email protected]>
Cc: <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
pmd_t *pmd;
unsigned long next;
int err = 0;
pmd = pmd_offset(pud, addr);
do {
again:
next = pmd_addr_end(addr, end);
if (pmd_none(*pmd) || !walk->vma) {
if (walk->pte_hole)
err = walk->pte_hole(addr, next, walk);
if (err)
break;
continue;
}
/*
* This implies that each ->pmd_entry() handler
* needs to know about pmd_trans_huge() pmds
*/
if (walk->pmd_entry)
err = walk->pmd_entry(pmd, addr, next, walk);
if (err)
break;
/*
* Check this here so we only break down trans_huge
* pages when we _need_ to
*/
if (!walk->pte_entry)
continue;
split_huge_pmd(walk->vma, pmd, addr);
if (pmd_trans_unstable(pmd))
goto again;
err = walk_pte_range(pmd, addr, next, walk);
if (err)
break;
} while (pmd++, addr = next, addr != end);
return err;
}
|
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
pmd_t *pmd;
unsigned long next;
int err = 0;
pmd = pmd_offset(pud, addr);
do {
again:
next = pmd_addr_end(addr, end);
if (pmd_none(*pmd) || !walk->vma) {
if (walk->pte_hole)
err = walk->pte_hole(addr, next, walk);
if (err)
break;
continue;
}
/*
* This implies that each ->pmd_entry() handler
* needs to know about pmd_trans_huge() pmds
*/
if (walk->pmd_entry)
err = walk->pmd_entry(pmd, addr, next, walk);
if (err)
break;
/*
* Check this here so we only break down trans_huge
* pages when we _need_ to
*/
if (!walk->pte_entry)
continue;
split_huge_pmd(walk->vma, pmd, addr);
if (pmd_trans_unstable(pmd))
goto again;
err = walk_pte_range(pmd, addr, next, walk);
if (err)
break;
} while (pmd++, addr = next, addr != end);
return err;
}
|
C
|
linux
| 0 |
CVE-2013-4350
|
https://www.cvedetails.com/cve/CVE-2013-4350/
|
CWE-310
|
https://github.com/torvalds/linux/commit/95ee62083cb6453e056562d91f597552021e6ae7
|
95ee62083cb6453e056562d91f597552021e6ae7
|
net: sctp: fix ipv6 ipsec encryption bug in sctp_v6_xmit
Alan Chester reported an issue with IPv6 on SCTP where IPsec traffic is not
being encrypted, whereas on IPv4 it is. Setting up an AH + ESP transport
does not seem to have the desired effect:
SCTP + IPv4:
22:14:20.809645 IP (tos 0x2,ECT(0), ttl 64, id 0, offset 0, flags [DF], proto AH (51), length 116)
192.168.0.2 > 192.168.0.5: AH(spi=0x00000042,sumlen=16,seq=0x1): ESP(spi=0x00000044,seq=0x1), length 72
22:14:20.813270 IP (tos 0x2,ECT(0), ttl 64, id 0, offset 0, flags [DF], proto AH (51), length 340)
192.168.0.5 > 192.168.0.2: AH(spi=0x00000043,sumlen=16,seq=0x1):
SCTP + IPv6:
22:31:19.215029 IP6 (class 0x02, hlim 64, next-header SCTP (132) payload length: 364)
fe80::222:15ff:fe87:7fc.3333 > fe80::92e6:baff:fe0d:5a54.36767: sctp
1) [INIT ACK] [init tag: 747759530] [rwnd: 62464] [OS: 10] [MIS: 10]
Moreover, Alan says:
This problem was seen with both Racoon and Racoon2. Other people have seen
this with OpenSwan. When IPsec is configured to encrypt all upper layer
protocols the SCTP connection does not initialize. After using Wireshark to
follow packets, this is because the SCTP packet leaves Box A unencrypted and
Box B believes all upper layer protocols are to be encrypted so it drops
this packet, causing the SCTP connection to fail to initialize. When IPsec
is configured to encrypt just SCTP, the SCTP packets are observed unencrypted.
In fact, using `socat sctp6-listen:3333 -` on one end and transferring "plaintext"
string on the other end, results in cleartext on the wire where SCTP eventually
does not report any errors, thus in the latter case that Alan reports, the
non-paranoid user might think he's communicating over an encrypted transport on
SCTP although he's not (tcpdump ... -X):
...
0x0030: 5d70 8e1a 0003 001a 177d eb6c 0000 0000 ]p.......}.l....
0x0040: 0000 0000 706c 6169 6e74 6578 740a 0000 ....plaintext...
Only in /proc/net/xfrm_stat we can see XfrmInTmplMismatch increasing on the
receiver side. Initial follow-up analysis from Alan's bug report was done by
Alexey Dobriyan. Also thanks to Vlad Yasevich for feedback on this.
SCTP has its own implementation of sctp_v6_xmit() not calling inet6_csk_xmit().
This has the implication that it probably never really got updated along with
changes in inet6_csk_xmit() and therefore does not seem to invoke xfrm handlers.
SCTP's IPv4 xmit however, properly calls ip_queue_xmit() to do the work. Since
a call to inet6_csk_xmit() would solve this problem, but result in unnecessary
route lookups, let us just use the cached flowi6 instead that we got through
sctp_v6_get_dst(). Since all SCTP packets are being sent through sctp_packet_transmit(),
we do the route lookup / flow caching in sctp_transport_route(), hold it in
tp->dst and skb_dst_set() right after that. If we would alter fl6->daddr in
sctp_v6_xmit() to np->opt->srcrt, we possibly could run into the same effect
of not having xfrm layer pick it up, hence, use fl6_update_dst() in sctp_v6_get_dst()
instead to get the correct source routed dst entry, which we assign to the skb.
Also source address routing example from 625034113 ("sctp: fix sctp to work with
ipv6 source address routing") still works with this patch! Nevertheless, in RFC5095
it is actually 'recommended' to not use that anyway due to traffic amplification [1].
So it seems we're not supposed to do that anyway in sctp_v6_xmit(). Moreover, if
we overwrite the flow destination here, the lower IPv6 layer will be unable to
put the correct destination address into IP header, as routing header is added in
ipv6_push_nfrag_opts() but then probably with wrong final destination. Things aside,
result of this patch is that we do not have any XfrmInTmplMismatch increase plus on
the wire with this patch it now looks like:
SCTP + IPv6:
08:17:47.074080 IP6 2620:52:0:102f:7a2b:cbff:fe27:1b0a > 2620:52:0:102f:213:72ff:fe32:7eba:
AH(spi=0x00005fb4,seq=0x1): ESP(spi=0x00005fb5,seq=0x1), length 72
08:17:47.074264 IP6 2620:52:0:102f:213:72ff:fe32:7eba > 2620:52:0:102f:7a2b:cbff:fe27:1b0a:
AH(spi=0x00003d54,seq=0x1): ESP(spi=0x00003d55,seq=0x1), length 296
This fixes Kernel Bugzilla 24412. This security issue seems to be present since
2.6.18 kernels. Let's just hope some big passive adversary in the wild didn't have
its fun with that. lksctp-tools IPv6 regression test suite passes as well with
this patch.
[1] http://www.secdev.org/conf/IPv6_RH_security-csw07.pdf
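A trimmed sketch of the xmit-side change as described: send through the normal IPv6 output path with the flow cached by sctp_v6_get_dst(), so the xfrm/IPsec hooks finally see SCTP's packets. Names follow the kernel; the reduction is ours:
```c
/* Sketch only: reuse the cached flowi6 instead of re-resolving the route. */
struct flowi6 *fl6 = &transport->fl.u.ip6;   /* cached by sctp_v6_get_dst() */
return ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
```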
Reported-by: Alan Chester <[email protected]>
Reported-by: Alexey Dobriyan <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Cc: Steffen Klassert <[email protected]>
Cc: Hannes Frederic Sowa <[email protected]>
Acked-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
__be16 port)
{
addr->sa.sa_family = AF_INET6;
addr->v6.sin6_port = port;
addr->v6.sin6_addr = *saddr;
}
|
static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
__be16 port)
{
addr->sa.sa_family = AF_INET6;
addr->v6.sin6_port = port;
addr->v6.sin6_addr = *saddr;
}
|
C
|
linux
| 0 |
CVE-2011-2517
|
https://www.cvedetails.com/cve/CVE-2011-2517/
|
CWE-119
|
https://github.com/torvalds/linux/commit/208c72f4fe44fe09577e7975ba0e7fa0278f3d03
|
208c72f4fe44fe09577e7975ba0e7fa0278f3d03
|
nl80211: fix check for valid SSID size in scan operations
In both trigger_scan and sched_scan operations, we were checking for
the SSID length before assigning the value correctly. Since the
memory was just kzalloc'ed, the check never triggered, and SSIDs with
over 32 characters were allowed to go through.
This was causing a buffer overflow when copying the actual SSID to the
proper place.
This bug has been there since 2.6.29-rc4.
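The corrected ordering, sketched with nl80211-style names (the error label is assumed): assign the length from the attribute first, then validate, then copy:
```c
/* Sketch only: validate after assignment, not against the kzalloc'ed 0. */
request->ssids[i].ssid_len = nla_len(attr);
if (request->ssids[i].ssid_len > IEEE80211_MAX_SSID_LEN) {
	err = -EINVAL;
	goto out_free;
}
memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr));
```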
Cc: [email protected]
Signed-off-by: Luciano Coelho <[email protected]>
Signed-off-by: John W. Linville <[email protected]>
|
void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
struct net_device *netdev)
{
struct sk_buff *msg;
msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!msg)
return;
if (nl80211_send_scan_msg(msg, rdev, netdev, 0, 0, 0,
NL80211_CMD_TRIGGER_SCAN) < 0) {
nlmsg_free(msg);
return;
}
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_scan_mcgrp.id, GFP_KERNEL);
}
|
void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
struct net_device *netdev)
{
struct sk_buff *msg;
msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!msg)
return;
if (nl80211_send_scan_msg(msg, rdev, netdev, 0, 0, 0,
NL80211_CMD_TRIGGER_SCAN) < 0) {
nlmsg_free(msg);
return;
}
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_scan_mcgrp.id, GFP_KERNEL);
}
|
C
|
linux
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/5041f984669fe3a989a84c348eb838c8f7233f6b
|
5041f984669fe3a989a84c348eb838c8f7233f6b
|
AutoFill: Release the cached frame when we receive the frameDestroyed() message
from WebKit.
BUG=48857
TEST=none
Review URL: http://codereview.chromium.org/3173005
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@55789 0039d316-1c4b-4281-b951-d872f2087c98
|
void RenderView::OnGetSerializedHtmlDataForCurrentPageWithLocalLinks(
const std::vector<GURL>& links,
const std::vector<FilePath>& local_paths,
const FilePath& local_directory_name) {
WebVector<WebURL> weburl_links(links);
WebVector<WebString> webstring_paths(local_paths.size());
for (size_t i = 0; i < local_paths.size(); i++)
webstring_paths[i] = webkit_glue::FilePathToWebString(local_paths[i]);
WebPageSerializer::serialize(webview()->mainFrame(),
true, this, weburl_links, webstring_paths,
webkit_glue::FilePathToWebString(
local_directory_name));
}
|
void RenderView::OnGetSerializedHtmlDataForCurrentPageWithLocalLinks(
const std::vector<GURL>& links,
const std::vector<FilePath>& local_paths,
const FilePath& local_directory_name) {
WebVector<WebURL> weburl_links(links);
WebVector<WebString> webstring_paths(local_paths.size());
for (size_t i = 0; i < local_paths.size(); i++)
webstring_paths[i] = webkit_glue::FilePathToWebString(local_paths[i]);
WebPageSerializer::serialize(webview()->mainFrame(),
true, this, weburl_links, webstring_paths,
webkit_glue::FilePathToWebString(
local_directory_name));
}
|
C
|
Chrome
| 0 |
CVE-2017-14166
|
https://www.cvedetails.com/cve/CVE-2017-14166/
|
CWE-125
|
https://github.com/libarchive/libarchive/commit/fa7438a0ff4033e4741c807394a9af6207940d71
|
fa7438a0ff4033e4741c807394a9af6207940d71
|
Do something sensible for empty strings to make fuzzers happy.
|
decompression_init(struct archive_read *a, enum enctype encoding)
{
struct xar *xar;
const char *detail;
int r;
xar = (struct xar *)(a->format->data);
xar->rd_encoding = encoding;
switch (encoding) {
case NONE:
break;
case GZIP:
if (xar->stream_valid)
r = inflateReset(&(xar->stream));
else
r = inflateInit(&(xar->stream));
if (r != Z_OK) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Couldn't initialize zlib stream.");
return (ARCHIVE_FATAL);
}
xar->stream_valid = 1;
xar->stream.total_in = 0;
xar->stream.total_out = 0;
break;
#if defined(HAVE_BZLIB_H) && defined(BZ_CONFIG_ERROR)
case BZIP2:
if (xar->bzstream_valid) {
BZ2_bzDecompressEnd(&(xar->bzstream));
xar->bzstream_valid = 0;
}
r = BZ2_bzDecompressInit(&(xar->bzstream), 0, 0);
if (r == BZ_MEM_ERROR)
r = BZ2_bzDecompressInit(&(xar->bzstream), 0, 1);
if (r != BZ_OK) {
int err = ARCHIVE_ERRNO_MISC;
detail = NULL;
switch (r) {
case BZ_PARAM_ERROR:
detail = "invalid setup parameter";
break;
case BZ_MEM_ERROR:
err = ENOMEM;
detail = "out of memory";
break;
case BZ_CONFIG_ERROR:
detail = "mis-compiled library";
break;
}
archive_set_error(&a->archive, err,
"Internal error initializing decompressor: %s",
detail == NULL ? "??" : detail);
xar->bzstream_valid = 0;
return (ARCHIVE_FATAL);
}
xar->bzstream_valid = 1;
xar->bzstream.total_in_lo32 = 0;
xar->bzstream.total_in_hi32 = 0;
xar->bzstream.total_out_lo32 = 0;
xar->bzstream.total_out_hi32 = 0;
break;
#endif
#if defined(HAVE_LZMA_H) && defined(HAVE_LIBLZMA)
#if LZMA_VERSION_MAJOR >= 5
/* Effectively disable the limiter. */
#define LZMA_MEMLIMIT UINT64_MAX
#else
/* NOTE: This needs to check memory size which running system has. */
#define LZMA_MEMLIMIT (1U << 30)
#endif
case XZ:
case LZMA:
if (xar->lzstream_valid) {
lzma_end(&(xar->lzstream));
xar->lzstream_valid = 0;
}
if (xar->entry_encoding == XZ)
r = lzma_stream_decoder(&(xar->lzstream),
LZMA_MEMLIMIT,/* memlimit */
LZMA_CONCATENATED);
else
r = lzma_alone_decoder(&(xar->lzstream),
LZMA_MEMLIMIT);/* memlimit */
if (r != LZMA_OK) {
switch (r) {
case LZMA_MEM_ERROR:
archive_set_error(&a->archive,
ENOMEM,
"Internal error initializing "
"compression library: "
"Cannot allocate memory");
break;
case LZMA_OPTIONS_ERROR:
archive_set_error(&a->archive,
ARCHIVE_ERRNO_MISC,
"Internal error initializing "
"compression library: "
"Invalid or unsupported options");
break;
default:
archive_set_error(&a->archive,
ARCHIVE_ERRNO_MISC,
"Internal error initializing "
"lzma library");
break;
}
return (ARCHIVE_FATAL);
}
xar->lzstream_valid = 1;
xar->lzstream.total_in = 0;
xar->lzstream.total_out = 0;
break;
#endif
/*
* Unsupported compression.
*/
default:
#if !defined(HAVE_BZLIB_H) || !defined(BZ_CONFIG_ERROR)
case BZIP2:
#endif
#if !defined(HAVE_LZMA_H) || !defined(HAVE_LIBLZMA)
case LZMA:
case XZ:
#endif
switch (xar->entry_encoding) {
case BZIP2: detail = "bzip2"; break;
case LZMA: detail = "lzma"; break;
case XZ: detail = "xz"; break;
default: detail = "??"; break;
}
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"%s compression not supported on this platform",
detail);
return (ARCHIVE_FAILED);
}
return (ARCHIVE_OK);
}
|
decompression_init(struct archive_read *a, enum enctype encoding)
{
struct xar *xar;
const char *detail;
int r;
xar = (struct xar *)(a->format->data);
xar->rd_encoding = encoding;
switch (encoding) {
case NONE:
break;
case GZIP:
if (xar->stream_valid)
r = inflateReset(&(xar->stream));
else
r = inflateInit(&(xar->stream));
if (r != Z_OK) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Couldn't initialize zlib stream.");
return (ARCHIVE_FATAL);
}
xar->stream_valid = 1;
xar->stream.total_in = 0;
xar->stream.total_out = 0;
break;
#if defined(HAVE_BZLIB_H) && defined(BZ_CONFIG_ERROR)
case BZIP2:
if (xar->bzstream_valid) {
BZ2_bzDecompressEnd(&(xar->bzstream));
xar->bzstream_valid = 0;
}
r = BZ2_bzDecompressInit(&(xar->bzstream), 0, 0);
if (r == BZ_MEM_ERROR)
r = BZ2_bzDecompressInit(&(xar->bzstream), 0, 1);
if (r != BZ_OK) {
int err = ARCHIVE_ERRNO_MISC;
detail = NULL;
switch (r) {
case BZ_PARAM_ERROR:
detail = "invalid setup parameter";
break;
case BZ_MEM_ERROR:
err = ENOMEM;
detail = "out of memory";
break;
case BZ_CONFIG_ERROR:
detail = "mis-compiled library";
break;
}
archive_set_error(&a->archive, err,
"Internal error initializing decompressor: %s",
detail == NULL ? "??" : detail);
xar->bzstream_valid = 0;
return (ARCHIVE_FATAL);
}
xar->bzstream_valid = 1;
xar->bzstream.total_in_lo32 = 0;
xar->bzstream.total_in_hi32 = 0;
xar->bzstream.total_out_lo32 = 0;
xar->bzstream.total_out_hi32 = 0;
break;
#endif
#if defined(HAVE_LZMA_H) && defined(HAVE_LIBLZMA)
#if LZMA_VERSION_MAJOR >= 5
/* Effectively disable the limiter. */
#define LZMA_MEMLIMIT UINT64_MAX
#else
/* NOTE: This needs to check memory size which running system has. */
#define LZMA_MEMLIMIT (1U << 30)
#endif
case XZ:
case LZMA:
if (xar->lzstream_valid) {
lzma_end(&(xar->lzstream));
xar->lzstream_valid = 0;
}
if (xar->entry_encoding == XZ)
r = lzma_stream_decoder(&(xar->lzstream),
LZMA_MEMLIMIT,/* memlimit */
LZMA_CONCATENATED);
else
r = lzma_alone_decoder(&(xar->lzstream),
LZMA_MEMLIMIT);/* memlimit */
if (r != LZMA_OK) {
switch (r) {
case LZMA_MEM_ERROR:
archive_set_error(&a->archive,
ENOMEM,
"Internal error initializing "
"compression library: "
"Cannot allocate memory");
break;
case LZMA_OPTIONS_ERROR:
archive_set_error(&a->archive,
ARCHIVE_ERRNO_MISC,
"Internal error initializing "
"compression library: "
"Invalid or unsupported options");
break;
default:
archive_set_error(&a->archive,
ARCHIVE_ERRNO_MISC,
"Internal error initializing "
"lzma library");
break;
}
return (ARCHIVE_FATAL);
}
xar->lzstream_valid = 1;
xar->lzstream.total_in = 0;
xar->lzstream.total_out = 0;
break;
#endif
/*
* Unsupported compression.
*/
default:
#if !defined(HAVE_BZLIB_H) || !defined(BZ_CONFIG_ERROR)
case BZIP2:
#endif
#if !defined(HAVE_LZMA_H) || !defined(HAVE_LIBLZMA)
case LZMA:
case XZ:
#endif
switch (xar->entry_encoding) {
case BZIP2: detail = "bzip2"; break;
case LZMA: detail = "lzma"; break;
case XZ: detail = "xz"; break;
default: detail = "??"; break;
}
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"%s compression not supported on this platform",
detail);
return (ARCHIVE_FAILED);
}
return (ARCHIVE_OK);
}
|
C
|
libarchive
| 0 |
CVE-2016-9756
|
https://www.cvedetails.com/cve/CVE-2016-9756/
|
CWE-200
|
https://github.com/torvalds/linux/commit/2117d5398c81554fbf803f5fd1dc55eb78216c0c
|
2117d5398c81554fbf803f5fd1dc55eb78216c0c
|
KVM: x86: drop error recovery in em_jmp_far and em_ret_far
em_jmp_far and em_ret_far assumed that setting IP can only fail in 64
bit mode, but syzkaller proved otherwise (and SDM agrees).
Code segment was restored upon failure, but it was left uninitialized
outside of long mode, which could lead to a leak of host kernel stack.
We could have fixed that by always saving and restoring the CS, but we
take a simpler approach and just break any guest that manages to fail,
as the error recovery is error-prone and modern CPUs don't need the
emulator for this.
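The resulting error path, sketched (helper names are the emulator's; the trimming is ours): a failed far-RIP assignment now just propagates instead of attempting a CS restore:
```c
/* Sketch only: no recovery; a guest that faults here stays broken. */
rc = assign_eip_far(ctxt, eip, &new_desc);
if (rc != X86EMUL_CONTINUE)
	return rc;   /* previously: restore old CS and report, error-prone */
```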
Found by syzkaller:
WARNING: CPU: 2 PID: 3668 at arch/x86/kvm/emulate.c:2217 em_ret_far+0x428/0x480
Kernel panic - not syncing: panic_on_warn set ...
CPU: 2 PID: 3668 Comm: syz-executor Not tainted 4.9.0-rc4+ #49
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
[...]
Call Trace:
[...] __dump_stack lib/dump_stack.c:15
[...] dump_stack+0xb3/0x118 lib/dump_stack.c:51
[...] panic+0x1b7/0x3a3 kernel/panic.c:179
[...] __warn+0x1c4/0x1e0 kernel/panic.c:542
[...] warn_slowpath_null+0x2c/0x40 kernel/panic.c:585
[...] em_ret_far+0x428/0x480 arch/x86/kvm/emulate.c:2217
[...] em_ret_far_imm+0x17/0x70 arch/x86/kvm/emulate.c:2227
[...] x86_emulate_insn+0x87a/0x3730 arch/x86/kvm/emulate.c:5294
[...] x86_emulate_instruction+0x520/0x1ba0 arch/x86/kvm/x86.c:5545
[...] emulate_instruction arch/x86/include/asm/kvm_host.h:1116
[...] complete_emulated_io arch/x86/kvm/x86.c:6870
[...] complete_emulated_mmio+0x4e9/0x710 arch/x86/kvm/x86.c:6934
[...] kvm_arch_vcpu_ioctl_run+0x3b7a/0x5a90 arch/x86/kvm/x86.c:6978
[...] kvm_vcpu_ioctl+0x61e/0xdd0 arch/x86/kvm/../../../virt/kvm/kvm_main.c:2557
[...] vfs_ioctl fs/ioctl.c:43
[...] do_vfs_ioctl+0x18c/0x1040 fs/ioctl.c:679
[...] SYSC_ioctl fs/ioctl.c:694
[...] SyS_ioctl+0x8f/0xc0 fs/ioctl.c:685
[...] entry_SYSCALL_64_fastpath+0x1f/0xc2
Reported-by: Dmitry Vyukov <[email protected]>
Cc: [email protected]
Fixes: d1442d85cc30 ("KVM: x86: Handle errors when RIP is set during far jumps")
Signed-off-by: Radim Krčmář <[email protected]>
|
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
int rc = X86EMUL_CONTINUE;
int mode = ctxt->mode;
int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
bool op_prefix = false;
bool has_seg_override = false;
struct opcode opcode;
ctxt->memop.type = OP_NONE;
ctxt->memopp = NULL;
ctxt->_eip = ctxt->eip;
ctxt->fetch.ptr = ctxt->fetch.data;
ctxt->fetch.end = ctxt->fetch.data + insn_len;
ctxt->opcode_len = 1;
if (insn_len > 0)
memcpy(ctxt->fetch.data, insn, insn_len);
else {
rc = __do_insn_fetch_bytes(ctxt, 1);
if (rc != X86EMUL_CONTINUE)
return rc;
}
switch (mode) {
case X86EMUL_MODE_REAL:
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
def_op_bytes = def_ad_bytes = 2;
break;
case X86EMUL_MODE_PROT32:
def_op_bytes = def_ad_bytes = 4;
break;
#ifdef CONFIG_X86_64
case X86EMUL_MODE_PROT64:
def_op_bytes = 4;
def_ad_bytes = 8;
break;
#endif
default:
return EMULATION_FAILED;
}
ctxt->op_bytes = def_op_bytes;
ctxt->ad_bytes = def_ad_bytes;
/* Legacy prefixes. */
for (;;) {
switch (ctxt->b = insn_fetch(u8, ctxt)) {
case 0x66: /* operand-size override */
op_prefix = true;
/* switch between 2/4 bytes */
ctxt->op_bytes = def_op_bytes ^ 6;
break;
case 0x67: /* address-size override */
if (mode == X86EMUL_MODE_PROT64)
/* switch between 4/8 bytes */
ctxt->ad_bytes = def_ad_bytes ^ 12;
else
/* switch between 2/4 bytes */
ctxt->ad_bytes = def_ad_bytes ^ 6;
break;
case 0x26: /* ES override */
case 0x2e: /* CS override */
case 0x36: /* SS override */
case 0x3e: /* DS override */
has_seg_override = true;
ctxt->seg_override = (ctxt->b >> 3) & 3;
break;
case 0x64: /* FS override */
case 0x65: /* GS override */
has_seg_override = true;
ctxt->seg_override = ctxt->b & 7;
break;
case 0x40 ... 0x4f: /* REX */
if (mode != X86EMUL_MODE_PROT64)
goto done_prefixes;
ctxt->rex_prefix = ctxt->b;
continue;
case 0xf0: /* LOCK */
ctxt->lock_prefix = 1;
break;
case 0xf2: /* REPNE/REPNZ */
case 0xf3: /* REP/REPE/REPZ */
ctxt->rep_prefix = ctxt->b;
break;
default:
goto done_prefixes;
}
/* Any legacy prefix after a REX prefix nullifies its effect. */
ctxt->rex_prefix = 0;
}
done_prefixes:
/* REX prefix. */
if (ctxt->rex_prefix & 8)
ctxt->op_bytes = 8; /* REX.W */
/* Opcode byte(s). */
opcode = opcode_table[ctxt->b];
/* Two-byte opcode? */
if (ctxt->b == 0x0f) {
ctxt->opcode_len = 2;
ctxt->b = insn_fetch(u8, ctxt);
opcode = twobyte_table[ctxt->b];
/* 0F_38 opcode map */
if (ctxt->b == 0x38) {
ctxt->opcode_len = 3;
ctxt->b = insn_fetch(u8, ctxt);
opcode = opcode_map_0f_38[ctxt->b];
}
}
ctxt->d = opcode.flags;
if (ctxt->d & ModRM)
ctxt->modrm = insn_fetch(u8, ctxt);
/* vex-prefix instructions are not implemented */
if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
(mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
ctxt->d = NotImpl;
}
while (ctxt->d & GroupMask) {
switch (ctxt->d & GroupMask) {
case Group:
goffset = (ctxt->modrm >> 3) & 7;
opcode = opcode.u.group[goffset];
break;
case GroupDual:
goffset = (ctxt->modrm >> 3) & 7;
if ((ctxt->modrm >> 6) == 3)
opcode = opcode.u.gdual->mod3[goffset];
else
opcode = opcode.u.gdual->mod012[goffset];
break;
case RMExt:
goffset = ctxt->modrm & 7;
opcode = opcode.u.group[goffset];
break;
case Prefix:
if (ctxt->rep_prefix && op_prefix)
return EMULATION_FAILED;
simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
switch (simd_prefix) {
case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
}
break;
case Escape:
if (ctxt->modrm > 0xbf)
opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
else
opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
break;
case InstrDual:
if ((ctxt->modrm >> 6) == 3)
opcode = opcode.u.idual->mod3;
else
opcode = opcode.u.idual->mod012;
break;
case ModeDual:
if (ctxt->mode == X86EMUL_MODE_PROT64)
opcode = opcode.u.mdual->mode64;
else
opcode = opcode.u.mdual->mode32;
break;
default:
return EMULATION_FAILED;
}
ctxt->d &= ~(u64)GroupMask;
ctxt->d |= opcode.flags;
}
/* Unrecognised? */
if (ctxt->d == 0)
return EMULATION_FAILED;
ctxt->execute = opcode.u.execute;
if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
return EMULATION_FAILED;
if (unlikely(ctxt->d &
(NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
No16))) {
/*
* These are copied unconditionally here, and checked unconditionally
* in x86_emulate_insn.
*/
ctxt->check_perm = opcode.check_perm;
ctxt->intercept = opcode.intercept;
if (ctxt->d & NotImpl)
return EMULATION_FAILED;
if (mode == X86EMUL_MODE_PROT64) {
if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
ctxt->op_bytes = 8;
else if (ctxt->d & NearBranch)
ctxt->op_bytes = 8;
}
if (ctxt->d & Op3264) {
if (mode == X86EMUL_MODE_PROT64)
ctxt->op_bytes = 8;
else
ctxt->op_bytes = 4;
}
if ((ctxt->d & No16) && ctxt->op_bytes == 2)
ctxt->op_bytes = 4;
if (ctxt->d & Sse)
ctxt->op_bytes = 16;
else if (ctxt->d & Mmx)
ctxt->op_bytes = 8;
}
/* ModRM and SIB bytes. */
if (ctxt->d & ModRM) {
rc = decode_modrm(ctxt, &ctxt->memop);
if (!has_seg_override) {
has_seg_override = true;
ctxt->seg_override = ctxt->modrm_seg;
}
} else if (ctxt->d & MemAbs)
rc = decode_abs(ctxt, &ctxt->memop);
if (rc != X86EMUL_CONTINUE)
goto done;
if (!has_seg_override)
ctxt->seg_override = VCPU_SREG_DS;
ctxt->memop.addr.mem.seg = ctxt->seg_override;
/*
* Decode and fetch the source operand: register, memory
* or immediate.
*/
rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
if (rc != X86EMUL_CONTINUE)
goto done;
/*
* Decode and fetch the second source operand: register, memory
* or immediate.
*/
rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
if (rc != X86EMUL_CONTINUE)
goto done;
/* Decode and fetch the destination operand: register or memory. */
rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
if (ctxt->rip_relative && likely(ctxt->memopp))
ctxt->memopp->addr.mem.ea = address_mask(ctxt,
ctxt->memopp->addr.mem.ea + ctxt->_eip);
done:
return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}
|
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
int rc = X86EMUL_CONTINUE;
int mode = ctxt->mode;
int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
bool op_prefix = false;
bool has_seg_override = false;
struct opcode opcode;
ctxt->memop.type = OP_NONE;
ctxt->memopp = NULL;
ctxt->_eip = ctxt->eip;
ctxt->fetch.ptr = ctxt->fetch.data;
ctxt->fetch.end = ctxt->fetch.data + insn_len;
ctxt->opcode_len = 1;
if (insn_len > 0)
memcpy(ctxt->fetch.data, insn, insn_len);
else {
rc = __do_insn_fetch_bytes(ctxt, 1);
if (rc != X86EMUL_CONTINUE)
return rc;
}
switch (mode) {
case X86EMUL_MODE_REAL:
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
def_op_bytes = def_ad_bytes = 2;
break;
case X86EMUL_MODE_PROT32:
def_op_bytes = def_ad_bytes = 4;
break;
#ifdef CONFIG_X86_64
case X86EMUL_MODE_PROT64:
def_op_bytes = 4;
def_ad_bytes = 8;
break;
#endif
default:
return EMULATION_FAILED;
}
ctxt->op_bytes = def_op_bytes;
ctxt->ad_bytes = def_ad_bytes;
/* Legacy prefixes. */
for (;;) {
switch (ctxt->b = insn_fetch(u8, ctxt)) {
case 0x66: /* operand-size override */
op_prefix = true;
/* switch between 2/4 bytes */
ctxt->op_bytes = def_op_bytes ^ 6;
break;
case 0x67: /* address-size override */
if (mode == X86EMUL_MODE_PROT64)
/* switch between 4/8 bytes */
ctxt->ad_bytes = def_ad_bytes ^ 12;
else
/* switch between 2/4 bytes */
ctxt->ad_bytes = def_ad_bytes ^ 6;
break;
case 0x26: /* ES override */
case 0x2e: /* CS override */
case 0x36: /* SS override */
case 0x3e: /* DS override */
has_seg_override = true;
ctxt->seg_override = (ctxt->b >> 3) & 3;
break;
case 0x64: /* FS override */
case 0x65: /* GS override */
has_seg_override = true;
ctxt->seg_override = ctxt->b & 7;
break;
case 0x40 ... 0x4f: /* REX */
if (mode != X86EMUL_MODE_PROT64)
goto done_prefixes;
ctxt->rex_prefix = ctxt->b;
continue;
case 0xf0: /* LOCK */
ctxt->lock_prefix = 1;
break;
case 0xf2: /* REPNE/REPNZ */
case 0xf3: /* REP/REPE/REPZ */
ctxt->rep_prefix = ctxt->b;
break;
default:
goto done_prefixes;
}
/* Any legacy prefix after a REX prefix nullifies its effect. */
ctxt->rex_prefix = 0;
}
done_prefixes:
/* REX prefix. */
if (ctxt->rex_prefix & 8)
ctxt->op_bytes = 8; /* REX.W */
/* Opcode byte(s). */
opcode = opcode_table[ctxt->b];
/* Two-byte opcode? */
if (ctxt->b == 0x0f) {
ctxt->opcode_len = 2;
ctxt->b = insn_fetch(u8, ctxt);
opcode = twobyte_table[ctxt->b];
/* 0F_38 opcode map */
if (ctxt->b == 0x38) {
ctxt->opcode_len = 3;
ctxt->b = insn_fetch(u8, ctxt);
opcode = opcode_map_0f_38[ctxt->b];
}
}
ctxt->d = opcode.flags;
if (ctxt->d & ModRM)
ctxt->modrm = insn_fetch(u8, ctxt);
/* vex-prefix instructions are not implemented */
if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
(mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
ctxt->d = NotImpl;
}
while (ctxt->d & GroupMask) {
switch (ctxt->d & GroupMask) {
case Group:
goffset = (ctxt->modrm >> 3) & 7;
opcode = opcode.u.group[goffset];
break;
case GroupDual:
goffset = (ctxt->modrm >> 3) & 7;
if ((ctxt->modrm >> 6) == 3)
opcode = opcode.u.gdual->mod3[goffset];
else
opcode = opcode.u.gdual->mod012[goffset];
break;
case RMExt:
goffset = ctxt->modrm & 7;
opcode = opcode.u.group[goffset];
break;
case Prefix:
if (ctxt->rep_prefix && op_prefix)
return EMULATION_FAILED;
simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
switch (simd_prefix) {
case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
}
break;
case Escape:
if (ctxt->modrm > 0xbf)
opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
else
opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
break;
case InstrDual:
if ((ctxt->modrm >> 6) == 3)
opcode = opcode.u.idual->mod3;
else
opcode = opcode.u.idual->mod012;
break;
case ModeDual:
if (ctxt->mode == X86EMUL_MODE_PROT64)
opcode = opcode.u.mdual->mode64;
else
opcode = opcode.u.mdual->mode32;
break;
default:
return EMULATION_FAILED;
}
ctxt->d &= ~(u64)GroupMask;
ctxt->d |= opcode.flags;
}
/* Unrecognised? */
if (ctxt->d == 0)
return EMULATION_FAILED;
ctxt->execute = opcode.u.execute;
if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
return EMULATION_FAILED;
if (unlikely(ctxt->d &
(NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
No16))) {
/*
* These are copied unconditionally here, and checked unconditionally
* in x86_emulate_insn.
*/
ctxt->check_perm = opcode.check_perm;
ctxt->intercept = opcode.intercept;
if (ctxt->d & NotImpl)
return EMULATION_FAILED;
if (mode == X86EMUL_MODE_PROT64) {
if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
ctxt->op_bytes = 8;
else if (ctxt->d & NearBranch)
ctxt->op_bytes = 8;
}
if (ctxt->d & Op3264) {
if (mode == X86EMUL_MODE_PROT64)
ctxt->op_bytes = 8;
else
ctxt->op_bytes = 4;
}
if ((ctxt->d & No16) && ctxt->op_bytes == 2)
ctxt->op_bytes = 4;
if (ctxt->d & Sse)
ctxt->op_bytes = 16;
else if (ctxt->d & Mmx)
ctxt->op_bytes = 8;
}
/* ModRM and SIB bytes. */
if (ctxt->d & ModRM) {
rc = decode_modrm(ctxt, &ctxt->memop);
if (!has_seg_override) {
has_seg_override = true;
ctxt->seg_override = ctxt->modrm_seg;
}
} else if (ctxt->d & MemAbs)
rc = decode_abs(ctxt, &ctxt->memop);
if (rc != X86EMUL_CONTINUE)
goto done;
if (!has_seg_override)
ctxt->seg_override = VCPU_SREG_DS;
ctxt->memop.addr.mem.seg = ctxt->seg_override;
/*
* Decode and fetch the source operand: register, memory
* or immediate.
*/
rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
if (rc != X86EMUL_CONTINUE)
goto done;
/*
* Decode and fetch the second source operand: register, memory
* or immediate.
*/
rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
if (rc != X86EMUL_CONTINUE)
goto done;
/* Decode and fetch the destination operand: register or memory. */
rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
if (ctxt->rip_relative && likely(ctxt->memopp))
ctxt->memopp->addr.mem.ea = address_mask(ctxt,
ctxt->memopp->addr.mem.ea + ctxt->_eip);
done:
return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}
|
C
|
linux
| 0 |
CVE-2019-5774
|
https://www.cvedetails.com/cve/CVE-2019-5774/
|
CWE-20
|
https://github.com/chromium/chromium/commit/b32471d5abb3b3a4fe56e1dd79871400b51a0cca
|
b32471d5abb3b3a4fe56e1dd79871400b51a0cca
|
Add .desktop file to download_file_types.asciipb
.desktop files act as shortcuts on Linux, allowing arbitrary code
execution. We should send pings for these files.
Bug: 904182
Change-Id: Ibc26141fb180e843e1ffaf3f78717a9109d2fa9a
Reviewed-on: https://chromium-review.googlesource.com/c/1344552
Reviewed-by: Varun Khaneja <[email protected]>
Commit-Queue: Daniel Rubery <[email protected]>
Cr-Commit-Position: refs/heads/master@{#611272}
|
void RecordInProgressDBCount(InProgressDBCountTypes type) {
UMA_HISTOGRAM_ENUMERATION("Download.InProgressDB.Counts", type);
}
|
void RecordInProgressDBCount(InProgressDBCountTypes type) {
UMA_HISTOGRAM_ENUMERATION("Download.InProgressDB.Counts", type);
}
|
C
|
Chrome
| 0 |
CVE-2018-12232
|
https://www.cvedetails.com/cve/CVE-2018-12232/
|
CWE-362
|
https://github.com/torvalds/linux/commit/6d8c50dcb029872b298eea68cc6209c866fd3e14
|
6d8c50dcb029872b298eea68cc6209c866fd3e14
|
socket: close race condition between sock_close() and sockfs_setattr()
fchownat() doesn't even hold a refcnt on the fd until it figures out
the fd is really needed (otherwise it is ignored), and it releases it after
it resolves the path. This means sock_close() could race with
sockfs_setattr(), which leads to a NULL pointer dereference
since typically we set sock->sk to NULL in ->release().
As pointed out by Al, this is unique to sockfs. So we can fix this
in socket layer by acquiring inode_lock in sock_close() and
checking against NULL in sockfs_setattr().
sock_release() is called in many places, only the sock_close()
path matters here. And fortunately, this should not affect normal
sock_close() as it is only called when the last fd refcnt is gone.
It only affects sock_close() with a parallel sockfs_setattr() in
progress, which is not common.
Fixes: 86741ec25462 ("net: core: Add a UID field to struct sock.")
Reported-by: shankarapailoor <[email protected]>
Cc: Tetsuo Handa <[email protected]>
Cc: Lorenzo Colitti <[email protected]>
Cc: Al Viro <[email protected]>
Signed-off-by: Cong Wang <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
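A minimal standalone C sketch of the fix shape the commit message above describes, using pthreads in place of the kernel's inode lock and simplified stand-in types: the attribute path takes the lock and re-checks sock->sk against NULL, because a concurrent close may have cleared it.
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct sock { int uid; };
struct socket { struct sock *sk; pthread_mutex_t inode_lock; };

static int sockfs_setattr_sketch(struct socket *sock, int new_uid)
{
    int err = -2;                 /* stand-in for -ENOENT */

    pthread_mutex_lock(&sock->inode_lock);
    if (sock->sk) {               /* may be NULL after a racing close */
        sock->sk->uid = new_uid;
        err = 0;
    }
    pthread_mutex_unlock(&sock->inode_lock);
    return err;
}

static void sock_close_sketch(struct socket *sock)
{
    pthread_mutex_lock(&sock->inode_lock);   /* serialize with setattr */
    sock->sk = NULL;                         /* ->release() clears sk */
    pthread_mutex_unlock(&sock->inode_lock);
}

int main(void)
{
    struct socket s = { .sk = &(struct sock){ .uid = 1 },
                        .inode_lock = PTHREAD_MUTEX_INITIALIZER };
    sock_close_sketch(&s);
    printf("setattr after close: %d\n", sockfs_setattr_sketch(&s, 0));
    return 0;
}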
SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
int, addrlen)
{
return __sys_connect(fd, uservaddr, addrlen);
}
|
SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
int, addrlen)
{
return __sys_connect(fd, uservaddr, addrlen);
}
|
C
|
linux
| 0 |
CVE-2012-5148
|
https://www.cvedetails.com/cve/CVE-2012-5148/
|
CWE-20
|
https://github.com/chromium/chromium/commit/e89cfcb9090e8c98129ae9160c513f504db74599
|
e89cfcb9090e8c98129ae9160c513f504db74599
|
Remove TabContents from TabStripModelObserver::TabDetachedAt.
BUG=107201
TEST=no visible change
Review URL: https://chromiumcodereview.appspot.com/11293205
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@167122 0039d316-1c4b-4281-b951-d872f2087c98
|
void RunMenuAt(const gfx::Point& point) {
if (menu_runner_->RunMenuAt(
tab_->GetWidget(), NULL, gfx::Rect(point, gfx::Size()),
views::MenuItemView::TOPLEFT, views::MenuRunner::HAS_MNEMONICS |
views::MenuRunner::CONTEXT_MENU) ==
views::MenuRunner::MENU_DELETED)
return;
}
|
void RunMenuAt(const gfx::Point& point) {
if (menu_runner_->RunMenuAt(
tab_->GetWidget(), NULL, gfx::Rect(point, gfx::Size()),
views::MenuItemView::TOPLEFT, views::MenuRunner::HAS_MNEMONICS |
views::MenuRunner::CONTEXT_MENU) ==
views::MenuRunner::MENU_DELETED)
return;
}
|
C
|
Chrome
| 0 |
CVE-2012-5139
|
https://www.cvedetails.com/cve/CVE-2012-5139/
|
CWE-416
|
https://github.com/chromium/chromium/commit/9e417dae2833230a651989bb4e56b835355dda39
|
9e417dae2833230a651989bb4e56b835355dda39
|
Tests were marked as Flaky.
BUG=151811,151810
[email protected],[email protected]
NOTRY=true
Review URL: https://chromiumcodereview.appspot.com/10968052
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@158204 0039d316-1c4b-4281-b951-d872f2087c98
|
~RestartTestJob() {}
|
~RestartTestJob() {}
|
C
|
Chrome
| 0 |
CVE-2012-6703
|
https://www.cvedetails.com/cve/CVE-2012-6703/
| null |
https://github.com/torvalds/linux/commit/b35cc8225845112a616e3a2266d2fde5ab13d3ab
|
b35cc8225845112a616e3a2266d2fde5ab13d3ab
|
ALSA: compress_core: integer overflow in snd_compr_allocate_buffer()
These are 32-bit values that come from the user; we need to check for
integer overflows, or we could end up allocating a smaller buffer than
expected.
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]>
|
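A minimal standalone C sketch of the overflow guard the commit above describes, with the field names (fragments, fragment_size) borrowed from the report rather than copied from the tree:
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Guard the fragments * fragment_size product before allocating. */
static void *alloc_compr_buffer(uint32_t fragments, uint32_t fragment_size)
{
    if (fragments == 0 || fragment_size == 0)
        return NULL;
    if (fragments > SIZE_MAX / fragment_size)   /* product would wrap */
        return NULL;
    return malloc((size_t)fragments * fragment_size);
}

int main(void)
{
    /* On a 32-bit size_t this product (2^32) wraps to 0 and the guard
     * rejects it; on 64-bit the honest 4 GiB request may simply fail. */
    void *p = alloc_compr_buffer(0x10000u, 0x10000u);
    printf("%s\n", p ? "allocated" : "rejected");
    free(p);
    return 0;
}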
static int snd_compr_drain(struct snd_compr_stream *stream)
{
int retval;
if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
stream->runtime->state == SNDRV_PCM_STATE_SETUP)
return -EPERM;
retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
if (!retval) {
stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
wake_up(&stream->runtime->sleep);
}
return retval;
}
|
static int snd_compr_drain(struct snd_compr_stream *stream)
{
int retval;
if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
stream->runtime->state == SNDRV_PCM_STATE_SETUP)
return -EPERM;
retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
if (!retval) {
stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
wake_up(&stream->runtime->sleep);
}
return retval;
}
|
C
|
linux
| 0 |
CVE-2017-1000251
|
https://www.cvedetails.com/cve/CVE-2017-1000251/
|
CWE-119
|
https://github.com/torvalds/linux/commit/f2fcfcd670257236ebf2088bbdf26f6a8ef459fe
|
f2fcfcd670257236ebf2088bbdf26f6a8ef459fe
|
Bluetooth: Add configuration support for ERTM and Streaming mode
Add support to config_req and config_rsp to configure ERTM and Streaming
mode. If the remote device specifies ERTM or Streaming mode, then the
same mode is proposed. Otherwise ERTM or Basic mode is used. And in case
of a state 2 device, the remote device should propose the same mode. If
not, then the channel gets disconnected.
Signed-off-by: Gustavo F. Padovan <[email protected]>
Signed-off-by: Marcel Holtmann <[email protected]>
|
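A standalone sketch of the negotiation rule the commit message states (echo the remote's ERTM/Streaming proposal, otherwise fall back to ERTM or Basic); the mode constants are stand-ins for the real L2CAP ones.
#include <stdio.h>

enum { MODE_BASIC, MODE_ERTM, MODE_STREAMING };

static int select_mode(int remote_mode, int ertm_supported)
{
    if (remote_mode == MODE_ERTM || remote_mode == MODE_STREAMING)
        return remote_mode;                    /* propose the same mode */
    return ertm_supported ? MODE_ERTM : MODE_BASIC;
}

int main(void)
{
    printf("%d %d\n", select_mode(MODE_STREAMING, 1),
           select_mode(MODE_BASIC, 0));
    return 0;
}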
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *nsk;
long timeo;
int err = 0;
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
goto done;
}
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk->sk_sleep, &wait);
while (!(nsk = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
err = -EAGAIN;
break;
}
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
break;
}
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
break;
}
}
set_current_state(TASK_RUNNING);
remove_wait_queue(sk->sk_sleep, &wait);
if (err)
goto done;
newsock->state = SS_CONNECTED;
BT_DBG("new socket %p", nsk);
done:
release_sock(sk);
return err;
}
|
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *nsk;
long timeo;
int err = 0;
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
goto done;
}
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk->sk_sleep, &wait);
while (!(nsk = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
err = -EAGAIN;
break;
}
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
break;
}
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
break;
}
}
set_current_state(TASK_RUNNING);
remove_wait_queue(sk->sk_sleep, &wait);
if (err)
goto done;
newsock->state = SS_CONNECTED;
BT_DBG("new socket %p", nsk);
done:
release_sock(sk);
return err;
}
|
C
|
linux
| 0 |
CVE-2019-13225
|
https://www.cvedetails.com/cve/CVE-2019-13225/
|
CWE-476
|
https://github.com/kkos/oniguruma/commit/c509265c5f6ae7264f7b8a8aae1cfa5fc59d108c
|
c509265c5f6ae7264f7b8a8aae1cfa5fc59d108c
|
Fix CVE-2019-13225: problem in converting if-then-else pattern to bytecode.
|
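The class of fix for this CWE-476 entry is to test an optional if-then-else branch node against NULL before compiling it rather than dereferencing unconditionally; a tiny hypothetical sketch of that pattern (these are not oniguruma's types):
#include <stdio.h>

struct node { const char *label; struct node *then_b, *else_b; };

static int compile(struct node *n)
{
    if (n == NULL)              /* absent branch: emit nothing, succeed */
        return 0;
    printf("emit %s\n", n->label);
    if (compile(n->then_b) != 0)
        return -1;
    return compile(n->else_b);  /* else may legitimately be NULL */
}

int main(void)
{
    struct node cond = { "cond", &(struct node){ "then", 0, 0 }, NULL };
    return compile(&cond);
}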
get_head_value_node(Node* node, int exact, regex_t* reg)
{
Node* n = NULL_NODE;
switch (NODE_TYPE(node)) {
case NODE_BACKREF:
case NODE_ALT:
#ifdef USE_CALL
case NODE_CALL:
#endif
break;
case NODE_CTYPE:
if (CTYPE_(node)->ctype == CTYPE_ANYCHAR)
break;
/* fall */
case NODE_CCLASS:
if (exact == 0) {
n = node;
}
break;
case NODE_LIST:
n = get_head_value_node(NODE_CAR(node), exact, reg);
break;
case NODE_STRING:
{
StrNode* sn = STR_(node);
if (sn->end <= sn->s)
break;
if (exact == 0 ||
! IS_IGNORECASE(reg->options) || NODE_STRING_IS_RAW(node)) {
n = node;
}
}
break;
case NODE_QUANT:
{
QuantNode* qn = QUANT_(node);
if (qn->lower > 0) {
if (IS_NOT_NULL(qn->head_exact))
n = qn->head_exact;
else
n = get_head_value_node(NODE_BODY(node), exact, reg);
}
}
break;
case NODE_BAG:
{
BagNode* en = BAG_(node);
switch (en->type) {
case BAG_OPTION:
{
OnigOptionType options = reg->options;
reg->options = BAG_(node)->o.options;
n = get_head_value_node(NODE_BODY(node), exact, reg);
reg->options = options;
}
break;
case BAG_MEMORY:
case BAG_STOP_BACKTRACK:
case BAG_IF_ELSE:
n = get_head_value_node(NODE_BODY(node), exact, reg);
break;
}
}
break;
case NODE_ANCHOR:
if (ANCHOR_(node)->type == ANCR_PREC_READ)
n = get_head_value_node(NODE_BODY(node), exact, reg);
break;
case NODE_GIMMICK:
default:
break;
}
return n;
}
|
get_head_value_node(Node* node, int exact, regex_t* reg)
{
Node* n = NULL_NODE;
switch (NODE_TYPE(node)) {
case NODE_BACKREF:
case NODE_ALT:
#ifdef USE_CALL
case NODE_CALL:
#endif
break;
case NODE_CTYPE:
if (CTYPE_(node)->ctype == CTYPE_ANYCHAR)
break;
/* fall */
case NODE_CCLASS:
if (exact == 0) {
n = node;
}
break;
case NODE_LIST:
n = get_head_value_node(NODE_CAR(node), exact, reg);
break;
case NODE_STRING:
{
StrNode* sn = STR_(node);
if (sn->end <= sn->s)
break;
if (exact == 0 ||
! IS_IGNORECASE(reg->options) || NODE_STRING_IS_RAW(node)) {
n = node;
}
}
break;
case NODE_QUANT:
{
QuantNode* qn = QUANT_(node);
if (qn->lower > 0) {
if (IS_NOT_NULL(qn->head_exact))
n = qn->head_exact;
else
n = get_head_value_node(NODE_BODY(node), exact, reg);
}
}
break;
case NODE_BAG:
{
BagNode* en = BAG_(node);
switch (en->type) {
case BAG_OPTION:
{
OnigOptionType options = reg->options;
reg->options = BAG_(node)->o.options;
n = get_head_value_node(NODE_BODY(node), exact, reg);
reg->options = options;
}
break;
case BAG_MEMORY:
case BAG_STOP_BACKTRACK:
case BAG_IF_ELSE:
n = get_head_value_node(NODE_BODY(node), exact, reg);
break;
}
}
break;
case NODE_ANCHOR:
if (ANCHOR_(node)->type == ANCR_PREC_READ)
n = get_head_value_node(NODE_BODY(node), exact, reg);
break;
case NODE_GIMMICK:
default:
break;
}
return n;
}
|
C
|
oniguruma
| 0 |
CVE-2018-20067
|
https://www.cvedetails.com/cve/CVE-2018-20067/
|
CWE-254
|
https://github.com/chromium/chromium/commit/a7d715ae5b654d1f98669fd979a00282a7229044
|
a7d715ae5b654d1f98669fd979a00282a7229044
|
Prevent renderer initiated back navigation to cancel a browser one.
Renderer initiated back/forward navigations must not be able to cancel ongoing
browser initiated navigation if they are not user initiated.
Note: 'normal' renderer initiated navigation uses the
FrameHost::BeginNavigation() path. A code similar to this patch is done
in NavigatorImpl::OnBeginNavigation().
Test:
-----
Added: NavigationBrowserTest.
* HistoryBackInBeforeUnload
* HistoryBackInBeforeUnloadAfterSetTimeout
* HistoryBackCancelPendingNavigationNoUserGesture
* HistoryBackCancelPendingNavigationUserGesture
Fixed:
* (WPT) .../the-history-interface/traverse_the_history_2.html
* (WPT) .../the-history-interface/traverse_the_history_3.html
* (WPT) .../the-history-interface/traverse_the_history_4.html
* (WPT) .../the-history-interface/traverse_the_history_5.html
Bug: 879965
Change-Id: I1a9bfaaea1ffc219e6c32f6e676b660e746c578c
Reviewed-on: https://chromium-review.googlesource.com/1209744
Commit-Queue: Arthur Sonzogni <[email protected]>
Reviewed-by: Daniel Cheng <[email protected]>
Reviewed-by: Mustaq Ahmed <[email protected]>
Reviewed-by: Camille Lamy <[email protected]>
Reviewed-by: Charlie Reis <[email protected]>
Cr-Commit-Position: refs/heads/master@{#592823}
|
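A sketch of the policy stated above, in standalone C rather than the browser's C++ and with hypothetical names: a renderer-initiated history traversal is dropped while a browser-initiated navigation is pending unless it carries a user gesture.
#include <stdbool.h>
#include <stdio.h>

struct nav_state { bool browser_nav_pending; };

static bool allow_history_traversal(const struct nav_state *s,
                                    bool renderer_initiated,
                                    bool has_user_gesture)
{
    if (renderer_initiated && s->browser_nav_pending && !has_user_gesture)
        return false;  /* would silently cancel the pending navigation */
    return true;
}

int main(void)
{
    struct nav_state s = { .browser_nav_pending = true };
    printf("%d %d\n",
           allow_history_traversal(&s, true, false),   /* 0: dropped */
           allow_history_traversal(&s, true, true));   /* 1: allowed */
    return 0;
}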
void LocalFrameClientImpl::RunScriptsAtDocumentIdle() {
if (web_frame_->Client())
web_frame_->Client()->RunScriptsAtDocumentIdle();
}
|
void LocalFrameClientImpl::RunScriptsAtDocumentIdle() {
if (web_frame_->Client())
web_frame_->Client()->RunScriptsAtDocumentIdle();
}
|
C
|
Chrome
| 0 |
CVE-2016-5218
|
https://www.cvedetails.com/cve/CVE-2016-5218/
|
CWE-20
|
https://github.com/chromium/chromium/commit/45d901b56f578a74b19ba0d10fa5c4c467f19303
|
45d901b56f578a74b19ba0d10fa5c4c467f19303
|
Paint tab groups with the group color.
* The background of TabGroupHeader now uses the group color.
* The backgrounds of tabs in the group are tinted with the group color.
This treatment, along with the colors chosen, is intended to be
a placeholder.
Bug: 905491
Change-Id: Ic808548f8eba23064606e7fb8c9bba281d0d117f
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1610504
Commit-Queue: Bret Sepulveda <[email protected]>
Reviewed-by: Taylor Bergquist <[email protected]>
Cr-Commit-Position: refs/heads/master@{#660498}
|
void GM2TabStyle::PaintTabBackground(gfx::Canvas* canvas,
TabState active_state,
int fill_id,
int y_inset,
const SkPath* clip) const {
DCHECK(!y_inset || fill_id);
const SkColor stroke_color =
tab_->controller()->GetToolbarTopSeparatorColor();
const bool paint_hover_effect =
active_state == TAB_INACTIVE && IsHoverActive();
const float stroke_thickness = GetStrokeThickness(active_state == TAB_ACTIVE);
PaintTabBackgroundFill(canvas, active_state, paint_hover_effect, fill_id,
y_inset);
if (stroke_thickness > 0) {
gfx::ScopedCanvas scoped_canvas(clip ? canvas : nullptr);
if (clip)
canvas->sk_canvas()->clipPath(*clip, SkClipOp::kDifference, true);
PaintBackgroundStroke(canvas, active_state, stroke_color);
}
PaintSeparators(canvas);
}
|
void GM2TabStyle::PaintTabBackground(gfx::Canvas* canvas,
bool active,
int fill_id,
int y_inset,
const SkPath* clip) const {
DCHECK(!y_inset || fill_id);
const SkColor active_color =
tab_->controller()->GetTabBackgroundColor(TAB_ACTIVE);
const SkColor inactive_color =
tab_->GetThemeProvider()->GetDisplayProperty(
ThemeProperties::SHOULD_FILL_BACKGROUND_TAB_COLOR)
? tab_->controller()->GetTabBackgroundColor(TAB_INACTIVE)
: SK_ColorTRANSPARENT;
const SkColor stroke_color =
tab_->controller()->GetToolbarTopSeparatorColor();
const bool paint_hover_effect = !active && IsHoverActive();
const float stroke_thickness = GetStrokeThickness(active);
PaintTabBackgroundFill(canvas, active, paint_hover_effect, active_color,
inactive_color, fill_id, y_inset);
if (stroke_thickness > 0) {
gfx::ScopedCanvas scoped_canvas(clip ? canvas : nullptr);
if (clip)
canvas->sk_canvas()->clipPath(*clip, SkClipOp::kDifference, true);
PaintBackgroundStroke(canvas, active, stroke_color);
}
PaintSeparators(canvas);
}
|
C
|
Chrome
| 1 |
null | null | null |
https://github.com/chromium/chromium/commit/5041f984669fe3a989a84c348eb838c8f7233f6b
|
5041f984669fe3a989a84c348eb838c8f7233f6b
|
AutoFill: Release the cached frame when we receive the frameDestroyed() message
from WebKit.
BUG=48857
TEST=none
Review URL: http://codereview.chromium.org/3173005
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@55789 0039d316-1c4b-4281-b951-d872f2087c98
|
void RenderView::showSpellingUI(bool show) {
Send(new ViewHostMsg_ShowSpellingPanel(routing_id_, show));
}
|
void RenderView::showSpellingUI(bool show) {
Send(new ViewHostMsg_ShowSpellingPanel(routing_id_, show));
}
|
C
|
Chrome
| 0 |
CVE-2018-17294
|
https://www.cvedetails.com/cve/CVE-2018-17294/
|
CWE-125
|
https://github.com/liblouis/liblouis/commit/5e4089659bb49b3095fa541fa6387b4c40d7396e
|
5e4089659bb49b3095fa541fa6387b4c40d7396e
|
Fix a buffer overflow
Fixes #635
Thanks to HongxuChen for reporting it
|
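The swapReplace() routine below guards each multi-unit write with a maxlength check; a minimal standalone sketch of that write-guard pattern, with simplified stand-in types:
#include <stdio.h>
#include <string.h>

typedef unsigned short widechar;

struct outstr { widechar chars[8]; int length; int maxlength; };

static int append(struct outstr *out, const widechar *src, int n)
{
    if (out->length + n > out->maxlength)   /* reject, don't truncate */
        return 0;
    memcpy(&out->chars[out->length], src, n * sizeof(*out->chars));
    out->length += n;
    return 1;
}

int main(void)
{
    struct outstr out = { .length = 0, .maxlength = 8 };
    widechar rep[6] = {1, 2, 3, 4, 5, 6};
    printf("%d %d\n", append(&out, rep, 6), append(&out, rep, 6));
    return 0;  /* prints "1 0": the second append would overflow */
}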
swapReplace(int start, int end, const TranslationTableHeader *table,
const InString *input, OutString *output, int *posMapping,
const widechar *passInstructions, int passIC) {
TranslationTableOffset swapRuleOffset;
TranslationTableRule *swapRule;
widechar *replacements;
int p;
swapRuleOffset = (passInstructions[passIC + 1] << 16) | passInstructions[passIC + 2];
swapRule = (TranslationTableRule *)&table->ruleArea[swapRuleOffset];
replacements = &swapRule->charsdots[swapRule->charslen];
for (p = start; p < end; p++) {
int rep;
int test;
int k;
if (swapRule->opcode == CTO_SwapDd) {
for (test = 0; test * 2 + 1 < swapRule->charslen; test++)
if (input->chars[p] == swapRule->charsdots[test * 2 + 1]) break;
if (test * 2 == swapRule->charslen) continue;
} else {
for (test = 0; test < swapRule->charslen; test++)
if (input->chars[p] == swapRule->charsdots[test]) break;
if (test == swapRule->charslen) continue;
}
k = 0;
for (rep = 0; rep < test; rep++)
if (swapRule->opcode == CTO_SwapCc)
k++;
else
k += replacements[k];
if (swapRule->opcode == CTO_SwapCc) {
if ((output->length + 1) > output->maxlength) return 0;
posMapping[output->length] = p;
output->chars[output->length++] = replacements[k];
} else {
int l = replacements[k] - 1;
int d = output->length + l;
if (d > output->maxlength) return 0;
while (--d >= output->length) posMapping[d] = p;
memcpy(&output->chars[output->length], &replacements[k + 1],
l * sizeof(*output->chars));
output->length += l;
}
}
return 1;
}
|
swapReplace(int start, int end, const TranslationTableHeader *table,
const InString *input, OutString *output, int *posMapping,
const widechar *passInstructions, int passIC) {
TranslationTableOffset swapRuleOffset;
TranslationTableRule *swapRule;
widechar *replacements;
int p;
swapRuleOffset = (passInstructions[passIC + 1] << 16) | passInstructions[passIC + 2];
swapRule = (TranslationTableRule *)&table->ruleArea[swapRuleOffset];
replacements = &swapRule->charsdots[swapRule->charslen];
for (p = start; p < end; p++) {
int rep;
int test;
int k;
if (swapRule->opcode == CTO_SwapDd) {
for (test = 0; test * 2 + 1 < swapRule->charslen; test++)
if (input->chars[p] == swapRule->charsdots[test * 2 + 1]) break;
if (test * 2 == swapRule->charslen) continue;
} else {
for (test = 0; test < swapRule->charslen; test++)
if (input->chars[p] == swapRule->charsdots[test]) break;
if (test == swapRule->charslen) continue;
}
k = 0;
for (rep = 0; rep < test; rep++)
if (swapRule->opcode == CTO_SwapCc)
k++;
else
k += replacements[k];
if (swapRule->opcode == CTO_SwapCc) {
if ((output->length + 1) > output->maxlength) return 0;
posMapping[output->length] = p;
output->chars[output->length++] = replacements[k];
} else {
int l = replacements[k] - 1;
int d = output->length + l;
if (d > output->maxlength) return 0;
while (--d >= output->length) posMapping[d] = p;
memcpy(&output->chars[output->length], &replacements[k + 1],
l * sizeof(*output->chars));
output->length += l;
}
}
return 1;
}
|
C
|
liblouis
| 0 |
CVE-2014-1713
|
https://www.cvedetails.com/cve/CVE-2014-1713/
|
CWE-399
|
https://github.com/chromium/chromium/commit/f85a87ec670ad0fce9d98d90c9a705b72a288154
|
f85a87ec670ad0fce9d98d90c9a705b72a288154
|
document.location bindings fix
BUG=352374
[email protected]
Review URL: https://codereview.chromium.org/196343011
git-svn-id: svn://svn.chromium.org/blink/trunk@169176 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
static void nullableLongAttributeAttributeGetter(const v8::PropertyCallbackInfo<v8::Value>& info)
{
TestObjectPython* imp = V8TestObjectPython::toNative(info.Holder());
bool isNull = false;
int jsValue = imp->nullableLongAttribute(isNull);
if (isNull) {
v8SetReturnValueNull(info);
return;
}
v8SetReturnValueInt(info, jsValue);
}
|
static void nullableLongAttributeAttributeGetter(const v8::PropertyCallbackInfo<v8::Value>& info)
{
TestObjectPython* imp = V8TestObjectPython::toNative(info.Holder());
bool isNull = false;
int jsValue = imp->nullableLongAttribute(isNull);
if (isNull) {
v8SetReturnValueNull(info);
return;
}
v8SetReturnValueInt(info, jsValue);
}
|
C
|
Chrome
| 0 |
CVE-2015-5307
|
https://www.cvedetails.com/cve/CVE-2015-5307/
|
CWE-399
|
https://github.com/torvalds/linux/commit/54a20552e1eae07aa240fa370a0293e006b5faed
|
54a20552e1eae07aa240fa370a0293e006b5faed
|
KVM: x86: work around infinite loop in microcode when #AC is delivered
It was found that a guest can DoS a host by triggering an infinite
stream of "alignment check" (#AC) exceptions. This causes the
microcode to enter an infinite loop where the core never receives
another interrupt. The host kernel panics pretty quickly due to the
effects (CVE-2015-5307).
Signed-off-by: Eric Northup <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]>
|
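A hypothetical model of the bookkeeping the commit above implies, not the VMX code itself: the hypervisor intercepts #AC (vector 17) instead of letting the CPU deliver it in a tight loop, which is conceptually one extra bit in the exception bitmap plus a handler that reflects the fault once.
#include <stdio.h>

#define DB_VECTOR 1
#define AC_VECTOR 17

static unsigned int exception_bitmap(void)
{
    /* vectors the hypervisor wants to see before the guest does */
    return (1u << DB_VECTOR) | (1u << AC_VECTOR);
}

static const char *handle_exception(int vector)
{
    if (vector == AC_VECTOR)
        return "reflect #AC into the guest once";  /* no host-side loop */
    return "default handling";
}

int main(void)
{
    printf("bitmap=%#x, #AC -> %s\n",
           exception_bitmap(), handle_exception(AC_VECTOR));
    return 0;
}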
static void __vmx_load_host_state(struct vcpu_vmx *vmx)
{
if (!vmx->host_state.loaded)
return;
++vmx->vcpu.stat.host_state_reload;
vmx->host_state.loaded = 0;
#ifdef CONFIG_X86_64
if (is_long_mode(&vmx->vcpu))
rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
if (vmx->host_state.gs_ldt_reload_needed) {
kvm_load_ldt(vmx->host_state.ldt_sel);
#ifdef CONFIG_X86_64
load_gs_index(vmx->host_state.gs_sel);
#else
loadsegment(gs, vmx->host_state.gs_sel);
#endif
}
if (vmx->host_state.fs_reload_needed)
loadsegment(fs, vmx->host_state.fs_sel);
#ifdef CONFIG_X86_64
if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
loadsegment(ds, vmx->host_state.ds_sel);
loadsegment(es, vmx->host_state.es_sel);
}
#endif
reload_tss();
#ifdef CONFIG_X86_64
wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
#endif
if (vmx->host_state.msr_host_bndcfgs)
wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
/*
* If the FPU is not active (through the host task or
* the guest vcpu), then restore the cr0.TS bit.
*/
if (!fpregs_active() && !vmx->vcpu.guest_fpu_loaded)
stts();
load_gdt(this_cpu_ptr(&host_gdt));
}
|
static void __vmx_load_host_state(struct vcpu_vmx *vmx)
{
if (!vmx->host_state.loaded)
return;
++vmx->vcpu.stat.host_state_reload;
vmx->host_state.loaded = 0;
#ifdef CONFIG_X86_64
if (is_long_mode(&vmx->vcpu))
rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
if (vmx->host_state.gs_ldt_reload_needed) {
kvm_load_ldt(vmx->host_state.ldt_sel);
#ifdef CONFIG_X86_64
load_gs_index(vmx->host_state.gs_sel);
#else
loadsegment(gs, vmx->host_state.gs_sel);
#endif
}
if (vmx->host_state.fs_reload_needed)
loadsegment(fs, vmx->host_state.fs_sel);
#ifdef CONFIG_X86_64
if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
loadsegment(ds, vmx->host_state.ds_sel);
loadsegment(es, vmx->host_state.es_sel);
}
#endif
reload_tss();
#ifdef CONFIG_X86_64
wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
#endif
if (vmx->host_state.msr_host_bndcfgs)
wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
/*
* If the FPU is not active (through the host task or
* the guest vcpu), then restore the cr0.TS bit.
*/
if (!fpregs_active() && !vmx->vcpu.guest_fpu_loaded)
stts();
load_gdt(this_cpu_ptr(&host_gdt));
}
|
C
|
linux
| 0 |
CVE-2013-1929
|
https://www.cvedetails.com/cve/CVE-2013-1929/
|
CWE-119
|
https://github.com/torvalds/linux/commit/715230a44310a8cf66fbfb5a46f9a62a9b2de424
|
715230a44310a8cf66fbfb5a46f9a62a9b2de424
|
tg3: fix length overflow in VPD firmware parsing
Commit 184b89044fb6e2a74611dafa69b1dce0d98612c6 ("tg3: Use VPD fw version
when present") introduced VPD parsing that contained a potential length
overflow.
Limit the hardware's reported firmware string length (max 255 bytes) to
stay inside the driver's firmware string length (32 bytes). On overflow,
truncate the formatted firmware string instead of potentially overwriting
portions of the tg3 struct.
http://cansecwest.com/slides/2013/PrivateCore%20CSW%202013.pdf
Signed-off-by: Kees Cook <[email protected]>
Reported-by: Oded Horovitz <[email protected]>
Reported-by: Brad Spengler <[email protected]>
Cc: [email protected]
Cc: Matt Carlson <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
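A standalone sketch of the truncation the commit above describes: the hardware-reported VPD length (up to 255) is clamped to the driver's fixed version buffer instead of being trusted. FW_VER_SIZE is a stand-in for the driver's 32-byte constant.
#include <stdio.h>
#include <string.h>

#define FW_VER_SIZE 32

static void copy_fw_ver(char dst[FW_VER_SIZE], const unsigned char *vpd,
                        unsigned int hw_len)
{
    unsigned int n = hw_len < FW_VER_SIZE - 1 ? hw_len : FW_VER_SIZE - 1;
    memcpy(dst, vpd, n);      /* never trust hw_len against dst's size */
    dst[n] = '\0';
}

int main(void)
{
    unsigned char vpd[255];
    char ver[FW_VER_SIZE];
    memset(vpd, 'A', sizeof(vpd));
    copy_fw_ver(ver, vpd, 255);   /* truncates to 31 chars + NUL */
    printf("%zu\n", strlen(ver));
    return 0;
}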
static u64 tg3_refclk_read(struct tg3 *tp)
{
u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
}
|
static u64 tg3_refclk_read(struct tg3 *tp)
{
u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
}
|
C
|
linux
| 0 |
CVE-2012-3552
|
https://www.cvedetails.com/cve/CVE-2012-3552/
|
CWE-362
|
https://github.com/torvalds/linux/commit/f6d8bd051c391c1c0458a30b2a7abcd939329259
|
f6d8bd051c391c1c0458a30b2a7abcd939329259
|
inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options.
The problem is that ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We can't insert an rcu_head in struct ip_options since it's included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
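A standalone model of the idea in the commit above: readers take one snapshot of the options pointer and never dereference the shared slot twice, while a writer publishes a fresh copy instead of mutating in place. The real code uses RCU; C11 atomics here just keep the sketch self-contained, and the names are hypothetical.
#include <stdatomic.h>
#include <stdio.h>

struct ip_opts { int optlen; };

static _Atomic(struct ip_opts *) inet_opt;

static int read_optlen(void)
{
    struct ip_opts *o = atomic_load_explicit(&inet_opt,
                                             memory_order_acquire);
    return o ? o->optlen : 0;   /* one snapshot, no second deref */
}

static void set_opts(struct ip_opts *fresh)
{
    atomic_store_explicit(&inet_opt, fresh, memory_order_release);
    /* the real code defers freeing the old copy until readers finish */
}

int main(void)
{
    static struct ip_opts o = { .optlen = 12 };
    set_opts(&o);
    printf("optlen=%d\n", read_optlen());
    return 0;
}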
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
__be16 port, u32 info, u8 *payload)
{
struct sock_exterr_skb *serr;
skb = skb_clone(skb, GFP_ATOMIC);
if (!skb)
return;
serr = SKB_EXT_ERR(skb);
serr->ee.ee_errno = err;
serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
serr->ee.ee_type = icmp_hdr(skb)->type;
serr->ee.ee_code = icmp_hdr(skb)->code;
serr->ee.ee_pad = 0;
serr->ee.ee_info = info;
serr->ee.ee_data = 0;
serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) -
skb_network_header(skb);
serr->port = port;
if (skb_pull(skb, payload - skb->data) != NULL) {
skb_reset_transport_header(skb);
if (sock_queue_err_skb(sk, skb) == 0)
return;
}
kfree_skb(skb);
}
|
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
__be16 port, u32 info, u8 *payload)
{
struct sock_exterr_skb *serr;
skb = skb_clone(skb, GFP_ATOMIC);
if (!skb)
return;
serr = SKB_EXT_ERR(skb);
serr->ee.ee_errno = err;
serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
serr->ee.ee_type = icmp_hdr(skb)->type;
serr->ee.ee_code = icmp_hdr(skb)->code;
serr->ee.ee_pad = 0;
serr->ee.ee_info = info;
serr->ee.ee_data = 0;
serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) -
skb_network_header(skb);
serr->port = port;
if (skb_pull(skb, payload - skb->data) != NULL) {
skb_reset_transport_header(skb);
if (sock_queue_err_skb(sk, skb) == 0)
return;
}
kfree_skb(skb);
}
|
C
|
linux
| 0 |
CVE-2012-1601
|
https://www.cvedetails.com/cve/CVE-2012-1601/
|
CWE-399
|
https://github.com/torvalds/linux/commit/9c895160d25a76c21b65bad141b08e8d4f99afef
|
9c895160d25a76c21b65bad141b08e8d4f99afef
|
KVM: Ensure all vcpus are consistent with in-kernel irqchip settings
(cherry picked from commit 3e515705a1f46beb1c942bb8043c16f8ac7b1e9e)
If some vcpus are created before KVM_CREATE_IRQCHIP, then
irqchip_in_kernel() and vcpu->arch.apic will be inconsistent, leading
to potential NULL pointer dereferences.
Fix by:
- ensuring that no vcpus are installed when KVM_CREATE_IRQCHIP is called
- ensuring that a vcpu has an apic if it is installed after KVM_CREATE_IRQCHIP
This is somewhat long-winded because vcpu->arch.apic is created
kvm->lock held.
Based on earlier patch by Michael Ellerman.
Signed-off-by: Michael Ellerman <[email protected]>
Signed-off-by: Avi Kivity <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
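A sketch of the first invariant the commit above enforces (an in-kernel irqchip may only be created while no vcpus exist), modeled with a plain counter under a pthread lock; all names are hypothetical.
#include <pthread.h>
#include <stdio.h>

struct vm {
    pthread_mutex_t lock;
    int online_vcpus;
    int irqchip_in_kernel;
};

static int create_irqchip(struct vm *vm)
{
    int r = 0;
    pthread_mutex_lock(&vm->lock);
    if (vm->online_vcpus)          /* too late: vcpus already exist */
        r = -22;                   /* stand-in for -EINVAL */
    else
        vm->irqchip_in_kernel = 1;
    pthread_mutex_unlock(&vm->lock);
    return r;
}

int main(void)
{
    struct vm vm = { .lock = PTHREAD_MUTEX_INITIALIZER,
                     .online_vcpus = 1 };
    printf("create_irqchip after vcpu: %d\n", create_irqchip(&vm));
    return 0;
}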
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
int r;
vcpu->arch.mtrr_state.have_fixed = 1;
vcpu_load(vcpu);
r = kvm_arch_vcpu_reset(vcpu);
if (r == 0)
r = kvm_mmu_setup(vcpu);
vcpu_put(vcpu);
return r;
}
|
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
int r;
vcpu->arch.mtrr_state.have_fixed = 1;
vcpu_load(vcpu);
r = kvm_arch_vcpu_reset(vcpu);
if (r == 0)
r = kvm_mmu_setup(vcpu);
vcpu_put(vcpu);
return r;
}
|
C
|
linux
| 0 |
CVE-2016-7533
|
https://www.cvedetails.com/cve/CVE-2016-7533/
|
CWE-125
|
https://github.com/ImageMagick/ImageMagick/commit/bef1e4f637d8f665bc133a9c6d30df08d983bc3a
|
bef1e4f637d8f665bc133a9c6d30df08d983bc3a
|
https://github.com/ImageMagick/ImageMagick/issues/120
|
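This CWE-125 entry concerns file-controlled palette records in the reader below: the start index and entry count both come from the file, so a fix of this class validates them against each other and against the allocated colormap before any copy loop runs. A hypothetical sketch of that validation, not ImageMagick's actual patch:
#include <stdio.h>

static int check_palette(unsigned start, unsigned count, unsigned colors)
{
    if (start > count)        /* inverted range in the file */
        return -1;
    if (count > colors)       /* more entries than slots allocated */
        return -1;
    return 0;
}

int main(void)
{
    printf("%d %d\n", check_palette(0, 256, 256),
           check_palette(300, 256, 256));
    return 0;
}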
static Image *ReadWPGImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
typedef struct
{
size_t FileId;
MagickOffsetType DataOffset;
unsigned int ProductType;
unsigned int FileType;
unsigned char MajorVersion;
unsigned char MinorVersion;
unsigned int EncryptKey;
unsigned int Reserved;
} WPGHeader;
typedef struct
{
unsigned char RecType;
size_t RecordLength;
} WPGRecord;
typedef struct
{
unsigned char Class;
unsigned char RecType;
size_t Extension;
size_t RecordLength;
} WPG2Record;
typedef struct
{
unsigned HorizontalUnits;
unsigned VerticalUnits;
unsigned char PosSizePrecision;
} WPG2Start;
typedef struct
{
unsigned int Width;
unsigned int Height;
unsigned int Depth;
unsigned int HorzRes;
unsigned int VertRes;
} WPGBitmapType1;
typedef struct
{
unsigned int Width;
unsigned int Height;
unsigned char Depth;
unsigned char Compression;
} WPG2BitmapType1;
typedef struct
{
unsigned int RotAngle;
unsigned int LowLeftX;
unsigned int LowLeftY;
unsigned int UpRightX;
unsigned int UpRightY;
unsigned int Width;
unsigned int Height;
unsigned int Depth;
unsigned int HorzRes;
unsigned int VertRes;
} WPGBitmapType2;
typedef struct
{
unsigned int StartIndex;
unsigned int NumOfEntries;
} WPGColorMapRec;
/*
typedef struct {
size_t PS_unknown1;
unsigned int PS_unknown2;
unsigned int PS_unknown3;
} WPGPSl1Record;
*/
Image
*image;
unsigned int
status;
WPGHeader
Header;
WPGRecord
Rec;
WPG2Record
Rec2;
WPG2Start StartWPG;
WPGBitmapType1
BitmapHeader1;
WPG2BitmapType1
Bitmap2Header1;
WPGBitmapType2
BitmapHeader2;
WPGColorMapRec
WPG_Palette;
int
i,
bpp,
WPG2Flags;
ssize_t
ldblk;
size_t
one;
unsigned char
*BImgBuff;
tCTM CTM; /*current transform matrix*/
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
one=1;
image=AcquireImage(image_info,exception);
image->depth=8;
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read WPG image.
*/
Header.FileId=ReadBlobLSBLong(image);
Header.DataOffset=(MagickOffsetType) ReadBlobLSBLong(image);
Header.ProductType=ReadBlobLSBShort(image);
Header.FileType=ReadBlobLSBShort(image);
Header.MajorVersion=ReadBlobByte(image);
Header.MinorVersion=ReadBlobByte(image);
Header.EncryptKey=ReadBlobLSBShort(image);
Header.Reserved=ReadBlobLSBShort(image);
if (Header.FileId!=0x435057FF || (Header.ProductType>>8)!=0x16)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (Header.EncryptKey!=0)
ThrowReaderException(CoderError,"EncryptedWPGImageFileNotSupported");
image->columns = 1;
image->rows = 1;
image->colors = 0;
bpp=0;
BitmapHeader2.RotAngle=0;
switch(Header.FileType)
{
case 1: /* WPG level 1 */
while(!EOFBlob(image)) /* object parser loop */
{
(void) SeekBlob(image,Header.DataOffset,SEEK_SET);
if(EOFBlob(image))
break;
Rec.RecType=(i=ReadBlobByte(image));
if(i==EOF)
break;
Rd_WP_DWORD(image,&Rec.RecordLength);
if(EOFBlob(image))
break;
Header.DataOffset=TellBlob(image)+Rec.RecordLength;
switch(Rec.RecType)
{
case 0x0B: /* bitmap type 1 */
BitmapHeader1.Width=ReadBlobLSBShort(image);
BitmapHeader1.Height=ReadBlobLSBShort(image);
if ((BitmapHeader1.Width == 0) || (BitmapHeader1.Height == 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
BitmapHeader1.Depth=ReadBlobLSBShort(image);
BitmapHeader1.HorzRes=ReadBlobLSBShort(image);
BitmapHeader1.VertRes=ReadBlobLSBShort(image);
if(BitmapHeader1.HorzRes && BitmapHeader1.VertRes)
{
image->units=PixelsPerCentimeterResolution;
image->resolution.x=BitmapHeader1.HorzRes/470.0;
image->resolution.y=BitmapHeader1.VertRes/470.0;
}
image->columns=BitmapHeader1.Width;
image->rows=BitmapHeader1.Height;
bpp=BitmapHeader1.Depth;
goto UnpackRaster;
case 0x0E: /*Color palette */
WPG_Palette.StartIndex=ReadBlobLSBShort(image);
WPG_Palette.NumOfEntries=ReadBlobLSBShort(image);
image->colors=WPG_Palette.NumOfEntries;
if (!AcquireImageColormap(image,image->colors,exception))
goto NoMemory;
for (i=WPG_Palette.StartIndex;
i < (int)WPG_Palette.NumOfEntries; i++)
{
image->colormap[i].red=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
image->colormap[i].green=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
}
break;
case 0x11: /* Start PS l1 */
if(Rec.RecordLength > 8)
image=ExtractPostscript(image,image_info,
TellBlob(image)+8, /* skip PS header in the wpg */
(ssize_t) Rec.RecordLength-8,exception);
break;
case 0x14: /* bitmap type 2 */
BitmapHeader2.RotAngle=ReadBlobLSBShort(image);
BitmapHeader2.LowLeftX=ReadBlobLSBShort(image);
BitmapHeader2.LowLeftY=ReadBlobLSBShort(image);
BitmapHeader2.UpRightX=ReadBlobLSBShort(image);
BitmapHeader2.UpRightY=ReadBlobLSBShort(image);
BitmapHeader2.Width=ReadBlobLSBShort(image);
BitmapHeader2.Height=ReadBlobLSBShort(image);
if ((BitmapHeader2.Width == 0) || (BitmapHeader2.Height == 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
BitmapHeader2.Depth=ReadBlobLSBShort(image);
BitmapHeader2.HorzRes=ReadBlobLSBShort(image);
BitmapHeader2.VertRes=ReadBlobLSBShort(image);
image->units=PixelsPerCentimeterResolution;
image->page.width=(unsigned int)
((BitmapHeader2.LowLeftX-BitmapHeader2.UpRightX)/470.0);
image->page.height=(unsigned int)
((BitmapHeader2.LowLeftX-BitmapHeader2.UpRightY)/470.0);
image->page.x=(int) (BitmapHeader2.LowLeftX/470.0);
image->page.y=(int) (BitmapHeader2.LowLeftX/470.0);
if(BitmapHeader2.HorzRes && BitmapHeader2.VertRes)
{
image->resolution.x=BitmapHeader2.HorzRes/470.0;
image->resolution.y=BitmapHeader2.VertRes/470.0;
}
image->columns=BitmapHeader2.Width;
image->rows=BitmapHeader2.Height;
bpp=BitmapHeader2.Depth;
UnpackRaster:
if ((image->colors == 0) && (bpp != 24))
{
image->colors=one << bpp;
if (!AcquireImageColormap(image,image->colors,exception))
{
NoMemory:
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
}
/* printf("Load default colormap \n"); */
for (i=0; (i < (int) image->colors) && (i < 256); i++)
{
image->colormap[i].red=ScaleCharToQuantum(WPG1_Palette[i].Red);
image->colormap[i].green=ScaleCharToQuantum(WPG1_Palette[i].Green);
image->colormap[i].blue=ScaleCharToQuantum(WPG1_Palette[i].Blue);
}
}
else
{
if (bpp < 24)
if ( (image->colors < (one << bpp)) && (bpp != 24) )
image->colormap=(PixelInfo *) ResizeQuantumMemory(
image->colormap,(size_t) (one << bpp),
sizeof(*image->colormap));
}
if (bpp == 1)
{
if(image->colormap[0].red==0 &&
image->colormap[0].green==0 &&
image->colormap[0].blue==0 &&
image->colormap[1].red==0 &&
image->colormap[1].green==0 &&
image->colormap[1].blue==0)
{ /* fix crippled monochrome palette */
image->colormap[1].red =
image->colormap[1].green =
image->colormap[1].blue = QuantumRange;
}
}
if(UnpackWPGRaster(image,bpp,exception) < 0)
/* The raster cannot be unpacked */
{
DecompressionFailed:
ThrowReaderException(CoderError,"UnableToDecompressImage");
}
if(Rec.RecType==0x14 && BitmapHeader2.RotAngle!=0 && !image_info->ping)
{
/* flop command */
if(BitmapHeader2.RotAngle & 0x8000)
{
Image
*flop_image;
flop_image = FlopImage(image, exception);
if (flop_image != (Image *) NULL) {
DuplicateBlob(flop_image,image);
(void) RemoveLastImageFromList(&image);
AppendImageToList(&image,flop_image);
}
}
/* flip command */
if(BitmapHeader2.RotAngle & 0x2000)
{
Image
*flip_image;
flip_image = FlipImage(image, exception);
if (flip_image != (Image *) NULL) {
DuplicateBlob(flip_image,image);
(void) RemoveLastImageFromList(&image);
AppendImageToList(&image,flip_image);
}
}
/* rotate command */
if(BitmapHeader2.RotAngle & 0x0FFF)
{
Image
*rotate_image;
rotate_image=RotateImage(image,(BitmapHeader2.RotAngle &
0x0FFF), exception);
if (rotate_image != (Image *) NULL) {
DuplicateBlob(rotate_image,image);
(void) RemoveLastImageFromList(&image);
AppendImageToList(&image,rotate_image);
}
}
}
/* Allocate next image structure. */
AcquireNextImage(image_info,image,exception);
image->depth=8;
if (image->next == (Image *) NULL)
goto Finish;
image=SyncNextImageInList(image);
image->columns=image->rows=0;
image->colors=0;
break;
case 0x1B: /* Postscript l2 */
if(Rec.RecordLength>0x3C)
image=ExtractPostscript(image,image_info,
TellBlob(image)+0x3C, /* skip PS l2 header in the wpg */
(ssize_t) Rec.RecordLength-0x3C,exception);
break;
}
}
break;
case 2: /* WPG level 2 */
(void) memset(CTM,0,sizeof(CTM));
StartWPG.PosSizePrecision = 0;
while(!EOFBlob(image)) /* object parser loop */
{
(void) SeekBlob(image,Header.DataOffset,SEEK_SET);
if(EOFBlob(image))
break;
Rec2.Class=(i=ReadBlobByte(image));
if(i==EOF)
break;
Rec2.RecType=(i=ReadBlobByte(image));
if(i==EOF)
break;
Rd_WP_DWORD(image,&Rec2.Extension);
Rd_WP_DWORD(image,&Rec2.RecordLength);
if(EOFBlob(image))
break;
Header.DataOffset=TellBlob(image)+Rec2.RecordLength;
switch(Rec2.RecType)
{
case 1:
StartWPG.HorizontalUnits=ReadBlobLSBShort(image);
StartWPG.VerticalUnits=ReadBlobLSBShort(image);
StartWPG.PosSizePrecision=ReadBlobByte(image);
break;
case 0x0C: /* Color palette */
WPG_Palette.StartIndex=ReadBlobLSBShort(image);
WPG_Palette.NumOfEntries=ReadBlobLSBShort(image);
image->colors=WPG_Palette.NumOfEntries;
if (AcquireImageColormap(image,image->colors,exception) == MagickFalse)
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
for (i=WPG_Palette.StartIndex;
i < (int)WPG_Palette.NumOfEntries; i++)
{
image->colormap[i].red=ScaleCharToQuantum((char)
ReadBlobByte(image));
image->colormap[i].green=ScaleCharToQuantum((char)
ReadBlobByte(image));
image->colormap[i].blue=ScaleCharToQuantum((char)
ReadBlobByte(image));
(void) ReadBlobByte(image); /*Opacity??*/
}
break;
case 0x0E:
Bitmap2Header1.Width=ReadBlobLSBShort(image);
Bitmap2Header1.Height=ReadBlobLSBShort(image);
if ((Bitmap2Header1.Width == 0) || (Bitmap2Header1.Height == 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
Bitmap2Header1.Depth=ReadBlobByte(image);
Bitmap2Header1.Compression=ReadBlobByte(image);
if(Bitmap2Header1.Compression > 1)
continue; /*Unknown compression method */
switch(Bitmap2Header1.Depth)
{
case 1:
bpp=1;
break;
case 2:
bpp=2;
break;
case 3:
bpp=4;
break;
case 4:
bpp=8;
break;
case 8:
bpp=24;
break;
default:
continue; /*Ignore raster with unknown depth*/
}
image->columns=Bitmap2Header1.Width;
image->rows=Bitmap2Header1.Height;
if ((image->colors == 0) && (bpp != 24))
{
size_t
one;
one=1;
image->colors=one << bpp;
if (!AcquireImageColormap(image,image->colors,exception))
goto NoMemory;
}
else
{
if(bpp < 24)
if( image->colors<(one << bpp) && bpp!=24 )
image->colormap=(PixelInfo *) ResizeQuantumMemory(
image->colormap,(size_t) (one << bpp),
sizeof(*image->colormap));
}
switch(Bitmap2Header1.Compression)
{
case 0: /*Uncompressed raster*/
{
ldblk=(ssize_t) ((bpp*image->columns+7)/8);
BImgBuff=(unsigned char *) AcquireQuantumMemory((size_t)
ldblk+1,sizeof(*BImgBuff));
if (BImgBuff == (unsigned char *) NULL)
goto NoMemory;
for(i=0; i< (ssize_t) image->rows; i++)
{
(void) ReadBlob(image,ldblk,BImgBuff);
InsertRow(image,BImgBuff,i,bpp,exception);
}
if(BImgBuff)
BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff);
break;
}
case 1: /*RLE for WPG2 */
{
if( UnpackWPG2Raster(image,bpp,exception) < 0)
goto DecompressionFailed;
break;
}
}
if(CTM[0][0]<0 && !image_info->ping)
{ /*?? RotAngle=360-RotAngle;*/
Image
*flop_image;
flop_image = FlopImage(image, exception);
if (flop_image != (Image *) NULL) {
DuplicateBlob(flop_image,image);
(void) RemoveLastImageFromList(&image);
AppendImageToList(&image,flop_image);
}
/* Try to change CTM according to Flip - I am not sure, must be checked.
Tx(0,0)=-1; Tx(1,0)=0; Tx(2,0)=0;
Tx(0,1)= 0; Tx(1,1)=1; Tx(2,1)=0;
Tx(0,2)=(WPG._2Rect.X_ur+WPG._2Rect.X_ll);
Tx(1,2)=0; Tx(2,2)=1; */
}
if(CTM[1][1]<0 && !image_info->ping)
{ /*?? RotAngle=360-RotAngle;*/
Image
*flip_image;
flip_image = FlipImage(image, exception);
if (flip_image != (Image *) NULL) {
DuplicateBlob(flip_image,image);
(void) RemoveLastImageFromList(&image);
AppendImageToList(&image,flip_image);
}
/* Try to change CTM according to Flip - I am not sure, must be checked.
float_matrix Tx(3,3);
Tx(0,0)= 1; Tx(1,0)= 0; Tx(2,0)=0;
Tx(0,1)= 0; Tx(1,1)=-1; Tx(2,1)=0;
Tx(0,2)= 0; Tx(1,2)=(WPG._2Rect.Y_ur+WPG._2Rect.Y_ll);
Tx(2,2)=1; */
}
/* Allocate next image structure. */
AcquireNextImage(image_info,image,exception);
image->depth=8;
if (image->next == (Image *) NULL)
goto Finish;
image=SyncNextImageInList(image);
image->columns=image->rows=1;
image->colors=0;
break;
case 0x12: /* Postscript WPG2*/
i=ReadBlobLSBShort(image);
if(Rec2.RecordLength > (unsigned int) i)
image=ExtractPostscript(image,image_info,
TellBlob(image)+i, /*skip PS header in the wpg2*/
(ssize_t) (Rec2.RecordLength-i-2),exception);
break;
case 0x1B: /*bitmap rectangle*/
WPG2Flags = LoadWPG2Flags(image,StartWPG.PosSizePrecision,NULL,&CTM);
(void) WPG2Flags;
break;
}
}
break;
default:
{
ThrowReaderException(CoderError,"DataEncodingSchemeIsNotSupported");
}
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
Finish:
(void) CloseBlob(image);
{
Image
*p;
ssize_t
scene=0;
/*
Rewind list, removing any empty images while rewinding.
*/
p=image;
image=NULL;
while (p != (Image *) NULL)
{
Image *tmp=p;
if ((p->rows == 0) || (p->columns == 0)) {
p=p->previous;
DeleteImageFromList(&tmp);
} else {
image=p;
p=p->previous;
}
}
/*
Fix scene numbers.
*/
for (p=image; p != (Image *) NULL; p=p->next)
p->scene=(size_t) scene++;
}
if (image == (Image *) NULL)
ThrowReaderException(CorruptImageError,
"ImageFileDoesNotContainAnyImageData");
return(image);
}
|
static Image *ReadWPGImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
typedef struct
{
size_t FileId;
MagickOffsetType DataOffset;
unsigned int ProductType;
unsigned int FileType;
unsigned char MajorVersion;
unsigned char MinorVersion;
unsigned int EncryptKey;
unsigned int Reserved;
} WPGHeader;
typedef struct
{
unsigned char RecType;
size_t RecordLength;
} WPGRecord;
typedef struct
{
unsigned char Class;
unsigned char RecType;
size_t Extension;
size_t RecordLength;
} WPG2Record;
typedef struct
{
unsigned HorizontalUnits;
unsigned VerticalUnits;
unsigned char PosSizePrecision;
} WPG2Start;
typedef struct
{
unsigned int Width;
unsigned int Height;
unsigned int Depth;
unsigned int HorzRes;
unsigned int VertRes;
} WPGBitmapType1;
typedef struct
{
unsigned int Width;
unsigned int Height;
unsigned char Depth;
unsigned char Compression;
} WPG2BitmapType1;
typedef struct
{
unsigned int RotAngle;
unsigned int LowLeftX;
unsigned int LowLeftY;
unsigned int UpRightX;
unsigned int UpRightY;
unsigned int Width;
unsigned int Height;
unsigned int Depth;
unsigned int HorzRes;
unsigned int VertRes;
} WPGBitmapType2;
typedef struct
{
unsigned int StartIndex;
unsigned int NumOfEntries;
} WPGColorMapRec;
/*
typedef struct {
size_t PS_unknown1;
unsigned int PS_unknown2;
unsigned int PS_unknown3;
} WPGPSl1Record;
*/
Image
*image;
unsigned int
status;
WPGHeader
Header;
WPGRecord
Rec;
WPG2Record
Rec2;
WPG2Start StartWPG;
WPGBitmapType1
BitmapHeader1;
WPG2BitmapType1
Bitmap2Header1;
WPGBitmapType2
BitmapHeader2;
WPGColorMapRec
WPG_Palette;
int
i,
bpp,
WPG2Flags;
ssize_t
ldblk;
size_t
one;
unsigned char
*BImgBuff;
tCTM CTM; /*current transform matrix*/
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
one=1;
image=AcquireImage(image_info,exception);
image->depth=8;
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read WPG image.
*/
Header.FileId=ReadBlobLSBLong(image);
Header.DataOffset=(MagickOffsetType) ReadBlobLSBLong(image);
Header.ProductType=ReadBlobLSBShort(image);
Header.FileType=ReadBlobLSBShort(image);
Header.MajorVersion=ReadBlobByte(image);
Header.MinorVersion=ReadBlobByte(image);
Header.EncryptKey=ReadBlobLSBShort(image);
Header.Reserved=ReadBlobLSBShort(image);
if (Header.FileId!=0x435057FF || (Header.ProductType>>8)!=0x16)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (Header.EncryptKey!=0)
ThrowReaderException(CoderError,"EncryptedWPGImageFileNotSupported");
image->columns = 1;
image->rows = 1;
image->colors = 0;
bpp=0;
BitmapHeader2.RotAngle=0;
switch(Header.FileType)
{
case 1: /* WPG level 1 */
while(!EOFBlob(image)) /* object parser loop */
{
(void) SeekBlob(image,Header.DataOffset,SEEK_SET);
if(EOFBlob(image))
break;
Rec.RecType=(i=ReadBlobByte(image));
if(i==EOF)
break;
Rd_WP_DWORD(image,&Rec.RecordLength);
if(EOFBlob(image))
break;
Header.DataOffset=TellBlob(image)+Rec.RecordLength;
switch(Rec.RecType)
{
case 0x0B: /* bitmap type 1 */
BitmapHeader1.Width=ReadBlobLSBShort(image);
BitmapHeader1.Height=ReadBlobLSBShort(image);
if ((BitmapHeader1.Width == 0) || (BitmapHeader1.Height == 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
BitmapHeader1.Depth=ReadBlobLSBShort(image);
BitmapHeader1.HorzRes=ReadBlobLSBShort(image);
BitmapHeader1.VertRes=ReadBlobLSBShort(image);
if(BitmapHeader1.HorzRes && BitmapHeader1.VertRes)
{
image->units=PixelsPerCentimeterResolution;
image->resolution.x=BitmapHeader1.HorzRes/470.0;
image->resolution.y=BitmapHeader1.VertRes/470.0;
}
image->columns=BitmapHeader1.Width;
image->rows=BitmapHeader1.Height;
bpp=BitmapHeader1.Depth;
goto UnpackRaster;
case 0x0E: /*Color palette */
WPG_Palette.StartIndex=ReadBlobLSBShort(image);
WPG_Palette.NumOfEntries=ReadBlobLSBShort(image);
image->colors=WPG_Palette.NumOfEntries;
if (!AcquireImageColormap(image,image->colors,exception))
goto NoMemory;
for (i=WPG_Palette.StartIndex;
i < (int)WPG_Palette.NumOfEntries; i++)
{
image->colormap[i].red=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
image->colormap[i].green=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
}
break;
case 0x11: /* Start PS l1 */
if(Rec.RecordLength > 8)
image=ExtractPostscript(image,image_info,
TellBlob(image)+8, /* skip PS header in the wpg */
(ssize_t) Rec.RecordLength-8,exception);
break;
case 0x14: /* bitmap type 2 */
BitmapHeader2.RotAngle=ReadBlobLSBShort(image);
BitmapHeader2.LowLeftX=ReadBlobLSBShort(image);
BitmapHeader2.LowLeftY=ReadBlobLSBShort(image);
BitmapHeader2.UpRightX=ReadBlobLSBShort(image);
BitmapHeader2.UpRightY=ReadBlobLSBShort(image);
BitmapHeader2.Width=ReadBlobLSBShort(image);
BitmapHeader2.Height=ReadBlobLSBShort(image);
if ((BitmapHeader2.Width == 0) || (BitmapHeader2.Height == 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
BitmapHeader2.Depth=ReadBlobLSBShort(image);
BitmapHeader2.HorzRes=ReadBlobLSBShort(image);
BitmapHeader2.VertRes=ReadBlobLSBShort(image);
image->units=PixelsPerCentimeterResolution;
image->page.width=(unsigned int)
((BitmapHeader2.LowLeftX-BitmapHeader2.UpRightX)/470.0);
image->page.height=(unsigned int)
((BitmapHeader2.LowLeftY-BitmapHeader2.UpRightY)/470.0);
image->page.x=(int) (BitmapHeader2.LowLeftX/470.0);
image->page.y=(int) (BitmapHeader2.LowLeftY/470.0);
if(BitmapHeader2.HorzRes && BitmapHeader2.VertRes)
{
image->resolution.x=BitmapHeader2.HorzRes/470.0;
image->resolution.y=BitmapHeader2.VertRes/470.0;
}
image->columns=BitmapHeader2.Width;
image->rows=BitmapHeader2.Height;
bpp=BitmapHeader2.Depth;
UnpackRaster:
if ((image->colors == 0) && (bpp != 24))
{
image->colors=one << bpp;
if (!AcquireImageColormap(image,image->colors,exception))
{
NoMemory:
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
}
/* printf("Load default colormap \n"); */
for (i=0; (i < (int) image->colors) && (i < 256); i++)
{
image->colormap[i].red=ScaleCharToQuantum(WPG1_Palette[i].Red);
image->colormap[i].green=ScaleCharToQuantum(WPG1_Palette[i].Green);
image->colormap[i].blue=ScaleCharToQuantum(WPG1_Palette[i].Blue);
}
}
else
{
if (bpp < 24)
if ( (image->colors < (one << bpp)) && (bpp != 24) )
image->colormap=(PixelInfo *) ResizeQuantumMemory(
image->colormap,(size_t) (one << bpp),
sizeof(*image->colormap));
}
if (bpp == 1)
{
if(image->colormap[0].red==0 &&
image->colormap[0].green==0 &&
image->colormap[0].blue==0 &&
image->colormap[1].red==0 &&
image->colormap[1].green==0 &&
image->colormap[1].blue==0)
{ /* fix crippled monochrome palette */
image->colormap[1].red =
image->colormap[1].green =
image->colormap[1].blue = QuantumRange;
}
}
if(UnpackWPGRaster(image,bpp,exception) < 0)
/* The raster cannot be unpacked */
{
DecompressionFailed:
ThrowReaderException(CoderError,"UnableToDecompressImage");
}
if(Rec.RecType==0x14 && BitmapHeader2.RotAngle!=0 && !image_info->ping)
{
/* flop command */
if(BitmapHeader2.RotAngle & 0x8000)
{
Image
*flop_image;
flop_image = FlopImage(image, exception);
if (flop_image != (Image *) NULL) {
DuplicateBlob(flop_image,image);
(void) RemoveLastImageFromList(&image);
AppendImageToList(&image,flop_image);
}
}
/* flip command */
if(BitmapHeader2.RotAngle & 0x2000)
{
Image
*flip_image;
flip_image = FlipImage(image, exception);
if (flip_image != (Image *) NULL) {
DuplicateBlob(flip_image,image);
(void) RemoveLastImageFromList(&image);
AppendImageToList(&image,flip_image);
}
}
/* rotate command */
if(BitmapHeader2.RotAngle & 0x0FFF)
{
Image
*rotate_image;
rotate_image=RotateImage(image,(BitmapHeader2.RotAngle &
0x0FFF), exception);
if (rotate_image != (Image *) NULL) {
DuplicateBlob(rotate_image,image);
(void) RemoveLastImageFromList(&image);
AppendImageToList(&image,rotate_image);
}
}
}
/* Allocate next image structure. */
AcquireNextImage(image_info,image,exception);
image->depth=8;
if (image->next == (Image *) NULL)
goto Finish;
image=SyncNextImageInList(image);
image->columns=image->rows=0;
image->colors=0;
break;
case 0x1B: /* Postscript l2 */
if(Rec.RecordLength>0x3C)
image=ExtractPostscript(image,image_info,
TellBlob(image)+0x3C, /* skip PS l2 header in the wpg */
(ssize_t) Rec.RecordLength-0x3C,exception);
break;
}
}
break;
case 2: /* WPG level 2 */
(void) memset(CTM,0,sizeof(CTM));
StartWPG.PosSizePrecision = 0;
while(!EOFBlob(image)) /* object parser loop */
{
(void) SeekBlob(image,Header.DataOffset,SEEK_SET);
if(EOFBlob(image))
break;
Rec2.Class=(i=ReadBlobByte(image));
if(i==EOF)
break;
Rec2.RecType=(i=ReadBlobByte(image));
if(i==EOF)
break;
Rd_WP_DWORD(image,&Rec2.Extension);
Rd_WP_DWORD(image,&Rec2.RecordLength);
if(EOFBlob(image))
break;
Header.DataOffset=TellBlob(image)+Rec2.RecordLength;
switch(Rec2.RecType)
{
case 1:
StartWPG.HorizontalUnits=ReadBlobLSBShort(image);
StartWPG.VerticalUnits=ReadBlobLSBShort(image);
StartWPG.PosSizePrecision=ReadBlobByte(image);
break;
case 0x0C: /* Color palette */
WPG_Palette.StartIndex=ReadBlobLSBShort(image);
WPG_Palette.NumOfEntries=ReadBlobLSBShort(image);
image->colors=WPG_Palette.NumOfEntries;
if (AcquireImageColormap(image,image->colors,exception) == MagickFalse)
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
for (i=WPG_Palette.StartIndex;
i < (int)WPG_Palette.NumOfEntries; i++)
{
image->colormap[i].red=ScaleCharToQuantum((char)
ReadBlobByte(image));
image->colormap[i].green=ScaleCharToQuantum((char)
ReadBlobByte(image));
image->colormap[i].blue=ScaleCharToQuantum((char)
ReadBlobByte(image));
(void) ReadBlobByte(image); /*Opacity??*/
}
break;
case 0x0E:
Bitmap2Header1.Width=ReadBlobLSBShort(image);
Bitmap2Header1.Height=ReadBlobLSBShort(image);
if ((Bitmap2Header1.Width == 0) || (Bitmap2Header1.Height == 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
Bitmap2Header1.Depth=ReadBlobByte(image);
Bitmap2Header1.Compression=ReadBlobByte(image);
if(Bitmap2Header1.Compression > 1)
continue; /*Unknown compression method */
switch(Bitmap2Header1.Depth)
{
case 1:
bpp=1;
break;
case 2:
bpp=2;
break;
case 3:
bpp=4;
break;
case 4:
bpp=8;
break;
case 8:
bpp=24;
break;
default:
continue; /*Ignore raster with unknown depth*/
}
image->columns=Bitmap2Header1.Width;
image->rows=Bitmap2Header1.Height;
if ((image->colors == 0) && (bpp != 24))
{
size_t
one;
one=1;
image->colors=one << bpp;
if (!AcquireImageColormap(image,image->colors,exception))
goto NoMemory;
}
else
{
if(bpp < 24)
if( image->colors<(one << bpp) && bpp!=24 )
image->colormap=(PixelInfo *) ResizeQuantumMemory(
image->colormap,(size_t) (one << bpp),
sizeof(*image->colormap));
}
switch(Bitmap2Header1.Compression)
{
case 0: /*Uncompressed raster*/
{
ldblk=(ssize_t) ((bpp*image->columns+7)/8);
BImgBuff=(unsigned char *) AcquireQuantumMemory((size_t)
ldblk,sizeof(*BImgBuff));
if (BImgBuff == (unsigned char *) NULL)
goto NoMemory;
for(i=0; i< (ssize_t) image->rows; i++)
{
(void) ReadBlob(image,ldblk,BImgBuff);
InsertRow(image,BImgBuff,i,bpp,exception);
}
if(BImgBuff)
BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff);
break;
}
case 1: /*RLE for WPG2 */
{
if( UnpackWPG2Raster(image,bpp,exception) < 0)
goto DecompressionFailed;
break;
}
}
if(CTM[0][0]<0 && !image_info->ping)
{ /*?? RotAngle=360-RotAngle;*/
Image
*flop_image;
flop_image = FlopImage(image, exception);
if (flop_image != (Image *) NULL) {
DuplicateBlob(flop_image,image);
(void) RemoveLastImageFromList(&image);
AppendImageToList(&image,flop_image);
}
/* Try to change CTM according to Flip - I am not sure, must be checked.
Tx(0,0)=-1; Tx(1,0)=0; Tx(2,0)=0;
Tx(0,1)= 0; Tx(1,1)=1; Tx(2,1)=0;
Tx(0,2)=(WPG._2Rect.X_ur+WPG._2Rect.X_ll);
Tx(1,2)=0; Tx(2,2)=1; */
}
if(CTM[1][1]<0 && !image_info->ping)
{ /*?? RotAngle=360-RotAngle;*/
Image
*flip_image;
flip_image = FlipImage(image, exception);
if (flip_image != (Image *) NULL) {
DuplicateBlob(flip_image,image);
(void) RemoveLastImageFromList(&image);
AppendImageToList(&image,flip_image);
}
/* Try to change CTM according to Flip - I am not sure, must be checked.
float_matrix Tx(3,3);
Tx(0,0)= 1; Tx(1,0)= 0; Tx(2,0)=0;
Tx(0,1)= 0; Tx(1,1)=-1; Tx(2,1)=0;
Tx(0,2)= 0; Tx(1,2)=(WPG._2Rect.Y_ur+WPG._2Rect.Y_ll);
Tx(2,2)=1; */
}
/* Allocate next image structure. */
AcquireNextImage(image_info,image,exception);
image->depth=8;
if (image->next == (Image *) NULL)
goto Finish;
image=SyncNextImageInList(image);
image->columns=image->rows=1;
image->colors=0;
break;
case 0x12: /* Postscript WPG2*/
i=ReadBlobLSBShort(image);
if(Rec2.RecordLength > (unsigned int) i)
image=ExtractPostscript(image,image_info,
TellBlob(image)+i, /*skip PS header in the wpg2*/
(ssize_t) (Rec2.RecordLength-i-2),exception);
break;
case 0x1B: /*bitmap rectangle*/
WPG2Flags = LoadWPG2Flags(image,StartWPG.PosSizePrecision,NULL,&CTM);
(void) WPG2Flags;
break;
}
}
break;
default:
{
ThrowReaderException(CoderError,"DataEncodingSchemeIsNotSupported");
}
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
Finish:
(void) CloseBlob(image);
{
Image
*p;
ssize_t
scene=0;
/*
Rewind list, removing any empty images while rewinding.
*/
p=image;
image=NULL;
while (p != (Image *) NULL)
{
Image *tmp=p;
if ((p->rows == 0) || (p->columns == 0)) {
p=p->previous;
DeleteImageFromList(&tmp);
} else {
image=p;
p=p->previous;
}
}
/*
Fix scene numbers.
*/
for (p=image; p != (Image *) NULL; p=p->next)
p->scene=(size_t) scene++;
}
if (image == (Image *) NULL)
ThrowReaderException(CorruptImageError,
"ImageFileDoesNotContainAnyImageData");
return(image);
}
|
C
|
ImageMagick
| 1 |
CVE-2015-6763
|
https://www.cvedetails.com/cve/CVE-2015-6763/
| null |
https://github.com/chromium/chromium/commit/f1574f25e1402e748bf2bd7e28ce3dd96ceb1ca4
|
f1574f25e1402e748bf2bd7e28ce3dd96ceb1ca4
|
MacViews: Enable secure text input for password Textfields.
In Cocoa the NSTextInputContext automatically enables secure text input
when activated and it's in the secure text entry mode.
RenderWidgetHostViewMac has done the same thing for ages, following the
WebKit example.
views::Textfield needs to do the same thing in a fashion that is
synchronized with RenderWidgetHostViewMac; otherwise race conditions
are possible when the Textfield gets focus and activates the secure text
input mode, and the RWHVM loses focus immediately afterwards and disables
the secure text input instead of leaving it in the enabled state.
BUG=818133,677220
Change-Id: I6db6c4b59e4a1a72cbb7f8c7056f71b04a3df08b
Reviewed-on: https://chromium-review.googlesource.com/943064
Commit-Queue: Michail Pishchagin <[email protected]>
Reviewed-by: Pavel Feldman <[email protected]>
Reviewed-by: Avi Drissman <[email protected]>
Reviewed-by: Peter Kasting <[email protected]>
Cr-Commit-Position: refs/heads/master@{#542517}
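To illustrate the synchronization problem described above, here is a minimal hypothetical C sketch (none of these names are Chromium or macOS APIs): a process-global secure-input mode is guarded by a reference count, so one view losing focus cannot disable the mode that another focused view still needs.

/* Hypothetical sketch, not Chromium code: refcount a process-global
 * secure-input mode so blur of one view cannot turn it off for another. */
static int g_secure_input_refcount = 0;

static void platform_enable_secure_input(void)  { /* toggle the OS mode on */ }
static void platform_disable_secure_input(void) { /* toggle the OS mode off */ }

void secure_input_acquire(void)
{
    if (g_secure_input_refcount++ == 0)
        platform_enable_secure_input();
}

void secure_input_release(void)
{
    if (g_secure_input_refcount > 0 && --g_secure_input_refcount == 0)
        platform_disable_secure_input();
}

With this shape, the "focus then immediate blur of the other widget" interleaving leaves the count at 1 rather than disabling the mode underneath the still-focused password field.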
|
void HTMLInputElement::DispatchChangeEventIfNeeded() {
if (isConnected() && input_type_->ShouldSendChangeEventAfterCheckedChanged())
DispatchChangeEvent();
}
|
void HTMLInputElement::DispatchChangeEventIfNeeded() {
if (isConnected() && input_type_->ShouldSendChangeEventAfterCheckedChanged())
DispatchChangeEvent();
}
|
C
|
Chrome
| 0 |
CVE-2013-4544
|
https://www.cvedetails.com/cve/CVE-2013-4544/
|
CWE-20
|
https://git.qemu.org/?p=qemu.git;a=commit;h=3c99afc779c2c78718a565ad8c5e98de7c2c7484
|
3c99afc779c2c78718a565ad8c5e98de7c2c7484
| null |
static void vmxnet3_reset_mac(VMXNET3State *s)
{
memcpy(&s->conf.macaddr.a, &s->perm_mac.a, sizeof(s->perm_mac.a));
VMW_CFPRN("MAC address set to: " VMXNET_MF, VMXNET_MA(s->conf.macaddr.a));
}
|
static void vmxnet3_reset_mac(VMXNET3State *s)
{
memcpy(&s->conf.macaddr.a, &s->perm_mac.a, sizeof(s->perm_mac.a));
VMW_CFPRN("MAC address set to: " VMXNET_MF, VMXNET_MA(s->conf.macaddr.a));
}
|
C
|
qemu
| 0 |
CVE-2014-1738
|
https://www.cvedetails.com/cve/CVE-2014-1738/
|
CWE-264
|
https://github.com/torvalds/linux/commit/2145e15e0557a01b9195d1c7199a1b92cb9be81f
|
2145e15e0557a01b9195d1c7199a1b92cb9be81f
|
floppy: don't write kernel-only members to FDRAWCMD ioctl output
Do not leak kernel-only floppy_raw_cmd structure members to userspace.
This includes the linked-list pointer and the pointer to the allocated
DMA space.
Signed-off-by: Matthew Daley <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
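As a hedged illustration of the principle behind this fix (the structs and names below are invented, not the real floppy.c layout): copy the user-visible members field by field instead of copying the whole kernel structure, so the list pointer and the DMA buffer address never reach userspace.

#include <string.h>

struct raw_cmd_user {                /* what userspace may see */
    unsigned int flags;
    unsigned char cmd[16];
    unsigned char reply[16];
};

struct raw_cmd_kernel {              /* kernel-internal superset */
    struct raw_cmd_kernel *next;     /* kernel-only: must not leak */
    void *dma_buffer;                /* kernel-only: must not leak */
    unsigned int flags;
    unsigned char cmd[16];
    unsigned char reply[16];
};

/* Copy only the public fields; the caller then copies *dst to userspace. */
static void export_raw_cmd(struct raw_cmd_user *dst,
                           const struct raw_cmd_kernel *src)
{
    memset(dst, 0, sizeof(*dst));    /* also avoids uninitialized padding */
    dst->flags = src->flags;
    memcpy(dst->cmd, src->cmd, sizeof(dst->cmd));
    memcpy(dst->reply, src->reply, sizeof(dst->reply));
}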
|
static inline void set_debugt(void) { }
|
static inline void set_debugt(void) { }
|
C
|
linux
| 0 |
CVE-2019-5799
|
https://www.cvedetails.com/cve/CVE-2019-5799/
|
CWE-20
|
https://github.com/chromium/chromium/commit/108147dfd1ea159fd3632ef92ccc4ab8952980c7
|
108147dfd1ea159fd3632ef92ccc4ab8952980c7
|
Inherit the navigation initiator when navigating instead of the parent/opener
Spec PR: https://github.com/w3c/webappsec-csp/pull/358
Bug: 905301, 894228, 836148
Change-Id: I43ada2266d42d1cd56dbe3c6dd89d115e878a83a
Reviewed-on: https://chromium-review.googlesource.com/c/1314633
Commit-Queue: Andy Paicu <[email protected]>
Reviewed-by: Mike West <[email protected]>
Cr-Commit-Position: refs/heads/master@{#610850}
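As a rough, non-authoritative sketch of the behavioral change (plain C with invented types, not Blink code): the policy applied to the new navigation is taken from the frame that initiated it, and the parent/opener is only a fallback when no initiator exists.

/* Invented stand-in for a frame carrying a serialized CSP. */
struct frame_stub { const char *csp; };

static const char *csp_for_navigation(const struct frame_stub *initiator,
                                      const struct frame_stub *parent_or_opener)
{
    if (initiator != 0)              /* the fix: prefer the navigation initiator */
        return initiator->csp;
    if (parent_or_opener != 0)       /* legacy fallback */
        return parent_or_opener->csp;
    return 0;
}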
|
static WebFeature GetUseCounterType(ContentSecurityPolicyHeaderType type) {
switch (type) {
case kContentSecurityPolicyHeaderTypeEnforce:
return WebFeature::kContentSecurityPolicy;
case kContentSecurityPolicyHeaderTypeReport:
return WebFeature::kContentSecurityPolicyReportOnly;
}
NOTREACHED();
return WebFeature::kNumberOfFeatures;
}
|
static WebFeature GetUseCounterType(ContentSecurityPolicyHeaderType type) {
switch (type) {
case kContentSecurityPolicyHeaderTypeEnforce:
return WebFeature::kContentSecurityPolicy;
case kContentSecurityPolicyHeaderTypeReport:
return WebFeature::kContentSecurityPolicyReportOnly;
}
NOTREACHED();
return WebFeature::kNumberOfFeatures;
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/27c68f543e5eba779902447445dfb05ec3f5bf75
|
27c68f543e5eba779902447445dfb05ec3f5bf75
|
Revert of Add accelerated VP9 decode infrastructure and an implementation for VA-API. (patchset #7 id:260001 of https://codereview.chromium.org/1318863003/ )
Reason for revert:
I think this patch broke the compile step for the Chromium Linux ChromeOS MSan Builder.
First failing build:
http://build.chromium.org/p/chromium.memory.fyi/builders/Chromium%20Linux%20ChromeOS%20MSan%20Builder/builds/8310
All recent builds:
http://build.chromium.org/p/chromium.memory.fyi/builders/Chromium%20Linux%20ChromeOS%20MSan%20Builder?numbuilds=200
Sorry for the revert. I'll re-revert if I'm wrong.
Cheers,
Tommy
Original issue's description:
> Add accelerated VP9 decode infrastructure and an implementation for VA-API.
>
> - Add a hardware/platform-independent VP9Decoder class and related
> infrastructure, implementing AcceleratedVideoDecoder interface. VP9Decoder
> performs the initial stages of the decode process, which are to be done
> on host/in software, such as stream parsing and reference frame management.
>
> - Add a VP9Accelerator interface, used by the VP9Decoder to offload the
> remaining stages of the decode process to hardware. VP9Accelerator
> implementations are platform-specific.
>
> - Add the first implementation of VP9Accelerator - VaapiVP9Accelerator - and
> integrate it with VaapiVideoDecodeAccelerator, for devices which provide
> hardware VP9 acceleration through VA-API. Hook it up to the new
> infrastructure and VP9Decoder.
>
> - Extend Vp9Parser to provide functionality required by VP9Decoder and
> VP9Accelerator, including superframe parsing, handling of loop filter
> and segmentation initialization, state persistence across frames and
> resetting when needed. Also add code calculating segmentation dequants
> and loop filter levels.
>
> - Update vp9_parser_unittest to the new Vp9Parser interface and flow.
>
> TEST=vp9_parser_unittest,vda_unittest,Chrome VP9 playback
> BUG=chrome-os-partner:41469,chrome-os-partner:41470,chromium:525331
> [email protected]
>
> Committed: https://crrev.com/e3cc0a661b8abfdc74f569940949bc1f336ece40
> Cr-Commit-Position: refs/heads/master@{#349312}
[email protected],[email protected],[email protected],[email protected],[email protected]
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chrome-os-partner:41469,chrome-os-partner:41470,chromium:525331
Review URL: https://codereview.chromium.org/1357513002
Cr-Commit-Position: refs/heads/master@{#349443}
|
VaapiVP9Picture* VP9Picture::AsVaapiVP9Picture() {
return nullptr;
}
|
VaapiVP9Picture* VP9Picture::AsVaapiVP9Picture() {
return nullptr;
}
|
C
|
Chrome
| 0 |
CVE-2018-20843
|
https://www.cvedetails.com/cve/CVE-2018-20843/
|
CWE-611
|
https://github.com/libexpat/libexpat/pull/262/commits/11f8838bf99ea0a6f0b76f9760c43704d00c4ff6
|
11f8838bf99ea0a6f0b76f9760c43704d00c4ff6
|
xmlparse.c: Fix extraction of namespace prefix from XML name (#186)
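As a rough illustration of what extracting the namespace prefix from an XML name involves (plain C, not the actual expat patch): the prefix is the portion of a qualified name before the first colon, and a name with no colon has no prefix.

#include <string.h>

/* Return the length of the namespace prefix of a qualified name, or 0 if
 * the name has no prefix. Stops at the *first* ':' rather than scanning
 * further into the string. */
static size_t xml_prefix_length(const char *qname)
{
    const char *colon = strchr(qname, ':');
    return (colon != 0) ? (size_t)(colon - qname) : 0;
}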
|
reportComment(XML_Parser parser, const ENCODING *enc,
const char *start, const char *end)
{
XML_Char *data;
if (!parser->m_commentHandler) {
if (parser->m_defaultHandler)
reportDefault(parser, enc, start, end);
return 1;
}
data = poolStoreString(&parser->m_tempPool,
enc,
start + enc->minBytesPerChar * 4,
end - enc->minBytesPerChar * 3);
if (!data)
return 0;
normalizeLines(data);
parser->m_commentHandler(parser->m_handlerArg, data);
poolClear(&parser->m_tempPool);
return 1;
}
|
reportComment(XML_Parser parser, const ENCODING *enc,
const char *start, const char *end)
{
XML_Char *data;
if (!parser->m_commentHandler) {
if (parser->m_defaultHandler)
reportDefault(parser, enc, start, end);
return 1;
}
data = poolStoreString(&parser->m_tempPool,
enc,
start + enc->minBytesPerChar * 4,
end - enc->minBytesPerChar * 3);
if (!data)
return 0;
normalizeLines(data);
parser->m_commentHandler(parser->m_handlerArg, data);
poolClear(&parser->m_tempPool);
return 1;
}
|
C
|
libexpat
| 0 |
CVE-2012-2816
|
https://www.cvedetails.com/cve/CVE-2012-2816/
| null |
https://github.com/chromium/chromium/commit/cd0bd79d6ebdb72183e6f0833673464cc10b3600
|
cd0bd79d6ebdb72183e6f0833673464cc10b3600
|
Convert plugin and GPU process to brokered handle duplication.
BUG=119250
Review URL: https://chromiumcodereview.appspot.com/9958034
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@132303 0039d316-1c4b-4281-b951-d872f2087c98
|
int num_done_bitstream_buffers() { return num_done_bitstream_buffers_; }
|
int num_done_bitstream_buffers() { return num_done_bitstream_buffers_; }
|
C
|
Chrome
| 0 |
CVE-2017-3733
|
https://www.cvedetails.com/cve/CVE-2017-3733/
|
CWE-20
|
https://github.com/openssl/openssl/commit/4ad93618d26a3ea23d36ad5498ff4f59eff3a4d2
|
4ad93618d26a3ea23d36ad5498ff4f59eff3a4d2
|
Don't change the state of the ETM flags until CCS processing
Changing the ciphersuite during a renegotiation can result in a crash
leading to a DoS attack. ETM has not been implemented in 1.1.0 for DTLS
so this is TLS only.
The problem is caused by changing the flag indicating whether to use ETM
or not immediately on negotiation of ETM, rather than at CCS. Therefore,
during a renegotiation, if the ETM state is changing (usually due to a
change of ciphersuite), then an error/crash will occur.
Due to the fact that there are separate CCS messages for read and write
we actually now need two flags to determine whether to use ETM or not.
CVE-2017-3733
Reviewed-by: Richard Levitte <[email protected]>
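A minimal sketch of the idea, with made-up names rather than OpenSSL's real record-layer state: the negotiated ETM choice is kept pending during the handshake and only promoted to the live per-direction flags when the corresponding CCS is processed, which is also why two flags are needed.

/* Invented state, not OpenSSL's: a pending ETM choice plus per-direction
 * live flags that only change at ChangeCipherSpec time. */
struct conn_state {
    int etm_negotiated;  /* set when the extension is negotiated */
    int etm_read;        /* consulted while decrypting incoming records */
    int etm_write;       /* consulted while encrypting outgoing records */
};

static void on_read_ccs(struct conn_state *s)
{
    s->etm_read = s->etm_negotiated;   /* switch the read side only now */
}

static void on_write_ccs(struct conn_state *s)
{
    s->etm_write = s->etm_negotiated;  /* switch the write side only now */
}

Until the relevant CCS arrives, records in flight keep being processed with the old setting, which is exactly what a mid-renegotiation ciphersuite change requires.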
|
int ssl3_read_bytes(SSL *s, int type, int *recvd_type, unsigned char *buf,
int len, int peek)
{
int al, i, j, ret;
unsigned int n, curr_rec, num_recs, read_bytes;
SSL3_RECORD *rr;
SSL3_BUFFER *rbuf;
void (*cb) (const SSL *ssl, int type2, int val) = NULL;
rbuf = &s->rlayer.rbuf;
if (!SSL3_BUFFER_is_initialised(rbuf)) {
/* Not initialized yet */
if (!ssl3_setup_read_buffer(s))
return (-1);
}
if ((type && (type != SSL3_RT_APPLICATION_DATA)
&& (type != SSL3_RT_HANDSHAKE)) || (peek
&& (type !=
SSL3_RT_APPLICATION_DATA))) {
SSLerr(SSL_F_SSL3_READ_BYTES, ERR_R_INTERNAL_ERROR);
return -1;
}
if ((type == SSL3_RT_HANDSHAKE) && (s->rlayer.handshake_fragment_len > 0))
/* (partially) satisfy request from storage */
{
unsigned char *src = s->rlayer.handshake_fragment;
unsigned char *dst = buf;
unsigned int k;
/* peek == 0 */
n = 0;
while ((len > 0) && (s->rlayer.handshake_fragment_len > 0)) {
*dst++ = *src++;
len--;
s->rlayer.handshake_fragment_len--;
n++;
}
/* move any remaining fragment bytes: */
for (k = 0; k < s->rlayer.handshake_fragment_len; k++)
s->rlayer.handshake_fragment[k] = *src++;
if (recvd_type != NULL)
*recvd_type = SSL3_RT_HANDSHAKE;
return n;
}
/*
* Now s->rlayer.handshake_fragment_len == 0 if type == SSL3_RT_HANDSHAKE.
*/
if (!ossl_statem_get_in_handshake(s) && SSL_in_init(s)) {
/* type == SSL3_RT_APPLICATION_DATA */
i = s->handshake_func(s);
if (i < 0)
return (i);
if (i == 0) {
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_SSL_HANDSHAKE_FAILURE);
return (-1);
}
}
start:
s->rwstate = SSL_NOTHING;
/*-
* For each record 'i' up to |num_recs|
* rr[i].type - is the type of record
* rr[i].data, - data
* rr[i].off, - offset into 'data' for next read
* rr[i].length, - number of bytes.
*/
rr = s->rlayer.rrec;
num_recs = RECORD_LAYER_get_numrpipes(&s->rlayer);
do {
/* get new records if necessary */
if (num_recs == 0) {
ret = ssl3_get_record(s);
if (ret <= 0)
return (ret);
num_recs = RECORD_LAYER_get_numrpipes(&s->rlayer);
if (num_recs == 0) {
/* Shouldn't happen */
al = SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_SSL3_READ_BYTES, ERR_R_INTERNAL_ERROR);
goto f_err;
}
}
/* Skip over any records we have already read */
for (curr_rec = 0;
curr_rec < num_recs && SSL3_RECORD_is_read(&rr[curr_rec]);
curr_rec++) ;
if (curr_rec == num_recs) {
RECORD_LAYER_set_numrpipes(&s->rlayer, 0);
num_recs = 0;
curr_rec = 0;
}
} while (num_recs == 0);
rr = &rr[curr_rec];
/*
* Reset the count of consecutive warning alerts if we've got a non-empty
* record that isn't an alert.
*/
if (SSL3_RECORD_get_type(rr) != SSL3_RT_ALERT
&& SSL3_RECORD_get_length(rr) != 0)
s->rlayer.alert_count = 0;
/* we now have a packet which can be read and processed */
if (s->s3->change_cipher_spec /* set when we receive ChangeCipherSpec,
* reset by ssl3_get_finished */
&& (SSL3_RECORD_get_type(rr) != SSL3_RT_HANDSHAKE)) {
al = SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_DATA_BETWEEN_CCS_AND_FINISHED);
goto f_err;
}
/*
* If the other end has shut down, throw anything we read away (even in
* 'peek' mode)
*/
if (s->shutdown & SSL_RECEIVED_SHUTDOWN) {
SSL3_RECORD_set_length(rr, 0);
s->rwstate = SSL_NOTHING;
return (0);
}
if (type == SSL3_RECORD_get_type(rr)
|| (SSL3_RECORD_get_type(rr) == SSL3_RT_CHANGE_CIPHER_SPEC
&& type == SSL3_RT_HANDSHAKE && recvd_type != NULL)) {
/*
* SSL3_RT_APPLICATION_DATA or
* SSL3_RT_HANDSHAKE or
* SSL3_RT_CHANGE_CIPHER_SPEC
*/
/*
* make sure that we are not getting application data when we are
* doing a handshake for the first time
*/
if (SSL_in_init(s) && (type == SSL3_RT_APPLICATION_DATA) &&
(s->enc_read_ctx == NULL)) {
al = SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_APP_DATA_IN_HANDSHAKE);
goto f_err;
}
if (type == SSL3_RT_HANDSHAKE
&& SSL3_RECORD_get_type(rr) == SSL3_RT_CHANGE_CIPHER_SPEC
&& s->rlayer.handshake_fragment_len > 0) {
al = SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_CCS_RECEIVED_EARLY);
goto f_err;
}
if (recvd_type != NULL)
*recvd_type = SSL3_RECORD_get_type(rr);
if (len <= 0)
return (len);
read_bytes = 0;
do {
if ((unsigned int)len - read_bytes > SSL3_RECORD_get_length(rr))
n = SSL3_RECORD_get_length(rr);
else
n = (unsigned int)len - read_bytes;
memcpy(buf, &(rr->data[rr->off]), n);
buf += n;
if (peek) {
/* Mark any zero length record as consumed CVE-2016-6305 */
if (SSL3_RECORD_get_length(rr) == 0)
SSL3_RECORD_set_read(rr);
} else {
SSL3_RECORD_sub_length(rr, n);
SSL3_RECORD_add_off(rr, n);
if (SSL3_RECORD_get_length(rr) == 0) {
s->rlayer.rstate = SSL_ST_READ_HEADER;
SSL3_RECORD_set_off(rr, 0);
SSL3_RECORD_set_read(rr);
}
}
if (SSL3_RECORD_get_length(rr) == 0
|| (peek && n == SSL3_RECORD_get_length(rr))) {
curr_rec++;
rr++;
}
read_bytes += n;
} while (type == SSL3_RT_APPLICATION_DATA && curr_rec < num_recs
&& read_bytes < (unsigned int)len);
if (read_bytes == 0) {
/* We must have read empty records. Get more data */
goto start;
}
if (!peek && curr_rec == num_recs
&& (s->mode & SSL_MODE_RELEASE_BUFFERS)
&& SSL3_BUFFER_get_left(rbuf) == 0)
ssl3_release_read_buffer(s);
return read_bytes;
}
/*
* If we get here, then type != rr->type; if we have a handshake message,
* then it was unexpected (Hello Request or Client Hello) or invalid (we
* were actually expecting a CCS).
*/
/*
* Let's just double check that we've not got an SSLv2 record
*/
if (rr->rec_version == SSL2_VERSION) {
/*
* Should never happen. ssl3_get_record() should only give us an SSLv2
* record back if this is the first packet and we are looking for an
* initial ClientHello. Therefore |type| should always be equal to
* |rr->type|. If not then something has gone horribly wrong
*/
al = SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_SSL3_READ_BYTES, ERR_R_INTERNAL_ERROR);
goto f_err;
}
if (s->method->version == TLS_ANY_VERSION
&& (s->server || rr->type != SSL3_RT_ALERT)) {
/*
* If we've got this far and still haven't decided on what version
* we're using then this must be a client side alert we're dealing with
* (we don't allow heartbeats yet). We shouldn't be receiving anything
* other than a ClientHello if we are a server.
*/
s->version = rr->rec_version;
al = SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_UNEXPECTED_MESSAGE);
goto f_err;
}
/*
* In case of record types for which we have 'fragment' storage, fill
* that so that we can process the data at a fixed place.
*/
{
unsigned int dest_maxlen = 0;
unsigned char *dest = NULL;
unsigned int *dest_len = NULL;
if (SSL3_RECORD_get_type(rr) == SSL3_RT_HANDSHAKE) {
dest_maxlen = sizeof s->rlayer.handshake_fragment;
dest = s->rlayer.handshake_fragment;
dest_len = &s->rlayer.handshake_fragment_len;
} else if (SSL3_RECORD_get_type(rr) == SSL3_RT_ALERT) {
dest_maxlen = sizeof s->rlayer.alert_fragment;
dest = s->rlayer.alert_fragment;
dest_len = &s->rlayer.alert_fragment_len;
}
if (dest_maxlen > 0) {
n = dest_maxlen - *dest_len; /* available space in 'dest' */
if (SSL3_RECORD_get_length(rr) < n)
n = SSL3_RECORD_get_length(rr); /* available bytes */
/* now move 'n' bytes: */
while (n-- > 0) {
dest[(*dest_len)++] =
SSL3_RECORD_get_data(rr)[SSL3_RECORD_get_off(rr)];
SSL3_RECORD_add_off(rr, 1);
SSL3_RECORD_add_length(rr, -1);
}
if (*dest_len < dest_maxlen) {
SSL3_RECORD_set_read(rr);
goto start; /* fragment was too small */
}
}
}
/*-
* s->rlayer.handshake_fragment_len == 4 iff rr->type == SSL3_RT_HANDSHAKE;
* s->rlayer.alert_fragment_len == 2 iff rr->type == SSL3_RT_ALERT.
* (Possibly rr is 'empty' now, i.e. rr->length may be 0.)
*/
/* If we are a client, check for an incoming 'Hello Request': */
if ((!s->server) &&
(s->rlayer.handshake_fragment_len >= 4) &&
(s->rlayer.handshake_fragment[0] == SSL3_MT_HELLO_REQUEST) &&
(s->session != NULL) && (s->session->cipher != NULL)) {
s->rlayer.handshake_fragment_len = 0;
if ((s->rlayer.handshake_fragment[1] != 0) ||
(s->rlayer.handshake_fragment[2] != 0) ||
(s->rlayer.handshake_fragment[3] != 0)) {
al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_BAD_HELLO_REQUEST);
goto f_err;
}
if (s->msg_callback)
s->msg_callback(0, s->version, SSL3_RT_HANDSHAKE,
s->rlayer.handshake_fragment, 4, s,
s->msg_callback_arg);
if (SSL_is_init_finished(s) &&
!(s->s3->flags & SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS) &&
!s->s3->renegotiate) {
ssl3_renegotiate(s);
if (ssl3_renegotiate_check(s)) {
i = s->handshake_func(s);
if (i < 0)
return (i);
if (i == 0) {
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_SSL_HANDSHAKE_FAILURE);
return (-1);
}
if (!(s->mode & SSL_MODE_AUTO_RETRY)) {
if (SSL3_BUFFER_get_left(rbuf) == 0) {
/* no read-ahead left? */
BIO *bio;
/*
* In the case where we try to read application data,
* but we trigger an SSL handshake, we return -1 with
* the retry option set. Otherwise renegotiation may
* cause nasty problems in the blocking world
*/
s->rwstate = SSL_READING;
bio = SSL_get_rbio(s);
BIO_clear_retry_flags(bio);
BIO_set_retry_read(bio);
return (-1);
}
}
} else {
SSL3_RECORD_set_read(rr);
}
} else {
/* Does this ever happen? */
SSL3_RECORD_set_read(rr);
}
/*
* we either finished a handshake or ignored the request, now try
* again to obtain the (application) data we were asked for
*/
goto start;
}
/*
* If we are a server and get a client hello when renegotiation isn't
* allowed send back a no renegotiation alert and carry on. WARNING:
* experimental code, needs reviewing (steve)
*/
if (s->server &&
SSL_is_init_finished(s) &&
!s->s3->send_connection_binding &&
(s->version > SSL3_VERSION) &&
(s->rlayer.handshake_fragment_len >= 4) &&
(s->rlayer.handshake_fragment[0] == SSL3_MT_CLIENT_HELLO) &&
(s->session != NULL) && (s->session->cipher != NULL) &&
!(s->ctx->options & SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION)) {
SSL3_RECORD_set_length(rr, 0);
SSL3_RECORD_set_read(rr);
ssl3_send_alert(s, SSL3_AL_WARNING, SSL_AD_NO_RENEGOTIATION);
goto start;
}
if (s->rlayer.alert_fragment_len >= 2) {
int alert_level = s->rlayer.alert_fragment[0];
int alert_descr = s->rlayer.alert_fragment[1];
s->rlayer.alert_fragment_len = 0;
if (s->msg_callback)
s->msg_callback(0, s->version, SSL3_RT_ALERT,
s->rlayer.alert_fragment, 2, s,
s->msg_callback_arg);
if (s->info_callback != NULL)
cb = s->info_callback;
else if (s->ctx->info_callback != NULL)
cb = s->ctx->info_callback;
if (cb != NULL) {
j = (alert_level << 8) | alert_descr;
cb(s, SSL_CB_READ_ALERT, j);
}
if (alert_level == SSL3_AL_WARNING) {
s->s3->warn_alert = alert_descr;
SSL3_RECORD_set_read(rr);
s->rlayer.alert_count++;
if (s->rlayer.alert_count == MAX_WARN_ALERT_COUNT) {
al = SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_TOO_MANY_WARN_ALERTS);
goto f_err;
}
if (alert_descr == SSL_AD_CLOSE_NOTIFY) {
s->shutdown |= SSL_RECEIVED_SHUTDOWN;
return (0);
}
/*
* This is a warning but we receive it if we requested
* renegotiation and the peer denied it. Terminate with a fatal
* alert because if the application tried to renegotiate it
* presumably had a good reason and expects it to succeed. In
* future we might have a renegotiation where we don't care if
* the peer refused it where we carry on.
*/
else if (alert_descr == SSL_AD_NO_RENEGOTIATION) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_NO_RENEGOTIATION);
goto f_err;
}
#ifdef SSL_AD_MISSING_SRP_USERNAME
else if (alert_descr == SSL_AD_MISSING_SRP_USERNAME)
return (0);
#endif
} else if (alert_level == SSL3_AL_FATAL) {
char tmp[16];
s->rwstate = SSL_NOTHING;
s->s3->fatal_alert = alert_descr;
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_AD_REASON_OFFSET + alert_descr);
BIO_snprintf(tmp, sizeof tmp, "%d", alert_descr);
ERR_add_error_data(2, "SSL alert number ", tmp);
s->shutdown |= SSL_RECEIVED_SHUTDOWN;
SSL3_RECORD_set_read(rr);
SSL_CTX_remove_session(s->session_ctx, s->session);
return (0);
} else {
al = SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_UNKNOWN_ALERT_TYPE);
goto f_err;
}
goto start;
}
if (s->shutdown & SSL_SENT_SHUTDOWN) { /* but we have not received a
* shutdown */
s->rwstate = SSL_NOTHING;
SSL3_RECORD_set_length(rr, 0);
SSL3_RECORD_set_read(rr);
return (0);
}
if (SSL3_RECORD_get_type(rr) == SSL3_RT_CHANGE_CIPHER_SPEC) {
al = SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_CCS_RECEIVED_EARLY);
goto f_err;
}
/*
* Unexpected handshake message (Client Hello, or protocol violation)
*/
if ((s->rlayer.handshake_fragment_len >= 4)
&& !ossl_statem_get_in_handshake(s)) {
if (SSL_is_init_finished(s) &&
!(s->s3->flags & SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS)) {
ossl_statem_set_in_init(s, 1);
s->renegotiate = 1;
s->new_session = 1;
}
i = s->handshake_func(s);
if (i < 0)
return (i);
if (i == 0) {
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_SSL_HANDSHAKE_FAILURE);
return (-1);
}
if (!(s->mode & SSL_MODE_AUTO_RETRY)) {
if (SSL3_BUFFER_get_left(rbuf) == 0) {
/* no read-ahead left? */
BIO *bio;
/*
* In the case where we try to read application data, but we
* trigger an SSL handshake, we return -1 with the retry
* option set. Otherwise renegotiation may cause nasty
* problems in the blocking world
*/
s->rwstate = SSL_READING;
bio = SSL_get_rbio(s);
BIO_clear_retry_flags(bio);
BIO_set_retry_read(bio);
return (-1);
}
}
goto start;
}
switch (SSL3_RECORD_get_type(rr)) {
default:
/*
* TLS 1.0 and 1.1 say you SHOULD ignore unrecognised record types, but
* TLS 1.2 says you MUST send an unexpected message alert. We use the
* TLS 1.2 behaviour for all protocol versions to prevent issues where
* no progress is being made and the peer continually sends unrecognised
* record types, using up resources processing them.
*/
al = SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_UNEXPECTED_RECORD);
goto f_err;
case SSL3_RT_CHANGE_CIPHER_SPEC:
case SSL3_RT_ALERT:
case SSL3_RT_HANDSHAKE:
/*
* we already handled all of these, with the possible exception of
* SSL3_RT_HANDSHAKE when ossl_statem_get_in_handshake(s) is true, but
* that should not happen when type != rr->type
*/
al = SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_READ_BYTES, ERR_R_INTERNAL_ERROR);
goto f_err;
case SSL3_RT_APPLICATION_DATA:
/*
* At this point, we were expecting handshake data, but have
* application data. If the library was running inside ssl3_read()
* (i.e. in_read_app_data is set) and it makes sense to read
* application data at this point (session renegotiation not yet
* started), we will indulge it.
*/
if (ossl_statem_app_data_allowed(s)) {
s->s3->in_read_app_data = 2;
return (-1);
} else {
al = SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_UNEXPECTED_RECORD);
goto f_err;
}
}
/* not reached */
f_err:
ssl3_send_alert(s, SSL3_AL_FATAL, al);
return (-1);
}
|
int ssl3_read_bytes(SSL *s, int type, int *recvd_type, unsigned char *buf,
int len, int peek)
{
int al, i, j, ret;
unsigned int n, curr_rec, num_recs, read_bytes;
SSL3_RECORD *rr;
SSL3_BUFFER *rbuf;
void (*cb) (const SSL *ssl, int type2, int val) = NULL;
rbuf = &s->rlayer.rbuf;
if (!SSL3_BUFFER_is_initialised(rbuf)) {
/* Not initialized yet */
if (!ssl3_setup_read_buffer(s))
return (-1);
}
if ((type && (type != SSL3_RT_APPLICATION_DATA)
&& (type != SSL3_RT_HANDSHAKE)) || (peek
&& (type !=
SSL3_RT_APPLICATION_DATA))) {
SSLerr(SSL_F_SSL3_READ_BYTES, ERR_R_INTERNAL_ERROR);
return -1;
}
if ((type == SSL3_RT_HANDSHAKE) && (s->rlayer.handshake_fragment_len > 0))
/* (partially) satisfy request from storage */
{
unsigned char *src = s->rlayer.handshake_fragment;
unsigned char *dst = buf;
unsigned int k;
/* peek == 0 */
n = 0;
while ((len > 0) && (s->rlayer.handshake_fragment_len > 0)) {
*dst++ = *src++;
len--;
s->rlayer.handshake_fragment_len--;
n++;
}
/* move any remaining fragment bytes: */
for (k = 0; k < s->rlayer.handshake_fragment_len; k++)
s->rlayer.handshake_fragment[k] = *src++;
if (recvd_type != NULL)
*recvd_type = SSL3_RT_HANDSHAKE;
return n;
}
/*
* Now s->rlayer.handshake_fragment_len == 0 if type == SSL3_RT_HANDSHAKE.
*/
if (!ossl_statem_get_in_handshake(s) && SSL_in_init(s)) {
/* type == SSL3_RT_APPLICATION_DATA */
i = s->handshake_func(s);
if (i < 0)
return (i);
if (i == 0) {
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_SSL_HANDSHAKE_FAILURE);
return (-1);
}
}
start:
s->rwstate = SSL_NOTHING;
/*-
* For each record 'i' up to |num_recs|
* rr[i].type - is the type of record
* rr[i].data, - data
* rr[i].off, - offset into 'data' for next read
* rr[i].length, - number of bytes.
*/
rr = s->rlayer.rrec;
num_recs = RECORD_LAYER_get_numrpipes(&s->rlayer);
do {
/* get new records if necessary */
if (num_recs == 0) {
ret = ssl3_get_record(s);
if (ret <= 0)
return (ret);
num_recs = RECORD_LAYER_get_numrpipes(&s->rlayer);
if (num_recs == 0) {
/* Shouldn't happen */
al = SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_SSL3_READ_BYTES, ERR_R_INTERNAL_ERROR);
goto f_err;
}
}
/* Skip over any records we have already read */
for (curr_rec = 0;
curr_rec < num_recs && SSL3_RECORD_is_read(&rr[curr_rec]);
curr_rec++) ;
if (curr_rec == num_recs) {
RECORD_LAYER_set_numrpipes(&s->rlayer, 0);
num_recs = 0;
curr_rec = 0;
}
} while (num_recs == 0);
rr = &rr[curr_rec];
/*
* Reset the count of consecutive warning alerts if we've got a non-empty
* record that isn't an alert.
*/
if (SSL3_RECORD_get_type(rr) != SSL3_RT_ALERT
&& SSL3_RECORD_get_length(rr) != 0)
s->rlayer.alert_count = 0;
/* we now have a packet which can be read and processed */
if (s->s3->change_cipher_spec /* set when we receive ChangeCipherSpec,
* reset by ssl3_get_finished */
&& (SSL3_RECORD_get_type(rr) != SSL3_RT_HANDSHAKE)) {
al = SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_DATA_BETWEEN_CCS_AND_FINISHED);
goto f_err;
}
/*
* If the other end has shut down, throw anything we read away (even in
* 'peek' mode)
*/
if (s->shutdown & SSL_RECEIVED_SHUTDOWN) {
SSL3_RECORD_set_length(rr, 0);
s->rwstate = SSL_NOTHING;
return (0);
}
if (type == SSL3_RECORD_get_type(rr)
|| (SSL3_RECORD_get_type(rr) == SSL3_RT_CHANGE_CIPHER_SPEC
&& type == SSL3_RT_HANDSHAKE && recvd_type != NULL)) {
/*
* SSL3_RT_APPLICATION_DATA or
* SSL3_RT_HANDSHAKE or
* SSL3_RT_CHANGE_CIPHER_SPEC
*/
/*
* make sure that we are not getting application data when we are
* doing a handshake for the first time
*/
if (SSL_in_init(s) && (type == SSL3_RT_APPLICATION_DATA) &&
(s->enc_read_ctx == NULL)) {
al = SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_APP_DATA_IN_HANDSHAKE);
goto f_err;
}
if (type == SSL3_RT_HANDSHAKE
&& SSL3_RECORD_get_type(rr) == SSL3_RT_CHANGE_CIPHER_SPEC
&& s->rlayer.handshake_fragment_len > 0) {
al = SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_CCS_RECEIVED_EARLY);
goto f_err;
}
if (recvd_type != NULL)
*recvd_type = SSL3_RECORD_get_type(rr);
if (len <= 0)
return (len);
read_bytes = 0;
do {
if ((unsigned int)len - read_bytes > SSL3_RECORD_get_length(rr))
n = SSL3_RECORD_get_length(rr);
else
n = (unsigned int)len - read_bytes;
memcpy(buf, &(rr->data[rr->off]), n);
buf += n;
if (peek) {
/* Mark any zero length record as consumed CVE-2016-6305 */
if (SSL3_RECORD_get_length(rr) == 0)
SSL3_RECORD_set_read(rr);
} else {
SSL3_RECORD_sub_length(rr, n);
SSL3_RECORD_add_off(rr, n);
if (SSL3_RECORD_get_length(rr) == 0) {
s->rlayer.rstate = SSL_ST_READ_HEADER;
SSL3_RECORD_set_off(rr, 0);
SSL3_RECORD_set_read(rr);
}
}
if (SSL3_RECORD_get_length(rr) == 0
|| (peek && n == SSL3_RECORD_get_length(rr))) {
curr_rec++;
rr++;
}
read_bytes += n;
} while (type == SSL3_RT_APPLICATION_DATA && curr_rec < num_recs
&& read_bytes < (unsigned int)len);
if (read_bytes == 0) {
/* We must have read empty records. Get more data */
goto start;
}
if (!peek && curr_rec == num_recs
&& (s->mode & SSL_MODE_RELEASE_BUFFERS)
&& SSL3_BUFFER_get_left(rbuf) == 0)
ssl3_release_read_buffer(s);
return read_bytes;
}
/*
* If we get here, then type != rr->type; if we have a handshake message,
* then it was unexpected (Hello Request or Client Hello) or invalid (we
* were actually expecting a CCS).
*/
/*
* Let's just double check that we've not got an SSLv2 record
*/
if (rr->rec_version == SSL2_VERSION) {
/*
* Should never happen. ssl3_get_record() should only give us an SSLv2
* record back if this is the first packet and we are looking for an
* initial ClientHello. Therefore |type| should always be equal to
* |rr->type|. If not then something has gone horribly wrong
*/
al = SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_SSL3_READ_BYTES, ERR_R_INTERNAL_ERROR);
goto f_err;
}
if (s->method->version == TLS_ANY_VERSION
&& (s->server || rr->type != SSL3_RT_ALERT)) {
/*
* If we've got this far and still haven't decided on what version
* we're using then this must be a client side alert we're dealing with
* (we don't allow heartbeats yet). We shouldn't be receiving anything
* other than a ClientHello if we are a server.
*/
s->version = rr->rec_version;
al = SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_UNEXPECTED_MESSAGE);
goto f_err;
}
/*
* In case of record types for which we have 'fragment' storage, fill
* that so that we can process the data at a fixed place.
*/
{
unsigned int dest_maxlen = 0;
unsigned char *dest = NULL;
unsigned int *dest_len = NULL;
if (SSL3_RECORD_get_type(rr) == SSL3_RT_HANDSHAKE) {
dest_maxlen = sizeof s->rlayer.handshake_fragment;
dest = s->rlayer.handshake_fragment;
dest_len = &s->rlayer.handshake_fragment_len;
} else if (SSL3_RECORD_get_type(rr) == SSL3_RT_ALERT) {
dest_maxlen = sizeof s->rlayer.alert_fragment;
dest = s->rlayer.alert_fragment;
dest_len = &s->rlayer.alert_fragment_len;
}
if (dest_maxlen > 0) {
n = dest_maxlen - *dest_len; /* available space in 'dest' */
if (SSL3_RECORD_get_length(rr) < n)
n = SSL3_RECORD_get_length(rr); /* available bytes */
/* now move 'n' bytes: */
while (n-- > 0) {
dest[(*dest_len)++] =
SSL3_RECORD_get_data(rr)[SSL3_RECORD_get_off(rr)];
SSL3_RECORD_add_off(rr, 1);
SSL3_RECORD_add_length(rr, -1);
}
if (*dest_len < dest_maxlen) {
SSL3_RECORD_set_read(rr);
goto start; /* fragment was too small */
}
}
}
/*-
* s->rlayer.handshake_fragment_len == 4 iff rr->type == SSL3_RT_HANDSHAKE;
* s->rlayer.alert_fragment_len == 2 iff rr->type == SSL3_RT_ALERT.
* (Possibly rr is 'empty' now, i.e. rr->length may be 0.)
*/
/* If we are a client, check for an incoming 'Hello Request': */
if ((!s->server) &&
(s->rlayer.handshake_fragment_len >= 4) &&
(s->rlayer.handshake_fragment[0] == SSL3_MT_HELLO_REQUEST) &&
(s->session != NULL) && (s->session->cipher != NULL)) {
s->rlayer.handshake_fragment_len = 0;
if ((s->rlayer.handshake_fragment[1] != 0) ||
(s->rlayer.handshake_fragment[2] != 0) ||
(s->rlayer.handshake_fragment[3] != 0)) {
al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_BAD_HELLO_REQUEST);
goto f_err;
}
if (s->msg_callback)
s->msg_callback(0, s->version, SSL3_RT_HANDSHAKE,
s->rlayer.handshake_fragment, 4, s,
s->msg_callback_arg);
if (SSL_is_init_finished(s) &&
!(s->s3->flags & SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS) &&
!s->s3->renegotiate) {
ssl3_renegotiate(s);
if (ssl3_renegotiate_check(s)) {
i = s->handshake_func(s);
if (i < 0)
return (i);
if (i == 0) {
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_SSL_HANDSHAKE_FAILURE);
return (-1);
}
if (!(s->mode & SSL_MODE_AUTO_RETRY)) {
if (SSL3_BUFFER_get_left(rbuf) == 0) {
/* no read-ahead left? */
BIO *bio;
/*
* In the case where we try to read application data,
* but we trigger an SSL handshake, we return -1 with
* the retry option set. Otherwise renegotiation may
* cause nasty problems in the blocking world
*/
s->rwstate = SSL_READING;
bio = SSL_get_rbio(s);
BIO_clear_retry_flags(bio);
BIO_set_retry_read(bio);
return (-1);
}
}
} else {
SSL3_RECORD_set_read(rr);
}
} else {
/* Does this ever happen? */
SSL3_RECORD_set_read(rr);
}
/*
* we either finished a handshake or ignored the request, now try
* again to obtain the (application) data we were asked for
*/
goto start;
}
/*
* If we are a server and get a client hello when renegotiation isn't
* allowed send back a no renegotiation alert and carry on. WARNING:
* experimental code, needs reviewing (steve)
*/
if (s->server &&
SSL_is_init_finished(s) &&
!s->s3->send_connection_binding &&
(s->version > SSL3_VERSION) &&
(s->rlayer.handshake_fragment_len >= 4) &&
(s->rlayer.handshake_fragment[0] == SSL3_MT_CLIENT_HELLO) &&
(s->session != NULL) && (s->session->cipher != NULL) &&
!(s->ctx->options & SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION)) {
SSL3_RECORD_set_length(rr, 0);
SSL3_RECORD_set_read(rr);
ssl3_send_alert(s, SSL3_AL_WARNING, SSL_AD_NO_RENEGOTIATION);
goto start;
}
if (s->rlayer.alert_fragment_len >= 2) {
int alert_level = s->rlayer.alert_fragment[0];
int alert_descr = s->rlayer.alert_fragment[1];
s->rlayer.alert_fragment_len = 0;
if (s->msg_callback)
s->msg_callback(0, s->version, SSL3_RT_ALERT,
s->rlayer.alert_fragment, 2, s,
s->msg_callback_arg);
if (s->info_callback != NULL)
cb = s->info_callback;
else if (s->ctx->info_callback != NULL)
cb = s->ctx->info_callback;
if (cb != NULL) {
j = (alert_level << 8) | alert_descr;
cb(s, SSL_CB_READ_ALERT, j);
}
if (alert_level == SSL3_AL_WARNING) {
s->s3->warn_alert = alert_descr;
SSL3_RECORD_set_read(rr);
s->rlayer.alert_count++;
if (s->rlayer.alert_count == MAX_WARN_ALERT_COUNT) {
al = SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_TOO_MANY_WARN_ALERTS);
goto f_err;
}
if (alert_descr == SSL_AD_CLOSE_NOTIFY) {
s->shutdown |= SSL_RECEIVED_SHUTDOWN;
return (0);
}
/*
* This is a warning but we receive it if we requested
* renegotiation and the peer denied it. Terminate with a fatal
* alert because if the application tried to renegotiate it
* presumably had a good reason and expects it to succeed. In
* future we might have a renegotiation where we don't care if
* the peer refused it where we carry on.
*/
else if (alert_descr == SSL_AD_NO_RENEGOTIATION) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_NO_RENEGOTIATION);
goto f_err;
}
#ifdef SSL_AD_MISSING_SRP_USERNAME
else if (alert_descr == SSL_AD_MISSING_SRP_USERNAME)
return (0);
#endif
} else if (alert_level == SSL3_AL_FATAL) {
char tmp[16];
s->rwstate = SSL_NOTHING;
s->s3->fatal_alert = alert_descr;
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_AD_REASON_OFFSET + alert_descr);
BIO_snprintf(tmp, sizeof tmp, "%d", alert_descr);
ERR_add_error_data(2, "SSL alert number ", tmp);
s->shutdown |= SSL_RECEIVED_SHUTDOWN;
SSL3_RECORD_set_read(rr);
SSL_CTX_remove_session(s->session_ctx, s->session);
return (0);
} else {
al = SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_UNKNOWN_ALERT_TYPE);
goto f_err;
}
goto start;
}
if (s->shutdown & SSL_SENT_SHUTDOWN) { /* but we have not received a
* shutdown */
s->rwstate = SSL_NOTHING;
SSL3_RECORD_set_length(rr, 0);
SSL3_RECORD_set_read(rr);
return (0);
}
if (SSL3_RECORD_get_type(rr) == SSL3_RT_CHANGE_CIPHER_SPEC) {
al = SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_CCS_RECEIVED_EARLY);
goto f_err;
}
/*
* Unexpected handshake message (Client Hello, or protocol violation)
*/
if ((s->rlayer.handshake_fragment_len >= 4)
&& !ossl_statem_get_in_handshake(s)) {
if (SSL_is_init_finished(s) &&
!(s->s3->flags & SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS)) {
ossl_statem_set_in_init(s, 1);
s->renegotiate = 1;
s->new_session = 1;
}
i = s->handshake_func(s);
if (i < 0)
return (i);
if (i == 0) {
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_SSL_HANDSHAKE_FAILURE);
return (-1);
}
if (!(s->mode & SSL_MODE_AUTO_RETRY)) {
if (SSL3_BUFFER_get_left(rbuf) == 0) {
/* no read-ahead left? */
BIO *bio;
/*
* In the case where we try to read application data, but we
* trigger an SSL handshake, we return -1 with the retry
* option set. Otherwise renegotiation may cause nasty
* problems in the blocking world
*/
s->rwstate = SSL_READING;
bio = SSL_get_rbio(s);
BIO_clear_retry_flags(bio);
BIO_set_retry_read(bio);
return (-1);
}
}
goto start;
}
switch (SSL3_RECORD_get_type(rr)) {
default:
/*
* TLS 1.0 and 1.1 say you SHOULD ignore unrecognised record types, but
* TLS 1.2 says you MUST send an unexpected message alert. We use the
* TLS 1.2 behaviour for all protocol versions to prevent issues where
* no progress is being made and the peer continually sends unrecognised
* record types, using up resources processing them.
*/
al = SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_UNEXPECTED_RECORD);
goto f_err;
case SSL3_RT_CHANGE_CIPHER_SPEC:
case SSL3_RT_ALERT:
case SSL3_RT_HANDSHAKE:
/*
* we already handled all of these, with the possible exception of
* SSL3_RT_HANDSHAKE when ossl_statem_get_in_handshake(s) is true, but
* that should not happen when type != rr->type
*/
al = SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_READ_BYTES, ERR_R_INTERNAL_ERROR);
goto f_err;
case SSL3_RT_APPLICATION_DATA:
/*
* At this point, we were expecting handshake data, but have
* application data. If the library was running inside ssl3_read()
* (i.e. in_read_app_data is set) and it makes sense to read
* application data at this point (session renegotiation not yet
* started), we will indulge it.
*/
if (ossl_statem_app_data_allowed(s)) {
s->s3->in_read_app_data = 2;
return (-1);
} else {
al = SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_UNEXPECTED_RECORD);
goto f_err;
}
}
/* not reached */
f_err:
ssl3_send_alert(s, SSL3_AL_FATAL, al);
return (-1);
}
|
C
|
openssl
| 0 |
CVE-2015-8812
|
https://www.cvedetails.com/cve/CVE-2015-8812/
| null |
https://github.com/torvalds/linux/commit/67f1aee6f45059fd6b0f5b0ecb2c97ad0451f6b3
|
67f1aee6f45059fd6b0f5b0ecb2c97ad0451f6b3
|
iw_cxgb3: Fix incorrectly returning error on success
The cxgb3_*_send() functions return NET_XMIT_ values, which are
positive integer values. So don't treat positive return values
as an error.
Signed-off-by: Steve Wise <[email protected]>
Signed-off-by: Hariprasad Shenai <[email protected]>
Signed-off-by: Doug Ledford <[email protected]>
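A small hedged sketch of the bug pattern (stub names, not the driver's actual functions): NET_XMIT_* codes are small non-negative integers, so a plain "if (ret)" test misreports a successful transmit as an error; only negative returns are failures.

struct skb_stub { int len; };

/* Stub standing in for a cxgb3_*_send() call: returns a NET_XMIT_* code
 * (>= 0) on success paths, or a negative errno on real failure. */
static int l2t_send_stub(struct skb_stub *skb) { (void)skb; return 1; }

static int send_mpa_request(struct skb_stub *skb)
{
    int ret = l2t_send_stub(skb);
    if (ret < 0)        /* fixed check; "if (ret)" rejected success codes */
        return ret;
    return 0;
}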
|
static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
{
unsigned long flags;
enum iwch_ep_state state;
spin_lock_irqsave(&epc->lock, flags);
state = epc->state;
spin_unlock_irqrestore(&epc->lock, flags);
return state;
}
|
static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
{
unsigned long flags;
enum iwch_ep_state state;
spin_lock_irqsave(&epc->lock, flags);
state = epc->state;
spin_unlock_irqrestore(&epc->lock, flags);
return state;
}
|
C
|
linux
| 0 |
CVE-2018-17476
|
https://www.cvedetails.com/cve/CVE-2018-17476/
|
CWE-20
|
https://github.com/chromium/chromium/commit/3d41e77125f3de8d722b6d8303599abaf2a91667
|
3d41e77125f3de8d722b6d8303599abaf2a91667
|
If a dialog is shown, drop fullscreen.
BUG=875066, 817809, 792876, 812769, 813815
TEST=included
Change-Id: Ic3d697fa3c4b01f5d7fea77391857177ada660db
Reviewed-on: https://chromium-review.googlesource.com/1185208
Reviewed-by: Sidney San Martín <[email protected]>
Commit-Queue: Avi Drissman <[email protected]>
Cr-Commit-Position: refs/heads/master@{#586418}
|
void Browser::RendererResponsive(
WebContents* source,
content::RenderWidgetHost* render_widget_host) {
TabDialogs::FromWebContents(source)->HideHungRendererDialog(
render_widget_host);
}
|
void Browser::RendererResponsive(
WebContents* source,
content::RenderWidgetHost* render_widget_host) {
TabDialogs::FromWebContents(source)->HideHungRendererDialog(
render_widget_host);
}
|
C
|
Chrome
| 0 |
CVE-2011-0716
|
https://www.cvedetails.com/cve/CVE-2011-0716/
|
CWE-399
|
https://github.com/torvalds/linux/commit/6b0d6a9b4296fa16a28d10d416db7a770fc03287
|
6b0d6a9b4296fa16a28d10d416db7a770fc03287
|
bridge: Fix mglist corruption that leads to memory corruption
The list mp->mglist is used to indicate whether a multicast group
is active on the bridge interface itself as opposed to one of the
constituent interfaces in the bridge.
Unfortunately the operation that adds the mp->mglist node to the
list neglected to check whether it has already been added. This
leads to list corruption in the form of nodes pointing to itself.
Normally this would be quite obvious as it would cause an infinite
loop when walking the list. However, as this list is never actually
walked (which means that we don't really need it, I'll get rid of
it in a subsequent patch), this instead is hidden until we perform
a delete operation on the affected nodes.
As the same node may now be pointed to by more than one node, the
delete operations can then cause modification of freed memory.
This was observed in practice to cause corruption in 512-byte slabs,
most commonly leading to crashes in jbd2.
Thanks to Josef Bacik for pointing me in the right direction.
Reported-by: Ian Page Hands <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
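To make the failure mode concrete, here is a hedged plain-C sketch (invented types, not the bridge code): adding a node to a circular list twice leaves it pointing at itself, and a later unlink then modifies freed or unrelated memory. A membership check before the add, analogous to what the original code was missing, prevents it.

/* 'head' is assumed to be a circular sentinel, i.e. initialized so that
 * head->next == head->prev == head. */
struct node { struct node *next, *prev; int on_list; };

static void list_add_once(struct node *n, struct node *head)
{
    if (n->on_list)              /* the check the buggy code lacked */
        return;
    n->next = head->next;
    n->prev = head;
    head->next->prev = n;
    head->next = n;
    n->on_list = 1;
}

static void list_del_once(struct node *n)
{
    if (!n->on_list)
        return;
    n->prev->next = n->next;
    n->next->prev = n->prev;
    n->on_list = 0;
}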
|
static void br_mdb_free(struct rcu_head *head)
{
struct net_bridge_mdb_htable *mdb =
container_of(head, struct net_bridge_mdb_htable, rcu);
struct net_bridge_mdb_htable *old = mdb->old;
mdb->old = NULL;
kfree(old->mhash);
kfree(old);
}
|
static void br_mdb_free(struct rcu_head *head)
{
struct net_bridge_mdb_htable *mdb =
container_of(head, struct net_bridge_mdb_htable, rcu);
struct net_bridge_mdb_htable *old = mdb->old;
mdb->old = NULL;
kfree(old->mhash);
kfree(old);
}
|
C
|
linux
| 0 |
CVE-2017-13686
|
https://www.cvedetails.com/cve/CVE-2017-13686/
|
CWE-476
|
https://github.com/torvalds/linux/commit/bc3aae2bbac46dd894c89db5d5e98f7f0ef9e205
|
bc3aae2bbac46dd894c89db5d5e98f7f0ef9e205
|
net: check and errout if res->fi is NULL when RTM_F_FIB_MATCH is set
Syzkaller hit 'general protection fault in fib_dump_info' bug on
commit 4.13-rc5..
Guilty file: net/ipv4/fib_semantics.c
kasan: GPF could be caused by NULL-ptr deref or user memory access
general protection fault: 0000 [#1] SMP KASAN
Modules linked in:
CPU: 0 PID: 2808 Comm: syz-executor0 Not tainted 4.13.0-rc5 #1
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS
Ubuntu-1.8.2-1ubuntu1 04/01/2014
task: ffff880078562700 task.stack: ffff880078110000
RIP: 0010:fib_dump_info+0x388/0x1170 net/ipv4/fib_semantics.c:1314
RSP: 0018:ffff880078117010 EFLAGS: 00010206
RAX: dffffc0000000000 RBX: 00000000000000fe RCX: 0000000000000002
RDX: 0000000000000006 RSI: ffff880078117084 RDI: 0000000000000030
RBP: ffff880078117268 R08: 000000000000000c R09: ffff8800780d80c8
R10: 0000000058d629b4 R11: 0000000067fce681 R12: 0000000000000000
R13: ffff8800784bd540 R14: ffff8800780d80b5 R15: ffff8800780d80a4
FS: 00000000022fa940(0000) GS:ffff88007fc00000(0000)
knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00000000004387d0 CR3: 0000000079135000 CR4: 00000000000006f0
Call Trace:
inet_rtm_getroute+0xc89/0x1f50 net/ipv4/route.c:2766
rtnetlink_rcv_msg+0x288/0x680 net/core/rtnetlink.c:4217
netlink_rcv_skb+0x340/0x470 net/netlink/af_netlink.c:2397
rtnetlink_rcv+0x28/0x30 net/core/rtnetlink.c:4223
netlink_unicast_kernel net/netlink/af_netlink.c:1265 [inline]
netlink_unicast+0x4c4/0x6e0 net/netlink/af_netlink.c:1291
netlink_sendmsg+0x8c4/0xca0 net/netlink/af_netlink.c:1854
sock_sendmsg_nosec net/socket.c:633 [inline]
sock_sendmsg+0xca/0x110 net/socket.c:643
___sys_sendmsg+0x779/0x8d0 net/socket.c:2035
__sys_sendmsg+0xd1/0x170 net/socket.c:2069
SYSC_sendmsg net/socket.c:2080 [inline]
SyS_sendmsg+0x2d/0x50 net/socket.c:2076
entry_SYSCALL_64_fastpath+0x1a/0xa5
RIP: 0033:0x4512e9
RSP: 002b:00007ffc75584cc8 EFLAGS: 00000216 ORIG_RAX:
000000000000002e
RAX: ffffffffffffffda RBX: 0000000000000002 RCX: 00000000004512e9
RDX: 0000000000000000 RSI: 0000000020f2cfc8 RDI: 0000000000000003
RBP: 000000000000000e R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000216 R12: fffffffffffffffe
R13: 0000000000718000 R14: 0000000020c44ff0 R15: 0000000000000000
Code: 00 0f b6 8d ec fd ff ff 48 8b 85 f0 fd ff ff 88 48 17 48 8b 45
28 48 8d 78 30 48 b8 00 00 00 00 00 fc ff df 48 89 fa 48 c1 ea 03
<0f>
b6 04 02 84 c0 74 08 3c 03 0f 8e cb 0c 00 00 48 8b 45 28 44
RIP: fib_dump_info+0x388/0x1170 net/ipv4/fib_semantics.c:1314 RSP:
ffff880078117010
---[ end trace 254a7af28348f88b ]---
This patch adds a res->fi NULL check.
example run:
$ip route get 0.0.0.0 iif virt1-0
broadcast 0.0.0.0 dev lo
cache <local,brd> iif virt1-0
$ip route get 0.0.0.0 iif virt1-0 fibmatch
RTNETLINK answers: No route to host
Reported-by: idaifish <[email protected]>
Reported-by: Dmitry Vyukov <[email protected]>
Fixes: b61798130f1b ("net: ipv4: RTM_GETROUTE: return matched fib result when requested")
Signed-off-by: Roopa Prabhu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
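The patch is essentially a one-line guard; a stand-alone sketch under invented stub types could look like this:

struct fib_result_stub { void *fi; };

#define EHOSTUNREACH_STUB 113   /* "No route to host", as in the example run */

static int getroute_fibmatch(const struct fib_result_stub *res)
{
    if (res->fi == 0)                 /* the added NULL check */
        return -EHOSTUNREACH_STUB;
    /* ... fib_dump_info(res->fi, ...) is only reached with a valid fi ... */
    return 0;
}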
|
static void set_lwt_redirect(struct rtable *rth)
{
if (lwtunnel_output_redirect(rth->dst.lwtstate)) {
rth->dst.lwtstate->orig_output = rth->dst.output;
rth->dst.output = lwtunnel_output;
}
if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
rth->dst.lwtstate->orig_input = rth->dst.input;
rth->dst.input = lwtunnel_input;
}
}
|
static void set_lwt_redirect(struct rtable *rth)
{
if (lwtunnel_output_redirect(rth->dst.lwtstate)) {
rth->dst.lwtstate->orig_output = rth->dst.output;
rth->dst.output = lwtunnel_output;
}
if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
rth->dst.lwtstate->orig_input = rth->dst.input;
rth->dst.input = lwtunnel_input;
}
}
|
C
|
linux
| 0 |
CVE-2015-6787
|
https://www.cvedetails.com/cve/CVE-2015-6787/
| null |
https://github.com/chromium/chromium/commit/f911e11e7f6b5c0d6f5ee694a9871de6619889f7
|
f911e11e7f6b5c0d6f5ee694a9871de6619889f7
|
Reland "[CI] Make paint property nodes non-ref-counted"
This reverts commit 887383b30842d9d9006e11bb6932660a3cb5b1b7.
Reason for revert: Retry in M69.
Original change's description:
> Revert "[CI] Make paint property nodes non-ref-counted"
>
> This reverts commit 70fc0b018c9517558b7aa2be00edf2debb449123.
>
> Reason for revert: Caused bugs found by clusterfuzz
>
> Original change's description:
> > [CI] Make paint property nodes non-ref-counted
> >
> > Now all paint property nodes are owned by ObjectPaintProperties
> > (and LocalFrameView temporarily before removing non-RLS mode).
> > Others just use raw pointers or references.
> >
> > Bug: 833496
> > Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
> > Change-Id: I2d544fe153bb94698623248748df63c8aa2081ae
> > Reviewed-on: https://chromium-review.googlesource.com/1031101
> > Reviewed-by: Tien-Ren Chen <[email protected]>
> > Commit-Queue: Xianzhu Wang <[email protected]>
> > Cr-Commit-Position: refs/heads/master@{#554626}
>
> [email protected],[email protected],[email protected]
>
> Change-Id: I02bb50d6744cb81a797246a0116b677e80a3c69f
> No-Presubmit: true
> No-Tree-Checks: true
> No-Try: true
> Bug: 833496,837932,837943
> Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
> Reviewed-on: https://chromium-review.googlesource.com/1034292
> Reviewed-by: Xianzhu Wang <[email protected]>
> Commit-Queue: Xianzhu Wang <[email protected]>
> Cr-Commit-Position: refs/heads/master@{#554653}
[email protected],[email protected],[email protected]
# Not skipping CQ checks because original CL landed > 1 day ago.
Bug: 833496, 837932, 837943
Change-Id: I0b4ef70db1f1f211ba97c30d617225355c750992
Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
Reviewed-on: https://chromium-review.googlesource.com/1083491
Commit-Queue: Xianzhu Wang <[email protected]>
Reviewed-by: Xianzhu Wang <[email protected]>
Cr-Commit-Position: refs/heads/master@{#563930}
|
CreateFragmentContextsForRepeatingFixedPosition() {
DCHECK(object_.IsFixedPositionObjectInPagedMedia());
LayoutView* view = object_.View();
auto page_height = view->PageLogicalHeight();
int page_count = ceilf(view->DocumentRect().Height() / page_height);
context_.fragments.resize(page_count);
context_.fragments[0].fixed_position.paint_offset.Move(LayoutUnit(),
-view->ScrollTop());
for (int page = 1; page < page_count; page++) {
context_.fragments[page] = context_.fragments[page - 1];
context_.fragments[page].fixed_position.paint_offset.Move(LayoutUnit(),
page_height);
context_.fragments[page].logical_top_in_flow_thread += page_height;
}
}
|
CreateFragmentContextsForRepeatingFixedPosition() {
DCHECK(object_.IsFixedPositionObjectInPagedMedia());
LayoutView* view = object_.View();
auto page_height = view->PageLogicalHeight();
int page_count = ceilf(view->DocumentRect().Height() / page_height);
context_.fragments.resize(page_count);
context_.fragments[0].fixed_position.paint_offset.Move(LayoutUnit(),
-view->ScrollTop());
for (int page = 1; page < page_count; page++) {
context_.fragments[page] = context_.fragments[page - 1];
context_.fragments[page].fixed_position.paint_offset.Move(LayoutUnit(),
page_height);
context_.fragments[page].logical_top_in_flow_thread += page_height;
}
}
|
C
|
Chrome
| 0 |
CVE-2019-1010239
|
https://www.cvedetails.com/cve/CVE-2019-1010239/
|
CWE-754
|
https://github.com/DaveGamble/cJSON/commit/be749d7efa7c9021da746e685bd6dec79f9dd99b
|
be749d7efa7c9021da746e685bd6dec79f9dd99b
|
Fix crash of cJSON_GetObjectItemCaseSensitive when calling it on arrays
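The crash mechanism: array children carry no name, so a case-sensitive lookup that blindly strcmp()s each child's string hands strcmp() a NULL pointer. A self-contained sketch of the kind of guard involved (simplified stand-in types; not the cJSON source):

    #include <stdio.h>
    #include <string.h>
    #include <stddef.h>

    struct node {                 /* simplified stand-in for struct cJSON */
        struct node *next;
        const char *string;       /* NULL for array elements */
    };

    /* Case-sensitive child lookup that skips unnamed (array) children
     * instead of passing a NULL pointer to strcmp(). */
    struct node *find_child(struct node *child, const char *name)
    {
        for (; child != NULL; child = child->next)
            if (child->string != NULL && strcmp(child->string, name) == 0)
                return child;
        return NULL;
    }

    int main(void)
    {
        struct node b = { NULL, "b" };
        struct node a = { &b, NULL };       /* unnamed, as in an array */
        printf("%s\n", find_child(&a, "b") ? "found" : "missing");
        return 0;
    }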
|
CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt)
{
printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } };
if (prebuffer < 0)
{
return NULL;
}
p.buffer = (unsigned char*)global_hooks.allocate((size_t)prebuffer);
if (!p.buffer)
{
return NULL;
}
p.length = (size_t)prebuffer;
p.offset = 0;
p.noalloc = false;
p.format = fmt;
p.hooks = global_hooks;
if (!print_value(item, &p))
{
global_hooks.deallocate(p.buffer);
return NULL;
}
return (char*)p.buffer;
}
|
CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt)
{
printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } };
if (prebuffer < 0)
{
return NULL;
}
p.buffer = (unsigned char*)global_hooks.allocate((size_t)prebuffer);
if (!p.buffer)
{
return NULL;
}
p.length = (size_t)prebuffer;
p.offset = 0;
p.noalloc = false;
p.format = fmt;
p.hooks = global_hooks;
if (!print_value(item, &p))
{
global_hooks.deallocate(p.buffer);
return NULL;
}
return (char*)p.buffer;
}
|
C
|
cJSON
| 0 |
CVE-2014-3610
|
https://www.cvedetails.com/cve/CVE-2014-3610/
|
CWE-264
|
https://github.com/torvalds/linux/commit/854e8bb1aa06c578c2c9145fa6bfe3680ef63b23
|
854e8bb1aa06c578c2c9145fa6bfe3680ef63b23
|
KVM: x86: Check non-canonical addresses upon WRMSR
Upon WRMSR, the CPU should inject #GP if a non-canonical value (address) is
written to certain MSRs. The behavior is "almost" identical for AMD and Intel
(ignoring MSRs that are not implemented in either architecture since they would
anyhow #GP). However, IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
non-canonical address is written on Intel but not on AMD (which ignores the top
32-bits).
Accordingly, this patch injects a #GP on the MSRs which behave identically on
Intel and AMD. To eliminate the differences between the architectures, the
value which is written to IA32_SYSENTER_ESP and IA32_SYSENTER_EIP is turned to
canonical value before writing instead of injecting a #GP.
Some references from Intel and AMD manuals:
According to the Intel SDM description of the WRMSR instruction, #GP is expected
on WRMSR "If the source register contains a non-canonical address and ECX
specifies one of the following MSRs: IA32_DS_AREA, IA32_FS_BASE, IA32_GS_BASE,
IA32_KERNEL_GS_BASE, IA32_LSTAR, IA32_SYSENTER_EIP, IA32_SYSENTER_ESP."
According to the AMD instruction manual:
LSTAR/CSTAR (SYSCALL): "The WRMSR instruction loads the target RIP into the
LSTAR and CSTAR registers. If an RIP written by WRMSR is not in canonical
form, a general-protection exception (#GP) occurs."
IA32_GS_BASE and IA32_FS_BASE (WRFSBASE/WRGSBASE): "The address written to the
base field must be in canonical form or a #GP fault will occur."
IA32_KERNEL_GS_BASE (SWAPGS): "The address stored in the KernelGSbase MSR must
be in canonical form."
This patch fixes CVE-2014-3610.
Cc: [email protected]
Signed-off-by: Nadav Amit <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
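The canonicality test at the heart of the patch is easy to state; a self-contained illustration for the common 48-bit implementations (the helper name is ours, not KVM's): an address is canonical iff sign-extending its low 48 bits reproduces it.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Canonical on 48-bit x86-64 means bits 63:47 replicate bit 47,
     * i.e. sign-extending the low 48 bits gives the value back.
     * Relies on arithmetic right shift of signed values (true on
     * gcc/clang). */
    bool is_canonical_48(uint64_t addr)
    {
        return ((int64_t)(addr << 16) >> 16) == (int64_t)addr;
    }

    int main(void)
    {
        printf("%d\n", is_canonical_48(0x00007fffffffffffULL));  /* 1 */
        printf("%d\n", is_canonical_48(0xffff800000000000ULL));  /* 1 */
        printf("%d\n", is_canonical_48(0x0000800000000000ULL));  /* 0: would #GP */
        return 0;
    }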
|
static int nm_interception(struct vcpu_svm *svm)
{
svm_fpu_activate(&svm->vcpu);
return 1;
}
|
static int nm_interception(struct vcpu_svm *svm)
{
svm_fpu_activate(&svm->vcpu);
return 1;
}
|
C
|
linux
| 0 |
CVE-2016-7910
|
https://www.cvedetails.com/cve/CVE-2016-7910/
|
CWE-416
|
https://github.com/torvalds/linux/commit/77da160530dd1dc94f6ae15a981f24e5f0021e84
|
77da160530dd1dc94f6ae15a981f24e5f0021e84
|
block: fix use-after-free in seq file
I got a KASAN report of use-after-free:
==================================================================
BUG: KASAN: use-after-free in klist_iter_exit+0x61/0x70 at addr ffff8800b6581508
Read of size 8 by task trinity-c1/315
=============================================================================
BUG kmalloc-32 (Not tainted): kasan: bad access detected
-----------------------------------------------------------------------------
Disabling lock debugging due to kernel taint
INFO: Allocated in disk_seqf_start+0x66/0x110 age=144 cpu=1 pid=315
___slab_alloc+0x4f1/0x520
__slab_alloc.isra.58+0x56/0x80
kmem_cache_alloc_trace+0x260/0x2a0
disk_seqf_start+0x66/0x110
traverse+0x176/0x860
seq_read+0x7e3/0x11a0
proc_reg_read+0xbc/0x180
do_loop_readv_writev+0x134/0x210
do_readv_writev+0x565/0x660
vfs_readv+0x67/0xa0
do_preadv+0x126/0x170
SyS_preadv+0xc/0x10
do_syscall_64+0x1a1/0x460
return_from_SYSCALL_64+0x0/0x6a
INFO: Freed in disk_seqf_stop+0x42/0x50 age=160 cpu=1 pid=315
__slab_free+0x17a/0x2c0
kfree+0x20a/0x220
disk_seqf_stop+0x42/0x50
traverse+0x3b5/0x860
seq_read+0x7e3/0x11a0
proc_reg_read+0xbc/0x180
do_loop_readv_writev+0x134/0x210
do_readv_writev+0x565/0x660
vfs_readv+0x67/0xa0
do_preadv+0x126/0x170
SyS_preadv+0xc/0x10
do_syscall_64+0x1a1/0x460
return_from_SYSCALL_64+0x0/0x6a
CPU: 1 PID: 315 Comm: trinity-c1 Tainted: G B 4.7.0+ #62
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Ubuntu-1.8.2-1ubuntu1 04/01/2014
ffffea0002d96000 ffff880119b9f918 ffffffff81d6ce81 ffff88011a804480
ffff8800b6581500 ffff880119b9f948 ffffffff8146c7bd ffff88011a804480
ffffea0002d96000 ffff8800b6581500 fffffffffffffff4 ffff880119b9f970
Call Trace:
[<ffffffff81d6ce81>] dump_stack+0x65/0x84
[<ffffffff8146c7bd>] print_trailer+0x10d/0x1a0
[<ffffffff814704ff>] object_err+0x2f/0x40
[<ffffffff814754d1>] kasan_report_error+0x221/0x520
[<ffffffff8147590e>] __asan_report_load8_noabort+0x3e/0x40
[<ffffffff83888161>] klist_iter_exit+0x61/0x70
[<ffffffff82404389>] class_dev_iter_exit+0x9/0x10
[<ffffffff81d2e8ea>] disk_seqf_stop+0x3a/0x50
[<ffffffff8151f812>] seq_read+0x4b2/0x11a0
[<ffffffff815f8fdc>] proc_reg_read+0xbc/0x180
[<ffffffff814b24e4>] do_loop_readv_writev+0x134/0x210
[<ffffffff814b4c45>] do_readv_writev+0x565/0x660
[<ffffffff814b8a17>] vfs_readv+0x67/0xa0
[<ffffffff814b8de6>] do_preadv+0x126/0x170
[<ffffffff814b92ec>] SyS_preadv+0xc/0x10
This problem can occur in the following situation:
open()
- pread()
- .seq_start()
- iter = kmalloc() // succeeds
- seqf->private = iter
- .seq_stop()
- kfree(seqf->private)
- pread()
- .seq_start()
- iter = kmalloc() // fails
- .seq_stop()
- class_dev_iter_exit(seqf->private) // boom! old pointer
As the comment in disk_seqf_stop() says, stop is called even if start
failed, so we need to reinitialise the private pointer to NULL when seq
iteration stops.
An alternative would be to set the private pointer to NULL when the
kmalloc() in disk_seqf_start() fails.
Cc: [email protected]
Signed-off-by: Vegard Nossum <[email protected]>
Acked-by: Tejun Heo <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
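The pattern behind the fix is worth seeing in isolation; a runnable user-space analogue (types and names are ours): once stop() frees the private data it must also clear the pointer, so a later stop() after a failed start() cannot act on stale memory.

    #include <stdlib.h>
    #include <stdio.h>

    struct seq_state { void *private_data; };

    /* Fixed stop(): free AND reset. The buggy version omitted the
     * assignment, leaving a dangling pointer for the next iteration. */
    void seq_stop_fixed(struct seq_state *s)
    {
        free(s->private_data);
        s->private_data = NULL;
    }

    int main(void)
    {
        struct seq_state s = { malloc(32) };
        seq_stop_fixed(&s);
        seq_stop_fixed(&s);            /* now harmless: free(NULL) is a no-op */
        printf("private_data = %p\n", s.private_data);
        return 0;
    }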
|
static char *bdevt_str(dev_t devt, char *buf)
{
if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) {
char tbuf[BDEVT_SIZE];
snprintf(tbuf, BDEVT_SIZE, "%02x%02x", MAJOR(devt), MINOR(devt));
snprintf(buf, BDEVT_SIZE, "%-9s", tbuf);
} else
snprintf(buf, BDEVT_SIZE, "%03x:%05x", MAJOR(devt), MINOR(devt));
return buf;
}
|
static char *bdevt_str(dev_t devt, char *buf)
{
if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) {
char tbuf[BDEVT_SIZE];
snprintf(tbuf, BDEVT_SIZE, "%02x%02x", MAJOR(devt), MINOR(devt));
snprintf(buf, BDEVT_SIZE, "%-9s", tbuf);
} else
snprintf(buf, BDEVT_SIZE, "%03x:%05x", MAJOR(devt), MINOR(devt));
return buf;
}
|
C
|
linux
| 0 |
CVE-2018-6158
|
https://www.cvedetails.com/cve/CVE-2018-6158/
|
CWE-362
|
https://github.com/chromium/chromium/commit/20b65d00ca3d8696430e22efad7485366f8c3a21
|
20b65d00ca3d8696430e22efad7485366f8c3a21
|
[oilpan] Fix GCInfoTable for multiple threads
Previously, grow and access from different threads could lead to a race
on the table backing; see bug.
- Rework the table to work on an existing reservation.
- Commit upon growing, avoiding any copies.
Drive-by: Fix over-allocation of table.
Bug: chromium:841280
Change-Id: I329cb6f40091e14e8c05334ba1104a9440c31d43
Reviewed-on: https://chromium-review.googlesource.com/1061525
Commit-Queue: Michael Lippautz <[email protected]>
Reviewed-by: Kentaro Hara <[email protected]>
Cr-Commit-Position: refs/heads/master@{#560434}
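A user-space analogue of the reserve-then-commit idea the message describes (a sketch under POSIX/Linux assumptions; Oilpan's actual table management differs in detail): readers can keep dereferencing the base pointer while a grower commits more pages, because the backing never moves and nothing is copied.

    #include <sys/mman.h>
    #include <stdio.h>

    int main(void)
    {
        const size_t reserved = 1 << 20;   /* reserve 1 MiB up front */
        const size_t page = 4096;          /* assume 4 KiB pages */
        unsigned char *base = mmap(NULL, reserved, PROT_NONE,
                                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (base == MAP_FAILED)
            return 1;
        /* "Grow" the table by committing a prefix in place; the base
         * address, and hence every published index, stays valid. */
        if (mprotect(base, 2 * page, PROT_READ | PROT_WRITE) != 0)
            return 1;
        base[0] = 42;
        printf("table @ %p, entry 0 = %u\n", (void *)base, base[0]);
        munmap(base, reserved);
        return 0;
    }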
|
void BaseArena::MakeConsistentForMutator() {
ClearFreeLists();
#if DCHECK_IS_ON()
DCHECK(IsConsistentForGC());
#endif
DCHECK(!first_page_);
BasePage* previous_page = nullptr;
for (BasePage *page = first_unswept_page_; page;
previous_page = page, page = page->Next()) {
page->MakeConsistentForMutator();
page->MarkAsSwept();
}
if (previous_page) {
DCHECK(!SweepingCompleted());
previous_page->next_ = first_page_;
first_page_ = first_unswept_page_;
first_unswept_page_ = nullptr;
}
DCHECK(SweepingCompleted());
VerifyObjectStartBitmap();
}
|
void BaseArena::MakeConsistentForMutator() {
ClearFreeLists();
#if DCHECK_IS_ON()
DCHECK(IsConsistentForGC());
#endif
DCHECK(!first_page_);
BasePage* previous_page = nullptr;
for (BasePage *page = first_unswept_page_; page;
previous_page = page, page = page->Next()) {
page->MakeConsistentForMutator();
page->MarkAsSwept();
}
if (previous_page) {
DCHECK(!SweepingCompleted());
previous_page->next_ = first_page_;
first_page_ = first_unswept_page_;
first_unswept_page_ = nullptr;
}
DCHECK(SweepingCompleted());
VerifyObjectStartBitmap();
}
|
C
|
Chrome
| 0 |
CVE-2011-2495
|
https://www.cvedetails.com/cve/CVE-2011-2495/
|
CWE-264
|
https://github.com/torvalds/linux/commit/1d1221f375c94ef961ba8574ac4f85c8870ddd51
|
1d1221f375c94ef961ba8574ac4f85c8870ddd51
|
proc: restrict access to /proc/PID/io
/proc/PID/io may be used for gathering private information. E.g., for the
openssh and vsftpd daemons, wchars/rchars may be used to learn the
precise password length. Restrict it to processes that are able to ptrace
the target process.
ptrace_may_access() is needed to prevent keeping open file descriptor of
"io" file, executing setuid binary and gathering io information of the
setuid'ed process.
Signed-off-by: Vasiliy Kulikov <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
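The gate itself is small; a kernel-context sketch (not buildable standalone, with the surrounding function simplified away):

    /* Reveal io accounting only to callers that could ptrace the task,
     * which also defeats the "keep the fd open across a setuid exec"
     * trick described above. */
    if (!ptrace_may_access(task, PTRACE_MODE_READ))
            return -EACCES;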
|
static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
size_t count, loff_t *ppos)
{
struct inode * inode = file->f_path.dentry->d_inode;
char *page, *tmp;
ssize_t length;
uid_t loginuid;
if (!capable(CAP_AUDIT_CONTROL))
return -EPERM;
rcu_read_lock();
if (current != pid_task(proc_pid(inode), PIDTYPE_PID)) {
rcu_read_unlock();
return -EPERM;
}
rcu_read_unlock();
if (count >= PAGE_SIZE)
count = PAGE_SIZE - 1;
if (*ppos != 0) {
/* No partial writes. */
return -EINVAL;
}
page = (char*)__get_free_page(GFP_TEMPORARY);
if (!page)
return -ENOMEM;
length = -EFAULT;
if (copy_from_user(page, buf, count))
goto out_free_page;
page[count] = '\0';
loginuid = simple_strtoul(page, &tmp, 10);
if (tmp == page) {
length = -EINVAL;
goto out_free_page;
}
length = audit_set_loginuid(current, loginuid);
if (likely(length == 0))
length = count;
out_free_page:
free_page((unsigned long) page);
return length;
}
|
static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
size_t count, loff_t *ppos)
{
struct inode * inode = file->f_path.dentry->d_inode;
char *page, *tmp;
ssize_t length;
uid_t loginuid;
if (!capable(CAP_AUDIT_CONTROL))
return -EPERM;
rcu_read_lock();
if (current != pid_task(proc_pid(inode), PIDTYPE_PID)) {
rcu_read_unlock();
return -EPERM;
}
rcu_read_unlock();
if (count >= PAGE_SIZE)
count = PAGE_SIZE - 1;
if (*ppos != 0) {
/* No partial writes. */
return -EINVAL;
}
page = (char*)__get_free_page(GFP_TEMPORARY);
if (!page)
return -ENOMEM;
length = -EFAULT;
if (copy_from_user(page, buf, count))
goto out_free_page;
page[count] = '\0';
loginuid = simple_strtoul(page, &tmp, 10);
if (tmp == page) {
length = -EINVAL;
goto out_free_page;
}
length = audit_set_loginuid(current, loginuid);
if (likely(length == 0))
length = count;
out_free_page:
free_page((unsigned long) page);
return length;
}
|
C
|
linux
| 0 |
CVE-2018-6942
|
https://www.cvedetails.com/cve/CVE-2018-6942/
|
CWE-476
|
https://git.savannah.gnu.org/cgit/freetype/freetype2.git/commit/?id=29c759284e305ec428703c9a5831d0b1fc3497ef
|
29c759284e305ec428703c9a5831d0b1fc3497ef
| null |
Ins_SxVTL( TT_ExecContext exc,
FT_UShort aIdx1,
FT_UShort aIdx2,
FT_UnitVector* Vec )
{
FT_Long A, B, C;
FT_Vector* p1;
FT_Vector* p2;
FT_Byte opcode = exc->opcode;
if ( BOUNDS( aIdx1, exc->zp2.n_points ) ||
BOUNDS( aIdx2, exc->zp1.n_points ) )
{
if ( exc->pedantic_hinting )
exc->error = FT_THROW( Invalid_Reference );
return FAILURE;
}
p1 = exc->zp1.cur + aIdx2;
p2 = exc->zp2.cur + aIdx1;
A = SUB_LONG( p1->x, p2->x );
B = SUB_LONG( p1->y, p2->y );
/* If p1 == p2, SPvTL and SFvTL behave the same as */
/* SPvTCA[X] and SFvTCA[X], respectively. */
/* */
/* Confirmed by Greg Hitchcock. */
if ( A == 0 && B == 0 )
{
A = 0x4000;
opcode = 0;
}
if ( ( opcode & 1 ) != 0 )
{
C = B; /* counter clockwise rotation */
B = A;
A = NEG_LONG( C );
}
Normalize( A, B, Vec );
return SUCCESS;
}
|
Ins_SxVTL( TT_ExecContext exc,
FT_UShort aIdx1,
FT_UShort aIdx2,
FT_UnitVector* Vec )
{
FT_Long A, B, C;
FT_Vector* p1;
FT_Vector* p2;
FT_Byte opcode = exc->opcode;
if ( BOUNDS( aIdx1, exc->zp2.n_points ) ||
BOUNDS( aIdx2, exc->zp1.n_points ) )
{
if ( exc->pedantic_hinting )
exc->error = FT_THROW( Invalid_Reference );
return FAILURE;
}
p1 = exc->zp1.cur + aIdx2;
p2 = exc->zp2.cur + aIdx1;
A = SUB_LONG( p1->x, p2->x );
B = SUB_LONG( p1->y, p2->y );
/* If p1 == p2, SPvTL and SFvTL behave the same as */
/* SPvTCA[X] and SFvTCA[X], respectively. */
/* */
/* Confirmed by Greg Hitchcock. */
if ( A == 0 && B == 0 )
{
A = 0x4000;
opcode = 0;
}
if ( ( opcode & 1 ) != 0 )
{
C = B; /* counter clockwise rotation */
B = A;
A = NEG_LONG( C );
}
Normalize( A, B, Vec );
return SUCCESS;
}
|
C
|
savannah
| 0 |
CVE-2017-9211
|
https://www.cvedetails.com/cve/CVE-2017-9211/
|
CWE-476
|
https://github.com/torvalds/linux/commit/9933e113c2e87a9f46a40fde8dafbf801dca1ab9
|
9933e113c2e87a9f46a40fde8dafbf801dca1ab9
|
crypto: skcipher - Add missing API setkey checks
The API setkey checks for key sizes and alignment went AWOL during the
skcipher conversion. This patch restores them.
Cc: <[email protected]>
Fixes: 4e6c3df4d729 ("crypto: skcipher - Add low-level skcipher...")
Reported-by: Baozeng <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
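A runnable user-space analogue of the restored checks (structure and names are ours; the kernel additionally bounces a misaligned key through an aligned buffer rather than returning a flag):

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    struct cipher_limits { unsigned min_keysize, max_keysize, alignmask; };

    /* 0 = ok, -1 = bad length (-EINVAL in the kernel), 1 = misaligned
     * key buffer (handled via an aligned copy in the real code). */
    int check_setkey(const struct cipher_limits *l,
                     const uint8_t *key, size_t keylen)
    {
        if (keylen < l->min_keysize || keylen > l->max_keysize)
            return -1;
        if ((uintptr_t)key & l->alignmask)
            return 1;
        return 0;
    }

    int main(void)
    {
        struct cipher_limits aes = { 16, 32, 15 };  /* illustrative limits */
        _Alignas(16) uint8_t buf[64];
        printf("%d %d %d\n",
               check_setkey(&aes, buf, 16),      /* 0  */
               check_setkey(&aes, buf + 1, 16),  /* 1  */
               check_setkey(&aes, buf, 7));      /* -1 */
        return 0;
    }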
|
static int skcipher_walk_skcipher(struct skcipher_walk *walk,
struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
scatterwalk_start(&walk->in, req->src);
scatterwalk_start(&walk->out, req->dst);
walk->total = req->cryptlen;
walk->iv = req->iv;
walk->oiv = req->iv;
walk->flags &= ~SKCIPHER_WALK_SLEEP;
walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
SKCIPHER_WALK_SLEEP : 0;
walk->blocksize = crypto_skcipher_blocksize(tfm);
walk->stride = crypto_skcipher_walksize(tfm);
walk->ivsize = crypto_skcipher_ivsize(tfm);
walk->alignmask = crypto_skcipher_alignmask(tfm);
return skcipher_walk_first(walk);
}
|
static int skcipher_walk_skcipher(struct skcipher_walk *walk,
struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
scatterwalk_start(&walk->in, req->src);
scatterwalk_start(&walk->out, req->dst);
walk->total = req->cryptlen;
walk->iv = req->iv;
walk->oiv = req->iv;
walk->flags &= ~SKCIPHER_WALK_SLEEP;
walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
SKCIPHER_WALK_SLEEP : 0;
walk->blocksize = crypto_skcipher_blocksize(tfm);
walk->stride = crypto_skcipher_walksize(tfm);
walk->ivsize = crypto_skcipher_ivsize(tfm);
walk->alignmask = crypto_skcipher_alignmask(tfm);
return skcipher_walk_first(walk);
}
|
C
|
linux
| 0 |
CVE-2013-2866
|
https://www.cvedetails.com/cve/CVE-2013-2866/
|
CWE-264
|
https://github.com/chromium/chromium/commit/69827e08e9e0a30ce452589705d7336edaffd490
|
69827e08e9e0a30ce452589705d7336edaffd490
|
Make the content setting for webcam/mic sticky for Pepper requests.
This makes the content setting sticky for webcam/mic requests from Pepper from non-https origins.
BUG=249335
[email protected], [email protected]
Review URL: https://codereview.chromium.org/17060006
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@206479 0039d316-1c4b-4281-b951-d872f2087c98
|
void MediaStreamDevicesController::NotifyUIRequestDenied() const {
if (!content_settings_)
return;
if (request_.audio_type == content::MEDIA_TAB_AUDIO_CAPTURE ||
request_.video_type == content::MEDIA_TAB_VIDEO_CAPTURE) {
return;
}
if (request_.audio_type == content::MEDIA_DEVICE_AUDIO_CAPTURE)
content_settings_->OnMicrophoneAccessBlocked();
if (request_.video_type == content::MEDIA_DEVICE_VIDEO_CAPTURE)
content_settings_->OnCameraAccessBlocked();
}
|
void MediaStreamDevicesController::NotifyUIRequestDenied() const {
if (!content_settings_)
return;
if (request_.audio_type == content::MEDIA_TAB_AUDIO_CAPTURE ||
request_.video_type == content::MEDIA_TAB_VIDEO_CAPTURE) {
return;
}
if (request_.audio_type == content::MEDIA_DEVICE_AUDIO_CAPTURE)
content_settings_->OnMicrophoneAccessBlocked();
if (request_.video_type == content::MEDIA_DEVICE_VIDEO_CAPTURE)
content_settings_->OnCameraAccessBlocked();
}
|
C
|
Chrome
| 0 |
CVE-2011-2350
|
https://www.cvedetails.com/cve/CVE-2011-2350/
|
CWE-20
|
https://github.com/chromium/chromium/commit/b944f670bb7a8a919daac497a4ea0536c954c201
|
b944f670bb7a8a919daac497a4ea0536c954c201
|
[JSC] Implement a helper method createNotEnoughArgumentsError()
https://bugs.webkit.org/show_bug.cgi?id=85102
Reviewed by Geoffrey Garen.
In bug 84787, kbr@ requested to avoid hard-coding
createTypeError(exec, "Not enough arguments") here and there.
This patch implements createNotEnoughArgumentsError(exec)
and uses it in JSC bindings.
c.f. a corresponding bug for V8 bindings is bug 85097.
Source/JavaScriptCore:
* runtime/Error.cpp:
(JSC::createNotEnoughArgumentsError):
(JSC):
* runtime/Error.h:
(JSC):
Source/WebCore:
Test: bindings/scripts/test/TestObj.idl
* bindings/scripts/CodeGeneratorJS.pm: Modified as described above.
(GenerateArgumentsCountCheck):
* bindings/js/JSDataViewCustom.cpp: Ditto.
(WebCore::getDataViewMember):
(WebCore::setDataViewMember):
* bindings/js/JSDeprecatedPeerConnectionCustom.cpp:
(WebCore::JSDeprecatedPeerConnectionConstructor::constructJSDeprecatedPeerConnection):
* bindings/js/JSDirectoryEntryCustom.cpp:
(WebCore::JSDirectoryEntry::getFile):
(WebCore::JSDirectoryEntry::getDirectory):
* bindings/js/JSSharedWorkerCustom.cpp:
(WebCore::JSSharedWorkerConstructor::constructJSSharedWorker):
* bindings/js/JSWebKitMutationObserverCustom.cpp:
(WebCore::JSWebKitMutationObserverConstructor::constructJSWebKitMutationObserver):
(WebCore::JSWebKitMutationObserver::observe):
* bindings/js/JSWorkerCustom.cpp:
(WebCore::JSWorkerConstructor::constructJSWorker):
* bindings/scripts/test/JS/JSFloat64Array.cpp: Updated run-bindings-tests.
(WebCore::jsFloat64ArrayPrototypeFunctionFoo):
* bindings/scripts/test/JS/JSTestActiveDOMObject.cpp:
(WebCore::jsTestActiveDOMObjectPrototypeFunctionExcitingFunction):
(WebCore::jsTestActiveDOMObjectPrototypeFunctionPostMessage):
* bindings/scripts/test/JS/JSTestCustomNamedGetter.cpp:
(WebCore::jsTestCustomNamedGetterPrototypeFunctionAnotherFunction):
* bindings/scripts/test/JS/JSTestEventTarget.cpp:
(WebCore::jsTestEventTargetPrototypeFunctionItem):
(WebCore::jsTestEventTargetPrototypeFunctionAddEventListener):
(WebCore::jsTestEventTargetPrototypeFunctionRemoveEventListener):
(WebCore::jsTestEventTargetPrototypeFunctionDispatchEvent):
* bindings/scripts/test/JS/JSTestInterface.cpp:
(WebCore::JSTestInterfaceConstructor::constructJSTestInterface):
(WebCore::jsTestInterfacePrototypeFunctionSupplementalMethod2):
* bindings/scripts/test/JS/JSTestMediaQueryListListener.cpp:
(WebCore::jsTestMediaQueryListListenerPrototypeFunctionMethod):
* bindings/scripts/test/JS/JSTestNamedConstructor.cpp:
(WebCore::JSTestNamedConstructorNamedConstructor::constructJSTestNamedConstructor):
* bindings/scripts/test/JS/JSTestObj.cpp:
(WebCore::JSTestObjConstructor::constructJSTestObj):
(WebCore::jsTestObjPrototypeFunctionVoidMethodWithArgs):
(WebCore::jsTestObjPrototypeFunctionIntMethodWithArgs):
(WebCore::jsTestObjPrototypeFunctionObjMethodWithArgs):
(WebCore::jsTestObjPrototypeFunctionMethodWithSequenceArg):
(WebCore::jsTestObjPrototypeFunctionMethodReturningSequence):
(WebCore::jsTestObjPrototypeFunctionMethodThatRequiresAllArgsAndThrows):
(WebCore::jsTestObjPrototypeFunctionSerializedValue):
(WebCore::jsTestObjPrototypeFunctionIdbKey):
(WebCore::jsTestObjPrototypeFunctionOptionsObject):
(WebCore::jsTestObjPrototypeFunctionAddEventListener):
(WebCore::jsTestObjPrototypeFunctionRemoveEventListener):
(WebCore::jsTestObjPrototypeFunctionMethodWithNonOptionalArgAndOptionalArg):
(WebCore::jsTestObjPrototypeFunctionMethodWithNonOptionalArgAndTwoOptionalArgs):
(WebCore::jsTestObjPrototypeFunctionMethodWithCallbackArg):
(WebCore::jsTestObjPrototypeFunctionMethodWithNonCallbackArgAndCallbackArg):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod1):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod2):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod3):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod4):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod5):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod6):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod7):
(WebCore::jsTestObjConstructorFunctionClassMethod2):
(WebCore::jsTestObjConstructorFunctionOverloadedMethod11):
(WebCore::jsTestObjConstructorFunctionOverloadedMethod12):
(WebCore::jsTestObjPrototypeFunctionMethodWithUnsignedLongArray):
(WebCore::jsTestObjPrototypeFunctionConvert1):
(WebCore::jsTestObjPrototypeFunctionConvert2):
(WebCore::jsTestObjPrototypeFunctionConvert3):
(WebCore::jsTestObjPrototypeFunctionConvert4):
(WebCore::jsTestObjPrototypeFunctionConvert5):
(WebCore::jsTestObjPrototypeFunctionStrictFunction):
* bindings/scripts/test/JS/JSTestSerializedScriptValueInterface.cpp:
(WebCore::JSTestSerializedScriptValueInterfaceConstructor::constructJSTestSerializedScriptValueInterface):
(WebCore::jsTestSerializedScriptValueInterfacePrototypeFunctionAcceptTransferList):
git-svn-id: svn://svn.chromium.org/blink/trunk@115536 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
JSValue jsFloat64ArrayConstructor(ExecState* exec, JSValue slotBase, const Identifier&)
{
JSFloat64Array* domObject = jsCast<JSFloat64Array*>(asObject(slotBase));
return JSFloat64Array::getConstructor(exec, domObject->globalObject());
}
|
JSValue jsFloat64ArrayConstructor(ExecState* exec, JSValue slotBase, const Identifier&)
{
JSFloat64Array* domObject = jsCast<JSFloat64Array*>(asObject(slotBase));
return JSFloat64Array::getConstructor(exec, domObject->globalObject());
}
|
C
|
Chrome
| 0 |
CVE-2016-10066
|
https://www.cvedetails.com/cve/CVE-2016-10066/
|
CWE-119
|
https://github.com/ImageMagick/ImageMagick/commit/f6e9d0d9955e85bdd7540b251cd50d598dacc5e6
|
f6e9d0d9955e85bdd7540b251cd50d598dacc5e6
| null |
static MagickBooleanType WriteMATImage(const ImageInfo *image_info,Image *image)
{
ExceptionInfo
*exception;
ssize_t y;
unsigned z;
const PixelPacket *p;
unsigned int status;
int logging;
size_t DataSize;
char padding;
char MATLAB_HDR[0x80];
time_t current_time;
struct tm local_time;
unsigned char *pixels;
int is_gray;
MagickOffsetType
scene;
QuantumInfo
*quantum_info;
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
logging=LogMagickEvent(CoderEvent,GetMagickModule(),"enter MAT");
(void) logging;
status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
if (status == MagickFalse)
return(MagickFalse);
image->depth=8;
current_time=time((time_t *) NULL);
#if defined(MAGICKCORE_HAVE_LOCALTIME_R)
(void) localtime_r(¤t_time,&local_time);
#else
(void) memcpy(&local_time,localtime(¤t_time),sizeof(local_time));
#endif
(void) memset(MATLAB_HDR,' ',MagickMin(sizeof(MATLAB_HDR),124));
FormatLocaleString(MATLAB_HDR,sizeof(MATLAB_HDR),
"MATLAB 5.0 MAT-file, Platform: %s, Created on: %s %s %2d %2d:%2d:%2d %d",
OsDesc,DayOfWTab[local_time.tm_wday],MonthsTab[local_time.tm_mon],
local_time.tm_mday,local_time.tm_hour,local_time.tm_min,
local_time.tm_sec,local_time.tm_year+1900);
MATLAB_HDR[0x7C]=0;
MATLAB_HDR[0x7D]=1;
MATLAB_HDR[0x7E]='I';
MATLAB_HDR[0x7F]='M';
(void) WriteBlob(image,sizeof(MATLAB_HDR),(unsigned char *) MATLAB_HDR);
scene=0;
do
{
(void) TransformImageColorspace(image,sRGBColorspace);
is_gray = IsGrayImage(image,&image->exception);
z = is_gray ? 0 : 3;
/*
Store MAT header.
*/
DataSize = image->rows /*Y*/ * image->columns /*X*/;
if(!is_gray) DataSize *= 3 /*Z*/;
padding=((unsigned char)(DataSize-1) & 0x7) ^ 0x7;
(void) WriteBlobLSBLong(image, miMATRIX);
(void) WriteBlobLSBLong(image, (unsigned int) DataSize+padding+(is_gray ? 48 : 56));
(void) WriteBlobLSBLong(image, 0x6); /* 0x88 */
(void) WriteBlobLSBLong(image, 0x8); /* 0x8C */
(void) WriteBlobLSBLong(image, 0x6); /* 0x90 */
(void) WriteBlobLSBLong(image, 0);
(void) WriteBlobLSBLong(image, 0x5); /* 0x98 */
(void) WriteBlobLSBLong(image, is_gray ? 0x8 : 0xC); /* 0x9C - DimFlag */
(void) WriteBlobLSBLong(image, (unsigned int) image->rows); /* x: 0xA0 */
(void) WriteBlobLSBLong(image, (unsigned int) image->columns); /* y: 0xA4 */
if(!is_gray)
{
(void) WriteBlobLSBLong(image, 3); /* z: 0xA8 */
(void) WriteBlobLSBLong(image, 0);
}
(void) WriteBlobLSBShort(image, 1); /* 0xB0 */
(void) WriteBlobLSBShort(image, 1); /* 0xB2 */
(void) WriteBlobLSBLong(image, 'M'); /* 0xB4 */
(void) WriteBlobLSBLong(image, 0x2); /* 0xB8 */
(void) WriteBlobLSBLong(image, (unsigned int) DataSize); /* 0xBC */
/*
Store image data.
*/
exception=(&image->exception);
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
pixels=GetQuantumPixels(quantum_info);
do
{
for (y=0; y < (ssize_t)image->columns; y++)
{
p=GetVirtualPixels(image,y,0,1,image->rows,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
(void) ExportQuantumPixels(image,(const CacheView *) NULL,quantum_info,
z2qtype[z],pixels,exception);
(void) WriteBlob(image,image->rows,pixels);
}
if (!SyncAuthenticPixels(image,exception))
break;
} while(z-- >= 2);
while(padding-->0) (void) WriteBlobByte(image,0);
quantum_info=DestroyQuantumInfo(quantum_info);
if (GetNextImageInList(image) == (Image *) NULL)
break;
image=SyncNextImageInList(image);
status=SetImageProgress(image,SaveImagesTag,scene++,
GetImageListLength(image));
if (status == MagickFalse)
break;
} while (image_info->adjoin != MagickFalse);
(void) CloseBlob(image);
return(MagickTrue);
}
|
static MagickBooleanType WriteMATImage(const ImageInfo *image_info,Image *image)
{
ExceptionInfo
*exception;
ssize_t y;
unsigned z;
const PixelPacket *p;
unsigned int status;
int logging;
size_t DataSize;
char padding;
char MATLAB_HDR[0x80];
time_t current_time;
struct tm local_time;
unsigned char *pixels;
int is_gray;
MagickOffsetType
scene;
QuantumInfo
*quantum_info;
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
logging=LogMagickEvent(CoderEvent,GetMagickModule(),"enter MAT");
(void) logging;
status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
if (status == MagickFalse)
return(MagickFalse);
image->depth=8;
current_time=time((time_t *) NULL);
#if defined(MAGICKCORE_HAVE_LOCALTIME_R)
(void) localtime_r(¤t_time,&local_time);
#else
(void) memcpy(&local_time,localtime(¤t_time),sizeof(local_time));
#endif
(void) memset(MATLAB_HDR,' ',MagickMin(sizeof(MATLAB_HDR),124));
FormatLocaleString(MATLAB_HDR,sizeof(MATLAB_HDR),
"MATLAB 5.0 MAT-file, Platform: %s, Created on: %s %s %2d %2d:%2d:%2d %d",
OsDesc,DayOfWTab[local_time.tm_wday],MonthsTab[local_time.tm_mon],
local_time.tm_mday,local_time.tm_hour,local_time.tm_min,
local_time.tm_sec,local_time.tm_year+1900);
MATLAB_HDR[0x7C]=0;
MATLAB_HDR[0x7D]=1;
MATLAB_HDR[0x7E]='I';
MATLAB_HDR[0x7F]='M';
(void) WriteBlob(image,sizeof(MATLAB_HDR),(unsigned char *) MATLAB_HDR);
scene=0;
do
{
(void) TransformImageColorspace(image,sRGBColorspace);
is_gray = IsGrayImage(image,&image->exception);
z = is_gray ? 0 : 3;
/*
Store MAT header.
*/
DataSize = image->rows /*Y*/ * image->columns /*X*/;
if(!is_gray) DataSize *= 3 /*Z*/;
padding=((unsigned char)(DataSize-1) & 0x7) ^ 0x7;
(void) WriteBlobLSBLong(image, miMATRIX);
(void) WriteBlobLSBLong(image, (unsigned int) DataSize+padding+(is_gray ? 48 : 56));
(void) WriteBlobLSBLong(image, 0x6); /* 0x88 */
(void) WriteBlobLSBLong(image, 0x8); /* 0x8C */
(void) WriteBlobLSBLong(image, 0x6); /* 0x90 */
(void) WriteBlobLSBLong(image, 0);
(void) WriteBlobLSBLong(image, 0x5); /* 0x98 */
(void) WriteBlobLSBLong(image, is_gray ? 0x8 : 0xC); /* 0x9C - DimFlag */
(void) WriteBlobLSBLong(image, (unsigned int) image->rows); /* x: 0xA0 */
(void) WriteBlobLSBLong(image, (unsigned int) image->columns); /* y: 0xA4 */
if(!is_gray)
{
(void) WriteBlobLSBLong(image, 3); /* z: 0xA8 */
(void) WriteBlobLSBLong(image, 0);
}
(void) WriteBlobLSBShort(image, 1); /* 0xB0 */
(void) WriteBlobLSBShort(image, 1); /* 0xB2 */
(void) WriteBlobLSBLong(image, 'M'); /* 0xB4 */
(void) WriteBlobLSBLong(image, 0x2); /* 0xB8 */
(void) WriteBlobLSBLong(image, (unsigned int) DataSize); /* 0xBC */
/*
Store image data.
*/
exception=(&image->exception);
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
pixels=GetQuantumPixels(quantum_info);
do
{
for (y=0; y < (ssize_t)image->columns; y++)
{
p=GetVirtualPixels(image,y,0,1,image->rows,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
(void) ExportQuantumPixels(image,(const CacheView *) NULL,quantum_info,
z2qtype[z],pixels,exception);
(void) WriteBlob(image,image->rows,pixels);
}
if (!SyncAuthenticPixels(image,exception))
break;
} while(z-- >= 2);
while(padding-->0) (void) WriteBlobByte(image,0);
quantum_info=DestroyQuantumInfo(quantum_info);
if (GetNextImageInList(image) == (Image *) NULL)
break;
image=SyncNextImageInList(image);
status=SetImageProgress(image,SaveImagesTag,scene++,
GetImageListLength(image));
if (status == MagickFalse)
break;
} while (image_info->adjoin != MagickFalse);
(void) CloseBlob(image);
return(MagickTrue);
}
|
C
|
ImageMagick
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/a44b00c88bc5ea35b5b150217c5fd6e4ce168e58
|
a44b00c88bc5ea35b5b150217c5fd6e4ce168e58
|
Apply behaviour change fix from upstream for previous XPath change.
BUG=58731
TEST=NONE
Review URL: http://codereview.chromium.org/4027006
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@63572 0039d316-1c4b-4281-b951-d872f2087c98
|
xmlXPathCompOpEvalLast(xmlXPathParserContextPtr ctxt, xmlXPathStepOpPtr op,
xmlNodePtr * last)
{
int total = 0, cur;
xmlXPathCompExprPtr comp;
xmlXPathObjectPtr arg1, arg2;
xmlNodePtr bak;
xmlDocPtr bakd;
int pp;
int cs;
CHECK_ERROR0;
comp = ctxt->comp;
switch (op->op) {
case XPATH_OP_END:
return (0);
case XPATH_OP_UNION:
bakd = ctxt->context->doc;
bak = ctxt->context->node;
pp = ctxt->context->proximityPosition;
cs = ctxt->context->contextSize;
total =
xmlXPathCompOpEvalLast(ctxt, &comp->steps[op->ch1], last);
CHECK_ERROR0;
if ((ctxt->value != NULL)
&& (ctxt->value->type == XPATH_NODESET)
&& (ctxt->value->nodesetval != NULL)
&& (ctxt->value->nodesetval->nodeNr >= 1)) {
/*
* limit tree traversing to first node in the result
*/
if (ctxt->value->nodesetval->nodeNr > 1)
xmlXPathNodeSetSort(ctxt->value->nodesetval);
*last =
ctxt->value->nodesetval->nodeTab[ctxt->value->
nodesetval->nodeNr -
1];
}
ctxt->context->doc = bakd;
ctxt->context->node = bak;
ctxt->context->proximityPosition = pp;
ctxt->context->contextSize = cs;
cur =
xmlXPathCompOpEvalLast(ctxt, &comp->steps[op->ch2], last);
CHECK_ERROR0;
if ((ctxt->value != NULL)
&& (ctxt->value->type == XPATH_NODESET)
&& (ctxt->value->nodesetval != NULL)
&& (ctxt->value->nodesetval->nodeNr >= 1)) { /* TODO: NOP ? */
}
CHECK_TYPE0(XPATH_NODESET);
arg2 = valuePop(ctxt);
CHECK_TYPE0(XPATH_NODESET);
arg1 = valuePop(ctxt);
arg1->nodesetval = xmlXPathNodeSetMerge(arg1->nodesetval,
arg2->nodesetval);
valuePush(ctxt, arg1);
xmlXPathReleaseObject(ctxt->context, arg2);
/* optimizer */
if (total > cur)
xmlXPathCompSwap(op);
return (total + cur);
case XPATH_OP_ROOT:
xmlXPathRoot(ctxt);
return (0);
case XPATH_OP_NODE:
if (op->ch1 != -1)
total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch1]);
CHECK_ERROR0;
if (op->ch2 != -1)
total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch2]);
CHECK_ERROR0;
valuePush(ctxt, xmlXPathCacheNewNodeSet(ctxt->context,
ctxt->context->node));
return (total);
case XPATH_OP_RESET:
if (op->ch1 != -1)
total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch1]);
CHECK_ERROR0;
if (op->ch2 != -1)
total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch2]);
CHECK_ERROR0;
ctxt->context->node = NULL;
return (total);
case XPATH_OP_COLLECT:{
if (op->ch1 == -1)
return (0);
total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch1]);
CHECK_ERROR0;
total += xmlXPathNodeCollectAndTest(ctxt, op, NULL, last, 0);
return (total);
}
case XPATH_OP_VALUE:
valuePush(ctxt,
xmlXPathCacheObjectCopy(ctxt->context,
(xmlXPathObjectPtr) op->value4));
return (0);
case XPATH_OP_SORT:
if (op->ch1 != -1)
total +=
xmlXPathCompOpEvalLast(ctxt, &comp->steps[op->ch1],
last);
CHECK_ERROR0;
if ((ctxt->value != NULL)
&& (ctxt->value->type == XPATH_NODESET)
&& (ctxt->value->nodesetval != NULL)
&& (ctxt->value->nodesetval->nodeNr > 1))
xmlXPathNodeSetSort(ctxt->value->nodesetval);
return (total);
default:
return (xmlXPathCompOpEval(ctxt, op));
}
}
|
xmlXPathCompOpEvalLast(xmlXPathParserContextPtr ctxt, xmlXPathStepOpPtr op,
xmlNodePtr * last)
{
int total = 0, cur;
xmlXPathCompExprPtr comp;
xmlXPathObjectPtr arg1, arg2;
xmlNodePtr bak;
xmlDocPtr bakd;
int pp;
int cs;
CHECK_ERROR0;
comp = ctxt->comp;
switch (op->op) {
case XPATH_OP_END:
return (0);
case XPATH_OP_UNION:
bakd = ctxt->context->doc;
bak = ctxt->context->node;
pp = ctxt->context->proximityPosition;
cs = ctxt->context->contextSize;
total =
xmlXPathCompOpEvalLast(ctxt, &comp->steps[op->ch1], last);
CHECK_ERROR0;
if ((ctxt->value != NULL)
&& (ctxt->value->type == XPATH_NODESET)
&& (ctxt->value->nodesetval != NULL)
&& (ctxt->value->nodesetval->nodeNr >= 1)) {
/*
* limit tree traversing to first node in the result
*/
if (ctxt->value->nodesetval->nodeNr > 1)
xmlXPathNodeSetSort(ctxt->value->nodesetval);
*last =
ctxt->value->nodesetval->nodeTab[ctxt->value->
nodesetval->nodeNr -
1];
}
ctxt->context->doc = bakd;
ctxt->context->node = bak;
ctxt->context->proximityPosition = pp;
ctxt->context->contextSize = cs;
cur =
xmlXPathCompOpEvalLast(ctxt, &comp->steps[op->ch2], last);
CHECK_ERROR0;
if ((ctxt->value != NULL)
&& (ctxt->value->type == XPATH_NODESET)
&& (ctxt->value->nodesetval != NULL)
&& (ctxt->value->nodesetval->nodeNr >= 1)) { /* TODO: NOP ? */
}
CHECK_TYPE0(XPATH_NODESET);
arg2 = valuePop(ctxt);
CHECK_TYPE0(XPATH_NODESET);
arg1 = valuePop(ctxt);
arg1->nodesetval = xmlXPathNodeSetMerge(arg1->nodesetval,
arg2->nodesetval);
valuePush(ctxt, arg1);
xmlXPathReleaseObject(ctxt->context, arg2);
/* optimizer */
if (total > cur)
xmlXPathCompSwap(op);
return (total + cur);
case XPATH_OP_ROOT:
xmlXPathRoot(ctxt);
return (0);
case XPATH_OP_NODE:
if (op->ch1 != -1)
total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch1]);
CHECK_ERROR0;
if (op->ch2 != -1)
total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch2]);
CHECK_ERROR0;
valuePush(ctxt, xmlXPathCacheNewNodeSet(ctxt->context,
ctxt->context->node));
return (total);
case XPATH_OP_RESET:
if (op->ch1 != -1)
total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch1]);
CHECK_ERROR0;
if (op->ch2 != -1)
total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch2]);
CHECK_ERROR0;
ctxt->context->node = NULL;
return (total);
case XPATH_OP_COLLECT:{
if (op->ch1 == -1)
return (0);
total += xmlXPathCompOpEval(ctxt, &comp->steps[op->ch1]);
CHECK_ERROR0;
total += xmlXPathNodeCollectAndTest(ctxt, op, NULL, last, 0);
return (total);
}
case XPATH_OP_VALUE:
valuePush(ctxt,
xmlXPathCacheObjectCopy(ctxt->context,
(xmlXPathObjectPtr) op->value4));
return (0);
case XPATH_OP_SORT:
if (op->ch1 != -1)
total +=
xmlXPathCompOpEvalLast(ctxt, &comp->steps[op->ch1],
last);
CHECK_ERROR0;
if ((ctxt->value != NULL)
&& (ctxt->value->type == XPATH_NODESET)
&& (ctxt->value->nodesetval != NULL)
&& (ctxt->value->nodesetval->nodeNr > 1))
xmlXPathNodeSetSort(ctxt->value->nodesetval);
return (total);
default:
return (xmlXPathCompOpEval(ctxt, op));
}
}
|
C
|
Chrome
| 0 |
CVE-2012-2313
|
https://www.cvedetails.com/cve/CVE-2012-2313/
|
CWE-264
|
https://github.com/torvalds/linux/commit/1bb57e940e1958e40d51f2078f50c3a96a9b2d75
|
1bb57e940e1958e40d51f2078f50c3a96a9b2d75
|
dl2k: Clean up rio_ioctl
The dl2k driver's rio_ioctl call has a few issues:
- No permissions checking
- Implements SIOCGMIIREG and SIOCGMIIREG using the SIOCDEVPRIVATE numbers
- Has a few ioctls that may have been used for debugging at one point
but have no place in the kernel proper.
This patch removes all but the MII ioctls, renumbers them to use the
standard ones, and adds the proper permission check for SIOCSMIIREG.
We can also get rid of the dl2k-specific struct mii_data in favor of
the generic struct mii_ioctl_data.
Since we have the phyid on hand, we can add the SIOCGMIIPHY ioctl too.
Most of the MII code for the driver could probably be converted to use
the generic MII library but I don't have a device to test the results.
Reported-by: Stephan Mueller <[email protected]>
Signed-off-by: Jeff Mahoney <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
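Roughly the shape of the slimmed-down handler after the patch (reconstructed from the description above, not quoted; mii_read()/mii_write() are the driver's own helpers):

    static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
    {
            struct netdev_private *np = netdev_priv(dev);
            struct mii_ioctl_data *miidata = if_mii(rq);
            int phy_addr = np->phy_addr;

            switch (cmd) {
            case SIOCGMIIPHY:
                    miidata->phy_id = phy_addr;
                    break;
            case SIOCGMIIREG:
                    miidata->val_out = mii_read (dev, phy_addr, miidata->reg_num);
                    break;
            case SIOCSMIIREG:
                    if (!capable(CAP_NET_ADMIN))
                            return -EPERM;      /* the missing permission check */
                    mii_write (dev, phy_addr, miidata->reg_num, miidata->val_in);
                    break;
            default:
                    return -EOPNOTSUPP;         /* debug ioctls dropped */
            }
            return 0;
    }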
|
set_multicast (struct net_device *dev)
{
long ioaddr = dev->base_addr;
u32 hash_table[2];
u16 rx_mode = 0;
struct netdev_private *np = netdev_priv(dev);
hash_table[0] = hash_table[1] = 0;
/* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
hash_table[1] |= 0x02000000;
if (dev->flags & IFF_PROMISC) {
/* Receive all frames promiscuously. */
rx_mode = ReceiveAllFrames;
} else if ((dev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(dev) > multicast_filter_limit)) {
/* Receive broadcast and multicast frames */
rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
} else if (!netdev_mc_empty(dev)) {
struct netdev_hw_addr *ha;
/* Receive broadcast frames and multicast frames filtering
by Hashtable */
rx_mode =
ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
netdev_for_each_mc_addr(ha, dev) {
int bit, index = 0;
int crc = ether_crc_le(ETH_ALEN, ha->addr);
/* The inverted high significant 6 bits of CRC are
used as an index to hashtable */
for (bit = 0; bit < 6; bit++)
if (crc & (1 << (31 - bit)))
index |= (1 << bit);
hash_table[index / 32] |= (1 << (index % 32));
}
} else {
rx_mode = ReceiveBroadcast | ReceiveUnicast;
}
if (np->vlan) {
/* ReceiveVLANMatch field in ReceiveMode */
rx_mode |= ReceiveVLANMatch;
}
writel (hash_table[0], ioaddr + HashTable0);
writel (hash_table[1], ioaddr + HashTable1);
writew (rx_mode, ioaddr + ReceiveMode);
}
|
set_multicast (struct net_device *dev)
{
long ioaddr = dev->base_addr;
u32 hash_table[2];
u16 rx_mode = 0;
struct netdev_private *np = netdev_priv(dev);
hash_table[0] = hash_table[1] = 0;
/* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
hash_table[1] |= 0x02000000;
if (dev->flags & IFF_PROMISC) {
/* Receive all frames promiscuously. */
rx_mode = ReceiveAllFrames;
} else if ((dev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(dev) > multicast_filter_limit)) {
/* Receive broadcast and multicast frames */
rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
} else if (!netdev_mc_empty(dev)) {
struct netdev_hw_addr *ha;
/* Receive broadcast frames and multicast frames filtering
by Hashtable */
rx_mode =
ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
netdev_for_each_mc_addr(ha, dev) {
int bit, index = 0;
int crc = ether_crc_le(ETH_ALEN, ha->addr);
/* The inverted high significant 6 bits of CRC are
used as an index to hashtable */
for (bit = 0; bit < 6; bit++)
if (crc & (1 << (31 - bit)))
index |= (1 << bit);
hash_table[index / 32] |= (1 << (index % 32));
}
} else {
rx_mode = ReceiveBroadcast | ReceiveUnicast;
}
if (np->vlan) {
/* ReceiveVLANMatch field in ReceiveMode */
rx_mode |= ReceiveVLANMatch;
}
writel (hash_table[0], ioaddr + HashTable0);
writel (hash_table[1], ioaddr + HashTable1);
writew (rx_mode, ioaddr + ReceiveMode);
}
|
C
|
linux
| 0 |
CVE-2018-6033
|
https://www.cvedetails.com/cve/CVE-2018-6033/
|
CWE-20
|
https://github.com/chromium/chromium/commit/a8d6ae61d266d8bc44c3dd2d08bda32db701e359
|
a8d6ae61d266d8bc44c3dd2d08bda32db701e359
|
Downloads : Fixed an issue of opening incorrect download file
When one download overwrites another completed download, calling download.open in the old download causes the new download to open, which could be dangerous and undesirable. In this CL, we are trying to avoid this by blocking the opening of the old download.
Bug: 793620
Change-Id: Ic948175756700ad7c08489c3cc347330daedb6f8
Reviewed-on: https://chromium-review.googlesource.com/826477
Reviewed-by: David Trainor <[email protected]>
Reviewed-by: Xing Liu <[email protected]>
Reviewed-by: John Abd-El-Malek <[email protected]>
Commit-Queue: Shakti Sahu <[email protected]>
Cr-Commit-Position: refs/heads/master@{#525810}
|
download::InProgressCache* ChromeDownloadManagerDelegate::GetInProgressCache() {
DCHECK(download_metadata_cache_ != nullptr);
return download_metadata_cache_.get();
}
|
download::InProgressCache* ChromeDownloadManagerDelegate::GetInProgressCache() {
DCHECK(download_metadata_cache_ != nullptr);
return download_metadata_cache_.get();
}
|
C
|
Chrome
| 0 |
CVE-2015-1342
|
https://www.cvedetails.com/cve/CVE-2015-1342/
|
CWE-264
|
https://github.com/lxc/lxcfs/commit/a8b6c3e0537e90fba3c55910fd1b7229d54a60a7
|
a8b6c3e0537e90fba3c55910fd1b7229d54a60a7
|
Fix checking of parent directories
Taken from the justification in the launchpad bug:
To a task in freezer cgroup /a/b/c/d, it should appear that there are no
cgroups other than its descendents. Since this is a filesystem, we must have
the parent directories, but each parent cgroup should only contain the child
which the task can see.
So, when this task looks at /a/b, it should see only directory 'c' and no
files. Attempt to create /a/b/x should result in -EPERM, whether /a/b/x already
exists or not. Attempts to query /a/b/x should result in -ENOENT whether /a/b/x
exists or not. Opening /a/b/tasks should result in -ENOENT.
The caller_may_see_dir checks specifically whether a task may see a cgroup
directory - i.e. /a/b/x if opening /a/b/x/tasks, and /a/b/c/d if doing
opendir('/a/b/c/d').
caller_is_in_ancestor() will return true if the caller in /a/b/c/d looks at
/a/b/c/d/e. If the caller is in a child cgroup of the queried one - i.e. if the
task in /a/b/c/d queries /a/b, then *nextcg will container the next (the only)
directory which he can see in the path - 'c'.
Beyond this, regular DAC permissions should apply, with the
root-in-user-namespace privilege over its mapped uids being respected. The
fc_may_access check does this check for both directories and files.
This is CVE-2015-1342 (LP: #1508481)
Signed-off-by: Serge Hallyn <[email protected]>
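The essence of the fix is visible by diffing the two functions below: cg_getattr() now runs an existence gate before the DAC check. In isolation (excerpted from the shape of the fixed function), the ordering is what matters, since probing a sibling cgroup must yield ENOENT rather than EACCES:

    if (!caller_may_see_dir(fc->pid, controller, cgroup)) {
            ret = -ENOENT;   /* hide existence from confined callers */
            goto out;
    }
    if (!fc_may_access(fc, controller, cgroup, NULL, O_RDONLY)) {
            ret = -EACCES;   /* ordinary DAC failure once visible */
            goto out;
    }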
|
static int cg_getattr(const char *path, struct stat *sb)
{
struct timespec now;
struct fuse_context *fc = fuse_get_context();
char * cgdir = NULL;
char *fpath = NULL, *path1, *path2;
struct cgfs_files *k = NULL;
const char *cgroup;
const char *controller = NULL;
int ret = -ENOENT;
if (!fc)
return -EIO;
memset(sb, 0, sizeof(struct stat));
if (clock_gettime(CLOCK_REALTIME, &now) < 0)
return -EINVAL;
sb->st_uid = sb->st_gid = 0;
sb->st_atim = sb->st_mtim = sb->st_ctim = now;
sb->st_size = 0;
if (strcmp(path, "/cgroup") == 0) {
sb->st_mode = S_IFDIR | 00755;
sb->st_nlink = 2;
return 0;
}
controller = pick_controller_from_path(fc, path);
if (!controller)
return -EIO;
cgroup = find_cgroup_in_path(path);
if (!cgroup) {
/* this is just /cgroup/controller, return it as a dir */
sb->st_mode = S_IFDIR | 00755;
sb->st_nlink = 2;
return 0;
}
get_cgdir_and_path(cgroup, &cgdir, &fpath);
if (!fpath) {
path1 = "/";
path2 = cgdir;
} else {
path1 = cgdir;
path2 = fpath;
}
/* check that cgcopy is either a child cgroup of cgdir, or listed in its keys.
* Then check that caller's cgroup is under path if fpath is a child
* cgroup, or cgdir if fpath is a file */
if (is_child_cgroup(controller, path1, path2)) {
if (!caller_may_see_dir(fc->pid, controller, cgroup)) {
ret = -ENOENT;
goto out;
}
if (!caller_is_in_ancestor(fc->pid, controller, cgroup, NULL)) {
/* this is just /cgroup/controller, return it as a dir */
sb->st_mode = S_IFDIR | 00555;
sb->st_nlink = 2;
ret = 0;
goto out;
}
if (!fc_may_access(fc, controller, cgroup, NULL, O_RDONLY)) {
ret = -EACCES;
goto out;
}
sb->st_mode = S_IFDIR | 00755;
k = cgfs_get_key(controller, cgroup, "tasks");
if (!k) {
sb->st_uid = sb->st_gid = 0;
} else {
sb->st_uid = k->uid;
sb->st_gid = k->gid;
}
free_key(k);
sb->st_nlink = 2;
ret = 0;
goto out;
}
if ((k = cgfs_get_key(controller, path1, path2)) != NULL) {
sb->st_mode = S_IFREG | k->mode;
sb->st_nlink = 1;
sb->st_uid = k->uid;
sb->st_gid = k->gid;
sb->st_size = 0;
free_key(k);
if (!caller_is_in_ancestor(fc->pid, controller, path1, NULL)) {
ret = -ENOENT;
goto out;
}
if (!fc_may_access(fc, controller, path1, path2, O_RDONLY)) {
ret = -EACCES;
goto out;
}
ret = 0;
}
out:
free(cgdir);
return ret;
}
|
static int cg_getattr(const char *path, struct stat *sb)
{
struct timespec now;
struct fuse_context *fc = fuse_get_context();
char * cgdir = NULL;
char *fpath = NULL, *path1, *path2;
struct cgfs_files *k = NULL;
const char *cgroup;
const char *controller = NULL;
int ret = -ENOENT;
if (!fc)
return -EIO;
memset(sb, 0, sizeof(struct stat));
if (clock_gettime(CLOCK_REALTIME, &now) < 0)
return -EINVAL;
sb->st_uid = sb->st_gid = 0;
sb->st_atim = sb->st_mtim = sb->st_ctim = now;
sb->st_size = 0;
if (strcmp(path, "/cgroup") == 0) {
sb->st_mode = S_IFDIR | 00755;
sb->st_nlink = 2;
return 0;
}
controller = pick_controller_from_path(fc, path);
if (!controller)
return -EIO;
cgroup = find_cgroup_in_path(path);
if (!cgroup) {
/* this is just /cgroup/controller, return it as a dir */
sb->st_mode = S_IFDIR | 00755;
sb->st_nlink = 2;
return 0;
}
get_cgdir_and_path(cgroup, &cgdir, &fpath);
if (!fpath) {
path1 = "/";
path2 = cgdir;
} else {
path1 = cgdir;
path2 = fpath;
}
/* check that cgcopy is either a child cgroup of cgdir, or listed in its keys.
* Then check that caller's cgroup is under path if fpath is a child
* cgroup, or cgdir if fpath is a file */
if (is_child_cgroup(controller, path1, path2)) {
if (!caller_is_in_ancestor(fc->pid, controller, cgroup, NULL)) {
/* this is just /cgroup/controller, return it as a dir */
sb->st_mode = S_IFDIR | 00555;
sb->st_nlink = 2;
ret = 0;
goto out;
}
if (!fc_may_access(fc, controller, cgroup, NULL, O_RDONLY)) {
ret = -EACCES;
goto out;
}
sb->st_mode = S_IFDIR | 00755;
k = cgfs_get_key(controller, cgroup, "tasks");
if (!k) {
sb->st_uid = sb->st_gid = 0;
} else {
sb->st_uid = k->uid;
sb->st_gid = k->gid;
}
free_key(k);
sb->st_nlink = 2;
ret = 0;
goto out;
}
if ((k = cgfs_get_key(controller, path1, path2)) != NULL) {
sb->st_mode = S_IFREG | k->mode;
sb->st_nlink = 1;
sb->st_uid = k->uid;
sb->st_gid = k->gid;
sb->st_size = 0;
free_key(k);
if (!caller_is_in_ancestor(fc->pid, controller, path1, NULL)) {
ret = -ENOENT;
goto out;
}
if (!fc_may_access(fc, controller, path1, path2, O_RDONLY)) {
ret = -EACCES;
goto out;
}
ret = 0;
}
out:
free(cgdir);
return ret;
}
|
C
|
lxcfs
| 1 |
CVE-2013-2906
|
https://www.cvedetails.com/cve/CVE-2013-2906/
|
CWE-362
|
https://github.com/chromium/chromium/commit/bcc265132e3d9b62c6c49facbf849575c615d1e3
|
bcc265132e3d9b62c6c49facbf849575c615d1e3
|
Cleanups in ScreenOrientationDispatcherHost.
BUG=None
Review URL: https://codereview.chromium.org/408213003
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@284786 0039d316-1c4b-4281-b951-d872f2087c98
|
void ScreenOrientationDispatcherHost::ResetCurrentLock() {
if (current_lock_) {
delete current_lock_;
current_lock_ = 0;
}
}
|
void ScreenOrientationDispatcherHost::ResetCurrentLock() {
if (current_lock_) {
delete current_lock_;
current_lock_ = 0;
}
}
|
C
|
Chrome
| 0 |
CVE-2012-2896
|
https://www.cvedetails.com/cve/CVE-2012-2896/
|
CWE-189
|
https://github.com/chromium/chromium/commit/3aad1a37affb1ab70d1897f2b03eb8c077264984
|
3aad1a37affb1ab70d1897f2b03eb8c077264984
|
Fix SafeAdd and SafeMultiply
BUG=145648,145544
Review URL: https://chromiumcodereview.appspot.com/10916165
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@155478 0039d316-1c4b-4281-b951-d872f2087c98
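The commit message is terse, so a sketch of what overflow-checked 32-bit helpers of this kind look like may help (illustrative and runnable, not the Chromium implementations):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    /* Return false and leave *dst untouched on overflow. */
    static bool safe_add_u32(uint32_t a, uint32_t b, uint32_t *dst)
    {
        if (a > UINT32_MAX - b)
            return false;
        *dst = a + b;
        return true;
    }

    static bool safe_mul_u32(uint32_t a, uint32_t b, uint32_t *dst)
    {
        if (a != 0 && b > UINT32_MAX / a)
            return false;
        *dst = a * b;
        return true;
    }

    int main(void)
    {
        uint32_t out;
        printf("add ok: %d\n", safe_add_u32(UINT32_MAX, 1, &out));      /* 0 */
        printf("mul ok: %d\n", safe_mul_u32(1u << 16, 1u << 16, &out)); /* 0 */
        if (safe_mul_u32(640, 480, &out))
            printf("640*480 = %u\n", out);
        return 0;
    }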
|
error::Error GLES2DecoderImpl::DoTexImage2D(
GLenum target,
GLint level,
GLenum internal_format,
GLsizei width,
GLsizei height,
GLint border,
GLenum format,
GLenum type,
const void* pixels,
uint32 pixels_size) {
if (!validators_->texture_target.IsValid(target)) {
SetGLErrorInvalidEnum("glTexImage2D", target, "target");
return error::kNoError;
}
if (!validators_->texture_format.IsValid(internal_format)) {
SetGLErrorInvalidEnum("glTexImage2D", internal_format, "internal_format");
return error::kNoError;
}
if (!validators_->texture_format.IsValid(format)) {
SetGLErrorInvalidEnum("glTexImage2D", format, "format");
return error::kNoError;
}
if (!validators_->pixel_type.IsValid(type)) {
SetGLErrorInvalidEnum("glTexImage2D", type, "type");
return error::kNoError;
}
if (format != internal_format) {
SetGLError(GL_INVALID_OPERATION,
"glTexImage2D", "format != internalFormat");
return error::kNoError;
}
if (!ValidateTextureParameters("glTexImage2D", target, format, type, level)) {
return error::kNoError;
}
if (!texture_manager()->ValidForTarget(target, level, width, height, 1) ||
border != 0) {
SetGLError(GL_INVALID_VALUE, "glTexImage2D", "dimensions out of range");
return error::kNoError;
}
if ((GLES2Util::GetChannelsForFormat(format) &
(GLES2Util::kDepth | GLES2Util::kStencil)) != 0 && pixels) {
SetGLError(
GL_INVALID_OPERATION,
"glTexImage2D", "can not supply data for depth or stencil textures");
return error::kNoError;
}
TextureManager::TextureInfo* info = GetTextureInfoForTarget(target);
if (!info) {
SetGLError(GL_INVALID_OPERATION,
"glTexImage2D", "unknown texture for target");
return error::kNoError;
}
if (info->IsImmutable()) {
SetGLError(GL_INVALID_OPERATION,
"glTexImage2D", "texture is immutable");
return error::kNoError;
}
GLsizei tex_width = 0;
GLsizei tex_height = 0;
GLenum tex_type = 0;
GLenum tex_format = 0;
bool level_is_same =
info->GetLevelSize(target, level, &tex_width, &tex_height) &&
info->GetLevelType(target, level, &tex_type, &tex_format) &&
width == tex_width && height == tex_height &&
type == tex_type && format == tex_format;
if (level_is_same && !pixels) {
texture_manager()->SetLevelInfo(
info,
target, level, internal_format, width, height, 1, border, format, type,
false);
tex_image_2d_failed_ = false;
return error::kNoError;
}
if (info->IsAttachedToFramebuffer()) {
state_dirty_ = true;
framebuffer_manager()->IncFramebufferStateChangeCount();
}
if (!teximage2d_faster_than_texsubimage2d_ && level_is_same && pixels) {
glTexSubImage2D(target, level, 0, 0, width, height, format, type, pixels);
texture_manager()->SetLevelCleared(info, target, level);
tex_image_2d_failed_ = false;
return error::kNoError;
}
CopyRealGLErrorsToWrapper();
WrappedTexImage2D(
target, level, internal_format, width, height, border, format, type,
pixels);
GLenum error = PeekGLError();
if (error == GL_NO_ERROR) {
texture_manager()->SetLevelInfo(
info,
target, level, internal_format, width, height, 1, border, format, type,
pixels != NULL);
tex_image_2d_failed_ = false;
}
return error::kNoError;
}
|
error::Error GLES2DecoderImpl::DoTexImage2D(
GLenum target,
GLint level,
GLenum internal_format,
GLsizei width,
GLsizei height,
GLint border,
GLenum format,
GLenum type,
const void* pixels,
uint32 pixels_size) {
if (!validators_->texture_target.IsValid(target)) {
SetGLErrorInvalidEnum("glTexImage2D", target, "target");
return error::kNoError;
}
if (!validators_->texture_format.IsValid(internal_format)) {
SetGLErrorInvalidEnum("glTexImage2D", internal_format, "internal_format");
return error::kNoError;
}
if (!validators_->texture_format.IsValid(format)) {
SetGLErrorInvalidEnum("glTexImage2D", format, "format");
return error::kNoError;
}
if (!validators_->pixel_type.IsValid(type)) {
SetGLErrorInvalidEnum("glTexImage2D", type, "type");
return error::kNoError;
}
if (format != internal_format) {
SetGLError(GL_INVALID_OPERATION,
"glTexImage2D", "format != internalFormat");
return error::kNoError;
}
if (!ValidateTextureParameters("glTexImage2D", target, format, type, level)) {
return error::kNoError;
}
if (!texture_manager()->ValidForTarget(target, level, width, height, 1) ||
border != 0) {
SetGLError(GL_INVALID_VALUE, "glTexImage2D", "dimensions out of range");
return error::kNoError;
}
if ((GLES2Util::GetChannelsForFormat(format) &
(GLES2Util::kDepth | GLES2Util::kStencil)) != 0 && pixels) {
SetGLError(
GL_INVALID_OPERATION,
"glTexImage2D", "can not supply data for depth or stencil textures");
return error::kNoError;
}
TextureManager::TextureInfo* info = GetTextureInfoForTarget(target);
if (!info) {
SetGLError(GL_INVALID_OPERATION,
"glTexImage2D", "unknown texture for target");
return error::kNoError;
}
if (info->IsImmutable()) {
SetGLError(GL_INVALID_OPERATION,
"glTexImage2D", "texture is immutable");
return error::kNoError;
}
GLsizei tex_width = 0;
GLsizei tex_height = 0;
GLenum tex_type = 0;
GLenum tex_format = 0;
bool level_is_same =
info->GetLevelSize(target, level, &tex_width, &tex_height) &&
info->GetLevelType(target, level, &tex_type, &tex_format) &&
width == tex_width && height == tex_height &&
type == tex_type && format == tex_format;
if (level_is_same && !pixels) {
texture_manager()->SetLevelInfo(
info,
target, level, internal_format, width, height, 1, border, format, type,
false);
tex_image_2d_failed_ = false;
return error::kNoError;
}
if (info->IsAttachedToFramebuffer()) {
state_dirty_ = true;
framebuffer_manager()->IncFramebufferStateChangeCount();
}
if (!teximage2d_faster_than_texsubimage2d_ && level_is_same && pixels) {
glTexSubImage2D(target, level, 0, 0, width, height, format, type, pixels);
texture_manager()->SetLevelCleared(info, target, level);
tex_image_2d_failed_ = false;
return error::kNoError;
}
CopyRealGLErrorsToWrapper();
WrappedTexImage2D(
target, level, internal_format, width, height, border, format, type,
pixels);
GLenum error = PeekGLError();
if (error == GL_NO_ERROR) {
texture_manager()->SetLevelInfo(
info,
target, level, internal_format, width, height, 1, border, format, type,
pixels != NULL);
tex_image_2d_failed_ = false;
}
return error::kNoError;
}
|
C
|
Chrome
| 0 |
CVE-2013-4592
|
https://www.cvedetails.com/cve/CVE-2013-4592/
|
CWE-399
|
https://github.com/torvalds/linux/commit/12d6e7538e2d418c08f082b1b44ffa5fb7270ed8
|
12d6e7538e2d418c08f082b1b44ffa5fb7270ed8
|
KVM: perform an invalid memslot step for gpa base change
PPC must flush all translations before the new memory slot
is visible.
Signed-off-by: Marcelo Tosatti <[email protected]>
Signed-off-by: Avi Kivity <[email protected]>
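A minimal sketch of the behavioural change (grounded in the guard that differs between the two versions of __kvm_set_memory_region below; the helper name is hypothetical):
/* Hedged sketch: the invalid-memslot step must run not only when a
 * slot is deleted (!npages) but also when its base gfn moves, so PPC
 * can flush all translations before the relocated slot becomes
 * visible. */
static int needs_invalid_memslot_step(unsigned long npages,
                                      unsigned long long base_gfn,
                                      unsigned long long old_base_gfn)
{
	return !npages || base_gfn != old_base_gfn;
}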
|
int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
int user_alloc)
{
int r;
gfn_t base_gfn;
unsigned long npages;
unsigned long i;
struct kvm_memory_slot *memslot;
struct kvm_memory_slot old, new;
struct kvm_memslots *slots, *old_memslots;
r = check_memory_region_flags(mem);
if (r)
goto out;
r = -EINVAL;
/* General sanity checks */
if (mem->memory_size & (PAGE_SIZE - 1))
goto out;
if (mem->guest_phys_addr & (PAGE_SIZE - 1))
goto out;
/* We can read the guest memory with __xxx_user() later on. */
if (user_alloc &&
((mem->userspace_addr & (PAGE_SIZE - 1)) ||
!access_ok(VERIFY_WRITE,
(void __user *)(unsigned long)mem->userspace_addr,
mem->memory_size)))
goto out;
if (mem->slot >= KVM_MEM_SLOTS_NUM)
goto out;
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
goto out;
memslot = id_to_memslot(kvm->memslots, mem->slot);
base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
npages = mem->memory_size >> PAGE_SHIFT;
r = -EINVAL;
if (npages > KVM_MEM_MAX_NR_PAGES)
goto out;
if (!npages)
mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
new = old = *memslot;
new.id = mem->slot;
new.base_gfn = base_gfn;
new.npages = npages;
new.flags = mem->flags;
/* Disallow changing a memory slot's size. */
r = -EINVAL;
if (npages && old.npages && npages != old.npages)
goto out_free;
/* Check for overlaps */
r = -EEXIST;
for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
struct kvm_memory_slot *s = &kvm->memslots->memslots[i];
if (s == memslot || !s->npages)
continue;
if (!((base_gfn + npages <= s->base_gfn) ||
(base_gfn >= s->base_gfn + s->npages)))
goto out_free;
}
/* Free page dirty bitmap if unneeded */
if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
new.dirty_bitmap = NULL;
r = -ENOMEM;
/* Allocate if a slot is being created */
if (npages && !old.npages) {
new.user_alloc = user_alloc;
new.userspace_addr = mem->userspace_addr;
if (kvm_arch_create_memslot(&new, npages))
goto out_free;
}
/* Allocate page dirty bitmap if needed */
if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
if (kvm_create_dirty_bitmap(&new) < 0)
goto out_free;
/* destroy any largepage mappings for dirty tracking */
}
if (!npages || base_gfn != old.base_gfn) {
struct kvm_memory_slot *slot;
r = -ENOMEM;
slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
GFP_KERNEL);
if (!slots)
goto out_free;
slot = id_to_memslot(slots, mem->slot);
slot->flags |= KVM_MEMSLOT_INVALID;
update_memslots(slots, NULL);
old_memslots = kvm->memslots;
rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu);
/* From this point no new shadow pages pointing to a deleted,
* or moved, memslot will be created.
*
* validation of sp->gfn happens in:
* - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
* - kvm_is_visible_gfn (mmu_check_roots)
*/
kvm_arch_flush_shadow_memslot(kvm, slot);
kfree(old_memslots);
}
r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
if (r)
goto out_free;
/* map/unmap the pages in iommu page table */
if (npages) {
r = kvm_iommu_map_pages(kvm, &new);
if (r)
goto out_free;
} else
kvm_iommu_unmap_pages(kvm, &old);
r = -ENOMEM;
slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
GFP_KERNEL);
if (!slots)
goto out_free;
/* actual memory is freed via old in kvm_free_physmem_slot below */
if (!npages) {
new.dirty_bitmap = NULL;
memset(&new.arch, 0, sizeof(new.arch));
}
update_memslots(slots, &new);
old_memslots = kvm->memslots;
rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu);
kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
/*
* If the new memory slot is created, we need to clear all
* mmio sptes.
*/
if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT)
kvm_arch_flush_shadow_all(kvm);
kvm_free_physmem_slot(&old, &new);
kfree(old_memslots);
return 0;
out_free:
kvm_free_physmem_slot(&new, &old);
out:
return r;
}
|
int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
int user_alloc)
{
int r;
gfn_t base_gfn;
unsigned long npages;
unsigned long i;
struct kvm_memory_slot *memslot;
struct kvm_memory_slot old, new;
struct kvm_memslots *slots, *old_memslots;
r = check_memory_region_flags(mem);
if (r)
goto out;
r = -EINVAL;
/* General sanity checks */
if (mem->memory_size & (PAGE_SIZE - 1))
goto out;
if (mem->guest_phys_addr & (PAGE_SIZE - 1))
goto out;
/* We can read the guest memory with __xxx_user() later on. */
if (user_alloc &&
((mem->userspace_addr & (PAGE_SIZE - 1)) ||
!access_ok(VERIFY_WRITE,
(void __user *)(unsigned long)mem->userspace_addr,
mem->memory_size)))
goto out;
if (mem->slot >= KVM_MEM_SLOTS_NUM)
goto out;
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
goto out;
memslot = id_to_memslot(kvm->memslots, mem->slot);
base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
npages = mem->memory_size >> PAGE_SHIFT;
r = -EINVAL;
if (npages > KVM_MEM_MAX_NR_PAGES)
goto out;
if (!npages)
mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
new = old = *memslot;
new.id = mem->slot;
new.base_gfn = base_gfn;
new.npages = npages;
new.flags = mem->flags;
/* Disallow changing a memory slot's size. */
r = -EINVAL;
if (npages && old.npages && npages != old.npages)
goto out_free;
/* Check for overlaps */
r = -EEXIST;
for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
struct kvm_memory_slot *s = &kvm->memslots->memslots[i];
if (s == memslot || !s->npages)
continue;
if (!((base_gfn + npages <= s->base_gfn) ||
(base_gfn >= s->base_gfn + s->npages)))
goto out_free;
}
/* Free page dirty bitmap if unneeded */
if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
new.dirty_bitmap = NULL;
r = -ENOMEM;
/* Allocate if a slot is being created */
if (npages && !old.npages) {
new.user_alloc = user_alloc;
new.userspace_addr = mem->userspace_addr;
if (kvm_arch_create_memslot(&new, npages))
goto out_free;
}
/* Allocate page dirty bitmap if needed */
if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
if (kvm_create_dirty_bitmap(&new) < 0)
goto out_free;
/* destroy any largepage mappings for dirty tracking */
}
if (!npages) {
struct kvm_memory_slot *slot;
r = -ENOMEM;
slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
GFP_KERNEL);
if (!slots)
goto out_free;
slot = id_to_memslot(slots, mem->slot);
slot->flags |= KVM_MEMSLOT_INVALID;
update_memslots(slots, NULL);
old_memslots = kvm->memslots;
rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu);
/* From this point no new shadow pages pointing to a deleted
* memslot will be created.
*
* validation of sp->gfn happens in:
* - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
* - kvm_is_visible_gfn (mmu_check_roots)
*/
kvm_arch_flush_shadow_memslot(kvm, slot);
kfree(old_memslots);
}
r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
if (r)
goto out_free;
/* map/unmap the pages in iommu page table */
if (npages) {
r = kvm_iommu_map_pages(kvm, &new);
if (r)
goto out_free;
} else
kvm_iommu_unmap_pages(kvm, &old);
r = -ENOMEM;
slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
GFP_KERNEL);
if (!slots)
goto out_free;
/* actual memory is freed via old in kvm_free_physmem_slot below */
if (!npages) {
new.dirty_bitmap = NULL;
memset(&new.arch, 0, sizeof(new.arch));
}
update_memslots(slots, &new);
old_memslots = kvm->memslots;
rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu);
kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
/*
* If the new memory slot is created, we need to clear all
* mmio sptes.
*/
if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT)
kvm_arch_flush_shadow_all(kvm);
kvm_free_physmem_slot(&old, &new);
kfree(old_memslots);
return 0;
out_free:
kvm_free_physmem_slot(&new, &old);
out:
return r;
}
|
C
|
linux
| 1 |
CVE-2013-0921
|
https://www.cvedetails.com/cve/CVE-2013-0921/
|
CWE-264
|
https://github.com/chromium/chromium/commit/e9841fbdaf41b4a2baaa413f94d5c0197f9261f4
|
e9841fbdaf41b4a2baaa413f94d5c0197f9261f4
|
Ensure extensions and the Chrome Web Store are loaded in new BrowsingInstances.
BUG=174943
TEST=Can't post message to CWS. See bug for repro steps.
Review URL: https://chromiumcodereview.appspot.com/12301013
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@184208 0039d316-1c4b-4281-b951-d872f2087c98
|
void RenderViewHostManager::DidNavigateMainFrame(
RenderViewHost* render_view_host) {
if (!cross_navigation_pending_) {
DCHECK(!pending_render_view_host_);
DCHECK(render_view_host == render_view_host_);
if (pending_web_ui())
CommitPending();
return;
}
if (render_view_host == pending_render_view_host_) {
if (pending_render_view_host_->GetPendingRequestId() == -1) {
OnCrossSiteResponse(pending_render_view_host_->GetProcess()->GetID(),
pending_render_view_host_->GetRoutingID());
}
CommitPending();
cross_navigation_pending_ = false;
} else if (render_view_host == render_view_host_) {
CancelPending();
cross_navigation_pending_ = false;
} else {
DCHECK(false);
}
}
|
void RenderViewHostManager::DidNavigateMainFrame(
RenderViewHost* render_view_host) {
if (!cross_navigation_pending_) {
DCHECK(!pending_render_view_host_);
DCHECK(render_view_host == render_view_host_);
if (pending_web_ui())
CommitPending();
return;
}
if (render_view_host == pending_render_view_host_) {
if (pending_render_view_host_->GetPendingRequestId() == -1) {
OnCrossSiteResponse(pending_render_view_host_->GetProcess()->GetID(),
pending_render_view_host_->GetRoutingID());
}
CommitPending();
cross_navigation_pending_ = false;
} else if (render_view_host == render_view_host_) {
CancelPending();
cross_navigation_pending_ = false;
} else {
DCHECK(false);
}
}
|
C
|
Chrome
| 0 |
CVE-2006-5331
|
https://www.cvedetails.com/cve/CVE-2006-5331/
|
CWE-19
|
https://github.com/torvalds/linux/commit/6c4841c2b6c32a134f9f36e5e08857138cc12b10
|
6c4841c2b6c32a134f9f36e5e08857138cc12b10
|
[POWERPC] Never panic when taking altivec exceptions from userspace
At the moment we rely on a cpu feature bit or a firmware property to
detect altivec. If we don't have either of these and the cpu does in fact
support altivec, we can cause a panic from userspace.
It seems safer to always send a signal if we manage to get a 0xf20
exception from userspace.
Signed-off-by: Anton Blanchard <[email protected]>
Signed-off-by: Paul Mackerras <[email protected]>
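A hedged sketch of the policy (not the actual patch), reusing the user_mode()/_exception() helpers visible in the handler below:
/* An altivec-unavailable trap (vector 0xf20) taken from user mode
 * always delivers a signal, even when neither the cpu feature bit nor
 * the firmware property advertised altivec; only a kernel-mode trap
 * remains fatal. */
void altivec_unavailable_from_user_sketch(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}
	panic("Unrecoverable VMX/Altivec Unavailable Exception from kernel");
}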
|
void alignment_exception(struct pt_regs *regs)
{
int fixed = 0;
/* we don't implement logging of alignment exceptions */
if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
fixed = fix_alignment(regs);
if (fixed == 1) {
regs->nip += 4; /* skip over emulated instruction */
emulate_single_step(regs);
return;
}
/* Operand address was bad */
if (fixed == -EFAULT) {
if (user_mode(regs))
_exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar);
else
/* Search exception table */
bad_page_fault(regs, regs->dar, SIGSEGV);
return;
}
_exception(SIGBUS, regs, BUS_ADRALN, regs->dar);
}
|
void alignment_exception(struct pt_regs *regs)
{
int fixed = 0;
/* we don't implement logging of alignment exceptions */
if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
fixed = fix_alignment(regs);
if (fixed == 1) {
regs->nip += 4; /* skip over emulated instruction */
emulate_single_step(regs);
return;
}
/* Operand address was bad */
if (fixed == -EFAULT) {
if (user_mode(regs))
_exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar);
else
/* Search exception table */
bad_page_fault(regs, regs->dar, SIGSEGV);
return;
}
_exception(SIGBUS, regs, BUS_ADRALN, regs->dar);
}
|
C
|
linux
| 0 |
CVE-2015-7513
|
https://www.cvedetails.com/cve/CVE-2015-7513/
| null |
https://github.com/torvalds/linux/commit/0185604c2d82c560dab2f2933a18f797e74ab5a8
|
0185604c2d82c560dab2f2933a18f797e74ab5a8
|
KVM: x86: Reload pit counters for all channels when restoring state
Currently, if userspace restores the pit counters with a count of 0
on channels 1 or 2 and the guest attempts to read the count on those
channels, then KVM will perform a mod of 0 and crash. This change
ensures that 0 values are converted to 65536, as per the spec.
This is CVE-2015-7513.
Signed-off-by: Andy Honig <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
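A hedged sketch of the conversion described above (helper name hypothetical, not the actual patch):
/* Applied when counters are restored, so a later "value % count" read
 * cannot divide by zero. */
static unsigned int pit_effective_count(unsigned int programmed)
{
	/* Per the 8254 spec, a programmed count of 0 means 65536. */
	return programmed ? programmed : 0x10000;
}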
|
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
bool pr = false;
u32 msr = msr_info->index;
u64 data = msr_info->data;
switch (msr) {
case MSR_AMD64_NB_CFG:
case MSR_IA32_UCODE_REV:
case MSR_IA32_UCODE_WRITE:
case MSR_VM_HSAVE_PA:
case MSR_AMD64_PATCH_LOADER:
case MSR_AMD64_BU_CFG2:
break;
case MSR_EFER:
return set_efer(vcpu, data);
case MSR_K7_HWCR:
data &= ~(u64)0x40; /* ignore flush filter disable */
data &= ~(u64)0x100; /* ignore ignne emulation enable */
data &= ~(u64)0x8; /* ignore TLB cache disable */
data &= ~(u64)0x40000; /* ignore Mc status write enable */
if (data != 0) {
vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
data);
return 1;
}
break;
case MSR_FAM10H_MMIO_CONF_BASE:
if (data != 0) {
vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
"0x%llx\n", data);
return 1;
}
break;
case MSR_IA32_DEBUGCTLMSR:
if (!data) {
/* We support the non-activated case already */
break;
} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
/* Values other than LBR and BTF are vendor-specific,
thus reserved and should throw a #GP */
return 1;
}
vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
__func__, data);
break;
case 0x200 ... 0x2ff:
return kvm_mtrr_set_msr(vcpu, msr, data);
case MSR_IA32_APICBASE:
return kvm_set_apic_base(vcpu, msr_info);
case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
return kvm_x2apic_msr_write(vcpu, msr, data);
case MSR_IA32_TSCDEADLINE:
kvm_set_lapic_tscdeadline_msr(vcpu, data);
break;
case MSR_IA32_TSC_ADJUST:
if (guest_cpuid_has_tsc_adjust(vcpu)) {
if (!msr_info->host_initiated) {
s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
adjust_tsc_offset_guest(vcpu, adj);
}
vcpu->arch.ia32_tsc_adjust_msr = data;
}
break;
case MSR_IA32_MISC_ENABLE:
vcpu->arch.ia32_misc_enable_msr = data;
break;
case MSR_IA32_SMBASE:
if (!msr_info->host_initiated)
return 1;
vcpu->arch.smbase = data;
break;
case MSR_KVM_WALL_CLOCK_NEW:
case MSR_KVM_WALL_CLOCK:
vcpu->kvm->arch.wall_clock = data;
kvm_write_wall_clock(vcpu->kvm, data);
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
u64 gpa_offset;
struct kvm_arch *ka = &vcpu->kvm->arch;
kvmclock_reset(vcpu);
if (vcpu->vcpu_id == 0 && !msr_info->host_initiated) {
bool tmp = (msr == MSR_KVM_SYSTEM_TIME);
if (ka->boot_vcpu_runs_old_kvmclock != tmp)
set_bit(KVM_REQ_MASTERCLOCK_UPDATE,
&vcpu->requests);
ka->boot_vcpu_runs_old_kvmclock = tmp;
}
vcpu->arch.time = data;
kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
/* we verify if the enable bit is set... */
if (!(data & 1))
break;
gpa_offset = data & ~(PAGE_MASK | 1);
if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
&vcpu->arch.pv_time, data & ~1ULL,
sizeof(struct pvclock_vcpu_time_info)))
vcpu->arch.pv_time_enabled = false;
else
vcpu->arch.pv_time_enabled = true;
break;
}
case MSR_KVM_ASYNC_PF_EN:
if (kvm_pv_enable_async_pf(vcpu, data))
return 1;
break;
case MSR_KVM_STEAL_TIME:
if (unlikely(!sched_info_on()))
return 1;
if (data & KVM_STEAL_RESERVED_MASK)
return 1;
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
data & KVM_STEAL_VALID_BITS,
sizeof(struct kvm_steal_time)))
return 1;
vcpu->arch.st.msr_val = data;
if (!(data & KVM_MSR_ENABLED))
break;
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
break;
case MSR_KVM_PV_EOI_EN:
if (kvm_lapic_enable_pv_eoi(vcpu, data))
return 1;
break;
case MSR_IA32_MCG_CTL:
case MSR_IA32_MCG_STATUS:
case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
return set_msr_mce(vcpu, msr, data);
case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
pr = true; /* fall through */
case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
if (kvm_pmu_is_valid_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr_info);
if (pr || data != 0)
vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
"0x%x data 0x%llx\n", msr, data);
break;
case MSR_K7_CLK_CTL:
/*
* Ignore all writes to this no longer documented MSR.
* Writes are only relevant for old K7 processors,
* all pre-dating SVM, but a recommended workaround from
* AMD for these chips. It is possible to specify the
* affected processor models on the command line, hence
* the need to ignore the workaround.
*/
break;
case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
case HV_X64_MSR_CRASH_CTL:
return kvm_hv_set_msr_common(vcpu, msr, data,
msr_info->host_initiated);
case MSR_IA32_BBL_CR_CTL3:
/* Drop writes to this legacy MSR -- see rdmsr
* counterpart for further detail.
*/
vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
break;
case MSR_AMD64_OSVW_ID_LENGTH:
if (!guest_cpuid_has_osvw(vcpu))
return 1;
vcpu->arch.osvw.length = data;
break;
case MSR_AMD64_OSVW_STATUS:
if (!guest_cpuid_has_osvw(vcpu))
return 1;
vcpu->arch.osvw.status = data;
break;
default:
if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
return xen_hvm_config(vcpu, data);
if (kvm_pmu_is_valid_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr_info);
if (!ignore_msrs) {
vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
msr, data);
return 1;
} else {
vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
msr, data);
break;
}
}
return 0;
}
|
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
bool pr = false;
u32 msr = msr_info->index;
u64 data = msr_info->data;
switch (msr) {
case MSR_AMD64_NB_CFG:
case MSR_IA32_UCODE_REV:
case MSR_IA32_UCODE_WRITE:
case MSR_VM_HSAVE_PA:
case MSR_AMD64_PATCH_LOADER:
case MSR_AMD64_BU_CFG2:
break;
case MSR_EFER:
return set_efer(vcpu, data);
case MSR_K7_HWCR:
data &= ~(u64)0x40; /* ignore flush filter disable */
data &= ~(u64)0x100; /* ignore ignne emulation enable */
data &= ~(u64)0x8; /* ignore TLB cache disable */
data &= ~(u64)0x40000; /* ignore Mc status write enable */
if (data != 0) {
vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
data);
return 1;
}
break;
case MSR_FAM10H_MMIO_CONF_BASE:
if (data != 0) {
vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
"0x%llx\n", data);
return 1;
}
break;
case MSR_IA32_DEBUGCTLMSR:
if (!data) {
/* We support the non-activated case already */
break;
} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
/* Values other than LBR and BTF are vendor-specific,
thus reserved and should throw a #GP */
return 1;
}
vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
__func__, data);
break;
case 0x200 ... 0x2ff:
return kvm_mtrr_set_msr(vcpu, msr, data);
case MSR_IA32_APICBASE:
return kvm_set_apic_base(vcpu, msr_info);
case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
return kvm_x2apic_msr_write(vcpu, msr, data);
case MSR_IA32_TSCDEADLINE:
kvm_set_lapic_tscdeadline_msr(vcpu, data);
break;
case MSR_IA32_TSC_ADJUST:
if (guest_cpuid_has_tsc_adjust(vcpu)) {
if (!msr_info->host_initiated) {
s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
adjust_tsc_offset_guest(vcpu, adj);
}
vcpu->arch.ia32_tsc_adjust_msr = data;
}
break;
case MSR_IA32_MISC_ENABLE:
vcpu->arch.ia32_misc_enable_msr = data;
break;
case MSR_IA32_SMBASE:
if (!msr_info->host_initiated)
return 1;
vcpu->arch.smbase = data;
break;
case MSR_KVM_WALL_CLOCK_NEW:
case MSR_KVM_WALL_CLOCK:
vcpu->kvm->arch.wall_clock = data;
kvm_write_wall_clock(vcpu->kvm, data);
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
u64 gpa_offset;
struct kvm_arch *ka = &vcpu->kvm->arch;
kvmclock_reset(vcpu);
if (vcpu->vcpu_id == 0 && !msr_info->host_initiated) {
bool tmp = (msr == MSR_KVM_SYSTEM_TIME);
if (ka->boot_vcpu_runs_old_kvmclock != tmp)
set_bit(KVM_REQ_MASTERCLOCK_UPDATE,
&vcpu->requests);
ka->boot_vcpu_runs_old_kvmclock = tmp;
}
vcpu->arch.time = data;
kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
/* we verify if the enable bit is set... */
if (!(data & 1))
break;
gpa_offset = data & ~(PAGE_MASK | 1);
if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
&vcpu->arch.pv_time, data & ~1ULL,
sizeof(struct pvclock_vcpu_time_info)))
vcpu->arch.pv_time_enabled = false;
else
vcpu->arch.pv_time_enabled = true;
break;
}
case MSR_KVM_ASYNC_PF_EN:
if (kvm_pv_enable_async_pf(vcpu, data))
return 1;
break;
case MSR_KVM_STEAL_TIME:
if (unlikely(!sched_info_on()))
return 1;
if (data & KVM_STEAL_RESERVED_MASK)
return 1;
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
data & KVM_STEAL_VALID_BITS,
sizeof(struct kvm_steal_time)))
return 1;
vcpu->arch.st.msr_val = data;
if (!(data & KVM_MSR_ENABLED))
break;
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
break;
case MSR_KVM_PV_EOI_EN:
if (kvm_lapic_enable_pv_eoi(vcpu, data))
return 1;
break;
case MSR_IA32_MCG_CTL:
case MSR_IA32_MCG_STATUS:
case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
return set_msr_mce(vcpu, msr, data);
case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
pr = true; /* fall through */
case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
if (kvm_pmu_is_valid_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr_info);
if (pr || data != 0)
vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
"0x%x data 0x%llx\n", msr, data);
break;
case MSR_K7_CLK_CTL:
/*
* Ignore all writes to this no longer documented MSR.
* Writes are only relevant for old K7 processors,
* all pre-dating SVM, but a recommended workaround from
* AMD for these chips. It is possible to specify the
* affected processor models on the command line, hence
* the need to ignore the workaround.
*/
break;
case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
case HV_X64_MSR_CRASH_CTL:
return kvm_hv_set_msr_common(vcpu, msr, data,
msr_info->host_initiated);
case MSR_IA32_BBL_CR_CTL3:
/* Drop writes to this legacy MSR -- see rdmsr
* counterpart for further detail.
*/
vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
break;
case MSR_AMD64_OSVW_ID_LENGTH:
if (!guest_cpuid_has_osvw(vcpu))
return 1;
vcpu->arch.osvw.length = data;
break;
case MSR_AMD64_OSVW_STATUS:
if (!guest_cpuid_has_osvw(vcpu))
return 1;
vcpu->arch.osvw.status = data;
break;
default:
if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
return xen_hvm_config(vcpu, data);
if (kvm_pmu_is_valid_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr_info);
if (!ignore_msrs) {
vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
msr, data);
return 1;
} else {
vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
msr, data);
break;
}
}
return 0;
}
|
C
|
linux
| 0 |
CVE-2018-13006
|
https://www.cvedetails.com/cve/CVE-2018-13006/
|
CWE-125
|
https://github.com/gpac/gpac/commit/bceb03fd2be95097a7b409ea59914f332fb6bc86
|
bceb03fd2be95097a7b409ea59914f332fb6bc86
|
fixed 2 possible heap overflows (inc. #1088)
|
GF_Err rssr_Read(GF_Box *s, GF_BitStream *bs)
{
GF_ReceivedSsrcBox *ptr = (GF_ReceivedSsrcBox *)s;
ptr->ssrc = gf_bs_read_u32(bs);
return GF_OK;
}
|
GF_Err rssr_Read(GF_Box *s, GF_BitStream *bs)
{
GF_ReceivedSsrcBox *ptr = (GF_ReceivedSsrcBox *)s;
ptr->ssrc = gf_bs_read_u32(bs);
return GF_OK;
}
|
C
|
gpac
| 0 |
CVE-2010-1166
|
https://www.cvedetails.com/cve/CVE-2010-1166/
|
CWE-189
|
https://cgit.freedesktop.org/xorg/xserver/commit/?id=d2f813f7db
|
d2f813f7db157fc83abc4b3726821c36ee7e40b1
| null |
fbCombineInC (CARD32 *dest, CARD32 *src, CARD32 *mask, int width)
{
int i;
for (i = 0; i < width; ++i) {
CARD32 d = READ(dest + i);
CARD16 a = d >> 24;
CARD32 s = 0;
if (a)
{
CARD32 m = READ(mask + i);
s = READ(src + i);
fbCombineMaskValueC (&s, &m);
if (a != 0xff)
{
FbByteMul(s, a);
}
}
WRITE(dest + i, s);
}
}
|
fbCombineInC (CARD32 *dest, CARD32 *src, CARD32 *mask, int width)
{
int i;
for (i = 0; i < width; ++i) {
CARD32 d = READ(dest + i);
CARD16 a = d >> 24;
CARD32 s = 0;
if (a)
{
CARD32 m = READ(mask + i);
s = READ(src + i);
fbCombineMaskValueC (&s, &m);
if (a != 0xff)
{
FbByteMul(s, a);
}
}
WRITE(dest + i, s);
}
}
|
C
|
xserver
| 0 |
CVE-2018-6063
|
https://www.cvedetails.com/cve/CVE-2018-6063/
|
CWE-787
|
https://github.com/chromium/chromium/commit/673ce95d481ea9368c4d4d43ac756ba1d6d9e608
|
673ce95d481ea9368c4d4d43ac756ba1d6d9e608
|
Correct mojo::WrapSharedMemoryHandle usage
Fixes some incorrect uses of mojo::WrapSharedMemoryHandle which
wrongly assumed that the call has any control over the memory
protection applied to a handle once it is mapped.
Where fixing usage is infeasible for this CL, TODOs are added to
annotate follow-up work.
Also updates the API and documentation to (hopefully) improve clarity
and avoid similar mistakes from being made in the future.
BUG=792900
Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
Change-Id: I0578aaa9ca3bfcb01aaf2451315d1ede95458477
Reviewed-on: https://chromium-review.googlesource.com/818282
Reviewed-by: Wei Li <[email protected]>
Reviewed-by: Lei Zhang <[email protected]>
Reviewed-by: John Abd-El-Malek <[email protected]>
Reviewed-by: Daniel Cheng <[email protected]>
Reviewed-by: Sadrul Chowdhury <[email protected]>
Reviewed-by: Yuzhu Shen <[email protected]>
Reviewed-by: Robert Sesek <[email protected]>
Commit-Queue: Ken Rockot <[email protected]>
Cr-Commit-Position: refs/heads/master@{#530268}
|
uint8_t* SharedMemoryHandleProvider::Handle::data() const {
return static_cast<uint8_t*>(owner_->shared_memory_->memory());
}
|
uint8_t* SharedMemoryHandleProvider::Handle::data() const {
return static_cast<uint8_t*>(owner_->shared_memory_->memory());
}
|
C
|
Chrome
| 0 |
CVE-2016-3134
|
https://www.cvedetails.com/cve/CVE-2016-3134/
|
CWE-119
|
https://github.com/torvalds/linux/commit/54d83fc74aa9ec72794373cb47432c5f7fb1a309
|
54d83fc74aa9ec72794373cb47432c5f7fb1a309
|
netfilter: x_tables: fix unconditional helper
Ben Hawkes says:
In the mark_source_chains function (net/ipv4/netfilter/ip_tables.c) it
is possible for a user-supplied ipt_entry structure to have a large
next_offset field. This field is not bounds checked prior to writing a
counter value at the supplied offset.
Problem is that mark_source_chains should not have been called --
the rule doesn't have a next entry, so it's supposed to return
an absolute verdict of either ACCEPT or DROP.
However, the function conditional() doesn't work as the name implies.
It only checks that the rule is using wildcard address matching.
An unconditional rule, however, must also not use any matches
(no -m args).
The underflow validator only checked the addresses, therefore
passing the 'unconditional absolute verdict' test, while
mark_source_chains also tested for presence of matches, and thus
proceeded to the next (non-existent) rule.
Unify this so that all the callers have same idea of 'unconditional rule'.
Reported-by: Ben Hawkes <[email protected]>
Signed-off-by: Florian Westphal <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>
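A hedged sketch of the unified check the message calls for, on an illustrative rule layout (not the real ipt_entry):
#include <stdbool.h>
#include <string.h>

struct rule_model {
	unsigned char addr_spec[8];   /* all zeroes = wildcard addressing */
	unsigned int header_size;     /* size of the fixed rule header */
	unsigned int target_offset;   /* == header_size iff no match entries */
};

/* A rule is unconditional only if it uses wildcard addresses AND
 * carries no match entries; checking just the addresses is what let
 * mark_source_chains be reached for absolute-verdict rules. */
static bool rule_is_unconditional(const struct rule_model *e)
{
	static const unsigned char wildcard[8]; /* zero-initialized */
	return e->target_offset == e->header_size &&
	       memcmp(e->addr_spec, wildcard, sizeof(wildcard)) == 0;
}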
|
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
struct xt_table_info *newinfo,
unsigned int *size,
const unsigned char *base,
const unsigned char *limit,
const unsigned int *hook_entries,
const unsigned int *underflows,
const char *name)
{
struct xt_entry_match *ematch;
struct xt_entry_target *t;
struct xt_target *target;
unsigned int entry_offset;
unsigned int j;
int ret, off, h;
duprintf("check_compat_entry_size_and_hooks %p\n", e);
if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
(unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
(unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p, limit = %p\n", e, limit);
return -EINVAL;
}
if (e->next_offset < sizeof(struct compat_ip6t_entry) +
sizeof(struct compat_xt_entry_target)) {
duprintf("checking: element %p size %u\n",
e, e->next_offset);
return -EINVAL;
}
/* For purposes of check_entry casting the compat entry is fine */
ret = check_entry((struct ip6t_entry *)e);
if (ret)
return ret;
off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
entry_offset = (void *)e - (void *)base;
j = 0;
xt_ematch_foreach(ematch, e) {
ret = compat_find_calc_match(ematch, name, &e->ipv6, &off);
if (ret != 0)
goto release_matches;
++j;
}
t = compat_ip6t_get_target(e);
target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
t->u.user.revision);
if (IS_ERR(target)) {
duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
t->u.user.name);
ret = PTR_ERR(target);
goto release_matches;
}
t->u.kernel.target = target;
off += xt_compat_target_offset(target);
*size += off;
ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
if (ret)
goto out;
/* Check hooks & underflows */
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
if ((unsigned char *)e - base == hook_entries[h])
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h])
newinfo->underflow[h] = underflows[h];
}
/* Clear counters and comefrom */
memset(&e->counters, 0, sizeof(e->counters));
e->comefrom = 0;
return 0;
out:
module_put(t->u.kernel.target->me);
release_matches:
xt_ematch_foreach(ematch, e) {
if (j-- == 0)
break;
module_put(ematch->u.kernel.match->me);
}
return ret;
}
|
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
struct xt_table_info *newinfo,
unsigned int *size,
const unsigned char *base,
const unsigned char *limit,
const unsigned int *hook_entries,
const unsigned int *underflows,
const char *name)
{
struct xt_entry_match *ematch;
struct xt_entry_target *t;
struct xt_target *target;
unsigned int entry_offset;
unsigned int j;
int ret, off, h;
duprintf("check_compat_entry_size_and_hooks %p\n", e);
if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
(unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
(unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p, limit = %p\n", e, limit);
return -EINVAL;
}
if (e->next_offset < sizeof(struct compat_ip6t_entry) +
sizeof(struct compat_xt_entry_target)) {
duprintf("checking: element %p size %u\n",
e, e->next_offset);
return -EINVAL;
}
/* For purposes of check_entry casting the compat entry is fine */
ret = check_entry((struct ip6t_entry *)e);
if (ret)
return ret;
off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
entry_offset = (void *)e - (void *)base;
j = 0;
xt_ematch_foreach(ematch, e) {
ret = compat_find_calc_match(ematch, name, &e->ipv6, &off);
if (ret != 0)
goto release_matches;
++j;
}
t = compat_ip6t_get_target(e);
target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
t->u.user.revision);
if (IS_ERR(target)) {
duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
t->u.user.name);
ret = PTR_ERR(target);
goto release_matches;
}
t->u.kernel.target = target;
off += xt_compat_target_offset(target);
*size += off;
ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
if (ret)
goto out;
/* Check hooks & underflows */
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
if ((unsigned char *)e - base == hook_entries[h])
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h])
newinfo->underflow[h] = underflows[h];
}
/* Clear counters and comefrom */
memset(&e->counters, 0, sizeof(e->counters));
e->comefrom = 0;
return 0;
out:
module_put(t->u.kernel.target->me);
release_matches:
xt_ematch_foreach(ematch, e) {
if (j-- == 0)
break;
module_put(ematch->u.kernel.match->me);
}
return ret;
}
|
C
|
linux
| 0 |
CVE-2012-2880
|
https://www.cvedetails.com/cve/CVE-2012-2880/
|
CWE-362
|
https://github.com/chromium/chromium/commit/fcd3a7a671ecf2d5f46ea34787d27507a914d2f5
|
fcd3a7a671ecf2d5f46ea34787d27507a914d2f5
|
[Sync] Clean up all tab sync enabling logic now that it's on by default.
BUG=none
TEST=
Review URL: https://chromiumcodereview.appspot.com/10443046
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@139462 0039d316-1c4b-4281-b951-d872f2087c98
|
DictionaryValue* ChromiumExtensionActivityToValue(
const sync_pb::ChromiumExtensionsActivity& proto) {
DictionaryValue* value = new DictionaryValue();
SET_STR(extension_id);
SET_INT32(bookmark_writes_since_last_commit);
return value;
}
|
DictionaryValue* ChromiumExtensionActivityToValue(
const sync_pb::ChromiumExtensionsActivity& proto) {
DictionaryValue* value = new DictionaryValue();
SET_STR(extension_id);
SET_INT32(bookmark_writes_since_last_commit);
return value;
}
|
C
|
Chrome
| 0 |
CVE-2018-16427
|
https://www.cvedetails.com/cve/CVE-2018-16427/
|
CWE-125
|
https://github.com/OpenSC/OpenSC/pull/1447/commits/8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
|
8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
|
fixed out of bounds reads
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting and suggesting security fixes.
|
auth_get_pin_reference (struct sc_card *card, int type, int reference, int cmd, int *out_ref)
{
if (!out_ref)
LOG_FUNC_RETURN(card->ctx, SC_ERROR_INVALID_ARGUMENTS);
switch (type) {
case SC_AC_CHV:
if (reference != 1 && reference != 2 && reference != 4)
LOG_FUNC_RETURN(card->ctx, SC_ERROR_INVALID_PIN_REFERENCE);
*out_ref = reference;
if (reference == 1 || reference == 4)
if (cmd == SC_PIN_CMD_VERIFY)
*out_ref |= 0x80;
break;
default:
LOG_FUNC_RETURN(card->ctx, SC_ERROR_INVALID_ARGUMENTS);
}
LOG_FUNC_RETURN(card->ctx, SC_SUCCESS);
}
|
auth_get_pin_reference (struct sc_card *card, int type, int reference, int cmd, int *out_ref)
{
if (!out_ref)
LOG_FUNC_RETURN(card->ctx, SC_ERROR_INVALID_ARGUMENTS);
switch (type) {
case SC_AC_CHV:
if (reference != 1 && reference != 2 && reference != 4)
LOG_FUNC_RETURN(card->ctx, SC_ERROR_INVALID_PIN_REFERENCE);
*out_ref = reference;
if (reference == 1 || reference == 4)
if (cmd == SC_PIN_CMD_VERIFY)
*out_ref |= 0x80;
break;
default:
LOG_FUNC_RETURN(card->ctx, SC_ERROR_INVALID_ARGUMENTS);
}
LOG_FUNC_RETURN(card->ctx, SC_SUCCESS);
}
|
C
|
OpenSC
| 0 |
CVE-2018-6158
|
https://www.cvedetails.com/cve/CVE-2018-6158/
|
CWE-362
|
https://github.com/chromium/chromium/commit/20b65d00ca3d8696430e22efad7485366f8c3a21
|
20b65d00ca3d8696430e22efad7485366f8c3a21
|
[oilpan] Fix GCInfoTable for multiple threads
Previously, grow and access from different threads could lead to a race
on the table backing; see bug.
- Rework the table to work on an existing reservation.
- Commit upon growing, avoiding any copies.
Drive-by: Fix over-allocation of table.
Bug: chromium:841280
Change-Id: I329cb6f40091e14e8c05334ba1104a9440c31d43
Reviewed-on: https://chromium-review.googlesource.com/1061525
Commit-Queue: Michael Lippautz <[email protected]>
Reviewed-by: Kentaro Hara <[email protected]>
Cr-Commit-Position: refs/heads/master@{#560434}
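A hedged sketch of the "existing reservation" approach the message describes, using POSIX mmap/mprotect as a stand-in (names are illustrative, not Blink's API):
#include <stddef.h>
#include <sys/mman.h>

/* Reserve the table's maximum range once; nothing is committed yet. */
static void *table_reserve(size_t max_bytes)
{
	return mmap(NULL, max_bytes, PROT_NONE,
	            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

/* Grow by committing more pages of the existing reservation in place:
 * no copy, no relocation, so readers on other threads keep a stable
 * base pointer and never race on a moving backing store. */
static int table_grow(void *base, size_t committed_bytes, size_t new_bytes)
{
	return mprotect((char *)base + committed_bytes,
	                new_bytes - committed_bytes, PROT_READ | PROT_WRITE);
}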
|
void set_mixin(Mixin* mixin) { mixin_ = mixin; }
|
void set_mixin(Mixin* mixin) { mixin_ = mixin; }
|
C
|
Chrome
| 0 |
CVE-2017-5014
|
https://www.cvedetails.com/cve/CVE-2017-5014/
|
CWE-119
|
https://github.com/chromium/chromium/commit/35eb28748d45b87695a69eceffaff73a0be476af
|
35eb28748d45b87695a69eceffaff73a0be476af
|
Remove unused histograms from the background loader offliner.
Bug: 975512
Change-Id: I87b0a91bed60e3a9e8a1fd9ae9b18cac27a0859f
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1683361
Reviewed-by: Cathy Li <[email protected]>
Reviewed-by: Steven Holte <[email protected]>
Commit-Queue: Peter Williamson <[email protected]>
Cr-Commit-Position: refs/heads/master@{#675332}
|
void CompleteSavingAsArchiveCreationFailed() {
DCHECK(mock_saving_);
mock_saving_ = false;
base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, base::BindOnce(std::move(save_page_callback_),
SavePageResult::ARCHIVE_CREATION_FAILED, 0));
}
|
void CompleteSavingAsArchiveCreationFailed() {
DCHECK(mock_saving_);
mock_saving_ = false;
base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, base::BindOnce(std::move(save_page_callback_),
SavePageResult::ARCHIVE_CREATION_FAILED, 0));
}
|
C
|
Chrome
| 0 |
CVE-2015-6252
|
https://www.cvedetails.com/cve/CVE-2015-6252/
|
CWE-399
|
https://github.com/torvalds/linux/commit/7932c0bd7740f4cd2aa168d3ce0199e7af7d72d5
|
7932c0bd7740f4cd2aa168d3ce0199e7af7d72d5
|
vhost: actually track log eventfd file
While reviewing vhost log code, I found out that log_file is never
set. Note: I haven't tested the change (QEMU doesn't use LOG_FD yet).
Cc: [email protected]
Signed-off-by: Marc-André Lureau <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
|
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
struct iovec iov[], int iov_size)
{
const struct vhost_memory_region *reg;
struct vhost_memory *mem;
struct iovec *_iov;
u64 s = 0;
int ret = 0;
mem = vq->memory;
while ((u64)len > s) {
u64 size;
if (unlikely(ret >= iov_size)) {
ret = -ENOBUFS;
break;
}
reg = find_region(mem, addr, len);
if (unlikely(!reg)) {
ret = -EFAULT;
break;
}
_iov = iov + ret;
size = reg->memory_size - addr + reg->guest_phys_addr;
_iov->iov_len = min((u64)len - s, size);
_iov->iov_base = (void __user *)(unsigned long)
(reg->userspace_addr + addr - reg->guest_phys_addr);
s += size;
addr += size;
++ret;
}
return ret;
}
|
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
struct iovec iov[], int iov_size)
{
const struct vhost_memory_region *reg;
struct vhost_memory *mem;
struct iovec *_iov;
u64 s = 0;
int ret = 0;
mem = vq->memory;
while ((u64)len > s) {
u64 size;
if (unlikely(ret >= iov_size)) {
ret = -ENOBUFS;
break;
}
reg = find_region(mem, addr, len);
if (unlikely(!reg)) {
ret = -EFAULT;
break;
}
_iov = iov + ret;
size = reg->memory_size - addr + reg->guest_phys_addr;
_iov->iov_len = min((u64)len - s, size);
_iov->iov_base = (void __user *)(unsigned long)
(reg->userspace_addr + addr - reg->guest_phys_addr);
s += size;
addr += size;
++ret;
}
return ret;
}
|
C
|
linux
| 0 |
CVE-2013-4483
|
https://www.cvedetails.com/cve/CVE-2013-4483/
|
CWE-189
|
https://github.com/torvalds/linux/commit/6062a8dc0517bce23e3c2f7d2fea5e22411269a3
|
6062a8dc0517bce23e3c2f7d2fea5e22411269a3
|
ipc,sem: fine grained locking for semtimedop
Introduce finer grained locking for semtimedop, to handle the common case
of a program wanting to manipulate one semaphore from an array with
multiple semaphores.
If the call is a semop manipulating just one semaphore in an array with
multiple semaphores, only take the lock for that semaphore itself.
If the call needs to manipulate multiple semaphores, or another caller is
in a transaction that manipulates multiple semaphores, the sem_array lock
is taken, as well as all the locks for the individual semaphores.
On a 24 CPU system, performance numbers with the semop-multi
test with N threads and N semaphores, look like this:
threads   vanilla   Davidlohr's   Davidlohr's +    Davidlohr's +
                    patches       rwlock patches   v3 patches
10        610652    726325        1783589          2142206
20        341570    365699        1520453          1977878
30        288102    307037        1498167          2037995
40        290714    305955        1612665          2256484
50        288620    312890        1733453          2650292
60        289987    306043        1649360          2388008
70        291298    306347        1723167          2717486
80        290948    305662        1729545          2763582
90        290996    306680        1736021          2757524
100       292243    306700        1773700          3059159
[[email protected]: do not call sem_lock when bogus sma]
[[email protected]: make refcounter atomic]
Signed-off-by: Rik van Riel <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Acked-by: Davidlohr Bueso <[email protected]>
Cc: Chegu Vinod <[email protected]>
Cc: Jason Low <[email protected]>
Reviewed-by: Michel Lespinasse <[email protected]>
Cc: Peter Hurley <[email protected]>
Cc: Stanislav Kinsbursky <[email protected]>
Tested-by: Emmanuel Benisty <[email protected]>
Tested-by: Sedat Dilek <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
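A hedged sketch of the locking policy described above, modelled with pthreads (names are illustrative; the real kernel code additionally checks for complex operations already in flight before taking the fine-grained path):
#include <pthread.h>

#define NSEMS 64

struct sem_array_model {
	pthread_mutex_t array_lock;      /* guards complex, multi-sem ops */
	pthread_mutex_t sem_lock[NSEMS]; /* one lock per semaphore */
};

static void semop_lock(struct sem_array_model *sma, int nsops, int semnum)
{
	if (nsops == 1) {
		/* Common case: one semaphore touched, take only its lock. */
		pthread_mutex_lock(&sma->sem_lock[semnum]);
		return;
	}
	/* Complex case: take the array lock plus every per-sem lock. */
	pthread_mutex_lock(&sma->array_lock);
	for (int i = 0; i < NSEMS; i++)
		pthread_mutex_lock(&sma->sem_lock[i]);
}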
|
static int testmsg(struct msg_msg *msg, long type, int mode)
{
switch(mode)
{
case SEARCH_ANY:
case SEARCH_NUMBER:
return 1;
case SEARCH_LESSEQUAL:
if (msg->m_type <=type)
return 1;
break;
case SEARCH_EQUAL:
if (msg->m_type == type)
return 1;
break;
case SEARCH_NOTEQUAL:
if (msg->m_type != type)
return 1;
break;
}
return 0;
}
|
static int testmsg(struct msg_msg *msg, long type, int mode)
{
switch(mode)
{
case SEARCH_ANY:
case SEARCH_NUMBER:
return 1;
case SEARCH_LESSEQUAL:
if (msg->m_type <=type)
return 1;
break;
case SEARCH_EQUAL:
if (msg->m_type == type)
return 1;
break;
case SEARCH_NOTEQUAL:
if (msg->m_type != type)
return 1;
break;
}
return 0;
}
|
C
|
linux
| 0 |
CVE-2014-3191
|
https://www.cvedetails.com/cve/CVE-2014-3191/
|
CWE-416
|
https://github.com/chromium/chromium/commit/11a4cc4a6d6e665d9a118fada4b7c658d6f70d95
|
11a4cc4a6d6e665d9a118fada4b7c658d6f70d95
|
Defer call to updateWidgetPositions() outside of RenderLayerScrollableArea.
updateWidgetPositions() can destroy the render tree, so it should never
be called from inside RenderLayerScrollableArea. Leaving it there allows
for the potential of use-after-free bugs.
BUG=402407
[email protected]
Review URL: https://codereview.chromium.org/490473003
git-svn-id: svn://svn.chromium.org/blink/trunk@180681 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void FrameView::invalidateTreeIfNeededRecursive()
{
lifecycle().advanceTo(DocumentLifecycle::InPaintInvalidation);
invalidateTreeIfNeeded();
lifecycle().advanceTo(DocumentLifecycle::PaintInvalidationClean);
for (Frame* child = m_frame->tree().firstChild(); child; child = child->tree().nextSibling()) {
if (!child->isLocalFrame())
continue;
toLocalFrame(child)->view()->invalidateTreeIfNeededRecursive();
}
}
|
void FrameView::invalidateTreeIfNeededRecursive()
{
lifecycle().advanceTo(DocumentLifecycle::InPaintInvalidation);
invalidateTreeIfNeeded();
lifecycle().advanceTo(DocumentLifecycle::PaintInvalidationClean);
for (Frame* child = m_frame->tree().firstChild(); child; child = child->tree().nextSibling()) {
if (!child->isLocalFrame())
continue;
toLocalFrame(child)->view()->invalidateTreeIfNeededRecursive();
}
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/a44b00c88bc5ea35b5b150217c5fd6e4ce168e58
|
a44b00c88bc5ea35b5b150217c5fd6e4ce168e58
|
Apply behaviour change fix from upstream for previous XPath change.
BUG=58731
TEST=NONE
Review URL: http://codereview.chromium.org/4027006
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@63572 0039d316-1c4b-4281-b951-d872f2087c98
|
xmlXPathRoundFunction(xmlXPathParserContextPtr ctxt, int nargs) {
double f;
CHECK_ARITY(1);
CAST_TO_NUMBER;
CHECK_TYPE(XPATH_NUMBER);
if ((xmlXPathIsNaN(ctxt->value->floatval)) ||
(xmlXPathIsInf(ctxt->value->floatval) == 1) ||
(xmlXPathIsInf(ctxt->value->floatval) == -1) ||
(ctxt->value->floatval == 0.0))
return;
XTRUNC(f, ctxt->value->floatval);
if (ctxt->value->floatval < 0) {
if (ctxt->value->floatval < f - 0.5)
ctxt->value->floatval = f - 1;
else
ctxt->value->floatval = f;
if (ctxt->value->floatval == 0)
ctxt->value->floatval = xmlXPathNZERO;
} else {
if (ctxt->value->floatval < f + 0.5)
ctxt->value->floatval = f;
else
ctxt->value->floatval = f + 1;
}
}
|
xmlXPathRoundFunction(xmlXPathParserContextPtr ctxt, int nargs) {
double f;
CHECK_ARITY(1);
CAST_TO_NUMBER;
CHECK_TYPE(XPATH_NUMBER);
if ((xmlXPathIsNaN(ctxt->value->floatval)) ||
(xmlXPathIsInf(ctxt->value->floatval) == 1) ||
(xmlXPathIsInf(ctxt->value->floatval) == -1) ||
(ctxt->value->floatval == 0.0))
return;
XTRUNC(f, ctxt->value->floatval);
if (ctxt->value->floatval < 0) {
if (ctxt->value->floatval < f - 0.5)
ctxt->value->floatval = f - 1;
else
ctxt->value->floatval = f;
if (ctxt->value->floatval == 0)
ctxt->value->floatval = xmlXPathNZERO;
} else {
if (ctxt->value->floatval < f + 0.5)
ctxt->value->floatval = f;
else
ctxt->value->floatval = f + 1;
}
}
|
C
|
Chrome
| 0 |
CVE-2018-12436
|
https://www.cvedetails.com/cve/CVE-2018-12436/
|
CWE-200
|
https://github.com/wolfSSL/wolfssl/commit/9b9568d500f31f964af26ba8d01e542e1f27e5ca
|
9b9568d500f31f964af26ba8d01e542e1f27e5ca
|
Change ECDSA signing to use blinding.
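A sketch of the general blinding technique (not necessarily the exact construction this commit adopted): for a fresh random b coprime to the group order n, the signing equation
    s = k^-1 * (e + r*d) mod n
can be evaluated as
    s = (b * (e + r*d)) * (b * k)^-1 mod n
so the modular inversion and multiplications never operate directly on the secret nonce k, which blunts timing side channels against the inversion.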
|
static int normal_ecc_mul2add(ecc_point* A, mp_int* kA,
ecc_point* B, mp_int* kB,
ecc_point* C, mp_int* a, mp_int* modulus,
void* heap)
#else
int ecc_mul2add(ecc_point* A, mp_int* kA,
ecc_point* B, mp_int* kB,
ecc_point* C, mp_int* a, mp_int* modulus,
void* heap)
#endif
{
ecc_point* precomp[16];
unsigned bitbufA, bitbufB, lenA, lenB, len, nA, nB, nibble;
unsigned char* tA;
unsigned char* tB;
int err = MP_OKAY, first, x, y;
mp_digit mp = 0;
/* argchks */
if (A == NULL || kA == NULL || B == NULL || kB == NULL || C == NULL ||
modulus == NULL) {
return ECC_BAD_ARG_E;
}
/* allocate memory */
tA = (unsigned char*)XMALLOC(ECC_BUFSIZE, heap, DYNAMIC_TYPE_ECC_BUFFER);
if (tA == NULL) {
return GEN_MEM_ERR;
}
tB = (unsigned char*)XMALLOC(ECC_BUFSIZE, heap, DYNAMIC_TYPE_ECC_BUFFER);
if (tB == NULL) {
XFREE(tA, heap, DYNAMIC_TYPE_ECC_BUFFER);
return GEN_MEM_ERR;
}
/* init variables */
XMEMSET(tA, 0, ECC_BUFSIZE);
XMEMSET(tB, 0, ECC_BUFSIZE);
XMEMSET(precomp, 0, sizeof(precomp));
/* get sizes */
lenA = mp_unsigned_bin_size(kA);
lenB = mp_unsigned_bin_size(kB);
len = MAX(lenA, lenB);
/* sanity check */
if ((lenA > ECC_BUFSIZE) || (lenB > ECC_BUFSIZE)) {
err = BAD_FUNC_ARG;
}
if (err == MP_OKAY) {
/* extract and justify kA */
err = mp_to_unsigned_bin(kA, (len - lenA) + tA);
/* extract and justify kB */
if (err == MP_OKAY)
err = mp_to_unsigned_bin(kB, (len - lenB) + tB);
/* allocate the table */
if (err == MP_OKAY) {
for (x = 0; x < 16; x++) {
precomp[x] = wc_ecc_new_point_h(heap);
if (precomp[x] == NULL) {
err = GEN_MEM_ERR;
break;
}
}
}
}
if (err == MP_OKAY)
/* init montgomery reduction */
err = mp_montgomery_setup(modulus, &mp);
if (err == MP_OKAY) {
mp_int mu;
err = mp_init(&mu);
if (err == MP_OKAY) {
err = mp_montgomery_calc_normalization(&mu, modulus);
if (err == MP_OKAY)
/* copy ones ... */
err = mp_mulmod(A->x, &mu, modulus, precomp[1]->x);
if (err == MP_OKAY)
err = mp_mulmod(A->y, &mu, modulus, precomp[1]->y);
if (err == MP_OKAY)
err = mp_mulmod(A->z, &mu, modulus, precomp[1]->z);
if (err == MP_OKAY)
err = mp_mulmod(B->x, &mu, modulus, precomp[1<<2]->x);
if (err == MP_OKAY)
err = mp_mulmod(B->y, &mu, modulus, precomp[1<<2]->y);
if (err == MP_OKAY)
err = mp_mulmod(B->z, &mu, modulus, precomp[1<<2]->z);
/* done with mu */
mp_clear(&mu);
}
}
if (err == MP_OKAY)
/* precomp [i,0](A + B) table */
err = ecc_projective_dbl_point(precomp[1], precomp[2], a, modulus, mp);
if (err == MP_OKAY)
err = ecc_projective_add_point(precomp[1], precomp[2], precomp[3],
a, modulus, mp);
if (err == MP_OKAY)
/* precomp [0,i](A + B) table */
err = ecc_projective_dbl_point(precomp[1<<2], precomp[2<<2], a, modulus, mp);
if (err == MP_OKAY)
err = ecc_projective_add_point(precomp[1<<2], precomp[2<<2], precomp[3<<2],
a, modulus, mp);
if (err == MP_OKAY) {
/* precomp [i,j](A + B) table (i != 0, j != 0) */
for (x = 1; x < 4; x++) {
for (y = 1; y < 4; y++) {
if (err == MP_OKAY)
err = ecc_projective_add_point(precomp[x], precomp[(y<<2)],
precomp[x+(y<<2)], a, modulus, mp);
}
}
}
if (err == MP_OKAY) {
nibble = 3;
first = 1;
bitbufA = tA[0];
bitbufB = tB[0];
/* for every byte of the multiplicands */
for (x = 0;; ) {
/* grab a nibble */
if (++nibble == 4) {
if (x == (int)len) break;
bitbufA = tA[x];
bitbufB = tB[x];
nibble = 0;
x++;
}
/* extract two bits from both, shift/update */
nA = (bitbufA >> 6) & 0x03;
nB = (bitbufB >> 6) & 0x03;
bitbufA = (bitbufA << 2) & 0xFF;
bitbufB = (bitbufB << 2) & 0xFF;
/* if both zero, if first, continue */
if ((nA == 0) && (nB == 0) && (first == 1)) {
continue;
}
/* double twice, only if this isn't the first */
if (first == 0) {
/* double twice */
if (err == MP_OKAY)
err = ecc_projective_dbl_point(C, C, a, modulus, mp);
if (err == MP_OKAY)
err = ecc_projective_dbl_point(C, C, a, modulus, mp);
else
break;
}
/* if not both zero */
if ((nA != 0) || (nB != 0)) {
if (first == 1) {
/* if first, copy from table */
first = 0;
if (err == MP_OKAY)
err = mp_copy(precomp[nA + (nB<<2)]->x, C->x);
if (err == MP_OKAY)
err = mp_copy(precomp[nA + (nB<<2)]->y, C->y);
if (err == MP_OKAY)
err = mp_copy(precomp[nA + (nB<<2)]->z, C->z);
else
break;
} else {
/* if not first, add from table */
if (err == MP_OKAY)
err = ecc_projective_add_point(C, precomp[nA + (nB<<2)], C,
a, modulus, mp);
else
break;
}
}
}
}
/* reduce to affine */
if (err == MP_OKAY)
err = ecc_map(C, modulus, mp);
/* clean up */
for (x = 0; x < 16; x++) {
wc_ecc_del_point_h(precomp[x], heap);
}
ForceZero(tA, ECC_BUFSIZE);
ForceZero(tB, ECC_BUFSIZE);
XFREE(tA, heap, DYNAMIC_TYPE_ECC_BUFFER);
XFREE(tB, heap, DYNAMIC_TYPE_ECC_BUFFER);
return err;
}
|
static int normal_ecc_mul2add(ecc_point* A, mp_int* kA,
ecc_point* B, mp_int* kB,
ecc_point* C, mp_int* a, mp_int* modulus,
void* heap)
#else
int ecc_mul2add(ecc_point* A, mp_int* kA,
ecc_point* B, mp_int* kB,
ecc_point* C, mp_int* a, mp_int* modulus,
void* heap)
#endif
{
ecc_point* precomp[16];
unsigned bitbufA, bitbufB, lenA, lenB, len, nA, nB, nibble;
unsigned char* tA;
unsigned char* tB;
int err = MP_OKAY, first, x, y;
mp_digit mp = 0;
/* argchks */
if (A == NULL || kA == NULL || B == NULL || kB == NULL || C == NULL ||
modulus == NULL) {
return ECC_BAD_ARG_E;
}
/* allocate memory */
tA = (unsigned char*)XMALLOC(ECC_BUFSIZE, heap, DYNAMIC_TYPE_ECC_BUFFER);
if (tA == NULL) {
return GEN_MEM_ERR;
}
tB = (unsigned char*)XMALLOC(ECC_BUFSIZE, heap, DYNAMIC_TYPE_ECC_BUFFER);
if (tB == NULL) {
XFREE(tA, heap, DYNAMIC_TYPE_ECC_BUFFER);
return GEN_MEM_ERR;
}
/* init variables */
XMEMSET(tA, 0, ECC_BUFSIZE);
XMEMSET(tB, 0, ECC_BUFSIZE);
XMEMSET(precomp, 0, sizeof(precomp));
/* get sizes */
lenA = mp_unsigned_bin_size(kA);
lenB = mp_unsigned_bin_size(kB);
len = MAX(lenA, lenB);
/* sanity check */
if ((lenA > ECC_BUFSIZE) || (lenB > ECC_BUFSIZE)) {
err = BAD_FUNC_ARG;
}
if (err == MP_OKAY) {
/* extract and justify kA */
err = mp_to_unsigned_bin(kA, (len - lenA) + tA);
/* extract and justify kB */
if (err == MP_OKAY)
err = mp_to_unsigned_bin(kB, (len - lenB) + tB);
/* allocate the table */
if (err == MP_OKAY) {
for (x = 0; x < 16; x++) {
precomp[x] = wc_ecc_new_point_h(heap);
if (precomp[x] == NULL) {
err = GEN_MEM_ERR;
break;
}
}
}
}
if (err == MP_OKAY)
/* init montgomery reduction */
err = mp_montgomery_setup(modulus, &mp);
if (err == MP_OKAY) {
mp_int mu;
err = mp_init(&mu);
if (err == MP_OKAY) {
err = mp_montgomery_calc_normalization(&mu, modulus);
if (err == MP_OKAY)
/* copy ones ... */
err = mp_mulmod(A->x, &mu, modulus, precomp[1]->x);
if (err == MP_OKAY)
err = mp_mulmod(A->y, &mu, modulus, precomp[1]->y);
if (err == MP_OKAY)
err = mp_mulmod(A->z, &mu, modulus, precomp[1]->z);
if (err == MP_OKAY)
err = mp_mulmod(B->x, &mu, modulus, precomp[1<<2]->x);
if (err == MP_OKAY)
err = mp_mulmod(B->y, &mu, modulus, precomp[1<<2]->y);
if (err == MP_OKAY)
err = mp_mulmod(B->z, &mu, modulus, precomp[1<<2]->z);
/* done with mu */
mp_clear(&mu);
}
}
if (err == MP_OKAY)
/* precomp [i,0](A + B) table */
err = ecc_projective_dbl_point(precomp[1], precomp[2], a, modulus, mp);
if (err == MP_OKAY)
err = ecc_projective_add_point(precomp[1], precomp[2], precomp[3],
a, modulus, mp);
if (err == MP_OKAY)
/* precomp [0,i](A + B) table */
err = ecc_projective_dbl_point(precomp[1<<2], precomp[2<<2], a, modulus, mp);
if (err == MP_OKAY)
err = ecc_projective_add_point(precomp[1<<2], precomp[2<<2], precomp[3<<2],
a, modulus, mp);
if (err == MP_OKAY) {
/* precomp [i,j](A + B) table (i != 0, j != 0) */
for (x = 1; x < 4; x++) {
for (y = 1; y < 4; y++) {
if (err == MP_OKAY)
err = ecc_projective_add_point(precomp[x], precomp[(y<<2)],
precomp[x+(y<<2)], a, modulus, mp);
}
}
}
if (err == MP_OKAY) {
nibble = 3;
first = 1;
bitbufA = tA[0];
bitbufB = tB[0];
/* for every byte of the multiplicands */
for (x = 0;; ) {
/* grab a nibble */
if (++nibble == 4) {
if (x == (int)len) break;
bitbufA = tA[x];
bitbufB = tB[x];
nibble = 0;
x++;
}
/* extract two bits from both, shift/update */
nA = (bitbufA >> 6) & 0x03;
nB = (bitbufB >> 6) & 0x03;
bitbufA = (bitbufA << 2) & 0xFF;
bitbufB = (bitbufB << 2) & 0xFF;
/* if both zero, if first, continue */
if ((nA == 0) && (nB == 0) && (first == 1)) {
continue;
}
/* double twice, only if this isn't the first */
if (first == 0) {
/* double twice */
if (err == MP_OKAY)
err = ecc_projective_dbl_point(C, C, a, modulus, mp);
if (err == MP_OKAY)
err = ecc_projective_dbl_point(C, C, a, modulus, mp);
else
break;
}
/* if not both zero */
if ((nA != 0) || (nB != 0)) {
if (first == 1) {
/* if first, copy from table */
first = 0;
if (err == MP_OKAY)
err = mp_copy(precomp[nA + (nB<<2)]->x, C->x);
if (err == MP_OKAY)
err = mp_copy(precomp[nA + (nB<<2)]->y, C->y);
if (err == MP_OKAY)
err = mp_copy(precomp[nA + (nB<<2)]->z, C->z);
else
break;
} else {
/* if not first, add from table */
if (err == MP_OKAY)
err = ecc_projective_add_point(C, precomp[nA + (nB<<2)], C,
a, modulus, mp);
else
break;
}
}
}
}
/* reduce to affine */
if (err == MP_OKAY)
err = ecc_map(C, modulus, mp);
/* clean up */
for (x = 0; x < 16; x++) {
wc_ecc_del_point_h(precomp[x], heap);
}
ForceZero(tA, ECC_BUFSIZE);
ForceZero(tB, ECC_BUFSIZE);
XFREE(tA, heap, DYNAMIC_TYPE_ECC_BUFFER);
XFREE(tB, heap, DYNAMIC_TYPE_ECC_BUFFER);
return err;
}
|
C
|
wolfssl
| 0 |
CVE-2012-5375
|
https://www.cvedetails.com/cve/CVE-2012-5375/
|
CWE-310
|
https://github.com/torvalds/linux/commit/9c52057c698fb96f8f07e7a4bcf4801a092bda89
|
9c52057c698fb96f8f07e7a4bcf4801a092bda89
|
Btrfs: fix hash overflow handling
The handling for directory crc hash overflows was fairly obscure,
split_leaf returns EOVERFLOW when we try to extend the item and that is
supposed to bubble up to userland. For a while it did so, but along the
way we added better handling of errors and forced the FS readonly if we
hit IO errors during the directory insertion.
Along the way, we started testing only for EEXIST and the EOVERFLOW case
was dropped. The end result is that we may force the FS readonly if we
catch a directory hash bucket overflow.
This fixes a few problem spots. First I add tests for EOVERFLOW in the
places where we can safely just return the error up the chain.
btrfs_rename is harder though, because it tries to insert the new
directory item only after it has already unlinked anything the rename
was going to overwrite. Rather than adding very complex logic, I added
a helper to test for the hash overflow case early while it is still safe
to bail out.
Snapshot and subvolume creation had a similar problem, so they are using
the new helper now too.
Signed-off-by: Chris Mason <[email protected]>
Reported-by: Pascal Junod <[email protected]>
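A hedged sketch of the early check the message describes (all names here are hypothetical, on a toy bucket model):
#include <errno.h>

#define BUCKET_CAP 4 /* illustrative capacity of one dir-item hash bucket */

struct dir_bucket_model {
	int used; /* entries whose name hashes land in this bucket */
};

/* Probe for a hash-bucket overflow before rename has unlinked
 * anything, so the caller can still bail out with -EOVERFLOW instead
 * of forcing the filesystem read-only mid-operation. */
static int check_dir_item_collision(const struct dir_bucket_model *b)
{
	return (b->used >= BUCKET_CAP) ? -EOVERFLOW : 0;
}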
|
static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
void __user *arg)
{
struct inode *inode = fdentry(file)->d_inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
u64 root_flags;
u64 flags;
int ret = 0;
ret = mnt_want_write_file(file);
if (ret)
goto out;
if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
ret = -EINVAL;
goto out_drop_write;
}
if (copy_from_user(&flags, arg, sizeof(flags))) {
ret = -EFAULT;
goto out_drop_write;
}
if (flags & BTRFS_SUBVOL_CREATE_ASYNC) {
ret = -EINVAL;
goto out_drop_write;
}
if (flags & ~BTRFS_SUBVOL_RDONLY) {
ret = -EOPNOTSUPP;
goto out_drop_write;
}
if (!inode_owner_or_capable(inode)) {
ret = -EACCES;
goto out_drop_write;
}
down_write(&root->fs_info->subvol_sem);
/* nothing to do */
if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root))
goto out_drop_sem;
root_flags = btrfs_root_flags(&root->root_item);
if (flags & BTRFS_SUBVOL_RDONLY)
btrfs_set_root_flags(&root->root_item,
root_flags | BTRFS_ROOT_SUBVOL_RDONLY);
else
btrfs_set_root_flags(&root->root_item,
root_flags & ~BTRFS_ROOT_SUBVOL_RDONLY);
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out_reset;
}
ret = btrfs_update_root(trans, root->fs_info->tree_root,
&root->root_key, &root->root_item);
btrfs_commit_transaction(trans, root);
out_reset:
if (ret)
btrfs_set_root_flags(&root->root_item, root_flags);
out_drop_sem:
up_write(&root->fs_info->subvol_sem);
out_drop_write:
mnt_drop_write_file(file);
out:
return ret;
}
|
static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
void __user *arg)
{
struct inode *inode = fdentry(file)->d_inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
u64 root_flags;
u64 flags;
int ret = 0;
ret = mnt_want_write_file(file);
if (ret)
goto out;
if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
ret = -EINVAL;
goto out_drop_write;
}
if (copy_from_user(&flags, arg, sizeof(flags))) {
ret = -EFAULT;
goto out_drop_write;
}
if (flags & BTRFS_SUBVOL_CREATE_ASYNC) {
ret = -EINVAL;
goto out_drop_write;
}
if (flags & ~BTRFS_SUBVOL_RDONLY) {
ret = -EOPNOTSUPP;
goto out_drop_write;
}
if (!inode_owner_or_capable(inode)) {
ret = -EACCES;
goto out_drop_write;
}
down_write(&root->fs_info->subvol_sem);
/* nothing to do */
if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root))
goto out_drop_sem;
root_flags = btrfs_root_flags(&root->root_item);
if (flags & BTRFS_SUBVOL_RDONLY)
btrfs_set_root_flags(&root->root_item,
root_flags | BTRFS_ROOT_SUBVOL_RDONLY);
else
btrfs_set_root_flags(&root->root_item,
root_flags & ~BTRFS_ROOT_SUBVOL_RDONLY);
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out_reset;
}
ret = btrfs_update_root(trans, root->fs_info->tree_root,
&root->root_key, &root->root_item);
btrfs_commit_transaction(trans, root);
out_reset:
if (ret)
btrfs_set_root_flags(&root->root_item, root_flags);
out_drop_sem:
up_write(&root->fs_info->subvol_sem);
out_drop_write:
mnt_drop_write_file(file);
out:
return ret;
}
|
C
|
linux
| 0 |
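Note on the record above: the commit message describes probing the directory-hash-overflow case before any destructive step of the rename, so the error can still be returned safely. A minimal userspace sketch of that early-bailout pattern, with a hypothetical collision probe (the real btrfs helper has a different name and signature):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the collision probe the message describes:
 * report -EOVERFLOW when inserting `name` would overflow a directory
 * hash bucket.  The "bucket" is faked as a name-length cap so the
 * sketch stays self-contained. */
static int check_dir_item_collision(const char *name)
{
	return strlen(name) > 255 ? -EOVERFLOW : 0;
}

static int rename_sketch(const char *new_name)
{
	/* Probe the failure case before unlinking anything the rename
	 * would overwrite; bailing out here is still safe, and the error
	 * bubbles up to userland instead of forcing the FS read-only. */
	int ret = check_dir_item_collision(new_name);
	if (ret)
		return ret;
	/* ...destructive rename steps would follow here... */
	return 0;
}

int main(void)
{
	printf("%d\n", rename_sketch("new-name"));
	return 0;
}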
CVE-2011-3097
|
https://www.cvedetails.com/cve/CVE-2011-3097/
|
CWE-20
|
https://github.com/chromium/chromium/commit/027429ee5abe6e2fb5e3b2b4542f0a6fe0dbc12d
|
027429ee5abe6e2fb5e3b2b4542f0a6fe0dbc12d
|
Metrics for measuring how much overhead reading compressed content states adds.
BUG=104293
TEST=NONE
Review URL: https://chromiumcodereview.appspot.com/9426039
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@123733 0039d316-1c4b-4281-b951-d872f2087c98
|
void SessionService::TabRestored(TabContentsWrapper* tab, bool pinned) {
  if (!ShouldTrackChangesToWindow(tab->restore_tab_helper()->window_id()))
    return;
  BuildCommandsForTab(tab->restore_tab_helper()->window_id(), tab, -1,
                      pinned, &pending_commands(), NULL);
  StartSaveTimer();
}
|
void SessionService::TabRestored(TabContentsWrapper* tab, bool pinned) {
  if (!ShouldTrackChangesToWindow(tab->restore_tab_helper()->window_id()))
    return;
  BuildCommandsForTab(tab->restore_tab_helper()->window_id(), tab, -1,
                      pinned, &pending_commands(), NULL);
  StartSaveTimer();
}
|
C
|
Chrome
| 0 |
CVE-2017-6903
|
https://www.cvedetails.com/cve/CVE-2017-6903/
|
CWE-269
|
https://github.com/iortcw/iortcw/commit/b6ff2bcb1e4e6976d61e316175c6d7c99860fe20
|
b6ff2bcb1e4e6976d61e316175c6d7c99860fe20
|
All: Don't load .pk3s as .dlls, and don't load user config files from .pk3s
|
static void CL_Cache_MapChange_f( void ) {
	cacheIndex++;
}
|
static void CL_Cache_MapChange_f( void ) {
	cacheIndex++;
}
|
C
|
OpenJK
| 0 |
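Note on the record above: the fix's subject line is a provenance policy — files that came out of a downloaded .pk3 archive must never be executed as native code or honored as user config. A hedged sketch of such a gate (the helper name and provenance flag are illustrative, not the iortcw API):

#include <stdbool.h>
#include <string.h>

/* Refuse to treat pak contents as executable code: anything that came
 * out of a .pk3 archive, or that is itself a .pk3, is data only. */
static bool safe_to_load_native_lib(const char *path, bool came_from_pak)
{
	const char *dot = strrchr(path, '.');

	if (came_from_pak)
		return false;
	return dot != NULL && strcmp(dot, ".pk3") != 0;
}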
CVE-2012-2867
|
https://www.cvedetails.com/cve/CVE-2012-2867/
| null |
https://github.com/chromium/chromium/commit/b7a161633fd7ecb59093c2c56ed908416292d778
|
b7a161633fd7ecb59093c2c56ed908416292d778
|
[GTK][WTR] Implement AccessibilityUIElement::stringValue
https://bugs.webkit.org/show_bug.cgi?id=102951
Reviewed by Martin Robinson.
Implement AccessibilityUIElement::stringValue in the ATK backend
in the same manner it is implemented in DumpRenderTree.
* WebKitTestRunner/InjectedBundle/atk/AccessibilityUIElementAtk.cpp:
(WTR::replaceCharactersForResults):
(WTR):
(WTR::AccessibilityUIElement::stringValue):
git-svn-id: svn://svn.chromium.org/blink/trunk@135485 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
JSRetainPtr<JSStringRef> AccessibilityUIElement::documentEncoding()
{
    if (!m_element || !ATK_IS_OBJECT(m_element))
        return JSStringCreateWithCharacters(0, 0);
    AtkRole role = atk_object_get_role(ATK_OBJECT(m_element));
    if (role != ATK_ROLE_DOCUMENT_FRAME)
        return JSStringCreateWithCharacters(0, 0);
    return JSStringCreateWithUTF8CString(atk_document_get_attribute_value(ATK_DOCUMENT(m_element), "Encoding"));
}
|
JSRetainPtr<JSStringRef> AccessibilityUIElement::documentEncoding()
{
    if (!m_element || !ATK_IS_OBJECT(m_element))
        return JSStringCreateWithCharacters(0, 0);
    AtkRole role = atk_object_get_role(ATK_OBJECT(m_element));
    if (role != ATK_ROLE_DOCUMENT_FRAME)
        return JSStringCreateWithCharacters(0, 0);
    return JSStringCreateWithUTF8CString(atk_document_get_attribute_value(ATK_DOCUMENT(m_element), "Encoding"));
}
|
C
|
Chrome
| 0 |
CVE-2017-5112
|
https://www.cvedetails.com/cve/CVE-2017-5112/
|
CWE-119
|
https://github.com/chromium/chromium/commit/f6ac1dba5e36f338a490752a2cbef3339096d9fe
|
f6ac1dba5e36f338a490752a2cbef3339096d9fe
|
Reset ES3 pixel pack parameters and PIXEL_PACK_BUFFER binding in DrawingBuffer before ReadPixels() and recover them later.
BUG=740603
TEST=new conformance test
[email protected],[email protected]
Change-Id: I3ea54c6cc34f34e249f7c8b9f792d93c5e1958f4
Reviewed-on: https://chromium-review.googlesource.com/570840
Reviewed-by: Antoine Labour <[email protected]>
Reviewed-by: Kenneth Russell <[email protected]>
Commit-Queue: Zhenyao Mo <[email protected]>
Cr-Commit-Position: refs/heads/master@{#486518}
|
sk_sp<SkImage> WebGLRenderingContextBase::MakeImageSnapshot(
    SkImageInfo& image_info) {
  GetDrawingBuffer()->ResolveAndBindForReadAndDraw();
  gpu::gles2::GLES2Interface* gl = SharedGpuContext::Gl();
  SkSurfaceProps disable_lcd_props(0, kUnknown_SkPixelGeometry);
  sk_sp<SkSurface> surface = SkSurface::MakeRenderTarget(
      SharedGpuContext::Gr(), SkBudgeted::kYes, image_info, 0,
      image_info.alphaType() == kOpaque_SkAlphaType ? nullptr
                                                    : &disable_lcd_props);
  const GrGLTextureInfo* texture_info = skia::GrBackendObjectToGrGLTextureInfo(
      surface->getTextureHandle(SkSurface::kDiscardWrite_TextureHandleAccess));
  GLuint texture_id = texture_info->fID;
  GLenum texture_target = texture_info->fTarget;
  GetDrawingBuffer()->CopyToPlatformTexture(
      gl, texture_target, texture_id, true, false, IntPoint(0, 0),
      IntRect(IntPoint(0, 0), GetDrawingBuffer()->Size()), kBackBuffer);
  return surface->makeImageSnapshot();
}
|
sk_sp<SkImage> WebGLRenderingContextBase::MakeImageSnapshot(
    SkImageInfo& image_info) {
  GetDrawingBuffer()->ResolveAndBindForReadAndDraw();
  gpu::gles2::GLES2Interface* gl = SharedGpuContext::Gl();
  SkSurfaceProps disable_lcd_props(0, kUnknown_SkPixelGeometry);
  sk_sp<SkSurface> surface = SkSurface::MakeRenderTarget(
      SharedGpuContext::Gr(), SkBudgeted::kYes, image_info, 0,
      image_info.alphaType() == kOpaque_SkAlphaType ? nullptr
                                                    : &disable_lcd_props);
  const GrGLTextureInfo* texture_info = skia::GrBackendObjectToGrGLTextureInfo(
      surface->getTextureHandle(SkSurface::kDiscardWrite_TextureHandleAccess));
  GLuint texture_id = texture_info->fID;
  GLenum texture_target = texture_info->fTarget;
  GetDrawingBuffer()->CopyToPlatformTexture(
      gl, texture_target, texture_id, true, false, IntPoint(0, 0),
      IntRect(IntPoint(0, 0), GetDrawingBuffer()->Size()), kBackBuffer);
  return surface->makeImageSnapshot();
}
|
C
|
Chrome
| 0 |
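Note on the record above: the commit message is the whole technique — zero out the ES3 pixel pack parameters and the PIXEL_PACK_BUFFER binding before glReadPixels(), then put the caller's state back. A C sketch against the GLES3 API (the save/restore wrapper itself is illustrative, not Chromium's DrawingBuffer code):

#include <GLES3/gl3.h>

/* Read back pixels with known-good pack state, then restore the
 * caller's state.  Sketch only: error checking omitted. */
static void read_pixels_with_clean_pack_state(int w, int h, void *out)
{
	GLint row_length, skip_pixels, skip_rows, pack_buffer;

	glGetIntegerv(GL_PACK_ROW_LENGTH, &row_length);
	glGetIntegerv(GL_PACK_SKIP_PIXELS, &skip_pixels);
	glGetIntegerv(GL_PACK_SKIP_ROWS, &skip_rows);
	glGetIntegerv(GL_PIXEL_PACK_BUFFER_BINDING, &pack_buffer);

	/* Reset the ES3 pack parameters and unbind any pixel pack buffer
	 * so glReadPixels writes the full rectangle to client memory. */
	glPixelStorei(GL_PACK_ROW_LENGTH, 0);
	glPixelStorei(GL_PACK_SKIP_PIXELS, 0);
	glPixelStorei(GL_PACK_SKIP_ROWS, 0);
	glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

	glReadPixels(0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, out);

	/* Recover the caller's state. */
	glPixelStorei(GL_PACK_ROW_LENGTH, row_length);
	glPixelStorei(GL_PACK_SKIP_PIXELS, skip_pixels);
	glPixelStorei(GL_PACK_SKIP_ROWS, skip_rows);
	glBindBuffer(GL_PIXEL_PACK_BUFFER, (GLuint)pack_buffer);
}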
CVE-2016-9557
|
https://www.cvedetails.com/cve/CVE-2016-9557/
|
CWE-190
|
https://github.com/mdadams/jasper/commit/d42b2388f7f8e0332c846675133acea151fc557a
|
d42b2388f7f8e0332c846675133acea151fc557a
|
The generation of the configuration file jas_config.h has been completely
reworked in order to avoid pollution of the global namespace.
Some problematic types like uchar, ulong, and friends have been replaced
with names with a jas_ prefix.
An option max_samples has been added to the BMP and JPEG decoders to
restrict the maximum size of image that they can decode. This change
was made as a (possibly temporary) fix to address security concerns.
A max_samples command-line option has also been added to imginfo.
Whether an image component (for jas_image_t) is stored in memory or on
disk is now based on the component size (rather than the image size).
Some debug log messages were added.
Some new integer overflow checks were added.
Some new safe integer add/multiply functions were added.
More pre-C99 cruft was removed. JasPer had accumulated numerous "hacks"
to handle pre-C99 compilers; since JasPer now assumes C99 support, that
cruft is unnecessary.
The regression jasper-doublefree-mem_close.jpg has been re-enabled.
Theoretically, it should work more predictably now.
|
int pnm_validate(jas_stream_t *in)
{
	jas_uchar buf[2];
	int i;
	int n;
	assert(JAS_STREAM_MAXPUTBACK >= 2);
	/* Read the first two characters that constitute the signature. */
	if ((n = jas_stream_read(in, buf, 2)) < 0) {
		return -1;
	}
	/* Put these characters back to the stream. */
	for (i = n - 1; i >= 0; --i) {
		if (jas_stream_ungetc(in, buf[i]) == EOF) {
			return -1;
		}
	}
	/* Did we read enough data? */
	if (n < 2) {
		return -1;
	}
	/* Is this the correct signature for a PNM file? */
	if (buf[0] == 'P' && isdigit(buf[1])) {
		return 0;
	}
	return -1;
}
|
int pnm_validate(jas_stream_t *in)
{
	uchar buf[2];
	int i;
	int n;
	assert(JAS_STREAM_MAXPUTBACK >= 2);
	/* Read the first two characters that constitute the signature. */
	if ((n = jas_stream_read(in, buf, 2)) < 0) {
		return -1;
	}
	/* Put these characters back to the stream. */
	for (i = n - 1; i >= 0; --i) {
		if (jas_stream_ungetc(in, buf[i]) == EOF) {
			return -1;
		}
	}
	/* Did we read enough data? */
	if (n < 2) {
		return -1;
	}
	/* Is this the correct signature for a PNM file? */
	if (buf[0] == 'P' && isdigit(buf[1])) {
		return 0;
	}
	return -1;
}
|
C
|
jasper
| 1 |
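Note on the record above: two of the changelog items — "new integer overflow checks" and "safe integer add/multiply functions" — follow a standard shape, and the max_samples decoder limit builds on it. A sketch under assumed names (the real JasPer helpers are named and typed differently):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Overflow-checked multiply: refuses to produce a wrapped result. */
static bool safe_size_mul(size_t a, size_t b, size_t *result)
{
	if (b != 0 && a > SIZE_MAX / b)
		return false;
	*result = a * b;
	return true;
}

/* e.g. the max_samples idea: bail out unless width * height is both
 * representable and within the configured limit. */
static bool sample_count_ok(size_t width, size_t height, size_t max_samples)
{
	size_t n;
	return safe_size_mul(width, height, &n) && n <= max_samples;
}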
CVE-2013-6663
|
https://www.cvedetails.com/cve/CVE-2013-6663/
|
CWE-399
|
https://github.com/chromium/chromium/commit/fb5dce12f0462056fc9f66967b0f7b2b7bcd88f5
|
fb5dce12f0462056fc9f66967b0f7b2b7bcd88f5
|
One polymer_config.js to rule them all.
[email protected],[email protected],[email protected]
BUG=425626
Review URL: https://codereview.chromium.org/1224783005
Cr-Commit-Position: refs/heads/master@{#337882}
|
void OobeUI::RemoveObserver(Observer* observer) {
  observer_list_.RemoveObserver(observer);
}
|
void OobeUI::RemoveObserver(Observer* observer) {
  observer_list_.RemoveObserver(observer);
}
|
C
|
Chrome
| 0 |
CVE-2011-1768
|
https://www.cvedetails.com/cve/CVE-2011-1768/
|
CWE-362
|
https://github.com/torvalds/linux/commit/d5aa407f59f5b83d2c50ec88f5bf56d40f1f8978
|
d5aa407f59f5b83d2c50ec88f5bf56d40f1f8978
|
tunnels: fix netns vs proto registration ordering
Same stuff as in ip_gre patch: receive hook can be called before netns
setup is done, oopsing in net_generic().
Signed-off-by: Alexey Dobriyan <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static int ipip6_err(struct sk_buff *skb, u32 info)
{
/* All the routers (except for Linux) return only
8 bytes of packet payload. It means, that precise relaying of
ICMP in the real Internet is absolutely infeasible.
*/
struct iphdr *iph = (struct iphdr*)skb->data;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
struct ip_tunnel *t;
int err;
switch (type) {
default:
case ICMP_PARAMETERPROB:
return 0;
case ICMP_DEST_UNREACH:
switch (code) {
case ICMP_SR_FAILED:
case ICMP_PORT_UNREACH:
/* Impossible event. */
return 0;
case ICMP_FRAG_NEEDED:
/* Soft state for pmtu is maintained by IP core. */
return 0;
default:
/* All others are translated to HOST_UNREACH.
rfc2003 contains "deep thoughts" about NET_UNREACH,
I believe they are just ether pollution. --ANK
*/
break;
}
break;
case ICMP_TIME_EXCEEDED:
if (code != ICMP_EXC_TTL)
return 0;
break;
}
err = -ENOENT;
rcu_read_lock();
t = ipip6_tunnel_lookup(dev_net(skb->dev),
skb->dev,
iph->daddr,
iph->saddr);
if (t == NULL || t->parms.iph.daddr == 0)
goto out;
err = 0;
if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
goto out;
if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
t->err_count++;
else
t->err_count = 1;
t->err_time = jiffies;
out:
rcu_read_unlock();
return err;
}
|
static int ipip6_err(struct sk_buff *skb, u32 info)
{
/* All the routers (except for Linux) return only
8 bytes of packet payload. It means, that precise relaying of
ICMP in the real Internet is absolutely infeasible.
*/
struct iphdr *iph = (struct iphdr*)skb->data;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
struct ip_tunnel *t;
int err;
switch (type) {
default:
case ICMP_PARAMETERPROB:
return 0;
case ICMP_DEST_UNREACH:
switch (code) {
case ICMP_SR_FAILED:
case ICMP_PORT_UNREACH:
/* Impossible event. */
return 0;
case ICMP_FRAG_NEEDED:
/* Soft state for pmtu is maintained by IP core. */
return 0;
default:
/* All others are translated to HOST_UNREACH.
rfc2003 contains "deep thoughts" about NET_UNREACH,
I believe they are just ether pollution. --ANK
*/
break;
}
break;
case ICMP_TIME_EXCEEDED:
if (code != ICMP_EXC_TTL)
return 0;
break;
}
err = -ENOENT;
rcu_read_lock();
t = ipip6_tunnel_lookup(dev_net(skb->dev),
skb->dev,
iph->daddr,
iph->saddr);
if (t == NULL || t->parms.iph.daddr == 0)
goto out;
err = 0;
if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
goto out;
if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
t->err_count++;
else
t->err_count = 1;
t->err_time = jiffies;
out:
rcu_read_unlock();
return err;
}
|
C
|
linux
| 0 |
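Note on the record above: the race is purely an init-ordering one — once the tunnel protocol handler is registered, ipip6_err()/ipip6_rcv() can run immediately and call net_generic(), so the per-netns state must already exist. A kernel-side sketch of the reordered init (abridged and not compilable outside a kernel tree; details may differ from the actual sit.c of the period):

static int __init sit_init(void)
{
	int err;

	/* Per-netns state first: the moment the protocol handler is
	 * registered, the receive/error hooks may fire and dereference
	 * net_generic(net, sit_net_id). */
	err = register_pernet_device(&sit_net_ops);
	if (err < 0)
		return err;

	err = xfrm4_tunnel_register(&sit_handler, AF_INET6);
	if (err < 0) {
		unregister_pernet_device(&sit_net_ops);
		printk(KERN_INFO "sit init: Can't add protocol\n");
	}
	return err;
}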
CVE-2019-11487
|
https://www.cvedetails.com/cve/CVE-2019-11487/
|
CWE-416
|
https://github.com/torvalds/linux/commit/6b3a707736301c2128ca85ce85fb13f60b5e350a
|
6b3a707736301c2128ca85ce85fb13f60b5e350a
|
Merge branch 'page-refs' (page ref overflow)
Merge page ref overflow branch.
Jann Horn reported that he can overflow the page ref count with
sufficient memory (and a filesystem that is intentionally extremely
slow).
Admittedly it's not exactly easy. To have more than four billion
references to a page requires a minimum of 32GB of kernel memory just
for the pointers to the pages, much less any metadata to keep track of
those pointers. Jann needed a total of 140GB of memory and a specially
crafted filesystem that leaves all reads pending (in order to not ever
free the page references and just keep adding more).
Still, we have a fairly straightforward way to limit the two obvious
user-controllable sources of page references: direct-IO like page
references gotten through get_user_pages(), and the splice pipe page
duplication. So let's just do that.
* branch page-refs:
fs: prevent page refcount overflow in pipe_buf_get
mm: prevent get_user_pages() from overflowing page refcount
mm: add 'try_get_page()' helper function
mm: make page ref count overflow check tighter and more explicit
|
static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
struct file *out, loff_t *ppos,
size_t len, unsigned int flags)
{
unsigned nbuf;
unsigned idx;
struct pipe_buffer *bufs;
struct fuse_copy_state cs;
struct fuse_dev *fud;
size_t rem;
ssize_t ret;
fud = fuse_get_dev(out);
if (!fud)
return -EPERM;
pipe_lock(pipe);
bufs = kvmalloc_array(pipe->nrbufs, sizeof(struct pipe_buffer),
GFP_KERNEL);
if (!bufs) {
pipe_unlock(pipe);
return -ENOMEM;
}
nbuf = 0;
rem = 0;
for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
ret = -EINVAL;
if (rem < len)
goto out_free;
rem = len;
while (rem) {
struct pipe_buffer *ibuf;
struct pipe_buffer *obuf;
BUG_ON(nbuf >= pipe->buffers);
BUG_ON(!pipe->nrbufs);
ibuf = &pipe->bufs[pipe->curbuf];
obuf = &bufs[nbuf];
if (rem >= ibuf->len) {
*obuf = *ibuf;
ibuf->ops = NULL;
pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
pipe->nrbufs--;
} else {
if (!pipe_buf_get(pipe, ibuf))
goto out_free;
*obuf = *ibuf;
obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
obuf->len = rem;
ibuf->offset += obuf->len;
ibuf->len -= obuf->len;
}
nbuf++;
rem -= obuf->len;
}
pipe_unlock(pipe);
fuse_copy_init(&cs, 0, NULL);
cs.pipebufs = bufs;
cs.nr_segs = nbuf;
cs.pipe = pipe;
if (flags & SPLICE_F_MOVE)
cs.move_pages = 1;
ret = fuse_dev_do_write(fud, &cs, len);
pipe_lock(pipe);
out_free:
for (idx = 0; idx < nbuf; idx++)
pipe_buf_release(pipe, &bufs[idx]);
pipe_unlock(pipe);
kvfree(bufs);
return ret;
}
|
static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
struct file *out, loff_t *ppos,
size_t len, unsigned int flags)
{
unsigned nbuf;
unsigned idx;
struct pipe_buffer *bufs;
struct fuse_copy_state cs;
struct fuse_dev *fud;
size_t rem;
ssize_t ret;
fud = fuse_get_dev(out);
if (!fud)
return -EPERM;
pipe_lock(pipe);
bufs = kvmalloc_array(pipe->nrbufs, sizeof(struct pipe_buffer),
GFP_KERNEL);
if (!bufs) {
pipe_unlock(pipe);
return -ENOMEM;
}
nbuf = 0;
rem = 0;
for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
ret = -EINVAL;
if (rem < len) {
pipe_unlock(pipe);
goto out;
}
rem = len;
while (rem) {
struct pipe_buffer *ibuf;
struct pipe_buffer *obuf;
BUG_ON(nbuf >= pipe->buffers);
BUG_ON(!pipe->nrbufs);
ibuf = &pipe->bufs[pipe->curbuf];
obuf = &bufs[nbuf];
if (rem >= ibuf->len) {
*obuf = *ibuf;
ibuf->ops = NULL;
pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
pipe->nrbufs--;
} else {
pipe_buf_get(pipe, ibuf);
*obuf = *ibuf;
obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
obuf->len = rem;
ibuf->offset += obuf->len;
ibuf->len -= obuf->len;
}
nbuf++;
rem -= obuf->len;
}
pipe_unlock(pipe);
fuse_copy_init(&cs, 0, NULL);
cs.pipebufs = bufs;
cs.nr_segs = nbuf;
cs.pipe = pipe;
if (flags & SPLICE_F_MOVE)
cs.move_pages = 1;
ret = fuse_dev_do_write(fud, &cs, len);
pipe_lock(pipe);
for (idx = 0; idx < nbuf; idx++)
pipe_buf_release(pipe, &bufs[idx]);
pipe_unlock(pipe);
out:
kvfree(bufs);
return ret;
}
|
C
|
linux
| 1 |
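Note on the record above: the fixed splice path checks pipe_buf_get() and unwinds via out_free because this merge made page/buffer reference grabs fallible near the overflow boundary. A self-contained C11 sketch of that "try-get" shape (illustrative type, not the kernel's struct page):

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

struct ref_obj {
	atomic_int refcount;
};

/* Fallible reference grab in the spirit of try_get_page() and the
 * bool-returning pipe_buf_get(): refuse to bump a count that is zero,
 * negative, or already close enough to overflow to be dangerous. */
static bool try_get_ref(struct ref_obj *obj)
{
	int old = atomic_load(&obj->refcount);

	do {
		if (old <= 0 || old > INT_MAX / 2)
			return false;
	} while (!atomic_compare_exchange_weak(&obj->refcount, &old, old + 1));

	return true;
}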
CVE-2017-9761
|
https://www.cvedetails.com/cve/CVE-2017-9761/
|
CWE-119
|
https://github.com/radare/radare2/commit/00e8f205475332d7842d0f0d1481eeab4e83017c
|
00e8f205475332d7842d0f0d1481eeab4e83017c
|
Fix #7727 - undefined pointers and out of band string access fixes
|
static int cmd_stdin(void *data, const char *input) {
	RCore *core = (RCore *)data;
	if (input[0] == '?') {
		r_cons_printf ("Usage: '-' '.-' '. -' do the same\n");
		return false;
	}
	return r_core_run_script (core, "-");
}
|
static int cmd_stdin(void *data, const char *input) {
	RCore *core = (RCore *)data;
	if (input[0] == '?') {
		r_cons_printf ("Usage: '-' '.-' '. -' do the same\n");
		return false;
	}
	return r_core_run_script (core, "-");
}
|
C
|
radare2
| 0 |
CVE-2015-8746
|
https://www.cvedetails.com/cve/CVE-2015-8746/
| null |
https://github.com/torvalds/linux/commit/18e3b739fdc826481c6a1335ce0c5b19b3d415da
|
18e3b739fdc826481c6a1335ce0c5b19b3d415da
|
NFS: Fix a NULL pointer dereference of migration recovery ops for v4.2 client
---Steps to Reproduce--
<nfs-server>
# cat /etc/exports
/nfs/referal *(rw,insecure,no_subtree_check,no_root_squash,crossmnt)
/nfs/old *(ro,insecure,subtree_check,root_squash,crossmnt)
<nfs-client>
# mount -t nfs nfs-server:/nfs/ /mnt/
# ll /mnt/*/
<nfs-server>
# cat /etc/exports
/nfs/referal *(rw,insecure,no_subtree_check,no_root_squash,crossmnt,refer=/nfs/old/@nfs-server)
/nfs/old *(ro,insecure,subtree_check,root_squash,crossmnt)
# service nfs restart
<nfs-client>
# ll /mnt/*/ --->>>>> oops here
[ 5123.102925] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 5123.103363] IP: [<ffffffffa03ed38b>] nfs4_proc_get_locations+0x9b/0x120 [nfsv4]
[ 5123.103752] PGD 587b9067 PUD 3cbf5067 PMD 0
[ 5123.104131] Oops: 0000 [#1]
[ 5123.104529] Modules linked in: nfsv4(OE) nfs(OE) fscache(E) nfsd(OE) xfs libcrc32c iscsi_tcp libiscsi_tcp libiscsi scsi_transport_iscsi coretemp crct10dif_pclmul crc32_pclmul crc32c_intel ghash_clmulni_intel ppdev vmw_balloon parport_pc parport i2c_piix4 shpchp auth_rpcgss nfs_acl vmw_vmci lockd grace sunrpc vmwgfx drm_kms_helper ttm drm mptspi serio_raw scsi_transport_spi e1000 mptscsih mptbase ata_generic pata_acpi [last unloaded: nfsd]
[ 5123.105887] CPU: 0 PID: 15853 Comm: ::1-manager Tainted: G OE 4.2.0-rc6+ #214
[ 5123.106358] Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 05/20/2014
[ 5123.106860] task: ffff88007620f300 ti: ffff88005877c000 task.ti: ffff88005877c000
[ 5123.107363] RIP: 0010:[<ffffffffa03ed38b>] [<ffffffffa03ed38b>] nfs4_proc_get_locations+0x9b/0x120 [nfsv4]
[ 5123.107909] RSP: 0018:ffff88005877fdb8 EFLAGS: 00010246
[ 5123.108435] RAX: ffff880053f3bc00 RBX: ffff88006ce6c908 RCX: ffff880053a0d240
[ 5123.108968] RDX: ffffea0000e6d940 RSI: ffff8800399a0000 RDI: ffff88006ce6c908
[ 5123.109503] RBP: ffff88005877fe28 R08: ffffffff81c708a0 R09: 0000000000000000
[ 5123.110045] R10: 00000000000001a2 R11: ffff88003ba7f5c8 R12: ffff880054c55800
[ 5123.110618] R13: 0000000000000000 R14: ffff880053a0d240 R15: ffff880053a0d240
[ 5123.111169] FS: 0000000000000000(0000) GS:ffffffff81c27000(0000) knlGS:0000000000000000
[ 5123.111726] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 5123.112286] CR2: 0000000000000000 CR3: 0000000054cac000 CR4: 00000000001406f0
[ 5123.112888] Stack:
[ 5123.113458] ffffea0000e6d940 ffff8800399a0000 00000000000167d0 0000000000000000
[ 5123.114049] 0000000000000000 0000000000000000 0000000000000000 00000000a7ec82c6
[ 5123.114662] ffff88005877fe18 ffffea0000e6d940 ffff8800399a0000 ffff880054c55800
[ 5123.115264] Call Trace:
[ 5123.115868] [<ffffffffa03fb44b>] nfs4_try_migration+0xbb/0x220 [nfsv4]
[ 5123.116487] [<ffffffffa03fcb3b>] nfs4_run_state_manager+0x4ab/0x7b0 [nfsv4]
[ 5123.117104] [<ffffffffa03fc690>] ? nfs4_do_reclaim+0x510/0x510 [nfsv4]
[ 5123.117813] [<ffffffff810a4527>] kthread+0xd7/0xf0
[ 5123.118456] [<ffffffff810a4450>] ? kthread_worker_fn+0x160/0x160
[ 5123.119108] [<ffffffff816d9cdf>] ret_from_fork+0x3f/0x70
[ 5123.119723] [<ffffffff810a4450>] ? kthread_worker_fn+0x160/0x160
[ 5123.120329] Code: 4c 8b 6a 58 74 17 eb 52 48 8d 55 a8 89 c6 4c 89 e7 e8 4a b5 ff ff 8b 45 b0 85 c0 74 1c 4c 89 f9 48 8b 55 90 48 8b 75 98 48 89 df <41> ff 55 00 3d e8 d8 ff ff 41 89 c6 74 cf 48 8b 4d c8 65 48 33
[ 5123.121643] RIP [<ffffffffa03ed38b>] nfs4_proc_get_locations+0x9b/0x120 [nfsv4]
[ 5123.122308] RSP <ffff88005877fdb8>
[ 5123.122942] CR2: 0000000000000000
Fixes: ec011fe847 ("NFS: Introduce a vector of migration recovery ops")
Cc: [email protected] # v3.13+
Signed-off-by: Kinglong Mee <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
|
static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
gfp_t gfp_mask)
{
struct nfs4_lockdata *p;
struct inode *inode = lsp->ls_state->inode;
struct nfs_server *server = NFS_SERVER(inode);
struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
p = kzalloc(sizeof(*p), gfp_mask);
if (p == NULL)
return NULL;
p->arg.fh = NFS_FH(inode);
p->arg.fl = &p->fl;
p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
if (IS_ERR(p->arg.open_seqid))
goto out_free;
alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
if (IS_ERR(p->arg.lock_seqid))
goto out_free_seqid;
p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
p->arg.lock_owner.s_dev = server->s_dev;
p->res.lock_seqid = p->arg.lock_seqid;
p->lsp = lsp;
p->server = server;
atomic_inc(&lsp->ls_count);
p->ctx = get_nfs_open_context(ctx);
get_file(fl->fl_file);
memcpy(&p->fl, fl, sizeof(p->fl));
return p;
out_free_seqid:
nfs_free_seqid(p->arg.open_seqid);
out_free:
kfree(p);
return NULL;
}
|
static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
gfp_t gfp_mask)
{
struct nfs4_lockdata *p;
struct inode *inode = lsp->ls_state->inode;
struct nfs_server *server = NFS_SERVER(inode);
struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
p = kzalloc(sizeof(*p), gfp_mask);
if (p == NULL)
return NULL;
p->arg.fh = NFS_FH(inode);
p->arg.fl = &p->fl;
p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
if (IS_ERR(p->arg.open_seqid))
goto out_free;
alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
if (IS_ERR(p->arg.lock_seqid))
goto out_free_seqid;
p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
p->arg.lock_owner.s_dev = server->s_dev;
p->res.lock_seqid = p->arg.lock_seqid;
p->lsp = lsp;
p->server = server;
atomic_inc(&lsp->ls_count);
p->ctx = get_nfs_open_context(ctx);
get_file(fl->fl_file);
memcpy(&p->fl, fl, sizeof(p->fl));
return p;
out_free_seqid:
nfs_free_seqid(p->arg.open_seqid);
out_free:
kfree(p);
return NULL;
}
|
C
|
linux
| 0 |
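Note on the record above: the oops trace shows a jump through a NULL method table in nfs4_proc_get_locations(). The upstream fix fills in the missing migration-recovery ops entry for the v4.2 minor version; independent of that, the crash shape is a call through a NULL ops vector, sketched generically here (illustrative types, not the real nfs4 structures):

#include <errno.h>
#include <stddef.h>

struct mig_recovery_ops {
	int (*get_locations)(void *server, void *locations);
};

static int get_locations_guarded(const struct mig_recovery_ops *ops,
				 void *server, void *locations)
{
	/* A minor version that never filled in its migration-recovery
	 * ops leaves this vector NULL; calling through it unguarded is
	 * exactly the oops captured above. */
	if (!ops || !ops->get_locations)
		return -EOPNOTSUPP;
	return ops->get_locations(server, locations);
}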
CVE-2017-13142
|
https://www.cvedetails.com/cve/CVE-2017-13142/
|
CWE-754
|
https://github.com/ImageMagick/ImageMagick/commit/aa84944b405acebbeefe871d0f64969b9e9f31ac
|
aa84944b405acebbeefe871d0f64969b9e9f31ac
|
...
|
Magick_png_write_raw_profile(const ImageInfo *image_info,png_struct *ping,
png_info *ping_info, unsigned char *profile_type, unsigned char
*profile_description, unsigned char *profile_data, png_uint_32 length)
{
png_textp
text;
register ssize_t
i;
unsigned char
*sp;
png_charp
dp;
png_uint_32
allocated_length,
description_length;
unsigned char
hex[16]={'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'};
if (LocaleNCompare((char *) profile_type+1, "ng-chunk-",9) == 0)
return;
if (image_info->verbose)
{
(void) printf("writing raw profile: type=%s, length=%.20g\n",
(char *) profile_type, (double) length);
}
#if PNG_LIBPNG_VER >= 10400
text=(png_textp) png_malloc(ping,(png_alloc_size_t) sizeof(png_text));
#else
text=(png_textp) png_malloc(ping,(png_size_t) sizeof(png_text));
#endif
description_length=(png_uint_32) strlen((const char *) profile_description);
allocated_length=(png_uint_32) (length*2 + (length >> 5) + 20
+ description_length);
#if PNG_LIBPNG_VER >= 10400
text[0].text=(png_charp) png_malloc(ping,
(png_alloc_size_t) allocated_length);
text[0].key=(png_charp) png_malloc(ping, (png_alloc_size_t) 80);
#else
text[0].text=(png_charp) png_malloc(ping, (png_size_t) allocated_length);
text[0].key=(png_charp) png_malloc(ping, (png_size_t) 80);
#endif
text[0].key[0]='\0';
(void) ConcatenateMagickString(text[0].key,
"Raw profile type ",MaxTextExtent);
(void) ConcatenateMagickString(text[0].key,(const char *) profile_type,62);
sp=profile_data;
dp=text[0].text;
*dp++='\n';
(void) CopyMagickString(dp,(const char *) profile_description,
allocated_length);
dp+=description_length;
*dp++='\n';
(void) FormatLocaleString(dp,allocated_length-
(png_size_t) (dp-text[0].text),"%8lu ",(unsigned long) length);
dp+=8;
for (i=0; i < (ssize_t) length; i++)
{
if (i%36 == 0)
*dp++='\n';
*(dp++)=(char) hex[((*sp >> 4) & 0x0f)];
*(dp++)=(char) hex[((*sp++ ) & 0x0f)];
}
*dp++='\n';
*dp='\0';
text[0].text_length=(png_size_t) (dp-text[0].text);
text[0].compression=image_info->compression == NoCompression ||
(image_info->compression == UndefinedCompression &&
text[0].text_length < 128) ? -1 : 0;
if (text[0].text_length <= allocated_length)
png_set_text(ping,ping_info,text,1);
png_free(ping,text[0].text);
png_free(ping,text[0].key);
png_free(ping,text);
}
|
Magick_png_write_raw_profile(const ImageInfo *image_info,png_struct *ping,
png_info *ping_info, unsigned char *profile_type, unsigned char
*profile_description, unsigned char *profile_data, png_uint_32 length)
{
png_textp
text;
register ssize_t
i;
unsigned char
*sp;
png_charp
dp;
png_uint_32
allocated_length,
description_length;
unsigned char
hex[16]={'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'};
if (LocaleNCompare((char *) profile_type+1, "ng-chunk-",9) == 0)
return;
if (image_info->verbose)
{
(void) printf("writing raw profile: type=%s, length=%.20g\n",
(char *) profile_type, (double) length);
}
#if PNG_LIBPNG_VER >= 10400
text=(png_textp) png_malloc(ping,(png_alloc_size_t) sizeof(png_text));
#else
text=(png_textp) png_malloc(ping,(png_size_t) sizeof(png_text));
#endif
description_length=(png_uint_32) strlen((const char *) profile_description);
allocated_length=(png_uint_32) (length*2 + (length >> 5) + 20
+ description_length);
#if PNG_LIBPNG_VER >= 10400
text[0].text=(png_charp) png_malloc(ping,
(png_alloc_size_t) allocated_length);
text[0].key=(png_charp) png_malloc(ping, (png_alloc_size_t) 80);
#else
text[0].text=(png_charp) png_malloc(ping, (png_size_t) allocated_length);
text[0].key=(png_charp) png_malloc(ping, (png_size_t) 80);
#endif
text[0].key[0]='\0';
(void) ConcatenateMagickString(text[0].key,
"Raw profile type ",MaxTextExtent);
(void) ConcatenateMagickString(text[0].key,(const char *) profile_type,62);
sp=profile_data;
dp=text[0].text;
*dp++='\n';
(void) CopyMagickString(dp,(const char *) profile_description,
allocated_length);
dp+=description_length;
*dp++='\n';
(void) FormatLocaleString(dp,allocated_length-
(png_size_t) (dp-text[0].text),"%8lu ",(unsigned long) length);
dp+=8;
for (i=0; i < (ssize_t) length; i++)
{
if (i%36 == 0)
*dp++='\n';
*(dp++)=(char) hex[((*sp >> 4) & 0x0f)];
*(dp++)=(char) hex[((*sp++ ) & 0x0f)];
}
*dp++='\n';
*dp='\0';
text[0].text_length=(png_size_t) (dp-text[0].text);
text[0].compression=image_info->compression == NoCompression ||
(image_info->compression == UndefinedCompression &&
text[0].text_length < 128) ? -1 : 0;
if (text[0].text_length <= allocated_length)
png_set_text(ping,ping_info,text,1);
png_free(ping,text[0].text);
png_free(ping,text[0].key);
png_free(ping,text);
}
|
C
|
ImageMagick
| 0 |
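Note on the record above: the hardening hinges on the sizing arithmetic plus the final text_length <= allocated_length guard before png_set_text(). The budget decodes as: two hex digits per profile byte, one newline per 36 bytes over-approximated as length >> 5 (i.e. length/32, and 32 < 36), roughly 20 bytes of framing, plus the description. A mirror of that arithmetic:

#include <stdint.h>

/* Mirror of the buffer budget in Magick_png_write_raw_profile(): hex
 * body (2 * length), newlines (length >> 5), ~20 bytes of framing
 * ("\n", the "%8lu " count field, trailing "\n\0"), plus the profile
 * description.  The arithmetic itself can wrap for pathological
 * lengths, which is why the writer's final
 * text_length <= allocated_length check matters. */
static uint32_t raw_profile_alloc_len(uint32_t length, uint32_t desc_len)
{
	return length * 2 + (length >> 5) + 20 + desc_len;
}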
CVE-2013-2906
|
https://www.cvedetails.com/cve/CVE-2013-2906/
|
CWE-362
|
https://github.com/chromium/chromium/commit/c4a4dfb26615b5ef5e9dcc730ef43f70ce9202e2
|
c4a4dfb26615b5ef5e9dcc730ef43f70ce9202e2
|
Suspend shared timers while blockingly closing databases
BUG=388771
[email protected]
Review URL: https://codereview.chromium.org/409863002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@284785 0039d316-1c4b-4281-b951-d872f2087c98
|
void RenderThreadImpl::Shutdown() {
FOR_EACH_OBSERVER(
RenderProcessObserver, observers_, OnRenderProcessShutdown());
ChildThread::Shutdown();
if (memory_observer_) {
message_loop()->RemoveTaskObserver(memory_observer_.get());
memory_observer_.reset();
}
if (webkit_platform_support_) {
// WaitForAllDatabasesToClose might run a nested message loop. To avoid
// processing timer events while we're already in the process of shutting
// down blink, put a ScopePageLoadDeferrer on the stack.
WebView::willEnterModalLoop();
webkit_platform_support_->web_database_observer_impl()->
WaitForAllDatabasesToClose();
WebView::didExitModalLoop();
}
if (devtools_agent_message_filter_.get()) {
RemoveFilter(devtools_agent_message_filter_.get());
devtools_agent_message_filter_ = NULL;
}
RemoveFilter(audio_input_message_filter_.get());
audio_input_message_filter_ = NULL;
RemoveFilter(audio_message_filter_.get());
audio_message_filter_ = NULL;
#if defined(ENABLE_WEBRTC)
RTCPeerConnectionHandler::DestructAllHandlers();
peer_connection_factory_.reset();
#endif
RemoveFilter(vc_manager_->video_capture_message_filter());
vc_manager_.reset();
RemoveFilter(db_message_filter_.get());
db_message_filter_ = NULL;
if (file_thread_)
file_thread_->Stop();
if (compositor_output_surface_filter_.get()) {
RemoveFilter(compositor_output_surface_filter_.get());
compositor_output_surface_filter_ = NULL;
}
media_thread_.reset();
compositor_thread_.reset();
input_handler_manager_.reset();
if (input_event_filter_.get()) {
RemoveFilter(input_event_filter_.get());
input_event_filter_ = NULL;
}
embedded_worker_dispatcher_.reset();
main_thread_indexed_db_dispatcher_.reset();
if (webkit_platform_support_)
blink::shutdown();
lazy_tls.Pointer()->Set(NULL);
#if defined(OS_WIN)
NPChannelBase::CleanupChannels();
#endif
}
|
void RenderThreadImpl::Shutdown() {
FOR_EACH_OBSERVER(
RenderProcessObserver, observers_, OnRenderProcessShutdown());
ChildThread::Shutdown();
if (memory_observer_) {
message_loop()->RemoveTaskObserver(memory_observer_.get());
memory_observer_.reset();
}
if (webkit_platform_support_) {
webkit_platform_support_->web_database_observer_impl()->
WaitForAllDatabasesToClose();
}
if (devtools_agent_message_filter_.get()) {
RemoveFilter(devtools_agent_message_filter_.get());
devtools_agent_message_filter_ = NULL;
}
RemoveFilter(audio_input_message_filter_.get());
audio_input_message_filter_ = NULL;
RemoveFilter(audio_message_filter_.get());
audio_message_filter_ = NULL;
#if defined(ENABLE_WEBRTC)
RTCPeerConnectionHandler::DestructAllHandlers();
peer_connection_factory_.reset();
#endif
RemoveFilter(vc_manager_->video_capture_message_filter());
vc_manager_.reset();
RemoveFilter(db_message_filter_.get());
db_message_filter_ = NULL;
if (file_thread_)
file_thread_->Stop();
if (compositor_output_surface_filter_.get()) {
RemoveFilter(compositor_output_surface_filter_.get());
compositor_output_surface_filter_ = NULL;
}
media_thread_.reset();
compositor_thread_.reset();
input_handler_manager_.reset();
if (input_event_filter_.get()) {
RemoveFilter(input_event_filter_.get());
input_event_filter_ = NULL;
}
embedded_worker_dispatcher_.reset();
main_thread_indexed_db_dispatcher_.reset();
if (webkit_platform_support_)
blink::shutdown();
lazy_tls.Pointer()->Set(NULL);
#if defined(OS_WIN)
NPChannelBase::CleanupChannels();
#endif
}
|
C
|
Chrome
| 1 |
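Note on the record above: the fix is the three-line bracket in Shutdown() — WebView::willEnterModalLoop() before the blocking WaitForAllDatabasesToClose() (which can spin a nested message loop) and WebView::didExitModalLoop() after, so shared-timer tasks cannot fire mid-shutdown. The same bracket pattern as a stubbed C sketch (all names hypothetical):

#include <stdio.h>

/* Hypothetical shims for WebView::willEnterModalLoop() /
 * WebView::didExitModalLoop(): suspend and resume shared-timer
 * dispatch around a nested message loop. */
static void suspend_shared_timers(void) { puts("timers suspended"); }
static void resume_shared_timers(void) { puts("timers resumed"); }

/* Stand-in for WaitForAllDatabasesToClose(), which may spin a nested
 * message loop -- the reason the timers must already be quiet. */
static void wait_for_all_databases_to_close(void) { puts("waiting"); }

int main(void)
{
	suspend_shared_timers();
	wait_for_all_databases_to_close();
	resume_shared_timers();
	return 0;
}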
null | null | null |
https://github.com/chromium/chromium/commit/d4cd2b2c0953ad7e9fa988c234eb9361be80fe81
|
d4cd2b2c0953ad7e9fa988c234eb9361be80fe81
|
DevTools: 'Overrides' UI overlay obstructs page and element inspector
BUG=302862
[email protected]
Review URL: https://codereview.chromium.org/40233006
git-svn-id: svn://svn.chromium.org/blink/trunk@160559 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void InspectorController::getHighlight(Highlight* highlight) const
{
    m_overlay->getHighlight(highlight);
}
|
void InspectorController::getHighlight(Highlight* highlight) const
{
    m_overlay->getHighlight(highlight);
}
|
C
|
Chrome
| 0 |