func (string, lengths 0–484k) | target (int64, 0–1) | cwe (list, lengths 0–4) | project (string, 799 classes) | commit_id (string, length 40) | hash (float64) | size (int64, 1–24k) | message (string, lengths 0–13.3k) |
---|---|---|---|---|---|---|---|
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
nodemask_t tmp;
if (pol->flags & MPOL_F_STATIC_NODES)
nodes_and(tmp, pol->w.user_nodemask, *nodes);
else if (pol->flags & MPOL_F_RELATIVE_NODES)
mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
else {
nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed,
*nodes);
pol->w.cpuset_mems_allowed = *nodes;
}
if (nodes_empty(tmp))
tmp = *nodes;
pol->v.nodes = tmp;
} | 0 | [
"CWE-787"
]
| linux | aa9f7d5172fac9bf1f09e678c35e287a40a7b7dd | 254,588,786,270,450,470,000,000,000,000,000,000,000 | 19 | mm: mempolicy: require at least one nodeid for MPOL_PREFERRED
Using an empty (malformed) nodelist that is not caught during mount option
parsing leads to a stack-out-of-bounds access.
The option string that was used was: "mpol=prefer:,". However,
MPOL_PREFERRED requires a single node number, which is not being provided
here.
Add a check that 'nodes' is not empty after parsing for MPOL_PREFERRED's
nodeid.
Fixes: 095f1fc4ebf3 ("mempolicy: rework shmem mpol parsing and display")
Reported-by: Entropy Moe <[email protected]>
Reported-by: [email protected]
Signed-off-by: Randy Dunlap <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Tested-by: [email protected]
Cc: Lee Schermerhorn <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]> |
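The fix this commit message describes is a parse-time guard against an empty nodelist. The standalone sketch below only illustrates that idea: reject a "prefer" policy whose nodelist after the ':' is empty before any policy object gets built. The parser name, option handling, and return convention here are illustrative assumptions, not the kernel's mpol_parse_str().

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Reject "prefer" when the nodelist after the ':' is empty, mirroring the
 * guard the commit adds; a real parser would build a nodemask instead. */
static bool parse_prefer(const char *opt, int *nodeid)
{
	const char *colon = strchr(opt, ':');

	if (!colon || colon[1] == '\0' || colon[1] == ',')
		return false;	/* empty nodelist such as "mpol=prefer:," */

	*nodeid = atoi(colon + 1);
	return true;
}

int main(void)
{
	int node = -1;

	/* The malformed mount option from the report, then a well-formed one. */
	printf("%s\n", parse_prefer("mpol=prefer:,", &node) ? "accepted" : "rejected");
	printf("%s\n", parse_prefer("mpol=prefer:1", &node) ? "accepted" : "rejected");
	return 0;
}
```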
bool r_pkcs7_parse_signeddata (RPKCS7SignedData *sd, RASN1Object *object) {
RASN1Object **elems;
ut32 shift = 3;
if (!sd || !object || object->list.length < 4) {
return false;
}
memset (sd, 0, sizeof (RPKCS7SignedData));
elems = object->list.objects;
//Following RFC
sd->version = (ut32) elems[0]->sector[0];
r_pkcs7_parse_digestalgorithmidentifier (&sd->digestAlgorithms, elems[1]);
r_pkcs7_parse_contentinfo (&sd->contentInfo, elems[2]);
//Optional
if (shift < object->list.length && elems[shift]->klass == CLASS_CONTEXT && elems[shift]->tag == 0) {
r_pkcs7_parse_extendedcertificatesandcertificates (&sd->certificates, elems[shift]);
shift++;
}
//Optional
if (shift < object->list.length && elems[shift]->klass == CLASS_CONTEXT && elems[shift]->tag == 1) {
r_pkcs7_parse_certificaterevocationlists (&sd->crls, elems[shift]);
shift++;
}
if (shift < object->list.length) {
r_pkcs7_parse_signerinfos (&sd->signerinfos, elems[shift]);
}
return true;
} | 0 | [
"CWE-476"
]
| radare2 | 7ab66cca5bbdf6cb2d69339ef4f513d95e532dbf | 58,164,401,263,032,280,000,000,000,000,000,000,000 | 27 | Fix #7152 - Null deref in cms |
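The commit message only says "Null deref in cms", and the function above reads elems[0]->sector[0] without checking the parsed elements. As a hedged illustration of the defensive pattern such a fix usually needs (the types below are stand-ins, not radare2's), every pointer level of a parsed structure can be validated before it is read:

```c
#include <stdbool.h>
#include <stdio.h>

typedef struct { unsigned char *sector; } elem_t;

/* Refuse to read through a malformed element array instead of crashing. */
static bool read_version(elem_t **elems, unsigned *version)
{
	if (elems == NULL || elems[0] == NULL || elems[0]->sector == NULL)
		return false;
	*version = elems[0]->sector[0];
	return true;
}

int main(void)
{
	unsigned v;
	elem_t *broken[1] = { NULL };	/* element slot present but never parsed */

	printf("%s\n", read_version(broken, &v) ? "ok" : "rejected");
	return 0;
}
```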
static struct fdtable *close_files(struct files_struct * files)
{
/*
* It is safe to dereference the fd table without RCU or
* ->file_lock because this is the last reference to the
* files structure.
*/
struct fdtable *fdt = rcu_dereference_raw(files->fdt);
unsigned int i, j = 0;
for (;;) {
unsigned long set;
i = j * BITS_PER_LONG;
if (i >= fdt->max_fds)
break;
set = fdt->open_fds[j++];
while (set) {
if (set & 1) {
struct file * file = xchg(&fdt->fd[i], NULL);
if (file) {
filp_close(file, files);
cond_resched();
}
}
i++;
set >>= 1;
}
}
return fdt;
} | 0 | []
| linux | 0f2122045b946241a9e549c2a76cea54fa58a7ff | 95,979,355,906,447,740,000,000,000,000,000,000,000 | 31 | io_uring: don't rely on weak ->files references
Grab actual references to the files_struct. To avoid circular reference
issues due to this, we add a per-task note that keeps track of what
io_uring contexts a task has used. When the tasks execs or exits its
assigned files, we cancel requests based on this tracking.
With that, we can grab proper references to the files table, and no
longer need to rely on stashing away ring_fd and ring_file to check
if the ring_fd may have been closed.
Cc: [email protected] # v5.5+
Reviewed-by: Pavel Begunkov <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
struct x86_exception *e)
{
if (r == X86EMUL_PROPAGATE_FAULT) {
kvm_inject_emulated_page_fault(vcpu, e);
return 1;
}
/*
* In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED
* while handling a VMX instruction KVM could've handled the request
* correctly by exiting to userspace and performing I/O but there
* doesn't seem to be a real use-case behind such requests, just return
* KVM_EXIT_INTERNAL_ERROR for now.
*/
kvm_prepare_emulation_failure_exit(vcpu);
return 0;
} | 0 | [
"CWE-476"
]
| linux | 55749769fe608fa3f4a075e42e89d237c8e37637 | 263,684,955,570,323,630,000,000,000,000,000,000,000 | 19 | KVM: x86: Fix wall clock writes in Xen shared_info not to mark page dirty
When dirty ring logging is enabled, any dirty logging without an active
vCPU context will cause a kernel oops. But we've already declared that
the shared_info page doesn't get dirty tracking anyway, since it would
be kind of insane to mark it dirty every time we deliver an event channel
interrupt. Userspace is supposed to just assume it's always dirty any
time a vCPU can run or event channels are routed.
So stop using the generic kvm_write_wall_clock() and just write directly
through the gfn_to_pfn_cache that we already have set up.
We can make kvm_write_wall_clock() static in x86.c again now, but let's
not remove the 'sec_hi_ofs' argument even though it's not used yet. At
some point we *will* want to use that for KVM guests too.
Fixes: 629b5348841a ("KVM: x86/xen: update wallclock region")
Reported-by: butt3rflyh4ck <[email protected]>
Signed-off-by: David Woodhouse <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
may_record_change(
linenr_T lnum,
colnr_T col,
linenr_T lnume,
long xtra)
{
dict_T *dict;
if (curbuf->b_listener == NULL)
return;
// If the new change is going to change the line numbers in already listed
// changes, then flush.
check_recorded_changes(curbuf, lnum, lnume, xtra);
if (curbuf->b_recorded_changes == NULL)
{
curbuf->b_recorded_changes = list_alloc();
if (curbuf->b_recorded_changes == NULL) // out of memory
return;
++curbuf->b_recorded_changes->lv_refcount;
curbuf->b_recorded_changes->lv_lock = VAR_FIXED;
}
dict = dict_alloc();
if (dict == NULL)
return;
dict_add_number(dict, "lnum", (varnumber_T)lnum);
dict_add_number(dict, "end", (varnumber_T)lnume);
dict_add_number(dict, "added", (varnumber_T)xtra);
dict_add_number(dict, "col", (varnumber_T)col + 1);
list_append_dict(curbuf->b_recorded_changes, dict);
} | 0 | [
"CWE-120"
]
| vim | 7ce5b2b590256ce53d6af28c1d203fb3bc1d2d97 | 5,499,357,562,677,619,000,000,000,000,000,000,000 | 34 | patch 8.2.4969: changing text in Visual mode may cause invalid memory access
Problem: Changing text in Visual mode may cause invalid memory access.
Solution: Check the Visual position after making a change. |
u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
struct ring_buffer_event *event)
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()];
unsigned int nest;
u64 ts;
/* If the event includes an absolute time, then just use that */
if (event->type_len == RINGBUF_TYPE_TIME_STAMP)
return rb_event_time_stamp(event);
nest = local_read(&cpu_buffer->committing);
verify_event(cpu_buffer, event);
if (WARN_ON_ONCE(!nest))
goto fail;
/* Read the current saved nesting level time stamp */
if (likely(--nest < MAX_NEST))
return cpu_buffer->event_stamp[nest];
/* Shouldn't happen, warn if it does */
WARN_ONCE(1, "nest (%d) greater than max", nest);
fail:
/* Can only fail on 32 bit */
if (!rb_time_read(&cpu_buffer->write_stamp, &ts))
/* Screw it, just read the current time */
ts = rb_time_stamp(cpu_buffer->buffer);
return ts;
} | 0 | [
"CWE-835"
]
| linux | 67f0d6d9883c13174669f88adac4f0ee656cc16a | 78,969,417,543,876,200,000,000,000,000,000,000,000 | 31 | tracing: Fix bug in rb_per_cpu_empty() that might cause deadloop.
The "rb_per_cpu_empty()" misinterpret the condition (as not-empty) when
"head_page" and "commit_page" of "struct ring_buffer_per_cpu" points to
the same buffer page, whose "buffer_data_page" is empty and "read" field
is non-zero.
An error scenario could be constructed as follows (kernel perspective):
1. All pages in the buffer has been accessed by reader(s) so that all of
them will have non-zero "read" field.
2. Read and clear all buffer pages so that "rb_num_of_entries()" will
return 0 rendering there's no more data to read. It is also required
that the "read_page", "commit_page" and "tail_page" points to the same
page, while "head_page" is the next page of them.
3. Invoke "ring_buffer_lock_reserve()" with large enough "length"
so that it shoots past the end of the current tail buffer page. Now the
"head_page", "commit_page" and "tail_page" points to the same page.
4. Discard current event with "ring_buffer_discard_commit()", so that
"head_page", "commit_page" and "tail_page" points to a page whose buffer
data page is now empty.
When the error scenario has been constructed, "tracing_read_pipe" will
be trapped inside a deadloop: "trace_empty()" returns 0 since
"rb_per_cpu_empty()" returns 0 when it hits the CPU containing such
constructed ring buffer. Then "trace_find_next_entry_inc()" always
return NULL since "rb_num_of_entries()" reports there's no more entry
to read. Finally "trace_seq_to_user()" returns "-EBUSY" spanking
"tracing_read_pipe" back to the start of the "waitagain" loop.
I've also written a proof-of-concept script to construct the scenario
and trigger the bug automatically, you can use it to trace and validate
my reasoning above:
https://github.com/aegistudio/RingBufferDetonator.git
Tests have been carried out on linux kernel 5.14-rc2
(2734d6c1b1a089fb593ef6a23d4b70903526fe0c), my fixed version
of kernel (for testing whether my update fixes the bug) and
some older kernels (for range of affected kernels). Test result is
also attached to the proof-of-concept repository.
Link: https://lore.kernel.org/linux-trace-devel/YPaNxsIlb2yjSi5Y@aegistudio/
Link: https://lore.kernel.org/linux-trace-devel/YPgrN85WL9VyrZ55@aegistudio
Cc: [email protected]
Fixes: bf41a158cacba ("ring-buffer: make reentrant")
Suggested-by: Linus Torvalds <[email protected]>
Signed-off-by: Haoran Luo <[email protected]>
Signed-off-by: Steven Rostedt (VMware) <[email protected]> |
void Http2Session::UpdateChunksSent(const FunctionCallbackInfo<Value>& args) {
Environment* env = Environment::GetCurrent(args);
Isolate* isolate = env->isolate();
HandleScope scope(isolate);
Http2Session* session;
ASSIGN_OR_RETURN_UNWRAP(&session, args.Holder());
uint32_t length = session->chunks_sent_since_last_write_;
session->object()->Set(env->context(),
env->chunks_sent_since_last_write_string(),
Integer::NewFromUnsigned(isolate, length)).FromJust();
args.GetReturnValue().Set(length);
} | 0 | []
| node | ce22d6f9178507c7a41b04ac4097b9ea902049e3 | 263,164,625,632,827,900,000,000,000,000,000,000,000 | 15 | http2: add altsvc support
Add support for sending and receiving ALTSVC frames.
PR-URL: https://github.com/nodejs/node/pull/17917
Reviewed-By: Anna Henningsen <[email protected]>
Reviewed-By: Tiancheng "Timothy" Gu <[email protected]>
Reviewed-By: Matteo Collina <[email protected]> |
cmsBool CheckOne(const cmsAllowedLUT* Tab, const cmsPipeline* Lut)
{
cmsStage* mpe;
int n;
for (n=0, mpe = Lut ->Elements; mpe != NULL; mpe = mpe ->Next, n++) {
if (n > Tab ->nTypes) return FALSE;
if (cmsStageType(mpe) != Tab ->MpeTypes[n]) return FALSE;
}
return (n == Tab ->nTypes);
} | 0 | []
| Little-CMS | 41d222df1bc6188131a8f46c32eab0a4d4cdf1b6 | 112,165,960,929,070,950,000,000,000,000,000,000,000 | 13 | Memory squeezing fix: lcms2 cmsPipeline construction
When creating a new pipeline, lcms would often try to allocate a stage
and pass it to cmsPipelineInsertStage without checking whether the
allocation succeeded. cmsPipelineInsertStage would then assert (or crash)
if it had not.
The fix here is to change cmsPipelineInsertStage to check and return
an error value. All calling code is then checked to test this return
value and cope. |
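As a hedged sketch of the calling pattern this change enables (assuming the post-fix lcms2 API, where cmsPipelineInsertStage() returns a boolean-style success flag, and assuming the caller still owns a stage that failed to insert), a caller can now cope with allocation failure instead of tripping an assert inside the library:

```c
#include <lcms2.h>
#include <stddef.h>

/* Build a small identity pipeline, checking both the stage allocation and
 * the insertion result instead of assuming they succeed. */
static cmsPipeline *build_identity_lut(cmsContext ctx)
{
	cmsPipeline *lut = cmsPipelineAlloc(ctx, 3, 3);
	cmsStage *stage;

	if (lut == NULL)
		return NULL;

	stage = cmsStageAllocIdentity(ctx, 3);
	if (stage == NULL || !cmsPipelineInsertStage(lut, cmsAT_END, stage)) {
		/* Assumption: on a failed insert the stage is still owned here. */
		if (stage != NULL)
			cmsStageFree(stage);
		cmsPipelineFree(lut);
		return NULL;
	}
	return lut;
}

int main(void)
{
	cmsPipeline *lut = build_identity_lut(NULL);	/* NULL = default context */

	if (lut != NULL)
		cmsPipelineFree(lut);
	return 0;
}
```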
const char* GetSigName(int oid) {
switch (oid) {
#if !defined(NO_DSA) && !defined(NO_SHA)
case CTC_SHAwDSA:
return sigSha1wDsaName;
case CTC_SHA256wDSA:
return sigSha256wDsaName;
#endif /* NO_DSA && NO_SHA */
#ifndef NO_RSA
#ifdef WOLFSSL_MD2
case CTC_MD2wRSA:
return sigMd2wRsaName;
#endif
#ifndef NO_MD5
case CTC_MD5wRSA:
return sigMd5wRsaName;
#endif
#ifndef NO_SHA
case CTC_SHAwRSA:
return sigSha1wRsaName;
#endif
#ifdef WOLFSSL_SHA224
case CTC_SHA224wRSA:
return sigSha224wRsaName;
#endif
#ifndef NO_SHA256
case CTC_SHA256wRSA:
return sigSha256wRsaName;
#endif
#ifdef WOLFSSL_SHA384
case CTC_SHA384wRSA:
return sigSha384wRsaName;
#endif
#ifdef WOLFSSL_SHA512
case CTC_SHA512wRSA:
return sigSha512wRsaName;
#endif
#ifdef WOLFSSL_SHA3
#ifndef WOLFSSL_NOSHA3_224
case CTC_SHA3_224wRSA:
return sigSha3_224wRsaName;
#endif
#ifndef WOLFSSL_NOSHA3_256
case CTC_SHA3_256wRSA:
return sigSha3_256wRsaName;
#endif
#ifndef WOLFSSL_NOSHA3_384
case CTC_SHA3_384wRSA:
return sigSha3_384wRsaName;
#endif
#ifndef WOLFSSL_NOSHA3_512
case CTC_SHA3_512wRSA:
return sigSha3_512wRsaName;
#endif
#endif
#endif /* NO_RSA */
#ifdef HAVE_ECC
#ifndef NO_SHA
case CTC_SHAwECDSA:
return sigSha1wEcdsaName;
#endif
#ifdef WOLFSSL_SHA224
case CTC_SHA224wECDSA:
return sigSha224wEcdsaName;
#endif
#ifndef NO_SHA256
case CTC_SHA256wECDSA:
return sigSha256wEcdsaName;
#endif
#ifdef WOLFSSL_SHA384
case CTC_SHA384wECDSA:
return sigSha384wEcdsaName;
#endif
#ifdef WOLFSSL_SHA512
case CTC_SHA512wECDSA:
return sigSha512wEcdsaName;
#endif
#ifdef WOLFSSL_SHA3
#ifndef WOLFSSL_NOSHA3_224
case CTC_SHA3_224wECDSA:
return sigSha3_224wEcdsaName;
#endif
#ifndef WOLFSSL_NOSHA3_256
case CTC_SHA3_256wECDSA:
return sigSha3_256wEcdsaName;
#endif
#ifndef WOLFSSL_NOSHA3_384
case CTC_SHA3_384wECDSA:
return sigSha3_384wEcdsaName;
#endif
#ifndef WOLFSSL_NOSHA3_512
case CTC_SHA3_512wECDSA:
return sigSha3_512wEcdsaName;
#endif
#endif
#endif /* HAVE_ECC */
default:
return sigUnknownName;
}
} | 0 | [
"CWE-125",
"CWE-345"
]
| wolfssl | f93083be72a3b3d956b52a7ec13f307a27b6e093 | 189,468,395,122,375,400,000,000,000,000,000,000,000 | 100 | OCSP: improve handling of OCSP no check extension |
prealloc_cu_tu_list (unsigned int nshndx)
{
if (shndx_pool == NULL)
{
shndx_pool_size = nshndx;
shndx_pool_used = 0;
shndx_pool = (unsigned int *) xcmalloc (shndx_pool_size,
sizeof (unsigned int));
}
else
{
shndx_pool_size = shndx_pool_used + nshndx;
shndx_pool = (unsigned int *) xcrealloc (shndx_pool, shndx_pool_size,
sizeof (unsigned int));
}
} | 0 | [
"CWE-703"
]
| binutils-gdb | 695c6dfe7e85006b98c8b746f3fd5f913c94ebff | 183,726,988,439,276,800,000,000,000,000,000,000,000 | 16 | PR29370, infinite loop in display_debug_abbrev
The PR29370 testcase is a fuzzed object file with multiple
.trace_abbrev sections. Multiple .trace_abbrev or .debug_abbrev
sections are not a violation of the DWARF standard. The DWARF5
standard even gives an example of multiple .debug_abbrev sections
contained in groups. Caching and lookup of processed abbrevs thus
needs to be done by section and offset rather than base and offset.
(Why base anyway?) Or, since section contents are kept, by a pointer
into the contents.
PR 29370
* dwarf.c (struct abbrev_list): Replace abbrev_base and
abbrev_offset with raw field.
(find_abbrev_list_by_abbrev_offset): Delete.
(find_abbrev_list_by_raw_abbrev): New function.
(process_abbrev_set): Set list->raw and list->next.
(find_and_process_abbrev_set): Replace abbrev list lookup with
new function. Don't set list abbrev_base, abbrev_offset or next. |
acl_fetch_stver(struct proxy *px, struct session *l4, void *l7, int dir,
struct acl_expr *expr, struct acl_test *test)
{
struct http_txn *txn = l7;
char *ptr;
int len;
if (!txn)
return 0;
if (txn->rsp.msg_state < HTTP_MSG_BODY)
return 0;
len = txn->rsp.sl.st.v_l;
ptr = txn->rsp.sol;
while ((len-- > 0) && (*ptr++ != '/'));
if (len <= 0)
return 0;
test->ptr = ptr;
test->len = len;
test->flags = ACL_TEST_F_READ_ONLY | ACL_TEST_F_VOL_1ST;
return 1;
} | 0 | []
| haproxy-1.4 | dc80672211e085c211f1fc47e15cfe57ab587d38 | 217,291,511,191,672,230,000,000,000,000,000,000,000 | 26 | BUG/CRITICAL: using HTTP information in tcp-request content may crash the process
During normal HTTP request processing, request buffers are realigned if
there are less than global.maxrewrite bytes available after them, in
order to leave enough room for rewriting headers after the request. This
is done in http_wait_for_request().
However, if some HTTP inspection happens during a "tcp-request content"
rule, this realignment is not performed. In theory this is not a problem
because empty buffers are always aligned and TCP inspection happens at
the beginning of a connection. But with HTTP keep-alive, it also happens
at the beginning of each subsequent request. So if a second request was
pipelined by the client before the first one had a chance to be forwarded,
the second request will not be realigned. Then, http_wait_for_request()
will not perform such a realignment either because the request was
already parsed and marked as such. The consequence of this is that the
rewrite of a sufficient number of such pipelined, unaligned requests may
leave less room past the request being processed than the configured
reserve, which can lead to a buffer overflow if request processing appends
some data past the end of the buffer.
A number of conditions are required for the bug to be triggered :
- HTTP keep-alive must be enabled ;
- HTTP inspection in TCP rules must be used ;
- some request appending rules are needed (reqadd, x-forwarded-for)
- since empty buffers are always realigned, the client must pipeline
enough requests so that the buffer always contains something till
the point where there is no more room for rewriting.
While such a configuration is quite unlikely to be met (which is
confirmed by the bug's lifetime), a few people do use these features
together for very specific usages. And more importantly, writing such
a configuration and the request to attack it is trivial.
A quick workaround consists in forcing keep-alive off by adding
"option httpclose" or "option forceclose" in the frontend. Alternatively,
disabling HTTP-based TCP inspection rules is enough if the application
supports it.
At first glance, this bug does not look like it could lead to remote code
execution, as the overflowing part is controlled by the configuration and
not by the user. But some deeper analysis should be performed to confirm
this. And anyway, corrupting the process' memory and crashing it is quite
trivial.
Special thanks go to Yves Lafon from the W3C who reported this bug and
deployed significant efforts to collect the relevant data needed to
understand it in less than one week.
CVE-2013-1912 was assigned to this issue.
Note that 1.4 is also affected so the fix must be backported.
(cherry picked from commit aae75e3279c6c9bd136413a72dafdcd4986bb89a) |
static int unix_set_peek_off(struct sock *sk, int val)
{
struct unix_sock *u = unix_sk(sk);
if (mutex_lock_interruptible(&u->readlock))
return -EINTR;
sk->sk_peek_off = val;
mutex_unlock(&u->readlock);
return 0;
} | 0 | []
| net | 7d267278a9ece963d77eefec61630223fce08c6c | 71,511,741,291,878,410,000,000,000,000,000,000,000 | 12 | unix: avoid use-after-free in ep_remove_wait_queue
Rainer Weikusat <[email protected]> writes:
An AF_UNIX datagram socket being the client in an n:1 association with
some server socket is only allowed to send messages to the server if the
receive queue of this socket contains at most sk_max_ack_backlog
datagrams. This implies that prospective writers might be forced to go
to sleep even though none of the messages presently enqueued on the server
receive queue were sent by them. In order to ensure that these will be
woken up once space becomes again available, the present unix_dgram_poll
routine does a second sock_poll_wait call with the peer_wait wait queue
of the server socket as queue argument (unix_dgram_recvmsg does a wake
up on this queue after a datagram was received). This is inherently
problematic because the server socket is only guaranteed to remain alive
for as long as the client still holds a reference to it. In case the
connection is dissolved via connect or by the dead peer detection logic
in unix_dgram_sendmsg, the server socket may be freed despite "the
polling mechanism" (in particular, epoll) still has a pointer to the
corresponding peer_wait queue. There's no way to forcibly deregister a
wait queue with epoll.
Based on an idea by Jason Baron, the patch below changes the code such
that a wait_queue_t belonging to the client socket is enqueued on the
peer_wait queue of the server whenever the peer receive queue full
condition is detected by either a sendmsg or a poll. A wake up on the
peer queue is then relayed to the ordinary wait queue of the client
socket via wake function. The connection to the peer wait queue is again
dissolved if either a wake up is about to be relayed or the client
socket reconnects or a dead peer is detected or the client socket is
itself closed. This enables removing the second sock_poll_wait from
unix_dgram_poll, thus avoiding the use-after-free, while still ensuring
that no blocked writer sleeps forever.
Signed-off-by: Rainer Weikusat <[email protected]>
Fixes: ec0d215f9420 ("af_unix: fix 'poll for write'/connected DGRAM sockets")
Reviewed-by: Jason Baron <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
CIFSSMBQFileInfo(const int xid, struct cifs_tcon *tcon,
u16 netfid, FILE_ALL_INFO *pFindData)
{
struct smb_t2_qfi_req *pSMB = NULL;
struct smb_t2_qfi_rsp *pSMBr = NULL;
int rc = 0;
int bytes_returned;
__u16 params, byte_count;
QFileInfoRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
params = 2 /* level */ + 2 /* fid */;
pSMB->t2.TotalDataCount = 0;
pSMB->t2.MaxParameterCount = cpu_to_le16(4);
/* BB find exact max data count below from sess structure BB */
pSMB->t2.MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
pSMB->t2.MaxSetupCount = 0;
pSMB->t2.Reserved = 0;
pSMB->t2.Flags = 0;
pSMB->t2.Timeout = 0;
pSMB->t2.Reserved2 = 0;
pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req,
Fid) - 4);
pSMB->t2.DataCount = 0;
pSMB->t2.DataOffset = 0;
pSMB->t2.SetupCount = 1;
pSMB->t2.Reserved3 = 0;
pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
byte_count = params + 1 /* pad */ ;
pSMB->t2.TotalParameterCount = cpu_to_le16(params);
pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount;
pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_ALL_INFO);
pSMB->Pad = 0;
pSMB->Fid = netfid;
inc_rfc1001_len(pSMB, byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, "Send error in QPathInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc) /* BB add auto retry on EOPNOTSUPP? */
rc = -EIO;
else if (get_bcc(&pSMBr->hdr) < 40)
rc = -EIO; /* bad smb */
else if (pFindData) {
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
memcpy((char *) pFindData,
(char *) &pSMBr->hdr.Protocol +
data_offset, sizeof(FILE_ALL_INFO));
} else
rc = -ENOMEM;
}
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto QFileInfoRetry;
return rc;
} | 0 | [
"CWE-362",
"CWE-119",
"CWE-189"
]
| linux | 9438fabb73eb48055b58b89fc51e0bc4db22fabd | 48,745,367,878,555,300,000,000,000,000,000,000,000 | 65 | cifs: fix possible memory corruption in CIFSFindNext
The name_len variable in CIFSFindNext is a signed int that gets set to
the resume_name_len in the cifs_search_info. The resume_name_len however
is unsigned and for some infolevels is populated directly from a 32 bit
value sent by the server.
If the server sends a very large value for this, then that value could
look negative when converted to a signed int. That would make that
value pass the PATH_MAX check later in CIFSFindNext. The name_len would
then be used as a length value for a memcpy. It would then be treated
as unsigned again, and the memcpy scribbles over a ton of memory.
Fix this by making the name_len an unsigned value in CIFSFindNext.
Cc: <[email protected]>
Reported-by: Darren Lavender <[email protected]>
Signed-off-by: Jeff Layton <[email protected]>
Signed-off-by: Steve French <[email protected]> |
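The mechanism described above is easy to reproduce in isolation. The self-contained snippet below uses made-up constants, not the cifs values, and just shows how a large unsigned length looks negative as a signed int, slips past an upper-bound check, and becomes huge again when used as a copy size:

```c
#include <stddef.h>
#include <stdio.h>

#define FAKE_PATH_MAX 4096	/* stand-in for the PATH_MAX check */

int main(void)
{
	unsigned int resume_name_len = 0xFFFFFF00u;	/* large server-supplied value */
	int name_len = (int)resume_name_len;		/* looks negative as a signed int */

	if (name_len < FAKE_PATH_MAX)			/* passes: negative < 4096 */
		printf("check passed; memcpy size would be %zu bytes\n",
		       (size_t)name_len);		/* back to unsigned: enormous */
	return 0;
}
```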
static void printFlow(u_int16_t id, struct ndpi_flow_info *flow, u_int16_t thread_id) {
FILE *out = results_file ? results_file : stdout;
u_int8_t known_tls;
char buf[32], buf1[64];
u_int i;
double dos_ge_score;
double dos_slow_score;
double dos_hulk_score;
double ddos_score;
double hearthbleed_score;
double ftp_patator_score;
double ssh_patator_score;
double inf_score;
if(csv_fp != NULL) {
float data_ratio = ndpi_data_ratio(flow->src2dst_bytes, flow->dst2src_bytes);
double f = (double)flow->first_seen, l = (double)flow->last_seen;
/* PLEASE KEEP IN SYNC WITH printCSVHeader() */
dos_ge_score = Dos_goldeneye_score(flow);
dos_slow_score = Dos_slow_score(flow);
dos_hulk_score = Dos_hulk_score(flow);
ddos_score = Ddos_score(flow);
hearthbleed_score = Hearthbleed_score(flow);
ftp_patator_score = Ftp_patator_score(flow);
ssh_patator_score = Ssh_patator_score(flow);
inf_score = Infiltration_score(flow);
double benign_score = dos_ge_score < 1 && dos_slow_score < 1 && \
dos_hulk_score < 1 && ddos_score < 1 && hearthbleed_score < 1 && \
ftp_patator_score < 1 && ssh_patator_score < 1 && inf_score < 1 ? 1.1 : 0;
fprintf(csv_fp, "%u,%u,%.3f,%.3f,%.3f,%s,%u,%s,%u,",
flow->flow_id,
flow->protocol,
f/1000.0, l/1000.0,
(l-f)/1000.0,
flow->src_name, ntohs(flow->src_port),
flow->dst_name, ntohs(flow->dst_port)
);
fprintf(csv_fp, "%s,",
ndpi_protocol2id(ndpi_thread_info[thread_id].workflow->ndpi_struct,
flow->detected_protocol, buf, sizeof(buf)));
fprintf(csv_fp, "%s,%s,",
ndpi_protocol2name(ndpi_thread_info[thread_id].workflow->ndpi_struct,
flow->detected_protocol, buf, sizeof(buf)),
flow->host_server_name);
fprintf(csv_fp, "%.4lf,%.4lf,%.4lf,%.4lf,%.4lf,%.4lf,%.4lf,%.4lf,%.4lf,", \
benign_score, dos_slow_score, dos_ge_score, dos_hulk_score, \
ddos_score, hearthbleed_score, ftp_patator_score, \
ssh_patator_score, inf_score);
fprintf(csv_fp, "%u,%llu,%llu,", flow->src2dst_packets,
(long long unsigned int) flow->src2dst_bytes, (long long unsigned int) flow->src2dst_goodput_bytes);
fprintf(csv_fp, "%u,%llu,%llu,", flow->dst2src_packets,
(long long unsigned int) flow->dst2src_bytes, (long long unsigned int) flow->dst2src_goodput_bytes);
fprintf(csv_fp, "%.3f,%s,", data_ratio, ndpi_data_ratio2str(data_ratio));
fprintf(csv_fp, "%.1f,%.1f,", 100.0*((float)flow->src2dst_goodput_bytes / (float)(flow->src2dst_bytes+1)),
100.0*((float)flow->dst2src_goodput_bytes / (float)(flow->dst2src_bytes+1)));
/* IAT (Inter Arrival Time) */
fprintf(csv_fp, "%u,%.1f,%u,%.1f,",
ndpi_data_min(flow->iat_flow), ndpi_data_average(flow->iat_flow), ndpi_data_max(flow->iat_flow), ndpi_data_stddev(flow->iat_flow));
fprintf(csv_fp, "%u,%.1f,%u,%.1f,%u,%.1f,%u,%.1f,",
ndpi_data_min(flow->iat_c_to_s), ndpi_data_average(flow->iat_c_to_s), ndpi_data_max(flow->iat_c_to_s), ndpi_data_stddev(flow->iat_c_to_s),
ndpi_data_min(flow->iat_s_to_c), ndpi_data_average(flow->iat_s_to_c), ndpi_data_max(flow->iat_s_to_c), ndpi_data_stddev(flow->iat_s_to_c));
/* Packet Length */
fprintf(csv_fp, "%u,%.1f,%u,%.1f,%u,%.1f,%u,%.1f,",
ndpi_data_min(flow->pktlen_c_to_s), ndpi_data_average(flow->pktlen_c_to_s), ndpi_data_max(flow->pktlen_c_to_s), ndpi_data_stddev(flow->pktlen_c_to_s),
ndpi_data_min(flow->pktlen_s_to_c), ndpi_data_average(flow->pktlen_s_to_c), ndpi_data_max(flow->pktlen_s_to_c), ndpi_data_stddev(flow->pktlen_s_to_c));
/* TCP flags */
fprintf(csv_fp, "%d,%d,%d,%d,%d,%d,%d,%d,", flow->cwr_count, flow->ece_count, flow->urg_count, flow->ack_count, flow->psh_count, flow->rst_count, flow->syn_count, flow->fin_count);
fprintf(csv_fp, "%d,%d,%d,%d,%d,%d,%d,%d,", flow->src2dst_cwr_count, flow->src2dst_ece_count, flow->src2dst_urg_count, flow->src2dst_ack_count, flow->src2dst_psh_count, flow->src2dst_rst_count, flow->src2dst_syn_count, flow->src2dst_fin_count);
fprintf(csv_fp, "%d,%d,%d,%d,%d,%d,%d,%d,", flow->dst2src_cwr_count, flow->ece_count, flow->urg_count, flow->ack_count, flow->psh_count, flow->rst_count, flow->syn_count, flow->fin_count);
/* TCP window */
fprintf(csv_fp, "%u,%u,", flow->c_to_s_init_win, flow->s_to_c_init_win);
fprintf(csv_fp, "%s,%s,",
(flow->ssh_tls.client_requested_server_name[0] != '\0') ? flow->ssh_tls.client_requested_server_name : "",
(flow->ssh_tls.server_info[0] != '\0') ? flow->ssh_tls.server_info : "");
fprintf(csv_fp, "%s,%s,%s,%s,%s,",
(flow->ssh_tls.ssl_version != 0) ? ndpi_ssl_version2str(flow->ssh_tls.ssl_version, &known_tls) : "0",
(flow->ssh_tls.ja3_client[0] != '\0') ? flow->ssh_tls.ja3_client : "",
(flow->ssh_tls.ja3_client[0] != '\0') ? is_unsafe_cipher(flow->ssh_tls.client_unsafe_cipher) : "0",
(flow->ssh_tls.ja3_server[0] != '\0') ? flow->ssh_tls.ja3_server : "",
(flow->ssh_tls.ja3_server[0] != '\0') ? is_unsafe_cipher(flow->ssh_tls.server_unsafe_cipher) : "0");
fprintf(csv_fp, "%s,%s,",
flow->ssh_tls.tls_alpn ? flow->ssh_tls.tls_alpn : "",
flow->ssh_tls.tls_supported_versions ? flow->ssh_tls.tls_supported_versions : ""
);
fprintf(csv_fp, "%s,%s,",
flow->ssh_tls.tls_issuerDN ? flow->ssh_tls.tls_issuerDN : "",
flow->ssh_tls.tls_subjectDN ? flow->ssh_tls.tls_subjectDN : ""
);
fprintf(csv_fp, "%s,%s",
(flow->ssh_tls.client_hassh[0] != '\0') ? flow->ssh_tls.client_hassh : "",
(flow->ssh_tls.server_hassh[0] != '\0') ? flow->ssh_tls.server_hassh : ""
);
fprintf(csv_fp, ",%s", flow->info);
}
if((verbose != 1) && (verbose != 2)) {
if(csv_fp && enable_joy_stats) {
flowGetBDMeanandVariance(flow);
}
if(csv_fp)
fprintf(csv_fp, "\n");
return;
}
if(csv_fp || (verbose > 1)) {
#if 1
fprintf(out, "\t%u", id);
#else
fprintf(out, "\t%u(%u)", id, flow->flow_id);
#endif
fprintf(out, "\t%s ", ipProto2Name(flow->protocol));
fprintf(out, "%s%s%s:%u %s %s%s%s:%u ",
(flow->ip_version == 6) ? "[" : "",
flow->src_name, (flow->ip_version == 6) ? "]" : "", ntohs(flow->src_port),
flow->bidirectional ? "<->" : "->",
(flow->ip_version == 6) ? "[" : "",
flow->dst_name, (flow->ip_version == 6) ? "]" : "", ntohs(flow->dst_port)
);
if(flow->vlan_id > 0) fprintf(out, "[VLAN: %u]", flow->vlan_id);
if(enable_payload_analyzer) fprintf(out, "[flowId: %u]", flow->flow_id);
}
if(enable_joy_stats) {
/* Print entropy values for monitored flows. */
flowGetBDMeanandVariance(flow);
fflush(out);
fprintf(out, "[score: %.4f]", flow->entropy.score);
}
if(csv_fp) fprintf(csv_fp, "\n");
fprintf(out, "[proto: ");
if(flow->tunnel_type != ndpi_no_tunnel)
fprintf(out, "%s:", ndpi_tunnel2str(flow->tunnel_type));
fprintf(out, "%s/%s]",
ndpi_protocol2id(ndpi_thread_info[thread_id].workflow->ndpi_struct,
flow->detected_protocol, buf, sizeof(buf)),
ndpi_protocol2name(ndpi_thread_info[thread_id].workflow->ndpi_struct,
flow->detected_protocol, buf1, sizeof(buf1)));
if(flow->detected_protocol.category != 0)
fprintf(out, "[cat: %s/%u]",
ndpi_category_get_name(ndpi_thread_info[thread_id].workflow->ndpi_struct,
flow->detected_protocol.category),
(unsigned int)flow->detected_protocol.category);
fprintf(out, "[%u pkts/%llu bytes ", flow->src2dst_packets, (long long unsigned int) flow->src2dst_bytes);
fprintf(out, "%s %u pkts/%llu bytes]",
(flow->dst2src_packets > 0) ? "<->" : "->",
flow->dst2src_packets, (long long unsigned int) flow->dst2src_bytes);
fprintf(out, "[Goodput ratio: %.0f/%.0f]",
100.0*((float)flow->src2dst_goodput_bytes / (float)(flow->src2dst_bytes+1)),
100.0*((float)flow->dst2src_goodput_bytes / (float)(flow->dst2src_bytes+1)));
if(flow->last_seen > flow->first_seen)
fprintf(out, "[%.2f sec]", ((float)(flow->last_seen - flow->first_seen))/(float)1000);
else
fprintf(out, "[< 1 sec]");
if(flow->telnet.username[0] != '\0') fprintf(out, "[Username: %s]", flow->telnet.username);
if(flow->telnet.password[0] != '\0') fprintf(out, "[Password: %s]", flow->telnet.password);
if(flow->host_server_name[0] != '\0') fprintf(out, "[Host: %s]", flow->host_server_name);
if(flow->info[0] != '\0') fprintf(out, "[%s]", flow->info);
if(flow->flow_extra_info[0] != '\0') fprintf(out, "[%s]", flow->flow_extra_info);
if((flow->src2dst_packets+flow->dst2src_packets) > 5) {
if(flow->iat_c_to_s && flow->iat_s_to_c) {
float data_ratio = ndpi_data_ratio(flow->src2dst_bytes, flow->dst2src_bytes);
fprintf(out, "[bytes ratio: %.3f (%s)]", data_ratio, ndpi_data_ratio2str(data_ratio));
/* IAT (Inter Arrival Time) */
fprintf(out, "[IAT c2s/s2c min/avg/max/stddev: %u/%u %.0f/%.0f %u/%u %.0f/%.0f]",
ndpi_data_min(flow->iat_c_to_s), ndpi_data_min(flow->iat_s_to_c),
(float)ndpi_data_average(flow->iat_c_to_s), (float)ndpi_data_average(flow->iat_s_to_c),
ndpi_data_max(flow->iat_c_to_s), ndpi_data_max(flow->iat_s_to_c),
(float)ndpi_data_stddev(flow->iat_c_to_s), (float)ndpi_data_stddev(flow->iat_s_to_c));
/* Packet Length */
fprintf(out, "[Pkt Len c2s/s2c min/avg/max/stddev: %u/%u %.0f/%.0f %u/%u %.0f/%.0f]",
ndpi_data_min(flow->pktlen_c_to_s), ndpi_data_min(flow->pktlen_s_to_c),
ndpi_data_average(flow->pktlen_c_to_s), ndpi_data_average(flow->pktlen_s_to_c),
ndpi_data_max(flow->pktlen_c_to_s), ndpi_data_max(flow->pktlen_s_to_c),
ndpi_data_stddev(flow->pktlen_c_to_s), ndpi_data_stddev(flow->pktlen_s_to_c));
}
}
if(flow->http.url[0] != '\0') {
ndpi_risk_enum risk = ndpi_validate_url(flow->http.url);
if(risk != NDPI_NO_RISK)
NDPI_SET_BIT(flow->risk, risk);
fprintf(out, "[URL: %s][StatusCode: %u]",
flow->http.url, flow->http.response_status_code);
if(flow->http.content_type[0] != '\0')
fprintf(out, "[Content-Type: %s]", flow->http.content_type);
if(flow->http.user_agent[0] != '\0')
fprintf(out, "[User-Agent: %s]", flow->http.user_agent);
}
if(flow->risk) {
u_int i;
fprintf(out, "[Risk: ");
for(i=0; i<NDPI_MAX_RISK; i++)
if(NDPI_ISSET_BIT(flow->risk, i))
fprintf(out, "** %s **", ndpi_risk2str(i));
fprintf(out, "]");
}
if(flow->ssh_tls.ssl_version != 0) fprintf(out, "[%s]", ndpi_ssl_version2str(flow->ssh_tls.ssl_version, &known_tls));
if(flow->ssh_tls.client_requested_server_name[0] != '\0') fprintf(out, "[Client: %s]", flow->ssh_tls.client_requested_server_name);
if(flow->ssh_tls.client_hassh[0] != '\0') fprintf(out, "[HASSH-C: %s]", flow->ssh_tls.client_hassh);
if(flow->ssh_tls.ja3_client[0] != '\0') fprintf(out, "[JA3C: %s%s]", flow->ssh_tls.ja3_client,
print_cipher(flow->ssh_tls.client_unsafe_cipher));
if(flow->ssh_tls.server_info[0] != '\0') fprintf(out, "[Server: %s]", flow->ssh_tls.server_info);
if(flow->ssh_tls.server_names) fprintf(out, "[ServerNames: %s]", flow->ssh_tls.server_names);
if(flow->ssh_tls.server_hassh[0] != '\0') fprintf(out, "[HASSH-S: %s]", flow->ssh_tls.server_hassh);
if(flow->ssh_tls.ja3_server[0] != '\0') fprintf(out, "[JA3S: %s%s]", flow->ssh_tls.ja3_server,
print_cipher(flow->ssh_tls.server_unsafe_cipher));
if(flow->ssh_tls.tls_issuerDN) fprintf(out, "[Issuer: %s]", flow->ssh_tls.tls_issuerDN);
if(flow->ssh_tls.tls_subjectDN) fprintf(out, "[Subject: %s]", flow->ssh_tls.tls_subjectDN);
if((flow->detected_protocol.master_protocol == NDPI_PROTOCOL_TLS)
|| (flow->detected_protocol.app_protocol == NDPI_PROTOCOL_TLS)) {
if(flow->ssh_tls.sha1_cert_fingerprint_set) {
fprintf(out, "[Certificate SHA-1: ");
for(i=0; i<20; i++)
fprintf(out, "%s%02X", (i > 0) ? ":" : "",
flow->ssh_tls.sha1_cert_fingerprint[i] & 0xFF);
fprintf(out, "]");
}
}
if(flow->ssh_tls.notBefore && flow->ssh_tls.notAfter) {
char notBefore[32], notAfter[32];
struct tm a, b;
struct tm *before = gmtime_r(&flow->ssh_tls.notBefore, &a);
struct tm *after = gmtime_r(&flow->ssh_tls.notAfter, &b);
strftime(notBefore, sizeof(notBefore), "%F %T", before);
strftime(notAfter, sizeof(notAfter), "%F %T", after);
fprintf(out, "[Validity: %s - %s]", notBefore, notAfter);
}
if(flow->ssh_tls.server_cipher != '\0') fprintf(out, "[Cipher: %s]", ndpi_cipher2str(flow->ssh_tls.server_cipher));
if(flow->bittorent_hash[0] != '\0') fprintf(out, "[BT Hash: %s]", flow->bittorent_hash);
if(flow->dhcp_fingerprint[0] != '\0') fprintf(out, "[DHCP Fingerprint: %s]", flow->dhcp_fingerprint);
if(flow->has_human_readeable_strings) fprintf(out, "[PLAIN TEXT (%s)]", flow->human_readeable_string_buffer);
fprintf(out, "\n");
} | 0 | [
"CWE-125"
]
| nDPI | b7e666e465f138ae48ab81976726e67deed12701 | 168,268,128,840,138,250,000,000,000,000,000,000,000 | 297 | Added fix to avoid potential heap buffer overflow in H.323 dissector
Modified HTTP report information to make it closer to the HTTP field names |
void* Type_LUT8_Dup(struct _cms_typehandler_struct* self, const void *Ptr, cmsUInt32Number n)
{
return (void*) cmsPipelineDup((cmsPipeline*) Ptr);
cmsUNUSED_PARAMETER(n);
cmsUNUSED_PARAMETER(self);
} | 0 | []
| Little-CMS | 41d222df1bc6188131a8f46c32eab0a4d4cdf1b6 | 185,282,208,027,600,900,000,000,000,000,000,000,000 | 7 | Memory squeezing fix: lcms2 cmsPipeline construction
When creating a new pipeline, lcms would often try to allocate a stage
and pass it to cmsPipelineInsertStage without checking whether the
allocation succeeded. cmsPipelineInsertStage would then assert (or crash)
if it had not.
The fix here is to change cmsPipelineInsertStage to check and return
an error value. All calling code is then checked to test this return
value and cope. |
GF_Err stsf_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i, j;
u32 nb_entries;
GF_StsfEntry *p;
GF_SampleFragmentBox *ptr = (GF_SampleFragmentBox *)s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
nb_entries = gf_list_count(ptr->entryList);
gf_bs_write_u32(bs, nb_entries);
for ( i = 0; i < nb_entries; i++ ) {
p = (GF_StsfEntry*)gf_list_get(ptr->entryList, i);
gf_bs_write_u32(bs, p->SampleNumber);
gf_bs_write_u32(bs, p->fragmentCount);
for (j=0; j<p->fragmentCount; j++) {
gf_bs_write_u16(bs, p->fragmentSizes[j]);
}
}
return GF_OK;
} | 0 | [
"CWE-125"
]
| gpac | bceb03fd2be95097a7b409ea59914f332fb6bc86 | 130,180,792,919,528,490,000,000,000,000,000,000,000 | 22 | fixed 2 possible heap overflows (inc. #1088) |
lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
switch (func_id) {
case BPF_FUNC_skb_get_tunnel_key:
return &bpf_skb_get_tunnel_key_proto;
case BPF_FUNC_skb_set_tunnel_key:
return bpf_get_skb_set_tunnel_proto(func_id);
case BPF_FUNC_skb_get_tunnel_opt:
return &bpf_skb_get_tunnel_opt_proto;
case BPF_FUNC_skb_set_tunnel_opt:
return bpf_get_skb_set_tunnel_proto(func_id);
case BPF_FUNC_redirect:
return &bpf_redirect_proto;
case BPF_FUNC_clone_redirect:
return &bpf_clone_redirect_proto;
case BPF_FUNC_skb_change_tail:
return &bpf_skb_change_tail_proto;
case BPF_FUNC_skb_change_head:
return &bpf_skb_change_head_proto;
case BPF_FUNC_skb_store_bytes:
return &bpf_skb_store_bytes_proto;
case BPF_FUNC_csum_update:
return &bpf_csum_update_proto;
case BPF_FUNC_l3_csum_replace:
return &bpf_l3_csum_replace_proto;
case BPF_FUNC_l4_csum_replace:
return &bpf_l4_csum_replace_proto;
case BPF_FUNC_set_hash_invalid:
return &bpf_set_hash_invalid_proto;
default:
return lwt_inout_func_proto(func_id, prog);
}
} | 0 | [
"CWE-120"
]
| linux | 050fad7c4534c13c8eb1d9c2ba66012e014773cb | 31,647,829,470,993,735,000,000,000,000,000,000,000 | 33 | bpf: fix truncated jump targets on heavy expansions
Recently during testing, I ran into the following panic:
[ 207.892422] Internal error: Accessing user space memory outside uaccess.h routines: 96000004 [#1] SMP
[ 207.901637] Modules linked in: binfmt_misc [...]
[ 207.966530] CPU: 45 PID: 2256 Comm: test_verifier Tainted: G W 4.17.0-rc3+ #7
[ 207.974956] Hardware name: FOXCONN R2-1221R-A4/C2U4N_MB, BIOS G31FB18A 03/31/2017
[ 207.982428] pstate: 60400005 (nZCv daif +PAN -UAO)
[ 207.987214] pc : bpf_skb_load_helper_8_no_cache+0x34/0xc0
[ 207.992603] lr : 0xffff000000bdb754
[ 207.996080] sp : ffff000013703ca0
[ 207.999384] x29: ffff000013703ca0 x28: 0000000000000001
[ 208.004688] x27: 0000000000000001 x26: 0000000000000000
[ 208.009992] x25: ffff000013703ce0 x24: ffff800fb4afcb00
[ 208.015295] x23: ffff00007d2f5038 x22: ffff00007d2f5000
[ 208.020599] x21: fffffffffeff2a6f x20: 000000000000000a
[ 208.025903] x19: ffff000009578000 x18: 0000000000000a03
[ 208.031206] x17: 0000000000000000 x16: 0000000000000000
[ 208.036510] x15: 0000ffff9de83000 x14: 0000000000000000
[ 208.041813] x13: 0000000000000000 x12: 0000000000000000
[ 208.047116] x11: 0000000000000001 x10: ffff0000089e7f18
[ 208.052419] x9 : fffffffffeff2a6f x8 : 0000000000000000
[ 208.057723] x7 : 000000000000000a x6 : 00280c6160000000
[ 208.063026] x5 : 0000000000000018 x4 : 0000000000007db6
[ 208.068329] x3 : 000000000008647a x2 : 19868179b1484500
[ 208.073632] x1 : 0000000000000000 x0 : ffff000009578c08
[ 208.078938] Process test_verifier (pid: 2256, stack limit = 0x0000000049ca7974)
[ 208.086235] Call trace:
[ 208.088672] bpf_skb_load_helper_8_no_cache+0x34/0xc0
[ 208.093713] 0xffff000000bdb754
[ 208.096845] bpf_test_run+0x78/0xf8
[ 208.100324] bpf_prog_test_run_skb+0x148/0x230
[ 208.104758] sys_bpf+0x314/0x1198
[ 208.108064] el0_svc_naked+0x30/0x34
[ 208.111632] Code: 91302260 f9400001 f9001fa1 d2800001 (29500680)
[ 208.117717] ---[ end trace 263cb8a59b5bf29f ]---
The program itself which caused this had a long jump over the whole
instruction sequence where all of the inner instructions required
heavy expansions into multiple BPF instructions. Additionally, I also
had BPF hardening enabled which requires once more rewrites of all
constant values in order to blind them. Each time we rewrite insns,
bpf_adj_branches() would need to potentially adjust branch targets
which cross the patchlet boundary to accommodate for the additional
delta. Eventually that lead to the case where the target offset could
not fit into insn->off's upper 0x7fff limit anymore where then offset
wraps around becoming negative (in s16 universe), or vice versa
depending on the jump direction.
Therefore it becomes necessary to detect and reject any such occasions
in a generic way for native eBPF and cBPF to eBPF migrations. For
the latter we can simply check bounds in the bpf_convert_filter()'s
BPF_EMIT_JMP helper macro and bail out once we surpass limits. The
bpf_patch_insn_single() for native eBPF (and cBPF to eBPF in case
of subsequent hardening) is a bit more complex in that we need to
detect such truncations before hitting the bpf_prog_realloc(). Thus
the latter is split into an extra pass to probe problematic offsets
on the original program in order to fail early. With that in place
and carefully tested I no longer hit the panic and the rewrites are
rejected properly. The above example panic I've seen on bpf-next,
though the issue itself is generic in that a guard against this issue
in bpf seems more appropriate in this case.
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Martin KaFai Lau <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]> |
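The truncation described above can be shown with a couple of integers; the values below are illustrative only, not taken from the report. Adding a large patch delta to a jump offset stored in 16 bits wraps it negative, which is exactly the condition the fix now detects and rejects:

```c
#include <stdio.h>

int main(void)
{
	short off = 0x7f00;	/* jump offset already close to the s16 limit */
	int delta = 0x500;	/* extra instructions from expansion/blinding */
	/* The narrowing conversion below is what a naive adjustment stores. */
	short adjusted = (short)(off + delta);

	printf("off=%d delta=%d adjusted=%d\n", off, delta, adjusted);
	/* adjusted is negative here, so the jump would go backwards. */
	return 0;
}
```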
intrusive_ptr<DocumentSourceGroup> DocumentSourceGroup::create(
const intrusive_ptr<ExpressionContext>& pExpCtx,
const boost::intrusive_ptr<Expression>& groupByExpression,
std::vector<AccumulationStatement> accumulationStatements,
boost::optional<size_t> maxMemoryUsageBytes) {
size_t memoryBytes = maxMemoryUsageBytes ? *maxMemoryUsageBytes
: internalDocumentSourceGroupMaxMemoryBytes.load();
intrusive_ptr<DocumentSourceGroup> groupStage(new DocumentSourceGroup(pExpCtx, memoryBytes));
groupStage->setIdExpression(groupByExpression);
for (auto&& statement : accumulationStatements) {
groupStage->addAccumulator(statement);
}
return groupStage;
} | 0 | []
| mongo | 07b8851825836911265e909d6842d4586832f9bb | 101,793,847,670,456,860,000,000,000,000,000,000,000 | 15 | SERVER-60218-44: SERVER-60218 add initialize helper function for document_source_group (cherry picked from commit 867f52afbb79bc00e35c70f8e0681b7d602f97b2) |
CmdAuthSchemaUpgrade() : BasicCommand("authSchemaUpgrade") {} | 0 | [
"CWE-613"
]
| mongo | db19e7ce84cfd702a4ba9983ee2ea5019f470f82 | 274,029,543,761,583,730,000,000,000,000,000,000,000 | 1 | SERVER-38984 Validate unique User ID on UserCache hit
(cherry picked from commit e55d6e2292e5dbe2f97153251d8193d1cc89f5d7) |
static int i40e_get_link_speed(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
switch (pf->hw.phy.link_info.link_speed) {
case I40E_LINK_SPEED_40GB:
return 40000;
case I40E_LINK_SPEED_25GB:
return 25000;
case I40E_LINK_SPEED_20GB:
return 20000;
case I40E_LINK_SPEED_10GB:
return 10000;
case I40E_LINK_SPEED_1GB:
return 1000;
default:
return -EINVAL;
}
} | 0 | [
"CWE-400",
"CWE-401"
]
| linux | 27d461333459d282ffa4a2bdb6b215a59d493a8f | 317,986,023,333,769,050,000,000,000,000,000,000,000 | 19 | i40e: prevent memory leak in i40e_setup_macvlans
In i40e_setup_macvlans if i40e_setup_channel fails the allocated memory
for ch should be released.
Signed-off-by: Navid Emamdoost <[email protected]>
Tested-by: Andrew Bowers <[email protected]>
Signed-off-by: Jeff Kirsher <[email protected]> |
static void com_property_write(zval *object, zval *member, zval *value, void **cache_slot)
{
php_com_dotnet_object *obj;
VARIANT v;
obj = CDNO_FETCH(object);
if (V_VT(&obj->v) == VT_DISPATCH) {
VariantInit(&v);
convert_to_string_ex(member);
if (SUCCESS == php_com_do_invoke(obj, Z_STRVAL_P(member), Z_STRLEN_P(member),
DISPATCH_PROPERTYPUT|DISPATCH_PROPERTYPUTREF, &v, 1, value, 0)) {
VariantClear(&v);
}
} else {
php_com_throw_exception(E_INVALIDARG, "this variant has no properties");
}
} | 0 | [
"CWE-502"
]
| php-src | 115ee49b0be12e3df7d2c7027609fbe1a1297e42 | 73,495,642,473,475,270,000,000,000,000,000,000,000 | 19 | Fix #77177: Serializing or unserializing COM objects crashes
Firstly, we avoid returning NULL from the get_property handler, but
instead return an empty HashTable, which already prevents the crashes.
Secondly, since (de-)serialization obviously makes no sense for COM,
DOTNET and VARIANT objects (at least with the current implementation),
we prohibit it right away. |
bytes_contains(PyObject *self, PyObject *arg)
{
Py_ssize_t ival = PyNumber_AsSsize_t(arg, PyExc_ValueError);
if (ival == -1 && PyErr_Occurred()) {
Py_buffer varg;
Py_ssize_t pos;
PyErr_Clear();
if (PyObject_GetBuffer(arg, &varg, PyBUF_SIMPLE) != 0)
return -1;
pos = stringlib_find(PyBytes_AS_STRING(self), Py_SIZE(self),
varg.buf, varg.len, 0);
PyBuffer_Release(&varg);
return pos >= 0;
}
if (ival < 0 || ival >= 256) {
PyErr_SetString(PyExc_ValueError, "byte must be in range(0, 256)");
return -1;
}
return memchr(PyBytes_AS_STRING(self), (int) ival, Py_SIZE(self)) != NULL;
} | 0 | [
"CWE-190"
]
| cpython | 6c004b40f9d51872d848981ef1a18bb08c2dfc42 | 330,166,141,104,512,500,000,000,000,000,000,000,000 | 21 | bpo-30657: Fix CVE-2017-1000158 (#4758)
Fixes possible integer overflow in PyBytes_DecodeEscape.
Co-Authored-By: Jay Bosamiya <[email protected]> |
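As a hedged sketch of the guard idea behind such a fix (the helper name and the factor of four are illustrative; this is not CPython's code), a multiplication used for an allocation size can be checked before it wraps:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Return 0 and the budget if len * 4 fits in size_t, -1 otherwise. */
static int escape_budget(size_t len, size_t *out)
{
	if (len > SIZE_MAX / 4)
		return -1;		/* would overflow: refuse instead of wrapping */
	*out = len * 4;
	return 0;
}

int main(void)
{
	size_t budget;

	if (escape_budget(SIZE_MAX / 2, &budget) != 0)
		puts("rejected oversized input");
	if (escape_budget(16, &budget) == 0)
		printf("budget=%zu\n", budget);
	return 0;
}
```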
static MagickCLEnv AcquireMagickCLEnv(void)
{
const char
*option;
MagickCLEnv
clEnv;
clEnv=(MagickCLEnv) AcquireMagickMemory(sizeof(*clEnv));
if (clEnv != (MagickCLEnv) NULL)
{
(void) ResetMagickMemory(clEnv,0,sizeof(*clEnv));
ActivateSemaphoreInfo(&clEnv->lock);
clEnv->cpu_score=MAGICKCORE_OPENCL_UNDEFINED_SCORE;
clEnv->enabled=MagickTrue;
option=getenv("MAGICK_OCL_DEVICE");
if ((option != (const char *) NULL) && (strcmp(option,"OFF") == 0))
clEnv->enabled=MagickFalse;
}
return clEnv;
} | 0 | [
"CWE-476"
]
| ImageMagick | cca91aa1861818342e3d072bb0fad7dc4ffac24a | 80,149,038,555,873,100,000,000,000,000,000,000,000 | 21 | https://github.com/ImageMagick/ImageMagick/issues/790 |
void RGWCORSRule::dump_origins() {
unsigned num_origins = allowed_origins.size();
dout(10) << "Allowed origins : " << num_origins << dendl;
for(set<string>::iterator it = allowed_origins.begin();
it != allowed_origins.end();
++it) {
dout(10) << *it << "," << dendl;
}
} | 0 | [
"CWE-113"
]
| ceph | 46817f30cee60bc5df8354ab326762e7c783fe2c | 43,694,218,182,942,760,000,000,000,000,000,000,000 | 9 | rgw: sanitize newlines in s3 CORSConfiguration's ExposeHeader
the values in the <ExposeHeader> element are sent back to clients in an
Access-Control-Expose-Headers response header. if the values are allowed
to have newlines in them, they can be used to inject arbitrary response
headers
this issue only affects s3, which gets these values from an xml document
in swift, they're given in the request header
X-Container-Meta-Access-Control-Expose-Headers, so the value itself
cannot contain newlines
Signed-off-by: Casey Bodley <[email protected]>
Reported-by: Adam Mohammed <[email protected]> |
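The sanitisation idea in this commit message can be illustrated in a few lines. The snippet below is a generic illustration, not the rgw change itself: removing CR and LF from a value before echoing it into a response header prevents it from starting a new header line.

```c
#include <stdio.h>

/* Drop '\r' and '\n' in place so the value cannot break out of its header. */
static void strip_newlines(char *s)
{
	char *dst = s;

	for (; *s != '\0'; s++)
		if (*s != '\r' && *s != '\n')
			*dst++ = *s;
	*dst = '\0';
}

int main(void)
{
	char value[] = "x-custom\r\nSet-Cookie: injected=1";

	strip_newlines(value);
	printf("Access-Control-Expose-Headers: %s\r\n", value);
	return 0;
}
```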
bool vcol_assignment_allowed_value() const { return true; } | 0 | [
"CWE-617"
]
| server | 2e7891080667c59ac80f788eef4d59d447595772 | 101,833,151,645,145,400,000,000,000,000,000,000,000 | 1 | MDEV-25635 Assertion failure when pushing from HAVING into WHERE of view
This bug could manifest itself after pushing a where condition over a
mergeable derived table / view / CTE DT into a grouping view / derived
table / CTE V whose item list contained set functions with constant
arguments such as MIN(2), SUM(1) etc. In such cases the field references
used in the condition pushed into the view V that correspond set functions
are wrapped into Item_direct_view_ref wrappers. Due to a wrong implementation
of the virtual method const_item() for the class Item_direct_view_ref the
wrapped set functions with constant arguments could be erroneously taken
for constant items. This could lead to a wrong result set returned by the
main select query in 10.2. In 10.4 where a possibility of pushing condition
from HAVING into WHERE had been added this could cause a crash.
Approved by Sergey Petrunya <[email protected]> |
isdn_ppp_dev_ioctl_stats(int slot, struct ifreq *ifr, struct net_device *dev)
{
struct ppp_stats __user *res = ifr->ifr_data;
struct ppp_stats t;
isdn_net_local *lp = netdev_priv(dev);
if (!access_ok(VERIFY_WRITE, res, sizeof(struct ppp_stats)))
return -EFAULT;
/* build a temporary stat struct and copy it to user space */
memset(&t, 0, sizeof(struct ppp_stats));
if (dev->flags & IFF_UP) {
t.p.ppp_ipackets = lp->stats.rx_packets;
t.p.ppp_ibytes = lp->stats.rx_bytes;
t.p.ppp_ierrors = lp->stats.rx_errors;
t.p.ppp_opackets = lp->stats.tx_packets;
t.p.ppp_obytes = lp->stats.tx_bytes;
t.p.ppp_oerrors = lp->stats.tx_errors;
#ifdef CONFIG_ISDN_PPP_VJ
if (slot >= 0 && ippp_table[slot]->slcomp) {
struct slcompress *slcomp = ippp_table[slot]->slcomp;
t.vj.vjs_packets = slcomp->sls_o_compressed + slcomp->sls_o_uncompressed;
t.vj.vjs_compressed = slcomp->sls_o_compressed;
t.vj.vjs_searches = slcomp->sls_o_searches;
t.vj.vjs_misses = slcomp->sls_o_misses;
t.vj.vjs_errorin = slcomp->sls_i_error;
t.vj.vjs_tossed = slcomp->sls_i_tossed;
t.vj.vjs_uncompressedin = slcomp->sls_i_uncompressed;
t.vj.vjs_compressedin = slcomp->sls_i_compressed;
}
#endif
}
if (copy_to_user(res, &t, sizeof(struct ppp_stats)))
return -EFAULT;
return 0;
} | 0 | []
| linux | 4ab42d78e37a294ac7bc56901d563c642e03c4ae | 88,324,388,497,301,600,000,000,000,000,000,000,000 | 37 | ppp, slip: Validate VJ compression slot parameters completely
Currently slhc_init() treats out-of-range values of rslots and tslots
as equivalent to 0, except that if tslots is too large it will
dereference a null pointer (CVE-2015-7799).
Add a range-check at the top of the function and make it return an
ERR_PTR() on error instead of NULL. Change the callers accordingly.
Compile-tested only.
Reported-by: 郭永刚 <[email protected]>
References: http://article.gmane.org/gmane.comp.security.oss.general/17908
Signed-off-by: Ben Hutchings <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
{
struct mmu_gather_batch *batch;
VM_BUG_ON(!tlb->end);
VM_WARN_ON(tlb->page_size != page_size);
batch = tlb->active;
/*
* Add the page and check if we are full. If so
* force a flush.
*/
batch->pages[batch->nr++] = page;
if (batch->nr == batch->max) {
if (!tlb_next_batch(tlb))
return true;
batch = tlb->active;
}
VM_BUG_ON_PAGE(batch->nr > batch->max, page);
return false;
} | 0 | [
"CWE-119"
]
| linux | 1be7107fbe18eed3e319a6c3e83c78254b693acb | 136,125,200,123,092,680,000,000,000,000,000,000,000 | 22 | mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
no limit for the stack size limit because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation wise, first delete all the old code for stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]> |
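The commit message says the gap is expressed in page units and defaults to 1 MB on systems with 4 kB pages; the tiny calculation below just spells out that arithmetic (the variable names are illustrative, though stack_guard_gap is also the command line option named above).

```c
#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;		/* 4 kB base pages, as in the text */
	unsigned long stack_guard_gap = 256;	/* gap in page units */

	/* 256 pages * 4096 bytes = 1 MiB default guard gap. */
	printf("guard gap = %lu KiB\n", stack_guard_gap * page_size / 1024);
	return 0;
}
```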
static int detect_early_fail(compiler_common *common, PCRE2_SPTR cc, int *private_data_start,
sljit_s32 depth, int start, BOOL fast_forward_allowed)
{
PCRE2_SPTR begin = cc;
PCRE2_SPTR next_alt;
PCRE2_SPTR end;
PCRE2_SPTR accelerated_start;
BOOL prev_fast_forward_allowed;
int result = 0;
int count;
SLJIT_ASSERT(*cc == OP_ONCE || *cc == OP_BRA || *cc == OP_CBRA);
SLJIT_ASSERT(*cc != OP_CBRA || common->optimized_cbracket[GET2(cc, 1 + LINK_SIZE)] != 0);
SLJIT_ASSERT(start < EARLY_FAIL_ENHANCE_MAX);
next_alt = cc + GET(cc, 1);
if (*next_alt == OP_ALT)
fast_forward_allowed = FALSE;
do
{
count = start;
cc += 1 + LINK_SIZE + ((*cc == OP_CBRA) ? IMM2_SIZE : 0);
while (TRUE)
{
accelerated_start = NULL;
switch(*cc)
{
case OP_SOD:
case OP_SOM:
case OP_SET_SOM:
case OP_NOT_WORD_BOUNDARY:
case OP_WORD_BOUNDARY:
case OP_EODN:
case OP_EOD:
case OP_CIRC:
case OP_CIRCM:
case OP_DOLL:
case OP_DOLLM:
/* Zero width assertions. */
cc++;
continue;
case OP_NOT_DIGIT:
case OP_DIGIT:
case OP_NOT_WHITESPACE:
case OP_WHITESPACE:
case OP_NOT_WORDCHAR:
case OP_WORDCHAR:
case OP_ANY:
case OP_ALLANY:
case OP_ANYBYTE:
case OP_NOT_HSPACE:
case OP_HSPACE:
case OP_NOT_VSPACE:
case OP_VSPACE:
fast_forward_allowed = FALSE;
cc++;
continue;
case OP_ANYNL:
case OP_EXTUNI:
fast_forward_allowed = FALSE;
if (count == 0)
count = 1;
cc++;
continue;
case OP_NOTPROP:
case OP_PROP:
fast_forward_allowed = FALSE;
cc += 1 + 2;
continue;
case OP_CHAR:
case OP_CHARI:
case OP_NOT:
case OP_NOTI:
fast_forward_allowed = FALSE;
cc += 2;
#ifdef SUPPORT_UNICODE
if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]);
#endif
continue;
case OP_TYPESTAR:
case OP_TYPEMINSTAR:
case OP_TYPEPLUS:
case OP_TYPEMINPLUS:
case OP_TYPEPOSSTAR:
case OP_TYPEPOSPLUS:
/* The type or prop opcode is skipped in the next iteration. */
cc += 1;
if (cc[0] != OP_ANYNL && cc[0] != OP_EXTUNI)
{
accelerated_start = cc - 1;
break;
}
if (count == 0)
count = 1;
fast_forward_allowed = FALSE;
continue;
case OP_TYPEUPTO:
case OP_TYPEMINUPTO:
case OP_TYPEEXACT:
case OP_TYPEPOSUPTO:
cc += IMM2_SIZE;
/* Fall through */
case OP_TYPEQUERY:
case OP_TYPEMINQUERY:
case OP_TYPEPOSQUERY:
/* The type or prop opcode is skipped in the next iteration. */
fast_forward_allowed = FALSE;
if (count == 0)
count = 1;
cc += 1;
continue;
case OP_STAR:
case OP_MINSTAR:
case OP_PLUS:
case OP_MINPLUS:
case OP_POSSTAR:
case OP_POSPLUS:
case OP_STARI:
case OP_MINSTARI:
case OP_PLUSI:
case OP_MINPLUSI:
case OP_POSSTARI:
case OP_POSPLUSI:
case OP_NOTSTAR:
case OP_NOTMINSTAR:
case OP_NOTPLUS:
case OP_NOTMINPLUS:
case OP_NOTPOSSTAR:
case OP_NOTPOSPLUS:
case OP_NOTSTARI:
case OP_NOTMINSTARI:
case OP_NOTPLUSI:
case OP_NOTMINPLUSI:
case OP_NOTPOSSTARI:
case OP_NOTPOSPLUSI:
accelerated_start = cc;
cc += 2;
#ifdef SUPPORT_UNICODE
if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]);
#endif
break;
case OP_UPTO:
case OP_MINUPTO:
case OP_EXACT:
case OP_POSUPTO:
case OP_UPTOI:
case OP_MINUPTOI:
case OP_EXACTI:
case OP_POSUPTOI:
case OP_NOTUPTO:
case OP_NOTMINUPTO:
case OP_NOTEXACT:
case OP_NOTPOSUPTO:
case OP_NOTUPTOI:
case OP_NOTMINUPTOI:
case OP_NOTEXACTI:
case OP_NOTPOSUPTOI:
cc += IMM2_SIZE;
/* Fall through */
case OP_QUERY:
case OP_MINQUERY:
case OP_POSQUERY:
case OP_QUERYI:
case OP_MINQUERYI:
case OP_POSQUERYI:
case OP_NOTQUERY:
case OP_NOTMINQUERY:
case OP_NOTPOSQUERY:
case OP_NOTQUERYI:
case OP_NOTMINQUERYI:
case OP_NOTPOSQUERYI:
fast_forward_allowed = FALSE;
if (count == 0)
count = 1;
cc += 2;
#ifdef SUPPORT_UNICODE
if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]);
#endif
continue;
case OP_CLASS:
case OP_NCLASS:
#if defined SUPPORT_UNICODE || PCRE2_CODE_UNIT_WIDTH != 8
case OP_XCLASS:
accelerated_start = cc;
cc += ((*cc == OP_XCLASS) ? GET(cc, 1) : (unsigned int)(1 + (32 / sizeof(PCRE2_UCHAR))));
#else
accelerated_start = cc;
cc += (1 + (32 / sizeof(PCRE2_UCHAR)));
#endif
switch (*cc)
{
case OP_CRSTAR:
case OP_CRMINSTAR:
case OP_CRPLUS:
case OP_CRMINPLUS:
case OP_CRPOSSTAR:
case OP_CRPOSPLUS:
cc++;
break;
case OP_CRRANGE:
case OP_CRMINRANGE:
case OP_CRPOSRANGE:
cc += 2 * IMM2_SIZE;
/* Fall through */
case OP_CRQUERY:
case OP_CRMINQUERY:
case OP_CRPOSQUERY:
cc++;
if (count == 0)
count = 1;
/* Fall through */
default:
accelerated_start = NULL;
fast_forward_allowed = FALSE;
continue;
}
break;
case OP_ONCE:
case OP_BRA:
case OP_CBRA:
end = cc + GET(cc, 1);
prev_fast_forward_allowed = fast_forward_allowed;
fast_forward_allowed = FALSE;
if (depth >= 4)
break;
end = bracketend(cc) - (1 + LINK_SIZE);
if (*end != OP_KET || (*cc == OP_CBRA && common->optimized_cbracket[GET2(cc, 1 + LINK_SIZE)] == 0))
break;
count = detect_early_fail(common, cc, private_data_start, depth + 1, count, prev_fast_forward_allowed);
if (PRIVATE_DATA(cc) != 0)
common->private_data_ptrs[begin - common->start] = 1;
if (count < EARLY_FAIL_ENHANCE_MAX)
{
cc = end + (1 + LINK_SIZE);
continue;
}
break;
case OP_KET:
SLJIT_ASSERT(PRIVATE_DATA(cc) == 0);
if (cc >= next_alt)
break;
cc += 1 + LINK_SIZE;
continue;
}
if (accelerated_start != NULL)
{
if (count == 0)
{
count++;
if (fast_forward_allowed)
{
common->fast_forward_bc_ptr = accelerated_start;
common->private_data_ptrs[(accelerated_start + 1) - common->start] = ((*private_data_start) << 3) | type_skip;
*private_data_start += sizeof(sljit_sw);
}
else
{
common->private_data_ptrs[(accelerated_start + 1) - common->start] = ((*private_data_start) << 3) | type_fail;
if (common->early_fail_start_ptr == 0)
common->early_fail_start_ptr = *private_data_start;
*private_data_start += sizeof(sljit_sw);
common->early_fail_end_ptr = *private_data_start;
if (*private_data_start > SLJIT_MAX_LOCAL_SIZE)
return EARLY_FAIL_ENHANCE_MAX;
}
}
else
{
common->private_data_ptrs[(accelerated_start + 1) - common->start] = ((*private_data_start) << 3) | type_fail_range;
if (common->early_fail_start_ptr == 0)
common->early_fail_start_ptr = *private_data_start;
*private_data_start += 2 * sizeof(sljit_sw);
common->early_fail_end_ptr = *private_data_start;
if (*private_data_start > SLJIT_MAX_LOCAL_SIZE)
return EARLY_FAIL_ENHANCE_MAX;
}
/* Cannot be part of a repeat. */
common->private_data_ptrs[begin - common->start] = 1;
count++;
if (count < EARLY_FAIL_ENHANCE_MAX)
continue;
}
break;
}
if (*cc != OP_ALT && *cc != OP_KET)
result = EARLY_FAIL_ENHANCE_MAX;
else if (result < count)
result = count;
cc = next_alt;
next_alt = cc + GET(cc, 1);
}
while (*cc == OP_ALT);
return result;
} | 0 | [
"CWE-125"
]
| pcre2 | 50a51cb7e67268e6ad417eb07c9de9bfea5cc55a | 108,353,024,143,874,090,000,000,000,000,000,000,000 | 336 | Fixed a unicode properrty matching issue in JIT |
static inline int vmsvga_copy_rect(struct vmsvga_state_s *s,
int x0, int y0, int x1, int y1, int w, int h)
{
DisplaySurface *surface = qemu_console_surface(s->vga.con);
uint8_t *vram = s->vga.vram_ptr;
int bypl = surface_stride(surface);
int bypp = surface_bytes_per_pixel(surface);
int width = bypp * w;
int line = h;
uint8_t *ptr[2];
if (!vmsvga_verify_rect(surface, "vmsvga_copy_rect/src", x0, y0, w, h)) {
return -1;
}
if (!vmsvga_verify_rect(surface, "vmsvga_copy_rect/dst", x1, y1, w, h)) {
return -1;
}
if (y1 > y0) {
ptr[0] = vram + bypp * x0 + bypl * (y0 + h - 1);
ptr[1] = vram + bypp * x1 + bypl * (y1 + h - 1);
for (; line > 0; line --, ptr[0] -= bypl, ptr[1] -= bypl) {
memmove(ptr[1], ptr[0], width);
}
} else {
ptr[0] = vram + bypp * x0 + bypl * y0;
ptr[1] = vram + bypp * x1 + bypl * y1;
for (; line > 0; line --, ptr[0] += bypl, ptr[1] += bypl) {
memmove(ptr[1], ptr[0], width);
}
}
vmsvga_update_rect_delayed(s, x1, y1, w, h);
return 0;
} | 0 | []
| qemu | fa892e9abb728e76afcf27323ab29c57fb0fe7aa | 113,561,342,235,027,820,000,000,000,000,000,000,000 | 35 | ui/cursor: fix integer overflow in cursor_alloc (CVE-2021-4206)
Prevent potential integer overflow by limiting 'width' and 'height' to
512x512. Also change 'datasize' type to size_t. Refer to security
advisory https://starlabs.sg/advisories/22-4206/ for more information.
Fixes: CVE-2021-4206
Signed-off-by: Mauro Matteo Cascella <[email protected]>
Reviewed-by: Marc-André Lureau <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Gerd Hoffmann <[email protected]> |
static void perf_cpu_hrtimer_restart(struct perf_cpu_context *cpuctx)
{
struct hrtimer *hr = &cpuctx->hrtimer;
struct pmu *pmu = cpuctx->ctx.pmu;
/* not for SW PMU */
if (pmu->task_ctx_nr == perf_sw_context)
return;
if (hrtimer_active(hr))
return;
if (!hrtimer_callback_running(hr))
__hrtimer_start_range_ns(hr, cpuctx->hrtimer_interval,
0, HRTIMER_MODE_REL_PINNED, 0);
} | 0 | [
"CWE-284",
"CWE-264"
]
| linux | f63a8daa5812afef4f06c962351687e1ff9ccb2b | 280,465,861,452,328,850,000,000,000,000,000,000,000 | 16 | perf: Fix event->ctx locking
There have been a few reported issues wrt. the lack of locking around
changing event->ctx. This patch tries to address those.
It avoids the whole rwsem thing; and while it appears to work, please
give it some thought in review.
What I did fail at is sensible runtime checks on the use of
event->ctx, the RCU use makes it very hard.
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Linus Torvalds <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]> |
do_ressubflags(
sockaddr_u *srcadr,
endpt *inter,
struct req_pkt *inpkt
)
{
do_restrict(srcadr, inter, inpkt, RESTRICT_UNFLAG);
} | 0 | [
"CWE-190"
]
| ntp | c04c3d3d940dfe1a53132925c4f51aef017d2e0f | 119,343,391,697,079,090,000,000,000,000,000,000,000 | 8 | [TALOS-CAN-0052] crash by loop counter underrun. |
isdn_ppp_push_higher(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *skb, int proto)
{
struct net_device *dev = net_dev->dev;
struct ippp_struct *is, *mis;
isdn_net_local *mlp = NULL;
int slot;
slot = lp->ppp_slot;
if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
printk(KERN_ERR "isdn_ppp_push_higher: lp->ppp_slot(%d)\n",
lp->ppp_slot);
goto drop_packet;
}
is = ippp_table[slot];
if (lp->master) { // FIXME?
mlp = ISDN_MASTER_PRIV(lp);
slot = mlp->ppp_slot;
if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
printk(KERN_ERR "isdn_ppp_push_higher: master->ppp_slot(%d)\n",
lp->ppp_slot);
goto drop_packet;
}
}
mis = ippp_table[slot];
if (is->debug & 0x10) {
printk(KERN_DEBUG "push, skb %d %04x\n", (int) skb->len, proto);
isdn_ppp_frame_log("rpush", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
}
if (mis->compflags & SC_DECOMP_ON) {
skb = isdn_ppp_decompress(skb, is, mis, &proto);
if (!skb) // decompression error
return;
}
switch (proto) {
case PPP_IPX: /* untested */
if (is->debug & 0x20)
printk(KERN_DEBUG "isdn_ppp: IPX\n");
skb->protocol = htons(ETH_P_IPX);
break;
case PPP_IP:
if (is->debug & 0x20)
printk(KERN_DEBUG "isdn_ppp: IP\n");
skb->protocol = htons(ETH_P_IP);
break;
case PPP_COMP:
case PPP_COMPFRAG:
printk(KERN_INFO "isdn_ppp: unexpected compressed frame dropped\n");
goto drop_packet;
#ifdef CONFIG_ISDN_PPP_VJ
case PPP_VJC_UNCOMP:
if (is->debug & 0x20)
printk(KERN_DEBUG "isdn_ppp: VJC_UNCOMP\n");
if (net_dev->local->ppp_slot < 0) {
printk(KERN_ERR "%s: net_dev->local->ppp_slot(%d) out of range\n",
__func__, net_dev->local->ppp_slot);
goto drop_packet;
}
if (slhc_remember(ippp_table[net_dev->local->ppp_slot]->slcomp, skb->data, skb->len) <= 0) {
printk(KERN_WARNING "isdn_ppp: received illegal VJC_UNCOMP frame!\n");
goto drop_packet;
}
skb->protocol = htons(ETH_P_IP);
break;
case PPP_VJC_COMP:
if (is->debug & 0x20)
printk(KERN_DEBUG "isdn_ppp: VJC_COMP\n");
{
struct sk_buff *skb_old = skb;
int pkt_len;
skb = dev_alloc_skb(skb_old->len + 128);
if (!skb) {
printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
skb = skb_old;
goto drop_packet;
}
skb_put(skb, skb_old->len + 128);
skb_copy_from_linear_data(skb_old, skb->data,
skb_old->len);
if (net_dev->local->ppp_slot < 0) {
printk(KERN_ERR "%s: net_dev->local->ppp_slot(%d) out of range\n",
__func__, net_dev->local->ppp_slot);
goto drop_packet;
}
pkt_len = slhc_uncompress(ippp_table[net_dev->local->ppp_slot]->slcomp,
skb->data, skb_old->len);
kfree_skb(skb_old);
if (pkt_len < 0)
goto drop_packet;
skb_trim(skb, pkt_len);
skb->protocol = htons(ETH_P_IP);
}
break;
#endif
case PPP_CCP:
case PPP_CCPFRAG:
isdn_ppp_receive_ccp(net_dev, lp, skb, proto);
/* Dont pop up ResetReq/Ack stuff to the daemon any
longer - the job is done already */
if (skb->data[0] == CCP_RESETREQ ||
skb->data[0] == CCP_RESETACK)
break;
/* fall through */
default:
isdn_ppp_fill_rq(skb->data, skb->len, proto, lp->ppp_slot); /* push data to pppd device */
kfree_skb(skb);
return;
}
#ifdef CONFIG_IPPP_FILTER
/* check if the packet passes the pass and active filters
* the filter instructions are constructed assuming
* a four-byte PPP header on each packet (which is still present) */
skb_push(skb, 4);
{
u_int16_t *p = (u_int16_t *) skb->data;
*p = 0; /* indicate inbound */
}
if (is->pass_filter
&& BPF_PROG_RUN(is->pass_filter, skb) == 0) {
if (is->debug & 0x2)
printk(KERN_DEBUG "IPPP: inbound frame filtered.\n");
kfree_skb(skb);
return;
}
if (!(is->active_filter
&& BPF_PROG_RUN(is->active_filter, skb) == 0)) {
if (is->debug & 0x2)
printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
lp->huptimer = 0;
if (mlp)
mlp->huptimer = 0;
}
skb_pull(skb, 4);
#else /* CONFIG_IPPP_FILTER */
lp->huptimer = 0;
if (mlp)
mlp->huptimer = 0;
#endif /* CONFIG_IPPP_FILTER */
skb->dev = dev;
skb_reset_mac_header(skb);
netif_rx(skb);
/* net_dev->local->stats.rx_packets++; done in isdn_net.c */
return;
drop_packet:
net_dev->local->stats.rx_dropped++;
kfree_skb(skb);
} | 0 | []
| linux | 4ab42d78e37a294ac7bc56901d563c642e03c4ae | 72,328,757,252,317,600,000,000,000,000,000,000,000 | 155 | ppp, slip: Validate VJ compression slot parameters completely
Currently slhc_init() treats out-of-range values of rslots and tslots
as equivalent to 0, except that if tslots is too large it will
dereference a null pointer (CVE-2015-7799).
Add a range-check at the top of the function and make it return an
ERR_PTR() on error instead of NULL. Change the callers accordingly.
Compile-tested only.
Reported-by: 郭永刚 <[email protected]>
References: http://article.gmane.org/gmane.comp.security.oss.general/17908
Signed-off-by: Ben Hutchings <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
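Hedged sketch of the range check described above, written as a wrapper so the surrounding allocation code can stay elided; the 0..255 bounds are an assumption derived from the 8-bit VJ slot identifiers.
#include <linux/err.h>
struct slcompress;				/* defined by the real slhc code */
struct slcompress *slhc_init(int rslots, int tslots);
static struct slcompress *slhc_init_checked(int rslots, int tslots)
{
	/* Reject out-of-range slot counts up front and report with ERR_PTR(),
	 * so callers test the result with IS_ERR() instead of comparing to NULL. */
	if (rslots < 0 || rslots > 255 || tslots < 0 || tslots > 255)
		return ERR_PTR(-EINVAL);
	return slhc_init(rslots, tslots);
}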
int sa_open_read_magic(int *fd, char *dfile, struct file_magic *file_magic,
int ignore, int *endian_mismatch, int do_swap)
{
int n;
unsigned int fm_types_nr[] = {FILE_MAGIC_ULL_NR, FILE_MAGIC_UL_NR, FILE_MAGIC_U_NR};
/* Open sa data file */
if ((*fd = open(dfile, O_RDONLY)) < 0) {
int saved_errno = errno;
fprintf(stderr, _("Cannot open %s: %s\n"), dfile, strerror(errno));
if ((saved_errno == ENOENT) && default_file_used) {
fprintf(stderr, _("Please check if data collecting is enabled\n"));
}
exit(2);
}
/* Read file magic data */
n = read(*fd, file_magic, FILE_MAGIC_SIZE);
if ((n != FILE_MAGIC_SIZE) ||
((file_magic->sysstat_magic != SYSSTAT_MAGIC) && (file_magic->sysstat_magic != SYSSTAT_MAGIC_SWAPPED)) ||
((file_magic->format_magic != FORMAT_MAGIC) && (file_magic->format_magic != FORMAT_MAGIC_SWAPPED) && !ignore)) {
#ifdef DEBUG
fprintf(stderr, "%s: Bytes read=%d sysstat_magic=%x format_magic=%x\n",
__FUNCTION__, n, file_magic->sysstat_magic, file_magic->format_magic);
#endif
/* Display error message and exit */
handle_invalid_sa_file(*fd, file_magic, dfile, n);
}
*endian_mismatch = (file_magic->sysstat_magic != SYSSTAT_MAGIC);
if (*endian_mismatch) {
if (do_swap) {
/* Swap bytes for file_magic fields */
file_magic->sysstat_magic = SYSSTAT_MAGIC;
file_magic->format_magic = __builtin_bswap16(file_magic->format_magic);
}
/*
* Start swapping at field "header_size" position.
* May not exist for older versions but in this case, it won't be used.
*/
swap_struct(fm_types_nr, &file_magic->header_size, 0);
}
if ((file_magic->sysstat_version > 10) ||
((file_magic->sysstat_version == 10) && (file_magic->sysstat_patchlevel >= 3))) {
/* header_size field exists only for sysstat versions 10.3.1 and later */
if ((file_magic->header_size <= MIN_FILE_HEADER_SIZE) ||
(file_magic->header_size > MAX_FILE_HEADER_SIZE)) {
#ifdef DEBUG
fprintf(stderr, "%s: header_size=%u\n",
__FUNCTION__, file_magic->header_size);
#endif
/* Display error message and exit */
handle_invalid_sa_file(*fd, file_magic, dfile, n);
}
}
if ((file_magic->sysstat_version > 11) ||
((file_magic->sysstat_version == 11) && (file_magic->sysstat_patchlevel >= 7))) {
/* hdr_types_nr field exists only for sysstat versions 11.7.1 and later */
if (MAP_SIZE(file_magic->hdr_types_nr) > file_magic->header_size) {
#ifdef DEBUG
fprintf(stderr, "%s: map_size=%u header_size=%u\n",
__FUNCTION__, MAP_SIZE(file_magic->hdr_types_nr), file_magic->header_size);
#endif
handle_invalid_sa_file(*fd, file_magic, dfile, n);
}
}
if ((file_magic->format_magic != FORMAT_MAGIC) &&
(file_magic->format_magic != FORMAT_MAGIC_SWAPPED))
/*
* This is an old (or new) sa datafile format to
* be read by sadf (since @ignore was set to TRUE).
*/
return -1;
return 0;
} | 0 | [
"CWE-415"
]
| sysstat | a5c8abd4a481ee6e27a3acf00e6d9b0f023e20ed | 5,859,268,147,699,617,000,000,000,000,000,000,000 | 81 | Fix #242: Double free in check_file_actlst()
Avoid freeing buffer() twice.
Signed-off-by: Sebastien GODARD <[email protected]> |
t_rw (int j)
{
int r;
char ch;
static char *q = 0;
char qtype[2], qclass[2];
struct tcpclient *x = NULL;
x = t + j;
if (x->state == -1)
{
r = write (x->tcp, x->buf + x->pos, x->len - x->pos);
if (r <= 0)
{
t_close (j);
return;
}
x->pos += r;
if (x->pos == x->len)
{
t_free (j);
x->state = 1; /* could drop connection immediately */
}
return;
}
r = read (x->tcp, &ch, 1);
if (r == 0)
{
errno = error_pipe;
t_close (j);
return;
}
if (r < 0)
{
t_close (j);
return;
}
if (x->state == 1)
{
x->len = (unsigned char)ch;
x->len <<= 8;
x->state = 2;
return;
}
if (x->state == 2)
{
x->len += (unsigned char)ch;
if (!x->len)
{
errno = error_proto;
t_close (j);
return;
}
x->buf = alloc (x->len);
if (!x->buf)
{
t_close(j);
return;
}
x->pos = 0;
x->state = 3;
return;
}
if (x->state != 3)
return; /* impossible */
x->buf[x->pos++] = ch;
if (x->pos < x->len)
return;
if (!packetquery (x->buf, x->len, &q, qtype, qclass, x->id))
{
t_close(j);
return;
}
x->active = ++numqueries;
if (debug_level)
log_query (&x->active, x->ip, x->port, x->id, q, qtype);
switch (query_start (&x->q, q, qtype, qclass, myipoutgoing))
{
case -1:
t_drop (j);
return;
case 1:
t_respond (j);
return;
}
t_free (j);
x->state = 0;
} | 0 | [
"CWE-362"
]
| ndjbdns | 177b5522e9b3d25778001c8cebfddd4d2973fcfd | 63,516,357,231,182,200,000,000,000,000,000,000,000 | 97 | Merge identical outgoing requests - patch 2.
This patch fixes dnscache to combine *same* client queries into one
single outgoing request, thus securing the server from possible cache
poisoning attacks. The merges operation takes place in the
dns_transmit layer, rather than between query and dns_transmit layers,
as done in the previous patch.
This fixes one of the cache poisoning vulnerability reported by
Mr Mark Johnson -> https://bugzilla.redhat.com/show_bug.cgi?id=838965.
Nonetheless the original patch for this issue was created by
Mr Jeff king -> http://marc.info/?l=djbdns&m=123859517723684&w=3#2
Sincere thanks to Mr Mark for reporting this issue and Mr Jeff for
creating the patch and releasing it under GPLv2. |
TEST(LengthFieldFrameDecoder, PreHeader) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LengthFieldBasedFrameDecoder(2, 10, 2, 0, 0))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 5);
}))
.finalize();
auto bufFrame = createZeroedBuffer(4);
RWPrivateCursor c(bufFrame.get());
c.write((uint16_t)100); // header
c.writeBE((uint16_t)1); // frame size
auto bufData = createZeroedBuffer(1);
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(bufFrame));
pipeline->read(q);
EXPECT_EQ(called, 0);
q.append(std::move(bufData));
pipeline->read(q);
EXPECT_EQ(called, 1);
} | 0 | [
"CWE-119",
"CWE-787"
]
| wangle | 5b3bceca875e4ea4ed9d14c20b20ce46c92c13c6 | 33,276,480,609,192,000,000,000,000,000,000,000,000 | 29 | Peek for \n in LineBasedFrameDecoder.
Summary:
Previously this could underflow if there was not a following \n.
CVE-2019-3563
Reviewed By: siyengar
Differential Revision: D14935715
fbshipit-source-id: 25c3eecf373f89efa1232456aeeb092f13b7fa06 |
void Curl_freeset(struct Curl_easy *data)
{
/* Free all dynamic strings stored in the data->set substructure. */
enum dupstring i;
enum dupblob j;
for(i = (enum dupstring)0; i < STRING_LAST; i++) {
Curl_safefree(data->set.str[i]);
}
for(j = (enum dupblob)0; j < BLOB_LAST; j++) {
Curl_safefree(data->set.blobs[j]);
}
if(data->state.referer_alloc) {
Curl_safefree(data->state.referer);
data->state.referer_alloc = FALSE;
}
data->state.referer = NULL;
if(data->state.url_alloc) {
Curl_safefree(data->state.url);
data->state.url_alloc = FALSE;
}
data->state.url = NULL;
Curl_mime_cleanpart(&data->set.mimepost);
} | 0 | []
| curl | 852aa5ad351ea53e5f01d2f44b5b4370c2bf5425 | 212,054,740,178,037,230,000,000,000,000,000,000,000 | 27 | url: check sasl additional parameters for connection reuse.
Also move static function safecmp() as non-static Curl_safecmp() since
its purpose is needed at several places.
Bug: https://curl.se/docs/CVE-2022-22576.html
CVE-2022-22576
Closes #8746 |
g_markup_parse_context_pop (GMarkupParseContext *context)
{
gpointer user_data;
if (!context->awaiting_pop)
possibly_finish_subparser (context);
g_assert (context->awaiting_pop);
context->awaiting_pop = FALSE;
/* valgrind friendliness */
user_data = context->held_user_data;
context->held_user_data = NULL;
return user_data;
} | 0 | [
"CWE-476"
]
| glib | fccef3cc822af74699cca84cd202719ae61ca3b9 | 340,247,080,058,103,270,000,000,000,000,000,000,000 | 17 | gmarkup: Fix crash in error handling path for closing elements
If something which looks like a closing tag is left unfinished, but
isn’t paired to an opening tag in the document, the error handling code
would do a null pointer dereference. Avoid that, at the cost of
introducing a new translatable error message.
Includes a test case, courtesy of pdknsk.
Signed-off-by: Philip Withnall <[email protected]>
https://gitlab.gnome.org/GNOME/glib/issues/1461 |
xfs_file_buffered_aio_write(
struct kiocb *iocb,
struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
struct xfs_inode *ip = XFS_I(inode);
ssize_t ret;
int enospc = 0;
int iolock = XFS_IOLOCK_EXCL;
xfs_rw_ilock(ip, iolock);
ret = xfs_file_aio_write_checks(iocb, from, &iolock);
if (ret)
goto out;
/* We can write back this queue in page reclaim */
current->backing_dev_info = inode_to_bdi(inode);
write_retry:
trace_xfs_file_buffered_write(ip, iov_iter_count(from),
iocb->ki_pos, 0);
ret = generic_perform_write(file, from, iocb->ki_pos);
if (likely(ret >= 0))
iocb->ki_pos += ret;
/*
* If we hit a space limit, try to free up some lingering preallocated
* space before returning an error. In the case of ENOSPC, first try to
* write back all dirty inodes to free up some of the excess reserved
* metadata space. This reduces the chances that the eofblocks scan
* waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
* also behaves as a filter to prevent too many eofblocks scans from
* running at the same time.
*/
if (ret == -EDQUOT && !enospc) {
enospc = xfs_inode_free_quota_eofblocks(ip);
if (enospc)
goto write_retry;
} else if (ret == -ENOSPC && !enospc) {
struct xfs_eofblocks eofb = {0};
enospc = 1;
xfs_flush_inodes(ip->i_mount);
eofb.eof_scan_owner = ip->i_ino; /* for locking */
eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
xfs_icache_free_eofblocks(ip->i_mount, &eofb);
goto write_retry;
}
current->backing_dev_info = NULL;
out:
xfs_rw_iunlock(ip, iolock);
return ret;
} | 0 | [
"CWE-19"
]
| linux | fc0561cefc04e7803c0f6501ca4f310a502f65b8 | 248,391,104,435,162,400,000,000,000,000,000,000,000 | 57 | xfs: optimise away log forces on timestamp updates for fdatasync
xfs: timestamp updates cause excessive fdatasync log traffic
Sage Weil reported that a ceph test workload was writing to the
log on every fdatasync during an overwrite workload. Event tracing
showed that the only metadata modification being made was the
timestamp updates during the write(2) syscall, but fdatasync(2)
is supposed to ignore them. The key observation was that the
transactions in the log all looked like this:
INODE: #regs: 4 ino: 0x8b flags: 0x45 dsize: 32
And contained a flags field of 0x45 or 0x85, and had data and
attribute forks following the inode core. This means that the
timestamp updates were triggering dirty relogging of previously
logged parts of the inode that hadn't yet been flushed back to
disk.
There are two parts to this problem. The first is that XFS relogs
dirty regions in subsequent transactions, so it carries around the
fields that have been dirtied since the last time the inode was
written back to disk, not since the last time the inode was forced
into the log.
The second part is that on v5 filesystems, the inode change count
update during inode dirtying also sets the XFS_ILOG_CORE flag, so
on v5 filesystems this makes a timestamp update dirty the entire
inode.
As a result when fdatasync is run, it looks at the dirty fields in
the inode, and sees more than just the timestamp flag, even though
the only metadata change since the last fdatasync was just the
timestamps. Hence we force the log on every subsequent fdatasync
even though it is not needed.
To fix this, add a new field to the inode log item that tracks
changes since the last time fsync/fdatasync forced the log to flush
the changes to the journal. This flag is updated when we dirty the
inode, but we do it before updating the change count so it does not
carry the "core dirty" flag from timestamp updates. The fields are
zeroed when the inode is marked clean (due to writeback/freeing) or
when an fsync/datasync forces the log. Hence if we only dirty the
timestamps on the inode between fsync/fdatasync calls, the fdatasync
will not trigger another log force.
Over 100 runs of the test program:
Ext4 baseline:
runtime: 1.63s +/- 0.24s
avg lat: 1.59ms +/- 0.24ms
iops: ~2000
XFS, vanilla kernel:
runtime: 2.45s +/- 0.18s
avg lat: 2.39ms +/- 0.18ms
log forces: ~400/s
iops: ~1000
XFS, patched kernel:
runtime: 1.49s +/- 0.26s
avg lat: 1.46ms +/- 0.25ms
log forces: ~30/s
iops: ~1500
Reported-by: Sage Weil <[email protected]>
Signed-off-by: Dave Chinner <[email protected]>
Reviewed-by: Brian Foster <[email protected]>
Signed-off-by: Dave Chinner <[email protected]> |
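Hedged sketch of the bookkeeping described in this message: dirty state is tracked both "since last writeback" and "since last log force", so fdatasync can ignore timestamp-only changes. The field and flag names below follow the description and are assumptions, not the exact XFS identifiers.
struct ili_dirty_sketch {
	unsigned int ili_fields;	/* dirtied since the last flush to disk */
	unsigned int ili_fsync_fields;	/* dirtied since the last fsync/fdatasync */
};
#define ILOG_TIMESTAMP_SKETCH	0x4000u	/* assumed flag for timestamp-only updates */
static int fsync_needs_log_force(const struct ili_dirty_sketch *ili, int datasync)
{
	unsigned int dirty = ili->ili_fsync_fields;
	if (datasync)
		dirty &= ~ILOG_TIMESTAMP_SKETCH;	/* fdatasync may skip timestamps */
	return dirty != 0;	/* force the log only for real metadata changes */
}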
ncp__io2vol(struct ncp_server *server, unsigned char *vname, unsigned int *vlen,
const unsigned char *iname, unsigned int ilen, int cc)
{
struct nls_table *in = server->nls_io;
struct nls_table *out = server->nls_vol;
unsigned char *vname_start;
unsigned char *vname_end;
const unsigned char *iname_end;
iname_end = iname + ilen;
vname_start = vname;
vname_end = vname + *vlen - 1;
while (iname < iname_end) {
int chl;
wchar_t ec;
if (NCP_IS_FLAG(server, NCP_FLAG_UTF8)) {
int k;
unicode_t u;
k = utf8_to_utf32(iname, iname_end - iname, &u);
if (k < 0 || u > MAX_WCHAR_T)
return -EINVAL;
iname += k;
ec = u;
} else {
if (*iname == NCP_ESC) {
int k;
if (iname_end - iname < 5)
goto nospec;
ec = 0;
for (k = 1; k < 5; k++) {
unsigned char nc;
nc = iname[k] - '0';
if (nc >= 10) {
nc -= 'A' - '0' - 10;
if ((nc < 10) || (nc > 15)) {
goto nospec;
}
}
ec = (ec << 4) | nc;
}
iname += 5;
} else {
nospec:;
if ( (chl = in->char2uni(iname, iname_end - iname, &ec)) < 0)
return chl;
iname += chl;
}
}
/* unitoupper should be here! */
chl = out->uni2char(ec, vname, vname_end - vname);
if (chl < 0)
return chl;
/* this is wrong... */
if (cc) {
int chi;
for (chi = 0; chi < chl; chi++){
vname[chi] = ncp_toupper(out, vname[chi]);
}
}
vname += chl;
}
*vname = 0;
*vlen = vname - vname_start;
return 0;
} | 0 | [
"CWE-119"
]
| staging | 4c41aa24baa4ed338241d05494f2c595c885af8f | 155,734,680,992,679,300,000,000,000,000,000,000,000 | 76 | staging: ncpfs: memory corruption in ncp_read_kernel()
If the server is malicious then *bytes_read could be larger than the
size of the "target" buffer. It would lead to memory corruption when we
do the memcpy().
Reported-by: Dr Silvio Cesare of InfoSect <Silvio Cesare <[email protected]>
Signed-off-by: Dan Carpenter <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
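Hedged sketch of the length check this message calls for; the helper name and signature are illustrative assumptions rather than ncpfs's actual API.
#include <errno.h>
#include <stddef.h>
#include <string.h>
/* Copy a server-reported reply into a fixed-size caller buffer, rejecting
 * lengths larger than the buffer: the reply length is untrusted data. */
static int copy_reply_checked(void *target, size_t target_size,
                              const void *reply, size_t bytes_read)
{
	if (bytes_read > target_size)
		return -EIO;
	memcpy(target, reply, bytes_read);
	return 0;
}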
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
struct rds_message *rm, __be16 sport,
__be16 dport, int *queued)
{
unsigned long flags;
u32 len;
if (*queued)
goto out;
len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
/* this is the only place which holds both the socket's rs_lock
* and the connection's c_lock */
spin_lock_irqsave(&rs->rs_lock, flags);
/*
* If there is a little space in sndbuf, we don't queue anything,
* and userspace gets -EAGAIN. But poll() indicates there's send
* room. This can lead to bad behavior (spinning) if snd_bytes isn't
* freed up by incoming acks. So we check the *old* value of
* rs_snd_bytes here to allow the last msg to exceed the buffer,
* and poll() now knows no more data can be sent.
*/
if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
rs->rs_snd_bytes += len;
/* let recv side know we are close to send space exhaustion.
* This is probably not the optimal way to do it, as this
* means we set the flag on *all* messages as soon as our
* throughput hits a certain threshold.
*/
if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
rds_message_addref(rm);
rm->m_rs = rs;
/* The code ordering is a little weird, but we're
trying to minimize the time we hold c_lock */
rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
rm->m_inc.i_conn = conn;
rds_message_addref(rm);
spin_lock(&conn->c_lock);
rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
spin_unlock(&conn->c_lock);
rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
rm, len, rs, rs->rs_snd_bytes,
(unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));
*queued = 1;
}
spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
return *queued;
} | 0 | [
"CWE-362"
]
| linux | 8c7188b23474cca017b3ef354c4a58456f68303a | 143,903,995,704,837,500,000,000,000,000,000,000,000 | 63 | RDS: fix race condition when sending a message on unbound socket
Sasha's found a NULL pointer dereference in the RDS connection code when
sending a message to an apparently unbound socket. The problem is caused
by the code checking if the socket is bound in rds_sendmsg(), which checks
the rs_bound_addr field without taking a lock on the socket. This opens a
race where rs_bound_addr is temporarily set but where the transport is not
in rds_bind(), leading to a NULL pointer dereference when trying to
dereference 'trans' in __rds_conn_create().
Vegard wrote a reproducer for this issue, so kindly ask him to share if
you're interested.
I cannot reproduce the NULL pointer dereference using Vegard's reproducer
with this patch, whereas I could without.
Complete earlier incomplete fix to CVE-2015-6937:
74e98eb08588 ("RDS: verify the underlying transport exists before creating a connection")
Cc: David S. Miller <[email protected]>
Cc: [email protected]
Reviewed-by: Vegard Nossum <[email protected]>
Reviewed-by: Sasha Levin <[email protected]>
Acked-by: Santosh Shilimkar <[email protected]>
Signed-off-by: Quentin Casasnovas <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
bool ExprMatchExpression::matches(const MatchableDocument* doc, MatchDetails* details) const {
if (_rewriteResult && _rewriteResult->matchExpression() &&
!_rewriteResult->matchExpression()->matches(doc, details)) {
return false;
}
Document document(doc->toBSON());
auto value = _expression->evaluate(document);
return value.coerceToBool();
} | 0 | []
| mongo | ee97c0699fd55b498310996ee002328e533681a3 | 85,725,202,270,799,780,000,000,000,000,000,000,000 | 10 | SERVER-36993 Fix crash due to incorrect $or pushdown for indexed $expr. |
static int ceph_x_encode_ticket(struct ceph_x_ticket_handler *th,
void **p, void *end)
{
ceph_decode_need(p, end, 1 + sizeof(u64), bad);
ceph_encode_8(p, 1);
ceph_encode_64(p, th->secret_id);
if (th->ticket_blob) {
const char *buf = th->ticket_blob->vec.iov_base;
u32 len = th->ticket_blob->vec.iov_len;
ceph_encode_32_safe(p, end, len, bad);
ceph_encode_copy_safe(p, end, buf, len, bad);
} else {
ceph_encode_32_safe(p, end, 0, bad);
}
return 0;
bad:
return -ERANGE;
} | 0 | [
"CWE-399",
"CWE-119"
]
| linux | c27a3e4d667fdcad3db7b104f75659478e0c68d8 | 43,287,985,765,187,470,000,000,000,000,000,000,000 | 20 | libceph: do not hard code max auth ticket len
We hard code cephx auth ticket buffer size to 256 bytes. This isn't
enough for any moderate setups and, in case tickets themselves are not
encrypted, leads to buffer overflows (ceph_x_decrypt() errors out, but
ceph_decode_copy() doesn't - it's just a memcpy() wrapper). Since the
buffer is allocated dynamically anyway, allocated it a bit later, at
the point where we know how much is going to be needed.
Fixes: http://tracker.ceph.com/issues/8979
Cc: [email protected]
Signed-off-by: Ilya Dryomov <[email protected]>
Reviewed-by: Sage Weil <[email protected]> |
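Hedged sketch of the allocation strategy described above: decode the blob length first, then size the buffer from it instead of using a fixed 256-byte array. The helper itself is illustrative; only the ceph_decode_* macros are assumed to behave as in libceph.
static void *decode_ticket_blob(void **p, void *end, u32 *plen)
{
	u32 blob_len;
	void *buf;
	ceph_decode_32_safe(p, end, blob_len, bad);	/* length comes first */
	ceph_decode_need(p, end, blob_len, bad);	/* must fit in the message */
	buf = kmalloc(blob_len, GFP_NOFS);		/* sized to the actual need */
	if (!buf)
		return ERR_PTR(-ENOMEM);
	ceph_decode_copy(p, buf, blob_len);
	*plen = blob_len;
	return buf;
bad:
	return ERR_PTR(-ERANGE);
}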
static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
if (unlikely(is_vm_hugetlb_page(vma)))
return hugetlb_fault(mm, vma, address, flags);
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
if (!pud)
return VM_FAULT_OOM;
pmd = pmd_alloc(mm, pud, address);
if (!pmd)
return VM_FAULT_OOM;
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) {
int ret = VM_FAULT_FALLBACK;
if (!vma->vm_ops)
ret = do_huge_pmd_anonymous_page(mm, vma, address,
pmd, flags);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
} else {
pmd_t orig_pmd = *pmd;
int ret;
barrier();
if (pmd_trans_huge(orig_pmd)) {
unsigned int dirty = flags & FAULT_FLAG_WRITE;
/*
* If the pmd is splitting, return and retry the
* the fault. Alternative: wait until the split
* is done, and goto retry.
*/
if (pmd_trans_splitting(orig_pmd))
return 0;
if (pmd_protnone(orig_pmd))
return do_huge_pmd_numa_page(mm, vma, address,
orig_pmd, pmd);
if (dirty && !pmd_write(orig_pmd)) {
ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
orig_pmd);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
} else {
huge_pmd_set_accessed(mm, vma, address, pmd,
orig_pmd, dirty);
return 0;
}
}
}
/*
* Use __pte_alloc instead of pte_alloc_map, because we can't
* run pte_offset_map on the pmd, if an huge pmd could
* materialize from under us from a different thread.
*/
if (unlikely(pmd_none(*pmd)) &&
unlikely(__pte_alloc(mm, vma, pmd, address)))
return VM_FAULT_OOM;
/* if an huge pmd materialized from under us just retry later */
if (unlikely(pmd_trans_huge(*pmd)))
return 0;
/*
* A regular pmd is established and it can't morph into a huge pmd
* from under us anymore at this point because we hold the mmap_sem
* read mode and khugepaged takes it in write mode. So now it's
* safe to run pte_offset_map().
*/
pte = pte_offset_map(pmd, address);
return handle_pte_fault(mm, vma, address, pte, pmd, flags);
} | 0 | [
"CWE-20"
]
| linux | 6b7339f4c31ad69c8e9c0b2859276e22cf72176d | 127,597,761,072,461,960,000,000,000,000,000,000,000 | 79 | mm: avoid setting up anonymous pages into file mapping
Reading page fault handler code I've noticed that under right
circumstances kernel would map anonymous pages into file mappings: if
the VMA doesn't have vm_ops->fault() and the VMA wasn't fully populated
on ->mmap(), kernel would handle page fault to not populated pte with
do_anonymous_page().
Let's change page fault handler to use do_anonymous_page() only on
anonymous VMA (->vm_ops == NULL) and make sure that the VMA is not
shared.
For file mappings without vm_ops->fault() or a shared VMA without vm_ops,
page fault on pte_none() entry would lead to SIGBUS.
Signed-off-by: Kirill A. Shutemov <[email protected]>
Acked-by: Oleg Nesterov <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Willy Tarreau <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]> |
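Hedged, simplified sketch of the decision the message describes for a pte_none() fault; the real logic lives in handle_pte_fault(), and this stand-alone helper only mirrors the conditions named above.
/* Returns VM_FAULT_SIGBUS when an anonymous page must not be set up,
 * 0 when the normal do_anonymous_page()/->fault() path may proceed. */
static int pte_none_fault_disposition(struct vm_area_struct *vma)
{
	if (!vma->vm_ops) {			/* anonymous mapping */
		if (vma->vm_flags & VM_SHARED)
			return VM_FAULT_SIGBUS;	/* shared mapping without vm_ops */
		return 0;			/* ok: do_anonymous_page() */
	}
	if (!vma->vm_ops->fault)
		return VM_FAULT_SIGBUS;		/* file mapping without ->fault() */
	return 0;				/* ok: ->fault() path */
}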
eval_init(void)
{
evalvars_init();
func_init();
} | 0 | [
"CWE-823"
]
| vim | 8b91e71441069b1dde9ac9ff9d9a829b1b4aecca | 152,133,726,476,580,670,000,000,000,000,000,000,000 | 5 | patch 8.2.4774: crash when using a number for lambda name
Problem: Crash when using a number for lambda name.
Solution: Check the type of the lambda reference. |
static void t1_flush_cs(PDF pdf, boolean is_subr)
{
char *p;
byte *r, *return_cs = NULL;
cs_entry *tab, *end_tab, *ptr;
char *start_line, *line_end;
int count, size_pos;
unsigned short cr, cs_len;
if (is_subr) {
start_line = subr_array_start;
line_end = subr_array_end;
size_pos = subr_size_pos;
tab = subr_tab;
count = subr_max + 1;
end_tab = subr_tab + count;
} else {
start_line = cs_dict_start;
line_end = cs_dict_end;
size_pos = cs_size_pos;
tab = cs_tab;
end_tab = cs_ptr;
count = cs_counter;
}
t1_line_ptr = t1_line_array;
for (p = start_line; p - start_line < size_pos;)
*t1_line_ptr++ = *p++;
while (isdigit((unsigned char)*p))
p++;
sprintf(t1_line_ptr, "%u", count);
strcat(t1_line_ptr, p);
t1_line_ptr = eol(t1_line_array);
t1_putline(pdf);
/*tex For |-Wall|. */
cs_len = 0;
/*tex Create |return_cs| to replace unsused |subr|s. */
if (is_subr) {
cr = 4330;
cs_len = 0;
/*tex
At this point we have |t1_lenIV >= 0;| a negative value would be
caught in |t1_scan_param|.
*/
return_cs = xtalloc((unsigned) (t1_lenIV + 1), byte);
for (cs_len = 0, r = return_cs; cs_len < t1_lenIV; cs_len++, r++)
*r = cencrypt(0x00, &cr);
*r = cencrypt(CS_RETURN, &cr);
cs_len++;
}
for (ptr = tab; ptr < end_tab; ptr++) {
if (ptr->used) {
if (is_subr)
sprintf(t1_line_array, "dup %li %u", (long int) (ptr - tab),
ptr->cslen);
else
sprintf(t1_line_array, "/%s %u", ptr->name, ptr->cslen);
p = strend(t1_line_array);
memcpy(p, ptr->data, ptr->len);
t1_line_ptr = p + ptr->len;
t1_putline(pdf);
} else {
/*tex Replace unsused subr's by |return_cs|. */
if (is_subr) {
sprintf(t1_line_array, "dup %li %u%s ", (long int) (ptr - tab),
cs_len, cs_token_pair[0]);
p = strend(t1_line_array);
memcpy(p, return_cs, cs_len);
t1_line_ptr = p + cs_len;
t1_putline(pdf);
sprintf(t1_line_array, " %s", cs_token_pair[1]);
t1_line_ptr = eol(t1_line_array);
t1_putline(pdf);
}
}
xfree(ptr->data);
if (is_subr)
ptr->valid = false;
if (ptr->name != notdef)
xfree(ptr->name);
}
sprintf(t1_line_array, "%s", line_end);
t1_line_ptr = eol(t1_line_array);
t1_putline(pdf);
if (is_subr) {
end_tab = subr_tab + subr_size;
for (ptr = tab; ptr < end_tab; ptr++) {
if (ptr->valid) {
xfree(ptr->data);
if (ptr->name != notdef)
xfree(ptr->name);
}
}
xfree(return_cs);
}
xfree(tab);
xfree(start_line);
xfree(line_end);
} | 0 | [
"CWE-119"
]
| texlive-source | 6ed0077520e2b0da1fd060c7f88db7b2e6068e4c | 326,292,399,476,421,830,000,000,000,000,000,000,000 | 97 | writet1 protection against buffer overflow
git-svn-id: svn://tug.org/texlive/trunk/Build/source@48697 c570f23f-e606-0410-a88d-b1316a301751 |
static void flush_bufs(struct virtqueue *vq, bool can_sleep)
{
struct port_buffer *buf;
unsigned int len;
while ((buf = virtqueue_get_buf(vq, &len)))
free_buf(buf, can_sleep);
} | 0 | [
"CWE-120"
]
| linux | d00d8da5869a2608e97cfede094dfc5e11462a46 | 48,460,375,722,458,300,000,000,000,000,000,000,000 | 8 | virtio_console: Assure used length from device is limited
The buf->len might come from an untrusted device. This
ensures the value would not exceed the size of the buffer
to avoid data corruption or loss.
Signed-off-by: Xie Yongji <[email protected]>
Acked-by: Jason Wang <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Michael S. Tsirkin <[email protected]> |
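Hedged sketch of the clamp this message describes: the used length reported by the device must never exceed the size that was actually allocated for the buffer. Field names mirror the description and are assumptions.
static struct port_buffer *get_inbuf_checked(struct virtqueue *vq)
{
	struct port_buffer *buf;
	unsigned int len;
	buf = virtqueue_get_buf(vq, &len);
	if (buf) {
		/* never trust a device-reported length blindly */
		buf->len = min_t(size_t, len, buf->size);
		buf->offset = 0;
	}
	return buf;
}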
cdf_find_stream(const cdf_dir_t *dir, const char *name, int type)
{
size_t i, name_len = strlen(name) + 1;
for (i = dir->dir_len; i > 0; i--)
if (dir->dir_tab[i - 1].d_type == type &&
cdf_namecmp(name, dir->dir_tab[i - 1].d_name, name_len)
== 0)
break;
if (i > 0)
return CAST(int, i);
DPRINTF(("Cannot find type %d `%s'\n", type, name));
errno = ESRCH;
return 0;
} | 0 | [
"CWE-787"
]
| file | 46a8443f76cec4b41ec736eca396984c74664f84 | 279,054,170,391,259,900,000,000,000,000,000,000,000 | 16 | Limit the number of elements in a vector (found by oss-fuzz) |
static int cxusb_lgz201_tuner_attach(struct dvb_usb_adapter *adap)
{
dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x61, NULL, DVB_PLL_LG_Z201);
return 0;
} | 0 | [
"CWE-119",
"CWE-787"
]
| linux | 3f190e3aec212fc8c61e202c51400afa7384d4bc | 83,277,223,505,839,480,000,000,000,000,000,000,000 | 5 | [media] cxusb: Use a dma capable buffer also for reading
Commit 17ce039b4e54 ("[media] cxusb: don't do DMA on stack")
added a kmalloc'ed bounce buffer for writes, but failed to do the same
for reads. As the read only happens after the write is finished, we can
reuse the same buffer.
As dvb_usb_generic_rw handles a read length of 0 by itself, avoid calling
it using the dvb_usb_generic_read wrapper function.
Signed-off-by: Stefan Brüns <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]> |
winlink_count(struct winlinks *wwl)
{
struct winlink *wl;
u_int n;
n = 0;
RB_FOREACH(wl, winlinks, wwl)
n++;
return (n);
} | 0 | []
| src | b32e1d34e10a0da806823f57f02a4ae6e93d756e | 96,175,389,440,929,340,000,000,000,000,000,000,000 | 11 | evbuffer_new and bufferevent_new can both fail (when malloc fails) and
return NULL. GitHub issue 1547. |
NCR_ModifyMaxpoll(NCR_Instance inst, int new_maxpoll)
{
if (new_maxpoll < MIN_POLL || new_maxpoll > MAX_POLL)
return;
inst->maxpoll = new_maxpoll;
LOG(LOGS_INFO, LOGF_NtpCore, "Source %s new maxpoll %d", UTI_IPToString(&inst->remote_addr.ip_addr), new_maxpoll);
if (inst->minpoll > inst->maxpoll)
NCR_ModifyMinpoll(inst, inst->maxpoll);
} | 0 | []
| chrony | a78bf9725a7b481ebff0e0c321294ba767f2c1d8 | 155,081,107,331,753,290,000,000,000,000,000,000,000 | 9 | ntp: restrict authentication of server/peer to specified key
When a server/peer was specified with a key number to enable
authentication with a symmetric key, packets received from the
server/peer were accepted if they were authenticated with any of
the keys contained in the key file and not just the specified key.
This allowed an attacker who knew one key of a client/peer to modify
packets from its servers/peers that were authenticated with other
keys in a man-in-the-middle (MITM) attack. For example, in a network
where each NTP association had a separate key and all hosts had only
keys they needed, a client of a server could not attack other clients
of the server, but it could attack the server and also attack its own
clients (i.e. modify packets from other servers).
To not allow the server/peer to be authenticated with other keys,
extend the authentication test to check if the key ID in the received
packet is equal to the configured key number. As a consequence, it's
no longer possible to authenticate two peers to each other with two
different keys, both peers have to be configured to use the same key.
This issue was discovered by Matt Street of Cisco ASIG. |
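Hedged illustration (generic C, not chrony's actual identifiers) of the extended authentication test described above: a packet is accepted only if its key ID equals the key configured for that source and the MAC verifies under that key.
#include <stdint.h>
/* verify_mac_with_key() is an assumed stand-in for the real MAC check. */
static int auth_check_packet(uint32_t pkt_key_id, uint32_t configured_key_id,
                             int (*verify_mac_with_key)(uint32_t key_id))
{
	if (pkt_key_id != configured_key_id)
		return 0;	/* any other key from the key file is rejected */
	return verify_mac_with_key(pkt_key_id);
}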
static void geneve_dellink(struct net_device *dev, struct list_head *head)
{
struct geneve_dev *geneve = netdev_priv(dev);
list_del(&geneve->next);
unregister_netdevice_queue(dev, head);
} | 0 | []
| net | 6c8991f41546c3c472503dff1ea9daaddf9331c2 | 203,755,288,427,532,700,000,000,000,000,000,000,000 | 7 | net: ipv6_stub: use ip6_dst_lookup_flow instead of ip6_dst_lookup
ipv6_stub uses the ip6_dst_lookup function to allow other modules to
perform IPv6 lookups. However, this function skips the XFRM layer
entirely.
All users of ipv6_stub->ip6_dst_lookup use ip_route_output_flow (via the
ip_route_output_key and ip_route_output helpers) for their IPv4 lookups,
which calls xfrm_lookup_route(). This patch fixes this inconsistent
behavior by switching the stub to ip6_dst_lookup_flow, which also calls
xfrm_lookup_route().
This requires some changes in all the callers, as these two functions
take different arguments and have different return types.
Fixes: 5f81bd2e5d80 ("ipv6: export a stub for IPv6 symbols used by vxlan")
Reported-by: Xiumei Mu <[email protected]>
Signed-off-by: Sabrina Dubroca <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
dp_packet_hwol_is_ipv4(const struct dp_packet *b)
{
return !!(*dp_packet_ol_flags_ptr(b) & DP_PACKET_OL_TX_IPV4);
} | 0 | [
"CWE-400"
]
| ovs | 79349cbab0b2a755140eedb91833ad2760520a83 | 195,885,401,963,391,860,000,000,000,000,000,000,000 | 4 | flow: Support extra padding length.
Although not required, padding can be optionally added until
the packet length is MTU bytes. A packet with extra padding
currently fails sanity checks.
Vulnerability: CVE-2020-35498
Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.")
Reported-by: Joakim Hindersson <[email protected]>
Acked-by: Ilya Maximets <[email protected]>
Signed-off-by: Flavio Leitner <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]> |
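Hedged sketch of how a parser can tolerate trailing Ethernet padding, as this message describes: treat the IPv4 total-length field as authoritative and ignore bytes beyond it, while still rejecting frames shorter than that length. Names are generic assumptions, not the miniflow code.
#include <arpa/inet.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
/* avail = bytes available after the Ethernet header,
 * ip_tot_len_be = IPv4 total length field as received (big-endian). */
static bool clamp_l3_payload(size_t *avail, uint16_t ip_tot_len_be)
{
	size_t ip_len = ntohs(ip_tot_len_be);
	if (ip_len < 20)
		return false;	/* smaller than a minimal IPv4 header */
	if (ip_len > *avail)
		return false;	/* truncated packet: still invalid */
	*avail = ip_len;	/* anything beyond ip_len is padding: ignore it */
	return true;
}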
e_ews_connection_create_items_sync (EEwsConnection *cnc,
gint pri,
const gchar *msg_disposition,
const gchar *send_invites,
const EwsFolderId *fid,
EEwsRequestCreationCallback create_cb,
gpointer create_user_data,
GSList **ids,
GCancellable *cancellable,
GError **error)
{
EAsyncClosure *closure;
GAsyncResult *result;
gboolean success;
g_return_val_if_fail (cnc != NULL, FALSE);
closure = e_async_closure_new ();
e_ews_connection_create_items (
cnc, pri, msg_disposition,
send_invites, fid,
create_cb, create_user_data,
cancellable,
e_async_closure_callback, closure);
result = e_async_closure_wait (closure);
success = e_ews_connection_create_items_finish (
cnc, result, ids, error);
e_async_closure_free (closure);
return success;
} | 0 | [
"CWE-295"
]
| evolution-ews | 915226eca9454b8b3e5adb6f2fff9698451778de | 228,880,584,259,561,380,000,000,000,000,000,000,000 | 35 | I#27 - SSL Certificates are not validated
This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too.
Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27 |
void elst_box_del(GF_Box *s)
{
GF_EditListBox *ptr;
u32 nb_entries;
u32 i;
ptr = (GF_EditListBox *)s;
if (ptr == NULL) return;
nb_entries = gf_list_count(ptr->entryList);
for (i = 0; i < nb_entries; i++) {
GF_EdtsEntry *p = (GF_EdtsEntry*)gf_list_get(ptr->entryList, i);
if (p) gf_free(p);
}
gf_list_del(ptr->entryList);
gf_free(ptr);
} | 0 | [
"CWE-787"
]
| gpac | 388ecce75d05e11fc8496aa4857b91245007d26e | 307,557,400,897,973,560,000,000,000,000,000,000,000 | 16 | fixed #1587 |
globextend(const Char *path, glob_t *pglob, struct glob_lim *limitp,
struct stat *sb)
{
char **pathv;
ssize_t i;
size_t newn, len;
char *copy = NULL;
const Char *p;
struct stat **statv;
newn = 2 + pglob->gl_pathc + pglob->gl_offs;
if (pglob->gl_offs >= INT_MAX ||
pglob->gl_pathc >= INT_MAX ||
newn >= INT_MAX ||
SIZE_MAX / sizeof(*pathv) <= newn ||
SIZE_MAX / sizeof(*statv) <= newn) {
nospace:
for (i = pglob->gl_offs; i < (ssize_t)(newn - 2); i++) {
if (pglob->gl_pathv && pglob->gl_pathv[i]) {
free(pglob->gl_pathv[i]);
}
if ((pglob->gl_flags & GLOB_KEEPSTAT) != 0 &&
pglob->gl_pathv && pglob->gl_pathv[i]) {
free(pglob->gl_statv[i]);
}
}
if (pglob->gl_pathv) {
free(pglob->gl_pathv);
pglob->gl_pathv = NULL;
}
if (pglob->gl_statv) {
free(pglob->gl_statv);
pglob->gl_statv = NULL;
}
return GLOB_NOSPACE;
}
pathv = realloc(pglob->gl_pathv, newn * sizeof(*pathv));
if (pathv == NULL) {
goto nospace;
}
if (pglob->gl_pathv == NULL && pglob->gl_offs > 0) {
/* first time around -- clear initial gl_offs items */
pathv += pglob->gl_offs;
for (i = pglob->gl_offs; --i >= 0; ) {
*--pathv = NULL;
}
}
pglob->gl_pathv = pathv;
if ((pglob->gl_flags & GLOB_KEEPSTAT) != 0) {
statv = realloc(pglob->gl_statv, newn * sizeof(*statv));
if (statv == NULL) {
goto nospace;
}
if (pglob->gl_statv == NULL && pglob->gl_offs > 0) {
/* first time around -- clear initial gl_offs items */
statv += pglob->gl_offs;
for (i = pglob->gl_offs; --i >= 0; ) {
*--statv = NULL;
}
}
pglob->gl_statv = statv;
if (sb == NULL) {
statv[pglob->gl_offs + pglob->gl_pathc] = NULL;
} else {
limitp->glim_malloc += sizeof(**statv);
if (limitp->glim_malloc >= GLOB_LIMIT_MALLOC) {
errno = 0;
return GLOB_NOSPACE;
}
if ((statv[pglob->gl_offs + pglob->gl_pathc] =
malloc(sizeof(**statv))) == NULL) {
goto copy_error;
}
memcpy(statv[pglob->gl_offs + pglob->gl_pathc], sb,
sizeof(*sb));
}
statv[pglob->gl_offs + pglob->gl_pathc + 1] = NULL;
}
for (p = path; *p++;)
;
len = (size_t)(p - path);
limitp->glim_malloc += len;
if ((copy = malloc(len)) != NULL) {
if (g_Ctoc(path, copy, len)) {
free(copy);
return GLOB_NOSPACE;
}
pathv[pglob->gl_offs + pglob->gl_pathc++] = copy;
}
pathv[pglob->gl_offs + pglob->gl_pathc] = NULL;
if ((newn * sizeof(*pathv)) + limitp->glim_malloc > GLOB_LIMIT_MALLOC) {
errno = 0;
return GLOB_NOSPACE;
}
copy_error:
return copy == NULL ? GLOB_NOSPACE : 0;
} | 0 | []
| pure-ftpd | 0627004e23a24108785dc1506c5767392b90f807 | 85,591,077,718,394,480,000,000,000,000,000,000,000 | 101 | BSD glob(): check max pattern length after having initialized pglob |
name_owner_changed (GDBusConnection *connection,
const gchar *sender_name,
const gchar *object_path,
const gchar *interface_name,
const gchar *signal_name,
GVariant *parameters,
gpointer user_data)
{
const char *name, *from, *to;
g_variant_get (parameters, "(&s&s&s)", &name, &from, &to);
if (name[0] == ':' &&
strcmp (name, from) == 0 &&
strcmp (to, "") == 0)
{
GHashTableIter iter;
PidData *pid_data = NULL;
gpointer value = NULL;
GList *list = NULL, *l;
g_hash_table_iter_init (&iter, client_pid_data_hash);
while (g_hash_table_iter_next (&iter, NULL, &value))
{
pid_data = value;
if (pid_data->watch_bus && g_str_equal (pid_data->client, name))
list = g_list_prepend (list, pid_data);
}
for (l = list; l; l = l->next)
{
pid_data = l->data;
g_debug ("%s dropped off the bus, killing %d", pid_data->client, pid_data->pid);
killpg (pid_data->pid, SIGINT);
}
g_list_free (list);
close_update_monitors_for_sender (name);
}
} | 0 | [
"CWE-94",
"CWE-74"
]
| flatpak | aeb6a7ab0abaac4a8f4ad98b3df476d9de6b8bd4 | 312,653,209,991,144,970,000,000,000,000,000,000,000 | 42 | portal: Convert --env in extra-args into --env-fd
This hides overridden variables from the command-line, which means
processes running under other uids can't see them in /proc/*/cmdline,
which might be important if they contain secrets.
Signed-off-by: Simon McVittie <[email protected]>
Part-of: https://github.com/flatpak/flatpak/security/advisories/GHSA-4ppf-fxf6-vxg2 |
static jpc_enc_rlvl_t *rlvl_create(jpc_enc_rlvl_t *rlvl, jpc_enc_cp_t *cp,
jpc_enc_tcmpt_t *tcmpt, jpc_tsfb_band_t *bandinfos)
{
uint_fast16_t rlvlno;
uint_fast32_t tlprctlx;
uint_fast32_t tlprctly;
uint_fast32_t brprcbrx;
uint_fast32_t brprcbry;
uint_fast16_t bandno;
jpc_enc_band_t *band;
/* Deduce the resolution level. */
rlvlno = rlvl - tcmpt->rlvls;
/* Initialize members required for error recovery. */
rlvl->bands = 0;
rlvl->tcmpt = tcmpt;
/* Compute the coordinates of the top-left and bottom-right
corners of the tile-component at this resolution. */
rlvl->tlx = JPC_CEILDIVPOW2(jas_seq2d_xstart(tcmpt->data), tcmpt->numrlvls -
1 - rlvlno);
rlvl->tly = JPC_CEILDIVPOW2(jas_seq2d_ystart(tcmpt->data), tcmpt->numrlvls -
1 - rlvlno);
rlvl->brx = JPC_CEILDIVPOW2(jas_seq2d_xend(tcmpt->data), tcmpt->numrlvls -
1 - rlvlno);
rlvl->bry = JPC_CEILDIVPOW2(jas_seq2d_yend(tcmpt->data), tcmpt->numrlvls -
1 - rlvlno);
if (rlvl->tlx >= rlvl->brx || rlvl->tly >= rlvl->bry) {
rlvl->numhprcs = 0;
rlvl->numvprcs = 0;
rlvl->numprcs = 0;
return rlvl;
}
rlvl->numbands = (!rlvlno) ? 1 : 3;
rlvl->prcwidthexpn = cp->tccp.prcwidthexpns[rlvlno];
rlvl->prcheightexpn = cp->tccp.prcheightexpns[rlvlno];
if (!rlvlno) {
rlvl->cbgwidthexpn = rlvl->prcwidthexpn;
rlvl->cbgheightexpn = rlvl->prcheightexpn;
} else {
rlvl->cbgwidthexpn = rlvl->prcwidthexpn - 1;
rlvl->cbgheightexpn = rlvl->prcheightexpn - 1;
}
rlvl->cblkwidthexpn = JAS_MIN(cp->tccp.cblkwidthexpn, rlvl->cbgwidthexpn);
rlvl->cblkheightexpn = JAS_MIN(cp->tccp.cblkheightexpn, rlvl->cbgheightexpn);
/* Compute the number of precincts. */
tlprctlx = JPC_FLOORTOMULTPOW2(rlvl->tlx, rlvl->prcwidthexpn);
tlprctly = JPC_FLOORTOMULTPOW2(rlvl->tly, rlvl->prcheightexpn);
brprcbrx = JPC_CEILTOMULTPOW2(rlvl->brx, rlvl->prcwidthexpn);
brprcbry = JPC_CEILTOMULTPOW2(rlvl->bry, rlvl->prcheightexpn);
rlvl->numhprcs = JPC_FLOORDIVPOW2(brprcbrx - tlprctlx, rlvl->prcwidthexpn);
rlvl->numvprcs = JPC_FLOORDIVPOW2(brprcbry - tlprctly, rlvl->prcheightexpn);
rlvl->numprcs = rlvl->numhprcs * rlvl->numvprcs;
if (!(rlvl->bands = jas_alloc2(rlvl->numbands, sizeof(jpc_enc_band_t)))) {
goto error;
}
for (bandno = 0, band = rlvl->bands; bandno < rlvl->numbands;
++bandno, ++band) {
band->prcs = 0;
band->data = 0;
band->rlvl = rlvl;
}
for (bandno = 0, band = rlvl->bands; bandno < rlvl->numbands;
++bandno, ++band) {
if (!band_create(band, cp, rlvl, bandinfos)) {
goto error;
}
}
return rlvl;
error:
rlvl_destroy(rlvl);
return 0;
} | 0 | [
"CWE-189"
]
| jasper | 3c55b399c36ef46befcb21e4ebc4799367f89684 | 77,257,229,303,878,300,000,000,000,000,000,000,000 | 80 | At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems. |
send_newstyle_option_reply_info_export (struct connection *conn,
uint32_t option, uint32_t reply,
uint16_t info)
{
struct fixed_new_option_reply fixed_new_option_reply;
struct fixed_new_option_reply_info_export export;
fixed_new_option_reply.magic = htobe64 (NBD_REP_MAGIC);
fixed_new_option_reply.option = htobe32 (option);
fixed_new_option_reply.reply = htobe32 (reply);
fixed_new_option_reply.replylen = htobe32 (sizeof export);
export.info = htobe16 (info);
export.exportsize = htobe64 (conn->exportsize);
export.eflags = htobe16 (conn->eflags);
if (conn->send (conn,
&fixed_new_option_reply,
sizeof fixed_new_option_reply, SEND_MORE) == -1 ||
conn->send (conn, &export, sizeof export, 0) == -1) {
nbdkit_error ("write: %s: %m", name_of_nbd_opt (option));
return -1;
}
return 0;
} | 0 | [
"CWE-406"
]
| nbdkit | e06cde00659ff97182173d0e33fff784041bcb4a | 157,787,393,162,558,590,000,000,000,000,000,000,000 | 25 | server: Wait until handshake complete before calling .open callback
Currently we call the plugin .open callback as soon as we receive a
TCP connection:
$ nbdkit -fv --tls=require --tls-certificates=tests/pki null \
--run "telnet localhost 10809"
[...]
Trying ::1...
Connected to localhost.
Escape character is '^]'.
nbdkit: debug: accepted connection
nbdkit: debug: null: open readonly=0 ◀ NOTE
nbdkit: null[1]: debug: newstyle negotiation: flags: global 0x3
NBDMAGICIHAVEOPT
In plugins such as curl, guestfs, ssh, vddk and others we do a
considerable amount of work in the .open callback (such as making a
remote connection or launching an appliance). Therefore we are
providing an easy Denial of Service / Amplification Attack for
unauthorized clients to cause a lot of work to be done for only the
cost of a simple TCP 3 way handshake.
This commit moves the call to the .open callback after the NBD
handshake has mostly completed. In particular TLS authentication must
be complete before we will call into the plugin.
It is unlikely that there are plugins which really depend on the
current behaviour of .open (which I found surprising even though I
guess I must have written it). If there are then we could add a new
.connect callback or similar to allow plugins to get control at the
earlier point in the connection.
After this change you can see that the .open callback is not called
from just a simple TCP connection:
$ ./nbdkit -fv --tls=require --tls-certificates=tests/pki null \
--run "telnet localhost 10809"
[...]
Trying ::1...
Connected to localhost.
Escape character is '^]'.
nbdkit: debug: accepted connection
nbdkit: null[1]: debug: newstyle negotiation: flags: global 0x3
NBDMAGICIHAVEOPT
xx
nbdkit: null[1]: debug: newstyle negotiation: client flags: 0xd0a7878
nbdkit: null[1]: error: client requested unknown flags 0xd0a7878
Connection closed by foreign host.
nbdkit: debug: null: unload plugin
Signed-off-by: Richard W.M. Jones <[email protected]>
(cherry picked from commit c05686f9577fa91b6a3a4d8c065954ca6fc3fd62) |
static int hva_to_pfn_remapped(struct vm_area_struct *vma,
unsigned long addr, bool *async,
bool write_fault, bool *writable,
kvm_pfn_t *p_pfn)
{
unsigned long pfn;
int r;
r = follow_pfn(vma, addr, &pfn);
if (r) {
/*
* get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
* not call the fault handler, so do it here.
*/
bool unlocked = false;
r = fixup_user_fault(current, current->mm, addr,
(write_fault ? FAULT_FLAG_WRITE : 0),
&unlocked);
if (unlocked)
return -EAGAIN;
if (r)
return r;
r = follow_pfn(vma, addr, &pfn);
if (r)
return r;
}
if (writable)
*writable = true;
/*
* Get a reference here because callers of *hva_to_pfn* and
* *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
* returned pfn. This is only needed if the VMA has VM_MIXEDMAP
* set, but the kvm_get_pfn/kvm_release_pfn_clean pair will
* simply do nothing for reserved pfns.
*
* Whoever called remap_pfn_range is also going to call e.g.
* unmap_mapping_range before the underlying pages are freed,
* causing a call to our MMU notifier.
*/
kvm_get_pfn(pfn);
*p_pfn = pfn;
return 0;
} | 0 | [
"CWE-416"
]
| linux | 0774a964ef561b7170d8d1b1bfe6f88002b6d219 | 165,521,394,941,430,060,000,000,000,000,000,000,000 | 48 | KVM: Fix out of range accesses to memslots
Reset the LRU slot if it becomes invalid when deleting a memslot to fix
an out-of-bounds/use-after-free access when searching through memslots.
Explicitly check for there being no used slots in search_memslots(), and
in the caller of s390's approximation variant.
Fixes: 36947254e5f9 ("KVM: Dynamically size memslot array based on number of used slots")
Reported-by: Qian Cai <[email protected]>
Cc: Peter Xu <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
Message-Id: <[email protected]>
Acked-by: Christian Borntraeger <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
static void clear_atexit(void) {
EUID_ROOT();
delete_run_files(getpid());
} | 0 | [
"CWE-269",
"CWE-94"
]
| firejail | 27cde3d7d1e4e16d4190932347c7151dc2a84c50 | 259,185,045,015,653,650,000,000,000,000,000,000,000 | 4 | fixing CVE-2022-31214 |
static void __exit fuse_exit(void)
{
pr_debug("exit\n");
fuse_ctl_cleanup();
fuse_sysfs_cleanup();
fuse_fs_cleanup();
fuse_dev_cleanup();
} | 0 | [
"CWE-459"
]
| linux | 5d069dbe8aaf2a197142558b6fb2978189ba3454 | 99,780,738,052,325,700,000,000,000,000,000,000,000 | 9 | fuse: fix bad inode
Jan Kara's analysis of the syzbot report (edited):
The reproducer opens a directory on FUSE filesystem, it then attaches
dnotify mark to the open directory. After that a fuse_do_getattr() call
finds that attributes returned by the server are inconsistent, and calls
make_bad_inode() which, among other things does:
inode->i_mode = S_IFREG;
This then confuses dnotify which doesn't tear down its structures
properly and eventually crashes.
Avoid calling make_bad_inode() on a live inode: switch to a private flag on
the fuse inode. Also add the test to ops which the bad_inode_ops would
have caught.
This bug goes back to the initial merge of fuse in 2.6.14...
Reported-by: [email protected]
Signed-off-by: Miklos Szeredi <[email protected]>
Tested-by: Jan Kara <[email protected]>
Cc: <[email protected]> |
static bool ldm_parse_vblk (const u8 *buf, int len, struct vblk *vb)
{
bool result = false;
int r_objid;
BUG_ON (!buf || !vb);
r_objid = ldm_relative (buf, len, 0x18, 0);
if (r_objid < 0) {
ldm_error ("VBLK header is corrupt.");
return false;
}
vb->flags = buf[0x12];
vb->type = buf[0x13];
vb->obj_id = ldm_get_vnum (buf + 0x18);
ldm_get_vstr (buf+0x18+r_objid, vb->name, sizeof (vb->name));
switch (vb->type) {
case VBLK_CMP3: result = ldm_parse_cmp3 (buf, len, vb); break;
case VBLK_DSK3: result = ldm_parse_dsk3 (buf, len, vb); break;
case VBLK_DSK4: result = ldm_parse_dsk4 (buf, len, vb); break;
case VBLK_DGR3: result = ldm_parse_dgr3 (buf, len, vb); break;
case VBLK_DGR4: result = ldm_parse_dgr4 (buf, len, vb); break;
case VBLK_PRT3: result = ldm_parse_prt3 (buf, len, vb); break;
case VBLK_VOL5: result = ldm_parse_vol5 (buf, len, vb); break;
}
if (result)
ldm_debug ("Parsed VBLK 0x%llx (type: 0x%02x) ok.",
(unsigned long long) vb->obj_id, vb->type);
else
ldm_error ("Failed to parse VBLK 0x%llx (type: 0x%02x).",
(unsigned long long) vb->obj_id, vb->type);
return result;
} | 0 | [
"CWE-119",
"CWE-787"
]
| linux | cae13fe4cc3f24820ffb990c09110626837e85d4 | 28,571,658,144,962,970,000,000,000,000,000,000,000 | 37 | Fix for buffer overflow in ldm_frag_add not sufficient
As Ben Hutchings discovered [1], the patch for CVE-2011-1017 (buffer
overflow in ldm_frag_add) is not sufficient. The original patch in
commit c340b1d64000 ("fs/partitions/ldm.c: fix oops caused by corrupted
partition table") does not consider that, for subsequent fragments,
previously allocated memory is used.
[1] http://lkml.org/lkml/2011/5/6/407
Reported-by: Ben Hutchings <[email protected]>
Signed-off-by: Timo Warns <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static int bad_file_mmap(struct file *file, struct vm_area_struct *vma)
{
return -EIO;
} | 0 | []
| linux-2.6 | be6aab0e9fa6d3c6d75aa1e38ac972d8b4ee82b8 | 285,683,089,929,202,870,000,000,000,000,000,000,000 | 4 | [PATCH] fix memory corruption from misinterpreted bad_inode_ops return values
CVE-2006-5753 is for a case where an inode can be marked bad, switching
the ops to bad_inode_ops, which are all connected as:
static int return_EIO(void)
{
return -EIO;
}
#define EIO_ERROR ((void *) (return_EIO))
static struct inode_operations bad_inode_ops =
{
.create = bad_inode_create
...etc...
The problem here is that the void cast causes return types to not be
promoted, and for ops such as listxattr which expect more than 32 bits of
return value, the 32-bit -EIO is interpreted as a large positive 64-bit
number, i.e. 0x00000000fffffffa instead of 0xfffffffa.
This goes particularly badly when the return value is taken as a number of
bytes to copy into, say, a user's buffer for example...
I originally had coded up the fix by creating a return_EIO_<TYPE> macro
for each return type, like this:
static int return_EIO_int(void)
{
return -EIO;
}
#define EIO_ERROR_INT ((void *) (return_EIO_int))
static struct inode_operations bad_inode_ops =
{
.create = EIO_ERROR_INT,
...etc...
but Al felt that it was probably better to create an EIO-returner for each
actual op signature. Since so few ops share a signature, I just went ahead
& created an EIO function for each individual file & inode op that returns
a value.
Signed-off-by: Eric Sandeen <[email protected]>
Cc: Al Viro <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
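The truncation problem described above can be reproduced in plain C. The sketch below is not kernel code; return_EIO mirrors the old stub, and listxattr_like is an invented pointer type standing in for an op that returns a 64-bit value. Calling a function through an incompatible pointer type is undefined behaviour, which is exactly why the kludge misbehaved; on x86-64 the 32-bit -EIO typically shows up as a large positive 64-bit number.
#include <stdio.h>
#define EIO 5
static int return_EIO(void)
{
    return -EIO;
}
int main(void)
{
    /* the original kludge cast one stub through (void *) to every op type */
    long (*listxattr_like)(void) = (long (*)(void)) return_EIO;
    long ret = listxattr_like();              /* undefined behaviour by design */
    printf("seen by caller: %ld (0x%lx)\n", ret, ret); /* often 4294967291 */
    printf("intended value: %d\n", -EIO);
    return 0;
}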
int dccp_feat_insert_opts(struct dccp_sock *dp, struct dccp_request_sock *dreq,
struct sk_buff *skb)
{
struct list_head *fn = dreq ? &dreq->dreq_featneg : &dp->dccps_featneg;
struct dccp_feat_entry *pos, *next;
u8 opt, type, len, *ptr, nn_in_nbo[DCCP_OPTVAL_MAXLEN];
bool rpt;
/* put entries into @skb in the order they appear in the list */
list_for_each_entry_safe_reverse(pos, next, fn, node) {
opt = dccp_feat_genopt(pos);
type = dccp_feat_type(pos->feat_num);
rpt = false;
if (pos->empty_confirm) {
len = 0;
ptr = NULL;
} else {
if (type == FEAT_SP) {
len = pos->val.sp.len;
ptr = pos->val.sp.vec;
rpt = pos->needs_confirm;
} else if (type == FEAT_NN) {
len = dccp_feat_valid_nn_length(pos->feat_num);
ptr = nn_in_nbo;
dccp_encode_value_var(pos->val.nn, ptr, len);
} else {
DCCP_BUG("unknown feature %u", pos->feat_num);
return -1;
}
}
dccp_feat_print_opt(opt, pos->feat_num, ptr, len, 0);
if (dccp_insert_fn_opt(skb, opt, pos->feat_num, ptr, len, rpt))
return -1;
if (pos->needs_mandatory && dccp_insert_option_mandatory(skb))
return -1;
if (skb->sk->sk_state == DCCP_OPEN &&
(opt == DCCPO_CONFIRM_R || opt == DCCPO_CONFIRM_L)) {
/*
* Confirms don't get retransmitted (6.6.3) once the
* connection is in state OPEN
*/
dccp_feat_list_pop(pos);
} else {
/*
* Enter CHANGING after transmitting the Change
* option (6.6.2).
*/
if (pos->state == FEAT_INITIALISING)
pos->state = FEAT_CHANGING;
}
}
return 0;
} | 0 | [
"CWE-401"
]
| linux | 1d3ff0950e2b40dc861b1739029649d03f591820 | 99,345,695,786,952,920,000,000,000,000,000,000,000 | 56 | dccp: Fix memleak in __feat_register_sp
If dccp_feat_push_change fails, we forget to free the memory
which is allocated by kmemdup in dccp_feat_clone_sp_val.
Reported-by: Hulk Robot <[email protected]>
Fixes: e8ef967a54f4 ("dccp: Registration routines for changing feature values")
Reviewed-by: Mukesh Ojha <[email protected]>
Signed-off-by: YueHaibing <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
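A userspace sketch of the leak pattern this message describes, under the assumption that the duplicated value is owned by the caller until the push succeeds. dup_val, push_change and register_sp are invented stand-ins for kmemdup, dccp_feat_push_change and the registration routine.
#include <stdlib.h>
#include <string.h>
static void *dup_val(const void *src, size_t len)   /* analogous to kmemdup() */
{
    void *p = malloc(len);
    if (p)
        memcpy(p, src, len);
    return p;
}
static int push_change(void *val)                   /* stand-in for dccp_feat_push_change */
{
    (void)val;
    return -1;                                      /* pretend the push failed */
}
static int register_sp(const void *val, size_t len)
{
    void *copy = dup_val(val, len);
    if (!copy)
        return -1;
    if (push_change(copy) != 0) {
        free(copy);                                 /* the fix: release the copy on failure */
        return -1;
    }
    return 0;
}
int main(void)
{
    char v[4] = {1, 2, 3, 4};
    return register_sp(v, sizeof v);
}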
static MagickBooleanType GetXMPProperty(const Image *image,const char *property)
{
char
*xmp_profile;
const char
*content;
const StringInfo
*profile;
ExceptionInfo
*exception;
MagickBooleanType
status;
register const char
*p;
XMLTreeInfo
*child,
*description,
*node,
*rdf,
*xmp;
profile=GetImageProfile(image,"xmp");
if (profile == (StringInfo *) NULL)
return(MagickFalse);
if (GetStringInfoLength(profile) < 17)
return(MagickFalse);
if ((property == (const char *) NULL) || (*property == '\0'))
return(MagickFalse);
xmp_profile=StringInfoToString(profile);
if (xmp_profile == (char *) NULL)
return(MagickFalse);
if (ValidateXMPProfile(xmp_profile,GetStringInfoLength(profile)) == MagickFalse)
{
xmp_profile=DestroyString(xmp_profile);
return(MagickFalse);
}
for (p=xmp_profile; *p != '\0'; p++)
if ((*p == '<') && (*(p+1) == 'x'))
break;
exception=AcquireExceptionInfo();
xmp=NewXMLTree((char *) p,exception);
xmp_profile=DestroyString(xmp_profile);
exception=DestroyExceptionInfo(exception);
if (xmp == (XMLTreeInfo *) NULL)
return(MagickFalse);
status=MagickFalse;
rdf=GetXMLTreeChild(xmp,"rdf:RDF");
if (rdf != (XMLTreeInfo *) NULL)
{
if (image->properties == (void *) NULL)
((Image *) image)->properties=NewSplayTree(CompareSplayTreeString,
RelinquishMagickMemory,RelinquishMagickMemory);
description=GetXMLTreeChild(rdf,"rdf:Description");
while (description != (XMLTreeInfo *) NULL)
{
char
*xmp_namespace;
node=GetXMLTreeChild(description,(const char *) NULL);
while (node != (XMLTreeInfo *) NULL)
{
child=GetXMLTreeChild(node,(const char *) NULL);
content=GetXMLTreeContent(node);
if ((child == (XMLTreeInfo *) NULL) &&
(SkipXMPValue(content) == MagickFalse))
{
xmp_namespace=ConstantString(GetXMLTreeTag(node));
(void) SubstituteString(&xmp_namespace,"exif:","xmp:");
(void) AddValueToSplayTree((SplayTreeInfo *) image->properties,
xmp_namespace,ConstantString(content));
}
while (child != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(child);
if (SkipXMPValue(content) == MagickFalse)
{
xmp_namespace=ConstantString(GetXMLTreeTag(node));
(void) SubstituteString(&xmp_namespace,"exif:","xmp:");
(void) AddValueToSplayTree((SplayTreeInfo *) image->properties,
xmp_namespace,ConstantString(content));
}
child=GetXMLTreeSibling(child);
}
node=GetXMLTreeSibling(node);
}
description=GetNextXMLTreeTag(description);
}
}
xmp=DestroyXMLTree(xmp);
return(status);
} | 0 | [
"CWE-476"
]
| ImageMagick | 2c75f301d9ac84f91071393b02d8c88c8341c91c | 297,741,043,506,512,440,000,000,000,000,000,000,000 | 97 | https://github.com/ImageMagick/ImageMagick/issues/1225 |
static int init_ssl_connection(SSL *con)
{
int i;
const char *str;
X509 *peer;
long verify_error;
MS_STATIC char buf[BUFSIZ];
#ifndef OPENSSL_NO_KRB5
char *client_princ;
#endif
#if !defined(OPENSSL_NO_TLSEXT) && !defined(OPENSSL_NO_NEXTPROTONEG)
const unsigned char *next_proto_neg;
unsigned next_proto_neg_len;
#endif
unsigned char *exportedkeymat;
i=SSL_accept(con);
#ifndef OPENSSL_NO_SRP
while (i <= 0 && SSL_get_error(con,i) == SSL_ERROR_WANT_X509_LOOKUP)
{
BIO_printf(bio_s_out,"LOOKUP during accept %s\n",srp_callback_parm.login);
srp_callback_parm.user = SRP_VBASE_get_by_user(srp_callback_parm.vb, srp_callback_parm.login);
if (srp_callback_parm.user)
BIO_printf(bio_s_out,"LOOKUP done %s\n",srp_callback_parm.user->info);
else
BIO_printf(bio_s_out,"LOOKUP not successful\n");
i=SSL_accept(con);
}
#endif
if (i <= 0)
{
if (BIO_sock_should_retry(i))
{
BIO_printf(bio_s_out,"DELAY\n");
return(1);
}
BIO_printf(bio_err,"ERROR\n");
verify_error=SSL_get_verify_result(con);
if (verify_error != X509_V_OK)
{
BIO_printf(bio_err,"verify error:%s\n",
X509_verify_cert_error_string(verify_error));
}
/* Always print any error messages */
ERR_print_errors(bio_err);
return(0);
}
if (s_brief)
print_ssl_summary(bio_err, con);
PEM_write_bio_SSL_SESSION(bio_s_out,SSL_get_session(con));
peer=SSL_get_peer_certificate(con);
if (peer != NULL)
{
BIO_printf(bio_s_out,"Client certificate\n");
PEM_write_bio_X509(bio_s_out,peer);
X509_NAME_oneline(X509_get_subject_name(peer),buf,sizeof buf);
BIO_printf(bio_s_out,"subject=%s\n",buf);
X509_NAME_oneline(X509_get_issuer_name(peer),buf,sizeof buf);
BIO_printf(bio_s_out,"issuer=%s\n",buf);
X509_free(peer);
}
if (SSL_get_shared_ciphers(con,buf,sizeof buf) != NULL)
BIO_printf(bio_s_out,"Shared ciphers:%s\n",buf);
str=SSL_CIPHER_get_name(SSL_get_current_cipher(con));
ssl_print_sigalgs(bio_s_out, con);
ssl_print_curves(bio_s_out, con, 0);
BIO_printf(bio_s_out,"CIPHER is %s\n",(str != NULL)?str:"(NONE)");
#if !defined(OPENSSL_NO_TLSEXT) && !defined(OPENSSL_NO_NEXTPROTONEG)
SSL_get0_next_proto_negotiated(con, &next_proto_neg, &next_proto_neg_len);
if (next_proto_neg)
{
BIO_printf(bio_s_out,"NEXTPROTO is ");
BIO_write(bio_s_out, next_proto_neg, next_proto_neg_len);
BIO_printf(bio_s_out, "\n");
}
#endif
{
SRTP_PROTECTION_PROFILE *srtp_profile
= SSL_get_selected_srtp_profile(con);
if(srtp_profile)
BIO_printf(bio_s_out,"SRTP Extension negotiated, profile=%s\n",
srtp_profile->name);
}
if (SSL_cache_hit(con)) BIO_printf(bio_s_out,"Reused session-id\n");
if (SSL_ctrl(con,SSL_CTRL_GET_FLAGS,0,NULL) &
TLS1_FLAGS_TLS_PADDING_BUG)
BIO_printf(bio_s_out,
"Peer has incorrect TLSv1 block padding\n");
#ifndef OPENSSL_NO_KRB5
client_princ = kssl_ctx_get0_client_princ(SSL_get0_kssl_ctx(con));
if (client_princ != NULL)
{
BIO_printf(bio_s_out,"Kerberos peer principal is %s\n",
client_princ);
}
#endif /* OPENSSL_NO_KRB5 */
BIO_printf(bio_s_out, "Secure Renegotiation IS%s supported\n",
SSL_get_secure_renegotiation_support(con) ? "" : " NOT");
if (keymatexportlabel != NULL)
{
BIO_printf(bio_s_out, "Keying material exporter:\n");
BIO_printf(bio_s_out, " Label: '%s'\n", keymatexportlabel);
BIO_printf(bio_s_out, " Length: %i bytes\n",
keymatexportlen);
exportedkeymat = OPENSSL_malloc(keymatexportlen);
if (exportedkeymat != NULL)
{
if (!SSL_export_keying_material(con, exportedkeymat,
keymatexportlen,
keymatexportlabel,
strlen(keymatexportlabel),
NULL, 0, 0))
{
BIO_printf(bio_s_out, " Error\n");
}
else
{
BIO_printf(bio_s_out, " Keying material: ");
for (i=0; i<keymatexportlen; i++)
BIO_printf(bio_s_out, "%02X",
exportedkeymat[i]);
BIO_printf(bio_s_out, "\n");
}
OPENSSL_free(exportedkeymat);
}
}
return(1);
} | 1 | []
| openssl | a70da5b3ecc3160368529677006801c58cb369db | 98,312,292,479,521,450,000,000,000,000,000,000,000 | 137 | New functions to check a hostname email or IP address against a
certificate. Add options to s_client, s_server and x509 utilities
to print results of checks. |
void dtls1_clear(SSL *s)
{
pqueue unprocessed_rcds;
pqueue processed_rcds;
pqueue buffered_messages;
pqueue sent_messages;
pqueue buffered_app_data;
unsigned int mtu;
if (s->d1)
{
unprocessed_rcds = s->d1->unprocessed_rcds.q;
processed_rcds = s->d1->processed_rcds.q;
buffered_messages = s->d1->buffered_messages;
sent_messages = s->d1->sent_messages;
buffered_app_data = s->d1->buffered_app_data.q;
mtu = s->d1->mtu;
dtls1_clear_queues(s);
pq_64bit_free(&(s->d1->bitmap.map));
pq_64bit_free(&(s->d1->bitmap.max_seq_num));
pq_64bit_free(&(s->d1->next_bitmap.map));
pq_64bit_free(&(s->d1->next_bitmap.max_seq_num));
memset(s->d1, 0, sizeof(*(s->d1)));
if (s->server)
{
s->d1->cookie_len = sizeof(s->d1->cookie);
}
if (SSL_get_options(s) & SSL_OP_NO_QUERY_MTU)
{
s->d1->mtu = mtu;
}
s->d1->unprocessed_rcds.q = unprocessed_rcds;
s->d1->processed_rcds.q = processed_rcds;
s->d1->buffered_messages = buffered_messages;
s->d1->sent_messages = sent_messages;
s->d1->buffered_app_data.q = buffered_app_data;
#if defined(OPENSSL_SYS_VMS) || defined(VMS_TEST)
s->d1->bitmap.length=64;
#else
s->d1->bitmap.length=sizeof(s->d1->bitmap.map) * 8;
#endif
pq_64bit_init(&(s->d1->bitmap.map));
pq_64bit_init(&(s->d1->bitmap.max_seq_num));
s->d1->next_bitmap.length = s->d1->bitmap.length;
pq_64bit_init(&(s->d1->next_bitmap.map));
pq_64bit_init(&(s->d1->next_bitmap.max_seq_num));
}
ssl3_clear(s);
if (s->options & SSL_OP_CISCO_ANYCONNECT)
s->version=DTLS1_BAD_VER;
else
s->version=DTLS1_VERSION;
} | 0 | [
"CWE-310"
]
| openssl | c6a876473cbff0fd323c8abcaace98ee2d21863d | 279,519,259,010,879,100,000,000,000,000,000,000,000 | 63 | Support TLS_FALLBACK_SCSV.
Reviewed-by: Stephen Henson <[email protected]> |
void Sys_Sleep( int msec )
{
if( msec == 0 )
return;
#ifdef DEDICATED
if( msec < 0 )
WaitForSingleObject( GetStdHandle( STD_INPUT_HANDLE ), INFINITE );
else
WaitForSingleObject( GetStdHandle( STD_INPUT_HANDLE ), msec );
#else
// Client Sys_Sleep doesn't support waiting on stdin
if( msec < 0 )
return;
Sleep( msec );
#endif
} | 0 | [
"CWE-59"
]
| ioq3 | b5acc31a4da72cc3a4a6d88facb15b6214d745c6 | 213,169,263,515,342,160,000,000,000,000,000,000,000 | 18 | CVE-2012-3345 |
int SSL_set1_param(SSL *ssl, X509_VERIFY_PARAM *vpm)
{
return X509_VERIFY_PARAM_set1(ssl->param, vpm);
} | 0 | []
| openssl | ee2ffc279417f15fef3b1073c7dc81a908991516 | 94,914,511,582,351,500,000,000,000,000,000,000,000 | 4 | Add Next Protocol Negotiation. |
static void qemu_input_queue_sync(struct QemuInputEventQueueHead *queue)
{
QemuInputEventQueue *item = g_new0(QemuInputEventQueue, 1);
item->type = QEMU_INPUT_QUEUE_SYNC;
QTAILQ_INSERT_TAIL(queue, item, node);
} | 1 | [
"CWE-772"
]
| qemu | fa18f36a461984eae50ab957e47ec78dae3c14fc | 126,148,791,878,515,320,000,000,000,000,000,000,000 | 7 | input: limit kbd queue depth
Apply a limit to the number of items we accept into the keyboard queue.
Impact: Without this limit vnc clients can exhaust host memory by
sending keyboard events faster than qemu feeds them to the guest.
Fixes: CVE-2017-8379
Cc: P J P <[email protected]>
Cc: Huawei PSIRT <[email protected]>
Reported-by: [email protected]
Signed-off-by: Gerd Hoffmann <[email protected]>
Message-id: [email protected] |
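A rough sketch of the mitigation described above, not QEMU code: stop queueing once a fixed depth is reached so a fast sender cannot grow the queue without bound. QUEUE_MAX and the structures are assumptions for the example; QEMU's actual limit and types differ.
#include <stdlib.h>
#define QUEUE_MAX 256                  /* assumed cap for the sketch */
struct item {
    struct item *next;
    int keycode;
};
struct queue {
    struct item *head, *tail;
    unsigned int count;
};
static int queue_put(struct queue *q, int keycode)
{
    struct item *it;
    if (q->count >= QUEUE_MAX)         /* drop instead of allocating: bounds memory use */
        return -1;
    it = calloc(1, sizeof(*it));
    if (!it)
        return -1;
    it->keycode = keycode;
    if (q->tail)
        q->tail->next = it;
    else
        q->head = it;
    q->tail = it;
    q->count++;
    return 0;
}
int main(void)
{
    struct queue q = {0};
    for (int i = 0; i < 1000; i++)
        queue_put(&q, i);              /* only the first QUEUE_MAX succeed */
    while (q.head) {                   /* drain to keep the demo leak-free */
        struct item *it = q.head;
        q.head = it->next;
        free(it);
    }
    return 0;
}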
struct file *filp_open(const char *filename, int flags, int mode)
{
struct open_flags op;
int lookup = build_open_flags(flags, mode, &op);
return do_filp_open(AT_FDCWD, filename, &op, lookup);
} | 0 | [
"CWE-732"
]
| linux-stable | e57712ebebbb9db7d8dcef216437b3171ddcf115 | 275,446,384,112,806,180,000,000,000,000,000,000,000 | 6 | merge fchmod() and fchmodat() guts, kill ancient broken kludge
The kludge in question is undocumented and doesn't work for 32bit
binaries on amd64, sparc64 and s390. Passing (mode_t)-1 as
mode had (since 0.99.14v and contrary to behaviour of any
other Unix, prescriptions of POSIX, SuS and our own manpages)
was kinda-sorta no-op. Note that any software relying on
that (and looking for examples shows none) would be visibly
broken on sparc64, where practically all userland is built
32bit. No such complaints noticed...
Signed-off-by: Al Viro <[email protected]> |
virNodeDeviceCapsListExport(virNodeDeviceDefPtr def,
virNodeDevCapType **list)
{
virNodeDevCapsDefPtr caps = NULL;
virNodeDevCapType *tmp = NULL;
bool want_list = !!list;
int ncaps = 0;
int ret = -1;
#define MAYBE_ADD_CAP(cap) \
do { \
if (want_list) \
tmp[ncaps] = cap; \
} while (0)
if (virNodeDeviceUpdateCaps(def) < 0)
goto cleanup;
if (want_list)
tmp = g_new0(virNodeDevCapType, VIR_NODE_DEV_CAP_LAST - 1);
for (caps = def->caps; caps; caps = caps->next) {
unsigned int flags;
MAYBE_ADD_CAP(caps->data.type);
ncaps++;
/* check nested caps for a given type as well */
if (caps->data.type == VIR_NODE_DEV_CAP_SCSI_HOST) {
flags = caps->data.scsi_host.flags;
if (flags & VIR_NODE_DEV_CAP_FLAG_HBA_FC_HOST) {
MAYBE_ADD_CAP(VIR_NODE_DEV_CAP_FC_HOST);
ncaps++;
}
if (flags & VIR_NODE_DEV_CAP_FLAG_HBA_VPORT_OPS) {
MAYBE_ADD_CAP(VIR_NODE_DEV_CAP_VPORTS);
ncaps++;
}
}
if (caps->data.type == VIR_NODE_DEV_CAP_PCI_DEV) {
flags = caps->data.pci_dev.flags;
if (flags & VIR_NODE_DEV_CAP_FLAG_PCI_MDEV) {
MAYBE_ADD_CAP(VIR_NODE_DEV_CAP_MDEV_TYPES);
ncaps++;
}
}
if (caps->data.type == VIR_NODE_DEV_CAP_CSS_DEV) {
flags = caps->data.ccw_dev.flags;
if (flags & VIR_NODE_DEV_CAP_FLAG_CSS_MDEV) {
MAYBE_ADD_CAP(VIR_NODE_DEV_CAP_MDEV_TYPES);
ncaps++;
}
}
}
#undef MAYBE_ADD_CAP
if (want_list)
*list = g_steal_pointer(&tmp);
ret = ncaps;
cleanup:
VIR_FREE(tmp);
return ret;
} | 0 | [
"CWE-119"
]
| libvirt | 4c4d0e2da07b5a035b26a0ff13ec27070f7c7b1a | 74,076,090,729,029,260,000,000,000,000,000,000,000 | 70 | conf: Fix segfault when parsing mdev types
Commit f1b0890 introduced a potential crash due to incorrect operator
precedence when accessing an element from a pointer to an array.
Backtrace below:
#0 virNodeDeviceGetMdevTypesCaps (sysfspath=0x7fff801661e0 "/sys/devices/pci0000:00/0000:00:02.0", mdev_types=0x7fff801c9b40, nmdev_types=0x7fff801c9b48) at ../src/conf/node_device_conf.c:2676
#1 0x00007ffff7caf53d in virNodeDeviceGetPCIDynamicCaps (sysfsPath=0x7fff801661e0 "/sys/devices/pci0000:00/0000:00:02.0", pci_dev=0x7fff801c9ac8) at ../src/conf/node_device_conf.c:2705
#2 0x00007ffff7cae38f in virNodeDeviceUpdateCaps (def=0x7fff80168a10) at ../src/conf/node_device_conf.c:2342
#3 0x00007ffff7cb11c0 in virNodeDeviceObjMatch (obj=0x7fff84002e50, flags=0) at ../src/conf/virnodedeviceobj.c:850
#4 0x00007ffff7cb153d in virNodeDeviceObjListExportCallback (payload=0x7fff84002e50, name=0x7fff801cbc20 "pci_0000_00_02_0", opaque=0x7fffe2ffc6a0) at ../src/conf/virnodedeviceobj.c:909
#5 0x00007ffff7b69146 in virHashForEach (table=0x7fff9814b700 = {...}, iter=0x7ffff7cb149e <virNodeDeviceObjListExportCallback>, opaque=0x7fffe2ffc6a0) at ../src/util/virhash.c:394
#6 0x00007ffff7cb1694 in virNodeDeviceObjListExport (conn=0x7fff98013170, devs=0x7fff98154430, devices=0x7fffe2ffc798, filter=0x7ffff7cf47a1 <virConnectListAllNodeDevicesCheckACL>, flags=0)
at ../src/conf/virnodedeviceobj.c:943
#7 0x00007fffe00694b2 in nodeConnectListAllNodeDevices (conn=0x7fff98013170, devices=0x7fffe2ffc798, flags=0) at ../src/node_device/node_device_driver.c:228
#8 0x00007ffff7e703aa in virConnectListAllNodeDevices (conn=0x7fff98013170, devices=0x7fffe2ffc798, flags=0) at ../src/libvirt-nodedev.c:130
#9 0x000055555557f796 in remoteDispatchConnectListAllNodeDevices (server=0x555555627080, client=0x5555556bf050, msg=0x5555556c0000, rerr=0x7fffe2ffc8a0, args=0x7fffd4008470, ret=0x7fffd40084e0)
at src/remote/remote_daemon_dispatch_stubs.h:1613
#10 0x000055555557f6f9 in remoteDispatchConnectListAllNodeDevicesHelper (server=0x555555627080, client=0x5555556bf050, msg=0x5555556c0000, rerr=0x7fffe2ffc8a0, args=0x7fffd4008470, ret=0x7fffd40084e0)
at src/remote/remote_daemon_dispatch_stubs.h:1591
#11 0x00007ffff7ce9542 in virNetServerProgramDispatchCall (prog=0x555555690c10, server=0x555555627080, client=0x5555556bf050, msg=0x5555556c0000) at ../src/rpc/virnetserverprogram.c:428
#12 0x00007ffff7ce90bd in virNetServerProgramDispatch (prog=0x555555690c10, server=0x555555627080, client=0x5555556bf050, msg=0x5555556c0000) at ../src/rpc/virnetserverprogram.c:302
#13 0x00007ffff7cf042b in virNetServerProcessMsg (srv=0x555555627080, client=0x5555556bf050, prog=0x555555690c10, msg=0x5555556c0000) at ../src/rpc/virnetserver.c:137
#14 0x00007ffff7cf04eb in virNetServerHandleJob (jobOpaque=0x5555556b66b0, opaque=0x555555627080) at ../src/rpc/virnetserver.c:154
#15 0x00007ffff7bd912f in virThreadPoolWorker (opaque=0x55555562bc70) at ../src/util/virthreadpool.c:163
#16 0x00007ffff7bd8645 in virThreadHelper (data=0x55555562bc90) at ../src/util/virthread.c:233
#17 0x00007ffff6d90432 in start_thread () at /lib64/libpthread.so.0
#18 0x00007ffff75c5913 in clone () at /lib64/libc.so.6
Signed-off-by: Jonathon Jongsma <[email protected]>
Reviewed-by: Ján Tomko <[email protected]>
Signed-off-by: Ján Tomko <[email protected]> |
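The precedence mistake mentioned in this message can be shown with a few lines of plain C; the variable names below are invented. With a pointer-to-array-of-pointers parameter, indexing binds tighter than dereference, so *types[i] parses as *(types[i]) and walks past the parameter, while (*types)[i] indexes the array that was just allocated.
#include <stdio.h>
#include <stdlib.h>
static void fill(char ***types, int n)
{
    *types = calloc(n, sizeof(char *));   /* hand back an array of n strings */
    for (int i = 0; i < n; i++)
        (*types)[i] = "mdev";             /* correct: parenthesised dereference */
    /* *types[i] = "mdev";                   wrong: parses as *(types[i]) */
}
int main(void)
{
    char **types = NULL;
    fill(&types, 3);
    printf("%s\n", types[1]);
    free(types);
    return 0;
}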
smpl_t aubio_tempo_get_bpm(aubio_tempo_t *o) {
return aubio_beattracking_get_bpm(o->bt);
} | 0 | []
| aubio | b1559f4c9ce2b304d8d27ffdc7128b6795ca82e5 | 290,778,675,722,490,900,000,000,000,000,000,000,000 | 3 | [tempo] fix buffer overflow in method parser |
static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def,
const unsigned char *tag,
struct netlbl_lsm_secattr *secattr)
{
secattr->attr.secid = *(u32 *)&tag[2];
secattr->flags |= NETLBL_SECATTR_SECID;
return 0;
} | 0 | [
"CWE-362"
]
| linux-2.6 | f6d8bd051c391c1c0458a30b2a7abcd939329259 | 276,603,123,501,858,500,000,000,000,000,000,000,000 | 9 | inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We cant insert an rcu_head in struct ip_options since its included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct nouveau_mem *mem = nouveau_mem(reg);
int ret;
ret = nouveau_mem_host(reg, &nvbe->ttm);
if (ret)
return ret;
ret = nouveau_mem_map(mem, &mem->cli->vmm.vmm, &mem->vma[0]);
if (ret) {
nouveau_mem_fini(mem);
return ret;
}
nvbe->mem = mem;
return 0;
} | 0 | []
| linux | 5de5b6ecf97a021f29403aa272cb4e03318ef586 | 175,395,002,335,962,900,000,000,000,000,000,000,000 | 19 | drm/ttm/nouveau: don't call tt destroy callback on alloc failure.
This is confusing, and from my reading of all the drivers only
nouveau got this right.
Just make the API act under driver control of its own allocation
failing, and don't call destroy, if the page table fails to
create there is nothing to cleanup here.
(I'm willing to believe I've missed something here, so please
review deeply).
Reviewed-by: Christian König <[email protected]>
Signed-off-by: Dave Airlie <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected] |
flatpak_dir_get_locale_subpaths (FlatpakDir *self)
{
char **subpaths = flatpak_dir_get_locale_languages (self);
int i;
/* Convert languages to paths */
for (i = 0; subpaths[i] != NULL; i++)
{
char *lang = subpaths[i];
/* For backwards compat with old xa.languages we support the configuration having slashes already */
if (*lang != '/')
{
subpaths[i] = g_strconcat ("/", lang, NULL);
g_free (lang);
}
}
return subpaths;
} | 0 | [
"CWE-668"
]
| flatpak | cd2142888fc4c199723a0dfca1f15ea8788a5483 | 330,272,101,816,209,530,000,000,000,000,000,000,000 | 18 | Don't expose /proc when running apply_extra
As shown by CVE-2019-5736, it is sometimes possible for the sandbox
app to access outside files using /proc/self/exe. This is not
typically an issue for flatpak as the sandbox runs as the user which
has no permissions to e.g. modify the host files.
However, when installing apps using extra-data into the system repo
we *do* actually run a sandbox as root. So, in this case we disable mounting
/proc in the sandbox, which will neuter attacks like this. |
RoleNameIterator AuthorizationSession::getImpersonatedRoleNames() {
return makeRoleNameIterator(_impersonatedRoleNames.begin(), _impersonatedRoleNames.end());
} | 0 | [
"CWE-613"
]
| mongo | db19e7ce84cfd702a4ba9983ee2ea5019f470f82 | 148,383,856,106,428,390,000,000,000,000,000,000,000 | 3 | SERVER-38984 Validate unique User ID on UserCache hit
(cherry picked from commit e55d6e2292e5dbe2f97153251d8193d1cc89f5d7) |
JSString *status_message(JSObject *obj) {
MOZ_ASSERT(is_instance(obj));
return JS::GetReservedSlot(obj, Slots::StatusMessage).toString();
} | 0 | [
"CWE-94"
]
| js-compute-runtime | 65524ffc962644e9fc39f4b368a326b6253912a9 | 66,122,163,163,758,280,000,000,000,000,000,000,000 | 4 | use rangom_get instead of arc4random as arc4random does not work correctly with wizer
wizer causes the seed in arc4random to be the same between executions which is not random |
bool Vers_parse_info::is_end(const char *name) const
{
DBUG_ASSERT(name);
return as_row.end && as_row.end.streq(name);
} | 0 | [
"CWE-416"
]
| server | af810407f78b7f792a9bb8c47c8c532eb3b3a758 | 153,360,848,600,750,960,000,000,000,000,000,000,000 | 5 | MDEV-28098 incorrect key in "dup value" error after long unique
reset errkey after using it, so that it wouldn't affect
the next error message in the next statement |
Bool gf_fs_check_filter_register_cap_ex(const GF_FilterRegister *f_reg, u32 incode, GF_PropertyValue *cap_input, u32 outcode, GF_PropertyValue *cap_output, Bool exact_match_only, Bool out_cap_excluded)
{
u32 j;
u32 has_raw_in = 0;
u32 has_cid_match = 0;
u32 exclude_cid_out = 0;
u32 has_exclude_cid_out = 0;
for (j=0; j<f_reg->nb_caps; j++) {
const GF_FilterCapability *cap = &f_reg->caps[j];
if (!(cap->flags & GF_CAPFLAG_IN_BUNDLE)) {
//CID not excluded, raw in present and CID explicit match or not included in excluded set
if (!exclude_cid_out && has_raw_in && (has_cid_match || (!exact_match_only && has_exclude_cid_out) ) ) {
return GF_TRUE;
}
if (has_raw_in != 2) has_raw_in = 0;
if (has_cid_match != 2) has_cid_match = 0;
if (exclude_cid_out != 2) exclude_cid_out = 0;
if (has_exclude_cid_out != 2) has_exclude_cid_out = 0;
continue;
}
if ( (cap->flags & GF_CAPFLAG_INPUT) && (cap->code == incode) ) {
if (! (cap->flags & GF_CAPFLAG_EXCLUDED) && gf_props_equal(&cap->val, cap_input) ) {
has_raw_in = (cap->flags & GF_CAPS_INPUT_STATIC) ? 2 : 1;
}
}
if ( (cap->flags & GF_CAPFLAG_OUTPUT) && (cap->code == outcode) ) {
if (! (cap->flags & GF_CAPFLAG_EXCLUDED)) {
if (gf_props_equal(&cap->val, cap_output) ) {
if (out_cap_excluded) {
exclude_cid_out = (cap->flags & GF_CAPS_OUTPUT_STATIC) ? 2 : 1;
} else {
has_cid_match = (cap->flags & GF_CAPS_OUTPUT_STATIC) ? 2 : 1;
}
}
} else {
Bool prop_equal = gf_props_equal(&cap->val, cap_output);
if (out_cap_excluded)
prop_equal = !prop_equal;
if (prop_equal) {
exclude_cid_out = (cap->flags & GF_CAPS_OUTPUT_STATIC) ? 2 : 1;
} else {
has_exclude_cid_out = (cap->flags & GF_CAPS_OUTPUT_STATIC) ? 2 : 1;
}
}
}
}
//CID not excluded, raw in present and CID explicit match or not included in excluded set
if (!exclude_cid_out && has_raw_in && (has_cid_match || (!exact_match_only && has_exclude_cid_out) ) ) {
return GF_TRUE;
}
return GF_FALSE;
} | 0 | [
"CWE-787"
]
| gpac | da37ec8582266983d0ec4b7550ec907401ec441e | 321,911,869,974,109,800,000,000,000,000,000,000,000 | 56 | fixed crashes for very long path - cf #1908 |
outputCode (Int64 code, Int64 &c, int &lc, char *&out)
{
outputBits (hufLength (code), hufCode (code), c, lc, out);
} | 0 | [
"CWE-125"
]
| openexr | e79d2296496a50826a15c667bf92bdc5a05518b4 | 115,143,906,836,191,770,000,000,000,000,000,000,000 | 4 | fix memory leaks and invalid memory accesses
Signed-off-by: Peter Hillman <[email protected]> |
static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
if (flags & KVM_IOEVENTFD_FLAG_PIO)
return KVM_PIO_BUS;
if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
return KVM_VIRTIO_CCW_NOTIFY_BUS;
return KVM_MMIO_BUS;
} | 0 | [
"CWE-20",
"CWE-617"
]
| linux | 36ae3c0a36b7456432fedce38ae2f7bd3e01a563 | 301,015,796,615,233,530,000,000,000,000,000,000,000 | 8 | KVM: Don't accept obviously wrong gsi values via KVM_IRQFD
We cannot add routes for gsi values >= KVM_MAX_IRQ_ROUTES -- see
kvm_set_irq_routing(). Hence, there is no sense in accepting them
via KVM_IRQFD. Prevent them from entering the system in the first
place.
Signed-off-by: Jan H. Schönherr <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
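A trivial sketch of the early validation this message describes, not the kernel implementation: reject an index the routing table could never hold before it propagates any further. The KVM_MAX_IRQ_ROUTES value used here is an assumption for the example.
#include <errno.h>
#define KVM_MAX_IRQ_ROUTES 4096            /* assumed value for the sketch */
static int irqfd_assign(unsigned int gsi)
{
    if (gsi >= KVM_MAX_IRQ_ROUTES)         /* obviously wrong values never get in */
        return -EINVAL;
    /* ... normal registration would continue here ... */
    return 0;
}
int main(void)
{
    return irqfd_assign(KVM_MAX_IRQ_ROUTES + 7) == -EINVAL ? 0 : 1;
}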
void __init anon_vma_init(void)
{
anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
} | 0 | [
"CWE-400",
"CWE-703",
"CWE-264"
]
| linux | 57e68e9cd65b4b8eb4045a1e0d0746458502554c | 124,236,671,600,417,050,000,000,000,000,000,000,000 | 6 | mm: try_to_unmap_cluster() should lock_page() before mlocking
A BUG_ON(!PageLocked) was triggered in mlock_vma_page() by Sasha Levin
fuzzing with trinity. The call site try_to_unmap_cluster() does not lock
the pages other than its check_page parameter (which is already locked).
The BUG_ON in mlock_vma_page() is not documented and its purpose is
somewhat unclear, but apparently it serializes against page migration,
which could otherwise fail to transfer the PG_mlocked flag. This would
not be fatal, as the page would be eventually encountered again, but
NR_MLOCK accounting would become distorted nevertheless. This patch adds
a comment to the BUG_ON in mlock_vma_page() and munlock_vma_page() to that
effect.
The call site try_to_unmap_cluster() is fixed so that for page !=
check_page, trylock_page() is attempted (to avoid possible deadlocks as we
already have check_page locked) and mlock_vma_page() is performed only
upon success. If the page lock cannot be obtained, the page is left
without PG_mlocked, which is again not a problem in the whole unevictable
memory design.
Signed-off-by: Vlastimil Babka <[email protected]>
Signed-off-by: Bob Liu <[email protected]>
Reported-by: Sasha Levin <[email protected]>
Cc: Wanpeng Li <[email protected]>
Cc: Michel Lespinasse <[email protected]>
Cc: KOSAKI Motohiro <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
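A userspace analogue of the locking rule described above, using POSIX mutexes instead of page locks and invented names: with the first lock already held, the second is taken only with a trylock, and the work is skipped rather than blocking, so no deadlock is possible.
#include <pthread.h>
static pthread_mutex_t check_page = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t other_page = PTHREAD_MUTEX_INITIALIZER;
static void process_cluster(void)
{
    pthread_mutex_lock(&check_page);       /* "check_page" stays locked throughout */
    if (pthread_mutex_trylock(&other_page) == 0) {
        /* safe to do the mlock-like work on the second page here */
        pthread_mutex_unlock(&other_page);
    }
    /* else: could not lock without blocking, leave the page for a later pass */
    pthread_mutex_unlock(&check_page);
}
int main(void)
{
    process_cluster();
    return 0;
}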
void txtc_del(GF_Box *s)
{
GF_TextConfigBox *ptr = (GF_TextConfigBox*)s;
if (ptr == NULL) return;
if (ptr->config) gf_free(ptr->config);
gf_free(ptr);
} | 0 | [
"CWE-400",
"CWE-401"
]
| gpac | d2371b4b204f0a3c0af51ad4e9b491144dd1225c | 22,476,687,927,056,190,000,000,000,000,000,000,000 | 8 | prevent dref memleak on invalid input (#1183) |
static RAND_DRBG *rand_drbg_new(int secure,
int type,
unsigned int flags,
RAND_DRBG *parent)
{
RAND_DRBG *drbg = secure ? OPENSSL_secure_zalloc(sizeof(*drbg))
: OPENSSL_zalloc(sizeof(*drbg));
if (drbg == NULL) {
RANDerr(RAND_F_RAND_DRBG_NEW, ERR_R_MALLOC_FAILURE);
return NULL;
}
drbg->secure = secure && CRYPTO_secure_allocated(drbg);
drbg->fork_id = openssl_get_fork_id();
drbg->parent = parent;
if (parent == NULL) {
drbg->get_entropy = rand_drbg_get_entropy;
drbg->cleanup_entropy = rand_drbg_cleanup_entropy;
#ifndef RAND_DRBG_GET_RANDOM_NONCE
drbg->get_nonce = rand_drbg_get_nonce;
drbg->cleanup_nonce = rand_drbg_cleanup_nonce;
#endif
drbg->reseed_interval = master_reseed_interval;
drbg->reseed_time_interval = master_reseed_time_interval;
} else {
drbg->get_entropy = rand_drbg_get_entropy;
drbg->cleanup_entropy = rand_drbg_cleanup_entropy;
/*
* Do not provide nonce callbacks, the child DRBGs will
* obtain their nonce using random bits from the parent.
*/
drbg->reseed_interval = slave_reseed_interval;
drbg->reseed_time_interval = slave_reseed_time_interval;
}
if (RAND_DRBG_set(drbg, type, flags) == 0)
goto err;
if (parent != NULL) {
rand_drbg_lock(parent);
if (drbg->strength > parent->strength) {
/*
* We currently don't support the algorithm from NIST SP 800-90C
* 10.1.2 to use a weaker DRBG as source
*/
rand_drbg_unlock(parent);
RANDerr(RAND_F_RAND_DRBG_NEW, RAND_R_PARENT_STRENGTH_TOO_WEAK);
goto err;
}
rand_drbg_unlock(parent);
}
return drbg;
err:
RAND_DRBG_free(drbg);
return NULL;
} | 0 | [
"CWE-330"
]
| openssl | 1b0fe00e2704b5e20334a16d3c9099d1ba2ef1be | 294,892,434,780,025,720,000,000,000,000,000,000,000 | 63 | drbg: ensure fork-safety without using a pthread_atfork handler
When the new OpenSSL CSPRNG was introduced in version 1.1.1,
it was announced in the release notes that it would be fork-safe,
which the old CSPRNG hadn't been.
The fork-safety was implemented using a fork count, which was
incremented by a pthread_atfork handler. Initially, this handler
was enabled by default. Unfortunately, the default behaviour
had to be changed for other reasons in commit b5319bdbd095, so
the new OpenSSL CSPRNG failed to keep its promise.
This commit restores the fork-safety using a different approach.
It replaces the fork count by a fork id, which coincides with
the process id on UNIX-like operating systems and is zero on other
operating systems. It is used to detect when an automatic reseed
after a fork is necessary.
To prevent a future regression, it also adds a test to verify that
the child reseeds after fork.
CVE-2019-1549
Reviewed-by: Paul Dale <[email protected]>
Reviewed-by: Matt Caswell <[email protected]>
(Merged from https://github.com/openssl/openssl/pull/9802) |
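A userspace sketch of the fork-detection idea in this message, not the OpenSSL implementation: remember the process id that last seeded the generator and reseed whenever it no longer matches, which is exactly the situation in a forked child. The reseed body and get_random's output are placeholders.
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
static pid_t seeded_for;                   /* "fork id" recorded at last seeding */
static void reseed(void)
{
    seeded_for = getpid();
    /* real code would mix fresh entropy into the DRBG state here */
}
static unsigned int get_random(void)
{
    if (seeded_for != getpid())            /* differs in a forked child */
        reseed();
    return 4;                              /* placeholder output */
}
int main(void)
{
    reseed();
    if (fork() == 0) {
        get_random();                      /* child reseeds automatically */
        _exit(0);
    }
    wait(NULL);
    get_random();                          /* parent keeps its own state */
    return 0;
}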
bool Item_func::setup_args_and_comparator(THD *thd, Arg_comparator *cmp)
{
DBUG_ASSERT(arg_count >= 2); // Item_func_nullif has arg_count == 3
if (args[0]->cmp_type() == STRING_RESULT &&
args[1]->cmp_type() == STRING_RESULT)
{
DTCollation tmp;
if (agg_arg_charsets_for_comparison(tmp, args, 2))
return true;
cmp->m_compare_collation= tmp.collation;
}
// Convert constants when compared to int/year field
DBUG_ASSERT(functype() != LIKE_FUNC);
convert_const_compared_to_int_field(thd);
return cmp->set_cmp_func(this, &args[0], &args[1], true);
} | 0 | [
"CWE-617"
]
| server | 807945f2eb5fa22e6f233cc17b85a2e141efe2c8 | 325,254,645,100,071,070,000,000,000,000,000,000,000 | 18 | MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item. |
void DecimalQuantity::roundToIncrement(double roundingIncrement, RoundingMode roundingMode,
int32_t maxFrac, UErrorCode& status) {
// TODO(13701): Move the nickel check into a higher-level API.
if (roundingIncrement == 0.05) {
roundToMagnitude(-2, roundingMode, true, status);
roundToMagnitude(-maxFrac, roundingMode, false, status);
return;
} else if (roundingIncrement == 0.5) {
roundToMagnitude(-1, roundingMode, true, status);
roundToMagnitude(-maxFrac, roundingMode, false, status);
return;
}
// TODO(13701): This is innefficient. Improve?
// TODO(13701): Should we convert to decNumber instead?
roundToInfinity();
double temp = toDouble();
temp /= roundingIncrement;
// Use another DecimalQuantity to perform the actual rounding...
DecimalQuantity dq;
dq.setToDouble(temp);
dq.roundToMagnitude(0, roundingMode, status);
temp = dq.toDouble();
temp *= roundingIncrement;
setToDouble(temp);
// Since we reset the value to a double, we need to specify the rounding boundary
// in order to get the DecimalQuantity out of approximation mode.
// NOTE: In Java, we have minMaxFrac, but in C++, the two are differentiated.
roundToMagnitude(-maxFrac, roundingMode, status);
} | 0 | [
"CWE-190"
]
| icu | 53d8c8f3d181d87a6aa925b449b51c4a2c922a51 | 283,231,579,099,890,400,000,000,000,000,000,000,000 | 29 | ICU-20246 Fixing another integer overflow in number parsing. |
bool Filter::maybeRetryReset(Http::StreamResetReason reset_reason,
UpstreamRequest& upstream_request) {
// We don't retry if we already started the response, don't have a retry policy defined,
// or if we've already retried this upstream request (currently only possible if a per
// try timeout occurred and hedge_on_per_try_timeout is enabled).
if (downstream_response_started_ || !retry_state_ || upstream_request.retried_) {
return false;
}
const RetryStatus retry_status =
retry_state_->shouldRetryReset(reset_reason, [this]() -> void { doRetry(); });
if (retry_status == RetryStatus::Yes && setupRetry()) {
if (upstream_request.upstream_host_) {
upstream_request.upstream_host_->stats().rq_error_.inc();
}
upstream_request.removeFromList(upstream_requests_);
return true;
} else if (retry_status == RetryStatus::NoOverflow) {
callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamOverflow);
} else if (retry_status == RetryStatus::NoRetryLimitExceeded) {
callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamRetryLimitExceeded);
}
return false;
} | 0 | [
"CWE-400",
"CWE-703"
]
| envoy | afc39bea36fd436e54262f150c009e8d72db5014 | 69,822,502,479,371,340,000,000,000,000,000,000,000 | 25 | Track byteSize of HeaderMap internally.
Introduces a cached byte size updated internally in HeaderMap. The value
is stored as an optional, and is cleared whenever a non-const pointer or
reference to a HeaderEntry is accessed. The cached value can be set with
refreshByteSize() which performs an iteration over the HeaderMap to sum
the size of each key and value in the HeaderMap.
Signed-off-by: Asra Ali <[email protected]> |
int encode_msg(struct sip_msg *msg,char *payload,int len)
{
int i,j,k,u,request;
unsigned short int h;
struct hdr_field* hf;
struct msg_start* ms;
struct sip_uri miuri;
char *myerror=NULL;
ptrdiff_t diff;
if(len < MAX_ENCODED_MSG + MAX_MESSAGE_LEN)
return -1;
if(parse_headers(msg,HDR_EOH_F,0)<0){
myerror="in parse_headers";
goto error;
}
memset(payload,0,len);
ms=&msg->first_line;
if(ms->type == SIP_REQUEST)
request=1;
else if(ms->type == SIP_REPLY)
request=0;
else{
myerror="message is neither request nor response";
goto error;
}
if(request) {
for(h=0;h<32;j=(0x01<<h),h++)
if(j & ms->u.request.method_value)
break;
} else {
h=(unsigned short)(ms->u.reply.statuscode);
}
if(h==32){/*statuscode wont be 32...*/
myerror="unknown message type\n";
goto error;
}
h=htons(h);
/*first goes the message code type*/
memcpy(payload,&h,2);
h=htons((unsigned short int)msg->len);
/*then goes the message start idx, but we'll put it later*/
/*then goes the message length (we hope it to be less than 65535 bytes...)*/
memcpy(&payload[MSG_LEN_IDX],&h,2);
/*then goes the content start index (starting from SIP MSG START)*/
if(0>(diff=(get_body(msg)-(msg->buf)))){
myerror="body starts before the message (uh ?)";
goto error;
}else
h=htons((unsigned short int)diff);
memcpy(payload+CONTENT_IDX,&h,2);
payload[METHOD_CODE_IDX]=(unsigned char)(request?
(ms->u.request.method.s-msg->buf):
(ms->u.reply.status.s-msg->buf));
payload[METHOD_CODE_IDX+1]=(unsigned char)(request?
(ms->u.request.method.len):
(ms->u.reply.status.len));
payload[URI_REASON_IDX]=(unsigned char)(request?
(ms->u.request.uri.s-msg->buf):
(ms->u.reply.reason.s-msg->buf));
payload[URI_REASON_IDX+1]=(unsigned char)(request?
(ms->u.request.uri.len):
(ms->u.reply.reason.len));
payload[VERSION_IDX]=(unsigned char)(request?
(ms->u.request.version.s-msg->buf):
(ms->u.reply.version.s-msg->buf));
if(request){
if (parse_uri(ms->u.request.uri.s,ms->u.request.uri.len, &miuri)<0){
LM_ERR("<%.*s>\n",ms->u.request.uri.len,ms->u.request.uri.s);
myerror="while parsing the R-URI";
goto error;
}
if(0>(j=encode_uri2(msg->buf,
ms->u.request.method.s-msg->buf+ms->len,
ms->u.request.uri,&miuri,
(unsigned char*)&payload[REQUEST_URI_IDX+1])))
{
myerror="ENCODE_MSG: ERROR while encoding the R-URI";
goto error;
}
payload[REQUEST_URI_IDX]=(unsigned char)j;
k=REQUEST_URI_IDX+1+j;
}else
k=REQUEST_URI_IDX;
u=k;
k++;
for(i=0,hf=msg->headers;hf;hf=hf->next,i++);
i++;/*we do as if there was an extra header, that marks the end of
the previous header in the headers hashtable(read below)*/
j=k+3*i;
for(i=0,hf=msg->headers;hf;hf=hf->next,k+=3){
payload[k]=(unsigned char)(hf->type & 0xFF);
h=htons(j);
/*now goes a payload-based-ptr to where the header-code starts*/
memcpy(&payload[k+1],&h,2);
/*TODO fix this... fixed with k-=3?*/
if(0>(i=encode_header(msg,hf,(unsigned char*)(payload+j),MAX_ENCODED_MSG+MAX_MESSAGE_LEN-j))){
LM_ERR("encoding header %.*s\n",hf->name.len,hf->name.s);
goto error;
k-=3;
continue;
}
j+=(unsigned short int)i;
}
/*now goes the number of headers that have been found, right after the meta-msg-section*/
payload[u]=(unsigned char)((k-u-1)/3);
j=htons(j);
/*now copy the number of bytes that the headers-meta-section has occupied,right afther
* headers-meta-section(the array with ['v',[2:where],'r',[2:where],'R',[2:where],...]
* this is to know where the LAST header ends, since the length of each header-struct
* is calculated substracting the nextHeaderStart - presentHeaderStart
* the k+1 is because payload[k] is usually the letter*/
memcpy(&payload[k+1],&j,2);
k+=3;
j=ntohs(j);
/*now we copy the headers-meta-section after the msg-headers-meta-section*/
/*memcpy(&payload[k],payload2,j);*/
/*j+=k;*/
/*pkg_free(payload2);*/
/*now we copy the actual message after the headers-meta-section*/
memcpy(&payload[j],msg->buf,msg->len);
LM_DBG("msglen = %d,msg starts at %d\n",msg->len,j);
j=htons(j);
/*now we copy at the beginning, the index to where the actual message starts*/
memcpy(&payload[MSG_START_IDX],&j,2);
return GET_PAY_SIZE( payload );
error:
LM_ERR("%s\n",myerror);
return -1;
} | 1 | [
"CWE-119",
"CWE-284"
]
| kamailio | f50c9c853e7809810099c970780c30b0765b0643 | 177,023,325,228,656,560,000,000,000,000,000,000,000 | 130 | seas: safety check for target buffer size before copying message in encode_msg()
- avoid buffer overflow for large SIP messages
- reported by Stelios Tsampas |
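A generic illustration of the guard this message refers to, with invented names and sizes: verify that the destination buffer can hold the metadata section plus the whole message before any memcpy into it.
#include <stdio.h>
#include <string.h>
static int encode_into(char *dst, size_t dst_len,
                       const char *msg, size_t msg_len, size_t header_room)
{
    if (dst_len < header_room + msg_len)     /* reject oversized input up front */
        return -1;
    memset(dst, 0, dst_len);
    /* ... the header/metadata section would be written into the first
     * header_room bytes here ... */
    memcpy(dst + header_room, msg, msg_len); /* safe: size was checked above */
    return (int)(header_room + msg_len);
}
int main(void)
{
    char buf[64];
    const char msg[] = "INVITE sip:alice@example.com SIP/2.0";
    int n = encode_into(buf, sizeof buf, msg, sizeof msg - 1, 16);
    printf("encoded %d bytes\n", n);
    return 0;
}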
dir_s_each_child(int argc, VALUE *argv, VALUE io)
{
VALUE dir;
RETURN_ENUMERATOR(io, argc, argv);
dir = dir_open_dir(argc, argv);
rb_ensure(dir_each_child, dir, dir_close, dir);
return Qnil;
} | 0 | [
"CWE-22"
]
| ruby | bd5661a3cbb38a8c3a3ea10cd76c88bbef7871b8 | 100,861,167,097,653,700,000,000,000,000,000,000,000 | 9 | dir.c: check NUL bytes
* dir.c (GlobPathValue): should be used in rb_push_glob only.
other methods should use FilePathValue.
https://hackerone.com/reports/302338
* dir.c (rb_push_glob): expand GlobPathValue
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@62989 b2dd03c8-39d4-4d8f-98ff-823fe69b080e |
static int check_pe64_bytes(const ut8 *buf, ut64 length) {
int idx, ret = false;
if (!buf || length <= 0x3d) {
return false;
}
idx = buf[0x3c] | (buf[0x3d]<<8);
if (length >= idx + 0x20) {
if (!memcmp (buf, "MZ", 2) && !memcmp (buf+idx, "PE", 2) && !memcmp (buf+idx+0x18, "\x0b\x02", 2)) {
ret = true;
}
}
return ret;
} | 0 | [
"CWE-125"
]
| radare2 | eb7deb281df54771fb8ecf5890dc325a7d22d3e2 | 78,784,987,201,652,180,000,000,000,000,000,000,000 | 13 | Fix #10464 - oobread crash in mdmp (#10683) |
cmsBool AddConversion(cmsPipeline* Result, cmsColorSpaceSignature InPCS, cmsColorSpaceSignature OutPCS, cmsMAT3* m, cmsVEC3* off)
{
cmsFloat64Number* m_as_dbl = (cmsFloat64Number*) m;
cmsFloat64Number* off_as_dbl = (cmsFloat64Number*) off;
// Handle PCS mismatches. A specialized stage is added to the LUT in such case
switch (InPCS) {
case cmsSigXYZData: // Input profile operates in XYZ
switch (OutPCS) {
case cmsSigXYZData: // XYZ -> XYZ
if (!IsEmptyLayer(m, off) &&
!cmsPipelineInsertStage(Result, cmsAT_END, cmsStageAllocMatrix(Result ->ContextID, 3, 3, m_as_dbl, off_as_dbl)))
return FALSE;
break;
case cmsSigLabData: // XYZ -> Lab
if (!IsEmptyLayer(m, off) &&
!cmsPipelineInsertStage(Result, cmsAT_END, cmsStageAllocMatrix(Result ->ContextID, 3, 3, m_as_dbl, off_as_dbl)))
return FALSE;
if (!cmsPipelineInsertStage(Result, cmsAT_END, _cmsStageAllocXYZ2Lab(Result ->ContextID)))
return FALSE;
break;
default:
return FALSE; // Colorspace mismatch
}
break;
case cmsSigLabData: // Input profile operates in Lab
switch (OutPCS) {
case cmsSigXYZData: // Lab -> XYZ
if (!cmsPipelineInsertStage(Result, cmsAT_END, _cmsStageAllocLab2XYZ(Result ->ContextID)))
return FALSE;
if (!IsEmptyLayer(m, off) &&
!cmsPipelineInsertStage(Result, cmsAT_END, cmsStageAllocMatrix(Result ->ContextID, 3, 3, m_as_dbl, off_as_dbl)))
return FALSE;
break;
case cmsSigLabData: // Lab -> Lab
if (!IsEmptyLayer(m, off)) {
if (!cmsPipelineInsertStage(Result, cmsAT_END, _cmsStageAllocLab2XYZ(Result ->ContextID)) ||
!cmsPipelineInsertStage(Result, cmsAT_END, cmsStageAllocMatrix(Result ->ContextID, 3, 3, m_as_dbl, off_as_dbl)) ||
!cmsPipelineInsertStage(Result, cmsAT_END, _cmsStageAllocXYZ2Lab(Result ->ContextID)))
return FALSE;
}
break;
default:
return FALSE; // Mismatch
}
break;
// On colorspaces other than PCS, check for same space
default:
if (InPCS != OutPCS) return FALSE;
break;
}
return TRUE;
} | 0 | [
"CWE-94"
]
| Little-CMS | fefaaa43c382eee632ea3ad0cfa915335140e1db | 309,383,987,567,277,360,000,000,000,000,000,000,000 | 67 | Fix a double free on error recovering |
kadm5_modify_principal(void *server_handle,
kadm5_principal_ent_t entry, long mask)
{
int ret, ret2, i;
kadm5_policy_ent_rec pol;
krb5_boolean have_pol = FALSE;
krb5_db_entry *kdb;
krb5_tl_data *tl_data_orig;
osa_princ_ent_rec adb;
kadm5_server_handle_t handle = server_handle;
CHECK_HANDLE(server_handle);
krb5_clear_error_message(handle->context);
if(entry == NULL)
return EINVAL;
if((mask & KADM5_PRINCIPAL) || (mask & KADM5_LAST_PWD_CHANGE) ||
(mask & KADM5_MOD_TIME) || (mask & KADM5_MOD_NAME) ||
(mask & KADM5_MKVNO) || (mask & KADM5_AUX_ATTRIBUTES) ||
(mask & KADM5_KEY_DATA) || (mask & KADM5_LAST_SUCCESS) ||
(mask & KADM5_LAST_FAILED))
return KADM5_BAD_MASK;
if((mask & ~ALL_PRINC_MASK))
return KADM5_BAD_MASK;
if((mask & KADM5_POLICY) && entry->policy == NULL)
return KADM5_BAD_MASK;
if((mask & KADM5_POLICY) && (mask & KADM5_POLICY_CLR))
return KADM5_BAD_MASK;
if (mask & KADM5_TL_DATA) {
tl_data_orig = entry->tl_data;
while (tl_data_orig) {
if (tl_data_orig->tl_data_type < 256)
return KADM5_BAD_TL_TYPE;
tl_data_orig = tl_data_orig->tl_data_next;
}
}
ret = kdb_get_entry(handle, entry->principal, &kdb, &adb);
if (ret)
return(ret);
/*
* This is pretty much the same as create ...
*/
if ((mask & KADM5_POLICY)) {
ret = get_policy(handle, entry->policy, &pol, &have_pol);
if (ret)
goto done;
/* set us up to use the new policy */
adb.aux_attributes |= KADM5_POLICY;
if (adb.policy)
free(adb.policy);
adb.policy = strdup(entry->policy);
}
if (have_pol) {
/* set pw_max_life based on new policy */
if (pol.pw_max_life) {
ret = krb5_dbe_lookup_last_pwd_change(handle->context, kdb,
&(kdb->pw_expiration));
if (ret)
goto done;
kdb->pw_expiration += pol.pw_max_life;
} else {
kdb->pw_expiration = 0;
}
}
if ((mask & KADM5_POLICY_CLR) && (adb.aux_attributes & KADM5_POLICY)) {
free(adb.policy);
adb.policy = NULL;
adb.aux_attributes &= ~KADM5_POLICY;
kdb->pw_expiration = 0;
}
if ((mask & KADM5_ATTRIBUTES))
kdb->attributes = entry->attributes;
if ((mask & KADM5_MAX_LIFE))
kdb->max_life = entry->max_life;
if ((mask & KADM5_PRINC_EXPIRE_TIME))
kdb->expiration = entry->princ_expire_time;
if (mask & KADM5_PW_EXPIRATION)
kdb->pw_expiration = entry->pw_expiration;
if (mask & KADM5_MAX_RLIFE)
kdb->max_renewable_life = entry->max_renewable_life;
if((mask & KADM5_KVNO)) {
for (i = 0; i < kdb->n_key_data; i++)
kdb->key_data[i].key_data_kvno = entry->kvno;
}
if (mask & KADM5_TL_DATA) {
krb5_tl_data *tl;
/* may have to change the version number of the API. Updates the list with the given tl_data rather than over-writting */
for (tl = entry->tl_data; tl;
tl = tl->tl_data_next)
{
ret = krb5_dbe_update_tl_data(handle->context, kdb, tl);
if( ret )
{
goto done;
}
}
}
/*
* Setting entry->fail_auth_count to 0 can be used to manually unlock
* an account. It is not possible to set fail_auth_count to any other
* value using kadmin.
*/
if (mask & KADM5_FAIL_AUTH_COUNT) {
if (entry->fail_auth_count != 0) {
ret = KADM5_BAD_SERVER_PARAMS;
goto done;
}
kdb->fail_auth_count = 0;
}
/* let the mask propagate to the database provider */
kdb->mask = mask;
ret = k5_kadm5_hook_modify(handle->context, handle->hook_handles,
KADM5_HOOK_STAGE_PRECOMMIT, entry, mask);
if (ret)
goto done;
ret = kdb_put_entry(handle, kdb, &adb);
if (ret) goto done;
(void) k5_kadm5_hook_modify(handle->context, handle->hook_handles,
KADM5_HOOK_STAGE_POSTCOMMIT, entry, mask);
ret = KADM5_OK;
done:
if (have_pol) {
ret2 = kadm5_free_policy_ent(handle->lhandle, &pol);
ret = ret ? ret : ret2;
}
kdb_free_entry(handle, kdb, &adb);
return ret;
} | 0 | [
"CWE-703"
]
| krb5 | b863de7fbf080b15e347a736fdda0a82d42f4f6b | 210,024,763,500,624,270,000,000,000,000,000,000,000 | 145 | Check for null kadm5 policy name [CVE-2015-8630]
In kadm5_create_principal_3() and kadm5_modify_principal(), check for
entry->policy being null when KADM5_POLICY is included in the mask.
CVE-2015-8630:
In MIT krb5 1.12 and later, an authenticated attacker with permission
to modify a principal entry can cause kadmind to dereference a null
pointer by supplying a null policy value but including KADM5_POLICY in
the mask.
CVSSv2 Vector: AV:N/AC:H/Au:S/C:N/I:N/A:C/E:POC/RL:OF/RC:C
ticket: 8342 (new)
target_version: 1.14-next
target_version: 1.13-next
tags: pullup |
static void sco_sock_init(struct sock *sk, struct sock *parent)
{
BT_DBG("sk %p", sk);
if (parent) {
sk->sk_type = parent->sk_type;
bt_sk(sk)->flags = bt_sk(parent)->flags;
security_sk_clone(parent, sk);
} else {
bt_sk(sk)->skb_put_cmsg = sco_skb_put_cmsg;
}
} | 0 | []
| linux | f6b8c6b5543983e9de29dc14716bfa4eb3f157c4 | 209,629,393,641,213,700,000,000,000,000,000,000,000 | 12 | Bluetooth: sco: Fix crash when using BT_SNDMTU/BT_RCVMTU option
This commit adds the missing validity check for a connected socket; without it,
the following crash occurs due to sco_pi(sk)->conn being NULL:
KASAN: null-ptr-deref in range [0x0000000000000050-0x0000000000000057]
CPU: 3 PID: 4284 Comm: test_sco Not tainted 5.10.0-rc3+ #1
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1ubuntu1 04/01/2014
RIP: 0010:sco_sock_getsockopt+0x45d/0x8e0
Code: 48 c1 ea 03 80 3c 02 00 0f 85 ca 03 00 00 49 8b 9d f8 04 00 00 48 b8 00
00 00 00 00 fc ff df 48 8d 7b 50 48 89 fa 48 c1 ea 03 <0f> b6 04 02 84
c0 74 08 3c 03 0f 8e b5 03 00 00 8b 43 50 48 8b 0c
RSP: 0018:ffff88801bb17d88 EFLAGS: 00010206
RAX: dffffc0000000000 RBX: 0000000000000000 RCX: ffffffff83a4ecdf
RDX: 000000000000000a RSI: ffffc90002fce000 RDI: 0000000000000050
RBP: 1ffff11003762fb4 R08: 0000000000000001 R09: ffff88810e1008c0
R10: ffffffffbd695dcf R11: fffffbfff7ad2bb9 R12: 0000000000000000
R13: ffff888018ff1000 R14: dffffc0000000000 R15: 000000000000000d
FS: 00007fb4f76c1700(0000) GS:ffff88811af80000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00005555e3b7a938 CR3: 00000001117be001 CR4: 0000000000770ee0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
PKRU: 55555554
Call Trace:
? sco_skb_put_cmsg+0x80/0x80
? sco_skb_put_cmsg+0x80/0x80
__sys_getsockopt+0x12a/0x220
? __ia32_sys_setsockopt+0x150/0x150
? syscall_enter_from_user_mode+0x18/0x50
? rcu_read_lock_bh_held+0xb0/0xb0
__x64_sys_getsockopt+0xba/0x150
? syscall_enter_from_user_mode+0x1d/0x50
do_syscall_64+0x33/0x40
entry_SYSCALL_64_after_hwframe+0x44/0xa9
Fixes: 0fc1a726f897 ("Bluetooth: sco: new getsockopt options BT_SNDMTU/BT_RCVMTU")
Reported-by: Hulk Robot <[email protected]>
Signed-off-by: Wei Yongjun <[email protected]>
Reviewed-by: Luiz Augusto Von Dentz <[email protected]>
Signed-off-by: Marcel Holtmann <[email protected]>
Signed-off-by: Johan Hedberg <[email protected]> |
DEFUN (bgp_default_ipv4_unicast,
bgp_default_ipv4_unicast_cmd,
"bgp default ipv4-unicast",
"BGP specific commands\n"
"Configure BGP defaults\n"
"Activate ipv4-unicast for a peer by default\n")
{
struct bgp *bgp;
bgp = vty->index;
bgp_flag_unset (bgp, BGP_FLAG_NO_DEFAULT_IPV4);
return CMD_SUCCESS;
} | 0 | [
"CWE-125"
]
| frr | 6d58272b4cf96f0daa846210dd2104877900f921 | 68,976,784,335,576,830,000,000,000,000,000,000,000 | 13 | [bgpd] cleanup, compact and consolidate capability parsing code
2007-07-26 Paul Jakma <[email protected]>
* (general) Clean up and compact capability parsing slightly.
Consolidate validation of length and logging of generic TLV, and
memcpy of capability data, thus removing such from cap specifc
code (not always present or correct).
* bgp_open.h: Add structures for the generic capability TLV header
and for the data formats of the various specific capabilities we
support. Hence remove the badly named, or else misdefined, struct
capability.
* bgp_open.c: (bgp_capability_vty_out) Use struct capability_mp_data.
Do the length checks *before* memcpy()'ing based on that length
(stored capability - should have been validated anyway on input,
but..).
(bgp_afi_safi_valid_indices) new function to validate (afi,safi)
which is about to be used as index into arrays, consolidates
several instances of same, at least one of which appeared to be
incomplete..
(bgp_capability_mp) Much condensed.
(bgp_capability_orf_entry) New, process one ORF entry
(bgp_capability_orf) Condensed. Fixed to process all ORF entries.
(bgp_capability_restart) Condensed, and fixed to use a
cap-specific type, rather than abusing capability_mp.
(struct message capcode_str) added to aid generic logging.
(size_t cap_minsizes[]) added to aid generic validation of
capability length field.
(bgp_capability_parse) Generic logging and validation of TLV
consolidated here. Code compacted as much as possible.
* bgp_packet.c: (bgp_open_receive) Capability parsers now use
streams, so no more need here to manually fudge the input stream
getp.
(bgp_capability_msg_parse) use struct capability_mp_data. Validate
lengths /before/ memcpy. Use bgp_afi_safi_valid_indices.
(bgp_capability_receive) Exported for use by test harness.
* bgp_vty.c: (bgp_show_summary) fix conversion warning
(bgp_show_peer) ditto
* bgp_debug.h: Fix storage 'extern' after type 'const'.
* lib/log.c: (mes_lookup) warning about code not being in
same-number array slot should be debug, not warning. E.g. BGP
has several discontigious number spaces, allocating from
different parts of a space is not uncommon (e.g. IANA
assigned versus vendor-assigned code points in some number
space). |
RzCallable *function_type_derive(RzAnalysis *analysis, RZ_NONNULL const char *fcn_name, bool *owned) {
rz_return_val_if_fail(fcn_name && owned, NULL);
// Because in the case of `rz_type_func_get()` we get the borrowed
// pointer to the RzCallable and in the case of `rz_analysis_function_derive_type()`
// we get the owned pointer, we should free the pointer in the one case but not another.
*owned = false;
RzCallable *callable = rz_type_func_get(analysis->typedb, fcn_name);
if (!callable) {
RzAnalysisFunction *fcn = rz_analysis_get_function_byname(analysis, fcn_name);
if (!fcn) {
return NULL;
}
callable = rz_analysis_function_derive_type(analysis, fcn);
if (!callable) {
return NULL;
}
*owned = true;
}
return callable;
} | 0 | [
"CWE-703"
]
| rizin | 6ce71d8aa3dafe3cdb52d5d72ae8f4b95916f939 | 86,197,365,441,130,700,000,000,000,000,000,000,000 | 20 | Initialize retctx,ctx before freeing the inner elements
In rz_core_analysis_type_match the retctx structure was initialized on the
stack only after a "goto out_function", where a field of that structure
was freed. When the goto path is taken, the field is not properly
initialized, which can cause a crash of Rizin or have other effects.
Fixes: CVE-2021-4022 |
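A minimal sketch of the fix pattern described above, assuming illustrative names (retctx, out_function, the ret field) rather than the exact Rizin code: zero the on-stack context before any branch that can jump to the cleanup label, so the cleanup only ever frees an initialized pointer.
#include <stdlib.h>
#include <string.h>
struct retctx { char *ret; };   /* illustrative stand-in, not the Rizin type */
static int analyze(int fail_early) {
	struct retctx ctx;
	// Zero the context before any branch that can reach the cleanup label,
	// so free() below never sees an indeterminate pointer.
	memset(&ctx, 0, sizeof(ctx));
	if (fail_early) {
		goto out_function;   // the early-exit path that used to crash
	}
	ctx.ret = malloc(16);
out_function:
	free(ctx.ret);   // safe: either NULL or a valid allocation
	return 0;
}
int main(void) {
	analyze(1);   // early-exit path: ctx.ret is NULL, free(NULL) is a no-op
	analyze(0);
	return 0;
}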
static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
switch (msr_index) {
case MSR_IA32_VMX_BASIC:
return vmx_restore_vmx_basic(vmx, data);
case MSR_IA32_VMX_PINBASED_CTLS:
case MSR_IA32_VMX_PROCBASED_CTLS:
case MSR_IA32_VMX_EXIT_CTLS:
case MSR_IA32_VMX_ENTRY_CTLS:
/*
* The "non-true" VMX capability MSRs are generated from the
* "true" MSRs, so we do not support restoring them directly.
*
* If userspace wants to emulate VMX_BASIC[55]=0, userspace
* should restore the "true" MSRs with the must-be-1 bits
* set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
* DEFAULT SETTINGS".
*/
return -EINVAL;
case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
case MSR_IA32_VMX_TRUE_EXIT_CTLS:
case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
case MSR_IA32_VMX_PROCBASED_CTLS2:
return vmx_restore_control_msr(vmx, msr_index, data);
case MSR_IA32_VMX_MISC:
return vmx_restore_vmx_misc(vmx, data);
case MSR_IA32_VMX_CR0_FIXED0:
case MSR_IA32_VMX_CR4_FIXED0:
return vmx_restore_fixed0_msr(vmx, msr_index, data);
case MSR_IA32_VMX_CR0_FIXED1:
case MSR_IA32_VMX_CR4_FIXED1:
/*
* These MSRs are generated based on the vCPU's CPUID, so we
* do not support restoring them directly.
*/
return -EINVAL;
case MSR_IA32_VMX_EPT_VPID_CAP:
return vmx_restore_vmx_ept_vpid_cap(vmx, data);
case MSR_IA32_VMX_VMCS_ENUM:
vmx->nested.nested_vmx_vmcs_enum = data;
return 0;
default:
/*
* The rest of the VMX capability MSRs do not support restore.
*/
return -EINVAL;
}
} | 0 | [
"CWE-20",
"CWE-617"
]
| linux | 3a8b0677fc6180a467e26cc32ce6b0c09a32f9bb | 143,833,485,240,620,630,000,000,000,000,000,000,000 | 51 | KVM: VMX: Do not BUG() on out-of-bounds guest IRQ
The value of the guest_irq argument to vmx_update_pi_irte() is
ultimately coming from a KVM_IRQFD API call. Do not BUG() in
vmx_update_pi_irte() if the value is out of bounds. (Especially,
since KVM as a whole seems to hang after that.)
Instead, print a message only once if we find that we don't have a
route for a certain IRQ (which can be out-of-bounds or within the
array).
This fixes CVE-2017-1000252.
Fixes: efc644048ecde54 ("KVM: x86: Update IRTE for posted-interrupts")
Signed-off-by: Jan H. Schönherr <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
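A small userspace model of the behavior the commit describes: validate the userspace-supplied guest_irq against the routing table, warn once when there is no route, and return instead of BUG()-ing. The structure and field names here are assumptions for illustration and do not match the kernel's kvm_irq_routing_table.
#include <stdio.h>
#include <stdbool.h>
#define NR_RT_MAX 256   /* sketch-only capacity of the routing table */
/* Stand-in for the IRQ routing table; the real kernel structure differs. */
struct irq_routing_table {
	unsigned int nr_rt_entries;
	bool has_route[NR_RT_MAX];
};
/* guest_irq ultimately comes from a KVM_IRQFD call, i.e. from userspace:
   validate it and warn once instead of BUG()-ing when it is out of bounds
   or has no route. */
static int update_pi_irte(const struct irq_routing_table *rt,
			  unsigned int guest_irq)
{
	static bool warned;
	if (guest_irq >= rt->nr_rt_entries || guest_irq >= NR_RT_MAX ||
	    !rt->has_route[guest_irq]) {
		if (!warned) {
			fprintf(stderr, "no route for irq %u (nr_rt_entries %u)\n",
				guest_irq, rt->nr_rt_entries);
			warned = true;
		}
		return 0;   /* nothing to update; not a fatal condition */
	}
	/* ... program the posted-interrupt remapping entry here ... */
	return 0;
}
int main(void)
{
	struct irq_routing_table rt = { .nr_rt_entries = 4 };
	rt.has_route[1] = true;
	update_pi_irte(&rt, 1);      /* valid: has a route */
	update_pi_irte(&rt, 1000);   /* out of bounds: warns once, no crash */
	return 0;
}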
static void guc_init_params(struct intel_guc *guc)
{
u32 *params = guc->params;
int i;
BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
} | 1 | [
"CWE-20",
"CWE-190"
]
| linux | c784e5249e773689e38d2bc1749f08b986621a26 | 59,890,601,311,104,760,000,000,000,000,000,000,000 | 16 | drm/i915/guc: Update to use firmware v49.0.1
The latest GuC firmware includes a number of interface changes that
require driver updates to match.
* Starting from Gen11, the ID to be provided to GuC needs to contain
the engine class in bits [0..2] and the instance in bits [3..6].
NOTE: this patch breaks pointer dereferences in some existing GuC
functions that use the guc_id to dereference arrays, but these functions
are not used for now as we have GuC submission disabled, and we will
update these functions in a follow-up patch which requires new IDs.
* The new GuC requires the additional data structure (ADS) and associated
'private_data' pointer to be setup. This is basically a scratch area
of memory that the GuC owns. The size is read from the CSS header.
* There is now a physical to logical engine mapping table in the ADS
which needs to be configured in order for the firmware to load. For
now, the table is initialised with a 1 to 1 mapping.
* GUC_CTL_CTXINFO has been removed from the initialization params.
* reg_state_buffer is maintained internally by the GuC as part of
the private data.
* The ADS layout has changed significantly. This patch updates the
shared structure and also adds better documentation of the layout.
* While i915 does not use GuC doorbells, the firmware now requires
that some initialisation is done.
* The number of engine classes and instances supported in the ADS has
been increased.
Signed-off-by: John Harrison <[email protected]>
Signed-off-by: Matthew Brost <[email protected]>
Signed-off-by: Daniele Ceraolo Spurio <[email protected]>
Signed-off-by: Oscar Mateo <[email protected]>
Signed-off-by: Michel Thierry <[email protected]>
Signed-off-by: Rodrigo Vivi <[email protected]>
Signed-off-by: Michal Wajdeczko <[email protected]>
Cc: Michal Winiarski <[email protected]>
Cc: Tomasz Lis <[email protected]>
Cc: Joonas Lahtinen <[email protected]>
Reviewed-by: Daniele Ceraolo Spurio <[email protected]>
Signed-off-by: Joonas Lahtinen <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected] |
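The Gen11+ guc_id layout mentioned above (engine class in bits [0..2], instance in bits [3..6]) can be sketched as below; the macro and function names are assumptions, not the i915 driver's definitions.
#include <assert.h>
#include <stdint.h>
/* Assumed macro names; the driver's own definitions may differ. */
#define GUC_ID_CLASS_MASK      0x7u   /* bits [0..2]: engine class    */
#define GUC_ID_INSTANCE_SHIFT  3
#define GUC_ID_INSTANCE_MASK   0xfu   /* bits [3..6]: engine instance */
static inline uint32_t guc_engine_id(uint32_t engine_class, uint32_t instance)
{
	return (engine_class & GUC_ID_CLASS_MASK) |
	       ((instance & GUC_ID_INSTANCE_MASK) << GUC_ID_INSTANCE_SHIFT);
}
int main(void)
{
	/* class 1, instance 2 -> 0b0010001 == 0x11 */
	assert(guc_engine_id(1, 2) == 0x11);
	return 0;
}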
MagickExport MagickBooleanType IsFuzzyEquivalencePixelInfo(const PixelInfo *p,
const PixelInfo *q)
{
double
fuzz,
pixel;
register double
scale,
distance;
fuzz=(double) MagickMax(MagickMax(p->fuzz,q->fuzz),(MagickRealType)
MagickSQ1_2);
fuzz*=fuzz;
scale=1.0;
distance=0.0;
if ((p->alpha_trait != UndefinedPixelTrait) ||
(q->alpha_trait != UndefinedPixelTrait))
{
/*
Transparencies are involved - set alpha distance.
*/
pixel=(p->alpha_trait != UndefinedPixelTrait ? p->alpha : OpaqueAlpha)-
(q->alpha_trait != UndefinedPixelTrait ? q->alpha : OpaqueAlpha);
distance=pixel*pixel;
if (distance > fuzz)
return(MagickFalse);
/*
Generate an alpha scaling factor to generate a 4D cone on colorspace.
If one color is transparent, distance has no color component.
*/
if (p->alpha_trait != UndefinedPixelTrait)
scale=(QuantumScale*p->alpha);
if (q->alpha_trait != UndefinedPixelTrait)
scale*=(QuantumScale*q->alpha);
if (scale <= MagickEpsilon )
return(MagickTrue);
}
/*
CMYK create a CMY cube with a multi-dimensional cone toward black.
*/
if (p->colorspace == CMYKColorspace)
{
pixel=p->black-q->black;
distance+=pixel*pixel*scale;
if (distance > fuzz)
return(MagickFalse);
scale*=(double) (QuantumScale*(QuantumRange-p->black));
scale*=(double) (QuantumScale*(QuantumRange-q->black));
}
/*
RGB or CMY color cube.
*/
distance*=3.0; /* rescale appropriately */
fuzz*=3.0;
pixel=p->red-q->red;
if ((p->colorspace == HSLColorspace) || (p->colorspace == HSBColorspace) ||
(p->colorspace == HWBColorspace))
{
/*
This calculates an arc distance for hue -- it should be a vector
angle of 'S'/'W' length with 'L'/'B' forming appropriate cones.
In other words this is a hack - Anthony.
*/
if (fabs((double) pixel) > (QuantumRange/2))
pixel-=QuantumRange;
pixel*=2;
}
distance+=pixel*pixel*scale;
if (distance > fuzz)
return(MagickFalse);
pixel=p->green-q->green;
distance+=pixel*pixel*scale;
if (distance > fuzz)
return(MagickFalse);
pixel=p->blue-q->blue;
distance+=pixel*pixel*scale;
if (distance > fuzz)
return(MagickFalse);
return(MagickTrue);
} | 0 | [
"CWE-190"
]
| ImageMagick | 406da3af9e09649cda152663c179902edf5ab3ac | 229,749,972,165,875,180,000,000,000,000,000,000,000 | 81 | https://github.com/ImageMagick/ImageMagick/issues/1732 |
static void vmx_leave_nested(struct kvm_vcpu *vcpu)
{
if (is_guest_mode(vcpu))
nested_vmx_vmexit(vcpu, -1, 0, 0);
free_nested(to_vmx(vcpu));
} | 0 | []
| kvm | a642fc305053cc1c6e47e4f4df327895747ab485 | 62,163,608,697,000,450,000,000,000,000,000,000,000 | 6 | kvm: vmx: handle invvpid vm exit gracefully
On systems with invvpid instruction support (corresponding bit in
IA32_VMX_EPT_VPID_CAP MSR is set) guest invocation of invvpid
causes vm exit, which is currently not handled and results in
propagation of unknown exit to userspace.
Fix this by installing an invvpid vm exit handler.
This is CVE-2014-3646.
Cc: [email protected]
Signed-off-by: Petr Matousek <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
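A sketch of the kind of handler the commit describes: register a handler for EXIT_REASON_INVVPID and inject #UD into a guest that was never offered VPID support. This is kernel-context code based on the commit description, so it will not build standalone, and the exact body is an assumption rather than a quote of the upstream patch.
/* Sketch only (kernel context): handle the INVVPID exit instead of
 * propagating an unknown exit to userspace.  For a guest that was never
 * offered VPID support, the sane response is #UD. */
static int handle_invvpid(struct kvm_vcpu *vcpu)
{
	kvm_queue_exception(vcpu, UD_VECTOR);
	return 1;	/* exit handled; resume the guest */
}
/* ...and hook it into the VMX exit dispatch table, e.g.:
 *	[EXIT_REASON_INVVPID] = handle_invvpid,
 */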