| func (string, 0–484k chars) | target (int64, 0–1) | cwe (list, 0–4 items) | project (string, 799 classes) | commit_id (string, 40 chars) | hash (float64) | size (int64, 1–24k) | message (string, 0–13.3k chars) |
|---|---|---|---|---|---|---|---|
void CLASS sony_load_raw()
{
uchar head[40];
ushort *pixel;
unsigned i, key, row, col;
fseek (ifp, 200896, SEEK_SET);
fseek (ifp, (unsigned) fgetc(ifp)*4 - 1, SEEK_CUR);
order = 0x4d4d;
key = get4();
fseek (ifp, 164600, SEEK_SET);
fread (head, 1, 40, ifp);
sony_decrypt ((unsigned int *) head, 10, 1, key);
for (i=26; i-- > 22; )
key = key << 8 | head[i];
fseek (ifp, data_offset, SEEK_SET);
for (row=0; row < raw_height; row++) {
pixel = raw_image + row*raw_width;
if (fread (pixel, 2, raw_width, ifp) < raw_width) derror();
sony_decrypt ((unsigned int *) pixel, raw_width/2, !row, key);
for (col=0; col < raw_width; col++)
if ((pixel[col] = ntohs(pixel[col])) >> 14) derror();
}
maximum = 0x3ff0;
} | 0 | [
"CWE-703"
]
| LibRaw | 11909cc59e712e09b508dda729b99aeaac2b29ad | 175,856,702,571,960,040,000,000,000,000,000,000,000 | 25 | cumulated data checks patch |
PHP_FUNCTION(posix_access)
{
long mode = 0;
int filename_len, ret;
char *filename, *path;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|l", &filename, &filename_len, &mode) == FAILURE) {
RETURN_FALSE;
}
path = expand_filepath(filename, NULL TSRMLS_CC);
if (!path) {
POSIX_G(last_error) = EIO;
RETURN_FALSE;
}
if (php_check_open_basedir_ex(path, 0 TSRMLS_CC) ||
(PG(safe_mode) && (!php_checkuid_ex(filename, NULL, CHECKUID_CHECK_FILE_AND_DIR, CHECKUID_NO_ERRORS)))) {
efree(path);
POSIX_G(last_error) = EPERM;
RETURN_FALSE;
}
ret = access(path, mode);
efree(path);
if (ret) {
POSIX_G(last_error) = errno;
RETURN_FALSE;
}
RETURN_TRUE;
} | 1 | []
| php-src | ce96fd6b0761d98353761bf78d5bfb55291179fd | 44,385,487,977,497,710,000,000,000,000,000,000,000 | 33 | - fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus |
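The fix the message describes targets embedded NUL bytes: the path is a counted string at the PHP level, but the libc call only sees the bytes up to the first '\0', so the access-control checks and the actual filesystem operation can disagree about which path is meant. A minimal sketch of such a check in C (hypothetical helper, not the macro PHP later added):

```c
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* True if the counted string contains an embedded NUL byte, i.e. its C-string
 * view is shorter than its reported length. */
static bool path_has_embedded_nul(const char *s, size_t reported_len)
{
    return memchr(s, '\0', reported_len) != NULL;
}
```

In posix_access() above, such a check would run right after zend_parse_parameters(), using filename and filename_len, and return FALSE before any path expansion.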
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
long error_code, siginfo_t *info)
{
struct task_struct *tsk = current;
if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
return;
/*
* We want error_code and trap_nr set for userspace faults and
* kernelspace faults which result in die(), but not
* kernelspace faults which are fixed up. die() gives the
* process no chance to handle the signal and notice the
* kernel fault information, so that won't result in polluting
* the information about previously queued, but not yet
* delivered, faults. See also do_general_protection below.
*/
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = trapnr;
#ifdef CONFIG_X86_64
if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
printk_ratelimit()) {
pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
tsk->comm, tsk->pid, str,
regs->ip, regs->sp, error_code);
print_vma_addr(" in ", regs->ip);
pr_cont("\n");
}
#endif
force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk);
} | 0 | [
"CWE-17"
]
| linux-2.6 | 6f442be2fb22be02cafa606f1769fa1e6f894441 | 229,040,341,753,206,700,000,000,000,000,000,000,000 | 33 | x86_64, traps: Stop using IST for #SS
On a 32-bit kernel, this has no effect, since there are no IST stacks.
On a 64-bit kernel, #SS can only happen in user code, on a failed iret
to user space, a canonical violation on access via RSP or RBP, or a
genuine stack segment violation in 32-bit kernel code. The first two
cases don't need IST, and the latter two cases are unlikely fatal bugs,
and promoting them to double faults would be fine.
This fixes a bug in which the espfix64 code mishandles a stack segment
violation.
This saves 4k of memory per CPU and a tiny bit of code.
Signed-off-by: Andy Lutomirski <[email protected]>
Reviewed-by: Thomas Gleixner <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]> |
int ahash_mcryptd_finup(struct ahash_request *desc)
{
/* alignment is to be done by multi-buffer crypto algorithm if needed */
return crypto_ahash_finup(desc);
} | 0 | [
"CWE-476",
"CWE-284"
]
| linux | 48a992727d82cb7db076fa15d372178743b1f4cd | 30,501,451,456,358,113,000,000,000,000,000,000,000 | 6 | crypto: mcryptd - Check mcryptd algorithm compatibility
Algorithms not compatible with mcryptd could be spawned by mcryptd
with a direct crypto_alloc_tfm invocation using a "mcryptd(alg)" name
construct. This causes mcryptd to crash the kernel if an arbitrary
"alg" is incompatible and not intended to be used with mcryptd. It is
an issue if AF_ALG tries to spawn mcryptd(alg) to expose it externally.
But such algorithms must be used internally and not be exposed.
We added a check to enforce that only internal algorithms are allowed
with mcryptd at the time mcryptd is spawning an algorithm.
Link: http://marc.info/?l=linux-crypto-vger&m=148063683310477&w=2
Cc: [email protected]
Reported-by: Mikulas Patocka <[email protected]>
Signed-off-by: Tim Chen <[email protected]>
Signed-off-by: Herbert Xu <[email protected]> |
int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
{
int time_incr;
int time_div, time_mod;
if (s->pict_type == AV_PICTURE_TYPE_I) {
if (!(s->avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
if (s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT) // HACK, the reference sw is buggy
mpeg4_encode_visual_object_header(s);
if (s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT || picture_number == 0) // HACK, the reference sw is buggy
mpeg4_encode_vol_header(s, 0, 0);
}
if (!(s->workaround_bugs & FF_BUG_MS))
mpeg4_encode_gop_header(s);
}
s->partitioned_frame = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_B;
put_bits(&s->pb, 16, 0); /* vop header */
put_bits(&s->pb, 16, VOP_STARTCODE); /* vop header */
put_bits(&s->pb, 2, s->pict_type - 1); /* pict type: I = 0 , P = 1 */
time_div = FFUDIV(s->time, s->avctx->time_base.den);
time_mod = FFUMOD(s->time, s->avctx->time_base.den);
time_incr = time_div - s->last_time_base;
av_assert0(time_incr >= 0);
// This limits the frame duration to max 1 hour
if (time_incr > 3600) {
av_log(s->avctx, AV_LOG_ERROR, "time_incr %d too large\n", time_incr);
return AVERROR(EINVAL);
}
while (time_incr--)
put_bits(&s->pb, 1, 1);
put_bits(&s->pb, 1, 0);
put_bits(&s->pb, 1, 1); /* marker */
put_bits(&s->pb, s->time_increment_bits, time_mod); /* time increment */
put_bits(&s->pb, 1, 1); /* marker */
put_bits(&s->pb, 1, 1); /* vop coded */
if (s->pict_type == AV_PICTURE_TYPE_P) {
put_bits(&s->pb, 1, s->no_rounding); /* rounding type */
}
put_bits(&s->pb, 3, 0); /* intra dc VLC threshold */
if (!s->progressive_sequence) {
put_bits(&s->pb, 1, s->current_picture_ptr->f->top_field_first);
put_bits(&s->pb, 1, s->alternate_scan);
}
// FIXME sprite stuff
put_bits(&s->pb, 5, s->qscale);
if (s->pict_type != AV_PICTURE_TYPE_I)
put_bits(&s->pb, 3, s->f_code); /* fcode_for */
if (s->pict_type == AV_PICTURE_TYPE_B)
put_bits(&s->pb, 3, s->b_code); /* fcode_back */
return 0;
} | 0 | [
"CWE-20"
]
| FFmpeg | 6bbef938839adc55e8e048bc9cc2e0fafe2064df | 299,490,559,948,358,720,000,000,000,000,000,000,000 | 60 | avcodec/mpeg4videoenc: Use 64 bit for times in mpeg4_encode_gop_header()
Fixes truncation
Fixes Assertion n <= 31 && value < (1U << n) failed at libavcodec/put_bits.h:169
Fixes: ffmpeg_crash_2.avi
Found-by: Thuan Pham <[email protected]>, Marcel Böhme, Andrew Santosa and Alexandru RazvanCaciulescu with AFLSmart
Signed-off-by: Michael Niedermayer <[email protected]>
(cherry picked from commit e1182fac1afba92a4975917823a5f644bee7e6e8)
Signed-off-by: Michael Niedermayer <[email protected]> |
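The truncation being fixed comes from doing timestamp arithmetic in 32-bit intermediates before feeding the result to put_bits(). A hedged sketch of the idea in C (names are assumptions, not FFmpeg's code):

```c
#include <stdint.h>

/* Split a 64-bit pts into wall-clock fields entirely in 64-bit arithmetic, so
 * a large timestamp cannot overflow before the small final values are emitted. */
static void split_gop_time(int64_t pts, int64_t time_base_den,
                           int *hours, int *minutes, int *seconds)
{
    int64_t s = pts / time_base_den;   /* whole seconds, still 64-bit */
    *seconds = (int)(s % 60);
    *minutes = (int)((s / 60) % 60);
    *hours   = (int)((s / 3600) % 24);
}
```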
ArgParser::argFilteredStreamData()
{
o.show_filtered_stream_data = true;
} | 0 | [
"CWE-787"
]
| qpdf | d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e | 51,260,213,840,732,540,000,000,000,000,000,000,000 | 4 | Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition. |
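The commit replaces implicit narrowing with conversions that check the range and fail loudly. qpdf's real helpers are C++ templates; the following C sketch only illustrates the pattern:

```c
#include <limits.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Range-checked narrowing: abort with a diagnostic instead of silently
 * truncating a size_t that does not fit in int. */
static int checked_size_to_int(size_t v)
{
    if (v > (size_t)INT_MAX) {
        fprintf(stderr, "value %zu does not fit in int\n", v);
        exit(EXIT_FAILURE);
    }
    return (int)v;
}
```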
rsa_verify (gcry_sexp_t s_sig, gcry_sexp_t s_data, gcry_sexp_t keyparms)
{
gcry_err_code_t rc;
struct pk_encoding_ctx ctx;
gcry_sexp_t l1 = NULL;
gcry_mpi_t sig = NULL;
gcry_mpi_t data = NULL;
RSA_public_key pk = { NULL, NULL };
gcry_mpi_t result = NULL;
_gcry_pk_util_init_encoding_ctx (&ctx, PUBKEY_OP_VERIFY,
rsa_get_nbits (keyparms));
/* Extract the data. */
rc = _gcry_pk_util_data_to_mpi (s_data, &data, &ctx);
if (rc)
goto leave;
if (DBG_CIPHER)
log_printmpi ("rsa_verify data", data);
if (mpi_is_opaque (data))
{
rc = GPG_ERR_INV_DATA;
goto leave;
}
/* Extract the signature value. */
rc = _gcry_pk_util_preparse_sigval (s_sig, rsa_names, &l1, NULL);
if (rc)
goto leave;
rc = sexp_extract_param (l1, NULL, "s", &sig, NULL);
if (rc)
goto leave;
if (DBG_CIPHER)
log_printmpi ("rsa_verify sig", sig);
/* Extract the key. */
rc = sexp_extract_param (keyparms, NULL, "ne", &pk.n, &pk.e, NULL);
if (rc)
goto leave;
if (DBG_CIPHER)
{
log_printmpi ("rsa_verify n", pk.n);
log_printmpi ("rsa_verify e", pk.e);
}
/* Do RSA computation and compare. */
result = mpi_new (0);
public (result, sig, &pk);
if (DBG_CIPHER)
log_printmpi ("rsa_verify cmp", result);
if (ctx.verify_cmp)
rc = ctx.verify_cmp (&ctx, result);
else
rc = mpi_cmp (result, data) ? GPG_ERR_BAD_SIGNATURE : 0;
leave:
_gcry_mpi_release (result);
_gcry_mpi_release (pk.n);
_gcry_mpi_release (pk.e);
_gcry_mpi_release (data);
_gcry_mpi_release (sig);
sexp_release (l1);
_gcry_pk_util_free_encoding_ctx (&ctx);
if (DBG_CIPHER)
log_debug ("rsa_verify => %s\n", rc?gpg_strerror (rc):"Good");
return rc;
} | 0 | [
"CWE-310"
]
| libgcrypt | 8725c99ffa41778f382ca97233183bcd687bb0ce | 132,137,780,683,090,970,000,000,000,000,000,000,000 | 67 | rsa: Add exponent blinding.
* cipher/rsa.c (secret_core_crt): Blind secret D with randomized
nonce R for mpi_powm computation.
--
Co-authored-by: Werner Koch <[email protected]>
Signed-off-by: NIIBE Yutaka <[email protected]>
The paper describing attack: https://eprint.iacr.org/2017/627
Sliding right into disaster: Left-to-right sliding windows leak
by Daniel J. Bernstein and Joachim Breitner and Daniel Genkin and
Leon Groot Bruinderink and Nadia Heninger and Tanja Lange and
Christine van Vredendaal and Yuval Yarom
It is well known that constant-time implementations of modular
exponentiation cannot use sliding windows. However, software
libraries such as Libgcrypt, used by GnuPG, continue to use sliding
windows. It is widely believed that, even if the complete pattern of
squarings and multiplications is observed through a side-channel
attack, the number of exponent bits leaked is not sufficient to
carry out a full key-recovery attack against RSA. Specifically,
4-bit sliding windows leak only 40% of the bits, and 5-bit sliding
windows leak only 33% of the bits.
In this paper we demonstrate a complete break of RSA-1024 as
implemented in Libgcrypt. Our attack makes essential use of the fact
that Libgcrypt uses the left-to-right method for computing the
sliding-window expansion. We show for the first time that the
direction of the encoding matters: the pattern of squarings and
multiplications in left-to-right sliding windows leaks significantly
more information about exponent bits than for right-to-left. We show
how to incorporate this additional information into the
Heninger-Shacham algorithm for partial key reconstruction, and use
it to obtain very efficient full key recovery for RSA-1024. We also
provide strong evidence that the same attack works for RSA-2048 with
only moderately more computation.
Exponent blinding is a kind of workaround to add noise. Signal (leak)
is still there for non-constant-time implementation. |
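The blinding added by the commit can be summarized with the standard formula (a sketch of the technique, not a transcription of secret_core_crt()): for each CRT half, a fresh random value is folded into the exponent,

$$d_p' = d_p + r_1\,(p-1), \qquad d_q' = d_q + r_2\,(q-1),$$

and by Fermat's little theorem $m^{d_p'} \equiv m^{d_p} \pmod{p}$ (and likewise mod $q$), so the computed result is unchanged while the square-and-multiply pattern visible to a side-channel observer varies from one private-key operation to the next.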
is_header_keep_alive(const HTTPVersion & http_version, const HTTPVersion & request_http_version, MIMEField* con_hdr /*, bool* unknown_tokens */)
{
enum
{
CON_TOKEN_NONE = 0,
CON_TOKEN_KEEP_ALIVE,
CON_TOKEN_CLOSE
};
int con_token = CON_TOKEN_NONE;
HTTPKeepAlive keep_alive = HTTP_NO_KEEPALIVE;
// *unknown_tokens = false;
if (con_hdr) {
if (con_hdr->value_get_index("keep-alive", 10) >= 0)
con_token = CON_TOKEN_KEEP_ALIVE;
else if (con_hdr->value_get_index("close", 5) >= 0)
con_token = CON_TOKEN_CLOSE;
}
if (HTTPVersion(1, 0) == http_version) {
keep_alive = (con_token == CON_TOKEN_KEEP_ALIVE) ? (HTTP_KEEPALIVE) : (HTTP_NO_KEEPALIVE);
} else if (HTTPVersion(1, 1) == http_version) {
// We deviate from the spec here. If we got a response with no
// Connection header and the request was HTTP/1.0, don't treat this as
// keep-alive, since the Netscape-Enterprise/3.6 SP1 server doesn't
keep_alive = ((con_token == CON_TOKEN_KEEP_ALIVE) ||
(con_token == CON_TOKEN_NONE && HTTPVersion(1, 1) == request_http_version)) ? (HTTP_KEEPALIVE)
: (HTTP_NO_KEEPALIVE);
} else {
keep_alive = HTTP_NO_KEEPALIVE;
}
return (keep_alive);
} | 0 | [
"CWE-119"
]
| trafficserver | 8b5f0345dade6b2822d9b52c8ad12e63011a5c12 | 193,002,616,570,820,850,000,000,000,000,000,000,000 | 36 | Fix the internal buffer sizing. Thanks to Sudheer for helping isolating this bug |
pg_timezone_names(PG_FUNCTION_ARGS)
{
MemoryContext oldcontext;
FuncCallContext *funcctx;
pg_tzenum *tzenum;
pg_tz *tz;
Datum result;
HeapTuple tuple;
Datum values[4];
bool nulls[4];
int tzoff;
struct pg_tm tm;
fsec_t fsec;
const char *tzn;
Interval *resInterval;
struct pg_tm itm;
/* stuff done only on the first call of the function */
if (SRF_IS_FIRSTCALL())
{
TupleDesc tupdesc;
/* create a function context for cross-call persistence */
funcctx = SRF_FIRSTCALL_INIT();
/*
* switch to memory context appropriate for multiple function calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
/* initialize timezone scanning code */
tzenum = pg_tzenumerate_start();
funcctx->user_fctx = (void *) tzenum;
/*
* build tupdesc for result tuples. This must match this function's
* pg_proc entry!
*/
tupdesc = CreateTemplateTupleDesc(4, false);
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name",
TEXTOID, -1, 0);
TupleDescInitEntry(tupdesc, (AttrNumber) 2, "abbrev",
TEXTOID, -1, 0);
TupleDescInitEntry(tupdesc, (AttrNumber) 3, "utc_offset",
INTERVALOID, -1, 0);
TupleDescInitEntry(tupdesc, (AttrNumber) 4, "is_dst",
BOOLOID, -1, 0);
funcctx->tuple_desc = BlessTupleDesc(tupdesc);
MemoryContextSwitchTo(oldcontext);
}
/* stuff done on every call of the function */
funcctx = SRF_PERCALL_SETUP();
tzenum = (pg_tzenum *) funcctx->user_fctx;
/* search for another zone to display */
for (;;)
{
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
tz = pg_tzenumerate_next(tzenum);
MemoryContextSwitchTo(oldcontext);
if (!tz)
{
pg_tzenumerate_end(tzenum);
funcctx->user_fctx = NULL;
SRF_RETURN_DONE(funcctx);
}
/* Convert now() to local time in this zone */
if (timestamp2tm(GetCurrentTransactionStartTimestamp(),
&tzoff, &tm, &fsec, &tzn, tz) != 0)
continue; /* ignore if conversion fails */
/* Ignore zic's rather silly "Factory" time zone */
if (tzn && strcmp(tzn, "Local time zone must be set--see zic manual page") == 0)
continue;
/* Found a displayable zone */
break;
}
MemSet(nulls, 0, sizeof(nulls));
values[0] = CStringGetTextDatum(pg_get_timezone_name(tz));
values[1] = CStringGetTextDatum(tzn ? tzn : "");
MemSet(&itm, 0, sizeof(struct pg_tm));
itm.tm_sec = -tzoff;
resInterval = (Interval *) palloc(sizeof(Interval));
tm2interval(&itm, 0, resInterval);
values[2] = IntervalPGetDatum(resInterval);
values[3] = BoolGetDatum(tm.tm_isdst > 0);
tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
result = HeapTupleGetDatum(tuple);
SRF_RETURN_NEXT(funcctx, result);
} | 0 | [
"CWE-119"
]
| postgres | 01824385aead50e557ca1af28640460fa9877d51 | 89,404,799,790,192,820,000,000,000,000,000,000,000 | 101 | Prevent potential overruns of fixed-size buffers.
Coverity identified a number of places in which it couldn't prove that a
string being copied into a fixed-size buffer would fit. We believe that
most, perhaps all of these are in fact safe, or are copying data that is
coming from a trusted source so that any overrun is not really a security
issue. Nonetheless it seems prudent to forestall any risk by using
strlcpy() and similar functions.
Fixes by Peter Eisentraut and Jozef Mlich based on Coverity reports.
In addition, fix a potential null-pointer-dereference crash in
contrib/chkpass. The crypt(3) function is defined to return NULL on
failure, but chkpass.c didn't check for that before using the result.
The main practical case in which this could be an issue is if libc is
configured to refuse to execute unapproved hashing algorithms (e.g.,
"FIPS mode"). This ideally should've been a separate commit, but
since it touches code adjacent to one of the buffer overrun changes,
I included it in this commit to avoid last-minute merge issues.
This issue was reported by Honza Horak.
Security: CVE-2014-0065 for buffer overruns, CVE-2014-0066 for crypt() |
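The pattern the commit applies is simply to bound every copy into a fixed-size buffer by the destination size. A sketch in C; snprintf() stands in for the strlcpy() used in the tree, and both truncate and NUL-terminate instead of writing past the end:

```c
#include <stddef.h>
#include <stdio.h>

/* Copy src into a dst of dstsize bytes without ever overrunning it; overlong
 * input is truncated rather than overflowing the buffer. */
static void copy_bounded(char *dst, size_t dstsize, const char *src)
{
    snprintf(dst, dstsize, "%s", src);
}
```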
static MSG_PROCESS_RETURN tls_process_encrypted_extensions(SSL *s, PACKET *pkt)
{
PACKET extensions;
RAW_EXTENSION *rawexts = NULL;
if (!PACKET_as_length_prefixed_2(pkt, &extensions)
|| PACKET_remaining(pkt) != 0) {
SSLfatal(s, SSL_AD_DECODE_ERROR, SSL_R_LENGTH_MISMATCH);
goto err;
}
if (!tls_collect_extensions(s, &extensions,
SSL_EXT_TLS1_3_ENCRYPTED_EXTENSIONS, &rawexts,
NULL, 1)
|| !tls_parse_all_extensions(s, SSL_EXT_TLS1_3_ENCRYPTED_EXTENSIONS,
rawexts, NULL, 0, 1)) {
/* SSLfatal() already called */
goto err;
}
OPENSSL_free(rawexts);
return MSG_PROCESS_CONTINUE_READING;
err:
OPENSSL_free(rawexts);
return MSG_PROCESS_ERROR;
} | 0 | [
"CWE-835"
]
| openssl | 758754966791c537ea95241438454aa86f91f256 | 96,089,332,969,373,620,000,000,000,000,000,000,000 | 27 | Fix invalid handling of verify errors in libssl
In the event that X509_verify() returned an internal error result then
libssl would mishandle this and set rwstate to SSL_RETRY_VERIFY. This
subsequently causes SSL_get_error() to return SSL_ERROR_WANT_RETRY_VERIFY.
That return code is supposed to only ever be returned if an application
is using an app verify callback to complete replace the use of
X509_verify(). Applications may not be written to expect that return code
and could therefore crash (or misbehave in some other way) as a result.
CVE-2021-4044
Reviewed-by: Tomas Mraz <[email protected]> |
static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
void __user *measure = (void __user *)(uintptr_t)argp->data;
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct sev_data_launch_measure data;
struct kvm_sev_launch_measure params;
void __user *p = NULL;
void *blob = NULL;
int ret;
if (!sev_guest(kvm))
return -ENOTTY;
if (copy_from_user(¶ms, measure, sizeof(params)))
return -EFAULT;
memset(&data, 0, sizeof(data));
/* User wants to query the blob length */
if (!params.len)
goto cmd;
p = (void __user *)(uintptr_t)params.uaddr;
if (p) {
if (params.len > SEV_FW_BLOB_MAX_SIZE)
return -EINVAL;
blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
if (!blob)
return -ENOMEM;
data.address = __psp_pa(blob);
data.len = params.len;
}
cmd:
data.handle = sev->handle;
ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, &data, &argp->error);
/*
* If we query the session length, FW responded with expected data.
*/
if (!params.len)
goto done;
if (ret)
goto e_free_blob;
if (blob) {
if (copy_to_user(p, blob, params.len))
ret = -EFAULT;
}
done:
params.len = data.len;
if (copy_to_user(measure, ¶ms, sizeof(params)))
ret = -EFAULT;
e_free_blob:
kfree(blob);
return ret;
} | 0 | [
"CWE-459"
]
| linux | 683412ccf61294d727ead4a73d97397396e69a6b | 203,965,214,064,778,600,000,000,000,000,000,000,000 | 61 | KVM: SEV: add cache flush to solve SEV cache incoherency issues
Flush the CPU caches when memory is reclaimed from an SEV guest (where
reclaim also includes it being unmapped from KVM's memslots). Due to lack
of coherency for SEV encrypted memory, failure to flush results in silent
data corruption if userspace is malicious/broken and doesn't ensure SEV
guest memory is properly pinned and unpinned.
Cache coherency is not enforced across the VM boundary in SEV (AMD APM
vol.2 Section 15.34.7). Confidential cachelines, generated by confidential
VM guests have to be explicitly flushed on the host side. If a memory page
containing dirty confidential cachelines was released by VM and reallocated
to another user, the cachelines may corrupt the new user at a later time.
KVM takes a shortcut by assuming all confidential memory remain pinned
until the end of VM lifetime. Therefore, KVM does not flush cache at
mmu_notifier invalidation events. Because of this incorrect assumption and
the lack of cache flushing, malicous userspace can crash the host kernel:
creating a malicious VM and continuously allocates/releases unpinned
confidential memory pages when the VM is running.
Add cache flush operations to mmu_notifier operations to ensure that any
physical memory leaving the guest VM get flushed. In particular, hook
mmu_notifier_invalidate_range_start and mmu_notifier_release events and
flush cache accordingly. The hook after releasing the mmu lock to avoid
contention with other vCPUs.
Cc: [email protected]
Suggested-by: Sean Christpherson <[email protected]>
Reported-by: Mingwei Zhang <[email protected]>
Signed-off-by: Mingwei Zhang <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
function_stat_next(void *v, int idx)
{
struct ftrace_profile *rec = v;
struct ftrace_profile_page *pg;
pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
again:
if (idx != 0)
rec++;
if ((void *)rec >= (void *)&pg->records[pg->index]) {
pg = pg->next;
if (!pg)
return NULL;
rec = &pg->records[0];
if (!rec->counter)
goto again;
}
return rec;
} | 0 | [
"CWE-703"
]
| linux | 6a76f8c0ab19f215af2a3442870eeb5f0e81998d | 68,559,698,242,383,170,000,000,000,000,000,000,000 | 22 | tracing: Fix possible NULL pointer dereferences
Currently the set_ftrace_pid and set_graph_function files use seq_lseek
for their fops. However, seq_open() is called only for FMODE_READ in
fops->open(), so if a user tries to seek one of those files after opening
it for writing, it sees a NULL seq_file and then panics.
It can be easily reproduced with following command:
$ cd /sys/kernel/debug/tracing
$ echo 1234 | sudo tee -a set_ftrace_pid
In this example, GNU coreutils' tee opens the file with fopen(, "a")
and then the fopen() internally calls lseek().
Link: http://lkml.kernel.org/r/[email protected]
Cc: Frederic Weisbecker <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Namhyung Kim <[email protected]>
Cc: [email protected]
Signed-off-by: Namhyung Kim <[email protected]>
Signed-off-by: Steven Rostedt <[email protected]> |
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
if (hasActiveChildProcess()) return C_ERR;
server.dirty_before_bgsave = server.dirty;
server.lastbgsave_try = time(NULL);
openChildInfoPipe();
if ((childpid = redisFork(CHILD_TYPE_RDB)) == 0) {
int retval;
/* Child */
redisSetProcTitle("redis-rdb-bgsave");
redisSetCpuAffinity(server.bgsave_cpulist);
retval = rdbSave(filename,rsi);
if (retval == C_OK) {
sendChildCOWInfo(CHILD_TYPE_RDB, "RDB");
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* Parent */
if (childpid == -1) {
closeChildInfoPipe();
server.lastbgsave_status = C_ERR;
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
return C_ERR;
}
serverLog(LL_NOTICE,"Background saving started by pid %d",childpid);
server.rdb_save_time_start = time(NULL);
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
updateDictResizePolicy();
return C_OK;
}
return C_OK; /* unreached */
} | 0 | [
"CWE-190"
]
| redis | a30d367a71b7017581cf1ca104242a3c644dec0f | 224,796,544,475,679,840,000,000,000,000,000,000,000 | 38 | Fix Integer overflow issue with intsets (CVE-2021-32687)
The vulnerability involves changing the default set-max-intset-entries
configuration parameter to a very large value and constructing specially
crafted commands to manipulate sets |
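The overflow class here is the usual count × element-size wrap-around: with a huge set-max-intset-entries, the product used to size the allocation can exceed the range of the size type, so a smaller buffer is allocated than is later written. A hedged sketch of an overflow-safe growth check (names are illustrative, not Redis's code):

```c
#include <stdint.h>

/* Non-zero only when len * elem_size fits in a size_t, so the allocation that
 * follows gets the size the caller actually intended. */
static int alloc_size_fits(uint64_t len, uint64_t elem_size)
{
    return elem_size != 0 && len <= SIZE_MAX / elem_size;
}
```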
static int connect_to_db(char *host, char *user,char *passwd)
{
char buff[20+FN_REFLEN];
DBUG_ENTER("connect_to_db");
verbose_msg("-- Connecting to %s...\n", host ? host : "localhost");
mysql_init(&mysql_connection);
if (opt_compress)
mysql_options(&mysql_connection,MYSQL_OPT_COMPRESS,NullS);
#ifdef HAVE_OPENSSL
if (opt_use_ssl)
mysql_ssl_set(&mysql_connection, opt_ssl_key, opt_ssl_cert, opt_ssl_ca,
opt_ssl_capath, opt_ssl_cipher);
mysql_options(&mysql_connection,MYSQL_OPT_SSL_VERIFY_SERVER_CERT,
(char*)&opt_ssl_verify_server_cert);
#endif
if (opt_protocol)
mysql_options(&mysql_connection,MYSQL_OPT_PROTOCOL,(char*)&opt_protocol);
#ifdef HAVE_SMEM
if (shared_memory_base_name)
mysql_options(&mysql_connection,MYSQL_SHARED_MEMORY_BASE_NAME,shared_memory_base_name);
#endif
mysql_options(&mysql_connection, MYSQL_SET_CHARSET_NAME, default_charset);
if (opt_plugin_dir && *opt_plugin_dir)
mysql_options(&mysql_connection, MYSQL_PLUGIN_DIR, opt_plugin_dir);
if (opt_default_auth && *opt_default_auth)
mysql_options(&mysql_connection, MYSQL_DEFAULT_AUTH, opt_default_auth);
if (using_opt_enable_cleartext_plugin)
mysql_options(&mysql_connection, MYSQL_ENABLE_CLEARTEXT_PLUGIN,
(char *) &opt_enable_cleartext_plugin);
if (!(mysql= mysql_connect_ssl_check(&mysql_connection, host, user,
passwd, NULL, opt_mysql_port,
opt_mysql_unix_port, 0,
opt_ssl_mode == SSL_MODE_REQUIRED)))
{
DB_error(&mysql_connection, "when trying to connect");
DBUG_RETURN(1);
}
if ((mysql_get_server_version(&mysql_connection) < 40100) ||
(opt_compatible_mode & 3))
{
/* Don't dump SET NAMES with a pre-4.1 server (bug#7997). */
opt_set_charset= 0;
/* Don't switch charsets for 4.1 and earlier. (bug#34192). */
server_supports_switching_charsets= FALSE;
}
/*
As we're going to set SQL_MODE, it would be lost on reconnect, so we
cannot reconnect.
*/
mysql->reconnect= 0;
my_snprintf(buff, sizeof(buff), "/*!40100 SET @@SQL_MODE='%s' */",
compatible_mode_normal_str);
if (mysql_query_with_error_report(mysql, 0, buff))
DBUG_RETURN(1);
/*
set time_zone to UTC to allow dumping date types between servers with
different time zone settings
*/
if (opt_tz_utc)
{
my_snprintf(buff, sizeof(buff), "/*!40103 SET TIME_ZONE='+00:00' */");
if (mysql_query_with_error_report(mysql, 0, buff))
DBUG_RETURN(1);
}
DBUG_RETURN(0);
} /* connect_to_db */ | 0 | [
"CWE-319"
]
| mysql-server | 060b1eadf4913f7066484ea34ec62feead1bca44 | 132,071,556,139,890,970,000,000,000,000,000,000,000 | 72 | BUG#25575605: SETTING --SSL-MODE=REQUIRED SENDS CREDENTIALS BEFORE VERIFYING SSL CONNECTION
MYSQL_OPT_SSL_MODE option introduced.
It is set in case of --ssl-mode=REQUIRED and permits only SSL connection.
(cherry picked from commit 3b2d28578c526f347f5cfe763681eff365731f99) |
static inline int request_slot(struct b43_dmaring *ring)
{
int slot;
B43_WARN_ON(!ring->tx);
B43_WARN_ON(ring->stopped);
B43_WARN_ON(free_slots(ring) == 0);
slot = next_slot(ring, ring->current_slot);
ring->current_slot = slot;
ring->used_slots++;
update_max_used_slots(ring, ring->used_slots);
return slot;
} | 0 | [
"CWE-119",
"CWE-787"
]
| linux | c85ce65ecac078ab1a1835c87c4a6319cf74660a | 28,118,353,428,074,274,000,000,000,000,000,000,000 | 16 | b43: allocate receive buffers big enough for max frame len + offset
Otherwise, skb_put inside of dma_rx can fail...
https://bugzilla.kernel.org/show_bug.cgi?id=32042
Signed-off-by: John W. Linville <[email protected]>
Acked-by: Larry Finger <[email protected]>
Cc: [email protected] |
int ByteVector::find(const ByteVector &pattern, uint offset, int byteAlign) const
{
return vectorFind<ByteVector>(*this, pattern, offset, byteAlign);
} | 0 | [
"CWE-189"
]
| taglib | dcdf4fd954e3213c355746fa15b7480461972308 | 32,872,396,739,542,120,000,000,000,000,000,000,000 | 4 | Avoid uint overflow in case the length + index is over UINT_MAX |
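The wrap-around mentioned above happens in checks of the form offset + patternSize > size, where the unsigned addition itself can exceed UINT_MAX and wrap to a small value, letting the search read out of bounds. The safe formulation avoids the addition entirely (sketched in C rather than TagLib's C++):

```c
/* Non-zero when patternLen bytes starting at offset lie entirely inside a
 * buffer of size bytes; no expression here can wrap. */
static int range_fits(unsigned int size, unsigned int offset,
                      unsigned int patternLen)
{
    return offset <= size && patternLen <= size - offset;
}
```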
RBinPEObj* PE_(r_bin_pe_new)(const char* file, bool verbose) {
RBinPEObj* pe = R_NEW0 (RBinPEObj);
if (!pe) {
return NULL;
}
pe->file = file;
size_t binsz;
ut8 *buf = (ut8*)r_file_slurp (file, &binsz);
pe->size = binsz;
if (!buf) {
return PE_(r_bin_pe_free)(pe);
}
pe->b = r_buf_new ();
if (!r_buf_set_bytes (pe->b, buf, pe->size)) {
free (buf);
return PE_(r_bin_pe_free)(pe);
}
pe->verbose = verbose;
free (buf);
if (!bin_pe_init (pe)) {
return PE_(r_bin_pe_free)(pe);
}
return pe;
} | 0 | [
"CWE-400",
"CWE-703"
]
| radare2 | 634b886e84a5c568d243e744becc6b3223e089cf | 287,587,044,366,476,350,000,000,000,000,000,000,000 | 24 | Fix DoS in PE/QNX/DYLDCACHE/PSX parsers ##crash
* Reported by lazymio
* Reproducer: AAA4AAAAAB4= |
static int oidc_handle_post_authorization_response(request_rec *r, oidc_cfg *c,
oidc_session_t *session) {
oidc_debug(r, "enter");
/* initialize local variables */
char *response_mode = NULL;
/* read the parameters that are POST-ed to us */
apr_table_t *params = apr_table_make(r->pool, 8);
if (oidc_util_read_post_params(r, params) == FALSE) {
oidc_error(r, "something went wrong when reading the POST parameters");
return HTTP_INTERNAL_SERVER_ERROR;
}
/* see if we've got any POST-ed data at all */
if ((apr_table_elts(params)->nelts < 1)
|| ((apr_table_elts(params)->nelts == 1)
&& apr_table_get(params, OIDC_PROTO_RESPONSE_MODE)
&& (apr_strnatcmp(
apr_table_get(params, OIDC_PROTO_RESPONSE_MODE),
OIDC_PROTO_RESPONSE_MODE_FRAGMENT) == 0))) {
return oidc_util_html_send_error(r, c->error_template,
"Invalid Request",
"You've hit an OpenID Connect Redirect URI with no parameters, this is an invalid request; you should not open this URL in your browser directly, or have the server administrator use a different " OIDCRedirectURI " setting.",
HTTP_INTERNAL_SERVER_ERROR);
}
/* get the parameters */
response_mode = (char *) apr_table_get(params, OIDC_PROTO_RESPONSE_MODE);
/* do the actual implicit work */
return oidc_handle_authorization_response(r, c, session, params,
response_mode ? response_mode : OIDC_PROTO_RESPONSE_MODE_FORM_POST);
} | 0 | [
"CWE-79"
]
| mod_auth_openidc | 132a4111bf3791e76437619a66336dce2ce4c79b | 189,395,971,522,125,130,000,000,000,000,000,000,000 | 35 | release 2.3.10.2: fix XSS vulnerability for poll parameter
in OIDC Session Management RP iframe; CSNC-2019-001; thanks Mischa
Bachmann
Signed-off-by: Hans Zandbelt <[email protected]> |
static void pointer_event(VncState *vs, int button_mask, int x, int y)
{
static uint32_t bmap[INPUT_BUTTON_MAX] = {
[INPUT_BUTTON_LEFT] = 0x01,
[INPUT_BUTTON_MIDDLE] = 0x02,
[INPUT_BUTTON_RIGHT] = 0x04,
[INPUT_BUTTON_WHEEL_UP] = 0x08,
[INPUT_BUTTON_WHEEL_DOWN] = 0x10,
};
QemuConsole *con = vs->vd->dcl.con;
int width = pixman_image_get_width(vs->vd->server);
int height = pixman_image_get_height(vs->vd->server);
if (vs->last_bmask != button_mask) {
qemu_input_update_buttons(con, bmap, vs->last_bmask, button_mask);
vs->last_bmask = button_mask;
}
if (vs->absolute) {
qemu_input_queue_abs(con, INPUT_AXIS_X, x, width);
qemu_input_queue_abs(con, INPUT_AXIS_Y, y, height);
} else if (vnc_has_feature(vs, VNC_FEATURE_POINTER_TYPE_CHANGE)) {
qemu_input_queue_rel(con, INPUT_AXIS_X, x - 0x7FFF);
qemu_input_queue_rel(con, INPUT_AXIS_Y, y - 0x7FFF);
} else {
if (vs->last_x != -1) {
qemu_input_queue_rel(con, INPUT_AXIS_X, x - vs->last_x);
qemu_input_queue_rel(con, INPUT_AXIS_Y, y - vs->last_y);
}
vs->last_x = x;
vs->last_y = y;
}
qemu_input_event_sync();
} | 0 | [
"CWE-125"
]
| qemu | bea60dd7679364493a0d7f5b54316c767cf894ef | 271,626,691,180,306,150,000,000,000,000,000,000,000 | 34 | ui/vnc: fix potential memory corruption issues
this patch makes the VNC server work correctly if the
server surface and the guest surface have different sizes.
Basically the server surface is adjusted to not exceed VNC_MAX_WIDTH
x VNC_MAX_HEIGHT and additionally the width is rounded up to multiple of
VNC_DIRTY_PIXELS_PER_BIT.
If we have a resolution whose width is not dividable by VNC_DIRTY_PIXELS_PER_BIT
we now get a small black bar on the right of the screen.
If the surface is too big to fit the limits only the upper left area is shown.
On top of that this fixes 2 memory corruption issues:
The first was actually discovered during playing
around with a Windows 7 vServer. During resolution
change in Windows 7 it happens sometimes that Windows
changes to an intermediate resolution where
server_stride % cmp_bytes != 0 (in vnc_refresh_server_surface).
This happens only if width % VNC_DIRTY_PIXELS_PER_BIT != 0.
The second is a theoretical issue, but is maybe exploitable
by the guest. If for some reason the guest surface size is bigger
than VNC_MAX_WIDTH x VNC_MAX_HEIGHT we end up in severe corruption since
this limit is nowhere enforced.
Signed-off-by: Peter Lieven <[email protected]>
Signed-off-by: Gerd Hoffmann <[email protected]> |
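The sizing rule the message describes: cap the server surface at the protocol maximum and round the width up to a whole number of dirty-bitmap bits so that server_stride % cmp_bytes == 0 always holds. A sketch with illustrative constants and names (not QEMU's actual code):

```c
enum {
    VNC_MAX_WIDTH            = 2560,  /* assumed limits for illustration */
    VNC_MAX_HEIGHT           = 2048,
    VNC_DIRTY_PIXELS_PER_BIT = 16     /* must be a power of two here */
};

static void vnc_clamp_surface(int *width, int *height)
{
    if (*width > VNC_MAX_WIDTH)
        *width = VNC_MAX_WIDTH;
    if (*height > VNC_MAX_HEIGHT)
        *height = VNC_MAX_HEIGHT;
    /* round the width up to a multiple of the dirty-bitmap granularity */
    *width = (*width + VNC_DIRTY_PIXELS_PER_BIT - 1)
             & ~(VNC_DIRTY_PIXELS_PER_BIT - 1);
}
```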
static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
return 0;
} | 0 | [
"CWE-20",
"CWE-617"
]
| linux | 9e3f7a29694049edd728e2400ab57ad7553e5aa9 | 23,034,088,061,391,330,000,000,000,000,000,000,000 | 9 | arm64: KVM: pmu: Fix AArch32 cycle counter access
We're missing the handling code for the cycle counter accessed
from a 32bit guest, leading to unexpected results.
Cc: [email protected] # 4.6+
Signed-off-by: Wei Huang <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]> |
ENCODE_JSON(UInt16) {
char buf[6];
UA_UInt16 digits = itoaUnsigned(*src, buf, 10);
if(ctx->pos + digits > ctx->end)
return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED;
if(!ctx->calcOnly)
memcpy(ctx->pos, buf, digits);
ctx->pos += digits;
return UA_STATUSCODE_GOOD;
} | 0 | [
"CWE-703",
"CWE-787"
]
| open62541 | c800e2987b10bb3af6ef644b515b5d6392f8861d | 253,364,905,637,414,930,000,000,000,000,000,000,000 | 12 | fix(json): Check max recursion depth in more places |
static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp)
{
int act_comp=0,num_packets=0,chained,dummy;
stbi__pic_packet packets[10];
if (!x) x = &dummy;
if (!y) y = &dummy;
if (!comp) comp = &dummy;
if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) {
stbi__rewind(s);
return 0;
}
stbi__skip(s, 88);
*x = stbi__get16be(s);
*y = stbi__get16be(s);
if (stbi__at_eof(s)) {
stbi__rewind( s);
return 0;
}
if ( (*x) != 0 && (1 << 28) / (*x) < (*y)) {
stbi__rewind( s );
return 0;
}
stbi__skip(s, 8);
do {
stbi__pic_packet *packet;
if (num_packets==sizeof(packets)/sizeof(packets[0]))
return 0;
packet = &packets[num_packets++];
chained = stbi__get8(s);
packet->size = stbi__get8(s);
packet->type = stbi__get8(s);
packet->channel = stbi__get8(s);
act_comp |= packet->channel;
if (stbi__at_eof(s)) {
stbi__rewind( s );
return 0;
}
if (packet->size != 8) {
stbi__rewind( s );
return 0;
}
} while (chained);
*comp = (act_comp & 0x10 ? 4 : 3);
return 1;
} | 0 | [
"CWE-787"
]
| stb | 5ba0baaa269b3fd681828e0e3b3ac0f1472eaf40 | 105,216,679,605,867,600,000,000,000,000,000,000,000 | 56 | stb_image: Reject fractional JPEG component subsampling ratios
The component resamplers are not written to support this and I've
never seen it happen in a real (non-crafted) JPEG file so I'm
fine rejecting this as outright corrupt.
Fixes issue #1178. |
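The acceptance rule the fix enforces is that every component's sampling factor must divide the maximum, i.e. the subsampling ratio is an integer; anything else is rejected as corrupt because the resamplers are not written to handle it. Sketched in C (not stb's exact code):

```c
/* Non-zero only when each component's horizontal and vertical sampling factors
 * divide the maxima, i.e. all subsampling ratios are integers. */
static int sampling_factors_ok(const int h[], const int v[], int ncomp,
                               int h_max, int v_max)
{
    for (int i = 0; i < ncomp; i++)
        if (h[i] == 0 || v[i] == 0 || h_max % h[i] != 0 || v_max % v[i] != 0)
            return 0;
    return 1;
}
```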
Status RunRestore(const RunOptions& run_options, const string& export_dir,
const StringPiece restore_op_name,
const StringPiece variable_filename_const_op_name,
const std::vector<AssetFileDef>& asset_file_defs,
Session* session) {
LOG(INFO) << "Restoring SavedModel bundle.";
// Find path to variables to be restored in export directory.
const string variables_directory =
io::JoinPath(export_dir, kSavedModelVariablesDirectory);
// Check for saver checkpoints in v2 format. Models exported in the checkpoint
// v2 format will have a variables.index file. The corresponding
// variables are stored in the variables.data-?????-of-????? files.
const string variables_index_path = io::JoinPath(
variables_directory, MetaFilename(kSavedModelVariablesFilename));
if (!Env::Default()->FileExists(variables_index_path).ok()) {
LOG(INFO) << "The specified SavedModel has no variables; no checkpoints "
"were restored. File does not exist: "
<< variables_index_path;
return Status::OK();
}
const string variables_path =
io::JoinPath(variables_directory, kSavedModelVariablesFilename);
// Add variables to the graph.
Tensor variables_path_tensor(DT_STRING, TensorShape({}));
variables_path_tensor.scalar<tstring>()() = variables_path;
std::vector<std::pair<string, Tensor>> inputs = {
{string(variable_filename_const_op_name), variables_path_tensor}};
AddAssetsTensorsToInputs(export_dir, asset_file_defs, &inputs);
RunMetadata run_metadata;
return RunOnce(run_options, inputs, {}, {string(restore_op_name)},
nullptr /* outputs */, &run_metadata, session);
} | 0 | [
"CWE-20",
"CWE-703"
]
| tensorflow | adf095206f25471e864a8e63a0f1caef53a0e3a6 | 30,006,326,354,103,150,000,000,000,000,000,000,000 | 36 | Validate `NodeDef`s from `FunctionDefLibrary` of a `GraphDef`.
We already validated `NodeDef`s from a `GraphDef` but missed validating those from the `FunctionDefLibrary`. Thus, some maliciously crafted models could evade detection and cause denial of service due to a `CHECK`-fail.
PiperOrigin-RevId: 332536309
Change-Id: I052efe919ff1fe2f90815e286a1aa4c54c7b94ff |
ModuleExport size_t RegisterPCLImage(void)
{
MagickInfo
*entry;
entry=SetMagickInfo("PCL");
entry->decoder=(DecodeImageHandler *) ReadPCLImage;
entry->encoder=(EncodeImageHandler *) WritePCLImage;
entry->magick=(IsImageFormatHandler *) IsPCL;
entry->blob_support=MagickFalse;
entry->seekable_stream=MagickTrue;
entry->thread_support=EncoderThreadSupport;
entry->description=ConstantString("Printer Control Language");
entry->module=ConstantString("PCL");
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
} | 0 | [
"CWE-401"
]
| ImageMagick6 | ff840181f631b1b7f29160cae24d792fcd176bae | 224,074,657,223,233,550,000,000,000,000,000,000,000 | 17 | https://github.com/ImageMagick/ImageMagick/issues/1520 |
longlong val_time_packed(THD *thd)
{
return has_value() ? value : 0;
} | 0 | [
"CWE-617"
]
| server | 807945f2eb5fa22e6f233cc17b85a2e141efe2c8 | 99,113,618,166,753,740,000,000,000,000,000,000,000 | 4 | MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item. |
void ConnectionManagerImpl::ActiveStreamDecoderFilter::requestRouteConfigUpdate(
Http::RouteConfigUpdatedCallbackSharedPtr route_config_updated_cb) {
parent_.requestRouteConfigUpdate(dispatcher(), std::move(route_config_updated_cb));
} | 0 | [
"CWE-400"
]
| envoy | 0e49a495826ea9e29134c1bd54fdeb31a034f40c | 334,490,924,352,710,750,000,000,000,000,000,000,000 | 4 | http/2: add stats and stream flush timeout (#139)
This commit adds a new stream flush timeout to guard against a
remote server that does not open window once an entire stream has
been buffered for flushing. Additional stats have also been added
to better understand the codecs view of active streams as well as
amount of data buffered.
Signed-off-by: Matt Klein <[email protected]> |
_equalConstraintsSetStmt(const ConstraintsSetStmt *a, const ConstraintsSetStmt *b)
{
COMPARE_NODE_FIELD(constraints);
COMPARE_SCALAR_FIELD(deferred);
return true;
} | 0 | [
"CWE-362"
]
| postgres | 5f173040e324f6c2eebb90d86cf1b0cdb5890f0a | 162,871,474,666,311,640,000,000,000,000,000,000,000 | 7 | Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062 |
selection_changed_cb (GtkTreeSelection *selection,
gpointer user_data)
{
FrWindow *window = user_data;
fr_window_update_statusbar_list_info (window);
fr_window_update_sensitivity (window);
return FALSE;
} | 0 | [
"CWE-22"
]
| file-roller | b147281293a8307808475e102a14857055f81631 | 53,409,059,794,137,580,000,000,000,000,000,000,000 | 10 | libarchive: sanitize filenames before extracting |
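Sanitizing the archive entry name before extraction (CWE-22) means refusing paths that could escape the destination directory: absolute paths and any ".." component. A minimal sketch of such a check (illustrative only, not file-roller's code):

```c
#include <stdbool.h>
#include <string.h>

/* True only for relative paths with no ".." component. */
static bool entry_path_is_safe(const char *path)
{
    if (path[0] == '/')
        return false;
    for (const char *p = path; p != NULL; p = strchr(p, '/')) {
        if (*p == '/')
            p++;                    /* step to the start of the next component */
        if (p[0] == '.' && p[1] == '.' && (p[2] == '/' || p[2] == '\0'))
            return false;
    }
    return true;
}
```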
MultiPartInputFile::initialize()
{
readMagicNumberAndVersionField(*_data->is, _data->version);
bool multipart = isMultiPart(_data->version);
bool tiled = isTiled(_data->version);
//
// Multipart files don't have and shouldn't have the tiled bit set.
//
if (tiled && multipart)
throw IEX_NAMESPACE::InputExc ("Multipart files cannot have the tiled bit set");
int pos = 0;
while (true)
{
Header header;
header.readFrom(*_data->is, _data->version);
//
// If we read nothing then we stop reading.
//
if (header.readsNothing())
{
pos++;
break;
}
_data->_headers.push_back(header);
if(multipart == false)
break;
}
//
// Perform usual check on headers.
//
if ( _data->_headers.size() == 0)
{
throw IEX_NAMESPACE::ArgExc ("Files must contain at least one header");
}
for (size_t i = 0; i < _data->_headers.size(); i++)
{
//
// Silently invent a type if the file is a single part regular image.
//
if( _data->_headers[i].hasType() == false )
{
if(multipart)
throw IEX_NAMESPACE::ArgExc ("Every header in a multipart file should have a type");
_data->_headers[i].setType(tiled ? TILEDIMAGE : SCANLINEIMAGE);
}
else
{
//
// Silently fix the header type if it's wrong
// (happens when a regular Image file written by EXR_2.0 is rewritten by an older library,
// so doesn't effect deep image types)
//
if(!multipart && !isNonImage(_data->version))
{
_data->_headers[i].setType(tiled ? TILEDIMAGE : SCANLINEIMAGE);
}
}
if( _data->_headers[i].hasName() == false )
{
if(multipart)
throw IEX_NAMESPACE::ArgExc ("Every header in a multipart file should have a name");
}
if (isTiled(_data->_headers[i].type()))
_data->_headers[i].sanityCheck(true, multipart);
else
_data->_headers[i].sanityCheck(false, multipart);
}
//
// Check name uniqueness.
//
if (multipart)
{
set<string> names;
for (size_t i = 0; i < _data->_headers.size(); i++)
{
if (names.find(_data->_headers[i].name()) != names.end())
{
throw IEX_NAMESPACE::InputExc ("Header name " + _data->_headers[i].name() +
" is not a unique name.");
}
names.insert(_data->_headers[i].name());
}
}
//
// Check shared attributes compliance.
//
if (multipart && strictSharedAttribute)
{
for (size_t i = 1; i < _data->_headers.size(); i++)
{
vector <string> attrs;
if (_data->checkSharedAttributesValues (_data->_headers[0], _data->_headers[i], attrs))
{
string attrNames;
for (size_t j=0; j<attrs.size(); j++)
attrNames += " " + attrs[j];
throw IEX_NAMESPACE::InputExc ("Header name " + _data->_headers[i].name() +
" has non-conforming shared attributes: "+
attrNames);
}
}
}
//
// Create InputParts and read chunk offset tables.
//
for (size_t i = 0; i < _data->_headers.size(); i++)
_data->parts.push_back(
new InputPartData(_data, _data->_headers[i], i, _data->numThreads, _data->version));
_data->readChunkOffsetTables(_data->reconstructChunkOffsetTable);
} | 0 | [
"CWE-476"
]
| openexr | 25e9515b06a6bc293d871622b8cafaee7af84e0f | 273,371,316,919,432,270,000,000,000,000,000,000,000 | 139 | add sanity check for reading multipart files with no parts (#840)
Signed-off-by: Peter Hillman <[email protected]> |
png_fixed(png_const_structrp png_ptr, double fp, png_const_charp text)
{
double r = floor(100000 * fp + .5);
if (r > 2147483647. || r < -2147483648.)
png_fixed_error(png_ptr, text);
# ifndef PNG_ERROR_TEXT_SUPPORTED
PNG_UNUSED(text)
# endif
return (png_fixed_point)r;
} | 0 | [
"CWE-476"
]
| libpng | 812768d7a9c973452222d454634496b25ed415eb | 232,273,933,145,119,150,000,000,000,000,000,000,000 | 13 | [libpng16] Fixed a potential null pointer dereference in png_set_text_2()
(bug report and patch by Patrick Keshishian). |
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct tcp_options_received tmp_opt;
struct request_sock *req;
struct inet_request_sock *ireq;
struct ipv6_pinfo *np = inet6_sk(sk);
struct tcp_sock *tp = tcp_sk(sk);
__u32 isn = TCP_SKB_CB(skb)->when;
struct dst_entry *dst = NULL;
struct flowi6 fl6;
bool want_cookie = false;
if (skb->protocol == htons(ETH_P_IP))
return tcp_v4_conn_request(sk, skb);
if (!ipv6_unicast_destination(skb))
goto drop;
if ((sysctl_tcp_syncookies == 2 ||
inet_csk_reqsk_queue_is_full(sk)) && !isn) {
want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
if (!want_cookie)
goto drop;
}
if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
goto drop;
}
req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
if (req == NULL)
goto drop;
#ifdef CONFIG_TCP_MD5SIG
tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif
tcp_clear_options(&tmp_opt);
tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
tmp_opt.user_mss = tp->rx_opt.user_mss;
tcp_parse_options(skb, &tmp_opt, 0, NULL);
if (want_cookie && !tmp_opt.saw_tstamp)
tcp_clear_options(&tmp_opt);
tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
tcp_openreq_init(req, &tmp_opt, skb);
ireq = inet_rsk(req);
ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
if (!want_cookie || tmp_opt.tstamp_ok)
TCP_ECN_create_request(req, skb, sock_net(sk));
ireq->ir_iif = sk->sk_bound_dev_if;
/* So that link locals have meaning */
if (!sk->sk_bound_dev_if &&
ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
ireq->ir_iif = inet6_iif(skb);
if (!isn) {
if (ipv6_opt_accepted(sk, skb) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim ||
np->repflow) {
atomic_inc(&skb->users);
ireq->pktopts = skb;
}
if (want_cookie) {
isn = cookie_v6_init_sequence(sk, skb, &req->mss);
req->cookie_ts = tmp_opt.tstamp_ok;
goto have_isn;
}
/* VJ's idea. We save last timestamp seen
* from the destination in peer table, when entering
* state TIME-WAIT, and check against it before
* accepting new connection request.
*
* If "isn" is not zero, this request hit alive
* timewait bucket, so that all the necessary checks
* are made in the function processing timewait state.
*/
if (tmp_opt.saw_tstamp &&
tcp_death_row.sysctl_tw_recycle &&
(dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) {
if (!tcp_peer_is_proven(req, dst, true)) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
goto drop_and_release;
}
}
/* Kill the following clause, if you dislike this way. */
else if (!sysctl_tcp_syncookies &&
(sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
(sysctl_max_syn_backlog >> 2)) &&
!tcp_peer_is_proven(req, dst, false)) {
/* Without syncookies last quarter of
* backlog is filled with destinations,
* proven to be alive.
* It means that we continue to communicate
* to destinations, already remembered
* to the moment of synflood.
*/
LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
&ireq->ir_v6_rmt_addr, ntohs(tcp_hdr(skb)->source));
goto drop_and_release;
}
isn = tcp_v6_init_sequence(skb);
}
have_isn:
tcp_rsk(req)->snt_isn = isn;
if (security_inet_conn_request(sk, skb, req))
goto drop_and_release;
if (tcp_v6_send_synack(sk, dst, &fl6, req,
skb_get_queue_mapping(skb)) ||
want_cookie)
goto drop_and_free;
tcp_rsk(req)->snt_synack = tcp_time_stamp;
tcp_rsk(req)->listener = NULL;
inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
return 0;
drop_and_release:
dst_release(dst);
drop_and_free:
reqsk_free(req);
drop:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
return 0; /* don't send reset */
} | 0 | []
| linux | 7bced397510ab569d31de4c70b39e13355046387 | 292,980,006,057,804,300,000,000,000,000,000,000,000 | 137 | net_dma: simple removal
Per commit "77873803363c net_dma: mark broken" net_dma is no longer used
and there is no plan to fix it.
This is the mechanical removal of bits in CONFIG_NET_DMA ifdef guards.
Reverting the remainder of the net_dma induced changes is deferred to
subsequent patches.
Marked for stable due to Roman's report of a memory leak in
dma_pin_iovec_pages():
https://lkml.org/lkml/2014/9/3/177
Cc: Dave Jiang <[email protected]>
Cc: Vinod Koul <[email protected]>
Cc: David Whipple <[email protected]>
Cc: Alexander Duyck <[email protected]>
Cc: <[email protected]>
Reported-by: Roman Gushchin <[email protected]>
Acked-by: David S. Miller <[email protected]>
Signed-off-by: Dan Williams <[email protected]> |
static int copy_translation_tables(struct intel_iommu *iommu)
{
struct context_entry **ctxt_tbls;
struct root_entry *old_rt;
phys_addr_t old_rt_phys;
int ctxt_table_entries;
unsigned long flags;
u64 rtaddr_reg;
int bus, ret;
bool new_ext, ext;
rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
new_ext = !!ecap_ecs(iommu->ecap);
/*
* The RTT bit can only be changed when translation is disabled,
* but disabling translation means to open a window for data
* corruption. So bail out and don't copy anything if we would
* have to change the bit.
*/
if (new_ext != ext)
return -EINVAL;
old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
if (!old_rt_phys)
return -EINVAL;
old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
if (!old_rt)
return -ENOMEM;
/* This is too big for the stack - allocate it from slab */
ctxt_table_entries = ext ? 512 : 256;
ret = -ENOMEM;
ctxt_tbls = kcalloc(ctxt_table_entries, sizeof(void *), GFP_KERNEL);
if (!ctxt_tbls)
goto out_unmap;
for (bus = 0; bus < 256; bus++) {
ret = copy_context_table(iommu, &old_rt[bus],
ctxt_tbls, bus, ext);
if (ret) {
pr_err("%s: Failed to copy context table for bus %d\n",
iommu->name, bus);
continue;
}
}
spin_lock_irqsave(&iommu->lock, flags);
/* Context tables are copied, now write them to the root_entry table */
for (bus = 0; bus < 256; bus++) {
int idx = ext ? bus * 2 : bus;
u64 val;
if (ctxt_tbls[idx]) {
val = virt_to_phys(ctxt_tbls[idx]) | 1;
iommu->root_entry[bus].lo = val;
}
if (!ext || !ctxt_tbls[idx + 1])
continue;
val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
iommu->root_entry[bus].hi = val;
}
spin_unlock_irqrestore(&iommu->lock, flags);
kfree(ctxt_tbls);
__iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
ret = 0;
out_unmap:
memunmap(old_rt);
return ret;
} | 0 | []
| linux | d8b8591054575f33237556c32762d54e30774d28 | 276,837,595,722,712,450,000,000,000,000,000,000,000 | 81 | iommu/vt-d: Disable ATS support on untrusted devices
Commit fb58fdcd295b9 ("iommu/vt-d: Do not enable ATS for untrusted
devices") disables ATS support on the devices which have been marked
as untrusted. Unfortunately this is not enough to fix the DMA attack
vulnerabiltiies because IOMMU driver allows translated requests as
long as a device advertises the ATS capability. Hence a malicious
peripheral device could use this to bypass IOMMU.
This disables the ATS support on untrusted devices by clearing the
internal per-device ATS mark. As the result, IOMMU driver will block
any translated requests from any device marked as untrusted.
Cc: Jacob Pan <[email protected]>
Cc: Mika Westerberg <[email protected]>
Suggested-by: Kevin Tian <[email protected]>
Suggested-by: Ashok Raj <[email protected]>
Fixes: fb58fdcd295b9 ("iommu/vt-d: Do not enable ATS for untrusted devices")
Signed-off-by: Lu Baolu <[email protected]>
Signed-off-by: Joerg Roedel <[email protected]> |
int EC_POINT_get_affine_coordinates_GF2m(const EC_GROUP *group,
const EC_POINT *point, BIGNUM *x,
BIGNUM *y, BN_CTX *ctx)
{
return EC_POINT_get_affine_coordinates(group, point, x, y, ctx);
} | 0 | []
| openssl | 30c22fa8b1d840036b8e203585738df62a03cec8 | 41,500,194,896,577,190,000,000,000,000,000,000,000 | 6 | [crypto/ec] for ECC parameters with NULL or zero cofactor, compute it
The cofactor argument to EC_GROUP_set_generator is optional, and SCA
mitigations for ECC currently use it. So the library currently falls
back to very old SCA-vulnerable code if the cofactor is not present.
This PR allows EC_GROUP_set_generator to compute the cofactor for all
curves of cryptographic interest. Steering scalar multiplication to more
SCA-robust code.
This issue affects persisted private keys in explicit parameter form,
where the (optional) cofactor field is zero or absent.
It also affects curves not built-in to the library, but constructed
programatically with explicit parameters, then calling
EC_GROUP_set_generator with a nonsensical value (NULL, zero).
The very old scalar multiplication code is known to be vulnerable to
local uarch attacks, outside of the OpenSSL threat model. New results
suggest the code path is also vulnerable to traditional wall clock
timing attacks.
CVE-2019-1547
Reviewed-by: Matt Caswell <[email protected]>
Reviewed-by: Tomas Mraz <[email protected]>
Reviewed-by: Nicola Tuveri <[email protected]>
(Merged from https://github.com/openssl/openssl/pull/9781) |
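The reason the cofactor can be computed at all is Hasse's theorem; the following is the standard argument, not a quote of the implementation. With $|E(\mathbb{F}_q)| = q + 1 - t$ and $|t| \le 2\sqrt{q}$, the cofactor $h = |E(\mathbb{F}_q)|/n$ lies in

$$\Big[\tfrac{(\sqrt{q}-1)^2}{n},\ \tfrac{(\sqrt{q}+1)^2}{n}\Big],$$

an interval of length $4\sqrt{q}/n$. For curves of cryptographic interest $n > 4\sqrt{q}$, so the interval contains exactly one integer and $h = \big\lfloor (\sqrt{q}+1)^2 / n \big\rfloor$.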
ipcp_propagate_stage (class ipa_topo_info *topo)
{
struct cgraph_node *node;
if (dump_file)
fprintf (dump_file, "\n Propagating constants:\n\n");
max_count = profile_count::uninitialized ();
FOR_EACH_DEFINED_FUNCTION (node)
{
if (node->has_gimple_body_p ()
&& opt_for_fn (node->decl, flag_ipa_cp)
&& opt_for_fn (node->decl, optimize))
{
class ipa_node_params *info = IPA_NODE_REF (node);
determine_versionability (node, info);
info->lattices = XCNEWVEC (class ipcp_param_lattices,
ipa_get_param_count (info));
initialize_node_lattices (node);
}
ipa_size_summary *s = ipa_size_summaries->get (node);
if (node->definition && !node->alias && s != NULL)
overall_size += s->self_size;
max_count = max_count.max (node->count.ipa ());
}
max_new_size = overall_size;
if (max_new_size < param_large_unit_insns)
max_new_size = param_large_unit_insns;
max_new_size += max_new_size * param_ipcp_unit_growth / 100 + 1;
if (dump_file)
fprintf (dump_file, "\noverall_size: %li, max_new_size: %li\n",
overall_size, max_new_size);
propagate_constants_topo (topo);
if (flag_checking)
ipcp_verify_propagated_values ();
topo->constants.propagate_effects ();
topo->contexts.propagate_effects ();
if (dump_file)
{
fprintf (dump_file, "\nIPA lattices after all propagation:\n");
print_all_lattices (dump_file, (dump_flags & TDF_DETAILS), true);
}
} | 0 | [
"CWE-20"
]
| gcc | a09ccc22459c565814f79f96586fe4ad083fe4eb | 68,880,516,842,376,280,000,000,000,000,000,000,000 | 48 | Avoid segfault when doing IPA-VRP but not IPA-CP (PR 93015)
2019-12-21 Martin Jambor <[email protected]>
PR ipa/93015
* ipa-cp.c (ipcp_store_vr_results): Check that info exists
testsuite/
* gcc.dg/lto/pr93015_0.c: New test.
From-SVN: r279695 |
static double mp_draw(_cimg_math_parser& mp) {
const int x = (int)_mp_arg(4), y = (int)_mp_arg(5), z = (int)_mp_arg(6), c = (int)_mp_arg(7);
unsigned int ind = (unsigned int)mp.opcode[3];
if (ind!=~0U) {
if (!mp.imglist.width()) return cimg::type<double>::nan();
ind = (unsigned int)cimg::mod((int)_mp_arg(3),mp.imglist.width());
}
CImg<T> &img = ind==~0U?mp.imgout:mp.imglist[ind];
unsigned int
dx = (unsigned int)mp.opcode[8],
dy = (unsigned int)mp.opcode[9],
dz = (unsigned int)mp.opcode[10],
dc = (unsigned int)mp.opcode[11];
dx = dx==~0U?img._width:(unsigned int)_mp_arg(8);
dy = dy==~0U?img._height:(unsigned int)_mp_arg(9);
dz = dz==~0U?img._depth:(unsigned int)_mp_arg(10);
dc = dc==~0U?img._spectrum:(unsigned int)_mp_arg(11);
const ulongT sizS = mp.opcode[2];
if (sizS<(ulongT)dx*dy*dz*dc)
throw CImgArgumentException("[" cimg_appname "_math_parser] CImg<%s>: Function 'draw()': "
"Sprite dimension (%lu values) and specified sprite geometry (%u,%u,%u,%u) "
"(%lu values) do not match.",
mp.imgin.pixel_type(),sizS,dx,dy,dz,dc,(ulongT)dx*dy*dz*dc);
CImg<doubleT> S(&_mp_arg(1) + 1,dx,dy,dz,dc,true);
const float opacity = (float)_mp_arg(12);
if (img._data) {
if (mp.opcode[13]!=~0U) { // Opacity mask specified
const ulongT sizM = mp.opcode[14];
if (sizM<(ulongT)dx*dy*dz)
throw CImgArgumentException("[" cimg_appname "_math_parser] CImg<%s>: Function 'draw()': "
"Mask dimension (%lu values) and specified sprite geometry (%u,%u,%u,%u) "
"(%lu values) do not match.",
mp.imgin.pixel_type(),sizS,dx,dy,dz,dc,(ulongT)dx*dy*dz*dc);
const CImg<doubleT> M(&_mp_arg(13) + 1,dx,dy,dz,(unsigned int)(sizM/(dx*dy*dz)),true);
img.draw_image(x,y,z,c,S,M,opacity,(float)_mp_arg(15));
} else img.draw_image(x,y,z,c,S,opacity);
}
return cimg::type<double>::nan();
} | 0 | [
"CWE-770"
]
| cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 307,893,081,292,303,380,000,000,000,000,000,000,000 | 41 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
check_utf16 (const guint8 * data, gint len, gint endianness)
{
GstByteReader br;
guint16 high, low;
low = high = 0;
if (len & 1)
return FALSE;
gst_byte_reader_init (&br, data, len);
while (len >= 2) {
/* test first for a single 16 bit value in the BMP */
if (endianness == G_BIG_ENDIAN)
high = gst_byte_reader_get_uint16_be_unchecked (&br);
else
high = gst_byte_reader_get_uint16_le_unchecked (&br);
if (high >= 0xD800 && high <= 0xDBFF) {
/* start of a surrogate pair */
if (len < 4)
return FALSE;
len -= 2;
if (endianness == G_BIG_ENDIAN)
low = gst_byte_reader_get_uint16_be_unchecked (&br);
else
low = gst_byte_reader_get_uint16_le_unchecked (&br);
if (low >= 0xDC00 && low <= 0xDFFF) {
/* second half of the surrogate pair */
} else
return FALSE;
} else {
if (high >= 0xDC00 && high <= 0xDFFF)
return FALSE;
}
len -= 2;
}
return TRUE;
} | 0 | [
"CWE-125"
]
| gst-plugins-base | 2fdccfd64fc609e44e9c4b8eed5bfdc0ab9c9095 | 297,887,700,484,475,950,000,000,000,000,000,000,000 | 38 | typefind: bounds check windows ico detection
Fixes out of bounds read
https://bugzilla.gnome.org/show_bug.cgi?id=774902 |
size_t ADDCALL sass_option_get_include_path_size(struct Sass_Options* options)
{
size_t len = 0;
struct string_list* cur = options->include_paths;
while (cur) { len ++; cur = cur->next; }
return len;
} | 0 | [
"CWE-125"
]
| libsass | 8f40dc03e5ab5a8b2ebeb72b31f8d1adbb2fd6ae | 316,583,423,571,871,150,000,000,000,000,000,000,000 | 7 | Optimize line_begin/end search in `handle_error`
There is no need to advance by UTF-8 code points when searching for an
ASCII character, because UTF-8 is a prefix-free encoding. |
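A minimal sketch of why that optimisation is safe (illustrative helper, not libsass code): an ASCII byte such as '\n' can be located with a plain byte scan, because every byte of a multi-byte UTF-8 sequence has its high bit set and can never equal an ASCII value.

/* Byte-wise search for an ASCII character inside UTF-8 text. */
static const char *find_ascii(const char *s, const char *end, char ascii)
{
    for (; s < end; ++s)
        if (*s == ascii)   /* lead/continuation bytes are >= 0x80, so no false hit */
            return s;
    return end;
}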
virDomainChrSourceDefParseFile(virDomainChrSourceDefPtr def,
xmlNodePtr source)
{
g_autofree char *append = NULL;
def->data.file.path = virXMLPropString(source, "path");
if ((append = virXMLPropString(source, "append")) &&
(def->data.file.append = virTristateSwitchTypeFromString(append)) <= 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Invalid append attribute value '%s'"),
append);
return -1;
}
return 0;
} | 0 | [
"CWE-212"
]
| libvirt | a5b064bf4b17a9884d7d361733737fb614ad8979 | 41,753,938,223,694,214,000,000,000,000,000,000,000 | 17 | conf: Don't format http cookies unless VIR_DOMAIN_DEF_FORMAT_SECURE is used
Starting with 3b076391befc3fe72deb0c244ac6c2b4c100b410
(v6.1.0-122-g3b076391be) we support http cookies. Since they may contain
somewhat sensitive information we should not format them into the XML
unless VIR_DOMAIN_DEF_FORMAT_SECURE is asserted.
Reported-by: Han Han <[email protected]>
Signed-off-by: Peter Krempa <[email protected]>
Reviewed-by: Erik Skultety <[email protected]> |
skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container)
{
struct ip_tunnel_info *info;
struct ip_tunnel_key *key;
/* A quick check to see if there might be something to do. */
if (!dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_ENC_KEYID) &&
!dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) &&
!dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) &&
!dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL) &&
!dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_ENC_PORTS) &&
!dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_ENC_IP) &&
!dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_ENC_OPTS))
return;
info = skb_tunnel_info(skb);
if (!info)
return;
key = &info->key;
switch (ip_tunnel_info_af(info)) {
case AF_INET:
skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV4_ADDRS,
flow_dissector,
target_container);
if (dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
struct flow_dissector_key_ipv4_addrs *ipv4;
ipv4 = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
target_container);
ipv4->src = key->u.ipv4.src;
ipv4->dst = key->u.ipv4.dst;
}
break;
case AF_INET6:
skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV6_ADDRS,
flow_dissector,
target_container);
if (dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
struct flow_dissector_key_ipv6_addrs *ipv6;
ipv6 = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
target_container);
ipv6->src = key->u.ipv6.src;
ipv6->dst = key->u.ipv6.dst;
}
break;
}
if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
struct flow_dissector_key_keyid *keyid;
keyid = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_ENC_KEYID,
target_container);
keyid->keyid = tunnel_id_to_key32(key->tun_id);
}
if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
struct flow_dissector_key_ports *tp;
tp = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_ENC_PORTS,
target_container);
tp->src = key->tp_src;
tp->dst = key->tp_dst;
}
if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
struct flow_dissector_key_ip *ip;
ip = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_ENC_IP,
target_container);
ip->tos = key->tos;
ip->ttl = key->ttl;
}
if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
struct flow_dissector_key_enc_opts *enc_opt;
enc_opt = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_ENC_OPTS,
target_container);
if (info->options_len) {
enc_opt->len = info->options_len;
ip_tunnel_info_opts_get(enc_opt->data, info);
enc_opt->dst_opt_type = info->key.tun_flags &
TUNNEL_OPTIONS_PRESENT;
}
}
} | 0 | [
"CWE-330"
]
| linux | 55667441c84fa5e0911a0aac44fb059c15ba6da2 | 100,445,087,782,120,320,000,000,000,000,000,000,000 | 107 | net/flow_dissector: switch to siphash
UDP IPv6 packets auto flowlabels are using a 32bit secret
(static u32 hashrnd in net/core/flow_dissector.c) and
apply jhash() over fields known by the receivers.
Attackers can easily infer the 32bit secret and use this information
to identify a device and/or user, since this 32bit secret is only
set at boot time.
Really, using jhash() to generate cookies sent on the wire
is a serious security concern.
Trying to change the rol32(hash, 16) in ip6_make_flowlabel() would be
a dead end. Trying to periodically change the secret (like in sch_sfq.c)
could change paths taken in the network for long lived flows.
Let's switch to siphash, as we did in commit df453700e8d8
("inet: switch IP ID generator to siphash")
Using a cryptographically strong pseudo random function will solve this
privacy issue and more generally remove other weak points in the stack.
Packet schedulers using skb_get_hash_perturb() benefit from this change.
Fixes: b56774163f99 ("ipv6: Enable auto flow labels by default")
Fixes: 42240901f7c4 ("ipv6: Implement different admin modes for automatic flow labels")
Fixes: 67800f9b1f4e ("ipv6: Call skb_get_hash_flowi6 to get skb->hash in ip6_make_flowlabel")
Fixes: cb1ce2ef387b ("ipv6: Implement automatic flow label generation on transmit")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Jonathan Berger <[email protected]>
Reported-by: Amit Klein <[email protected]>
Reported-by: Benny Pinkas <[email protected]>
Cc: Tom Herbert <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
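A minimal sketch of the pattern this message describes, assuming the kernel's siphash() and net_get_random_once() helpers with their usual signatures; the names below are illustrative, not the code added by the commit.

#include <linux/net.h>      /* net_get_random_once() */
#include <linux/siphash.h>  /* siphash(), siphash_key_t */

static siphash_key_t flow_hash_key;

static u32 flow_keys_hash(const void *keys, size_t len)
{
    net_get_random_once(&flow_hash_key, sizeof(flow_hash_key)); /* 128-bit secret, seeded once per boot */
    return (u32)siphash(keys, len, &flow_hash_key);             /* keyed PRF replaces jhash + static seed */
}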
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
dma_addr_t addr,
size_t buffersize, bool dma_to_device)
{
if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
return 1;
switch (ring->type) {
case B43_DMA_30BIT:
if ((u64)addr + buffersize > (1ULL << 30))
goto address_error;
break;
case B43_DMA_32BIT:
if ((u64)addr + buffersize > (1ULL << 32))
goto address_error;
break;
case B43_DMA_64BIT:
/* Currently we can't have addresses beyond
* 64bit in the kernel. */
break;
}
/* The address is OK. */
return 0;
address_error:
/* We can't support this address. Unmap it again. */
unmap_descbuffer(ring, addr, buffersize, dma_to_device);
return 1;
} | 0 | [
"CWE-119",
"CWE-787"
]
| linux | c85ce65ecac078ab1a1835c87c4a6319cf74660a | 235,063,914,789,488,000,000,000,000,000,000,000,000 | 31 | b43: allocate receive buffers big enough for max frame len + offset
Otherwise, skb_put inside of dma_rx can fail...
https://bugzilla.kernel.org/show_bug.cgi?id=32042
Signed-off-by: John W. Linville <[email protected]>
Acked-by: Larry Finger <[email protected]>
Cc: [email protected] |
GF_Box *m4ds_box_new()
{
GF_MPEG4ExtensionDescriptorsBox *tmp = (GF_MPEG4ExtensionDescriptorsBox *) gf_malloc(sizeof(GF_MPEG4ExtensionDescriptorsBox));
if (tmp == NULL) return NULL;
memset(tmp, 0, sizeof(GF_MPEG4ExtensionDescriptorsBox));
tmp->type = GF_ISOM_BOX_TYPE_M4DS;
tmp->descriptors = gf_list_new();
return (GF_Box *)tmp;
} | 0 | [
"CWE-401"
]
| gpac | 0a85029d694f992f3631e2f249e4999daee15cbf | 8,110,649,435,852,414,000,000,000,000,000,000,000 | 9 | fixed #1785 (fuzz) |
String *Field_string::val_str(String *val_buffer __attribute__((unused)),
String *val_ptr)
{
ASSERT_COLUMN_MARKED_FOR_READ;
/* See the comment for Field_long::store(long long) */
DBUG_ASSERT(!table || table->in_use == current_thd);
size_t length;
if (get_thd()->variables.sql_mode &
MODE_PAD_CHAR_TO_FULL_LENGTH)
length= my_charpos(field_charset, ptr, ptr + field_length,
field_length / field_charset->mbmaxlen);
else
length= field_charset->cset->lengthsp(field_charset, (const char*) ptr,
field_length);
val_ptr->set((const char*) ptr, length, field_charset);
return val_ptr;
} | 0 | [
"CWE-416",
"CWE-703"
]
| server | 08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917 | 45,045,939,160,016,880,000,000,000,000,000,000,000 | 17 | MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter prematurely freed item.
When table is reopened from cache vcol_info contains stale
expression. We refresh expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]> |
void doport(const char *arg)
{
unsigned int a1, a2, a3, a4, p1, p2;
struct sockaddr_storage a;
if (sscanf(arg, "%u,%u,%u,%u,%u,%u",
&a1, &a2, &a3, &a4, &p1, &p2) != 6 ||
a1 > 255 || a2 > 255 || a3 > 255 || a4 > 255 ||
p1 > 255 || p2 > 255 || (a1|a2|a3|a4) == 0 ||
(p1 | p2) == 0) {
addreply_noformat(501, MSG_SYNTAX_ERROR_IP);
return;
}
memset(&a, 0, sizeof a);
STORAGE_FAMILY(a) = AF_INET;
STORAGE_SIN_ADDR(a) =
htonl(((uint32_t) a1 << 24) | ((uint32_t) a2 << 16) |
(a3 << 8) | a4);
SET_STORAGE_LEN(a, sizeof(struct sockaddr_in));
doport2(a, (p1 << 8) | p2);
} | 0 | [
"CWE-434"
]
| pure-ftpd | 37ad222868e52271905b94afea4fc780d83294b4 | 236,822,399,339,841,800,000,000,000,000,000,000,000 | 21 | Initialize the max upload file size when quotas are enabled
Due to an unwanted check, files causing the quota to be exceeded
were deleted after the upload, but not during the upload.
The bug was introduced in 2009 in version 1.0.23
Spotted by @DroidTest, thanks! |
nautilus_file_invalidate_attributes_internal (NautilusFile *file,
NautilusFileAttributes file_attributes)
{
Request request;
if (file == NULL) {
return;
}
if (NAUTILUS_IS_DESKTOP_ICON_FILE (file)) {
/* Desktop icon files are always up to date.
* If we invalidate their attributes they
* will lose data, so we just ignore them.
*/
return;
}
request = nautilus_directory_set_up_request (file_attributes);
if (REQUEST_WANTS_TYPE (request, REQUEST_DIRECTORY_COUNT)) {
invalidate_directory_count (file);
}
if (REQUEST_WANTS_TYPE (request, REQUEST_DEEP_COUNT)) {
invalidate_deep_counts (file);
}
if (REQUEST_WANTS_TYPE (request, REQUEST_MIME_LIST)) {
invalidate_mime_list (file);
}
if (REQUEST_WANTS_TYPE (request, REQUEST_FILE_INFO)) {
invalidate_file_info (file);
}
if (REQUEST_WANTS_TYPE (request, REQUEST_TOP_LEFT_TEXT)) {
invalidate_top_left_text (file);
}
if (REQUEST_WANTS_TYPE (request, REQUEST_LINK_INFO)) {
invalidate_link_info (file);
}
if (REQUEST_WANTS_TYPE (request, REQUEST_EXTENSION_INFO)) {
nautilus_file_invalidate_extension_info_internal (file);
}
if (REQUEST_WANTS_TYPE (request, REQUEST_THUMBNAIL)) {
invalidate_thumbnail (file);
}
/* FIXME bugzilla.gnome.org 45075: implement invalidating metadata */
} | 0 | []
| nautilus | 7632a3e13874a2c5e8988428ca913620a25df983 | 296,833,423,868,298,800,000,000,000,000,000,000,000 | 46 | Check for trusted desktop file launchers.
2009-02-24 Alexander Larsson <[email protected]>
* libnautilus-private/nautilus-directory-async.c:
Check for trusted desktop file launchers.
* libnautilus-private/nautilus-file-private.h:
* libnautilus-private/nautilus-file.c:
* libnautilus-private/nautilus-file.h:
Add nautilus_file_is_trusted_link.
Allow unsetting of custom display name.
* libnautilus-private/nautilus-mime-actions.c:
Display dialog when trying to launch a non-trusted desktop file.
svn path=/trunk/; revision=15003 |
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
int r;
bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
vcpu->run->request_interrupt_window;
bool req_immediate_exit = false;
if (vcpu->requests) {
if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
kvm_mmu_unload(vcpu);
if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
__kvm_migrate_timers(vcpu);
if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
kvm_gen_update_masterclock(vcpu->kvm);
if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
kvm_gen_kvmclock_update(vcpu);
if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
r = kvm_guest_time_update(vcpu);
if (unlikely(r))
goto out;
}
if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
kvm_mmu_sync_roots(vcpu);
if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
kvm_vcpu_flush_tlb(vcpu);
if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
r = 0;
goto out;
}
if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
r = 0;
goto out;
}
if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
vcpu->fpu_active = 0;
kvm_x86_ops->fpu_deactivate(vcpu);
}
if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
/* Page is swapped out. Do synthetic halt */
vcpu->arch.apf.halted = true;
r = 1;
goto out;
}
if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
record_steal_time(vcpu);
if (kvm_check_request(KVM_REQ_NMI, vcpu))
process_nmi(vcpu);
if (kvm_check_request(KVM_REQ_PMU, vcpu))
kvm_handle_pmu_event(vcpu);
if (kvm_check_request(KVM_REQ_PMI, vcpu))
kvm_deliver_pmi(vcpu);
if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
vcpu_scan_ioapic(vcpu);
if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
kvm_vcpu_reload_apic_access_page(vcpu);
}
if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
kvm_apic_accept_events(vcpu);
if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
r = 1;
goto out;
}
if (inject_pending_event(vcpu, req_int_win) != 0)
req_immediate_exit = true;
/* enable NMI/IRQ window open exits if needed */
else if (vcpu->arch.nmi_pending)
kvm_x86_ops->enable_nmi_window(vcpu);
else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
kvm_x86_ops->enable_irq_window(vcpu);
if (kvm_lapic_enabled(vcpu)) {
/*
* Update architecture specific hints for APIC
* virtual interrupt delivery.
*/
if (kvm_x86_ops->hwapic_irr_update)
kvm_x86_ops->hwapic_irr_update(vcpu,
kvm_lapic_find_highest_irr(vcpu));
update_cr8_intercept(vcpu);
kvm_lapic_sync_to_vapic(vcpu);
}
}
r = kvm_mmu_reload(vcpu);
if (unlikely(r)) {
goto cancel_injection;
}
preempt_disable();
kvm_x86_ops->prepare_guest_switch(vcpu);
if (vcpu->fpu_active)
kvm_load_guest_fpu(vcpu);
kvm_load_guest_xcr0(vcpu);
vcpu->mode = IN_GUEST_MODE;
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
/* We should set ->mode before check ->requests,
* see the comment in make_all_cpus_request.
*/
smp_mb__after_srcu_read_unlock();
local_irq_disable();
if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
|| need_resched() || signal_pending(current)) {
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
local_irq_enable();
preempt_enable();
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
r = 1;
goto cancel_injection;
}
if (req_immediate_exit)
smp_send_reschedule(vcpu->cpu);
kvm_guest_enter();
if (unlikely(vcpu->arch.switch_db_regs)) {
set_debugreg(0, 7);
set_debugreg(vcpu->arch.eff_db[0], 0);
set_debugreg(vcpu->arch.eff_db[1], 1);
set_debugreg(vcpu->arch.eff_db[2], 2);
set_debugreg(vcpu->arch.eff_db[3], 3);
set_debugreg(vcpu->arch.dr6, 6);
}
trace_kvm_entry(vcpu->vcpu_id);
kvm_x86_ops->run(vcpu);
/*
* Do this here before restoring debug registers on the host. And
* since we do this before handling the vmexit, a DR access vmexit
* can (a) read the correct value of the debug registers, (b) set
* KVM_DEBUGREG_WONT_EXIT again.
*/
if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
int i;
WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
kvm_x86_ops->sync_dirty_debug_regs(vcpu);
for (i = 0; i < KVM_NR_DB_REGS; i++)
vcpu->arch.eff_db[i] = vcpu->arch.db[i];
}
/*
* If the guest has used debug registers, at least dr7
* will be disabled while returning to the host.
* If we don't have active breakpoints in the host, we don't
* care about the messed up debug address registers. But if
* we have some of them active, restore the old state.
*/
if (hw_breakpoint_active())
hw_breakpoint_restore();
vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu,
native_read_tsc());
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
/* Interrupt is enabled by handle_external_intr() */
kvm_x86_ops->handle_external_intr(vcpu);
++vcpu->stat.exits;
/*
* We must have an instruction between local_irq_enable() and
* kvm_guest_exit(), so the timer interrupt isn't delayed by
* the interrupt shadow. The stat.exits increment will do nicely.
* But we need to prevent reordering, hence this barrier():
*/
barrier();
kvm_guest_exit();
preempt_enable();
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
/*
* Profile KVM exit RIPs:
*/
if (unlikely(prof_on == KVM_PROFILING)) {
unsigned long rip = kvm_rip_read(vcpu);
profile_hit(KVM_PROFILING, (void *)rip);
}
if (unlikely(vcpu->arch.tsc_always_catchup))
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
if (vcpu->arch.apic_attention)
kvm_lapic_sync_from_vapic(vcpu);
r = kvm_x86_ops->handle_exit(vcpu);
return r;
cancel_injection:
kvm_x86_ops->cancel_injection(vcpu);
if (unlikely(vcpu->arch.apic_attention))
kvm_lapic_sync_from_vapic(vcpu);
out:
return r;
} | 0 | []
| kvm | 854e8bb1aa06c578c2c9145fa6bfe3680ef63b23 | 256,537,899,536,306,060,000,000,000,000,000,000,000 | 212 | KVM: x86: Check non-canonical addresses upon WRMSR
Upon WRMSR, the CPU should inject #GP if a non-canonical value (address) is
written to certain MSRs. The behavior is "almost" identical for AMD and Intel
(ignoring MSRs that are not implemented in either architecture since they would
anyhow #GP). However, IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
non-canonical address is written on Intel but not on AMD (which ignores the top
32-bits).
Accordingly, this patch injects a #GP on the MSRs which behave identically on
Intel and AMD. To eliminate the differences between the architectures, the
value which is written to IA32_SYSENTER_ESP and IA32_SYSENTER_EIP is turned to
canonical value before writing instead of injecting a #GP.
Some references from Intel and AMD manuals:
According to Intel SDM description of WRMSR instruction #GP is expected on
WRMSR "If the source register contains a non-canonical address and ECX
specifies one of the following MSRs: IA32_DS_AREA, IA32_FS_BASE, IA32_GS_BASE,
IA32_KERNEL_GS_BASE, IA32_LSTAR, IA32_SYSENTER_EIP, IA32_SYSENTER_ESP."
According to AMD manual instruction manual:
LSTAR/CSTAR (SYSCALL): "The WRMSR instruction loads the target RIP into the
LSTAR and CSTAR registers. If an RIP written by WRMSR is not in canonical
form, a general-protection exception (#GP) occurs."
IA32_GS_BASE and IA32_FS_BASE (WRFSBASE/WRGSBASE): "The address written to the
base field must be in canonical form or a #GP fault will occur."
IA32_KERNEL_GS_BASE (SWAPGS): "The address stored in the KernelGSbase MSR must
be in canonical form."
This patch fixes CVE-2014-3610.
Cc: [email protected]
Signed-off-by: Nadav Amit <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
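A minimal sketch of the canonical-address test this message relies on (illustrative helper; 48-bit virtual addresses assumed, whereas the real code uses the guest's virtual-address width):

#include <stdbool.h>
#include <stdint.h>

/* An address is canonical iff bits 63..47 are all copies of bit 47. */
static inline bool is_noncanonical_addr(uint64_t la)
{
    uint64_t top = la >> 47;            /* bits 63..47 */
    return top != 0 && top != 0x1ffff;  /* canonical iff they are all 0 or all 1 */
}

/* In the WRMSR path (sketch): for address-holding MSRs such as MSR_LSTAR or
 * MSR_KERNEL_GS_BASE, a non-canonical value makes the caller inject #GP; for
 * IA32_SYSENTER_EIP/ESP the value is sign-extended to canonical form instead. */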
static int fts3InsertTerms(
Fts3Table *p,
int iLangid,
sqlite3_value **apVal,
u32 *aSz
){
int i; /* Iterator variable */
for(i=2; i<p->nColumn+2; i++){
int iCol = i-2;
if( p->abNotindexed[iCol]==0 ){
const char *zText = (const char *)sqlite3_value_text(apVal[i]);
int rc = fts3PendingTermsAdd(p, iLangid, zText, iCol, &aSz[iCol]);
if( rc!=SQLITE_OK ){
return rc;
}
aSz[p->nColumn] += sqlite3_value_bytes(apVal[i]);
}
}
return SQLITE_OK;
} | 0 | [
"CWE-787"
]
| sqlite | c72f2fb7feff582444b8ffdc6c900c69847ce8a9 | 333,820,642,033,495,300,000,000,000,000,000,000,000 | 20 | More improvements to shadow table corruption detection in FTS3.
FossilOrigin-Name: 51525f9c3235967bc00a090e84c70a6400698c897aa4742e817121c725b8c99d |
static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
{
q->bins[slot].perturbation = prandom_u32();
} | 1 | [
"CWE-330"
]
| linux | 55667441c84fa5e0911a0aac44fb059c15ba6da2 | 199,438,947,154,835,770,000,000,000,000,000,000,000 | 4 | net/flow_dissector: switch to siphash
UDP IPv6 packets auto flowlabels are using a 32bit secret
(static u32 hashrnd in net/core/flow_dissector.c) and
apply jhash() over fields known by the receivers.
Attackers can easily infer the 32bit secret and use this information
to identify a device and/or user, since this 32bit secret is only
set at boot time.
Really, using jhash() to generate cookies sent on the wire
is a serious security concern.
Trying to change the rol32(hash, 16) in ip6_make_flowlabel() would be
a dead end. Trying to periodically change the secret (like in sch_sfq.c)
could change paths taken in the network for long lived flows.
Let's switch to siphash, as we did in commit df453700e8d8
("inet: switch IP ID generator to siphash")
Using a cryptographically strong pseudo random function will solve this
privacy issue and more generally remove other weak points in the stack.
Packet schedulers using skb_get_hash_perturb() benefit from this change.
Fixes: b56774163f99 ("ipv6: Enable auto flow labels by default")
Fixes: 42240901f7c4 ("ipv6: Implement different admin modes for automatic flow labels")
Fixes: 67800f9b1f4e ("ipv6: Call skb_get_hash_flowi6 to get skb->hash in ip6_make_flowlabel")
Fixes: cb1ce2ef387b ("ipv6: Implement automatic flow label generation on transmit")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Jonathan Berger <[email protected]>
Reported-by: Amit Klein <[email protected]>
Reported-by: Benny Pinkas <[email protected]>
Cc: Tom Herbert <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static inline int parse_unix_address(php_stream_xport_param *xparam, struct sockaddr_un *unix_addr)
{
memset(unix_addr, 0, sizeof(*unix_addr));
unix_addr->sun_family = AF_UNIX;
/* we need to be binary safe on systems that support an abstract
* namespace */
if (xparam->inputs.namelen >= sizeof(unix_addr->sun_path)) {
/* On linux, when the path begins with a NUL byte we are
* referring to an abstract namespace. In theory we should
* allow an extra byte below, since we don't need the NULL.
* BUT, to get into this branch of code, the name is too long,
* so we don't care. */
xparam->inputs.namelen = sizeof(unix_addr->sun_path) - 1;
php_error_docref(NULL, E_NOTICE,
"socket path exceeded the maximum allowed length of %lu bytes "
"and was truncated", (unsigned long)sizeof(unix_addr->sun_path));
}
memcpy(unix_addr->sun_path, xparam->inputs.name, xparam->inputs.namelen);
return 1;
} | 0 | [
"CWE-20",
"CWE-918"
]
| php-src | bab0b99f376dac9170ac81382a5ed526938d595a | 25,136,566,035,132,986,000,000,000,000,000,000,000 | 23 | Detect invalid port in xp_socket parse ip address
For historical reasons, fsockopen() accepts the port and hostname
separately: fsockopen('127.0.0.1', 80)
However, with the introdcution of stream transports in PHP 4.3,
it became possible to include the port in the hostname specifier:
fsockopen('127.0.0.1:80')
Or more formally: fsockopen('tcp://127.0.0.1:80')
Confusing results when these two forms are combined, however.
fsockopen('127.0.0.1:80', 443) results in fsockopen() attempting
to connect to '127.0.0.1:80:443' which any reasonable stack would
consider invalid.
Unfortunately, PHP parses the address looking for the first colon
(with special handling for IPv6, don't worry) and calls atoi()
from there. atoi() in turn, simply stops parsing at the first
non-numeric character and returns the value so far.
The end result is that the explicitly supplied port is treated
as ignored garbage, rather than producing an error.
This diff replaces atoi() with strtol() and inspects the
stop character. If additional "garbage" of any kind is found,
it fails and returns an error. |
grepfile (char const *file, struct stats *stats)
{
int desc;
int count;
int status;
filename = (file ? file : label ? label : _("(standard input)"));
if (! file)
desc = STDIN_FILENO;
else if (devices == SKIP_DEVICES)
{
/* Don't open yet, since that might have side effects on a device. */
desc = -1;
}
else
{
/* When skipping directories, don't worry about directories
that can't be opened. */
desc = open (file, O_RDONLY);
if (desc < 0 && directories != SKIP_DIRECTORIES)
{
suppressible_error (file, errno);
return 1;
}
}
if (desc < 0
? stat (file, &stats->stat) != 0
: fstat (desc, &stats->stat) != 0)
{
suppressible_error (filename, errno);
if (file)
close (desc);
return 1;
}
if ((directories == SKIP_DIRECTORIES && S_ISDIR (stats->stat.st_mode))
|| (devices == SKIP_DEVICES && (S_ISCHR (stats->stat.st_mode)
|| S_ISBLK (stats->stat.st_mode)
|| S_ISSOCK (stats->stat.st_mode)
|| S_ISFIFO (stats->stat.st_mode))))
{
if (file)
close (desc);
return 1;
}
/* If there is a regular file on stdout and the current file refers
to the same i-node, we have to report the problem and skip it.
Otherwise when matching lines from some other input reach the
disk before we open this file, we can end up reading and matching
those lines and appending them to the file from which we're reading.
Then we'd have what appears to be an infinite loop that'd terminate
only upon filling the output file system or reaching a quota.
However, there is no risk of an infinite loop if grep is generating
no output, i.e., with --silent, --quiet, -q.
Similarly, with any of these:
--max-count=N (-m) (for N >= 2)
--files-with-matches (-l)
--files-without-match (-L)
there is no risk of trouble.
For --max-count=1, grep stops after printing the first match,
so there is no risk of malfunction. But even --max-count=2, with
input==output, while there is no risk of infloop, there is a race
condition that could result in "alternate" output. */
if (!out_quiet && list_files == 0 && 1 < max_count
&& S_ISREG (out_stat.st_mode) && out_stat.st_ino
&& SAME_INODE (stats->stat, out_stat))
{
if (! suppress_errors)
error (0, 0, _("input file %s is also the output"), quote (filename));
errseen = 1;
if (file)
close (desc);
return 1;
}
if (desc < 0)
{
desc = open (file, O_RDONLY);
if (desc < 0)
{
suppressible_error (file, errno);
return 1;
}
}
#if defined SET_BINARY
/* Set input to binary mode. Pipes are simulated with files
on DOS, so this includes the case of "foo | grep bar". */
if (!isatty (desc))
SET_BINARY (desc);
#endif
count = grep (desc, file, stats);
if (count < 0)
status = count + 2;
else
{
if (count_matches)
{
if (out_file)
{
print_filename ();
if (filename_mask)
print_sep (SEP_CHAR_SELECTED);
else
fputc (0, stdout);
}
printf ("%d\n", count);
}
status = !count;
if (list_files == 1 - 2 * status)
{
print_filename ();
fputc ('\n' & filename_mask, stdout);
}
if (! file)
{
off_t required_offset = outleft ? bufoffset : after_last_match;
if (required_offset != bufoffset
&& lseek (desc, required_offset, SEEK_SET) < 0
&& S_ISREG (stats->stat.st_mode))
suppressible_error (filename, errno);
}
else
while (close (desc) != 0)
if (errno != EINTR)
{
suppressible_error (file, errno);
break;
}
}
return status;
} | 1 | [
"CWE-189"
]
| grep | 8fcf61523644df42e1905c81bed26838e0b04f91 | 207,482,351,217,498,850,000,000,000,000,000,000,000 | 139 | grep: fix integer-overflow issues in main program
* NEWS: Document this.
* bootstrap.conf (gnulib_modules): Add inttypes, xstrtoimax.
Remove xstrtoumax.
* src/main.c: Include <inttypes.h>, for INTMAX_MAX, PRIdMAX.
(context_length_arg, prtext, grepbuf, grep, grepfile)
(get_nondigit_option, main):
Use intmax_t, not int, for line counts.
(context_length_arg, main): Silently ceiling line counts
to maximum value, since there's no practical difference between
doing that and using infinite-precision arithmetic.
(out_before, out_after, pending): Now intmax_t, not int.
(max_count, outleft): Now intmax_t, not off_t.
(prepend_args, prepend_default_options, main):
Use size_t, not int, for sizes.
(prepend_default_options): Check for int and size_t overflow. |
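A minimal sketch of the conversion this ChangeLog describes (illustrative helper, not grep's code): parse a count into intmax_t and silently ceiling overflow to the maximum value rather than wrapping an int.

#include <inttypes.h>
#include <errno.h>

static intmax_t parse_count(const char *arg)
{
    errno = 0;
    intmax_t v = strtoimax(arg, NULL, 10);
    if (errno == ERANGE)
        v = INTMAX_MAX;         /* practically infinite for line counts */
    return v;
}
/* Printing then uses the matching macro: printf("%" PRIdMAX "\n", v); */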
vrrp_script_handler(vector_t *strvec)
{
if (!strvec)
return;
alloc_vrrp_script(strvec_slot(strvec, 1));
script_user_set = false;
remove_script = false;
} | 0 | [
"CWE-59",
"CWE-61"
]
| keepalived | 04f2d32871bb3b11d7dc024039952f2fe2750306 | 262,397,512,119,676,980,000,000,000,000,000,000,000 | 9 | When opening files for write, ensure they aren't symbolic links
Issue #1048 identified that if, for example, a non privileged user
created a symbolic link from /etc/keepalvied.data to /etc/passwd,
writing to /etc/keepalived.data (which could be invoked via DBus)
would cause /etc/passwd to be overwritten.
This commit stops keepalived writing to pathnames where the ultimate
component is a symbolic link, by setting O_NOFOLLOW whenever opening
a file for writing.
This might break some setups, where, for example, /etc/keepalived.data
was a symbolic link to /home/fred/keepalived.data. If this was the case,
instead create a symbolic link from /home/fred/keepalived.data to
/tmp/keepalived.data, so that the file is still accessible via
/home/fred/keepalived.data.
There doesn't appear to be a way around this backward incompatibility,
since even checking if the pathname is a symbolic link prior to opening
for writing would create a race condition.
Signed-off-by: Quentin Armitage <[email protected]> |
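A minimal sketch of the hardening this message describes (illustrative helper and mode, not keepalived's code): pass O_NOFOLLOW when opening files for writing so a symlinked path fails instead of overwriting its target.

#include <fcntl.h>
#include <stdio.h>

static FILE *open_data_file(const char *path)
{
    int fd = open(path, O_WRONLY | O_CREAT | O_TRUNC | O_NOFOLLOW, 0644);
    if (fd < 0)
        return NULL;            /* ELOOP if the final component is a symbolic link */
    return fdopen(fd, "w");
}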
int perf_event_init_task(struct task_struct *child)
{
int ctxn, ret;
memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
mutex_init(&child->perf_event_mutex);
INIT_LIST_HEAD(&child->perf_event_list);
for_each_task_context_nr(ctxn) {
ret = perf_event_init_context(child, ctxn);
if (ret)
return ret;
}
return 0;
} | 0 | [
"CWE-703",
"CWE-189"
]
| linux | 8176cced706b5e5d15887584150764894e94e02f | 86,564,128,300,308,650,000,000,000,000,000,000,000 | 16 | perf: Treat attr.config as u64 in perf_swevent_init()
Trinity discovered that we fail to check all 64 bits of
attr.config passed by user space, resulting in out-of-bounds
access of the perf_swevent_enabled array in
sw_perf_event_destroy().
Introduced in commit b0a873ebb ("perf: Register PMU
implementations").
Signed-off-by: Tommi Rantala <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: [email protected]
Cc: Paul Mackerras <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]> |
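A minimal sketch of the essence of this fix (illustrative, not the exact kernel diff): validate the full 64-bit config value before it is ever narrowed or used as an array index.

#include <stdbool.h>
#include <stdint.h>

#define SW_EVENT_MAX 16   /* illustrative bound; the kernel uses PERF_COUNT_SW_MAX */

static bool sw_event_config_ok(uint64_t config)
{
    return config < SW_EVENT_MAX;   /* compare as u64; truncating to int first drops bits 32..63 */
}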
Status getSqliteJournalMode(const fs::path& sqlite_db) {
TableRows result;
auto status = genTableRowsForSqliteTable(
sqlite_db, "PRAGMA journal_mode;", result, true);
if (!status.ok()) {
return status;
}
if (result.empty()) {
VLOG(1) << "PRAGMA query returned empty results";
return Status(1, "Could not retrieve journal mode");
}
auto resultmap = static_cast<Row>(*result[0]);
if (resultmap.find("journal_mode") == resultmap.end()) {
VLOG(1) << "journal_mode not found PRAGMA query results";
return Status(1, "Could not retrieve journal mode");
}
return Status(Status::kSuccessCode,
boost::algorithm::to_lower_copy(resultmap["journal_mode"]));
} | 0 | [
"CWE-77",
"CWE-295"
]
| osquery | c3f9a3dae22d43ed3b4f6a403cbf89da4cba7c3c | 169,081,836,941,239,700,000,000,000,000,000,000,000 | 19 | Merge pull request from GHSA-4g56-2482-x7q8
* Proposed fix for attach tables vulnerability
* Add authorizer to ATC tables and cleanups
- Add unit test for authorizer function |
static __always_inline u16 vmcs_read16(unsigned long field)
{
vmcs_check16(field);
if (static_branch_unlikely(&enable_evmcs))
return evmcs_read16(field);
return __vmcs_readl(field);
} | 0 | [
"CWE-284"
]
| linux | 727ba748e110b4de50d142edca9d6a9b7e6111d8 | 122,611,536,929,451,620,000,000,000,000,000,000,000 | 7 | kvm: nVMX: Enforce cpl=0 for VMX instructions
VMX instructions executed inside a L1 VM will always trigger a VM exit
even when executed with cpl 3. This means we must perform the
privilege check in software.
Fixes: 70f3aac964ae("kvm: nVMX: Remove superfluous VMX instruction fault checks")
Cc: [email protected]
Signed-off-by: Felix Wilhelm <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
static int merge_horizontally(gx_device_txtwrite_t *tdev)
{
#ifdef TRACE_TXTWRITE
text_list_entry_t *debug_x;
#endif
unsigned short UnicodeSpace = 0x20;
page_text_list_t *y_list = tdev->PageData.y_ordered_list;
while (y_list) {
float average_width;
text_list_entry_t *from, *to;
from = y_list->x_ordered_list;
to = from->next;
while (from && to) {
average_width = (from->end.x - from->start.x) / from->Unicode_Text_Size;
if (to->start.x - from->end.x < average_width / 2) {
/* consolidate fragments */
unsigned short *NewText;
float *NewWidths;
NewText = (unsigned short *)gs_malloc(tdev->memory->stable_memory,
(from->Unicode_Text_Size + to->Unicode_Text_Size), sizeof(unsigned short), "txtwrite alloc working text buffer");
NewWidths = (float *)gs_malloc(tdev->memory->stable_memory,
(from->Unicode_Text_Size + to->Unicode_Text_Size), sizeof(float), "txtwrite alloc Widths array");
if (!NewText || !NewWidths) {
if (NewText)
gs_free(tdev->memory, NewText, from->Unicode_Text_Size + to->Unicode_Text_Size, sizeof (unsigned short), "free working text fragment");
/* ran out of memory, don't consolidate */
from = from->next;
to = to->next;
} else {
#ifdef TRACE_TXTWRITE
gp_fprintf(tdev->DebugFile, "Consolidating two horizontal fragments in one line, before:\n\t");
gp_fwrite(from->Unicode_Text, sizeof(unsigned short), from->Unicode_Text_Size, tdev->DebugFile);
gp_fprintf(tdev->DebugFile, "\n\t");
gp_fwrite(to->Unicode_Text, sizeof(unsigned short), to->Unicode_Text_Size, tdev->DebugFile);
#endif
memcpy(NewText, from->Unicode_Text, from->Unicode_Text_Size * sizeof(unsigned short));
memcpy(&NewText[from->Unicode_Text_Size], to->Unicode_Text, to->Unicode_Text_Size * sizeof(unsigned short));
memcpy(NewWidths, from->Widths, from->Unicode_Text_Size * sizeof(float));
memcpy(&NewWidths[from->Unicode_Text_Size], to->Widths, to->Unicode_Text_Size * sizeof(float));
gs_free(tdev->memory, from->Unicode_Text, from->Unicode_Text_Size, sizeof (unsigned short), "free consolidated text fragment");
gs_free(tdev->memory, to->Unicode_Text, to->Unicode_Text_Size, sizeof (unsigned short), "free consolidated text fragment");
gs_free(tdev->memory, from->Widths, from->Unicode_Text_Size, sizeof (float), "free consolidated Widths array");
gs_free(tdev->memory, to->Widths, to->Unicode_Text_Size, sizeof (float), "free consolidated Widths array");
gs_free(tdev->memory, to->FontName, 1, strlen(from->FontName) + 1, "free FontName");
from->Unicode_Text = NewText;
from->Unicode_Text_Size += to->Unicode_Text_Size;
from->Widths = NewWidths;
#ifdef TRACE_TXTWRITE
gp_fprintf(tdev->DebugFile, "After:\n\t");
gp_fwrite(from->Unicode_Text, sizeof(unsigned short), from->Unicode_Text_Size, tdev->DebugFile);
#endif
from->end = to->end;
from->next = to->next;
if (from->next)
from->next->previous = from;
gs_free(tdev->memory, to, 1, sizeof(text_list_entry_t), "free consolidated fragment");
to = from->next;
}
} else {
if (to->start.x - from->end.x < average_width *2){
unsigned short *NewText;
float *NewWidths;
NewText = (unsigned short *)gs_malloc(tdev->memory->stable_memory,
(from->Unicode_Text_Size + to->Unicode_Text_Size + 1), sizeof(unsigned short), "txtwrite alloc text state");
NewWidths = (float *)gs_malloc(tdev->memory->stable_memory,
(from->Unicode_Text_Size + to->Unicode_Text_Size + 1), sizeof(float), "txtwrite alloc Widths array");
if (!NewText || !NewWidths) {
if (NewText)
gs_free(tdev->memory, NewText, from->Unicode_Text_Size + to->Unicode_Text_Size, sizeof (unsigned short), "free working text fragment");
/* ran out of memory, don't consolidate */
from = from->next;
to = to->next;
} else {
memcpy(NewText, from->Unicode_Text, from->Unicode_Text_Size * sizeof(unsigned short));
memcpy(&NewText[from->Unicode_Text_Size], &UnicodeSpace, sizeof(unsigned short));
memcpy(&NewText[from->Unicode_Text_Size + 1], to->Unicode_Text, to->Unicode_Text_Size * sizeof(unsigned short));
memcpy(NewWidths, from->Widths, from->Unicode_Text_Size * sizeof(float));
NewWidths[from->Unicode_Text_Size] = to->start.x - from->end.x;
memcpy(&NewWidths[from->Unicode_Text_Size + 1], to->Widths, to->Unicode_Text_Size * sizeof(float));
gs_free(tdev->memory, from->Unicode_Text, from->Unicode_Text_Size, sizeof (unsigned short), "free consolidated text fragment");
gs_free(tdev->memory, to->Unicode_Text, to->Unicode_Text_Size, sizeof (unsigned short), "free consolidated text fragment");
gs_free(tdev->memory, from->Widths, from->Unicode_Text_Size, sizeof (float), "free consolidated Widths array");
gs_free(tdev->memory, to->Widths, to->Unicode_Text_Size, sizeof (float), "free consolidated Widths array");
gs_free(tdev->memory, to->FontName, 1, strlen(from->FontName) + 1, "free FontName");
from->Unicode_Text = NewText;
from->Unicode_Text_Size += to->Unicode_Text_Size + 1;
from->Widths = NewWidths;
from->end = to->end;
from->next = to->next;
if (from->next)
from->next->previous = from;
gs_free(tdev->memory, to, 1, sizeof(text_list_entry_t), "free consolidated fragment");
to = from->next;
}
} else {
from = from->next;
to = to->next;
}
}
}
y_list = y_list->next;
}
return 0;
} | 0 | [
"CWE-476"
]
| ghostpdl | 407c98a38c3a6ac1681144ed45cc2f4fc374c91f | 15,226,933,656,397,460,000,000,000,000,000,000,000 | 109 | txtwrite - guard against using GS_NO_GLYPH to retrieve Unicode values
Bug 701822 "Segmentation fault at psi/iname.c:296 in names_index_ref"
Avoid using a glyph with the value GS_NO_GLYPH to retrieve a glyph
name or Unicode code point from the glyph ID, as this is not a valid
ID. |
static int coolkey_get_challenge(sc_card_t *card, u8 *rnd, size_t len)
{
LOG_FUNC_CALLED(card->ctx);
if (len > COOLKEY_MAX_CHUNK_SIZE)
len = COOLKEY_MAX_CHUNK_SIZE;
LOG_TEST_RET(card->ctx,
coolkey_apdu_io(card, COOLKEY_CLASS, COOLKEY_INS_GET_RANDOM, 0, 0,
NULL, 0, &rnd, &len, NULL, 0),
"Could not get challenge");
LOG_FUNC_RETURN(card->ctx, (int) len);
} | 0 | [
"CWE-415"
]
| OpenSC | c246f6f69a749d4f68626b40795a4f69168008f4 | 65,246,515,333,337,870,000,000,000,000,000,000,000 | 14 | coolkey: Make sure the object ID is unique when filling list
Thanks to oss-fuzz
https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=19208 |
struct file_id smb_vfs_call_file_id_create(struct vfs_handle_struct *handle,
const SMB_STRUCT_STAT *sbuf)
{
VFS_FIND(file_id_create);
return handle->fns->file_id_create(handle, sbuf);
} | 0 | [
"CWE-22"
]
| samba | bd269443e311d96ef495a9db47d1b95eb83bb8f4 | 147,262,332,549,578,360,000,000,000,000,000,000,000 | 6 | Fix bug 7104 - "wide links" and "unix extensions" are incompatible.
Change parameter "wide links" to default to "no".
Ensure "wide links = no" if "unix extensions = yes" on a share.
Fix man pages to refect this.
Remove "within share" checks for a UNIX symlink set - even if
widelinks = no. The server will not follow that link anyway.
Correct DEBUG message in check_reduced_name() to add missing "\n"
so it's really clear when a path is being denied as it's outside
the enclosing share path.
Jeremy. |
TEST_P(Security, BuiltinAuthenticationAndAccessAndCryptoPlugin_PermissionsEnableDiscoveryEnableAccessNone_validation_ok_enable_discovery_enable_access_none)
// *INDENT-ON*
{
PubSubReader<HelloWorldType> reader(TEST_TOPIC_NAME);
PubSubWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
std::string governance_file("governance_enable_discovery_enable_access_none.smime");
BuiltinAuthenticationAndAccessAndCryptoPlugin_Permissions_validation_ok_common(reader, writer, governance_file);
} | 0 | [
"CWE-284"
]
| Fast-DDS | d2aeab37eb4fad4376b68ea4dfbbf285a2926384 | 97,574,982,774,376,880,000,000,000,000,000,000,000 | 9 | check remote permissions (#1387)
* Refs 5346. Blackbox test
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. one-way string compare
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. Do not add partition separator on last partition
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. Uncrustify
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. Uncrustify
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Access control unit testing
It only covers Partition and Topic permissions
Signed-off-by: Iker Luengo <[email protected]>
* Refs #3680. Fix partition check on Permissions plugin.
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Uncrustify
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Fix tests on mac
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Fix windows tests
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Avoid memory leak on test
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Proxy data mocks should not return temporary objects
Signed-off-by: Iker Luengo <[email protected]>
* refs 3680. uncrustify
Signed-off-by: Iker Luengo <[email protected]>
Co-authored-by: Miguel Company <[email protected]> |
static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
{
struct hci_cb *cb;
__u8 encrypt;
if (conn->state == BT_CONFIG) {
if (status)
conn->state = BT_CONNECTED;
hci_connect_cfm(conn, status);
hci_conn_drop(conn);
return;
}
if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
encrypt = 0x00;
else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
encrypt = 0x02;
else
encrypt = 0x01;
if (conn->sec_level == BT_SECURITY_SDP)
conn->sec_level = BT_SECURITY_LOW;
if (conn->pending_sec_level > conn->sec_level)
conn->sec_level = conn->pending_sec_level;
mutex_lock(&hci_cb_list_lock);
list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->security_cfm)
cb->security_cfm(conn, status, encrypt);
}
mutex_unlock(&hci_cb_list_lock);
if (conn->security_cfm_cb)
conn->security_cfm_cb(conn, status);
} | 0 | [
"CWE-290"
]
| linux | 3ca44c16b0dcc764b641ee4ac226909f5c421aa3 | 157,888,086,928,095,970,000,000,000,000,000,000,000 | 37 | Bluetooth: Consolidate encryption handling in hci_encrypt_cfm
This makes hci_encrypt_cfm call hci_connect_cfm in case the connection
state is BT_CONFIG so callers don't have to check the state.
Signed-off-by: Luiz Augusto von Dentz <[email protected]>
Signed-off-by: Marcel Holtmann <[email protected]> |
struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
struct udphdr *uh)
{
struct udp_offload_priv *uo_priv;
struct sk_buff *p, **pp = NULL;
struct udphdr *uh2;
unsigned int off = skb_gro_offset(skb);
int flush = 1;
if (NAPI_GRO_CB(skb)->encap_mark ||
(skb->ip_summed != CHECKSUM_PARTIAL &&
NAPI_GRO_CB(skb)->csum_cnt == 0 &&
!NAPI_GRO_CB(skb)->csum_valid))
goto out;
/* mark that this skb passed once through the tunnel gro layer */
NAPI_GRO_CB(skb)->encap_mark = 1;
rcu_read_lock();
uo_priv = rcu_dereference(udp_offload_base);
for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) &&
uo_priv->offload->port == uh->dest &&
uo_priv->offload->callbacks.gro_receive)
goto unflush;
}
goto out_unlock;
unflush:
flush = 0;
for (p = *head; p; p = p->next) {
if (!NAPI_GRO_CB(p)->same_flow)
continue;
uh2 = (struct udphdr *)(p->data + off);
/* Match ports and either checksums are either both zero
* or nonzero.
*/
if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) ||
(!uh->check ^ !uh2->check)) {
NAPI_GRO_CB(p)->same_flow = 0;
continue;
}
}
skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
pp = uo_priv->offload->callbacks.gro_receive(head, skb,
uo_priv->offload);
out_unlock:
rcu_read_unlock();
out:
NAPI_GRO_CB(skb)->flush |= flush;
return pp;
} | 0 | [
"CWE-400",
"CWE-703"
]
| linux | fac8e0f579695a3ecbc4d3cac369139d7f819971 | 146,721,979,258,605,560,000,000,000,000,000,000,000 | 59 | tunnels: Don't apply GRO to multiple layers of encapsulation.
When drivers express support for TSO of encapsulated packets, they
only mean that they can do it for one layer of encapsulation.
Supporting additional levels would mean updating, at a minimum,
more IP length fields and they are unaware of this.
No encapsulation device expresses support for handling offloaded
encapsulated packets, so we won't generate these types of frames
in the transmit path. However, GRO doesn't have a check for
multiple levels of encapsulation and will attempt to build them.
UDP tunnel GRO actually does prevent this situation but it only
handles multiple UDP tunnels stacked on top of each other. This
generalizes that solution to prevent any kind of tunnel stacking
that would cause problems.
Fixes: bf5a755f ("net-gre-gro: Add GRE support to the GRO stack")
Signed-off-by: Jesse Gross <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
mbfl_buffer_converter_feed(mbfl_buffer_converter *convd, mbfl_string *string)
{
return mbfl_buffer_converter_feed2(convd, string, NULL);
} | 0 | [
"CWE-119"
]
| php-src | 64f42c73efc58e88671ad76b6b6bc8e2b62713e1 | 122,677,257,127,257,120,000,000,000,000,000,000,000 | 4 | Fixed bug #71906: AddressSanitizer: negative-size-param (-1) in mbfl_strcut |
void mark_mounts_for_expiry(struct list_head *mounts)
{
struct vfsmount *mnt, *next;
LIST_HEAD(graveyard);
if (list_empty(mounts))
return;
spin_lock(&vfsmount_lock);
/* extract from the expiration list every vfsmount that matches the
* following criteria:
* - only referenced by its parent vfsmount
* - still marked for expiry (marked on the last call here; marks are
* cleared by mntput())
*/
list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
if (!xchg(&mnt->mnt_expiry_mark, 1) ||
atomic_read(&mnt->mnt_count) != 1)
continue;
mntget(mnt);
list_move(&mnt->mnt_expire, &graveyard);
}
expire_mount_list(&graveyard, mounts);
spin_unlock(&vfsmount_lock);
} | 0 | [
"CWE-269"
]
| linux-2.6 | ee6f958291e2a768fd727e7a67badfff0b67711a | 14,477,755,083,510,403,000,000,000,000,000,000,000 | 29 | check privileges before setting mount propagation
There's a missing check for CAP_SYS_ADMIN in do_change_type().
Signed-off-by: Miklos Szeredi <[email protected]>
Cc: Al Viro <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
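A minimal sketch of the missing check this message describes (the function name and elided body are illustrative; capable() is the kernel's usual privilege test):

#include <linux/capability.h>
#include <linux/errno.h>

/* Privilege gate at the top of the propagation-change path. */
static int change_type_checked(int flag)
{
    if (!capable(CAP_SYS_ADMIN))   /* the check the patch adds */
        return -EPERM;
    /* ... walk the mount tree and set shared/slave/private/unbindable propagation ... */
    return 0;
}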
event_help( RenderState state )
{
ADisplay display = (ADisplay)state->display.disp;
grEvent dummy_event;
adisplay_clear( display );
grGotoxy( 0, 0 );
grSetMargin( 2, 1 );
grGotobitmap( display->bitmap );
grWriteln( "Text Viewer - Simple text/font proofer for the FreeType project" );
grLn();
grWriteln( "This program is used to display text using two distinct algorithms." );
grWriteln( "On the left, text is rendered by the TrueType bytecode interpreter." );
grWriteln( "In the middle, text is rendered through the FreeType auto-hinter." );
grWriteln( "On the right, text is rendered unhinted." );
grLn();
grWriteln( "Use the following keys:" );
grLn();
grWriteln( " F1, ? display this help screen" );
grLn();
grWriteln( " n, p select previous/next font" );
grLn();
grWriteln( " 1, 2, 3 select left, middle, or right column" );
grWriteln( " a toggle `ignore global advance width flag'" );
grWriteln( " d toggle lsb/rsb deltas" );
grWriteln( " h toggle hinting mode" );
grWriteln( " k toggle kerning" );
grWriteln( " g, v adjust gamma value" );
grWriteln( " r toggle rendering mode" );
grLn();
grWriteln( " l change LCD filter type" );
grWriteln( " [, ] select custom LCD filter weight" );
grWriteln( " -, +(=) adjust selected custom LCD filter weight");
grLn();
grWriteln( " Up, Down adjust pointsize by 0.5 unit" );
grWriteln( " PgUp, PgDn adjust pointsize by 5 units" );
grLn();
grWriteln( "press any key to exit this help screen" );
grRefreshSurface( display->surface );
grListenSurface( display->surface, gr_event_key, &dummy_event );
} | 0 | [
"CWE-120"
]
| freetype2-demos | b995299b73ba4cd259f221f500d4e63095508bec | 173,356,074,471,160,500,000,000,000,000,000,000,000 | 44 | Fix Savannah bug #30054.
* src/ftdiff.c, src/ftgrid.c, src/ftmulti.c, src/ftstring.c,
src/ftview.c: Use precision for `%s' where appropriate to avoid
buffer overflows. |
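A minimal sketch of the fix pattern (buffer size, precision and field names are illustrative): bound every %s expansion so an attacker-controlled font name cannot overflow a fixed-size buffer.

#include <stddef.h>
#include <stdio.h>

static void format_header(char *buf, size_t bufsize,
                          const char *family, const char *style)
{
    /* "%.50s" caps each name at 50 bytes; snprintf additionally bounds the whole line */
    snprintf(buf, bufsize, "%.50s %.50s", family, style);
}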
void linenoiseSetCompletionCallback(linenoiseCompletionCallback* fn) {
completionCallback = fn;
} | 0 | [
"CWE-200"
]
| mongo | 035cf2afc04988b22cb67f4ebfd77e9b344cb6e0 | 91,161,191,022,938,640,000,000,000,000,000,000,000 | 3 | SERVER-25335 avoid group and other permissions when creating .dbshell history file |
int usb_device_supports_lpm(struct usb_device *udev)
{
/* Some devices have trouble with LPM */
if (udev->quirks & USB_QUIRK_NO_LPM)
return 0;
/* USB 2.1 (and greater) devices indicate LPM support through
* their USB 2.0 Extended Capabilities BOS descriptor.
*/
if (udev->speed == USB_SPEED_HIGH || udev->speed == USB_SPEED_FULL) {
if (udev->bos->ext_cap &&
(USB_LPM_SUPPORT &
le32_to_cpu(udev->bos->ext_cap->bmAttributes)))
return 1;
return 0;
}
/*
* According to the USB 3.0 spec, all USB 3.0 devices must support LPM.
* However, there are some that don't, and they set the U1/U2 exit
* latencies to zero.
*/
if (!udev->bos->ss_cap) {
dev_info(&udev->dev, "No LPM exit latency info found, disabling LPM.\n");
return 0;
}
if (udev->bos->ss_cap->bU1devExitLat == 0 &&
udev->bos->ss_cap->bU2DevExitLat == 0) {
if (udev->parent)
dev_info(&udev->dev, "LPM exit latency is zeroed, disabling LPM.\n");
else
dev_info(&udev->dev, "We don't know the algorithms for LPM for this host, disabling LPM.\n");
return 0;
}
if (!udev->parent || udev->parent->lpm_capable)
return 1;
return 0;
} | 0 | [
"CWE-703"
]
| linux | e50293ef9775c5f1cf3fcc093037dd6a8c5684ea | 178,997,382,301,608,300,000,000,000,000,000,000,000 | 40 | USB: fix invalid memory access in hub_activate()
Commit 8520f38099cc ("USB: change hub initialization sleeps to
delayed_work") changed the hub_activate() routine to make part of it
run in a workqueue. However, the commit failed to take a reference to
the usb_hub structure or to lock the hub interface while doing so. As
a result, if a hub is plugged in and quickly unplugged before the work
routine can run, the routine will try to access memory that has been
deallocated. Or, if the hub is unplugged while the routine is
running, the memory may be deallocated while it is in active use.
This patch fixes the problem by taking a reference to the usb_hub at
the start of hub_activate() and releasing it at the end (when the work
is finished), and by locking the hub interface while the work routine
is running. It also adds a check at the start of the routine to see
if the hub has already been disconnected, in which case nothing should be
done.
Signed-off-by: Alan Stern <[email protected]>
Reported-by: Alexandru Cornea <[email protected]>
Tested-by: Alexandru Cornea <[email protected]>
Fixes: 8520f38099cc ("USB: change hub initialization sleeps to delayed_work")
CC: <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
static int dns_transaction_on_stream_packet(DnsTransaction *t, DnsPacket *p) {
assert(t);
assert(p);
dns_transaction_close_connection(t);
if (dns_packet_validate_reply(p) <= 0) {
log_debug("Invalid TCP reply packet.");
dns_transaction_complete(t, DNS_TRANSACTION_INVALID_REPLY);
return 0;
}
dns_scope_check_conflicts(t->scope, p);
t->block_gc++;
dns_transaction_process_reply(t, p);
t->block_gc--;
/* If the response wasn't useful, then complete the transition
* now. After all, we are the worst feature set now with TCP
* sockets, and there's really no point in retrying. */
if (t->state == DNS_TRANSACTION_PENDING)
dns_transaction_complete(t, DNS_TRANSACTION_INVALID_REPLY);
else
dns_transaction_gc(t);
return 0;
} | 0 | [
"CWE-416"
]
| systemd | 904dcaf9d4933499f8334859f52ea8497f2d24ff | 330,328,864,127,120,220,000,000,000,000,000,000,000 | 28 | resolved: take particular care when detaching DnsServer from its default stream
DnsStream and DnsServer have a symbiotic relationship: one DnsStream is
the current "default" stream of the server (and thus reffed by it), but
each stream also refs the server it is connected to. This cyclic
dependency can result in weird situations: when one is
destroyed/unlinked/stopped it needs to unregister itself from the other,
but doing this will trigger unregistration of the other. Hence, let's
make sure we unregister the stream from the server before destroying it,
to break this cycle.
Most likely fixes: #10725 |
irc_server_set_prefix_modes_chars (struct t_irc_server *server,
const char *prefix)
{
char *pos;
int i, length_modes, length_chars;
if (!server || !prefix)
return;
/* free previous values */
if (server->prefix_modes)
{
free (server->prefix_modes);
server->prefix_modes = NULL;
}
if (server->prefix_chars)
{
free (server->prefix_chars);
server->prefix_chars = NULL;
}
/* assign new values */
pos = strchr (prefix, ')');
if (pos)
{
server->prefix_modes = weechat_strndup (prefix + 1,
pos - prefix - 1);
if (server->prefix_modes)
{
pos++;
length_modes = strlen (server->prefix_modes);
length_chars = strlen (pos);
server->prefix_chars = malloc (length_modes + 1);
if (server->prefix_chars)
{
for (i = 0; i < length_modes; i++)
{
server->prefix_chars[i] = (i < length_chars) ? pos[i] : ' ';
}
server->prefix_chars[length_modes] = '\0';
}
else
{
free (server->prefix_modes);
server->prefix_modes = NULL;
}
}
}
} | 0 | [
"CWE-20"
]
| weechat | c265cad1c95b84abfd4e8d861f25926ef13b5d91 | 297,603,703,948,279,500,000,000,000,000,000,000,000 | 49 | Fix verification of SSL certificates by calling gnutls verify callback (patch #7459) |
void ndpi_category_set_name(struct ndpi_detection_module_struct *ndpi_str,
ndpi_protocol_category_t category,
char *name) {
if(!name)
return;
switch (category) {
case NDPI_PROTOCOL_CATEGORY_CUSTOM_1:
snprintf(ndpi_str->custom_category_labels[0], CUSTOM_CATEGORY_LABEL_LEN, "%s", name);
break;
case NDPI_PROTOCOL_CATEGORY_CUSTOM_2:
snprintf(ndpi_str->custom_category_labels[1], CUSTOM_CATEGORY_LABEL_LEN, "%s", name);
break;
case NDPI_PROTOCOL_CATEGORY_CUSTOM_3:
snprintf(ndpi_str->custom_category_labels[2], CUSTOM_CATEGORY_LABEL_LEN, "%s", name);
break;
case NDPI_PROTOCOL_CATEGORY_CUSTOM_4:
snprintf(ndpi_str->custom_category_labels[3], CUSTOM_CATEGORY_LABEL_LEN, "%s", name);
break;
case NDPI_PROTOCOL_CATEGORY_CUSTOM_5:
snprintf(ndpi_str->custom_category_labels[4], CUSTOM_CATEGORY_LABEL_LEN, "%s", name);
break;
default:
break;
}
} | 0 | [
"CWE-416",
"CWE-787"
]
| nDPI | 6a9f5e4f7c3fd5ddab3e6727b071904d76773952 | 211,023,442,689,197,000,000,000,000,000,000,000,000 | 31 | Fixed use after free caused by dangling pointer
* This fix also improved RCE Injection detection
Signed-off-by: Toni Uhlig <[email protected]> |
inline void Pad(const tflite::PadParams& op_params,
const RuntimeShape& input_shape, const T* input_data,
const P* pad_value_ptr, const RuntimeShape& output_shape,
T* output_data) {
PadImpl(op_params, input_shape, input_data, pad_value_ptr, output_shape,
output_data);
} | 0 | [
"CWE-476",
"CWE-369"
]
| tensorflow | 15691e456c7dc9bd6be203b09765b063bf4a380c | 191,033,199,554,764,350,000,000,000,000,000,000,000 | 7 | Prevent dereferencing of null pointers in TFLite's `add.cc`.
PiperOrigin-RevId: 387244946
Change-Id: I56094233327fbd8439b92e1dbb1262176e00eeb9 |
static int is_non_fatal(int lib_error_code) {
return lib_error_code < 0 && lib_error_code > NGHTTP2_ERR_FATAL;
} | 0 | []
| nghttp2 | 0a6ce87c22c69438ecbffe52a2859c3a32f1620f | 69,239,808,865,704,995,000,000,000,000,000,000,000 | 3 | Add nghttp2_option_set_max_outbound_ack |
static void test20(char const* infile,
char const* password,
char const* outfile,
char const* outfile2)
{
qpdf_read(qpdf, infile, password);
qpdf_init_write(qpdf, outfile);
qpdf_set_static_ID(qpdf, QPDF_TRUE);
qpdf_set_static_aes_IV(qpdf, QPDF_TRUE);
qpdf_set_compress_streams(qpdf, QPDF_FALSE);
qpdf_set_decode_level(qpdf, qpdf_dl_specialized);
qpdf_write(qpdf);
report_errors();
} | 0 | [
"CWE-787"
]
| qpdf | d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e | 211,407,405,459,158,330,000,000,000,000,000,000,000 | 14 | Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition. |
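The range-checked conversions this message refers to boil down to helpers that refuse to narrow a value that does not fit in the destination type, instead of letting the compiler truncate it silently. A minimal sketch of one such helper follows; it only illustrates the idea and is not QPDF's actual conversion API.

#include <errno.h>
#include <limits.h>
#include <stddef.h>

/* Convert size_t to int, failing loudly instead of overflowing.
 * Returns 0 on success, -1 (with errno = ERANGE) if the value does
 * not fit in an int. */
static int size_to_int_checked(size_t in, int *out)
{
    if (in > (size_t)INT_MAX) {
        errno = ERANGE;
        return -1;
    }
    *out = (int)in;   /* explicit, known-safe narrowing */
    return 0;
}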
static void mark_reg_not_init(struct bpf_verifier_env *env,
struct bpf_reg_state *regs, u32 regno)
{
if (WARN_ON(regno >= MAX_BPF_REG)) {
verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
/* Something bad happened, let's kill all regs except FP */
for (regno = 0; regno < BPF_REG_FP; regno++)
__mark_reg_not_init(env, regs + regno);
return;
}
__mark_reg_not_init(env, regs + regno);
} | 0 | [
"CWE-119",
"CWE-681",
"CWE-787"
]
| linux | 5b9fbeb75b6a98955f628e205ac26689bcb1383e | 53,966,576,477,166,040,000,000,000,000,000,000,000 | 12 | bpf: Fix scalar32_min_max_or bounds tracking
Simon reported an issue with the current scalar32_min_max_or() implementation.
That is, compared to the other 32 bit subreg tracking functions, the code in
scalar32_min_max_or() stands out that it's using the 64 bit registers instead
of 32 bit ones. This leads to bounds tracking issues, for example:
[...]
8: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
8: (79) r1 = *(u64 *)(r0 +0)
R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
9: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
9: (b7) r0 = 1
10: R0_w=inv1 R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
10: (18) r2 = 0x600000002
12: R0_w=inv1 R1_w=inv(id=0) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
12: (ad) if r1 < r2 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: (95) exit
14: R0_w=inv1 R1_w=inv(id=0,umax_value=25769803777,var_off=(0x0; 0x7ffffffff)) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
14: (25) if r1 > 0x0 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: (95) exit
16: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=25769803777,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
16: (47) r1 |= 0
17: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=32212254719,var_off=(0x1; 0x700000000),s32_max_value=1,u32_max_value=1) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
[...]
The bound tests on the map value force the upper unsigned bound to be 25769803777
in 64 bit (0b11000000000000000000000000000000001) and then lower one to be 1. By
using OR they are truncated and thus result in the range [1,1] for the 32 bit reg
tracker. This is incorrect given the only thing we know is that the value must be
positive and thus 2147483647 (0b1111111111111111111111111111111) at max for the
subregs. Fix it by using the {u,s}32_{min,max}_value vars instead. This also makes
sense, for example, for the case where we update dst_reg->s32_{min,max}_value in
the else branch we need to use the newly computed dst_reg->u32_{min,max}_value as
we know that these are positive. Previously, in the else branch the 64 bit values
of umin_value=1 and umax_value=32212254719 were used and latter got truncated to
be 1 as upper bound there. After the fix the subreg range is now correct:
[...]
8: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
8: (79) r1 = *(u64 *)(r0 +0)
R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm
9: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
9: (b7) r0 = 1
10: R0_w=inv1 R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm
10: (18) r2 = 0x600000002
12: R0_w=inv1 R1_w=inv(id=0) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
12: (ad) if r1 < r2 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
13: (95) exit
14: R0_w=inv1 R1_w=inv(id=0,umax_value=25769803777,var_off=(0x0; 0x7ffffffff)) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
14: (25) if r1 > 0x0 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
15: (95) exit
16: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=25769803777,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
16: (47) r1 |= 0
17: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=32212254719,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm
[...]
Fixes: 3f50f132d840 ("bpf: Verifier, do explicit ALU32 bounds tracking")
Reported-by: Simon Scannell <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Reviewed-by: John Fastabend <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]> |
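The underlying problem is computing a sound value range for the bitwise OR of two ranges using the 32-bit bounds rather than the 64-bit ones. As a heavily simplified illustration (the verifier itself derives tighter bounds from its var_off/tnum tracking and the u32_{min,max}_value fields, not this arithmetic), sound-but-loose unsigned bounds can be computed like this:

#include <stdint.h>

/* Conservative bounds for (a | b) when a is in [amin, amax] and b is in
 * [bmin, bmax], all unsigned 32-bit. OR can only set bits, so the result
 * is at least max(amin, bmin); and since a|b <= a+b, it is at most
 * amax + bmax (clamped to UINT32_MAX). */
static void bounds_or_u32(uint32_t amin, uint32_t amax,
                          uint32_t bmin, uint32_t bmax,
                          uint32_t *rmin, uint32_t *rmax)
{
    uint64_t hi = (uint64_t)amax + (uint64_t)bmax;

    *rmin = amin > bmin ? amin : bmin;
    *rmax = hi > UINT32_MAX ? UINT32_MAX : (uint32_t)hi;
}

For the example in the message, ORing a value known to lie in [1, 2147483647] with zero must keep the 32-bit upper bound at 2147483647 rather than collapsing the range to [1, 1].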
PHP_FUNCTION(timezone_name_from_abbr)
{
char *abbr;
char *tzid;
int abbr_len;
long gmtoffset = -1;
long isdst = -1;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|ll", &abbr, &abbr_len, &gmtoffset, &isdst) == FAILURE) {
RETURN_FALSE;
}
tzid = timelib_timezone_id_from_abbr(abbr, gmtoffset, isdst);
if (tzid) {
RETURN_STRING(tzid, 1);
} else {
RETURN_FALSE;
}
} | 0 | []
| php-src | c377f1a715476934133f3254d1e0d4bf3743e2d2 | 186,004,407,244,319,320,000,000,000,000,000,000,000 | 19 | Fix bug #68942 (Use after free vulnerability in unserialize() with DateTimeZone) |
static struct rtable *icmp_route_lookup(struct net *net, struct sk_buff *skb_in,
const struct iphdr *iph,
__be32 saddr, u8 tos,
int type, int code,
struct icmp_bxm *param)
{
struct flowi4 fl4 = {
.daddr = (param->replyopts.opt.opt.srr ?
param->replyopts.opt.opt.faddr : iph->saddr),
.saddr = saddr,
.flowi4_tos = RT_TOS(tos),
.flowi4_proto = IPPROTO_ICMP,
.fl4_icmp_type = type,
.fl4_icmp_code = code,
};
struct rtable *rt, *rt2;
int err;
security_skb_classify_flow(skb_in, flowi4_to_flowi(&fl4));
rt = __ip_route_output_key(net, &fl4);
if (IS_ERR(rt))
return rt;
/* No need to clone since we're just using its address. */
rt2 = rt;
if (!fl4.saddr)
fl4.saddr = rt->rt_src;
rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
flowi4_to_flowi(&fl4), NULL, 0);
if (!IS_ERR(rt)) {
if (rt != rt2)
return rt;
} else if (PTR_ERR(rt) == -EPERM) {
rt = NULL;
} else
return rt;
err = xfrm_decode_session_reverse(skb_in, flowi4_to_flowi(&fl4), AF_INET);
if (err)
goto relookup_failed;
if (inet_addr_type(net, fl4.saddr) == RTN_LOCAL) {
rt2 = __ip_route_output_key(net, &fl4);
if (IS_ERR(rt2))
err = PTR_ERR(rt2);
} else {
struct flowi4 fl4_2 = {};
unsigned long orefdst;
fl4_2.daddr = fl4.saddr;
rt2 = ip_route_output_key(net, &fl4_2);
if (IS_ERR(rt2)) {
err = PTR_ERR(rt2);
goto relookup_failed;
}
/* Ugh! */
orefdst = skb_in->_skb_refdst; /* save old refdst */
err = ip_route_input(skb_in, fl4.daddr, fl4.saddr,
RT_TOS(tos), rt2->dst.dev);
dst_release(&rt2->dst);
rt2 = skb_rtable(skb_in);
skb_in->_skb_refdst = orefdst; /* restore old refdst */
}
if (err)
goto relookup_failed;
rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst,
flowi4_to_flowi(&fl4), NULL,
XFRM_LOOKUP_ICMP);
if (!IS_ERR(rt2)) {
dst_release(&rt->dst);
rt = rt2;
} else if (PTR_ERR(rt2) == -EPERM) {
if (rt)
dst_release(&rt->dst);
return rt2;
} else {
err = PTR_ERR(rt2);
goto relookup_failed;
}
return rt;
relookup_failed:
if (rt)
return rt;
return ERR_PTR(err);
} | 0 | [
"CWE-362"
]
| linux-2.6 | f6d8bd051c391c1c0458a30b2a7abcd939329259 | 166,877,242,763,749,700,000,000,000,000,000,000,000 | 91 | inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We cant insert an rcu_head in struct ip_options since its included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static inline int pfkey_mode_from_xfrm(int mode)
{
switch(mode) {
case XFRM_MODE_TRANSPORT:
return IPSEC_MODE_TRANSPORT;
case XFRM_MODE_TUNNEL:
return IPSEC_MODE_TUNNEL;
case XFRM_MODE_BEET:
return IPSEC_MODE_BEET;
default:
return -1;
}
} | 0 | []
| linux | 096f41d3a8fcbb8dde7f71379b1ca85fe213eded | 177,876,692,407,933,140,000,000,000,000,000,000,000 | 13 | af_key: Fix sadb_x_ipsecrequest parsing
The parsing of sadb_x_ipsecrequest is broken in a number of ways.
First of all we're not verifying sadb_x_ipsecrequest_len. This
is needed when the structure carries addresses at the end. Worse,
we don't even look at the length when we parse those optional
addresses.
The migration code had similar parsing code that's better but
it also has some deficiencies. The length is overcounted first
of all as it includes the header itself. It also fails to check
the length before dereferencing the sa_family field.
This patch fixes those problems in parse_sockaddr_pair and then
uses it in parse_ipsecrequest.
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]> |
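The validation gap described here is the classic problem of parsing a variable-length structure whose trailing data depends on a field (here the address family) that must itself be checked against the remaining length first. A simplified, self-contained sketch of that order of checks follows; it is not the af_key code and uses plain sockaddr structures for illustration.

#include <netinet/in.h>
#include <stddef.h>
#include <string.h>
#include <sys/socket.h>

/* Validate a buffer that is supposed to hold one sockaddr.
 * Check the available length *before* reading sa_family, then check it
 * again against the size implied by that family. Returns the number of
 * bytes consumed, or 0 if the buffer is malformed. */
static size_t parse_one_sockaddr(const unsigned char *buf, size_t len)
{
    struct sockaddr sa;

    if (len < sizeof(struct sockaddr))      /* enough to read sa_family? */
        return 0;
    memcpy(&sa, buf, sizeof(sa));

    switch (sa.sa_family) {
    case AF_INET:
        return len >= sizeof(struct sockaddr_in) ?
               sizeof(struct sockaddr_in) : 0;
    case AF_INET6:
        return len >= sizeof(struct sockaddr_in6) ?
               sizeof(struct sockaddr_in6) : 0;
    default:
        return 0;                           /* unknown family: reject */
    }
}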
rsvg_radial_gradient_set_atts (RsvgNode * self, RsvgHandle * ctx, RsvgPropertyBag * atts)
{
RsvgRadialGradient *grad = (RsvgRadialGradient *) self;
const char *value;
if (rsvg_property_bag_size (atts)) {
if ((value = rsvg_property_bag_lookup (atts, "id")))
rsvg_defs_register_name (ctx->priv->defs, value, self);
if ((value = rsvg_property_bag_lookup (atts, "cx"))) {
grad->cx = _rsvg_css_parse_length (value);
grad->hascx = TRUE;
if (!grad->hasfx)
grad->fx = grad->cx;
}
if ((value = rsvg_property_bag_lookup (atts, "cy"))) {
grad->cy = _rsvg_css_parse_length (value);
grad->hascy = TRUE;
if (!grad->hasfy)
grad->fy = grad->cy;
}
if ((value = rsvg_property_bag_lookup (atts, "r"))) {
grad->r = _rsvg_css_parse_length (value);
grad->hasr = TRUE;
}
if ((value = rsvg_property_bag_lookup (atts, "fx"))) {
grad->fx = _rsvg_css_parse_length (value);
grad->hasfx = TRUE;
}
if ((value = rsvg_property_bag_lookup (atts, "fy"))) {
grad->fy = _rsvg_css_parse_length (value);
grad->hasfy = TRUE;
}
if ((value = rsvg_property_bag_lookup (atts, "xlink:href"))) {
if (self != rsvg_defs_lookup (ctx->priv->defs, value))
rsvg_defs_add_resolver (ctx->priv->defs, &grad->fallback, value);
}
if ((value = rsvg_property_bag_lookup (atts, "gradientTransform"))) {
rsvg_parse_transform (&grad->affine, value);
grad->hastransform = TRUE;
}
if ((value = rsvg_property_bag_lookup (atts, "color"))) {
grad->current_color = rsvg_css_parse_color (value, 0);
}
if ((value = rsvg_property_bag_lookup (atts, "spreadMethod"))) {
if (!strcmp (value, "pad"))
grad->spread = CAIRO_EXTEND_PAD;
else if (!strcmp (value, "reflect"))
grad->spread = CAIRO_EXTEND_REFLECT;
else if (!strcmp (value, "repeat"))
grad->spread = CAIRO_EXTEND_REPEAT;
grad->hasspread = TRUE;
}
if ((value = rsvg_property_bag_lookup (atts, "gradientUnits"))) {
if (!strcmp (value, "userSpaceOnUse"))
grad->obj_bbox = FALSE;
else if (!strcmp (value, "objectBoundingBox"))
grad->obj_bbox = TRUE;
grad->hasbbox = TRUE;
}
rsvg_parse_style_attrs (ctx, self->state, "radialGradient", NULL, NULL, atts);
}
} | 0 | [
"CWE-125"
]
| librsvg | 0035e95118a60c0cd3949c2300472d805e16a022 | 14,879,690,220,243,133,000,000,000,000,000,000,000 | 62 | bgo#744299 - Ensure the type of pattern fallbacks
Atte Kettunen's fuzz testing yielded an SVG with a pattern paint server that
had an xlink:href to a *rect*, not to another pattern. Since we were not checking
the type of resolved nodes when applying pattern fallbacks, we were using a
structure of the wrong type.
Fixes https://bugzilla.gnome.org/show_bug.cgi?id=744299
Signed-off-by: Federico Mena Quintero <[email protected]> |
string_append2_listele_n(gstring * list, const uschar * sepstr,
const uschar * ele, unsigned len)
{
if (list && list->ptr)
list = string_cat(list, sepstr);
list = string_catn(list, ele, len);
(void) string_from_gstring(list);
return list;
} | 0 | []
| exim | 2600301ba6dbac5c9d640c87007a07ee6dcea1f4 | 6,626,045,891,434,462,000,000,000,000,000,000,000 | 10 | string.c: do not interpret '\\' before '\0' (CVE-2019-15846)
Add documents about CVE-2019-15846
Add testcase for CVE-2019-15846
Update Changelog
Add Announcements |
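The fix named here concerns an unescaping loop that must not interpret a backslash when the next byte is already the terminating NUL, since doing so reads past the end of the string. A self-contained sketch of that guard (not Exim's actual code):

/* Copy src to dst, resolving "\x" escapes, but never interpreting a
 * backslash that is the last character before the terminating NUL.
 * Simplified illustration only; dst must be at least as large as src. */
static void unescape(char *dst, const char *src)
{
    while (*src) {
        if (*src == '\\' && src[1] != '\0') {   /* do not step past NUL */
            src++;                              /* skip the backslash */
            *dst++ = *src++;                    /* copy escaped char literally */
        } else {
            *dst++ = *src++;                    /* plain char (or trailing '\') */
        }
    }
    *dst = '\0';
}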
TEST_P(Security, BuiltinAuthenticationAndAccessAndCryptoPlugin_PermissionsEnableDiscoveryDisableAccessNone_validation_ok_enable_discovery_enable_access_none)
// *INDENT-ON*
{
PubSubReader<HelloWorldType> reader(TEST_TOPIC_NAME);
PubSubWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
std::string governance_file("governance_enable_discovery_disable_access_none.smime");
BuiltinAuthenticationAndAccessAndCryptoPlugin_Permissions_validation_ok_common(reader, writer, governance_file);
} | 0 | [
"CWE-284"
]
| Fast-DDS | d2aeab37eb4fad4376b68ea4dfbbf285a2926384 | 252,356,070,950,150,960,000,000,000,000,000,000,000 | 9 | check remote permissions (#1387)
* Refs 5346. Blackbox test
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. one-way string compare
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. Do not add partition separator on last partition
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. Uncrustify
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. Uncrustify
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Access control unit testing
It only covers Partition and Topic permissions
Signed-off-by: Iker Luengo <[email protected]>
* Refs #3680. Fix partition check on Permissions plugin.
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Uncrustify
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Fix tests on mac
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Fix windows tests
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Avoid memory leak on test
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Proxy data mocks should not return temporary objects
Signed-off-by: Iker Luengo <[email protected]>
* refs 3680. uncrustify
Signed-off-by: Iker Luengo <[email protected]>
Co-authored-by: Miguel Company <[email protected]> |
static void hdr_dump_keyslots(struct crypt_device *cd, json_object *hdr_jobj)
{
char slot[16];
json_object *keyslots_jobj, *digests_jobj, *jobj2, *jobj3, *val;
const char *tmps;
int i, j, r;
log_std(cd, "Keyslots:\n");
json_object_object_get_ex(hdr_jobj, "keyslots", &keyslots_jobj);
for (j = 0; j < LUKS2_KEYSLOTS_MAX; j++) {
(void) snprintf(slot, sizeof(slot), "%i", j);
json_object_object_get_ex(keyslots_jobj, slot, &val);
if (!val)
continue;
json_object_object_get_ex(val, "type", &jobj2);
tmps = json_object_get_string(jobj2);
r = LUKS2_keyslot_for_segment(crypt_get_hdr(cd, CRYPT_LUKS2), j, CRYPT_ONE_SEGMENT);
log_std(cd, " %s: %s%s\n", slot, tmps, r == -ENOENT ? " (unbound)" : "");
if (json_object_object_get_ex(val, "key_size", &jobj2))
log_std(cd, "\tKey: %u bits\n", crypt_jobj_get_uint32(jobj2) * 8);
log_std(cd, "\tPriority: %s\n", get_priority_desc(val));
LUKS2_keyslot_dump(cd, j);
json_object_object_get_ex(hdr_jobj, "digests", &digests_jobj);
json_object_object_foreach(digests_jobj, key2, val2) {
json_object_object_get_ex(val2, "keyslots", &jobj2);
for (i = 0; i < (int) json_object_array_length(jobj2); i++) {
jobj3 = json_object_array_get_idx(jobj2, i);
if (!strcmp(slot, json_object_get_string(jobj3))) {
log_std(cd, "\tDigest ID: %s\n", key2);
}
}
}
}
} | 0 | [
"CWE-787"
]
| cryptsetup | 52f5cb8cedf22fb3e14c744814ec8af7614146c7 | 140,771,245,638,143,120,000,000,000,000,000,000,000 | 41 | Check segment gaps regardless of heap space.
Segments are validated in hdr_validate_segments. Gaps in segment keys
are detected when collecting offsets. But if an invalid segment is very
large, larger than count, it could happen that cryptsetup is unable to
allocate enough memory, not giving a clue about what actually is the
problem.
Therefore check for gaps even if not enough memory is available. This
gives much more information with debug output enabled.
Obviously cryptsetup still fails if the segments are perfectly fine but not
enough RAM is available. But at that stage, the user knows that it's the
fault of the system, not of an invalid segment. |
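The idea in this message — detect gaps in the numbered segment keys before (and independently of) allocating memory proportional to the largest key — can be sketched with a simple pass over the key indices. The JSON handling is omitted; the helper below only shows the gap check itself and is not libcryptsetup code.

#include <stdbool.h>
#include <stddef.h>

/* Given the sorted list of segment indices found in the header, report
 * whether they form the contiguous sequence 0..count-1. A gap (or an
 * absurdly large index) is a validation error regardless of how much
 * memory could be allocated for it. */
static bool segment_indices_contiguous(const unsigned *indices, size_t count)
{
    for (size_t i = 0; i < count; i++)
        if (indices[i] != (unsigned)i)
            return false;   /* gap or out-of-order index */
    return true;
}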
MagickPrivate int XDialogWidget(Display *display,XWindows *windows,
const char *action,const char *query,char *reply)
{
#define CancelButtonText "Cancel"
char
primary_selection[MagickPathExtent];
int
x;
int
i;
static MagickBooleanType
raised = MagickFalse;
Status
status;
unsigned int
anomaly,
height,
width;
size_t
state;
XEvent
event;
XFontStruct
*font_info;
XTextProperty
window_name;
XWidgetInfo
action_info,
cancel_info,
reply_info,
special_info,
text_info;
XWindowChanges
window_changes;
/*
Determine Dialog widget attributes.
*/
assert(display != (Display *) NULL);
assert(windows != (XWindows *) NULL);
assert(action != (char *) NULL);
assert(query != (char *) NULL);
assert(reply != (char *) NULL);
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",action);
XCheckRefreshWindows(display,windows);
font_info=windows->widget.font_info;
width=WidgetTextWidth(font_info,(char *) action);
if (WidgetTextWidth(font_info,CancelButtonText) > width)
width=WidgetTextWidth(font_info,CancelButtonText);
width+=(3*QuantumMargin) >> 1;
height=(unsigned int) (font_info->ascent+font_info->descent);
/*
Position Dialog widget.
*/
windows->widget.width=(unsigned int) MagickMax((int) (2*width),(int)
WidgetTextWidth(font_info,(char *) query));
if (windows->widget.width < WidgetTextWidth(font_info,reply))
windows->widget.width=WidgetTextWidth(font_info,reply);
windows->widget.width+=6*QuantumMargin;
windows->widget.min_width=(unsigned int)
(width+28*XTextWidth(font_info,"#",1)+4*QuantumMargin);
if (windows->widget.width < windows->widget.min_width)
windows->widget.width=windows->widget.min_width;
windows->widget.height=(unsigned int) (7*height+(QuantumMargin << 1));
windows->widget.min_height=windows->widget.height;
if (windows->widget.height < windows->widget.min_height)
windows->widget.height=windows->widget.min_height;
XConstrainWindowPosition(display,&windows->widget);
/*
Map Dialog widget.
*/
(void) CopyMagickString(windows->widget.name,"Dialog",MagickPathExtent);
status=XStringListToTextProperty(&windows->widget.name,1,&window_name);
if (status != False)
{
XSetWMName(display,windows->widget.id,&window_name);
XSetWMIconName(display,windows->widget.id,&window_name);
(void) XFree((void *) window_name.value);
}
window_changes.width=(int) windows->widget.width;
window_changes.height=(int) windows->widget.height;
window_changes.x=windows->widget.x;
window_changes.y=windows->widget.y;
(void) XReconfigureWMWindow(display,windows->widget.id,windows->widget.screen,
(unsigned int) (CWWidth | CWHeight | CWX | CWY),&window_changes);
(void) XMapRaised(display,windows->widget.id);
windows->widget.mapped=MagickFalse;
/*
Respond to X events.
*/
anomaly=(LocaleCompare(action,"Background") == 0) ||
(LocaleCompare(action,"New") == 0) ||
(LocaleCompare(action,"Quantize") == 0) ||
(LocaleCompare(action,"Resize") == 0) ||
(LocaleCompare(action,"Save") == 0) ||
(LocaleCompare(action,"Shade") == 0);
state=UpdateConfigurationState;
XSetCursorState(display,windows,MagickTrue);
do
{
if (state & UpdateConfigurationState)
{
/*
Initialize button information.
*/
XGetWidgetInfo(CancelButtonText,&cancel_info);
cancel_info.width=width;
cancel_info.height=(unsigned int) ((3*height) >> 1);
cancel_info.x=(int)
(windows->widget.width-cancel_info.width-((3*QuantumMargin) >> 1));
cancel_info.y=(int)
(windows->widget.height-cancel_info.height-((3*QuantumMargin) >> 1));
XGetWidgetInfo(action,&action_info);
action_info.width=width;
action_info.height=(unsigned int) ((3*height) >> 1);
action_info.x=cancel_info.x-(cancel_info.width+QuantumMargin+
(action_info.bevel_width << 1));
action_info.y=cancel_info.y;
/*
Initialize reply information.
*/
XGetWidgetInfo(reply,&reply_info);
reply_info.raised=MagickFalse;
reply_info.bevel_width--;
reply_info.width=windows->widget.width-(3*QuantumMargin);
reply_info.height=height << 1;
reply_info.x=(3*QuantumMargin) >> 1;
reply_info.y=action_info.y-reply_info.height-QuantumMargin;
/*
Initialize option information.
*/
XGetWidgetInfo("Dither",&special_info);
special_info.raised=raised;
special_info.bevel_width--;
special_info.width=(unsigned int) QuantumMargin >> 1;
special_info.height=(unsigned int) QuantumMargin >> 1;
special_info.x=reply_info.x;
special_info.y=action_info.y+action_info.height-special_info.height;
if (LocaleCompare(action,"Background") == 0)
special_info.text=(char *) "Backdrop";
if (LocaleCompare(action,"New") == 0)
special_info.text=(char *) "Gradation";
if (LocaleCompare(action,"Resize") == 0)
special_info.text=(char *) "Constrain ratio";
if (LocaleCompare(action,"Save") == 0)
special_info.text=(char *) "Non-progressive";
if (LocaleCompare(action,"Shade") == 0)
special_info.text=(char *) "Color shading";
/*
Initialize text information.
*/
XGetWidgetInfo(query,&text_info);
text_info.width=reply_info.width;
text_info.height=height;
text_info.x=reply_info.x-(QuantumMargin >> 1);
text_info.y=QuantumMargin;
text_info.center=MagickFalse;
state&=(~UpdateConfigurationState);
}
if (state & RedrawWidgetState)
{
/*
Redraw Dialog widget.
*/
XDrawWidgetText(display,&windows->widget,&text_info);
XDrawBeveledMatte(display,&windows->widget,&reply_info);
XDrawMatteText(display,&windows->widget,&reply_info);
if (anomaly)
XDrawBeveledButton(display,&windows->widget,&special_info);
XDrawBeveledButton(display,&windows->widget,&action_info);
XDrawBeveledButton(display,&windows->widget,&cancel_info);
XHighlightWidget(display,&windows->widget,BorderOffset,BorderOffset);
state&=(~RedrawWidgetState);
}
/*
Wait for next event.
*/
(void) XIfEvent(display,&event,XScreenEvent,(char *) windows);
switch (event.type)
{
case ButtonPress:
{
if (anomaly)
if (MatteIsActive(special_info,event.xbutton))
{
/*
Option button status changed.
*/
special_info.raised=!special_info.raised;
XDrawBeveledButton(display,&windows->widget,&special_info);
break;
}
if (MatteIsActive(action_info,event.xbutton))
{
/*
User pressed Action button.
*/
action_info.raised=MagickFalse;
XDrawBeveledButton(display,&windows->widget,&action_info);
break;
}
if (MatteIsActive(cancel_info,event.xbutton))
{
/*
User pressed Cancel button.
*/
cancel_info.raised=MagickFalse;
XDrawBeveledButton(display,&windows->widget,&cancel_info);
break;
}
if (MatteIsActive(reply_info,event.xbutton) == MagickFalse)
break;
if (event.xbutton.button != Button2)
{
static Time
click_time;
/*
Move text cursor to position of button press.
*/
x=event.xbutton.x-reply_info.x-(QuantumMargin >> 2);
for (i=1; i <= Extent(reply_info.marker); i++)
if (XTextWidth(font_info,reply_info.marker,i) > x)
break;
reply_info.cursor=reply_info.marker+i-1;
if (event.xbutton.time > (click_time+DoubleClick))
reply_info.highlight=MagickFalse;
else
{
/*
Become the XA_PRIMARY selection owner.
*/
(void) CopyMagickString(primary_selection,reply_info.text,
MagickPathExtent);
(void) XSetSelectionOwner(display,XA_PRIMARY,windows->widget.id,
event.xbutton.time);
reply_info.highlight=XGetSelectionOwner(display,XA_PRIMARY) ==
windows->widget.id ? MagickTrue : MagickFalse;
}
XDrawMatteText(display,&windows->widget,&reply_info);
click_time=event.xbutton.time;
break;
}
/*
Request primary selection.
*/
(void) XConvertSelection(display,XA_PRIMARY,XA_STRING,XA_STRING,
windows->widget.id,event.xbutton.time);
break;
}
case ButtonRelease:
{
if (windows->widget.mapped == MagickFalse)
break;
if (action_info.raised == MagickFalse)
{
if (event.xbutton.window == windows->widget.id)
if (MatteIsActive(action_info,event.xbutton))
state|=ExitState;
action_info.raised=MagickTrue;
XDrawBeveledButton(display,&windows->widget,&action_info);
}
if (cancel_info.raised == MagickFalse)
{
if (event.xbutton.window == windows->widget.id)
if (MatteIsActive(cancel_info,event.xbutton))
{
*reply_info.text='\0';
state|=ExitState;
}
cancel_info.raised=MagickTrue;
XDrawBeveledButton(display,&windows->widget,&cancel_info);
}
break;
}
case ClientMessage:
{
/*
If client window delete message, exit.
*/
if (event.xclient.message_type != windows->wm_protocols)
break;
if (*event.xclient.data.l == (int) windows->wm_take_focus)
{
(void) XSetInputFocus(display,event.xclient.window,RevertToParent,
(Time) event.xclient.data.l[1]);
break;
}
if (*event.xclient.data.l != (int) windows->wm_delete_window)
break;
if (event.xclient.window == windows->widget.id)
{
*reply_info.text='\0';
state|=ExitState;
break;
}
break;
}
case ConfigureNotify:
{
/*
Update widget configuration.
*/
if (event.xconfigure.window != windows->widget.id)
break;
if ((event.xconfigure.width == (int) windows->widget.width) &&
(event.xconfigure.height == (int) windows->widget.height))
break;
windows->widget.width=(unsigned int)
MagickMax(event.xconfigure.width,(int) windows->widget.min_width);
windows->widget.height=(unsigned int)
MagickMax(event.xconfigure.height,(int) windows->widget.min_height);
state|=UpdateConfigurationState;
break;
}
case EnterNotify:
{
if (event.xcrossing.window != windows->widget.id)
break;
state&=(~InactiveWidgetState);
break;
}
case Expose:
{
if (event.xexpose.window != windows->widget.id)
break;
if (event.xexpose.count != 0)
break;
state|=RedrawWidgetState;
break;
}
case KeyPress:
{
static char
command[MagickPathExtent];
static int
length;
static KeySym
key_symbol;
/*
Respond to a user key press.
*/
if (event.xkey.window != windows->widget.id)
break;
length=XLookupString((XKeyEvent *) &event.xkey,command,
(int) sizeof(command),&key_symbol,(XComposeStatus *) NULL);
*(command+length)='\0';
if ((key_symbol == XK_Return) || (key_symbol == XK_KP_Enter))
{
action_info.raised=MagickFalse;
XDrawBeveledButton(display,&windows->widget,&action_info);
state|=ExitState;
break;
}
if (key_symbol == XK_Control_L)
{
state|=ControlState;
break;
}
if (state & ControlState)
switch ((int) key_symbol)
{
case XK_u:
case XK_U:
{
/*
Erase the entire line of text.
*/
*reply_info.text='\0';
reply_info.cursor=reply_info.text;
reply_info.marker=reply_info.text;
reply_info.highlight=MagickFalse;
break;
}
default:
break;
}
XEditText(display,&reply_info,key_symbol,command,state);
XDrawMatteText(display,&windows->widget,&reply_info);
break;
}
case KeyRelease:
{
static char
command[MagickPathExtent];
static KeySym
key_symbol;
/*
Respond to a user key release.
*/
if (event.xkey.window != windows->widget.id)
break;
(void) XLookupString((XKeyEvent *) &event.xkey,command,
(int) sizeof(command),&key_symbol,(XComposeStatus *) NULL);
if (key_symbol == XK_Control_L)
state&=(~ControlState);
break;
}
case LeaveNotify:
{
if (event.xcrossing.window != windows->widget.id)
break;
state|=InactiveWidgetState;
break;
}
case MotionNotify:
{
/*
Discard pending button motion events.
*/
while (XCheckMaskEvent(display,ButtonMotionMask,&event)) ;
if (state & InactiveWidgetState)
break;
if (action_info.raised == MatteIsActive(action_info,event.xmotion))
{
/*
Action button status changed.
*/
action_info.raised=action_info.raised == MagickFalse ?
MagickTrue : MagickFalse;
XDrawBeveledButton(display,&windows->widget,&action_info);
break;
}
if (cancel_info.raised == MatteIsActive(cancel_info,event.xmotion))
{
/*
Cancel button status changed.
*/
cancel_info.raised=cancel_info.raised == MagickFalse ?
MagickTrue : MagickFalse;
XDrawBeveledButton(display,&windows->widget,&cancel_info);
break;
}
break;
}
case SelectionClear:
{
reply_info.highlight=MagickFalse;
XDrawMatteText(display,&windows->widget,&reply_info);
break;
}
case SelectionNotify:
{
Atom
type;
int
format;
unsigned char
*data;
unsigned long
after,
length;
/*
Obtain response from primary selection.
*/
if (event.xselection.property == (Atom) None)
break;
status=XGetWindowProperty(display,event.xselection.requestor,
event.xselection.property,0L,2047L,MagickTrue,XA_STRING,&type,
&format,&length,&after,&data);
if ((status != Success) || (type != XA_STRING) || (format == 32) ||
(length == 0))
break;
if ((Extent(reply_info.text)+length) >= (MagickPathExtent-1))
(void) XBell(display,0);
else
{
/*
Insert primary selection in reply text.
*/
*(data+length)='\0';
XEditText(display,&reply_info,(KeySym) XK_Insert,(char *) data,
state);
XDrawMatteText(display,&windows->widget,&reply_info);
}
(void) XFree((void *) data);
break;
}
case SelectionRequest:
{
XSelectionEvent
notify;
XSelectionRequestEvent
*request;
if (reply_info.highlight == MagickFalse)
break;
/*
Set primary selection.
*/
request=(&(event.xselectionrequest));
(void) XChangeProperty(request->display,request->requestor,
request->property,request->target,8,PropModeReplace,
(unsigned char *) primary_selection,Extent(primary_selection));
notify.type=SelectionNotify;
notify.display=request->display;
notify.requestor=request->requestor;
notify.selection=request->selection;
notify.target=request->target;
notify.time=request->time;
if (request->property == None)
notify.property=request->target;
else
notify.property=request->property;
(void) XSendEvent(request->display,request->requestor,False,0,
(XEvent *) ¬ify);
}
default:
break;
}
} while ((state & ExitState) == 0);
XSetCursorState(display,windows,MagickFalse);
(void) XWithdrawWindow(display,windows->widget.id,windows->widget.screen);
XCheckRefreshWindows(display,windows);
if (anomaly)
if (special_info.raised)
if (*reply != '\0')
raised=MagickTrue;
return(raised == MagickFalse);
} | 0 | []
| ImageMagick | d95735d25a39300dd874f0227c430d5dbb1f83cc | 180,948,172,065,645,260,000,000,000,000,000,000,000 | 542 | https://github.com/ImageMagick/ImageMagick/issues/3333 |
static char *theme_format_compress_colors(THEME_REC *theme, const char *format)
{
GString *str;
char *ret, last_fg, last_bg;
str = g_string_new(NULL);
last_fg = last_bg = '\0';
while (*format != '\0') {
if (*format == '$') {
/* $variable, skrip it entirely */
theme_format_append_variable(str, &format);
last_fg = last_bg = '\0';
} else if (*format != '%') {
/* a normal character */
g_string_append_c(str, *format);
format++;
} else {
/* %format */
format++;
if (IS_OLD_FORMAT(*format, last_fg, last_bg)) {
/* active color set again */
} else if (IS_FGCOLOR_FORMAT(*format) &&
format[1] == '%' &&
IS_FGCOLOR_FORMAT(format[2]) &&
(*format != 'n' || format[2] == 'n')) {
/* two fg colors in a row. bg colors are
so rare that we don't bother checking
them */
} else {
/* some format, add it */
g_string_append_c(str, '%');
g_string_append_c(str, *format);
if (IS_FGCOLOR_FORMAT(*format))
last_fg = *format;
if (IS_BGCOLOR_FORMAT(*format))
last_bg = *format;
}
format++;
}
}
ret = str->str;
g_string_free(str, FALSE);
return ret;
} | 0 | [
"CWE-125"
]
| irssi | e0c66e31224894674356ddaf6d46016c1abc994f | 24,463,501,916,853,870,000,000,000,000,000,000,000 | 47 | Previous theme patch fixes by c0ffee
git-svn-id: http://svn.irssi.org/repos/irssi/trunk@3058 dbcabf3a-b0e7-0310-adc4-f8d773084564 |
static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack,
struct ip_tunnel_info *info, bool *metadata,
bool *use_udp6_rx_checksums, bool *ttl_inherit,
enum ifla_geneve_df *df, bool changelink)
{
int attrtype;
if (data[IFLA_GENEVE_REMOTE] && data[IFLA_GENEVE_REMOTE6]) {
NL_SET_ERR_MSG(extack,
"Cannot specify both IPv4 and IPv6 Remote addresses");
return -EINVAL;
}
if (data[IFLA_GENEVE_REMOTE]) {
if (changelink && (ip_tunnel_info_af(info) == AF_INET6)) {
attrtype = IFLA_GENEVE_REMOTE;
goto change_notsup;
}
info->key.u.ipv4.dst =
nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);
if (ipv4_is_multicast(info->key.u.ipv4.dst)) {
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE],
"Remote IPv4 address cannot be Multicast");
return -EINVAL;
}
}
if (data[IFLA_GENEVE_REMOTE6]) {
#if IS_ENABLED(CONFIG_IPV6)
if (changelink && (ip_tunnel_info_af(info) == AF_INET)) {
attrtype = IFLA_GENEVE_REMOTE6;
goto change_notsup;
}
info->mode = IP_TUNNEL_INFO_IPV6;
info->key.u.ipv6.dst =
nla_get_in6_addr(data[IFLA_GENEVE_REMOTE6]);
if (ipv6_addr_type(&info->key.u.ipv6.dst) &
IPV6_ADDR_LINKLOCAL) {
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6],
"Remote IPv6 address cannot be link-local");
return -EINVAL;
}
if (ipv6_addr_is_multicast(&info->key.u.ipv6.dst)) {
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6],
"Remote IPv6 address cannot be Multicast");
return -EINVAL;
}
info->key.tun_flags |= TUNNEL_CSUM;
*use_udp6_rx_checksums = true;
#else
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6],
"IPv6 support not enabled in the kernel");
return -EPFNOSUPPORT;
#endif
}
if (data[IFLA_GENEVE_ID]) {
__u32 vni;
__u8 tvni[3];
__be64 tunid;
vni = nla_get_u32(data[IFLA_GENEVE_ID]);
tvni[0] = (vni & 0x00ff0000) >> 16;
tvni[1] = (vni & 0x0000ff00) >> 8;
tvni[2] = vni & 0x000000ff;
tunid = vni_to_tunnel_id(tvni);
if (changelink && (tunid != info->key.tun_id)) {
attrtype = IFLA_GENEVE_ID;
goto change_notsup;
}
info->key.tun_id = tunid;
}
if (data[IFLA_GENEVE_TTL_INHERIT]) {
if (nla_get_u8(data[IFLA_GENEVE_TTL_INHERIT]))
*ttl_inherit = true;
else
*ttl_inherit = false;
} else if (data[IFLA_GENEVE_TTL]) {
info->key.ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);
*ttl_inherit = false;
}
if (data[IFLA_GENEVE_TOS])
info->key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
if (data[IFLA_GENEVE_DF])
*df = nla_get_u8(data[IFLA_GENEVE_DF]);
if (data[IFLA_GENEVE_LABEL]) {
info->key.label = nla_get_be32(data[IFLA_GENEVE_LABEL]) &
IPV6_FLOWLABEL_MASK;
if (info->key.label && (!(info->mode & IP_TUNNEL_INFO_IPV6))) {
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_LABEL],
"Label attribute only applies for IPv6 Geneve devices");
return -EINVAL;
}
}
if (data[IFLA_GENEVE_PORT]) {
if (changelink) {
attrtype = IFLA_GENEVE_PORT;
goto change_notsup;
}
info->key.tp_dst = nla_get_be16(data[IFLA_GENEVE_PORT]);
}
if (data[IFLA_GENEVE_COLLECT_METADATA]) {
if (changelink) {
attrtype = IFLA_GENEVE_COLLECT_METADATA;
goto change_notsup;
}
*metadata = true;
}
if (data[IFLA_GENEVE_UDP_CSUM]) {
if (changelink) {
attrtype = IFLA_GENEVE_UDP_CSUM;
goto change_notsup;
}
if (nla_get_u8(data[IFLA_GENEVE_UDP_CSUM]))
info->key.tun_flags |= TUNNEL_CSUM;
}
if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]) {
#if IS_ENABLED(CONFIG_IPV6)
if (changelink) {
attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_TX;
goto change_notsup;
}
if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]))
info->key.tun_flags &= ~TUNNEL_CSUM;
#else
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX],
"IPv6 support not enabled in the kernel");
return -EPFNOSUPPORT;
#endif
}
if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]) {
#if IS_ENABLED(CONFIG_IPV6)
if (changelink) {
attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_RX;
goto change_notsup;
}
if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]))
*use_udp6_rx_checksums = false;
#else
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX],
"IPv6 support not enabled in the kernel");
return -EPFNOSUPPORT;
#endif
}
return 0;
change_notsup:
NL_SET_ERR_MSG_ATTR(extack, data[attrtype],
"Changing VNI, Port, endpoint IP address family, external, and UDP checksum attributes are not supported");
return -EOPNOTSUPP;
} | 0 | []
| net | 6c8991f41546c3c472503dff1ea9daaddf9331c2 | 336,736,277,758,413,260,000,000,000,000,000,000,000 | 166 | net: ipv6_stub: use ip6_dst_lookup_flow instead of ip6_dst_lookup
ipv6_stub uses the ip6_dst_lookup function to allow other modules to
perform IPv6 lookups. However, this function skips the XFRM layer
entirely.
All users of ipv6_stub->ip6_dst_lookup use ip_route_output_flow (via the
ip_route_output_key and ip_route_output helpers) for their IPv4 lookups,
which calls xfrm_lookup_route(). This patch fixes this inconsistent
behavior by switching the stub to ip6_dst_lookup_flow, which also calls
xfrm_lookup_route().
This requires some changes in all the callers, as these two functions
take different arguments and have different return types.
Fixes: 5f81bd2e5d80 ("ipv6: export a stub for IPv6 symbols used by vxlan")
Reported-by: Xiumei Mu <[email protected]>
Signed-off-by: Sabrina Dubroca <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
void nego_set_restricted_admin_mode_required(rdpNego* nego, BOOL RestrictedAdminModeRequired)
{
WLog_DBG(TAG, "Enabling restricted admin mode: %s",
RestrictedAdminModeRequired ? "TRUE" : "FALSE");
nego->RestrictedAdminModeRequired = RestrictedAdminModeRequired;
} | 0 | [
"CWE-125"
]
| FreeRDP | 6b485b146a1b9d6ce72dfd7b5f36456c166e7a16 | 179,702,917,927,009,440,000,000,000,000,000,000,000 | 6 | Fixed oob read in irp_write and similar |
bool Binary::remove(const LoadCommand& command) {
const auto it = std::find_if(
std::begin(commands_), std::end(commands_),
[&command] (const std::unique_ptr<LoadCommand>& cmd) {
return *cmd == command;
});
if (it == std::end(commands_)) {
LIEF_ERR("Unable to find command: {}", command);
return false;
}
LoadCommand* cmd_rm = it->get();
if (DylibCommand::classof(cmd_rm)) {
auto it_cache = std::find(std::begin(libraries_), std::end(libraries_), cmd_rm);
if (it_cache == std::end(libraries_)) {
const auto* lib = cmd_rm->as<const DylibCommand>();
LIEF_WARN("Library {} not found in cache. The binary object is likely in an inconsistent state", lib->name());
} else {
libraries_.erase(it_cache);
}
}
if (SegmentCommand::classof(cmd_rm)) {
auto it_cache = std::find(std::begin(segments_), std::end(segments_), cmd_rm);
const auto* seg = cmd_rm->as<const SegmentCommand>();
if (it_cache == std::end(segments_)) {
LIEF_WARN("Segment {} not found in cache. The binary object is likely in an inconsistent state", seg->name());
} else {
// Update the indexes to keep a consistent state
for (auto it = it_cache; it != std::end(segments_); ++it) {
(*it)->index_--;
}
segments_.erase(it_cache);
}
}
const size_t cmd_rm_offset = cmd_rm->command_offset();
for (std::unique_ptr<LoadCommand>& cmd : commands_) {
if (cmd->command_offset() >= cmd_rm_offset) {
cmd->command_offset(cmd->command_offset() - cmd_rm->size());
}
}
Header& header = this->header();
header.sizeof_cmds(header.sizeof_cmds() - cmd_rm->size());
header.nb_cmds(header.nb_cmds() - 1);
available_command_space_ += cmd_rm->size();
commands_.erase(it);
refresh_seg_offset();
return true;
} | 0 | [
"CWE-703"
]
| LIEF | 7acf0bc4224081d4f425fcc8b2e361b95291d878 | 137,947,996,225,161,100,000,000,000,000,000,000,000 | 56 | Resolve #764 |
longlong Item_func_row_count::val_int()
{
DBUG_ASSERT(fixed == 1);
THD *thd= current_thd;
return thd->get_row_count_func();
} | 0 | [
"CWE-120"
]
| server | eca207c46293bc72dd8d0d5622153fab4d3fccf1 | 128,122,773,763,371,280,000,000,000,000,000,000,000 | 7 | MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.
Precision should be kept below DECIMAL_MAX_SCALE for computations.
It can be bigger in Item_decimal. I'd fix this too but it changes the
existing behaviour, so it is problematic to fix.
struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *asoc,
const struct sctp_chunk *chunk)
{
struct sctp_chunk *retval;
void *cookie;
int cookie_len;
cookie = asoc->peer.cookie;
cookie_len = asoc->peer.cookie_len;
/* Build a cookie echo chunk. */
retval = sctp_make_chunk(asoc, SCTP_CID_COOKIE_ECHO, 0, cookie_len);
if (!retval)
goto nodata;
retval->subh.cookie_hdr =
sctp_addto_chunk(retval, cookie_len, cookie);
/* RFC 2960 6.4 Multi-homed SCTP Endpoints
*
* An endpoint SHOULD transmit reply chunks (e.g., SACK,
* HEARTBEAT ACK, * etc.) to the same destination transport
* address from which it * received the DATA or control chunk
* to which it is replying.
*
* [COOKIE ECHO back to where the INIT ACK came from.]
*/
if (chunk)
retval->transport = chunk->transport;
nodata:
return retval;
} | 0 | [
"CWE-20"
]
| linux-2.6 | ba0166708ef4da7eeb61dd92bbba4d5a749d6561 | 113,179,727,241,077,500,000,000,000,000,000,000,000 | 32 | sctp: Fix kernel panic while process protocol violation parameter
Since sctp_sf_abort_violation() expects its 'arg' parameter to be of type
'struct sctp_chunk', it reads the chunk type and chunk length from the
chunk_hdr member of that chunk. But sctp_sf_violation_paramlen() is always
called with a 'struct sctp_paramhdr' typed parameter, which it then passes
on to sctp_sf_abort_violation(). This may cause a kernel panic.
sctp_sf_violation_paramlen()
|-- sctp_sf_abort_violation()
|-- sctp_make_abort_violation()
This patch fixes this problem. It also fixes two places that called
sctp_sf_violation_paramlen() with the wrong parameter type.
Signed-off-by: Wei Yongjun <[email protected]>
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static void tipc_sk_remove(struct tipc_sock *tsk)
{
struct sock *sk = &tsk->sk;
struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
__sock_put(sk);
}
} | 0 | [
"CWE-200",
"CWE-909"
]
| linux | d6d86830705f173fca6087a3e67ceaf68db80523 | 65,399,746,202,411,460,000,000,000,000,000,000,000 | 10 | net ticp:fix a kernel-infoleak in __tipc_sendmsg()
struct tipc_socket_addr.ref has a 4-byte hole,and __tipc_getname() currently
copying it to user space,causing kernel-infoleak.
BUG: KMSAN: kernel-infoleak in instrument_copy_to_user include/linux/instrumented.h:121 [inline]
BUG: KMSAN: kernel-infoleak in instrument_copy_to_user include/linux/instrumented.h:121 [inline] lib/usercopy.c:33
BUG: KMSAN: kernel-infoleak in _copy_to_user+0x1c9/0x270 lib/usercopy.c:33 lib/usercopy.c:33
instrument_copy_to_user include/linux/instrumented.h:121 [inline]
instrument_copy_to_user include/linux/instrumented.h:121 [inline] lib/usercopy.c:33
_copy_to_user+0x1c9/0x270 lib/usercopy.c:33 lib/usercopy.c:33
copy_to_user include/linux/uaccess.h:209 [inline]
copy_to_user include/linux/uaccess.h:209 [inline] net/socket.c:287
move_addr_to_user+0x3f6/0x600 net/socket.c:287 net/socket.c:287
__sys_getpeername+0x470/0x6b0 net/socket.c:1987 net/socket.c:1987
__do_sys_getpeername net/socket.c:1997 [inline]
__se_sys_getpeername net/socket.c:1994 [inline]
__do_sys_getpeername net/socket.c:1997 [inline] net/socket.c:1994
__se_sys_getpeername net/socket.c:1994 [inline] net/socket.c:1994
__x64_sys_getpeername+0xda/0x120 net/socket.c:1994 net/socket.c:1994
do_syscall_x64 arch/x86/entry/common.c:51 [inline]
do_syscall_x64 arch/x86/entry/common.c:51 [inline] arch/x86/entry/common.c:82
do_syscall_64+0x54/0xd0 arch/x86/entry/common.c:82 arch/x86/entry/common.c:82
entry_SYSCALL_64_after_hwframe+0x44/0xae
Uninit was stored to memory at:
tipc_getname+0x575/0x5e0 net/tipc/socket.c:757 net/tipc/socket.c:757
__sys_getpeername+0x3b3/0x6b0 net/socket.c:1984 net/socket.c:1984
__do_sys_getpeername net/socket.c:1997 [inline]
__se_sys_getpeername net/socket.c:1994 [inline]
__do_sys_getpeername net/socket.c:1997 [inline] net/socket.c:1994
__se_sys_getpeername net/socket.c:1994 [inline] net/socket.c:1994
__x64_sys_getpeername+0xda/0x120 net/socket.c:1994 net/socket.c:1994
do_syscall_x64 arch/x86/entry/common.c:51 [inline]
do_syscall_x64 arch/x86/entry/common.c:51 [inline] arch/x86/entry/common.c:82
do_syscall_64+0x54/0xd0 arch/x86/entry/common.c:82 arch/x86/entry/common.c:82
entry_SYSCALL_64_after_hwframe+0x44/0xae
Uninit was stored to memory at:
msg_set_word net/tipc/msg.h:212 [inline]
msg_set_destport net/tipc/msg.h:619 [inline]
msg_set_word net/tipc/msg.h:212 [inline] net/tipc/socket.c:1486
msg_set_destport net/tipc/msg.h:619 [inline] net/tipc/socket.c:1486
__tipc_sendmsg+0x44fa/0x5890 net/tipc/socket.c:1486 net/tipc/socket.c:1486
tipc_sendmsg+0xeb/0x140 net/tipc/socket.c:1402 net/tipc/socket.c:1402
sock_sendmsg_nosec net/socket.c:704 [inline]
sock_sendmsg net/socket.c:724 [inline]
sock_sendmsg_nosec net/socket.c:704 [inline] net/socket.c:2409
sock_sendmsg net/socket.c:724 [inline] net/socket.c:2409
____sys_sendmsg+0xe11/0x12c0 net/socket.c:2409 net/socket.c:2409
___sys_sendmsg net/socket.c:2463 [inline]
___sys_sendmsg net/socket.c:2463 [inline] net/socket.c:2492
__sys_sendmsg+0x704/0x840 net/socket.c:2492 net/socket.c:2492
__do_sys_sendmsg net/socket.c:2501 [inline]
__se_sys_sendmsg net/socket.c:2499 [inline]
__do_sys_sendmsg net/socket.c:2501 [inline] net/socket.c:2499
__se_sys_sendmsg net/socket.c:2499 [inline] net/socket.c:2499
__x64_sys_sendmsg+0xe2/0x120 net/socket.c:2499 net/socket.c:2499
do_syscall_x64 arch/x86/entry/common.c:51 [inline]
do_syscall_x64 arch/x86/entry/common.c:51 [inline] arch/x86/entry/common.c:82
do_syscall_64+0x54/0xd0 arch/x86/entry/common.c:82 arch/x86/entry/common.c:82
entry_SYSCALL_64_after_hwframe+0x44/0xae
Local variable skaddr created at:
__tipc_sendmsg+0x2d0/0x5890 net/tipc/socket.c:1419 net/tipc/socket.c:1419
tipc_sendmsg+0xeb/0x140 net/tipc/socket.c:1402 net/tipc/socket.c:1402
Bytes 4-7 of 16 are uninitialized
Memory access of size 16 starts at ffff888113753e00
Data copied to user address 0000000020000280
Reported-by: [email protected]
Signed-off-by: Haimin Zhang <[email protected]>
Acked-by: Jon Maloy <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]> |
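The infoleak pattern in this report — a struct with padding (here a 4-byte hole after tipc_socket_addr.ref) copied wholesale to user space — is conventionally avoided by zero-initializing the whole object before filling its fields, so the padding bytes never carry stale kernel memory. A stand-alone userspace sketch of the idea, with a hypothetical struct and memcpy() standing in for copy_to_user():

#include <stdint.h>
#include <string.h>

/* Hypothetical address struct with an implicit 4-byte padding hole
 * between 'ref' and 'node' on common 64-bit ABIs. */
struct addr_with_hole {
    uint32_t ref;
    /* 4 bytes of padding here */
    uint64_t node;
};

/* Fill the struct for export. memset() first so the padding bytes are
 * zero instead of whatever happened to be on the stack; memcpy() stands
 * in for copy_to_user() in this sketch. */
static void export_addr(void *user_buf, uint32_t ref, uint64_t node)
{
    struct addr_with_hole a;

    memset(&a, 0, sizeof(a));   /* clears the padding hole too */
    a.ref = ref;
    a.node = node;
    memcpy(user_buf, &a, sizeof(a));
}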
unset_global_local_option(char_u *name, void *from)
{
struct vimoption *p;
int opt_idx;
buf_T *buf = (buf_T *)from;
opt_idx = findoption(name);
if (opt_idx < 0)
return;
p = &(options[opt_idx]);
switch ((int)p->indir)
{
// global option with local value: use local value if it's been set
case PV_EP:
clear_string_option(&buf->b_p_ep);
break;
case PV_KP:
clear_string_option(&buf->b_p_kp);
break;
case PV_PATH:
clear_string_option(&buf->b_p_path);
break;
case PV_AR:
buf->b_p_ar = -1;
break;
case PV_BKC:
clear_string_option(&buf->b_p_bkc);
buf->b_bkc_flags = 0;
break;
case PV_TAGS:
clear_string_option(&buf->b_p_tags);
break;
case PV_TC:
clear_string_option(&buf->b_p_tc);
buf->b_tc_flags = 0;
break;
case PV_SISO:
curwin->w_p_siso = -1;
break;
case PV_SO:
curwin->w_p_so = -1;
break;
#ifdef FEAT_FIND_ID
case PV_DEF:
clear_string_option(&buf->b_p_def);
break;
case PV_INC:
clear_string_option(&buf->b_p_inc);
break;
#endif
case PV_DICT:
clear_string_option(&buf->b_p_dict);
break;
case PV_TSR:
clear_string_option(&buf->b_p_tsr);
break;
case PV_FP:
clear_string_option(&buf->b_p_fp);
break;
#ifdef FEAT_QUICKFIX
case PV_EFM:
clear_string_option(&buf->b_p_efm);
break;
case PV_GP:
clear_string_option(&buf->b_p_gp);
break;
case PV_MP:
clear_string_option(&buf->b_p_mp);
break;
#endif
#if defined(FEAT_BEVAL) && defined(FEAT_EVAL)
case PV_BEXPR:
clear_string_option(&buf->b_p_bexpr);
break;
#endif
#if defined(FEAT_CRYPT)
case PV_CM:
clear_string_option(&buf->b_p_cm);
break;
#endif
#ifdef FEAT_LINEBREAK
case PV_SBR:
clear_string_option(&((win_T *)from)->w_p_sbr);
break;
#endif
#ifdef FEAT_STL_OPT
case PV_STL:
clear_string_option(&((win_T *)from)->w_p_stl);
break;
#endif
case PV_UL:
buf->b_p_ul = NO_LOCAL_UNDOLEVEL;
break;
#ifdef FEAT_LISP
case PV_LW:
clear_string_option(&buf->b_p_lw);
break;
#endif
case PV_MENC:
clear_string_option(&buf->b_p_menc);
break;
case PV_LCS:
clear_string_option(&((win_T *)from)->w_p_lcs);
set_chars_option((win_T *)from, &((win_T *)from)->w_p_lcs);
redraw_later(NOT_VALID);
break;
case PV_VE:
clear_string_option(&((win_T *)from)->w_p_ve);
((win_T *)from)->w_ve_flags = 0;
break;
}
} | 0 | [
"CWE-122"
]
| vim | b7081e135a16091c93f6f5f7525a5c58fb7ca9f9 | 116,166,999,819,581,380,000,000,000,000,000,000,000 | 113 | patch 8.2.3402: invalid memory access when using :retab with large value
Problem: Invalid memory access when using :retab with large value.
Solution: Check the number is positive. |
static void scsi_realize(SCSIDevice *dev, Error **errp)
{
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
bool read_only;
if (!s->qdev.conf.blk) {
error_setg(errp, "drive property not set");
return;
}
if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
!blk_is_inserted(s->qdev.conf.blk)) {
error_setg(errp, "Device needs media, but drive is empty");
return;
}
if (!blkconf_blocksizes(&s->qdev.conf, errp)) {
return;
}
if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() &&
!s->qdev.hba_supports_iothread)
{
error_setg(errp, "HBA does not support iothreads");
return;
}
if (dev->type == TYPE_DISK) {
if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) {
return;
}
}
read_only = !blk_supports_write_perm(s->qdev.conf.blk);
if (dev->type == TYPE_ROM) {
read_only = true;
}
if (!blkconf_apply_backend_options(&dev->conf, read_only,
dev->type == TYPE_DISK, errp)) {
return;
}
if (s->qdev.conf.discard_granularity == -1) {
s->qdev.conf.discard_granularity =
MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY);
}
if (!s->version) {
s->version = g_strdup(qemu_hw_version());
}
if (!s->vendor) {
s->vendor = g_strdup("QEMU");
}
if (!s->device_id) {
if (s->serial) {
s->device_id = g_strdup_printf("%.20s", s->serial);
} else {
const char *str = blk_name(s->qdev.conf.blk);
if (str && *str) {
s->device_id = g_strdup(str);
}
}
}
if (blk_is_sg(s->qdev.conf.blk)) {
error_setg(errp, "unwanted /dev/sg*");
return;
}
if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
!(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) {
blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s);
} else {
blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s);
}
blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize);
blk_iostatus_enable(s->qdev.conf.blk);
add_boot_device_lchs(&dev->qdev, NULL,
dev->conf.lcyls,
dev->conf.lheads,
dev->conf.lsecs);
} | 0 | [
"CWE-193"
]
| qemu | b3af7fdf9cc537f8f0dd3e2423d83f5c99a457e8 | 314,521,614,116,127,700,000,000,000,000,000,000,000 | 85 | hw/scsi/scsi-disk: MODE_PAGE_ALLS not allowed in MODE SELECT commands
This avoids an off-by-one read of 'mode_sense_valid' buffer in
hw/scsi/scsi-disk.c:mode_sense_page().
Fixes: CVE-2021-3930
Cc: [email protected]
Reported-by: Alexander Bulekov <[email protected]>
Fixes: a8f4bbe2900 ("scsi-disk: store valid mode pages in a table")
Fixes: #546
Reported-by: Qiuhao Li <[email protected]>
Signed-off-by: Mauro Matteo Cascella <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
OSD::res_result OSD::_try_resurrect_pg(
OSDMapRef curmap, spg_t pgid, spg_t *resurrected, PGRef *old_pg_state)
{
assert(resurrected);
assert(old_pg_state);
// find nearest ancestor
DeletingStateRef df;
spg_t cur(pgid);
while (true) {
df = service.deleting_pgs.lookup(cur);
if (df)
break;
if (!cur.ps())
break;
cur = cur.get_parent();
}
if (!df)
return RES_NONE; // good to go
df->old_pg_state->lock();
OSDMapRef create_map = df->old_pg_state->get_osdmap();
df->old_pg_state->unlock();
set<spg_t> children;
if (cur == pgid) {
if (df->try_stop_deletion()) {
dout(10) << __func__ << ": halted deletion on pg " << pgid << dendl;
*resurrected = cur;
*old_pg_state = df->old_pg_state;
service.deleting_pgs.remove(pgid); // PG is no longer being removed!
return RES_SELF;
} else {
// raced, ensure we don't see DeletingStateRef when we try to
// delete this pg
service.deleting_pgs.remove(pgid);
return RES_NONE;
}
} else if (cur.is_split(create_map->get_pg_num(cur.pool()),
curmap->get_pg_num(cur.pool()),
&children) &&
children.count(pgid)) {
if (df->try_stop_deletion()) {
dout(10) << __func__ << ": halted deletion on ancestor pg " << pgid
<< dendl;
*resurrected = cur;
*old_pg_state = df->old_pg_state;
service.deleting_pgs.remove(cur); // PG is no longer being removed!
return RES_PARENT;
} else {
/* this is not a problem, failing to cancel proves that all objects
* have been removed, so no hobject_t overlap is possible
*/
return RES_NONE;
}
}
return RES_NONE;
} | 0 | [
"CWE-287",
"CWE-284"
]
| ceph | 5ead97120e07054d80623dada90a5cc764c28468 | 91,617,453,340,591,400,000,000,000,000,000,000,000 | 57 | auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random() |
static void upnp_event_recv(struct upnp_event_notify * obj)
{
int n;
n = recv(obj->s, obj->buffer, obj->buffersize, 0);
if(n<0) {
if(errno != EAGAIN &&
errno != EWOULDBLOCK &&
errno != EINTR) {
syslog(LOG_ERR, "%s: recv(): %m", "upnp_event_recv");
obj->state = EError;
}
return;
}
syslog(LOG_DEBUG, "%s: (%dbytes) %.*s", "upnp_event_recv",
n, n, obj->buffer);
/* TODO : do something with the data recevied ?
* right now, n (number of bytes received) is ignored
* We may need to recv() more bytes. */
obj->state = EFinished;
if(obj->sub)
obj->sub->seq++;
} | 0 | [
"CWE-200",
"CWE-252"
]
| miniupnp | bec6ccec63cadc95655721bc0e1dd49dac759d94 | 265,219,685,605,011,000,000,000,000,000,000,000,000 | 22 | upnp_event_prepare(): check the return value of snprintf() |
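Checking snprintf()'s return value, as this fix says, means treating a negative result or a result greater than or equal to the buffer size as an error or truncation instead of using the buffer as if the full string had been written. A small illustrative sketch (not the miniupnpd code):

#include <stdio.h>

/* Build a request line into buf; return the number of bytes written,
 * or -1 if snprintf() failed or the output did not fit. */
static int build_line(char *buf, size_t buflen, const char *path)
{
    int n = snprintf(buf, buflen, "NOTIFY %s HTTP/1.1\r\n", path);

    if (n < 0 || (size_t)n >= buflen)
        return -1;      /* error or truncated: do not use buf as-is */
    return n;
}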
void CL_Reconnect_f( void ) {
if ( !strlen( cl_reconnectArgs ) )
return;
Cvar_Set("ui_singlePlayerActive", "0");
Cbuf_AddText( va("connect %s\n", cl_reconnectArgs ) );
} | 0 | [
"CWE-269"
]
| ioq3 | 376267d534476a875d8b9228149c4ee18b74a4fd | 184,818,003,659,183,900,000,000,000,000,000,000,000 | 6 | Don't load .pk3s as .dlls, and don't load user config files from .pk3s. |
apr_status_t h2_push_diary_digest_set(h2_push_diary *diary, const char *authority,
const char *data, apr_size_t len)
{
gset_decoder decoder;
unsigned char log2n, log2p;
int N, i;
apr_pool_t *pool = diary->entries->pool;
h2_push_diary_entry e;
apr_status_t status = APR_SUCCESS;
if (len < 2) {
/* at least this should be there */
return APR_EINVAL;
}
log2n = data[0];
log2p = data[1];
diary->mask_bits = log2n + log2p;
if (diary->mask_bits > 64) {
/* cannot handle */
return APR_ENOTIMPL;
}
/* whatever is in the digest, it replaces the diary entries */
apr_array_clear(diary->entries);
if (!authority || !strcmp("*", authority)) {
diary->authority = NULL;
}
else if (!diary->authority || strcmp(diary->authority, authority)) {
diary->authority = apr_pstrdup(diary->entries->pool, authority);
}
N = h2_log2inv(log2n + log2p);
decoder.diary = diary;
decoder.pool = pool;
decoder.log2p = log2p;
decoder.data = (const unsigned char*)data;
decoder.datalen = len;
decoder.offset = 1;
decoder.bit = 8;
decoder.last_val = 0;
diary->N = N;
/* Determine effective N we use for storage */
if (!N) {
/* a totally empty cache digest. someone tells us that she has no
* entries in the cache at all. Use our own preferences for N+mask
*/
diary->N = diary->NMax;
return APR_SUCCESS;
}
else if (N > diary->NMax) {
        /* Store not more than the diary is configured to hold. We open
         * ourselves up to DoS attacks otherwise. */
diary->N = diary->NMax;
}
/* Intentional no APLOGNO */
ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
"h2_push_diary_digest_set: N=%d, log2n=%d, "
"diary->mask_bits=%d, dec.log2p=%d",
(int)diary->N, (int)log2n, diary->mask_bits,
(int)decoder.log2p);
for (i = 0; i < diary->N; ++i) {
if (gset_decode_next(&decoder, &e.hash) != APR_SUCCESS) {
/* the data may have less than N values */
break;
}
h2_push_diary_append(diary, &e);
}
/* Intentional no APLOGNO */
ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
"h2_push_diary_digest_set: diary now with %d entries, mask_bits=%d",
(int)diary->entries->nelts, diary->mask_bits);
return status;
} | 1 | [
"CWE-444"
]
| mod_h2 | b8a8c5061eada0ce3339b24ba1d587134552bc0c | 277,652,365,017,142,250,000,000,000,000,000,000,000 | 78 | * Removing support for abandoned draft of http-wg regarding cache-digests. |
RGWOp *RGWHandler_REST_Bucket_S3::op_put()
{
if (s->info.args.sub_resource_exists("logging"))
return NULL;
if (s->info.args.sub_resource_exists("versioning"))
return new RGWSetBucketVersioning_ObjStore_S3;
if (s->info.args.sub_resource_exists("website")) {
if (!s->cct->_conf->rgw_enable_static_website) {
return NULL;
}
return new RGWSetBucketWebsite_ObjStore_S3;
}
if (is_acl_op()) {
return new RGWPutACLs_ObjStore_S3;
} else if (is_cors_op()) {
return new RGWPutCORS_ObjStore_S3;
} else if (is_request_payment_op()) {
return new RGWSetRequestPayment_ObjStore_S3;
} else if(is_lc_op()) {
return new RGWPutLC_ObjStore_S3;
} else if(is_policy_op()) {
return new RGWPutBucketPolicy;
}
return new RGWCreateBucket_ObjStore_S3;
} | 0 | [
"CWE-79"
]
| ceph | ba0790a01ba5252db1ebc299db6e12cd758d0ff9 | 99,023,782,856,160,010,000,000,000,000,000,000,000 | 25 | rgw: reject unauthenticated response-header actions
Signed-off-by: Matt Benjamin <[email protected]>
Reviewed-by: Casey Bodley <[email protected]>
(cherry picked from commit d8dd5e513c0c62bbd7d3044d7e2eddcd897bd400) |
static void skb_entail(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
skb->csum = 0;
tcb->seq = tcb->end_seq = tp->write_seq;
tcb->tcp_flags = TCPHDR_ACK;
tcb->sacked = 0;
__skb_header_release(skb);
tcp_add_write_queue_tail(sk, skb);
sk->sk_wmem_queued += skb->truesize;
sk_mem_charge(sk, skb->truesize);
if (tp->nonagle & TCP_NAGLE_PUSH)
tp->nonagle &= ~TCP_NAGLE_PUSH;
tcp_slow_start_after_idle_check(sk);
} | 0 | [
"CWE-399",
"CWE-835"
]
| linux | ccf7abb93af09ad0868ae9033d1ca8108bdaec82 | 159,440,401,103,117,360,000,000,000,000,000,000,000 | 18 | tcp: avoid infinite loop in tcp_splice_read()
Splicing from TCP socket is vulnerable when a packet with URG flag is
received and stored into receive queue.
__tcp_splice_read() returns 0, and sk_wait_data() immediately
returns since there is the problematic skb in queue.
This is a nice way to burn cpu (aka infinite loop) and trigger
soft lockups.
Again, this gem was found by syzkaller tool.
Fixes: 9c55e01c0cc8 ("[TCP]: Splice receive support.")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Dmitry Vyukov <[email protected]>
Cc: Willy Tarreau <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
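The loop the commit message describes spins because each pass makes no progress yet never gives up. A generic user-space guard against that pattern — not the kernel's tcp_splice_read() itself, just the shape of the fix — is to bail out as soon as an iteration consumes nothing:

#include <stddef.h>
#include <sys/types.h>
#include <unistd.h>

/* Drain up to 'want' bytes from fd, stopping as soon as a pass makes
 * no progress instead of spinning forever. */
static ssize_t drain_fd(int fd, char *buf, size_t want)
{
    size_t total = 0;
    while (total < want) {
        ssize_t n = read(fd, buf + total, want - total);
        if (n <= 0)
            break;   /* error, EOF, or zero progress: give up */
        total += (size_t)n;
    }
    return (ssize_t)total;
}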
Item_cache* Item_cache::get_cache(const Item *item)
{
return get_cache(item, item->cmp_type());
} | 0 | []
| server | b000e169562697aa072600695d4f0c0412f94f4f | 189,033,444,084,613,230,000,000,000,000,000,000,000 | 4 | Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST, COL), NAME_CONST('NAME', NULL))
based on:
commit f7316aa0c9a
Author: Ajo Robert <[email protected]>
Date: Thu Aug 24 17:03:21 2017 +0530
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST,
COL), NAME_CONST('NAME', NULL))
Backport of Bug#19143243 fix.
NAME_CONST item can return NULL_ITEM type in case of incorrect arguments.
NULL_ITEM has special processing in Item_func_in function.
In Item_func_in::fix_length_and_dec an array of possible comparators is
created. Since NAME_CONST function has NULL_ITEM type, corresponding
array element is empty. Then NAME_CONST is wrapped to ITEM_CACHE.
ITEM_CACHE can not return proper type(NULL_ITEM) in Item_func_in::val_int(),
so the NULL_ITEM is attempted compared with an empty comparator.
The fix is to disable the caching of Item_name_const item. |
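The crash path described above boils down to calling through a comparator slot that was never filled for NULL_ITEM. A generic defensive lookup — plain C with invented type names, not the real Item_func_in internals — checks the slot before using it:

#include <stddef.h>

typedef int (*cmp_fn)(const void *a, const void *b);

enum value_type { TYPE_INT = 0, TYPE_REAL, TYPE_STRING, TYPE_NULL, TYPE_COUNT };

/* Slots for types that cannot be compared are intentionally left NULL. */
static cmp_fn comparators[TYPE_COUNT];

static int safe_compare(enum value_type t, const void *a, const void *b,
                        int *out)
{
    if ((int)t < 0 || t >= TYPE_COUNT || comparators[t] == NULL)
        return -1;   /* no comparator for this type: report an error */
    *out = comparators[t](a, b);
    return 0;
}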
ConnectionRef SimpleMessenger::get_loopback_connection()
{
return local_connection;
} | 0 | [
"CWE-287",
"CWE-284"
]
| ceph | 5ead97120e07054d80623dada90a5cc764c28468 | 310,970,435,701,875,300,000,000,000,000,000,000,000 | 4 | auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random() |
char* oidc_get_current_url(request_rec *r) {
char *url = NULL, *path = NULL;
apr_uri_t uri;
path = r->uri;
	/* check if we're dealing with a forward proxying scenario i.e. a non-relative URL */
if ((path) && (path[0] != '/')) {
memset(&uri, 0, sizeof(apr_uri_t));
if (apr_uri_parse(r->pool, r->uri, &uri) == APR_SUCCESS)
path = apr_pstrcat(r->pool, uri.path,
(r->args != NULL && *r->args != '\0' ? "?" : ""), r->args,
NULL);
else
oidc_warn(r, "apr_uri_parse failed on non-relative URL: %s",
r->uri);
} else {
		/* make sure we retain URL-encoded characters in the original URL that we send the user back to */
path = r->unparsed_uri;
}
url = apr_pstrcat(r->pool, oidc_get_current_url_base(r), path, NULL);
oidc_debug(r, "current URL '%s'", url);
return url;
} | 0 | [
"CWE-79"
]
| mod_auth_openidc | 55ea0a085290cd2c8cdfdd960a230cbc38ba8b56 | 86,944,072,034,787,050,000,000,000,000,000,000,000 | 27 | Add a function to escape Javascript characters |
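Escaping the characters that matter inside a JavaScript string, as the commit title describes, can be sketched as below; this is a simplified stand-in with an invented name, not the actual mod_auth_openidc helper:

#include <stdlib.h>
#include <string.h>

/* Return a newly allocated copy of 's' with characters that could break
 * out of a JS string literal replaced by escapes; caller must free(). */
static char *escape_js(const char *s)
{
    size_t len = strlen(s);
    char *out = malloc(len * 4 + 1);   /* worst case: "\xNN" per character */
    size_t j = 0;
    if (out == NULL)
        return NULL;
    for (size_t i = 0; i < len; i++) {
        unsigned char c = (unsigned char)s[i];
        switch (c) {
        case '"': case '\'': case '\\':
            out[j++] = '\\'; out[j++] = (char)c; break;
        case '\n': out[j++] = '\\'; out[j++] = 'n'; break;
        case '\r': out[j++] = '\\'; out[j++] = 'r'; break;
        case '<':  memcpy(out + j, "\\x3c", 4); j += 4; break;
        case '>':  memcpy(out + j, "\\x3e", 4); j += 4; break;
        default:   out[j++] = (char)c; break;
        }
    }
    out[j] = '\0';
    return out;
}

Escaping '<' and '>' as hex sequences keeps a value from terminating a surrounding <script> block even though they are harmless inside the string literal itself.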
static inline bool watchpoint_address_matches(CPUWatchpoint *wp,
vaddr addr, vaddr len)
{
/* We know the lengths are non-zero, but a little caution is
* required to avoid errors in the case where the range ends
* exactly at the top of the address space and so addr + len
* wraps round to zero.
*/
vaddr wpend = wp->vaddr + wp->len - 1;
vaddr addrend = addr + len - 1;
return !(addr > wpend || wp->vaddr > addrend);
} | 0 | [
"CWE-787"
]
| qemu | 4bfb024bc76973d40a359476dc0291f46e435442 | 42,696,842,316,021,080,000,000,000,000,000,000,000 | 13 | memory: clamp cached translation in case it points to an MMIO region
In using the address_space_translate_internal API, address_space_cache_init
forgot one piece of advice that can be found in the code for
address_space_translate_internal:
/* MMIO registers can be expected to perform full-width accesses based only
* on their address, without considering adjacent registers that could
* decode to completely different MemoryRegions. When such registers
* exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
* regions overlap wildly. For this reason we cannot clamp the accesses
* here.
*
* If the length is small (as is the case for address_space_ldl/stl),
* everything works fine. If the incoming length is large, however,
* the caller really has to do the clamping through memory_access_size.
*/
address_space_cache_init is exactly one such case where "the incoming length
is large", therefore we need to clamp the resulting length---not to
memory_access_size though, since we are not doing an access yet, but to
the size of the resulting section. This ensures that subsequent accesses
to the cached MemoryRegionSection will be in range.
With this patch, the enclosed testcase notices that the used ring does
not fit into the MSI-X table and prints a "qemu-system-x86_64: Cannot map used"
error.
Signed-off-by: Paolo Bonzini <[email protected]> |
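The clamping the commit message calls for reduces to limiting the access length to what remains of the region the address resolved to. The struct below is an invented stand-in for QEMU's MemoryRegionSection, kept only to show the arithmetic:

#include <stdint.h>

struct region {
    uint64_t base;   /* guest address where the region starts */
    uint64_t size;   /* total size of the region */
};

/* Clamp an access of 'len' bytes at 'addr' so it never extends past the
 * end of the region it was resolved against. */
static uint64_t clamp_len(const struct region *r, uint64_t addr, uint64_t len)
{
    uint64_t offset, remaining;
    if (addr < r->base || addr - r->base >= r->size)
        return 0;   /* address does not fall inside the region */
    offset = addr - r->base;
    remaining = r->size - offset;
    return len < remaining ? len : remaining;
}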
static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog, u32 *target_size)
{
struct bpf_insn *insn = insn_buf;
int off;
switch (si->off) {
case offsetof(struct __sk_buff, data_end):
off = si->off;
off -= offsetof(struct __sk_buff, data_end);
off += offsetof(struct sk_buff, cb);
off += offsetof(struct tcp_skb_cb, bpf.data_end);
*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
si->src_reg, off);
break;
default:
return bpf_convert_ctx_access(type, si, insn_buf, prog,
target_size);
}
return insn - insn_buf;
} | 0 | [
"CWE-120"
]
| linux | 050fad7c4534c13c8eb1d9c2ba66012e014773cb | 25,785,291,310,229,324,000,000,000,000,000,000,000 | 24 | bpf: fix truncated jump targets on heavy expansions
Recently during testing, I ran into the following panic:
[ 207.892422] Internal error: Accessing user space memory outside uaccess.h routines: 96000004 [#1] SMP
[ 207.901637] Modules linked in: binfmt_misc [...]
[ 207.966530] CPU: 45 PID: 2256 Comm: test_verifier Tainted: G W 4.17.0-rc3+ #7
[ 207.974956] Hardware name: FOXCONN R2-1221R-A4/C2U4N_MB, BIOS G31FB18A 03/31/2017
[ 207.982428] pstate: 60400005 (nZCv daif +PAN -UAO)
[ 207.987214] pc : bpf_skb_load_helper_8_no_cache+0x34/0xc0
[ 207.992603] lr : 0xffff000000bdb754
[ 207.996080] sp : ffff000013703ca0
[ 207.999384] x29: ffff000013703ca0 x28: 0000000000000001
[ 208.004688] x27: 0000000000000001 x26: 0000000000000000
[ 208.009992] x25: ffff000013703ce0 x24: ffff800fb4afcb00
[ 208.015295] x23: ffff00007d2f5038 x22: ffff00007d2f5000
[ 208.020599] x21: fffffffffeff2a6f x20: 000000000000000a
[ 208.025903] x19: ffff000009578000 x18: 0000000000000a03
[ 208.031206] x17: 0000000000000000 x16: 0000000000000000
[ 208.036510] x15: 0000ffff9de83000 x14: 0000000000000000
[ 208.041813] x13: 0000000000000000 x12: 0000000000000000
[ 208.047116] x11: 0000000000000001 x10: ffff0000089e7f18
[ 208.052419] x9 : fffffffffeff2a6f x8 : 0000000000000000
[ 208.057723] x7 : 000000000000000a x6 : 00280c6160000000
[ 208.063026] x5 : 0000000000000018 x4 : 0000000000007db6
[ 208.068329] x3 : 000000000008647a x2 : 19868179b1484500
[ 208.073632] x1 : 0000000000000000 x0 : ffff000009578c08
[ 208.078938] Process test_verifier (pid: 2256, stack limit = 0x0000000049ca7974)
[ 208.086235] Call trace:
[ 208.088672] bpf_skb_load_helper_8_no_cache+0x34/0xc0
[ 208.093713] 0xffff000000bdb754
[ 208.096845] bpf_test_run+0x78/0xf8
[ 208.100324] bpf_prog_test_run_skb+0x148/0x230
[ 208.104758] sys_bpf+0x314/0x1198
[ 208.108064] el0_svc_naked+0x30/0x34
[ 208.111632] Code: 91302260 f9400001 f9001fa1 d2800001 (29500680)
[ 208.117717] ---[ end trace 263cb8a59b5bf29f ]---
The program itself which caused this had a long jump over the whole
instruction sequence where all of the inner instructions required
heavy expansions into multiple BPF instructions. Additionally, I also
had BPF hardening enabled which requires once more rewrites of all
constant values in order to blind them. Each time we rewrite insns,
bpf_adj_branches() would need to potentially adjust branch targets
which cross the patchlet boundary to accommodate for the additional
delta. Eventually that lead to the case where the target offset could
not fit into insn->off's upper 0x7fff limit anymore where then offset
wraps around becoming negative (in s16 universe), or vice versa
depending on the jump direction.
Therefore it becomes necessary to detect and reject any such occasions
in a generic way for native eBPF and cBPF to eBPF migrations. For
the latter we can simply check bounds in the bpf_convert_filter()'s
BPF_EMIT_JMP helper macro and bail out once we surpass limits. The
bpf_patch_insn_single() for native eBPF (and cBPF to eBPF in case
of subsequent hardening) is a bit more complex in that we need to
detect such truncations before hitting the bpf_prog_realloc(). Thus
the latter is split into an extra pass to probe problematic offsets
on the original program in order to fail early. With that in place
and carefully tested I no longer hit the panic and the rewrites are
rejected properly. The above example panic I've seen on bpf-next,
though the issue itself is generic in that a guard against this issue
in bpf seems more appropriate in this case.
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Martin KaFai Lau <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]> |
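The heart of the fix described above is refusing to patch a branch whose adjusted offset no longer fits the signed 16-bit off field; a standalone version of that bounds check — not the kernel's bpf_adj_branches() itself — looks like:

#include <stdint.h>

/* Write the adjusted offset and return 0 if it still fits in a signed
 * 16-bit branch field; return -1 if the expansion would truncate or
 * wrap it, so the caller can reject the program instead of miscompiling. */
static int adjust_branch_off(int16_t old_off, int32_t delta, int16_t *new_off)
{
    int32_t off = (int32_t)old_off + delta;
    if (off < INT16_MIN || off > INT16_MAX)
        return -1;
    *new_off = (int16_t)off;
    return 0;
}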
static int php_libxml_streams_IO_read(void *context, char *buffer, int len)
{
TSRMLS_FETCH();
return php_stream_read((php_stream*)context, buffer, len);
} | 0 | [
"CWE-200"
]
| php-src | 8e76d0404b7f664ee6719fd98f0483f0ac4669d6 | 126,615,423,080,765,640,000,000,000,000,000,000,000 | 5 | Fixed external entity loading |