func (string, 0–484k chars) | target (int64, 0 or 1) | cwe (sequence, 0–4 items) | project (string, 799 distinct values) | commit_id (string, 40 chars) | hash (float64) | size (int64, 1–24k) | message (string, 0–13.3k chars)
---|---|---|---|---|---|---|---|
static long long lsqlite3_get_eid(struct lsqlite3_private *lsqlite3,
struct ldb_dn *dn)
{
TALLOC_CTX *local_ctx;
long long eid = -1;
char *cdn;
/* ignore ltdb specials */
if (ldb_dn_is_special(dn)) {
return -1;
}
/* create a local ctx */
local_ctx = talloc_named(lsqlite3, 0, "lsqlite3_get_eid local context");
if (local_ctx == NULL) {
return -1;
}
cdn = ldb_dn_alloc_casefold(local_ctx, dn);
if (!cdn) goto done;
eid = lsqlite3_get_eid_ndn(lsqlite3->sqlite, local_ctx, cdn);
done:
talloc_free(local_ctx);
return eid;
} | 0 | [
"CWE-20"
] | samba | 3f95957d6de321c803a66f3ec67a8ff09befd16d | 244,337,273,326,711,060,000,000,000,000,000,000,000 | 27 | CVE-2018-1140 ldb: Check for ldb_dn_get_casefold() failure in ldb_sqlite
Signed-off-by: Andrew Bartlett <[email protected]>
Reviewed-by: Douglas Bagnall <[email protected]>
BUG: https://bugzilla.samba.org/show_bug.cgi?id=13374 |
void readRuleset(YAML::Node node, string_array &dest, bool scope_limit = true)
{
std::string strLine, name, url, group, interval;
YAML::Node object;
for(unsigned int i = 0; i < node.size(); i++)
{
object = node[i];
object["import"] >>= name;
if(name.size())
{
dest.emplace_back("!!import:" + name);
continue;
}
object["ruleset"] >>= url;
object["group"] >>= group;
object["rule"] >>= name;
object["interval"] >>= interval;
if(url.size())
{
strLine = group + "," + url;
if(interval.size())
strLine += "," + interval;
}
else if(name.size())
strLine = group + ",[]" + name;
else
continue;
dest.emplace_back(std::move(strLine));
}
importItems(dest, scope_limit);
} | 0 | [
"CWE-434",
"CWE-94"
] | subconverter | ce8d2bd0f13f05fcbd2ed90755d097f402393dd3 | 43,719,117,596,496,280,000,000,000,000,000,000,000 | 32 | Enhancements
Add authorization check before loading scripts.
Add detailed logs when loading preference settings. |
static inline int __early_pfn_to_nid(unsigned long pfn,
struct mminit_pfnnid_cache *state)
{
return 0;
} | 0 | [
"CWE-119"
] | linux | 1be7107fbe18eed3e319a6c3e83c78254b693acb | 74,949,078,647,743,620,000,000,000,000,000,000,000 | 5 | mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce the risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses alloca() allocations as large as 64kB in many
commonly used functions. Others use constructs like gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
no limit for the stack size limit because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation wise, first delete all the old code for stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]> |
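A minimal sketch of the vm_start_gap() helper this commit introduces, assuming stack_guard_gap holds the gap size in bytes (the VM_GROWSUP counterpart, vm_end_gap(), is symmetric):

```c
/* Sketch of the helper: callers use this in place of vma->vm_start so
 * the guard gap below a downward-growing stack is treated as part of
 * the vma when placing neighbouring mappings. */
static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)	/* clamp on underflow */
			vm_start = 0;
	}
	return vm_start;
}
```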
const CImg<T> get_shared_points(const unsigned int x0, const unsigned int x1,
const unsigned int y0=0, const unsigned int z0=0, const unsigned int c0=0) const {
const ulongT
beg = (ulongT)offset(x0,y0,z0,c0),
end = (ulongT)offset(x1,y0,z0,c0);
if (beg>end || beg>=size() || end>=size())
throw CImgArgumentException(_cimg_instance
"get_shared_points(): Invalid request of a shared-memory subset (%u->%u,%u,%u,%u).",
cimg_instance,
x0,x1,y0,z0,c0);
return CImg<T>(_data + beg,x1 - x0 + 1,1,1,1,true);
} | 0 | [
"CWE-770"
] | cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 255,253,388,528,126,250,000,000,000,000,000,000,000 | 12 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in the file do not exceed the file size. |
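A hedged sketch of the kind of validation the message describes; the function and parameter names below are illustrative, not the actual CImg loader code:

```c
/* Sketch: reject an image whose declared pixel payload cannot fit in
 * the file, using division to avoid size_t multiplication overflow. */
static int dimensions_fit_file(size_t width, size_t height,
                               size_t bytes_per_pixel, size_t file_size)
{
    if (width == 0 || height == 0 || bytes_per_pixel == 0)
        return 1;                      /* empty image, nothing to read */
    if (width > file_size / height)
        return 0;                      /* width*height alone too large */
    if (width * height > file_size / bytes_per_pixel)
        return 0;                      /* full payload exceeds the file */
    return 1;
}
```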
static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
size_t datasz)
{
int err = 0;
struct audit_entry *entry;
struct audit_field *ino_f;
void *bufp;
size_t remain = datasz - sizeof(struct audit_rule_data);
int i;
char *str;
entry = audit_to_entry_common((struct audit_rule *)data);
if (IS_ERR(entry))
goto exit_nofree;
bufp = data->buf;
entry->rule.vers_ops = 2;
for (i = 0; i < data->field_count; i++) {
struct audit_field *f = &entry->rule.fields[i];
err = -EINVAL;
if (!(data->fieldflags[i] & AUDIT_OPERATORS) ||
data->fieldflags[i] & ~AUDIT_OPERATORS)
goto exit_free;
f->op = data->fieldflags[i] & AUDIT_OPERATORS;
f->type = data->fields[i];
f->val = data->values[i];
f->lsm_str = NULL;
f->lsm_rule = NULL;
switch(f->type) {
case AUDIT_PID:
case AUDIT_UID:
case AUDIT_EUID:
case AUDIT_SUID:
case AUDIT_FSUID:
case AUDIT_GID:
case AUDIT_EGID:
case AUDIT_SGID:
case AUDIT_FSGID:
case AUDIT_LOGINUID:
case AUDIT_PERS:
case AUDIT_MSGTYPE:
case AUDIT_PPID:
case AUDIT_DEVMAJOR:
case AUDIT_DEVMINOR:
case AUDIT_EXIT:
case AUDIT_SUCCESS:
case AUDIT_ARG0:
case AUDIT_ARG1:
case AUDIT_ARG2:
case AUDIT_ARG3:
break;
case AUDIT_ARCH:
entry->rule.arch_f = f;
break;
case AUDIT_SUBJ_USER:
case AUDIT_SUBJ_ROLE:
case AUDIT_SUBJ_TYPE:
case AUDIT_SUBJ_SEN:
case AUDIT_SUBJ_CLR:
case AUDIT_OBJ_USER:
case AUDIT_OBJ_ROLE:
case AUDIT_OBJ_TYPE:
case AUDIT_OBJ_LEV_LOW:
case AUDIT_OBJ_LEV_HIGH:
str = audit_unpack_string(&bufp, &remain, f->val);
if (IS_ERR(str))
goto exit_free;
entry->rule.buflen += f->val;
err = security_audit_rule_init(f->type, f->op, str,
(void **)&f->lsm_rule);
/* Keep currently invalid fields around in case they
* become valid after a policy reload. */
if (err == -EINVAL) {
printk(KERN_WARNING "audit rule for LSM "
"\'%s\' is invalid\n", str);
err = 0;
}
if (err) {
kfree(str);
goto exit_free;
} else
f->lsm_str = str;
break;
case AUDIT_WATCH:
str = audit_unpack_string(&bufp, &remain, f->val);
if (IS_ERR(str))
goto exit_free;
entry->rule.buflen += f->val;
err = audit_to_watch(&entry->rule, str, f->val, f->op);
if (err) {
kfree(str);
goto exit_free;
}
break;
case AUDIT_DIR:
str = audit_unpack_string(&bufp, &remain, f->val);
if (IS_ERR(str))
goto exit_free;
entry->rule.buflen += f->val;
err = audit_make_tree(&entry->rule, str, f->op);
kfree(str);
if (err)
goto exit_free;
break;
case AUDIT_INODE:
err = audit_to_inode(&entry->rule, f);
if (err)
goto exit_free;
break;
case AUDIT_FILTERKEY:
err = -EINVAL;
if (entry->rule.filterkey || f->val > AUDIT_MAX_KEY_LEN)
goto exit_free;
str = audit_unpack_string(&bufp, &remain, f->val);
if (IS_ERR(str))
goto exit_free;
entry->rule.buflen += f->val;
entry->rule.filterkey = str;
break;
case AUDIT_PERM:
if (f->val & ~15)
goto exit_free;
break;
case AUDIT_FILETYPE:
if ((f->val & ~S_IFMT) > S_IFMT)
goto exit_free;
break;
default:
goto exit_free;
}
}
ino_f = entry->rule.inode_f;
if (ino_f) {
switch(ino_f->op) {
case AUDIT_NOT_EQUAL:
entry->rule.inode_f = NULL;
case AUDIT_EQUAL:
break;
default:
err = -EINVAL;
goto exit_free;
}
}
exit_nofree:
return entry;
exit_free:
audit_free_rule(entry);
return ERR_PTR(err);
} | 0 | [
"CWE-362"
] | linux-2.6 | 8f7b0ba1c853919b85b54774775f567f30006107 | 76,214,767,768,403,595,000,000,000,000,000,000,000 | 157 | Fix inotify watch removal/umount races
Inotify watch removals suck violently.
To kick the watch out we need (in this order) inode->inotify_mutex and
ih->mutex. That's fine if we have a hold on inode; however, for all
other cases we need to make damn sure we don't race with umount. We can
*NOT* just grab a reference to a watch - inotify_unmount_inodes() will
happily sail past it and we'll end with reference to inode potentially
outliving its superblock.
Ideally we just want to grab an active reference to superblock if we
can; that will make sure we won't go into inotify_umount_inodes() until
we are done. Cleanup is just deactivate_super().
However, that leaves a messy case - what if we *are* racing with
umount() and active references to superblock can't be acquired anymore?
We can bump ->s_count, grab ->s_umount, which will almost certainly wait
until the superblock is shut down and the watch in question is pining
for fjords. That's fine, but there is a problem - we might have hit the
window between ->s_active getting to 0 / ->s_count - below S_BIAS (i.e.
the moment when superblock is past the point of no return and is heading
for shutdown) and the moment when deactivate_super() acquires
->s_umount.
We could just do drop_super() yield() and retry, but that's rather
antisocial and this stuff is luser-triggerable. OTOH, having grabbed
->s_umount and having found that we'd got there first (i.e. that
->s_root is non-NULL) we know that we won't race with
inotify_umount_inodes().
So we could grab a reference to watch and do the rest as above, just
with drop_super() instead of deactivate_super(), right? Wrong. We had
to drop ih->mutex before we could grab ->s_umount. So the watch
could've been gone already.
That still can be dealt with - we need to save watch->wd, do idr_find()
and compare its result with our pointer. If they match, we either have
the damn thing still alive or we'd lost not one but two races at once,
the watch had been killed and a new one got created with the same ->wd
at the same address. That couldn't have happened in inotify_destroy(),
but inotify_rm_wd() could run into that. Still, "new one got created"
is not a problem - we have every right to kill it or leave it alone,
whatever's more convenient.
So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
"grab it and kill it" check. If it's been our original watch, we are
fine, if it's a newcomer - nevermind, just pretend that we'd won the
race and kill the fscker anyway; we are safe since we know that its
superblock won't be going away.
And yes, this is far beyond mere "not very pretty"; so's the entire
concept of inotify to start with.
Signed-off-by: Al Viro <[email protected]>
Acked-by: Greg KH <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
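A hedged sketch of the revalidation the message describes, once ih->mutex has been re-acquired; the identifiers are modeled loosely on 2.6-era fs/inotify.c and the removal helper name is hypothetical:

```c
/* Sketch: wd was saved before ih->mutex had to be dropped; look the
 * watch up again and only then kill it. */
mutex_lock(&ih->mutex);
w = idr_find(&ih->idr, wd);
if (w == watch && w->inode->i_sb == sb) {
	/* Either our original watch, or a newcomer that reused the same
	 * wd at the same address; per the message it is safe to kill it
	 * either way, since this superblock cannot go away here. */
	remove_watch_locked(ih, w);	/* hypothetical helper name */
}
mutex_unlock(&ih->mutex);
```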
static int EncodeCert(Cert* cert, DerCert* der, RsaKey* rsaKey, ecc_key* eccKey,
WC_RNG* rng, const byte* ntruKey, word16 ntruSz, DsaKey* dsaKey,
ed25519_key* ed25519Key, ed448_key* ed448Key)
{
int ret;
if (cert == NULL || der == NULL || rng == NULL)
return BAD_FUNC_ARG;
/* make sure at least one key type is provided */
if (rsaKey == NULL && eccKey == NULL && ed25519Key == NULL &&
dsaKey == NULL && ed448Key == NULL && ntruKey == NULL) {
return PUBLIC_KEY_E;
}
/* init */
XMEMSET(der, 0, sizeof(DerCert));
/* version */
der->versionSz = SetMyVersion(cert->version, der->version, TRUE);
/* serial number (must be positive) */
if (cert->serialSz == 0) {
/* generate random serial */
cert->serialSz = CTC_GEN_SERIAL_SZ;
ret = wc_RNG_GenerateBlock(rng, cert->serial, cert->serialSz);
if (ret != 0)
return ret;
/* Clear the top bit to avoid a negative value */
cert->serial[0] &= 0x7f;
}
der->serialSz = SetSerialNumber(cert->serial, cert->serialSz, der->serial,
sizeof(der->serial), CTC_SERIAL_SIZE);
if (der->serialSz < 0)
return der->serialSz;
/* signature algo */
der->sigAlgoSz = SetAlgoID(cert->sigType, der->sigAlgo, oidSigType, 0);
if (der->sigAlgoSz <= 0)
return ALGO_ID_E;
/* public key */
#ifndef NO_RSA
if (cert->keyType == RSA_KEY) {
if (rsaKey == NULL)
return PUBLIC_KEY_E;
der->publicKeySz = SetRsaPublicKey(der->publicKey, rsaKey,
sizeof(der->publicKey), 1);
}
#endif
#ifdef HAVE_ECC
if (cert->keyType == ECC_KEY) {
if (eccKey == NULL)
return PUBLIC_KEY_E;
der->publicKeySz = SetEccPublicKey(der->publicKey, eccKey, 1);
}
#endif
#if !defined(NO_DSA) && !defined(HAVE_SELFTEST)
if (cert->keyType == DSA_KEY) {
if (dsaKey == NULL)
return PUBLIC_KEY_E;
der->publicKeySz = wc_SetDsaPublicKey(der->publicKey, dsaKey,
sizeof(der->publicKey), 1);
}
#endif
#ifdef HAVE_ED25519
if (cert->keyType == ED25519_KEY) {
if (ed25519Key == NULL)
return PUBLIC_KEY_E;
der->publicKeySz = SetEd25519PublicKey(der->publicKey, ed25519Key, 1);
}
#endif
#ifdef HAVE_ED448
if (cert->keyType == ED448_KEY) {
if (ed448Key == NULL)
return PUBLIC_KEY_E;
der->publicKeySz = SetEd448PublicKey(der->publicKey, ed448Key, 1);
}
#endif
#ifdef HAVE_NTRU
if (cert->keyType == NTRU_KEY) {
word32 rc;
word16 encodedSz;
if (ntruKey == NULL)
return PUBLIC_KEY_E;
rc = ntru_crypto_ntru_encrypt_publicKey2SubjectPublicKeyInfo(ntruSz,
ntruKey, &encodedSz, NULL);
if (rc != NTRU_OK)
return PUBLIC_KEY_E;
if (encodedSz > MAX_PUBLIC_KEY_SZ)
return PUBLIC_KEY_E;
rc = ntru_crypto_ntru_encrypt_publicKey2SubjectPublicKeyInfo(ntruSz,
ntruKey, &encodedSz, der->publicKey);
if (rc != NTRU_OK)
return PUBLIC_KEY_E;
der->publicKeySz = encodedSz;
}
#else
(void)ntruSz;
#endif /* HAVE_NTRU */
if (der->publicKeySz <= 0)
return PUBLIC_KEY_E;
der->validitySz = 0;
#ifdef WOLFSSL_ALT_NAMES
/* date validity copy ? */
if (cert->beforeDateSz && cert->afterDateSz) {
der->validitySz = CopyValidity(der->validity, cert);
if (der->validitySz <= 0)
return DATE_E;
}
#endif
/* date validity */
if (der->validitySz == 0) {
der->validitySz = SetValidity(der->validity, cert->daysValid);
if (der->validitySz <= 0)
return DATE_E;
}
/* subject name */
#if defined(WOLFSSL_CERT_EXT) || defined(OPENSSL_EXTRA)
if (XSTRLEN((const char*)cert->sbjRaw) > 0) {
/* Use the raw subject */
int idx;
der->subjectSz = min(sizeof(der->subject),
(word32)XSTRLEN((const char*)cert->sbjRaw));
/* header */
idx = SetSequence(der->subjectSz, der->subject);
if (der->subjectSz + idx > (int)sizeof(der->subject)) {
return SUBJECT_E;
}
XMEMCPY((char*)der->subject + idx, (const char*)cert->sbjRaw,
der->subjectSz);
der->subjectSz += idx;
}
else
#endif
{
/* Use the name structure */
der->subjectSz = SetName(der->subject, sizeof(der->subject),
&cert->subject);
}
if (der->subjectSz <= 0)
return SUBJECT_E;
/* issuer name */
#if defined(WOLFSSL_CERT_EXT) || defined(OPENSSL_EXTRA)
if (XSTRLEN((const char*)cert->issRaw) > 0) {
/* Use the raw issuer */
int idx;
der->issuerSz = min(sizeof(der->issuer),
(word32)XSTRLEN((const char*)cert->issRaw));
/* header */
idx = SetSequence(der->issuerSz, der->issuer);
if (der->issuerSz + idx > (int)sizeof(der->issuer)) {
return ISSUER_E;
}
XMEMCPY((char*)der->issuer + idx, (const char*)cert->issRaw,
der->issuerSz);
der->issuerSz += idx;
}
else
#endif
{
/* Use the name structure */
der->issuerSz = SetName(der->issuer, sizeof(der->issuer),
cert->selfSigned ? &cert->subject : &cert->issuer);
}
if (der->issuerSz <= 0)
return ISSUER_E;
/* set the extensions */
der->extensionsSz = 0;
/* CA */
if (cert->isCA) {
der->caSz = SetCa(der->ca, sizeof(der->ca));
if (der->caSz <= 0)
return CA_TRUE_E;
der->extensionsSz += der->caSz;
}
else
der->caSz = 0;
#ifdef WOLFSSL_ALT_NAMES
/* Alternative Name */
if (cert->altNamesSz) {
der->altNamesSz = SetAltNames(der->altNames, sizeof(der->altNames),
cert->altNames, cert->altNamesSz);
if (der->altNamesSz <= 0)
return ALT_NAME_E;
der->extensionsSz += der->altNamesSz;
}
else
der->altNamesSz = 0;
#endif
#ifdef WOLFSSL_CERT_EXT
/* SKID */
if (cert->skidSz) {
/* check the provided SKID size */
if (cert->skidSz > (int)min(CTC_MAX_SKID_SIZE, sizeof(der->skid)))
return SKID_E;
/* Note: different skid buffers sizes for der (MAX_KID_SZ) and
cert (CTC_MAX_SKID_SIZE). */
der->skidSz = SetSKID(der->skid, sizeof(der->skid),
cert->skid, cert->skidSz);
if (der->skidSz <= 0)
return SKID_E;
der->extensionsSz += der->skidSz;
}
else
der->skidSz = 0;
/* AKID */
if (cert->akidSz) {
/* check the provided AKID size */
if (cert->akidSz > (int)min(CTC_MAX_AKID_SIZE, sizeof(der->akid)))
return AKID_E;
der->akidSz = SetAKID(der->akid, sizeof(der->akid),
cert->akid, cert->akidSz, cert->heap);
if (der->akidSz <= 0)
return AKID_E;
der->extensionsSz += der->akidSz;
}
else
der->akidSz = 0;
/* Key Usage */
if (cert->keyUsage != 0){
der->keyUsageSz = SetKeyUsage(der->keyUsage, sizeof(der->keyUsage),
cert->keyUsage);
if (der->keyUsageSz <= 0)
return KEYUSAGE_E;
der->extensionsSz += der->keyUsageSz;
}
else
der->keyUsageSz = 0;
/* Extended Key Usage */
if (cert->extKeyUsage != 0){
der->extKeyUsageSz = SetExtKeyUsage(cert, der->extKeyUsage,
sizeof(der->extKeyUsage), cert->extKeyUsage);
if (der->extKeyUsageSz <= 0)
return EXTKEYUSAGE_E;
der->extensionsSz += der->extKeyUsageSz;
}
else
der->extKeyUsageSz = 0;
/* Certificate Policies */
if (cert->certPoliciesNb != 0) {
der->certPoliciesSz = SetCertificatePolicies(der->certPolicies,
sizeof(der->certPolicies),
cert->certPolicies,
cert->certPoliciesNb,
cert->heap);
if (der->certPoliciesSz <= 0)
return CERTPOLICIES_E;
der->extensionsSz += der->certPoliciesSz;
}
else
der->certPoliciesSz = 0;
#endif /* WOLFSSL_CERT_EXT */
/* put extensions */
if (der->extensionsSz > 0) {
/* put the start of extensions sequence (ID, Size) */
der->extensionsSz = SetExtensionsHeader(der->extensions,
sizeof(der->extensions),
der->extensionsSz);
if (der->extensionsSz <= 0)
return EXTENSIONS_E;
/* put CA */
if (der->caSz) {
ret = SetExtensions(der->extensions, sizeof(der->extensions),
&der->extensionsSz,
der->ca, der->caSz);
if (ret == 0)
return EXTENSIONS_E;
}
#ifdef WOLFSSL_ALT_NAMES
/* put Alternative Names */
if (der->altNamesSz) {
ret = SetExtensions(der->extensions, sizeof(der->extensions),
&der->extensionsSz,
der->altNames, der->altNamesSz);
if (ret <= 0)
return EXTENSIONS_E;
}
#endif
#ifdef WOLFSSL_CERT_EXT
/* put SKID */
if (der->skidSz) {
ret = SetExtensions(der->extensions, sizeof(der->extensions),
&der->extensionsSz,
der->skid, der->skidSz);
if (ret <= 0)
return EXTENSIONS_E;
}
/* put AKID */
if (der->akidSz) {
ret = SetExtensions(der->extensions, sizeof(der->extensions),
&der->extensionsSz,
der->akid, der->akidSz);
if (ret <= 0)
return EXTENSIONS_E;
}
/* put KeyUsage */
if (der->keyUsageSz) {
ret = SetExtensions(der->extensions, sizeof(der->extensions),
&der->extensionsSz,
der->keyUsage, der->keyUsageSz);
if (ret <= 0)
return EXTENSIONS_E;
}
/* put ExtendedKeyUsage */
if (der->extKeyUsageSz) {
ret = SetExtensions(der->extensions, sizeof(der->extensions),
&der->extensionsSz,
der->extKeyUsage, der->extKeyUsageSz);
if (ret <= 0)
return EXTENSIONS_E;
}
/* put Certificate Policies */
if (der->certPoliciesSz) {
ret = SetExtensions(der->extensions, sizeof(der->extensions),
&der->extensionsSz,
der->certPolicies, der->certPoliciesSz);
if (ret <= 0)
return EXTENSIONS_E;
}
#endif /* WOLFSSL_CERT_EXT */
}
der->total = der->versionSz + der->serialSz + der->sigAlgoSz +
der->publicKeySz + der->validitySz + der->subjectSz + der->issuerSz +
der->extensionsSz;
return 0;
} | 0 | [
"CWE-125",
"CWE-345"
] | wolfssl | f93083be72a3b3d956b52a7ec13f307a27b6e093 | 92,907,216,995,802,280,000,000,000,000,000,000,000 | 374 | OCSP: improve handling of OCSP no check extension |
static Image *ReadXWDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
#define CheckOverflowException(length,width,height) \
(((height) != 0) && ((length)/((size_t) height) != ((size_t) width)))
char
*comment;
Image
*image;
IndexPacket
index;
int
x_status;
MagickBooleanType
authentic_colormap;
MagickStatusType
status;
register IndexPacket
*indexes;
register ssize_t
x;
register PixelPacket
*q;
register ssize_t
i;
register size_t
pixel;
size_t
length;
ssize_t
count,
y;
unsigned long
lsb_first;
XColor
*colors;
XImage
*ximage;
XWDFileHeader
header;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read in header information.
*/
count=ReadBlob(image,sz_XWDheader,(unsigned char *) &header);
if (count != sz_XWDheader)
ThrowReaderException(CorruptImageError,"UnableToReadImageHeader");
/*
Ensure the header byte-order is most-significant byte first.
*/
lsb_first=1;
if ((int) (*(char *) &lsb_first) != 0)
MSBOrderLong((unsigned char *) &header,sz_XWDheader);
/*
Check to see if the dump file is in the proper format.
*/
if (header.file_version != XWD_FILE_VERSION)
ThrowReaderException(CorruptImageError,"FileFormatVersionMismatch");
if (header.header_size < sz_XWDheader)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
switch (header.visual_class)
{
case StaticGray:
case GrayScale:
{
if (header.bits_per_pixel != 1)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
break;
}
case StaticColor:
case PseudoColor:
{
if ((header.bits_per_pixel < 1) || (header.bits_per_pixel > 15) ||
(header.ncolors == 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
break;
}
case TrueColor:
case DirectColor:
{
if ((header.bits_per_pixel != 16) && (header.bits_per_pixel != 24) &&
(header.bits_per_pixel != 32))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
break;
}
default:
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
switch (header.pixmap_format)
{
case XYBitmap:
{
if (header.pixmap_depth != 1)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
break;
}
case XYPixmap:
case ZPixmap:
{
if ((header.pixmap_depth < 1) || (header.pixmap_depth > 32))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
switch (header.bitmap_pad)
{
case 8:
case 16:
case 32:
break;
default:
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
break;
}
default:
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
switch (header.bitmap_unit)
{
case 8:
case 16:
case 32:
break;
default:
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
switch (header.byte_order)
{
case LSBFirst:
case MSBFirst:
break;
default:
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
switch (header.bitmap_bit_order)
{
case LSBFirst:
case MSBFirst:
break;
default:
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
if (((header.bitmap_pad % 8) != 0) || (header.bitmap_pad > 32))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (header.ncolors > 65535)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
length=(size_t) (header.header_size-sz_XWDheader);
if ((length+1) != ((size_t) ((CARD32) (length+1))))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
comment=(char *) AcquireQuantumMemory(length+1,sizeof(*comment));
if (comment == (char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,length,(unsigned char *) comment);
comment[length]='\0';
(void) SetImageProperty(image,"comment",comment);
comment=DestroyString(comment);
if (count != (ssize_t) length)
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
/*
Initialize the X image.
*/
ximage=(XImage *) AcquireMagickMemory(sizeof(*ximage));
if (ximage == (XImage *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
ximage->depth=(int) header.pixmap_depth;
ximage->format=(int) header.pixmap_format;
ximage->xoffset=(int) header.xoffset;
ximage->data=(char *) NULL;
ximage->width=(int) header.pixmap_width;
ximage->height=(int) header.pixmap_height;
ximage->bitmap_pad=(int) header.bitmap_pad;
ximage->bytes_per_line=(int) header.bytes_per_line;
ximage->byte_order=(int) header.byte_order;
ximage->bitmap_unit=(int) header.bitmap_unit;
ximage->bitmap_bit_order=(int) header.bitmap_bit_order;
ximage->bits_per_pixel=(int) header.bits_per_pixel;
ximage->red_mask=header.red_mask;
ximage->green_mask=header.green_mask;
ximage->blue_mask=header.blue_mask;
if ((ximage->width < 0) || (ximage->height < 0) || (ximage->depth < 0) ||
(ximage->format < 0) || (ximage->byte_order < 0) ||
(ximage->bitmap_bit_order < 0) || (ximage->bitmap_pad < 0) ||
(ximage->bytes_per_line < 0))
{
ximage=(XImage *) RelinquishMagickMemory(ximage);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
if ((ximage->width > 65535) || (ximage->height > 65535))
{
ximage=(XImage *) RelinquishMagickMemory(ximage);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
if ((ximage->bits_per_pixel > 32) || (ximage->bitmap_unit > 32))
{
ximage=(XImage *) RelinquishMagickMemory(ximage);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
x_status=XInitImage(ximage);
if (x_status == 0)
{
ximage=(XImage *) RelinquishMagickMemory(ximage);
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
}
/*
Read colormap.
*/
authentic_colormap=MagickFalse;
colors=(XColor *) NULL;
if (header.ncolors != 0)
{
XWDColor
color;
colors=(XColor *) AcquireQuantumMemory((size_t) header.ncolors,
sizeof(*colors));
if (colors == (XColor *) NULL)
{
ximage=(XImage *) RelinquishMagickMemory(ximage);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
for (i=0; i < (ssize_t) header.ncolors; i++)
{
count=ReadBlob(image,sz_XWDColor,(unsigned char *) &color);
if (count != sz_XWDColor)
{
colors=(XColor *) RelinquishMagickMemory(colors);
ximage=(XImage *) RelinquishMagickMemory(ximage);
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
}
colors[i].pixel=color.pixel;
colors[i].red=color.red;
colors[i].green=color.green;
colors[i].blue=color.blue;
colors[i].flags=(char) color.flags;
if (color.flags != 0)
authentic_colormap=MagickTrue;
}
/*
Ensure the header byte-order is most-significant byte first.
*/
lsb_first=1;
if ((int) (*(char *) &lsb_first) != 0)
for (i=0; i < (ssize_t) header.ncolors; i++)
{
MSBOrderLong((unsigned char *) &colors[i].pixel,
sizeof(colors[i].pixel));
MSBOrderShort((unsigned char *) &colors[i].red,3*
sizeof(colors[i].red));
}
}
/*
Allocate the pixel buffer.
*/
length=(size_t) ximage->bytes_per_line*ximage->height;
if (CheckOverflowException(length,ximage->bytes_per_line,ximage->height))
{
if (header.ncolors != 0)
colors=(XColor *) RelinquishMagickMemory(colors);
ximage=(XImage *) RelinquishMagickMemory(ximage);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
if (ximage->format != ZPixmap)
{
size_t
extent;
extent=length;
length*=ximage->depth;
if (CheckOverflowException(length,extent,ximage->depth))
{
if (header.ncolors != 0)
colors=(XColor *) RelinquishMagickMemory(colors);
ximage=(XImage *) RelinquishMagickMemory(ximage);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
}
ximage->data=(char *) AcquireQuantumMemory(length,sizeof(*ximage->data));
if (ximage->data == (char *) NULL)
{
if (header.ncolors != 0)
colors=(XColor *) RelinquishMagickMemory(colors);
ximage=(XImage *) RelinquishMagickMemory(ximage);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
count=ReadBlob(image,length,(unsigned char *) ximage->data);
if (count != (ssize_t) length)
{
if (header.ncolors != 0)
colors=(XColor *) RelinquishMagickMemory(colors);
ximage->data=DestroyString(ximage->data);
ximage=(XImage *) RelinquishMagickMemory(ximage);
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
}
/*
Convert image to MIFF format.
*/
image->columns=(size_t) ximage->width;
image->rows=(size_t) ximage->height;
image->depth=8;
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
{
if (header.ncolors != 0)
colors=(XColor *) RelinquishMagickMemory(colors);
ximage->data=DestroyString(ximage->data);
ximage=(XImage *) RelinquishMagickMemory(ximage);
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
if ((header.ncolors == 0U) || (ximage->red_mask != 0) ||
(ximage->green_mask != 0) || (ximage->blue_mask != 0))
image->storage_class=DirectClass;
else
image->storage_class=PseudoClass;
image->colors=header.ncolors;
if (image_info->ping == MagickFalse)
switch (image->storage_class)
{
case DirectClass:
default:
{
register size_t
color;
size_t
blue_mask,
blue_shift,
green_mask,
green_shift,
red_mask,
red_shift;
/*
Determine shift and mask for red, green, and blue.
*/
red_mask=ximage->red_mask;
red_shift=0;
while ((red_mask != 0) && ((red_mask & 0x01) == 0))
{
red_mask>>=1;
red_shift++;
}
green_mask=ximage->green_mask;
green_shift=0;
while ((green_mask != 0) && ((green_mask & 0x01) == 0))
{
green_mask>>=1;
green_shift++;
}
blue_mask=ximage->blue_mask;
blue_shift=0;
while ((blue_mask != 0) && ((blue_mask & 0x01) == 0))
{
blue_mask>>=1;
blue_shift++;
}
/*
Convert X image to DirectClass packets.
*/
if ((image->colors != 0) && (authentic_colormap != MagickFalse))
for (y=0; y < (ssize_t) image->rows; y++)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=XGetPixel(ximage,(int) x,(int) y);
index=ConstrainColormapIndex(image,(ssize_t) (pixel >>
red_shift) & red_mask);
SetPixelRed(q,ScaleShortToQuantum(colors[(ssize_t) index].red));
index=ConstrainColormapIndex(image,(ssize_t) (pixel >>
green_shift) & green_mask);
SetPixelGreen(q,ScaleShortToQuantum(colors[(ssize_t)
index].green));
index=ConstrainColormapIndex(image,(ssize_t) (pixel >>
blue_shift) & blue_mask);
SetPixelBlue(q,ScaleShortToQuantum(colors[(ssize_t) index].blue));
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
else
for (y=0; y < (ssize_t) image->rows; y++)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=XGetPixel(ximage,(int) x,(int) y);
color=(pixel >> red_shift) & red_mask;
if (red_mask != 0)
color=(color*65535UL)/red_mask;
SetPixelRed(q,ScaleShortToQuantum((unsigned short) color));
color=(pixel >> green_shift) & green_mask;
if (green_mask != 0)
color=(color*65535UL)/green_mask;
SetPixelGreen(q,ScaleShortToQuantum((unsigned short) color));
color=(pixel >> blue_shift) & blue_mask;
if (blue_mask != 0)
color=(color*65535UL)/blue_mask;
SetPixelBlue(q,ScaleShortToQuantum((unsigned short) color));
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
break;
}
case PseudoClass:
{
/*
Convert X image to PseudoClass packets.
*/
if (AcquireImageColormap(image,image->colors) == MagickFalse)
{
if (header.ncolors != 0)
colors=(XColor *) RelinquishMagickMemory(colors);
ximage->data=DestroyString(ximage->data);
ximage=(XImage *) RelinquishMagickMemory(ximage);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
for (i=0; i < (ssize_t) image->colors; i++)
{
image->colormap[i].red=ScaleShortToQuantum(colors[i].red);
image->colormap[i].green=ScaleShortToQuantum(colors[i].green);
image->colormap[i].blue=ScaleShortToQuantum(colors[i].blue);
}
for (y=0; y < (ssize_t) image->rows; y++)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetAuthenticIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
index=ConstrainColormapIndex(image,(ssize_t) XGetPixel(ximage,(int)
x,(int) y));
SetPixelIndex(indexes+x,index);
SetPixelRGBO(q,image->colormap+(ssize_t) index);
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
break;
}
}
/*
Free image and colormap.
*/
if (header.ncolors != 0)
colors=(XColor *) RelinquishMagickMemory(colors);
ximage->data=DestroyString(ximage->data);
ximage=(XImage *) RelinquishMagickMemory(ximage);
if (EOFBlob(image) != MagickFalse)
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
} | 1 | [
"CWE-125"
] | ImageMagick6 | 6d46f0a046a58e7c4567a86ba1b9cb847d5b1968 | 145,582,269,035,747,490,000,000,000,000,000,000,000 | 506 | https://github.com/ImageMagick/ImageMagick/issues/1553 |
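The CheckOverflowException macro defined at the top of this reader catches size_t multiplication wrap by dividing back; a small hedged illustration with hypothetical values:

```c
#include <stdio.h>
#include <stddef.h>

int main(void)
{
    /* On a 32-bit size_t, 70000 * 70000 wraps, so dividing the product
     * by one factor no longer recovers the other. */
    size_t width = 70000, height = 70000;
    size_t length = width * height;       /* may wrap on 32-bit builds */

    if ((height != 0) && (length / height != width))
        fprintf(stderr, "overflow detected: reject image as corrupt\n");
    return 0;
}
```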
static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
struct ms_data *ms = hid_get_drvdata(hdev);
unsigned long quirks = ms->quirks;
/*
* Microsoft Wireless Desktop Receiver (Model 1028) has
* 'Usage Min/Max' where it ought to have 'Physical Min/Max'
*/
if ((quirks & MS_RDESC) && *rsize == 571 && rdesc[557] == 0x19 &&
rdesc[559] == 0x29) {
hid_info(hdev, "fixing up Microsoft Wireless Receiver Model 1028 report descriptor\n");
rdesc[557] = 0x35;
rdesc[559] = 0x45;
}
return rdesc;
} | 0 | [
"CWE-787"
] | linux | d9d4b1e46d9543a82c23f6df03f4ad697dab361b | 329,273,168,046,577,850,000,000,000,000,000,000,000 | 18 | HID: Fix assumption that devices have inputs
The syzbot fuzzer found a slab-out-of-bounds write bug in the hid-gaff
driver. The problem is caused by the driver's assumption that the
device must have an input report. While this will be true for all
normal HID input devices, a suitably malicious device can violate the
assumption.
The same assumption is present in over a dozen other HID drivers.
This patch fixes them by checking that the list of hid_inputs for the
hid_device is nonempty before allowing it to be used.
Reported-and-tested-by: [email protected]
Signed-off-by: Alan Stern <[email protected]>
CC: <[email protected]>
Signed-off-by: Benjamin Tissoires <[email protected]> |
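A hedged sketch of the guard this patch adds across the affected drivers; the list and field names match the mainline struct hid_device, while the exact placement and error path vary per driver:

```c
/* Sketch: a malicious device may expose no input reports at all, so
 * check before taking the first entry of hid->inputs. */
struct hid_input *hidinput;
struct input_dev *dev;

if (list_empty(&hid->inputs)) {
	hid_err(hid, "no inputs found\n");
	return -ENODEV;
}
hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
dev = hidinput->input;
```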
TRIO_PRIVATE void TrioWriteStringCharacter TRIO_ARGS3((self, ch, flags), trio_class_t* self, int ch,
trio_flags_t flags)
{
if (flags & FLAGS_ALTERNATIVE)
{
if (!isprint(ch))
{
/*
* Non-printable characters are converted to C escapes or
* \number, if no C escape exists.
*/
self->OutStream(self, CHAR_BACKSLASH);
switch (ch)
{
case '\007':
self->OutStream(self, 'a');
break;
case '\b':
self->OutStream(self, 'b');
break;
case '\f':
self->OutStream(self, 'f');
break;
case '\n':
self->OutStream(self, 'n');
break;
case '\r':
self->OutStream(self, 'r');
break;
case '\t':
self->OutStream(self, 't');
break;
case '\v':
self->OutStream(self, 'v');
break;
case '\\':
self->OutStream(self, '\\');
break;
default:
self->OutStream(self, 'x');
TrioWriteNumber(self, (trio_uintmax_t)ch, FLAGS_UNSIGNED | FLAGS_NILPADDING, 2,
2, BASE_HEX);
break;
}
}
else if (ch == CHAR_BACKSLASH)
{
self->OutStream(self, CHAR_BACKSLASH);
self->OutStream(self, CHAR_BACKSLASH);
}
else
{
self->OutStream(self, ch);
}
}
else
{
self->OutStream(self, ch);
}
} | 0 | [
"CWE-190",
"CWE-125"
] | FreeRDP | 05cd9ea2290d23931f615c1b004d4b2e69074e27 | 252,898,641,321,338,600,000,000,000,000,000,000,000 | 60 | Fixed TrioParse and trio_length limits.
CVE-2020-4030 thanks to @antonio-morales for finding this. |
is_dev_fd_file(char_u *fname)
{
return (STRNCMP(fname, "/dev/fd/", 8) == 0
&& VIM_ISDIGIT(fname[8])
&& *skipdigits(fname + 9) == NUL
&& (fname[9] != NUL
|| (fname[8] != '0' && fname[8] != '1' && fname[8] != '2')));
} | 0 | [
"CWE-200",
"CWE-668"
] | vim | 5a73e0ca54c77e067c3b12ea6f35e3e8681e8cf8 | 61,330,810,512,002,420,000,000,000,000,000,000,000 | 8 | patch 8.0.1263: others can read the swap file if a user is careless
Problem: Others can read the swap file if a user is careless with his
primary group.
Solution: If the group permissions allow for reading but the world
permissions don't, make sure the group is right. |
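A hedged sketch of the permission test the message implies; this is an illustrative stat(2)-based check, not the literal vim patch:

```c
#include <sys/stat.h>

/* Sketch: a swap file readable by group but not by world is only
 * trustworthy if its group matches the group of the edited file. */
static int swap_group_ok(const struct stat *file_st,
                         const struct stat *swap_st)
{
    mode_t m = swap_st->st_mode;

    if ((m & S_IRGRP) && !(m & S_IROTH))
        return swap_st->st_gid == file_st->st_gid;
    return 1;
}
```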
void ElectronBrowserClient::RegisterNonNetworkSubresourceURLLoaderFactories(
int render_process_id,
int render_frame_id,
NonNetworkURLLoaderFactoryMap* factories) {
auto* render_process_host =
content::RenderProcessHost::FromID(render_process_id);
DCHECK(render_process_host);
if (!render_process_host || !render_process_host->GetBrowserContext())
return;
content::RenderFrameHost* frame_host =
content::RenderFrameHost::FromID(render_process_id, render_frame_id);
content::WebContents* web_contents =
content::WebContents::FromRenderFrameHost(frame_host);
// Allow accessing file:// subresources from non-file protocols if web
// security is disabled.
bool allow_file_access = false;
if (web_contents) {
const auto& web_preferences = web_contents->GetOrCreateWebPreferences();
if (!web_preferences.web_security_enabled)
allow_file_access = true;
}
ProtocolRegistry::FromBrowserContext(render_process_host->GetBrowserContext())
->RegisterURLLoaderFactories(factories, allow_file_access);
#if BUILDFLAG(ENABLE_ELECTRON_EXTENSIONS)
auto factory = extensions::CreateExtensionURLLoaderFactory(render_process_id,
render_frame_id);
if (factory)
factories->emplace(extensions::kExtensionScheme, std::move(factory));
if (!web_contents)
return;
extensions::ElectronExtensionWebContentsObserver* web_observer =
extensions::ElectronExtensionWebContentsObserver::FromWebContents(
web_contents);
// There is nothing to do if no ElectronExtensionWebContentsObserver is
// attached to the |web_contents|.
if (!web_observer)
return;
const extensions::Extension* extension =
web_observer->GetExtensionFromFrame(frame_host, false);
if (!extension)
return;
// Support for chrome:// scheme if appropriate.
if (extension->is_extension() &&
extensions::Manifest::IsComponentLocation(extension->location())) {
// Components of chrome that are implemented as extensions or platform apps
// are allowed to use chrome://resources/ and chrome://theme/ URLs.
factories->emplace(content::kChromeUIScheme,
content::CreateWebUIURLLoaderFactory(
frame_host, content::kChromeUIScheme,
{content::kChromeUIResourcesHost}));
}
// Extensions with the necessary permissions get access to file:// URLs that
// gets approval from ChildProcessSecurityPolicy. Keep this logic in sync with
// ExtensionWebContentsObserver::RenderFrameCreated.
extensions::Manifest::Type type = extension->GetType();
if (type == extensions::Manifest::TYPE_EXTENSION &&
AllowFileAccess(extension->id(), web_contents->GetBrowserContext())) {
factories->emplace(url::kFileScheme,
FileURLLoaderFactory::Create(render_process_id));
}
#endif
} | 0 | [] | electron | ea1f402417022c59c0794e97c87e6be2553989e7 | 304,641,007,516,492,850,000,000,000,000,000,000,000 | 72 | fix: ensure ElectronBrowser mojo service is only bound to appropriate render frames (#33323) (#33350)
* fix: ensure ElectronBrowser mojo service is only bound to authorized render frames
Notes: no-notes
* refactor: extract electron API IPC to its own mojo interface
* fix: just check main frame not primary main frame |
static ssize_t interf_grp_compatible_id_show(struct config_item *item,
char *page)
{
memcpy(page, to_usb_os_desc(item)->ext_compat_id, 8);
return 8;
} | 0 | [
"CWE-125"
] | linux | 15753588bcd4bbffae1cca33c8ced5722477fe1f | 313,667,818,318,458,000,000,000,000,000,000,000,000 | 6 | USB: gadget: fix illegal array access in binding with UDC
FuzzUSB (a variant of syzkaller) found an illegal array access
using an incorrect index while binding a gadget with UDC.
Reference: https://www.spinics.net/lists/linux-usb/msg194331.html
This bug occurs when a size variable used for a buffer
is misused to access its strcpy-ed buffer.
Given a buffer along with its size variable (taken from user input),
from which a new buffer is created using kstrdup().
Due to the original buffer containing 0 value in the middle,
the size of the kstrdup-ed buffer becomes smaller than that of the original.
So accessing the kstrdup-ed buffer with the same size variable
triggers memory access violation.
The fix makes sure no zero value in the buffer,
by comparing the strlen() of the orignal buffer with the size variable,
so that the access to the kstrdup-ed buffer is safe.
BUG: KASAN: slab-out-of-bounds in gadget_dev_desc_UDC_store+0x1ba/0x200
drivers/usb/gadget/configfs.c:266
Read of size 1 at addr ffff88806a55dd7e by task syz-executor.0/17208
CPU: 2 PID: 17208 Comm: syz-executor.0 Not tainted 5.6.8 #1
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0xce/0x128 lib/dump_stack.c:118
print_address_description.constprop.4+0x21/0x3c0 mm/kasan/report.c:374
__kasan_report+0x131/0x1b0 mm/kasan/report.c:506
kasan_report+0x12/0x20 mm/kasan/common.c:641
__asan_report_load1_noabort+0x14/0x20 mm/kasan/generic_report.c:132
gadget_dev_desc_UDC_store+0x1ba/0x200 drivers/usb/gadget/configfs.c:266
flush_write_buffer fs/configfs/file.c:251 [inline]
configfs_write_file+0x2f1/0x4c0 fs/configfs/file.c:283
__vfs_write+0x85/0x110 fs/read_write.c:494
vfs_write+0x1cd/0x510 fs/read_write.c:558
ksys_write+0x18a/0x220 fs/read_write.c:611
__do_sys_write fs/read_write.c:623 [inline]
__se_sys_write fs/read_write.c:620 [inline]
__x64_sys_write+0x73/0xb0 fs/read_write.c:620
do_syscall_64+0x9e/0x510 arch/x86/entry/common.c:294
entry_SYSCALL_64_after_hwframe+0x49/0xbe
Signed-off-by: Kyungtae Kim <[email protected]>
Reported-and-tested-by: Kyungtae Kim <[email protected]>
Cc: Felipe Balbi <[email protected]>
Cc: stable <[email protected]>
Link: https://lore.kernel.org/r/20200510054326.GA19198@pizza01
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
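A hedged sketch of the check the message describes in gadget_dev_desc_UDC_store(): validate that strlen() of the user buffer agrees with the supplied length before kstrdup(), so an embedded NUL cannot shrink the copy:

```c
/* Sketch: page is the user-supplied buffer and len its size; an
 * embedded '\0' would make the kstrdup()ed copy shorter than len. */
if (strlen(page) < len)
	return -EOVERFLOW;
name = kstrdup(page, GFP_KERNEL);
if (!name)
	return -ENOMEM;
```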
static inline bool cfs_bandwidth_used(void)
{
return static_key_false(&__cfs_bandwidth_used);
} | 0 | [
"CWE-400",
"CWE-703",
"CWE-835"
] | linux | c40f7d74c741a907cfaeb73a7697081881c497d0 | 312,680,833,042,602,830,000,000,000,000,000,000,000 | 4 | sched/fair: Fix infinite loop in update_blocked_averages() by reverting a9e7f6544b9c
Zhipeng Xie, Xie XiuQi and Sargun Dhillon reported lockups in the
scheduler under high loads, starting at around the v4.18 time frame,
and Zhipeng Xie tracked it down to bugs in the rq->leaf_cfs_rq_list
manipulation.
Do a (manual) revert of:
a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
It turns out that the list_del_leaf_cfs_rq() introduced by this commit
has a surprising property that was not considered in followup commits
such as:
9c2791f936ef ("sched/fair: Fix hierarchical order in rq->leaf_cfs_rq_list")
As Vincent Guittot explains:
"I think that there is a bigger problem with commit a9e7f6544b9c and
cfs_rq throttling:
Let's take the example of the following topology TG2 --> TG1 --> root:
1) The 1st time a task is enqueued, we will add TG2 cfs_rq then TG1
cfs_rq to leaf_cfs_rq_list and we are sure to do the whole branch in
one path because it has never been used and can't be throttled so
tmp_alone_branch will point to leaf_cfs_rq_list at the end.
2) Then TG1 is throttled
3) and we add TG3 as a new child of TG1.
4) The 1st enqueue of a task on TG3 will add TG3 cfs_rq just before TG1
cfs_rq and tmp_alone_branch will stay on rq->leaf_cfs_rq_list.
With commit a9e7f6544b9c, we can del a cfs_rq from rq->leaf_cfs_rq_list.
So if the load of TG1 cfs_rq becomes NULL before step 2) above, TG1
cfs_rq is removed from the list.
Then at step 4), TG3 cfs_rq is added at the beginning of rq->leaf_cfs_rq_list
but tmp_alone_branch still points to TG3 cfs_rq because its throttled
parent can't be enqueued when the lock is released.
tmp_alone_branch doesn't point to rq->leaf_cfs_rq_list whereas it should.
So if TG3 cfs_rq is removed or destroyed before tmp_alone_branch
points on another TG cfs_rq, the next TG cfs_rq that will be added,
will be linked outside rq->leaf_cfs_rq_list - which is bad.
In addition, we can break the ordering of the cfs_rq in
rq->leaf_cfs_rq_list but this ordering is used to update and
propagate the update from leaf down to root."
Instead of trying to work through all these cases and trying to reproduce
the very high loads that produced the lockup to begin with, simplify
the code temporarily by reverting a9e7f6544b9c - which change was clearly
not thought through completely.
This (hopefully) gives us a kernel that doesn't lock up so people
can continue to enjoy their holidays without worrying about regressions. ;-)
[ mingo: Wrote changelog, fixed weird spelling in code comment while at it. ]
Analyzed-by: Xie XiuQi <[email protected]>
Analyzed-by: Vincent Guittot <[email protected]>
Reported-by: Zhipeng Xie <[email protected]>
Reported-by: Sargun Dhillon <[email protected]>
Reported-by: Xie XiuQi <[email protected]>
Tested-by: Zhipeng Xie <[email protected]>
Tested-by: Sargun Dhillon <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Acked-by: Vincent Guittot <[email protected]>
Cc: <[email protected]> # v4.13+
Cc: Bin Li <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Fixes: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]> |
static int snd_rawmidi_dev_free(struct snd_device *device)
{
struct snd_rawmidi *rmidi = device->device_data;
return snd_rawmidi_free(rmidi);
} | 0 | [
"CWE-415"
] | linux | 39675f7a7c7e7702f7d5341f1e0d01db746543a0 | 24,376,127,354,720,110,000,000,000,000,000,000,000 | 5 | ALSA: rawmidi: Change resized buffers atomically
The SNDRV_RAWMIDI_IOCTL_PARAMS ioctl may resize the buffers and the
current code is racy. For example, the sequencer client may write to
buffer while it being resized.
As a simple workaround, let's switch to the resized buffer inside the
stream runtime lock.
Reported-by: [email protected]
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]> |
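A hedged sketch of the swap the message describes; newbuf is assumed freshly allocated for the new size and the field names follow the mainline snd_rawmidi_runtime. The point is that the pointer, size, and positions change together under runtime->lock:

```c
/* Sketch (output stream shown; an input stream resets avail to 0). */
spin_lock_irq(&runtime->lock);
oldbuf = runtime->buffer;
runtime->buffer = newbuf;
runtime->buffer_size = params->buffer_size;
runtime->avail = runtime->buffer_size;
runtime->appl_ptr = runtime->hw_ptr = 0;
spin_unlock_irq(&runtime->lock);
kfree(oldbuf);	/* safe: no reader can still hold the old pointer */
```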
static int vhost_worker(void *data)
{
struct vhost_dev *dev = data;
struct vhost_work *work = NULL;
unsigned uninitialized_var(seq);
mm_segment_t oldfs = get_fs();
set_fs(USER_DS);
use_mm(dev->mm);
for (;;) {
/* mb paired w/ kthread_stop */
set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irq(&dev->work_lock);
if (work) {
work->done_seq = seq;
if (work->flushing)
wake_up_all(&work->done);
}
if (kthread_should_stop()) {
spin_unlock_irq(&dev->work_lock);
__set_current_state(TASK_RUNNING);
break;
}
if (!list_empty(&dev->work_list)) {
work = list_first_entry(&dev->work_list,
struct vhost_work, node);
list_del_init(&work->node);
seq = work->queue_seq;
} else
work = NULL;
spin_unlock_irq(&dev->work_lock);
if (work) {
__set_current_state(TASK_RUNNING);
work->fn(work);
if (need_resched())
schedule();
} else
schedule();
}
unuse_mm(dev->mm);
set_fs(oldfs);
return 0;
} | 0 | [] | linux-2.6 | bd97120fc3d1a11f3124c7c9ba1d91f51829eb85 | 216,725,767,664,480,030,000,000,000,000,000,000,000 | 48 | vhost: fix length for cross region descriptor
If a single descriptor crosses a region, the
second chunk length should be decremented
by the size translated so far; instead it includes
the full descriptor length.
Signed-off-by: Michael S. Tsirkin <[email protected]>
Acked-by: Jason Wang <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
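A hedged sketch of the one-line change the message describes inside vhost's translate_desc(), where s accumulates the bytes already translated for this descriptor and size is what remains in the current memory region:

```c
/* Before: each chunk could claim the full descriptor length again. */
_iov->iov_len = min((u64)len, size);
/* After: cap the chunk by what is left of the descriptor. */
_iov->iov_len = min((u64)len - s, size);
```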
wait_connectable(int fd)
{
int sockerr, revents;
socklen_t sockerrlen;
sockerrlen = (socklen_t)sizeof(sockerr);
if (getsockopt(fd, SOL_SOCKET, SO_ERROR, (void *)&sockerr, &sockerrlen) < 0)
return -1;
/* necessary for non-blocking sockets (at least ECONNREFUSED) */
switch (sockerr) {
case 0:
break;
#ifdef EALREADY
case EALREADY:
#endif
#ifdef EISCONN
case EISCONN:
#endif
#ifdef ECONNREFUSED
case ECONNREFUSED:
#endif
#ifdef EHOSTUNREACH
case EHOSTUNREACH:
#endif
errno = sockerr;
return -1;
}
/*
* Stevens' book says a successful finish turns on RB_WAITFD_OUT and
* a failure turns on both RB_WAITFD_IN and RB_WAITFD_OUT.
* So it's enough to wait only RB_WAITFD_OUT and check the pending error
* by getsockopt().
*
* Note: rb_wait_for_single_fd already retries on EINTR/ERESTART
*/
revents = rb_wait_for_single_fd(fd, RB_WAITFD_IN|RB_WAITFD_OUT, NULL);
if (revents < 0)
return -1;
sockerrlen = (socklen_t)sizeof(sockerr);
if (getsockopt(fd, SOL_SOCKET, SO_ERROR, (void *)&sockerr, &sockerrlen) < 0)
return -1;
switch (sockerr) {
case 0:
/*
* be defensive in case some platforms set SO_ERROR on the original,
* interrupted connect()
*/
case EINTR:
#ifdef ERESTART
case ERESTART:
#endif
case EAGAIN:
#ifdef EINPROGRESS
case EINPROGRESS:
#endif
#ifdef EALREADY
case EALREADY:
#endif
#ifdef EISCONN
case EISCONN:
#endif
return 0; /* success */
default:
/* likely (but not limited to): ECONNREFUSED, ETIMEDOUT, EHOSTUNREACH */
errno = sockerr;
return -1;
}
return 0;
} | 0 | [
"CWE-908"
] | ruby | 61b7f86248bd121be2e83768be71ef289e8e5b90 | 152,575,660,291,531,540,000,000,000,000,000,000,000 | 75 | ext/socket/init.c: do not return uninitialized buffer
Resize the string buffer only if some data is received, in
BasicSocket#read_nonblock and related methods.
Co-Authored-By: Samuel Williams <[email protected]> |
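A hedged sketch of the pattern the message describes; rb_str_set_len() and RSTRING_PTR() are real CRuby C-API calls, while the surrounding variable names are illustrative:

```c
/* Sketch: expose only the bytes recv(2) actually filled in, so the
 * caller never sees the uninitialized tail of the buffer. */
ssize_t n = recv(fd, RSTRING_PTR(str), buflen, MSG_DONTWAIT);
if (n < 0)
    rb_sys_fail("recvfrom(2)");
rb_str_set_len(str, n);
```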
xmlParsePubidLiteral(xmlParserCtxtPtr ctxt) {
xmlChar *buf = NULL;
int len = 0;
int size = XML_PARSER_BUFFER_SIZE;
xmlChar cur;
xmlChar stop;
int count = 0;
xmlParserInputState oldstate = ctxt->instate;
SHRINK;
if (RAW == '"') {
NEXT;
stop = '"';
} else if (RAW == '\'') {
NEXT;
stop = '\'';
} else {
xmlFatalErr(ctxt, XML_ERR_LITERAL_NOT_STARTED, NULL);
return(NULL);
}
buf = (xmlChar *) xmlMallocAtomic(size * sizeof(xmlChar));
if (buf == NULL) {
xmlErrMemory(ctxt, NULL);
return(NULL);
}
ctxt->instate = XML_PARSER_PUBLIC_LITERAL;
cur = CUR;
while ((IS_PUBIDCHAR_CH(cur)) && (cur != stop)) { /* checked */
if (len + 1 >= size) {
xmlChar *tmp;
if ((size > XML_MAX_NAME_LENGTH) &&
((ctxt->options & XML_PARSE_HUGE) == 0)) {
xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "Public ID");
xmlFree(buf);
return(NULL);
}
size *= 2;
tmp = (xmlChar *) xmlRealloc(buf, size * sizeof(xmlChar));
if (tmp == NULL) {
xmlErrMemory(ctxt, NULL);
xmlFree(buf);
return(NULL);
}
buf = tmp;
}
buf[len++] = cur;
count++;
if (count > 50) {
GROW;
count = 0;
if (ctxt->instate == XML_PARSER_EOF) {
xmlFree(buf);
return(NULL);
}
}
NEXT;
cur = CUR;
if (cur == 0) {
GROW;
SHRINK;
cur = CUR;
}
}
buf[len] = 0;
if (cur != stop) {
xmlFatalErr(ctxt, XML_ERR_LITERAL_NOT_FINISHED, NULL);
} else {
NEXT;
}
ctxt->instate = oldstate;
return(buf);
} | 0 | [
"CWE-119"
] | libxml2 | 6a36fbe3b3e001a8a840b5c1fdd81cefc9947f0d | 207,307,098,081,022,050,000,000,000,000,000,000,000 | 73 | Fix potential out of bound access |
execute_connection (command, asynchronous, pipe_in, pipe_out, fds_to_close)
COMMAND *command;
int asynchronous, pipe_in, pipe_out;
struct fd_bitmap *fds_to_close;
{
COMMAND *tc, *second;
int ignore_return, exec_result, was_error_trap, invert;
volatile int save_line_number;
ignore_return = (command->flags & CMD_IGNORE_RETURN) != 0;
switch (command->value.Connection->connector)
{
/* Do the first command asynchronously. */
case '&':
tc = command->value.Connection->first;
if (tc == 0)
return (EXECUTION_SUCCESS);
if (ignore_return)
tc->flags |= CMD_IGNORE_RETURN;
tc->flags |= CMD_AMPERSAND;
/* If this shell was compiled without job control support,
if we are currently in a subshell via `( xxx )', or if job
control is not active then the standard input for an
asynchronous command is forced to /dev/null. */
#if defined (JOB_CONTROL)
if ((subshell_environment || !job_control) && !stdin_redir)
#else
if (!stdin_redir)
#endif /* JOB_CONTROL */
tc->flags |= CMD_STDIN_REDIR;
exec_result = execute_command_internal (tc, 1, pipe_in, pipe_out, fds_to_close);
QUIT;
if (tc->flags & CMD_STDIN_REDIR)
tc->flags &= ~CMD_STDIN_REDIR;
second = command->value.Connection->second;
if (second)
{
if (ignore_return)
second->flags |= CMD_IGNORE_RETURN;
exec_result = execute_command_internal (second, asynchronous, pipe_in, pipe_out, fds_to_close);
}
break;
/* Just call execute command on both sides. */
case ';':
if (ignore_return)
{
if (command->value.Connection->first)
command->value.Connection->first->flags |= CMD_IGNORE_RETURN;
if (command->value.Connection->second)
command->value.Connection->second->flags |= CMD_IGNORE_RETURN;
}
executing_list++;
QUIT;
execute_command (command->value.Connection->first);
QUIT;
exec_result = execute_command_internal (command->value.Connection->second,
asynchronous, pipe_in, pipe_out,
fds_to_close);
executing_list--;
break;
case '|':
was_error_trap = signal_is_trapped (ERROR_TRAP) && signal_is_ignored (ERROR_TRAP) == 0;
invert = (command->flags & CMD_INVERT_RETURN) != 0;
ignore_return = (command->flags & CMD_IGNORE_RETURN) != 0;
line_number_for_err_trap = line_number;
exec_result = execute_pipeline (command, asynchronous, pipe_in, pipe_out, fds_to_close);
if (was_error_trap && ignore_return == 0 && invert == 0 && exec_result != EXECUTION_SUCCESS)
{
last_command_exit_value = exec_result;
save_line_number = line_number;
line_number = line_number_for_err_trap;
run_error_trap ();
line_number = save_line_number;
}
if (ignore_return == 0 && invert == 0 && exit_immediately_on_error && exec_result != EXECUTION_SUCCESS)
{
last_command_exit_value = exec_result;
run_pending_traps ();
jump_to_top_level (ERREXIT);
}
break;
case AND_AND:
case OR_OR:
if (asynchronous)
{
/* If we have something like `a && b &' or `a || b &', run the
&& or || stuff in a subshell. Force a subshell and just call
execute_command_internal again. Leave asynchronous on
so that we get a report from the parent shell about the
background job. */
command->flags |= CMD_FORCE_SUBSHELL;
exec_result = execute_command_internal (command, 1, pipe_in, pipe_out, fds_to_close);
break;
}
/* Execute the first command. If the result of that is successful
and the connector is AND_AND, or the result is not successful
and the connector is OR_OR, then execute the second command,
otherwise return. */
executing_list++;
if (command->value.Connection->first)
command->value.Connection->first->flags |= CMD_IGNORE_RETURN;
exec_result = execute_command (command->value.Connection->first);
QUIT;
if (((command->value.Connection->connector == AND_AND) &&
(exec_result == EXECUTION_SUCCESS)) ||
((command->value.Connection->connector == OR_OR) &&
(exec_result != EXECUTION_SUCCESS)))
{
if (ignore_return && command->value.Connection->second)
command->value.Connection->second->flags |= CMD_IGNORE_RETURN;
exec_result = execute_command (command->value.Connection->second);
}
executing_list--;
break;
default:
command_error ("execute_connection", CMDERR_BADCONN, command->value.Connection->connector, 0);
jump_to_top_level (DISCARD);
exec_result = EXECUTION_FAILURE;
}
return exec_result;
} | 0 | [] | bash | 863d31ae775d56b785dc5b0105b6d251515d81d5 | 117,334,965,504,995,270,000,000,000,000,000,000,000 | 142 | commit bash-20120224 snapshot |
static int parse_multipart(
ogs_sbi_message_t *message, ogs_sbi_http_message_t *http)
{
char *boundary = NULL;
int i;
multipart_parser_settings settings;
multipart_parser_data_t data;
multipart_parser *parser = NULL;
ogs_assert(message);
ogs_assert(http);
memset(&settings, 0, sizeof(settings));
settings.on_header_field = &on_header_field;
settings.on_header_value = &on_header_value;
settings.on_part_data = &on_part_data;
settings.on_part_data_end = &on_part_data_end;
for (i = 0; i < http->content_length; i++) {
if (http->content[i] == '\r' && http->content[i+1] == '\n')
break;
}
if (i >= http->content_length) {
ogs_error("Invalid HTTP content [%d]", i);
ogs_log_hexdump(OGS_LOG_ERROR,
(unsigned char *)http->content, http->content_length);
return OGS_ERROR;
}
boundary = ogs_strndup(http->content, i);
ogs_assert(boundary);
parser = multipart_parser_init(boundary, &settings);
ogs_assert(parser);
memset(&data, 0, sizeof(data));
multipart_parser_set_data(parser, &data);
multipart_parser_execute(parser, http->content, http->content_length);
multipart_parser_free(parser);
ogs_free(boundary);
if (data.num_of_part > OGS_SBI_MAX_NUM_OF_PART) {
/* Overflow Issues #1247 */
ogs_fatal("Overflow num_of_part[%d]", data.num_of_part);
ogs_assert_if_reached();
}
for (i = 0; i < data.num_of_part; i++) {
SWITCH(data.part[i].content_type)
CASE(OGS_SBI_CONTENT_JSON_TYPE)
parse_json(message,
data.part[i].content_type, data.part[i].content);
if (data.part[i].content_id)
ogs_free(data.part[i].content_id);
if (data.part[i].content_type)
ogs_free(data.part[i].content_type);
if (data.part[i].content)
ogs_free(data.part[i].content);
break;
CASE(OGS_SBI_CONTENT_5GNAS_TYPE)
CASE(OGS_SBI_CONTENT_NGAP_TYPE)
http->part[http->num_of_part].content_id =
data.part[i].content_id;
http->part[http->num_of_part].content_type =
data.part[i].content_type;
http->part[http->num_of_part].pkbuf =
ogs_pkbuf_alloc(NULL, data.part[i].content_length);
ogs_expect_or_return_val(
http->part[http->num_of_part].pkbuf, OGS_ERROR);
ogs_pkbuf_put_data(http->part[http->num_of_part].pkbuf,
data.part[i].content, data.part[i].content_length);
message->part[message->num_of_part].content_id =
http->part[http->num_of_part].content_id;
message->part[message->num_of_part].content_type =
http->part[http->num_of_part].content_type;
message->part[message->num_of_part].pkbuf =
ogs_pkbuf_copy(http->part[http->num_of_part].pkbuf);
ogs_expect_or_return_val(
message->part[message->num_of_part].pkbuf, OGS_ERROR);
http->num_of_part++;
message->num_of_part++;
if (data.part[i].content)
ogs_free(data.part[i].content);
break;
DEFAULT
ogs_error("Unknown content-type[%s]", data.part[i].content_type);
if (data.part[i].content_id)
ogs_free(data.part[i].content_id);
if (data.part[i].content_type)
ogs_free(data.part[i].content_type);
END
}
if (data.header_field)
ogs_free(data.header_field);
return OGS_OK;
} | 0 | [
"CWE-476",
"CWE-787"
] | open5gs | d919b2744cd05abae043490f0a3dd1946c1ccb8c | 253,611,112,796,523,620,000,000,000,000,000,000,000 | 109 | [AMF] fix the memory problem (#1247)
1. memory corruption
- Overflow num_of_part in SBI message
2. null pointer dereference
- n2InfoContent->ngap_ie_type |
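The overflow is the classic unchecked-counter-into-fixed-array pattern. A minimal standalone sketch of the guard the fix implies (OGS_SBI_MAX_NUM_OF_PART is the limit's real name; its value and the harness here are illustrative):

#include <stdio.h>

#define OGS_SBI_MAX_NUM_OF_PART 8           /* stand-in value */

struct part { int content_length; };

struct parser_data {
    int num_of_part;
    struct part part[OGS_SBI_MAX_NUM_OF_PART];
};

/* Reject a new part up front instead of writing past part[]. */
static int add_part(struct parser_data *d, int len)
{
    if (d->num_of_part >= OGS_SBI_MAX_NUM_OF_PART)
        return -1;                          /* would overflow: drop it */
    d->part[d->num_of_part++].content_length = len;
    return 0;
}

int main(void)
{
    struct parser_data d = { 0 };
    for (int i = 0; i < 12; i++)
        if (add_part(&d, 100) < 0)
            printf("part %d rejected (limit %d)\n", i, OGS_SBI_MAX_NUM_OF_PART);
    return 0;
}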
static struct sock *unix_find_other(struct net *net,
struct sockaddr_un *sunname, int len,
int type, unsigned int hash, int *error)
{
struct sock *u;
struct path path;
int err = 0;
if (sunname->sun_path[0]) {
struct inode *inode;
err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
if (err)
goto fail;
inode = d_backing_inode(path.dentry);
err = path_permission(&path, MAY_WRITE);
if (err)
goto put_fail;
err = -ECONNREFUSED;
if (!S_ISSOCK(inode->i_mode))
goto put_fail;
u = unix_find_socket_byinode(inode);
if (!u)
goto put_fail;
if (u->sk_type == type)
touch_atime(&path);
path_put(&path);
err = -EPROTOTYPE;
if (u->sk_type != type) {
sock_put(u);
goto fail;
}
} else {
err = -ECONNREFUSED;
u = unix_find_socket_byname(net, sunname, len, type ^ hash);
if (u) {
struct dentry *dentry;
dentry = unix_sk(u)->path.dentry;
if (dentry)
touch_atime(&unix_sk(u)->path);
} else
goto fail;
}
return u;
put_fail:
path_put(&path);
fail:
*error = err;
return NULL;
} | 0 | [
"CWE-362"
] | linux | cbcf01128d0a92e131bd09f1688fe032480b65ca | 75,621,615,234,081,540,000,000,000,000,000,000,000 | 54 | af_unix: fix garbage collect vs MSG_PEEK
unix_gc() assumes that candidate sockets can never gain an external
reference (i.e. be installed into an fd) while the unix_gc_lock is
held. Except for MSG_PEEK this is guaranteed by modifying inflight
count under the unix_gc_lock.
MSG_PEEK does not touch any variable protected by unix_gc_lock (file
count is not), yet it needs to be serialized with garbage collection.
Do this by locking/unlocking unix_gc_lock:
1) increment file count
2) lock/unlock barrier to make sure incremented file count is visible
to garbage collection
3) install file into fd
This is a lock barrier (unlike smp_mb()) that ensures that garbage
collection is run completely before or completely after the barrier.
Cc: <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
Signed-off-by: Miklos Szeredi <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
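Step 2's lock/unlock barrier is just an empty critical section; a user-space sketch of the same pattern (pthread names stand in for the kernel's unix_gc_lock, and the commented calls are placeholders for steps 1 and 3):

#include <pthread.h>

static pthread_mutex_t gc_lock = PTHREAD_MUTEX_INITIALIZER;

/* Empty critical section: any concurrent collector holding gc_lock
 * either completed before this point (and saw the bumped file count
 * from step 1) or starts after it (and sees the installed fd from
 * step 3). */
static void gc_barrier(void)
{
    pthread_mutex_lock(&gc_lock);
    pthread_mutex_unlock(&gc_lock);
}

int main(void)
{
    /* bump_file_count(); */
    gc_barrier();
    /* install_fd(); */
    return 0;
}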
static int ZEND_FASTCALL ZEND_DECLARE_CLASS_SPEC_HANDLER(ZEND_OPCODE_HANDLER_ARGS)
{
zend_op *opline = EX(opline);
EX_T(opline->result.u.var).class_entry = do_bind_class(opline, EG(class_table), 0 TSRMLS_CC);
ZEND_VM_NEXT_OPCODE();
} | 0 | [] | php-src | ce96fd6b0761d98353761bf78d5bfb55291179fd | 8,270,208,298,072,190,000,000,000,000,000,000,000 | 7 | - fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs). Patch by Rasmus |
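The check the message describes reduces to a length-aware scan for an embedded NUL, since C string APIs would silently stop at it. A hedged standalone sketch (the helper name is illustrative, not PHP's actual macro):

#include <stdio.h>
#include <string.h>

/* A userland path carries an explicit length, so an embedded '\0'
 * can hide a different suffix from C string APIs. */
static int path_has_embedded_nul(const char *p, size_t len)
{
    return memchr(p, '\0', len) != NULL;
}

int main(void)
{
    const char evil[] = "safe.txt\0../../etc/passwd";
    printf("%d\n", path_has_embedded_nul(evil, sizeof(evil) - 1)); /* 1 */
    return 0;
}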
posix_cpu_clock_set(const clockid_t which_clock, const struct timespec64 *tp)
{
/*
* You can never reset a CPU clock, but we check for other errors
* in the call before failing with EPERM.
*/
int error = check_clock(which_clock);
if (error == 0) {
error = -EPERM;
}
return error;
} | 0 | [
"CWE-190"
] | linux | 78c9c4dfbf8c04883941445a195276bb4bb92c76 | 265,081,842,909,103,120,000,000,000,000,000,000,000 | 12 | posix-timers: Sanitize overrun handling
The posix timer overrun handling is broken because the forwarding functions
can return a huge number of overruns which does not fit in an int. As a
consequence timer_getoverrun(2) and siginfo::si_overrun can turn into
random number generators.
The k_clock::timer_forward() callbacks return a 64 bit value now. Make
k_itimer::ti_overrun[_last] 64bit as well, so the kernel internal
accounting is correct. Remove the temporary (int) casts.
Add a helper function which clamps the overrun value returned to user space
via timer_getoverrun(2) or siginfo::si_overrun limited to a positive value
between 0 and INT_MAX. INT_MAX is an indicator for user space that the
overrun value has been clamped.
Reported-by: Team OWL337 <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Acked-by: John Stultz <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Michael Kerrisk <[email protected]>
Link: https://lkml.kernel.org/r/[email protected] |
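A sketch of the clamping helper the message describes (the kernel's actual helper differs in detail; treating a negative value as wrapped accounting is an assumption here):

#include <limits.h>
#include <stdio.h>

/* Clamp a 64-bit overrun count to what timer_getoverrun(2) can
 * report: 0..INT_MAX, with INT_MAX doubling as the "clamped" marker. */
static int timer_overrun_to_int(long long overrun)
{
    if (overrun < 0 || overrun > INT_MAX)
        return INT_MAX;
    return (int)overrun;
}

int main(void)
{
    printf("%d %d\n", timer_overrun_to_int(5),
                      timer_overrun_to_int(1LL << 40)); /* 5 2147483647 */
    return 0;
}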
static void add_event_to_ctx(struct perf_event *event,
struct perf_event_context *ctx)
{
u64 tstamp = perf_event_time(event);
list_add_event(event, ctx);
perf_group_attach(event);
event->tstamp_enabled = tstamp;
event->tstamp_running = tstamp;
event->tstamp_stopped = tstamp;
} | 0 | [
"CWE-703",
"CWE-189"
] | linux | 8176cced706b5e5d15887584150764894e94e02f | 80,978,054,357,614,980,000,000,000,000,000,000,000 | 11 | perf: Treat attr.config as u64 in perf_swevent_init()
Trinity discovered that we fail to check all 64 bits of
attr.config passed by user space, resulting to out-of-bounds
access of the perf_swevent_enabled array in
sw_perf_event_destroy().
Introduced in commit b0a873ebb ("perf: Register PMU
implementations").
Signed-off-by: Tommi Rantala <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: [email protected]
Cc: Paul Mackerras <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]> |
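The truncation bug is easy to reproduce in isolation: funnelling a u64 through a 32-bit variable discards the high bits before the bounds check. A standalone sketch (PERF_COUNT_SW_MAX's value is a stand-in):

#include <stdint.h>
#include <stdio.h>

#define PERF_COUNT_SW_MAX 11u   /* stand-in for the real enum bound */

int main(void)
{
    uint64_t config = 0x100000000ULL;      /* low 32 bits are zero */

    /* Buggy shape: value truncated into a 32-bit variable first. */
    uint32_t truncated = (uint32_t)config;
    printf("truncated check passes: %d\n", truncated < PERF_COUNT_SW_MAX);

    /* Fixed shape: compare the full 64-bit value. */
    printf("full-width check passes: %d\n", config < PERF_COUNT_SW_MAX);
    return 0;
}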
void WebContents::SetDevToolsWebContents(const WebContents* devtools) {
if (inspectable_web_contents_)
inspectable_web_contents_->SetDevToolsWebContents(devtools->web_contents());
} | 0 | [] | electron | e9fa834757f41c0b9fe44a4dffe3d7d437f52d34 | 188,623,143,042,567,570,000,000,000,000,000,000,000 | 4 | fix: ensure ElectronBrowser mojo service is only bound to appropriate render frames (#33344)
* fix: ensure ElectronBrowser mojo service is only bound to authorized render frames
Notes: no-notes
* refactor: extract electron API IPC to its own mojo interface
* fix: just check main frame not primary main frame
Co-authored-by: Samuel Attard <[email protected]>
Co-authored-by: Samuel Attard <[email protected]> |
**/
CImg<T>& vector() {
return unroll('y'); | 0 | [
"CWE-125"
] | CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 121,855,445,291,980,190,000,000,000,000,000,000,000 | 3 | Fix other issues in 'CImg<T>::load_bmp()'. |
int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
union drm_vmw_gb_surface_reference_arg *arg =
(union drm_vmw_gb_surface_reference_arg *)data;
struct drm_vmw_surface_arg *req = &arg->req;
struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_surface *srf;
struct vmw_user_surface *user_srf;
struct ttm_base_object *base;
uint32_t backup_handle;
int ret = -EINVAL;
ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
req->handle_type, &base);
if (unlikely(ret != 0))
return ret;
user_srf = container_of(base, struct vmw_user_surface, prime.base);
srf = &user_srf->srf;
if (!srf->res.backup) {
DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
goto out_bad_resource;
}
mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
ret = vmw_user_dmabuf_reference(tfile, srf->res.backup,
&backup_handle);
mutex_unlock(&dev_priv->cmdbuf_mutex);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a GB surface "
"backup buffer.\n");
(void) ttm_ref_object_base_unref(tfile, base->hash.key,
TTM_REF_USAGE);
goto out_bad_resource;
}
rep->creq.svga3d_flags = srf->flags;
rep->creq.format = srf->format;
rep->creq.mip_levels = srf->mip_levels[0];
rep->creq.drm_surface_flags = 0;
rep->creq.multisample_count = srf->multisample_count;
rep->creq.autogen_filter = srf->autogen_filter;
rep->creq.array_size = srf->array_size;
rep->creq.buffer_handle = backup_handle;
rep->creq.base_size = srf->base_size;
rep->crep.handle = user_srf->prime.base.hash.key;
rep->crep.backup_size = srf->res.backup_size;
rep->crep.buffer_handle = backup_handle;
rep->crep.buffer_map_handle =
drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
out_bad_resource:
ttm_base_object_unref(&base);
return ret;
} | 0 | [
"CWE-20"
] | linux | ee9c4e681ec4f58e42a83cb0c22a0289ade1aacf | 299,953,843,839,393,840,000,000,000,000,000,000,000 | 61 | drm/vmwgfx: limit the number of mip levels in vmw_gb_surface_define_ioctl()
The 'req->mip_levels' parameter in vmw_gb_surface_define_ioctl() is
a user-controlled 'uint32_t' value which is used as a loop count limit.
This can lead to a kernel lockup and DoS. Add check for 'req->mip_levels'.
References:
https://bugzilla.redhat.com/show_bug.cgi?id=1437431
Cc: <[email protected]>
Signed-off-by: Vladis Dronov <[email protected]>
Reviewed-by: Sinclair Yeh <[email protected]> |
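A sketch of the added guard: validate the user-controlled count before it becomes a loop bound (DRM_VMW_MAX_MIP_LEVELS and its value are assumptions here):

#include <errno.h>
#include <stdint.h>

#define DRM_VMW_MAX_MIP_LEVELS 15   /* assumed cap */

/* req->mip_levels comes from user space; bound it before looping. */
static int check_mip_levels(uint32_t mip_levels)
{
    if (mip_levels == 0 || mip_levels > DRM_VMW_MAX_MIP_LEVELS)
        return -EINVAL;
    return 0;
}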
long vnc_client_read_ws(VncState *vs)
{
int ret, err;
uint8_t *payload;
size_t payload_size, header_size;
VNC_DEBUG("Read websocket %p size %zd offset %zd\n", vs->ws_input.buffer,
vs->ws_input.capacity, vs->ws_input.offset);
buffer_reserve(&vs->ws_input, 4096);
ret = vnc_client_read_buf(vs, buffer_end(&vs->ws_input), 4096);
if (!ret) {
return 0;
}
vs->ws_input.offset += ret;
ret = 0;
/* consume as much of ws_input buffer as possible */
do {
if (vs->ws_payload_remain == 0) {
err = vncws_decode_frame_header(&vs->ws_input,
&header_size,
&vs->ws_payload_remain,
&vs->ws_payload_mask);
if (err <= 0) {
return err;
}
buffer_advance(&vs->ws_input, header_size);
}
if (vs->ws_payload_remain != 0) {
err = vncws_decode_frame_payload(&vs->ws_input,
&vs->ws_payload_remain,
&vs->ws_payload_mask,
&payload,
&payload_size);
if (err < 0) {
return err;
}
if (err == 0) {
return ret;
}
ret += err;
buffer_reserve(&vs->input, payload_size);
buffer_append(&vs->input, payload, payload_size);
buffer_advance(&vs->ws_input, payload_size);
}
} while (vs->ws_input.offset > 0);
return ret;
} | 0 | [] | qemu | a2bebfd6e09d285aa793cae3fb0fc3a39a9fee6e | 86,169,362,349,827,270,000,000,000,000,000,000,000 | 51 | CVE-2015-1779: incrementally decode websocket frames
The logic for decoding websocket frames wants to fully
decode the frame header and payload, before allowing the
VNC server to see any of the payload data. There is no
size limit on websocket payloads, so this allows a
malicious network client to consume 2^64 bytes in memory
in QEMU. It can trigger this denial of service before
the VNC server even performs any authentication.
The fix is to decode the header, and then incrementally
decode the payload data as it is needed. With this fix
the websocket decoder will allow at most 4k of data to
be buffered before decoding and processing payload.
Signed-off-by: Daniel P. Berrange <[email protected]>
[ kraxel: fix frequent spurious disconnects, suggested by Peter Maydell ]
@@ -361,7 +361,7 @@ int vncws_decode_frame_payload(Buffer *input,
- *payload_size = input->offset;
+ *payload_size = *payload_remain;
[ kraxel: fix 32bit build ]
@@ -306,7 +306,7 @@ struct VncState
- uint64_t ws_payload_remain;
+ size_t ws_payload_remain;
Signed-off-by: Gerd Hoffmann <[email protected]> |
TEST_F(QueryPlannerTest, CannotTrimIxisectParamBeneathOr) {
params.options = QueryPlannerParams::INDEX_INTERSECTION;
params.options |= QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1));
addIndex(BSON("b" << 1));
addIndex(BSON("c" << 1));
runQuery(fromjson("{d: 1, $or: [{a: 1}, {b: 1, c: 1}]}"));
assertNumSolutions(3U);
assertSolutionExists(
"{fetch: {filter: {d: 1}, node: {or: {nodes: ["
"{fetch: {filter: {c: 1}, node: {ixscan: {filter: null,"
"pattern: {b: 1}, bounds: {b: [[1,1,true,true]]}}}}},"
"{ixscan: {filter: null, pattern: {a: 1},"
"bounds: {a: [[1,1,true,true]]}}}]}}}}");
assertSolutionExists(
"{fetch: {filter: {d: 1}, node: {or: {nodes: ["
"{fetch: {filter: {b: 1}, node: {ixscan: {filter: null,"
"pattern: {c: 1}, bounds: {c: [[1,1,true,true]]}}}}},"
"{ixscan: {filter: null, pattern: {a: 1},"
"bounds: {a: [[1,1,true,true]]}}}]}}}}");
assertSolutionExists(
"{fetch: {filter: {d: 1}, node: {or: {nodes: ["
"{fetch: {filter: {b: 1, c: 1}, node: {andSorted: {nodes: ["
"{ixscan: {filter: null, pattern: {b: 1}}},"
"{ixscan: {filter: null, pattern: {c: 1}}}]}}}},"
"{ixscan: {filter: null, pattern: {a: 1}}}]}}}}");
} | 0 | [] | mongo | ee97c0699fd55b498310996ee002328e533681a3 | 197,163,102,203,015,430,000,000,000,000,000,000,000 | 33 | SERVER-36993 Fix crash due to incorrect $or pushdown for indexed $expr. |
void kvm_vcpu_on_spin(struct kvm_vcpu *me)
{
struct kvm *kvm = me->kvm;
struct kvm_vcpu *vcpu;
int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
int yielded = 0;
int try = 3;
int pass;
int i;
kvm_vcpu_set_in_spin_loop(me, true);
/*
* We boost the priority of a VCPU that is runnable but not
* currently running, because it got preempted by something
* else and called schedule in __vcpu_run. Hopefully that
* VCPU is holding the lock that we need and will release it.
* We approximate round-robin by starting at the last boosted VCPU.
*/
for (pass = 0; pass < 2 && !yielded && try; pass++) {
kvm_for_each_vcpu(i, vcpu, kvm) {
if (!pass && i <= last_boosted_vcpu) {
i = last_boosted_vcpu;
continue;
} else if (pass && i > last_boosted_vcpu)
break;
if (!ACCESS_ONCE(vcpu->preempted))
continue;
if (vcpu == me)
continue;
if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
continue;
if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
continue;
yielded = kvm_vcpu_yield_to(vcpu);
if (yielded > 0) {
kvm->last_boosted_vcpu = i;
break;
} else if (yielded < 0) {
try--;
if (!try)
break;
}
}
}
kvm_vcpu_set_in_spin_loop(me, false);
/* Ensure vcpu is not eligible during next spinloop */
kvm_vcpu_set_dy_eligible(me, false);
} | 0 | [
"CWE-416",
"CWE-284"
] | linux | a0f1d21c1ccb1da66629627a74059dd7f5ac9c61 | 320,750,601,124,542,640,000,000,000,000,000,000,000 | 50 | KVM: use after free in kvm_ioctl_create_device()
We should move the ops->destroy(dev) after the list_del(&dev->vm_node)
so that we don't use "dev" after freeing it.
Fixes: a28ebea2adc4 ("KVM: Protect device ops->create and list_add with kvm->lock")
Signed-off-by: Dan Carpenter <[email protected]>
Reviewed-by: David Hildenbrand <[email protected]>
Signed-off-by: Radim Krčmář <[email protected]> |
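The fix is purely an ordering one; a standalone sketch of the safe shape (list_del and the structures are simplified from the kernel's):

#include <stdlib.h>

struct node { struct node *prev, *next; };
struct dev  { struct node vm_node; };

static void list_del(struct node *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
}

/* Safe ordering per the message: unlink while "dev" is still alive,
 * then free it; the buggy version destroyed first and then touched
 * freed memory through the list node. */
static void destroy_dev(struct dev *d)
{
    list_del(&d->vm_node);
    free(d);                /* d must not be dereferenced after this */
}

int main(void)
{
    struct node head = { &head, &head };
    struct dev *d = malloc(sizeof(*d));
    d->vm_node.prev = d->vm_node.next = &head;
    head.prev = head.next = &d->vm_node;
    destroy_dev(d);
    return 0;
}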
static int records_match(const char *mboxname,
struct index_record *old,
struct index_record *new)
{
int i;
int match = 1;
int userflags_dirty = 0;
if (old->internaldate != new->internaldate) {
printf("%s uid %u mismatch: internaldate\n",
mboxname, new->uid);
match = 0;
}
if (old->sentdate != new->sentdate) {
printf("%s uid %u mismatch: sentdate\n",
mboxname, new->uid);
match = 0;
}
if (old->size != new->size) {
printf("%s uid %u mismatch: size\n",
mboxname, new->uid);
match = 0;
}
if (old->header_size != new->header_size) {
printf("%s uid %u mismatch: header_size\n",
mboxname, new->uid);
match = 0;
}
if (old->gmtime != new->gmtime) {
printf("%s uid %u mismatch: gmtime\n",
mboxname, new->uid);
match = 0;
}
if (old->savedate != new->savedate) {
printf("%s uid %u mismatch: savedate\n",
mboxname, new->uid);
match = 0;
}
if (old->createdmodseq != new->createdmodseq) {
printf("%s uid %u mismatch: createdmodseq\n",
mboxname, new->uid);
match = 0;
}
if (old->system_flags != new->system_flags) {
printf("%s uid %u mismatch: systemflags\n",
mboxname, new->uid);
match = 0;
}
if (old->internal_flags != new->internal_flags) {
printf("%s uid %u mismatch: internalflags\n",
mboxname, new->uid);
match = 0;
}
for (i = 0; i < MAX_USER_FLAGS/32; i++) {
if (old->user_flags[i] != new->user_flags[i])
userflags_dirty = 1;
}
if (userflags_dirty) {
printf("%s uid %u mismatch: userflags\n",
mboxname, new->uid);
match = 0;
}
if (!message_guid_equal(&old->guid, &new->guid)) {
printf("%s uid %u mismatch: guid\n",
mboxname, new->uid);
match = 0;
}
if (!match) {
syslog(LOG_ERR, "%s uid %u record mismatch, rewriting",
mboxname, new->uid);
}
/* cache issues - don't print, probably just a version
* upgrade... */
if (old->cache_version != new->cache_version) {
match = 0;
}
if (old->cache_crc != new->cache_crc) {
match = 0;
}
if (cache_len(old) != cache_len(new)) {
match = 0;
}
/* only compare cache records if size matches */
else if (memcmp(cache_base(old), cache_base(new), cache_len(new))) {
match = 0;
}
return match;
} | 0 | [] | cyrus-imapd | 1d6d15ee74e11a9bd745e80be69869e5fb8d64d6 | 185,146,433,238,024,600,000,000,000,000,000,000,000 | 91 | mailbox.c/reconstruct.c: Add mailbox_mbentry_from_path() |
goa_utils_check_duplicate (GoaClient *client,
const gchar *identity,
const gchar *provider_type,
GoaPeekInterfaceFunc func,
GError **error)
{
GList *accounts;
GList *l;
gboolean ret;
ret = FALSE;
accounts = goa_client_get_accounts (client);
for (l = accounts; l != NULL; l = l->next)
{
GoaObject *object = GOA_OBJECT (l->data);
GoaAccount *account;
gpointer *interface;
const gchar *identity_from_object;
const gchar *provider_type_from_object;
account = goa_object_peek_account (object);
interface = (*func) (object);
if (interface == NULL)
continue;
provider_type_from_object = goa_account_get_provider_type (account);
if (g_strcmp0 (provider_type_from_object, provider_type) != 0)
continue;
identity_from_object = goa_account_get_identity (account);
if (g_strcmp0 (identity_from_object, identity) == 0)
{
const gchar *presentation_identity;
const gchar *provider_name;
presentation_identity = goa_account_get_presentation_identity (account);
provider_name = goa_account_get_provider_name (account);
g_set_error (error,
GOA_ERROR,
GOA_ERROR_ACCOUNT_EXISTS,
_("A %s account already exists for %s"),
provider_name,
presentation_identity);
goto out;
}
}
ret = TRUE;
out:
g_list_free_full (accounts, g_object_unref);
return ret;
} | 0 | [
"CWE-310"
] | gnome-online-accounts | edde7c63326242a60a075341d3fea0be0bc4d80e | 126,459,978,285,348,200,000,000,000,000,000,000,000 | 54 | Guard against invalid SSL certificates
None of the branded providers (e.g., Google, Facebook and Windows Live)
should ever have an invalid certificate. So set "ssl-strict" on the
SoupSession object being used by GoaWebView.
Providers like ownCloud and Exchange might have to deal with
certificates that are not up to the mark, e.g. self-signed
certificates. For those, show a warning when the account is being
created, and only proceed if the user decides to ignore it. In any
case, save the status of the certificate that was used to create the
account. So an account created with a valid certificate will never
work with an invalid one, and one created with an invalid certificate
will not throw any further warnings.
Fixes: CVE-2013-0240 |
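The "ssl-strict" toggle the message refers to maps onto a GObject property; a hedged sketch assuming the libsoup 2.x API in use at the time:

#include <libsoup/soup.h>

/* Turn TLS certificate validation failures into hard connection
 * errors for the session driving the embedded web view. */
static void harden_session(SoupSession *session)
{
    g_object_set(G_OBJECT(session), "ssl-strict", TRUE, NULL);
}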
double Item::val_real_from_date()
{
DBUG_ASSERT(fixed == 1);
MYSQL_TIME ltime;
if (get_date(<ime, 0))
return 0;
return TIME_to_double(<ime);
} | 0 | [] | server | b000e169562697aa072600695d4f0c0412f94f4f | 303,535,844,997,466,820,000,000,000,000,000,000,000 | 8 | Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST, COL), NAME_CONST('NAME', NULL))
based on:
commit f7316aa0c9a
Author: Ajo Robert <[email protected]>
Date: Thu Aug 24 17:03:21 2017 +0530
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST,
COL), NAME_CONST('NAME', NULL))
Backport of Bug#19143243 fix.
NAME_CONST item can return NULL_ITEM type in case of incorrect arguments.
NULL_ITEM has special processing in Item_func_in function.
In Item_func_in::fix_length_and_dec an array of possible comparators is
created. Since the NAME_CONST function has NULL_ITEM type, the corresponding
array element is empty. Then NAME_CONST is wrapped in an ITEM_CACHE.
ITEM_CACHE cannot return the proper type (NULL_ITEM) in Item_func_in::val_int(),
so the NULL_ITEM ends up being compared with an empty comparator.
The fix is to disable the caching of Item_name_const item. |
static Bool gf_m2ts_is_long_section(u8 table_id)
{
switch (table_id) {
case GF_M2TS_TABLE_ID_MPEG4_BIFS:
case GF_M2TS_TABLE_ID_MPEG4_OD:
case GF_M2TS_TABLE_ID_INT:
case GF_M2TS_TABLE_ID_EIT_ACTUAL_PF:
case GF_M2TS_TABLE_ID_EIT_OTHER_PF:
case GF_M2TS_TABLE_ID_ST:
case GF_M2TS_TABLE_ID_SIT:
case GF_M2TS_TABLE_ID_DSM_CC_PRIVATE:
case GF_M2TS_TABLE_ID_MPE_FEC:
case GF_M2TS_TABLE_ID_DSM_CC_DOWNLOAD_DATA_MESSAGE:
case GF_M2TS_TABLE_ID_DSM_CC_UN_MESSAGE:
return 1;
default:
if (table_id >= GF_M2TS_TABLE_ID_EIT_SCHEDULE_MIN && table_id <= GF_M2TS_TABLE_ID_EIT_SCHEDULE_MAX)
return 1;
else
return 0;
}
} | 0 | [
"CWE-416",
"CWE-125"
] | gpac | 1ab4860609f2e7a35634930571e7d0531297e090 | 165,950,905,021,763,430,000,000,000,000,000,000,000 | 22 | fixed potential crash on PMT IOD parse - cf #1268 #1269 |
COMPS_HSList ** comps_mrtree_getp(COMPS_MRTree * rt, const char * key) {
COMPS_HSList * subnodes;
COMPS_HSListItem * it = NULL;
COMPS_MRTreeData * rtdata;
unsigned int offset, len, x;
char found, ended;
len = strlen(key);
offset = 0;
subnodes = rt->subnodes;
while (offset != len) {
found = 0;
for (it = subnodes->first; it != NULL; it=it->next) {
if (((COMPS_MRTreeData*)it->data)->key[0] == key[offset]) {
found = 1;
break;
}
}
if (!found)
return NULL;
rtdata = (COMPS_MRTreeData*)it->data;
for (x=1; ;x++) {
ended=0;
if (rtdata->key[x] == 0) ended += 1;
if (x == len - offset) ended += 2;
if (ended != 0) break;
if (key[offset+x] != rtdata->key[x]) break;
}
if (ended == 3) return &rtdata->data;
else if (ended == 1) offset+=x;
else return NULL;
subnodes = ((COMPS_MRTreeData*)it->data)->subnodes;
}
if (it)
return &((COMPS_MRTreeData*)it->data)->data;
else return NULL;
} | 0 | [
"CWE-416",
"CWE-862"
] | libcomps | e3a5d056633677959ad924a51758876d415e7046 | 316,647,157,260,484,030,000,000,000,000,000,000,000 | 38 | Fix UAF in comps_objmrtree_unite function
The added field is not used at all in many places, and it is probably a
left-over from some copy-paste. |
bool mg_file_write(const char *path, const void *buf, size_t len) {
bool result = false;
FILE *fp;
char tmp[MG_PATH_MAX];
snprintf(tmp, sizeof(tmp), "%s.%d", path, rand());
fp = fopen(tmp, "wb");
if (fp != NULL) {
result = fwrite(buf, 1, len, fp) == len;
fclose(fp);
if (result) {
remove(path);
rename(tmp, path);
} else {
remove(tmp);
}
}
return result;
} | 0 | [
"CWE-552"
] | mongoose | c65c8fdaaa257e0487ab0aaae9e8f6b439335945 | 131,139,817,291,483,430,000,000,000,000,000,000,000 | 18 | Protect against the directory traversal in mg_upload() |
static bool is_ereg(u32 reg)
{
return (1 << reg) & (BIT(BPF_REG_5) |
BIT(AUX_REG) |
BIT(BPF_REG_7) |
BIT(BPF_REG_8) |
BIT(BPF_REG_9));
} | 0 | [
"CWE-17"
] | net | 3f7352bf21f8fd7ba3e2fcef9488756f188e12be | 165,586,791,665,358,400,000,000,000,000,000,000,000 | 8 | x86: bpf_jit: fix compilation of large bpf programs
x86 has variable-length encoding. The x86 JIT compiler tries
to pick the shortest encoding for each bpf instruction.
While doing so the jump targets change, so the JIT makes
multiple passes over the program. A typical program needs 3 passes.
Some very short programs converge in 2 passes. Large programs
may need 4 or 5. But specially crafted bpf programs may hit the
pass limit, and if the program converges on the last iteration
the JIT compiler will produce an image full of 'int 3' insns.
Fix this corner case by doing a final iteration over the bpf program.
Fixes: 0a14842f5a3c ("net: filter: Just In Time compiler for x86-64")
Reported-by: Daniel Borkmann <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Tested-by: Daniel Borkmann <[email protected]>
Acked-by: Daniel Borkmann <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
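A toy model of the convergence loop plus the extra emit pass the fix adds (do_pass and the sizes are illustrative, not the kernel's code):

#include <stdio.h>

/* Stand-in for one encoding pass: jump offsets shrink as targets
 * move closer, so the image size can decrease between passes. */
static int do_pass(int oldlen, int emit)
{
    int newlen = oldlen > 70 ? oldlen - 10 : oldlen;
    if (emit)
        printf("emitting %d-byte image\n", newlen);
    return newlen;
}

int main(void)
{
    int proglen = 100, oldproglen = 0;
    for (int pass = 0; pass < 10; pass++) {
        proglen = do_pass(proglen, 0);
        if (proglen == oldproglen) {       /* converged */
            do_pass(proglen, 1);           /* one final pass to emit */
            break;
        }
        oldproglen = proglen;
    }
    return 0;
}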
brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf,
struct brcmf_msgbuf_work_item *work)
{
struct msgbuf_tx_flowring_create_req *create;
struct brcmf_commonring *commonring;
void *ret_ptr;
u32 flowid;
void *dma_buf;
u32 dma_sz;
u64 address;
int err;
flowid = work->flowid;
dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
dma_buf = dma_alloc_coherent(msgbuf->drvr->bus_if->dev, dma_sz,
&msgbuf->flowring_dma_handle[flowid],
GFP_KERNEL);
if (!dma_buf) {
brcmf_err("dma_alloc_coherent failed\n");
brcmf_flowring_delete(msgbuf->flow, flowid);
return BRCMF_FLOWRING_INVALID_ID;
}
brcmf_commonring_config(msgbuf->flowrings[flowid],
BRCMF_H2D_TXFLOWRING_MAX_ITEM,
BRCMF_H2D_TXFLOWRING_ITEMSIZE, dma_buf);
commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
brcmf_commonring_lock(commonring);
ret_ptr = brcmf_commonring_reserve_for_write(commonring);
if (!ret_ptr) {
brcmf_err("Failed to reserve space in commonring\n");
brcmf_commonring_unlock(commonring);
brcmf_msgbuf_remove_flowring(msgbuf, flowid);
return BRCMF_FLOWRING_INVALID_ID;
}
create = (struct msgbuf_tx_flowring_create_req *)ret_ptr;
create->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
create->msg.ifidx = work->ifidx;
create->msg.request_id = 0;
create->tid = brcmf_flowring_tid(msgbuf->flow, flowid);
create->flow_ring_id = cpu_to_le16(flowid +
BRCMF_H2D_MSGRING_FLOWRING_IDSTART);
memcpy(create->sa, work->sa, ETH_ALEN);
memcpy(create->da, work->da, ETH_ALEN);
address = (u64)msgbuf->flowring_dma_handle[flowid];
create->flow_ring_addr.high_addr = cpu_to_le32(address >> 32);
create->flow_ring_addr.low_addr = cpu_to_le32(address & 0xffffffff);
create->max_items = cpu_to_le16(BRCMF_H2D_TXFLOWRING_MAX_ITEM);
create->len_item = cpu_to_le16(BRCMF_H2D_TXFLOWRING_ITEMSIZE);
brcmf_dbg(MSGBUF, "Send Flow Create Req flow ID %d for peer %pM prio %d ifindex %d\n",
flowid, work->da, create->tid, work->ifidx);
err = brcmf_commonring_write_complete(commonring);
brcmf_commonring_unlock(commonring);
if (err) {
brcmf_err("Failed to write commonring\n");
brcmf_msgbuf_remove_flowring(msgbuf, flowid);
return BRCMF_FLOWRING_INVALID_ID;
}
return flowid;
} | 0 | [
"CWE-20"
] | linux | a4176ec356c73a46c07c181c6d04039fafa34a9f | 127,925,212,067,696,700,000,000,000,000,000,000,000 | 65 | brcmfmac: add subtype check for event handling in data path
For USB there is no separate channel for passing events
from firmware to the host driver, so they are passed over the
data path. In order to detect mock event messages an additional
check is needed on event subtype. This check is added conditionally
using unlikely() keyword.
Reviewed-by: Hante Meuleman <[email protected]>
Reviewed-by: Pieter-Paul Giesberts <[email protected]>
Reviewed-by: Franky Lin <[email protected]>
Signed-off-by: Arend van Spriel <[email protected]>
Signed-off-by: Kalle Valo <[email protected]> |
bool LOGGER::flush_general_log()
{
/*
Now we lock logger, as nobody should be able to use logging routines while
log tables are closed
*/
logger.lock_exclusive();
/* Reopen general log file */
if (opt_log)
file_log_handler->get_mysql_log()->reopen_file();
/* End of log flush */
logger.unlock();
return 0;
} | 0 | [
"CWE-264"
] | mysql-server | 48bd8b16fe382be302c6f0b45931be5aa6f29a0e | 151,733,492,001,023,490,000,000,000,000,000,000,000 | 17 | Bug#24388753: PRIVILEGE ESCALATION USING MYSQLD_SAFE
[This is the 5.5/5.6 version of the bugfix].
The problem was that it was possible to write log files ending
in .ini/.cnf that later could be parsed as an options file.
This made it possible for users to specify startup options
without the permissions to do so.
This patch fixes the problem by disallowing the general query log
and slow query log from being written to files ending in .ini and .cnf. |
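The rejected-suffix check the fix describes boils down to a case-insensitive filename comparison; a standalone sketch:

#include <stdio.h>
#include <string.h>
#include <strings.h>

/* Refuse log destinations that could later be parsed as option
 * files, per the fix description. */
static int is_forbidden_log_name(const char *name)
{
    static const char *exts[] = { ".ini", ".cnf" };
    size_t len = strlen(name);
    for (unsigned i = 0; i < 2; i++) {
        size_t elen = strlen(exts[i]);
        if (len >= elen && strcasecmp(name + len - elen, exts[i]) == 0)
            return 1;
    }
    return 0;
}

int main(void)
{
    printf("%d %d\n", is_forbidden_log_name("query.log"),
                      is_forbidden_log_name("/etc/my.cnf"));  /* 0 1 */
    return 0;
}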
int llhttp__internal__c_or_flags_15(
llhttp__internal_t* state,
const unsigned char* p,
const unsigned char* endp) {
state->flags |= 32;
return 0;
} | 0 | [
"CWE-444"
] | node | 641f786bb1a1f6eb1ff8750782ed939780f2b31a | 34,321,783,200,436,517,000,000,000,000,000,000,000 | 7 | http: unset `F_CHUNKED` on new `Transfer-Encoding`
A duplicate `Transfer-Encoding` header should be treated as a single one,
but with the original header values concatenated with a comma separator. In
light of this, even if the past `Transfer-Encoding` ended with
`chunked`, we should not let `F_CHUNKED` leak into the next
header, because the mere presence of another header indicates that `chunked`
is not the last transfer-encoding token.
CVE-ID: CVE-2020-8287
Refs: https://github.com/nodejs-private/llhttp-private/pull/3
Refs: https://hackerone.com/bugs?report_id=1002188&subject=nodejs
PR-URL: https://github.com/nodejs-private/node-private/pull/228
Reviewed-By: Fedor Indutny <[email protected]>
Reviewed-By: Rich Trott <[email protected]> |
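The rule reduces to clearing the chunked flag whenever another Transfer-Encoding header begins, so `chunked` only counts if it is the final token overall. A standalone sketch (the flag value is illustrative, not llhttp's real constant):

#include <stdio.h>

#define F_CHUNKED (1u << 5)   /* illustrative bit */

/* Called when a *new* Transfer-Encoding header starts: chunked state
 * from a previous header can no longer be the last token. */
static void on_transfer_encoding_start(unsigned *flags)
{
    *flags &= ~F_CHUNKED;
}

int main(void)
{
    unsigned flags = F_CHUNKED;         /* first header ended in "chunked" */
    on_transfer_encoding_start(&flags); /* a second TE header appears */
    printf("chunked still set: %u\n", flags & F_CHUNKED); /* 0 */
    return 0;
}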
int vrend_renderer_init(struct vrend_if_cbs *cbs, uint32_t flags)
{
bool gles;
int gl_ver;
virgl_gl_context gl_context;
struct virgl_gl_ctx_param ctx_params;
if (!vrend_state.inited) {
vrend_state.inited = true;
vrend_object_init_resource_table();
vrend_clicbs = cbs;
/* Give some defaults to be able to run the tests */
vrend_state.max_texture_2d_size =
vrend_state.max_texture_3d_size =
vrend_state.max_texture_cube_size = 16384;
}
#ifndef NDEBUG
vrend_init_debug_flags();
#endif
ctx_params.shared = false;
for (uint32_t i = 0; i < ARRAY_SIZE(gl_versions); i++) {
ctx_params.major_ver = gl_versions[i].major;
ctx_params.minor_ver = gl_versions[i].minor;
gl_context = vrend_clicbs->create_gl_context(0, &ctx_params);
if (gl_context)
break;
}
vrend_clicbs->make_current(gl_context);
gl_ver = epoxy_gl_version();
/* enable error output as early as possible */
if (vrend_use_debug_cb && epoxy_has_gl_extension("GL_KHR_debug")) {
glDebugMessageCallback(vrend_debug_cb, NULL);
glEnable(GL_DEBUG_OUTPUT);
glDisable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
set_feature(feat_debug_cb);
}
/* make sure you have the latest version of libepoxy */
gles = epoxy_is_desktop_gl() == 0;
vrend_state.gl_major_ver = gl_ver / 10;
vrend_state.gl_minor_ver = gl_ver % 10;
if (gles) {
vrend_printf( "gl_version %d - es profile enabled\n", gl_ver);
vrend_state.use_gles = true;
/* for now, makes the rest of the code use the most GLES 3.x like path */
vrend_state.use_core_profile = 1;
} else if (gl_ver > 30 && !epoxy_has_gl_extension("GL_ARB_compatibility")) {
vrend_printf( "gl_version %d - core profile enabled\n", gl_ver);
vrend_state.use_core_profile = 1;
} else {
vrend_printf( "gl_version %d - compat profile\n", gl_ver);
}
init_features(gles ? 0 : gl_ver,
gles ? gl_ver : 0);
vrend_state.features[feat_srgb_write_control] &= virgl_has_gl_colorspace();
glGetIntegerv(GL_MAX_DRAW_BUFFERS, (GLint *) &vrend_state.max_draw_buffers);
if (!has_feature(feat_arb_robustness) &&
!has_feature(feat_gles_khr_robustness)) {
vrend_printf("WARNING: running without ARB/KHR robustness in place may crash\n");
}
/* callbacks for when we are cleaning up the object table */
vrend_resource_set_destroy_callback(vrend_destroy_resource_object);
vrend_object_set_destroy_callback(VIRGL_OBJECT_QUERY, vrend_destroy_query_object);
vrend_object_set_destroy_callback(VIRGL_OBJECT_SURFACE, vrend_destroy_surface_object);
vrend_object_set_destroy_callback(VIRGL_OBJECT_SHADER, vrend_destroy_shader_object);
vrend_object_set_destroy_callback(VIRGL_OBJECT_SAMPLER_VIEW, vrend_destroy_sampler_view_object);
vrend_object_set_destroy_callback(VIRGL_OBJECT_STREAMOUT_TARGET, vrend_destroy_so_target_object);
vrend_object_set_destroy_callback(VIRGL_OBJECT_SAMPLER_STATE, vrend_destroy_sampler_state_object);
vrend_object_set_destroy_callback(VIRGL_OBJECT_VERTEX_ELEMENTS, vrend_destroy_vertex_elements_object);
/* disable for format testing, spews a lot of errors */
if (has_feature(feat_debug_cb)) {
glDisable(GL_DEBUG_OUTPUT);
}
vrend_state.bgra_srgb_emulation_loaded = false;
vrend_build_format_list_common();
if (vrend_state.use_gles) {
vrend_build_format_list_gles();
} else {
vrend_build_format_list_gl();
}
vrend_check_texture_storage(tex_conv_table);
/* disable for format testing */
if (has_feature(feat_debug_cb)) {
glDisable(GL_DEBUG_OUTPUT);
}
vrend_clicbs->destroy_gl_context(gl_context);
list_inithead(&vrend_state.fence_list);
list_inithead(&vrend_state.fence_wait_list);
list_inithead(&vrend_state.waiting_query_list);
list_inithead(&vrend_state.active_ctx_list);
/* create 0 context */
vrend_renderer_context_create_internal(0, strlen("HOST"), "HOST");
vrend_state.eventfd = -1;
if (flags & VREND_USE_THREAD_SYNC) {
vrend_renderer_use_threaded_sync();
}
return 0;
} | 0 | [
"CWE-787"
] | virglrenderer | cbc8d8b75be360236cada63784046688aeb6d921 | 87,970,982,850,521,250,000,000,000,000,000,000,000 | 118 | vrend: check transfer bounds for negative values too and report error
Closes #138
Signed-off-by: Gert Wollny <[email protected]>
Reviewed-by: Emil Velikov <[email protected]> |
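A sketch of the shape of that check; gallium-style boxes use signed fields, so negatives must be rejected explicitly (the struct and field names here are assumptions):

/* Guest-supplied transfer boxes: reject negative offsets/extents
 * before any size arithmetic can misbehave. */
struct box { int x, y, z, w, h, d; };

static int check_transfer_bounds(const struct box *b)
{
    return b->x >= 0 && b->y >= 0 && b->z >= 0 &&
           b->w >= 0 && b->h >= 0 && b->d >= 0;
}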
static int trust_1oidany(X509_TRUST *trust, X509 *x, int flags)
{
if (x->aux && (x->aux->trust || x->aux->reject))
return obj_trust(trust->arg1, x, flags);
/*
* we don't have any trust settings: for compatibility we return trusted
* if it is self signed
*/
return trust_compat(trust, x, flags);
} | 1 | [] | openssl | 33cc5dde478ba5ad79f8fd4acd8737f0e60e236e | 221,382,819,721,848,670,000,000,000,000,000,000,000 | 10 | Compat self-signed trust with reject-only aux data
When auxiliary data contains only reject entries, continue to trust
self-signed objects just as when no auxiliary data is present.
This makes it possible to reject specific uses without changing
what's accepted (and thus overriding the underlying EKU).
Added new supported certs and doubled test count from 38 to 76.
Reviewed-by: Dr. Stephen Henson <[email protected]> |
void EC_GROUP_free(EC_GROUP *group)
{
if (!group) return;
if (group->meth->group_finish != 0)
group->meth->group_finish(group);
EC_EX_DATA_free_all_data(&group->extra_data);
if (group->mont_data)
BN_MONT_CTX_free(group->mont_data);
if (group->generator != NULL)
EC_POINT_free(group->generator);
BN_free(&group->order);
BN_free(&group->cofactor);
if (group->seed)
OPENSSL_free(group->seed);
OPENSSL_free(group);
} | 0 | [
"CWE-320"
] | openssl | 8aed2a7548362e88e84a7feb795a3a97e8395008 | 30,336,304,485,745,090,000,000,000,000,000,000,000 | 22 | Reserve option to use BN_mod_exp_mont_consttime in ECDSA.
Submitted by Shay Gueron, Intel Corp.
RT: 3149
Reviewed-by: Rich Salz <[email protected]>
(cherry picked from commit f54be179aa4cbbd944728771d7d59ed588158a12) |
static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
unsigned char **iv,
struct scatterlist **sg,
int num_frags)
{
size_t size, iv_offset, sg_offset;
struct aead_request *req;
void *tmp;
size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
iv_offset = size;
size += GCM_AES_IV_LEN;
size = ALIGN(size, __alignof__(struct scatterlist));
sg_offset = size;
size += sizeof(struct scatterlist) * num_frags;
tmp = kmalloc(size, GFP_ATOMIC);
if (!tmp)
return NULL;
*iv = (unsigned char *)(tmp + iv_offset);
*sg = (struct scatterlist *)(tmp + sg_offset);
req = tmp;
aead_request_set_tfm(req, tfm);
return req;
} | 0 | [
"CWE-119"
] | net | 5294b83086cc1c35b4efeca03644cf9d12282e5b | 115,397,458,096,684,730,000,000,000,000,000,000,000 | 29 | macsec: dynamically allocate space for sglist
We call skb_cow_data, which is good anyway to ensure we can actually
modify the skb as such (another pre-existing error). Now that we have the
number of fragments required, we can safely allocate exactly that amount
of memory.
Fixes: c09440f7dcb3 ("macsec: introduce IEEE 802.1AE driver")
Signed-off-by: Jason A. Donenfeld <[email protected]>
Acked-by: Sabrina Dubroca <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static inline int fat_get_entry(struct inode *dir, loff_t *pos,
struct buffer_head **bh,
struct msdos_dir_entry **de)
{
/* Fast stuff first */
if (*bh && *de &&
(*de - (struct msdos_dir_entry *)(*bh)->b_data) < MSDOS_SB(dir->i_sb)->dir_per_block - 1) {
*pos += sizeof(struct msdos_dir_entry);
(*de)++;
return 0;
}
return fat__get_entry(dir, pos, bh, de);
} | 0 | [] | linux-2.6 | c483bab099cb89e92b7cad94a52fcdaf37e56657 | 148,603,442,242,144,600,000,000,000,000,000,000,000 | 13 | fat: fix VFAT compat ioctls on 64-bit systems
If you compile and run the below test case in an msdos or vfat directory on
an x86-64 system with -m32 you'll get garbage in the kernel_dirent struct
followed by a SIGSEGV.
The patch fixes this.
Reported and initial fix by Bart Oldeman
#include <sys/types.h>
#include <sys/ioctl.h>
#include <dirent.h>
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
struct kernel_dirent {
long d_ino;
long d_off;
unsigned short d_reclen;
char d_name[256]; /* We must not include limits.h! */
};
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct kernel_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct kernel_dirent [2])
int main(void)
{
int fd = open(".", O_RDONLY);
struct kernel_dirent de[2];
while (1) {
int i = ioctl(fd, VFAT_IOCTL_READDIR_BOTH, (long)de);
if (i == -1) break;
if (de[0].d_reclen == 0) break;
printf("SFN: reclen=%2d off=%d ino=%d, %-12s",
de[0].d_reclen, de[0].d_off, de[0].d_ino, de[0].d_name);
if (de[1].d_reclen)
printf("\tLFN: reclen=%2d off=%d ino=%d, %s",
de[1].d_reclen, de[1].d_off, de[1].d_ino, de[1].d_name);
printf("\n");
}
return 0;
}
Signed-off-by: Bart Oldeman <[email protected]>
Signed-off-by: OGAWA Hirofumi <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
const std::string& code() const { return code_; } | 0 | [
"CWE-476"
] | envoy | 8788a3cf255b647fd14e6b5e2585abaaedb28153 | 317,098,716,503,037,300,000,000,000,000,000,000,000 | 1 | 1.4 - Do not call into the VM unless the VM Context has been created. (#24)
* Ensure that the in-VM Context is created before onDone is called.
Signed-off-by: John Plevyak <[email protected]>
* Update as per offline discussion.
Signed-off-by: John Plevyak <[email protected]>
* Set in_vm_context_created_ in onNetworkNewConnection.
Signed-off-by: John Plevyak <[email protected]>
* Add guards to other network calls.
Signed-off-by: John Plevyak <[email protected]>
* Fix common/wasm tests.
Signed-off-by: John Plevyak <[email protected]>
* Patch tests.
Signed-off-by: John Plevyak <[email protected]>
* Remove unecessary file from cherry-pick.
Signed-off-by: John Plevyak <[email protected]> |
i_object_size(gs_memory_t * mem, const void /*obj_header_t */ *obj)
{
return pre_obj_contents_size((const obj_header_t *)obj - 1);
} | 0 | [
"CWE-190"
] | ghostpdl | cfde94be1d4286bc47633c6e6eaf4e659bd78066 | 128,760,440,081,766,030,000,000,000,000,000,000,000 | 4 | Bug 697985: bounds check the array allocation methods
The clump allocator has four allocation functions that use 'number of elements'
and 'size of elements' parameters (rather than a simple 'number of bytes').
Those need specific bounds checking. |
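The canonical guard for allocators taking 'number of elements' and 'size of elements' separately, per the message (the helper name is illustrative):

#include <stddef.h>
#include <stdint.h>

/* Fail before the multiplication can wrap, instead of allocating a
 * too-small buffer and overflowing it later. */
static int alloc_bytes(size_t nmemb, size_t size, size_t *out)
{
    if (size != 0 && nmemb > SIZE_MAX / size)
        return -1;                  /* nmemb * size would overflow */
    *out = nmemb * size;
    return 0;
}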
double Item_func_coalesce::real_op()
{
DBUG_ASSERT(fixed == 1);
null_value=0;
for (uint i=0 ; i < arg_count ; i++)
{
double res= args[i]->val_real();
if (!args[i]->null_value)
return res;
}
null_value=1;
return 0;
} | 0 | [
"CWE-617"
] | server | 807945f2eb5fa22e6f233cc17b85a2e141efe2c8 | 38,583,885,325,731,970,000,000,000,000,000,000,000 | 13 | MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item. |
bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu)
{
if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
return true;
return vmx_get_cpl(vcpu) == 3 && kvm_read_cr0_bits(vcpu, X86_CR0_AM) &&
(kvm_get_rflags(vcpu) & X86_EFLAGS_AC);
} | 0 | [
"CWE-703"
] | linux | 6cd88243c7e03845a450795e134b488fc2afb736 | 58,994,879,160,307,680,000,000,000,000,000,000,000 | 8 | KVM: x86: do not report a vCPU as preempted outside instruction boundaries
If a vCPU is outside guest mode and is scheduled out, it might be in the
process of making a memory access. A problem occurs if another vCPU uses
the PV TLB flush feature during the period when the vCPU is scheduled
out, and a virtual address has already been translated but has not yet
been accessed, because this is equivalent to using a stale TLB entry.
To avoid this, only report a vCPU as preempted if sure that the guest
is at an instruction boundary. A rescheduling request will be delivered
to the host physical CPU as an external interrupt, so for simplicity
consider any vmexit *not* instruction boundary except for external
interrupts.
It would in principle be okay to report the vCPU as preempted also
if it is sleeping in kvm_vcpu_block(): a TLB flush IPI will incur the
vmentry/vmexit overhead unnecessarily, and optimistic spinning is
also unlikely to succeed. However, leave it for later because right
now kvm_vcpu_check_block() is doing memory accesses. Even
though the TLB flush issue only applies to virtual memory addresses,
it's very much preferable to be conservative.
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
uint32 pack_length() const { return (uint32) (field_length + 7) / 8; } | 0 | [
"CWE-416",
"CWE-703"
] | server | 08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917 | 226,972,366,071,873,940,000,000,000,000,000,000,000 | 1 | MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter prematurely freed item.
When table is reopened from cache vcol_info contains stale
expression. We refresh expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]> |
static int index_conflict__get_byindex(
const git_index_entry **ancestor_out,
const git_index_entry **our_out,
const git_index_entry **their_out,
git_index *index,
size_t n)
{
const git_index_entry *conflict_entry;
const char *path = NULL;
size_t count;
int stage, len = 0;
assert(ancestor_out && our_out && their_out && index);
*ancestor_out = NULL;
*our_out = NULL;
*their_out = NULL;
for (count = git_index_entrycount(index); n < count; ++n) {
conflict_entry = git_vector_get(&index->entries, n);
if (path && index->entries_cmp_path(conflict_entry->path, path) != 0)
break;
stage = GIT_IDXENTRY_STAGE(conflict_entry);
path = conflict_entry->path;
switch (stage) {
case 3:
*their_out = conflict_entry;
len++;
break;
case 2:
*our_out = conflict_entry;
len++;
break;
case 1:
*ancestor_out = conflict_entry;
len++;
break;
default:
break;
};
}
return len;
} | 0 | [
"CWE-415",
"CWE-190"
] | libgit2 | 3db1af1f370295ad5355b8f64b865a2a357bcac0 | 123,701,798,865,430,170,000,000,000,000,000,000,000 | 47 | index: error out on unreasonable prefix-compressed path lengths
When computing the complete path length from the encoded
prefix-compressed path, we end up just allocating the complete path
without ever checking what the encoded path length actually is. This can
easily lead to a denial of service by just encoding an unreasonably long
path name inside of the index. Git already enforces a maximum path
length of 4096 bytes. As we also have that enforcement ready in some
places, just make sure that the resulting path is smaller than
GIT_PATH_MAX.
Reported-by: Krishna Ram Prakash R <[email protected]>
Reported-by: Vivek Parikh <[email protected]> |
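A sketch of the bound implied by the message (GIT_PATH_MAX mirrors the 4096-byte limit it cites; parameter names are illustrative):

#include <stddef.h>

#define GIT_PATH_MAX 4096   /* limit git itself enforces, per the message */

/* Validate the decoded prefix-compressed lengths before allocating. */
static int path_len_ok(size_t prefix_len, size_t suffix_len)
{
    if (prefix_len >= GIT_PATH_MAX || suffix_len >= GIT_PATH_MAX)
        return 0;                            /* also forestalls overflow */
    return prefix_len + suffix_len + 1 <= GIT_PATH_MAX; /* +1 for NUL */
}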
static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr,
unsigned short hnum,
__be16 sport, __be32 daddr, __be16 dport, int dif)
{
int score = -1;
if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
!ipv6_only_sock(sk)) {
struct inet_sock *inet = inet_sk(sk);
score = (sk->sk_family == PF_INET ? 1 : 0);
if (inet->inet_rcv_saddr) {
if (inet->inet_rcv_saddr != daddr)
return -1;
score += 2;
}
if (inet->inet_daddr) {
if (inet->inet_daddr != saddr)
return -1;
score += 2;
}
if (inet->inet_dport) {
if (inet->inet_dport != sport)
return -1;
score += 2;
}
if (sk->sk_bound_dev_if) {
if (sk->sk_bound_dev_if != dif)
return -1;
score += 2;
}
}
return score;
} | 0 | [
"CWE-400"
] | linux-2.6 | c377411f2494a931ff7facdbb3a6839b1266bcf6 | 263,682,068,846,795,600,000,000,000,000,000,000,000 | 34 | net: sk_add_backlog() take rmem_alloc into account
The current socket backlog limit is not enough to really stop DDoS attacks,
because the user thread spends a lot of time processing a full backlog each
round, and might spin wildly on the socket lock.
We should add the backlog size and receive_queue size (aka rmem_alloc) to
pace writers, and let the user run without being slowed down too much.
Introduce a sk_rcvqueues_full() helper, to avoid taking socket lock in
stress situations.
Under huge stress from a multiqueue/RPS enabled NIC, a single flow udp
receiver can now process ~200.000 pps (instead of ~100 pps before the
patch) on an 8-core machine.
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
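A sketch of the helper's shape (fields simplified from the kernel's sk_backlog.len, sk_rmem_alloc and sk_rcvbuf):

#include <stdbool.h>

struct sock_lite {
    unsigned int backlog_len; /* bytes queued while the socket was owned */
    unsigned int rmem_alloc;  /* bytes already in the receive queue */
    unsigned int rcvbuf;      /* receive buffer limit */
};

/* Lockless pacing check: pace or drop writers once the backlog plus
 * the receive queue together exceed the receive buffer limit. */
static bool sk_rcvqueues_full(const struct sock_lite *sk)
{
    return sk->backlog_len + sk->rmem_alloc > sk->rcvbuf;
}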
__perf_event_output(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs,
int (*output_begin)(struct perf_output_handle *,
struct perf_event *,
unsigned int))
{
struct perf_output_handle handle;
struct perf_event_header header;
/* protect the callchain buffers */
rcu_read_lock();
perf_prepare_sample(&header, data, event, regs);
if (output_begin(&handle, event, header.size))
goto exit;
perf_output_sample(&handle, &header, data, event);
perf_output_end(&handle);
exit:
rcu_read_unlock();
} | 0 | [
"CWE-362",
"CWE-125"
] | linux | 321027c1fe77f892f4ea07846aeae08cefbbb290 | 196,441,940,413,426,170,000,000,000,000,000,000,000 | 25 | perf/core: Fix concurrent sys_perf_event_open() vs. 'move_group' race
Di Shen reported a race between two concurrent sys_perf_event_open()
calls where both try and move the same pre-existing software group
into a hardware context.
The problem is exactly that described in commit:
f63a8daa5812 ("perf: Fix event->ctx locking")
... where, while we wait for a ctx->mutex acquisition, the event->ctx
relation can have changed under us.
That very same commit failed to recognise sys_perf_event_context() as an
external access vector to the events and thereby didn't apply the
established locking rules correctly.
So while one sys_perf_event_open() call is stuck waiting on
mutex_lock_double(), the other (which owns said locks) moves the group
about. So by the time the former sys_perf_event_open() acquires the
locks, the context we've acquired is stale (and possibly dead).
Apply the established locking rules as per perf_event_ctx_lock_nested()
to the mutex_lock_double() for the 'move_group' case. This obviously means
we need to validate state after we acquire the locks.
Reported-by: Di Shen (Keen Lab)
Tested-by: John Dias <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Alexander Shishkin <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Min Chong <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Stephane Eranian <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Vince Weaver <[email protected]>
Fixes: f63a8daa5812 ("perf: Fix event->ctx locking")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]> |
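The lock-and-revalidate pattern the message describes, sketched in user-space terms (perf's real perf_event_ctx_lock_nested() also handles refcounts and RCU, omitted here):

#include <pthread.h>
#include <stdio.h>

struct ctx { pthread_mutex_t mutex; };
struct event { struct ctx *ctx; };

/* event->ctx can change while we sleep on the mutex, so check it
 * again once the lock is actually held; retry if it moved. */
static struct ctx *lock_event_ctx(struct event *ev)
{
    for (;;) {
        struct ctx *c = ev->ctx;
        pthread_mutex_lock(&c->mutex);
        if (ev->ctx == c)
            return c;                    /* still ours, now stable */
        pthread_mutex_unlock(&c->mutex); /* moved under us: retry */
    }
}

int main(void)
{
    struct ctx c = { PTHREAD_MUTEX_INITIALIZER };
    struct event ev = { &c };
    pthread_mutex_unlock(&lock_event_ctx(&ev)->mutex);
    puts("locked and revalidated");
    return 0;
}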
dump_head(Buffer *buf)
{
TextListItem *ti;
if (buf->document_header == NULL) {
if (w3m_dump & DUMP_EXTRA)
printf("\n");
return;
}
for (ti = buf->document_header->first; ti; ti = ti->next) {
#ifdef USE_M17N
printf("%s",
wc_conv_strict(ti->ptr, InnerCharset,
buf->document_charset)->ptr);
#else
printf("%s", ti->ptr);
#endif
}
puts("");
} | 0 | [
"CWE-59",
"CWE-241"
] | w3m | 18dcbadf2771cdb0c18509b14e4e73505b242753 | 60,980,678,811,333,570,000,000,000,000,000,000,000 | 20 | Make temporary directory safely when ~/.w3m is unwritable |
static void _php_curl_reset_handlers(php_curl *ch)
{
if (!Z_ISUNDEF(ch->handlers->write->stream)) {
zval_ptr_dtor(&ch->handlers->write->stream);
ZVAL_UNDEF(&ch->handlers->write->stream);
}
ch->handlers->write->fp = NULL;
ch->handlers->write->method = PHP_CURL_STDOUT;
if (!Z_ISUNDEF(ch->handlers->write_header->stream)) {
zval_ptr_dtor(&ch->handlers->write_header->stream);
ZVAL_UNDEF(&ch->handlers->write_header->stream);
}
ch->handlers->write_header->fp = NULL;
ch->handlers->write_header->method = PHP_CURL_IGNORE;
if (!Z_ISUNDEF(ch->handlers->read->stream)) {
zval_ptr_dtor(&ch->handlers->read->stream);
ZVAL_UNDEF(&ch->handlers->read->stream);
}
ch->handlers->read->fp = NULL;
ch->handlers->read->res = NULL;
ch->handlers->read->method = PHP_CURL_DIRECT;
if (!Z_ISUNDEF(ch->handlers->std_err)) {
zval_ptr_dtor(&ch->handlers->std_err);
ZVAL_UNDEF(&ch->handlers->std_err);
}
if (ch->handlers->progress) {
zval_ptr_dtor(&ch->handlers->progress->func_name);
efree(ch->handlers->progress);
ch->handlers->progress = NULL;
}
#if LIBCURL_VERSION_NUM >= 0x071500 /* Available since 7.21.0 */
if (ch->handlers->fnmatch) {
zval_ptr_dtor(&ch->handlers->fnmatch->func_name);
efree(ch->handlers->fnmatch);
ch->handlers->fnmatch = NULL;
}
#endif
} | 0 | [] | php-src | 124fb22a13fafa3648e4e15b4f207c7096d8155e | 319,479,093,436,899,250,000,000,000,000,000,000,000 | 44 | Fixed bug #68739 #68740 #68741 |
xmlRelaxNGNewParserCtxt(const char *URL)
{
xmlRelaxNGParserCtxtPtr ret;
if (URL == NULL)
return (NULL);
ret =
(xmlRelaxNGParserCtxtPtr) xmlMalloc(sizeof(xmlRelaxNGParserCtxt));
if (ret == NULL) {
xmlRngPErrMemory(NULL, "building parser\n");
return (NULL);
}
memset(ret, 0, sizeof(xmlRelaxNGParserCtxt));
ret->URL = xmlStrdup((const xmlChar *) URL);
ret->error = xmlGenericError;
ret->userData = xmlGenericErrorContext;
return (ret);
} | 0 | [
"CWE-134"
] | libxml2 | 502f6a6d08b08c04b3ddfb1cd21b2f699c1b7f5b | 183,682,428,371,437,400,000,000,000,000,000,000,000 | 19 | More format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
adds a new xmlEscapeFormatString() function to escape composed format
strings |
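The vulnerability class being warned about is passing composed, partly untrusted text as the format argument; the escaping helper mentioned above exists to keep such strings safe when they must be used as formats. The safe versus unsafe shapes, in a standalone sketch:

#include <stdio.h>

int main(void)
{
    const char *untrusted = "100%s coverage";  /* attacker-influenced */

    printf("%s\n", untrusted);   /* safe: untrusted text is data      */
    /* printf(untrusted);           unsafe: "%s" reads a stray vararg */
    return 0;
}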
ctl_putsys(
int varid
)
{
l_fp tmp;
char str[256];
u_int u;
double kb;
double dtemp;
const char *ss;
#ifdef AUTOKEY
struct cert_info *cp;
#endif /* AUTOKEY */
#ifdef KERNEL_PLL
static struct timex ntx;
static u_long ntp_adjtime_time;
static const double to_ms =
# ifdef STA_NANO
1.0e-6; /* nsec to msec */
# else
1.0e-3; /* usec to msec */
# endif
/*
* CS_K_* variables depend on up-to-date output of ntp_adjtime()
*/
if (CS_KERN_FIRST <= varid && varid <= CS_KERN_LAST &&
current_time != ntp_adjtime_time) {
ZERO(ntx);
if (ntp_adjtime(&ntx) < 0)
msyslog(LOG_ERR, "ntp_adjtime() for mode 6 query failed: %m");
else
ntp_adjtime_time = current_time;
}
#endif /* KERNEL_PLL */
switch (varid) {
case CS_LEAP:
ctl_putuint(sys_var[CS_LEAP].text, sys_leap);
break;
case CS_STRATUM:
ctl_putuint(sys_var[CS_STRATUM].text, sys_stratum);
break;
case CS_PRECISION:
ctl_putint(sys_var[CS_PRECISION].text, sys_precision);
break;
case CS_ROOTDELAY:
ctl_putdbl(sys_var[CS_ROOTDELAY].text, sys_rootdelay *
1e3);
break;
case CS_ROOTDISPERSION:
ctl_putdbl(sys_var[CS_ROOTDISPERSION].text,
sys_rootdisp * 1e3);
break;
case CS_REFID:
if (sys_stratum > 1 && sys_stratum < STRATUM_UNSPEC)
ctl_putadr(sys_var[varid].text, sys_refid, NULL);
else
ctl_putrefid(sys_var[varid].text, sys_refid);
break;
case CS_REFTIME:
ctl_putts(sys_var[CS_REFTIME].text, &sys_reftime);
break;
case CS_POLL:
ctl_putuint(sys_var[CS_POLL].text, sys_poll);
break;
case CS_PEERID:
if (sys_peer == NULL)
ctl_putuint(sys_var[CS_PEERID].text, 0);
else
ctl_putuint(sys_var[CS_PEERID].text,
sys_peer->associd);
break;
case CS_PEERADR:
if (sys_peer != NULL && sys_peer->dstadr != NULL)
ss = sptoa(&sys_peer->srcadr);
else
ss = "0.0.0.0:0";
ctl_putunqstr(sys_var[CS_PEERADR].text, ss, strlen(ss));
break;
case CS_PEERMODE:
u = (sys_peer != NULL)
? sys_peer->hmode
: MODE_UNSPEC;
ctl_putuint(sys_var[CS_PEERMODE].text, u);
break;
case CS_OFFSET:
ctl_putdbl6(sys_var[CS_OFFSET].text, last_offset * 1e3);
break;
case CS_DRIFT:
ctl_putdbl(sys_var[CS_DRIFT].text, drift_comp * 1e6);
break;
case CS_JITTER:
ctl_putdbl6(sys_var[CS_JITTER].text, sys_jitter * 1e3);
break;
case CS_ERROR:
ctl_putdbl(sys_var[CS_ERROR].text, clock_jitter * 1e3);
break;
case CS_CLOCK:
get_systime(&tmp);
ctl_putts(sys_var[CS_CLOCK].text, &tmp);
break;
case CS_PROCESSOR:
#ifndef HAVE_UNAME
ctl_putstr(sys_var[CS_PROCESSOR].text, str_processor,
sizeof(str_processor) - 1);
#else
ctl_putstr(sys_var[CS_PROCESSOR].text,
utsnamebuf.machine, strlen(utsnamebuf.machine));
#endif /* HAVE_UNAME */
break;
case CS_SYSTEM:
#ifndef HAVE_UNAME
ctl_putstr(sys_var[CS_SYSTEM].text, str_system,
sizeof(str_system) - 1);
#else
snprintf(str, sizeof(str), "%s/%s", utsnamebuf.sysname,
utsnamebuf.release);
ctl_putstr(sys_var[CS_SYSTEM].text, str, strlen(str));
#endif /* HAVE_UNAME */
break;
case CS_VERSION:
ctl_putstr(sys_var[CS_VERSION].text, Version,
strlen(Version));
break;
case CS_STABIL:
ctl_putdbl(sys_var[CS_STABIL].text, clock_stability *
1e6);
break;
case CS_VARLIST:
{
char buf[CTL_MAX_DATA_LEN];
//buffPointer, firstElementPointer, buffEndPointer
char *buffp, *buffend;
int firstVarName;
const char *ss1;
int len;
const struct ctl_var *k;
buffp = buf;
buffend = buf + sizeof(buf);
if (buffp + strlen(sys_var[CS_VARLIST].text) + 4 > buffend)
break; /* really long var name */
snprintf(buffp, sizeof(buf), "%s=\"",sys_var[CS_VARLIST].text);
buffp += strlen(buffp);
firstVarName = TRUE;
for (k = sys_var; !(k->flags & EOV); k++) {
if (k->flags & PADDING)
continue;
len = strlen(k->text);
if (buffp + len + 1 >= buffend)
break;
if (!firstVarName)
*buffp++ = ',';
else
firstVarName = FALSE;
memcpy(buffp, k->text, len);
buffp += len;
}
for (k = ext_sys_var; k && !(k->flags & EOV); k++) {
if (k->flags & PADDING)
continue;
if (NULL == k->text)
continue;
ss1 = strchr(k->text, '=');
if (NULL == ss1)
len = strlen(k->text);
else
len = ss1 - k->text;
if (buffp + len + 1 >= buffend)
break;
if (firstVarName) {
*buffp++ = ',';
firstVarName = FALSE;
}
memcpy(buffp, k->text,(unsigned)len);
buffp += len;
}
if (buffp + 2 >= buffend)
break;
*buffp++ = '"';
*buffp = '\0';
ctl_putdata(buf, (unsigned)( buffp - buf ), 0);
break;
}
case CS_TAI:
if (sys_tai > 0)
ctl_putuint(sys_var[CS_TAI].text, sys_tai);
break;
case CS_LEAPTAB:
{
leap_signature_t lsig;
leapsec_getsig(&lsig);
if (lsig.ttime > 0)
ctl_putfs(sys_var[CS_LEAPTAB].text, lsig.ttime);
break;
}
case CS_LEAPEND:
{
leap_signature_t lsig;
leapsec_getsig(&lsig);
if (lsig.etime > 0)
ctl_putfs(sys_var[CS_LEAPEND].text, lsig.etime);
break;
}
#ifdef LEAP_SMEAR
case CS_LEAPSMEARINTV:
if (leap_smear_intv > 0)
ctl_putuint(sys_var[CS_LEAPSMEARINTV].text, leap_smear_intv);
break;
case CS_LEAPSMEAROFFS:
if (leap_smear_intv > 0)
ctl_putdbl(sys_var[CS_LEAPSMEAROFFS].text,
leap_smear.doffset * 1e3);
break;
#endif /* LEAP_SMEAR */
case CS_RATE:
ctl_putuint(sys_var[CS_RATE].text, ntp_minpoll);
break;
case CS_MRU_ENABLED:
ctl_puthex(sys_var[varid].text, mon_enabled);
break;
case CS_MRU_DEPTH:
ctl_putuint(sys_var[varid].text, mru_entries);
break;
case CS_MRU_MEM:
kb = mru_entries * (sizeof(mon_entry) / 1024.);
u = (u_int)kb;
if (kb - u >= 0.5)
u++;
ctl_putuint(sys_var[varid].text, u);
break;
case CS_MRU_DEEPEST:
ctl_putuint(sys_var[varid].text, mru_peakentries);
break;
case CS_MRU_MINDEPTH:
ctl_putuint(sys_var[varid].text, mru_mindepth);
break;
case CS_MRU_MAXAGE:
ctl_putint(sys_var[varid].text, mru_maxage);
break;
case CS_MRU_MAXDEPTH:
ctl_putuint(sys_var[varid].text, mru_maxdepth);
break;
case CS_MRU_MAXMEM:
kb = mru_maxdepth * (sizeof(mon_entry) / 1024.);
u = (u_int)kb;
if (kb - u >= 0.5)
u++;
ctl_putuint(sys_var[varid].text, u);
break;
case CS_SS_UPTIME:
ctl_putuint(sys_var[varid].text, current_time);
break;
case CS_SS_RESET:
ctl_putuint(sys_var[varid].text,
current_time - sys_stattime);
break;
case CS_SS_RECEIVED:
ctl_putuint(sys_var[varid].text, sys_received);
break;
case CS_SS_THISVER:
ctl_putuint(sys_var[varid].text, sys_newversion);
break;
case CS_SS_OLDVER:
ctl_putuint(sys_var[varid].text, sys_oldversion);
break;
case CS_SS_BADFORMAT:
ctl_putuint(sys_var[varid].text, sys_badlength);
break;
case CS_SS_BADAUTH:
ctl_putuint(sys_var[varid].text, sys_badauth);
break;
case CS_SS_DECLINED:
ctl_putuint(sys_var[varid].text, sys_declined);
break;
case CS_SS_RESTRICTED:
ctl_putuint(sys_var[varid].text, sys_restricted);
break;
case CS_SS_LIMITED:
ctl_putuint(sys_var[varid].text, sys_limitrejected);
break;
case CS_SS_KODSENT:
ctl_putuint(sys_var[varid].text, sys_kodsent);
break;
case CS_SS_PROCESSED:
ctl_putuint(sys_var[varid].text, sys_processed);
break;
case CS_BCASTDELAY:
ctl_putdbl(sys_var[varid].text, sys_bdelay * 1e3);
break;
case CS_AUTHDELAY:
LFPTOD(&sys_authdelay, dtemp);
ctl_putdbl(sys_var[varid].text, dtemp * 1e3);
break;
case CS_AUTHKEYS:
ctl_putuint(sys_var[varid].text, authnumkeys);
break;
case CS_AUTHFREEK:
ctl_putuint(sys_var[varid].text, authnumfreekeys);
break;
case CS_AUTHKLOOKUPS:
ctl_putuint(sys_var[varid].text, authkeylookups);
break;
case CS_AUTHKNOTFOUND:
ctl_putuint(sys_var[varid].text, authkeynotfound);
break;
case CS_AUTHKUNCACHED:
ctl_putuint(sys_var[varid].text, authkeyuncached);
break;
case CS_AUTHKEXPIRED:
ctl_putuint(sys_var[varid].text, authkeyexpired);
break;
case CS_AUTHENCRYPTS:
ctl_putuint(sys_var[varid].text, authencryptions);
break;
case CS_AUTHDECRYPTS:
ctl_putuint(sys_var[varid].text, authdecryptions);
break;
case CS_AUTHRESET:
ctl_putuint(sys_var[varid].text,
current_time - auth_timereset);
break;
/*
* CTL_IF_KERNLOOP() puts a zero if the kernel loop is
* unavailable, otherwise calls putfunc with args.
*/
#ifndef KERNEL_PLL
# define CTL_IF_KERNLOOP(putfunc, args) \
ctl_putint(sys_var[varid].text, 0)
#else
# define CTL_IF_KERNLOOP(putfunc, args) \
putfunc args
#endif
/*
* CTL_IF_KERNPPS() puts a zero if either the kernel
* loop is unavailable, or kernel hard PPS is not
* active, otherwise calls putfunc with args.
*/
#ifndef KERNEL_PLL
# define CTL_IF_KERNPPS(putfunc, args) \
ctl_putint(sys_var[varid].text, 0)
#else
# define CTL_IF_KERNPPS(putfunc, args) \
if (0 == ntx.shift) \
ctl_putint(sys_var[varid].text, 0); \
else \
putfunc args /* no trailing ; */
#endif
case CS_K_OFFSET:
CTL_IF_KERNLOOP(
ctl_putdblf,
(sys_var[varid].text, 0, -1, to_ms * ntx.offset)
);
break;
case CS_K_FREQ:
CTL_IF_KERNLOOP(
ctl_putsfp,
(sys_var[varid].text, ntx.freq)
);
break;
case CS_K_MAXERR:
CTL_IF_KERNLOOP(
ctl_putdblf,
(sys_var[varid].text, 0, 6,
to_ms * ntx.maxerror)
);
break;
case CS_K_ESTERR:
CTL_IF_KERNLOOP(
ctl_putdblf,
(sys_var[varid].text, 0, 6,
to_ms * ntx.esterror)
);
break;
case CS_K_STFLAGS:
#ifndef KERNEL_PLL
ss = "";
#else
ss = k_st_flags(ntx.status);
#endif
ctl_putstr(sys_var[varid].text, ss, strlen(ss));
break;
case CS_K_TIMECONST:
CTL_IF_KERNLOOP(
ctl_putint,
(sys_var[varid].text, ntx.constant)
);
break;
case CS_K_PRECISION:
CTL_IF_KERNLOOP(
ctl_putdblf,
(sys_var[varid].text, 0, 6,
to_ms * ntx.precision)
);
break;
case CS_K_FREQTOL:
CTL_IF_KERNLOOP(
ctl_putsfp,
(sys_var[varid].text, ntx.tolerance)
);
break;
case CS_K_PPS_FREQ:
CTL_IF_KERNPPS(
ctl_putsfp,
(sys_var[varid].text, ntx.ppsfreq)
);
break;
case CS_K_PPS_STABIL:
CTL_IF_KERNPPS(
ctl_putsfp,
(sys_var[varid].text, ntx.stabil)
);
break;
case CS_K_PPS_JITTER:
CTL_IF_KERNPPS(
ctl_putdbl,
(sys_var[varid].text, to_ms * ntx.jitter)
);
break;
case CS_K_PPS_CALIBDUR:
CTL_IF_KERNPPS(
ctl_putint,
(sys_var[varid].text, 1 << ntx.shift)
);
break;
case CS_K_PPS_CALIBS:
CTL_IF_KERNPPS(
ctl_putint,
(sys_var[varid].text, ntx.calcnt)
);
break;
case CS_K_PPS_CALIBERRS:
CTL_IF_KERNPPS(
ctl_putint,
(sys_var[varid].text, ntx.errcnt)
);
break;
case CS_K_PPS_JITEXC:
CTL_IF_KERNPPS(
ctl_putint,
(sys_var[varid].text, ntx.jitcnt)
);
break;
case CS_K_PPS_STBEXC:
CTL_IF_KERNPPS(
ctl_putint,
(sys_var[varid].text, ntx.stbcnt)
);
break;
case CS_IOSTATS_RESET:
ctl_putuint(sys_var[varid].text,
current_time - io_timereset);
break;
case CS_TOTAL_RBUF:
ctl_putuint(sys_var[varid].text, total_recvbuffs());
break;
case CS_FREE_RBUF:
ctl_putuint(sys_var[varid].text, free_recvbuffs());
break;
case CS_USED_RBUF:
ctl_putuint(sys_var[varid].text, full_recvbuffs());
break;
case CS_RBUF_LOWATER:
ctl_putuint(sys_var[varid].text, lowater_additions());
break;
case CS_IO_DROPPED:
ctl_putuint(sys_var[varid].text, packets_dropped);
break;
case CS_IO_IGNORED:
ctl_putuint(sys_var[varid].text, packets_ignored);
break;
case CS_IO_RECEIVED:
ctl_putuint(sys_var[varid].text, packets_received);
break;
case CS_IO_SENT:
ctl_putuint(sys_var[varid].text, packets_sent);
break;
case CS_IO_SENDFAILED:
ctl_putuint(sys_var[varid].text, packets_notsent);
break;
case CS_IO_WAKEUPS:
ctl_putuint(sys_var[varid].text, handler_calls);
break;
case CS_IO_GOODWAKEUPS:
ctl_putuint(sys_var[varid].text, handler_pkts);
break;
case CS_TIMERSTATS_RESET:
ctl_putuint(sys_var[varid].text,
current_time - timer_timereset);
break;
case CS_TIMER_OVERRUNS:
ctl_putuint(sys_var[varid].text, alarm_overflow);
break;
case CS_TIMER_XMTS:
ctl_putuint(sys_var[varid].text, timer_xmtcalls);
break;
case CS_FUZZ:
ctl_putdbl(sys_var[varid].text, sys_fuzz * 1e3);
break;
case CS_WANDER_THRESH:
ctl_putdbl(sys_var[varid].text, wander_threshold * 1e6);
break;
#ifdef AUTOKEY
case CS_FLAGS:
if (crypto_flags)
ctl_puthex(sys_var[CS_FLAGS].text,
crypto_flags);
break;
case CS_DIGEST:
if (crypto_flags) {
strlcpy(str, OBJ_nid2ln(crypto_nid),
COUNTOF(str));
ctl_putstr(sys_var[CS_DIGEST].text, str,
strlen(str));
}
break;
case CS_SIGNATURE:
if (crypto_flags) {
const EVP_MD *dp;
dp = EVP_get_digestbynid(crypto_flags >> 16);
strlcpy(str, OBJ_nid2ln(EVP_MD_pkey_type(dp)),
COUNTOF(str));
ctl_putstr(sys_var[CS_SIGNATURE].text, str,
strlen(str));
}
break;
case CS_HOST:
if (hostval.ptr != NULL)
ctl_putstr(sys_var[CS_HOST].text, hostval.ptr,
strlen(hostval.ptr));
break;
case CS_IDENT:
if (sys_ident != NULL)
ctl_putstr(sys_var[CS_IDENT].text, sys_ident,
strlen(sys_ident));
break;
case CS_CERTIF:
for (cp = cinfo; cp != NULL; cp = cp->link) {
snprintf(str, sizeof(str), "%s %s 0x%x",
cp->subject, cp->issuer, cp->flags);
ctl_putstr(sys_var[CS_CERTIF].text, str,
strlen(str));
ctl_putcal(sys_var[CS_REVTIME].text, &(cp->last));
}
break;
case CS_PUBLIC:
if (hostval.tstamp != 0)
ctl_putfs(sys_var[CS_PUBLIC].text,
ntohl(hostval.tstamp));
break;
#endif /* AUTOKEY */
default:
break;
}
} | 0 | [
"CWE-22"
] | ntp | 184516e143ce4448ddb5b9876dd372008cc779f6 | 185,916,050,040,122,300,000,000,000,000,000,000,000 | 661 | [TALOS-CAN-0062] prevent directory traversal for VMS, too, when using 'saveconfig' command. |
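A note on the class of fix above: 'saveconfig' takes a client-supplied filename, so the fix is about refusing names that escape the target directory. The following is a minimal, hedged sketch of such a filter — filename_is_safe() is an illustrative helper, not ntpd's code, and real VMS handling also has to reject bracketed directory syntax, which is what the commit adds.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Reject absolute paths and any ".." component. Hypothetical helper,
 * not ntpd's implementation; VMS-style "[-]" parent syntax would need
 * its own handling on that platform. */
static bool filename_is_safe(const char *name)
{
    if (name == NULL || name[0] == '\0' || name[0] == '/')
        return false;
    for (const char *p = name; *p != '\0'; ) {
        size_t seg = strcspn(p, "/\\");
        if (seg == 2 && p[0] == '.' && p[1] == '.')
            return false;            /* ".." escapes the directory */
        p += seg;
        if (*p != '\0')
            p++;                     /* skip the separator */
    }
    return true;
}

int main(void)
{
    printf("%d %d %d\n",
           filename_is_safe("ntp.conf.saved"),    /* 1 */
           filename_is_safe("../etc/passwd"),     /* 0 */
           filename_is_safe("/etc/passwd"));      /* 0 */
    return 0;
}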
static void mutt_decode_uuencoded (STATE *s, LOFF_T len, int istext, iconv_t cd)
{
char tmps[SHORT_STRING];
char linelen, c, l, out;
char *pt;
char bufi[BUFI_SIZE];
size_t k = 0;
if (istext)
state_set_prefix(s);
while (len > 0)
{
if ((fgets(tmps, sizeof(tmps), s->fpin)) == NULL)
return;
len -= mutt_strlen(tmps);
if ((!mutt_strncmp (tmps, "begin", 5)) && ISSPACE (tmps[5]))
break;
}
while (len > 0)
{
if ((fgets(tmps, sizeof(tmps), s->fpin)) == NULL)
return;
len -= mutt_strlen(tmps);
if (!mutt_strncmp (tmps, "end", 3))
break;
pt = tmps;
linelen = decode_byte (*pt);
pt++;
for (c = 0; c < linelen && *pt;)
{
for (l = 2; l <= 6 && *pt && *(pt + 1); l += 2)
{
out = decode_byte (*pt) << l;
pt++;
out |= (decode_byte (*pt) >> (6 - l));
bufi[k++] = out;
c++;
if (c == linelen)
break;
}
mutt_convert_to_state (cd, bufi, &k, s);
pt++;
}
}
mutt_convert_to_state (cd, bufi, &k, s);
mutt_convert_to_state (cd, 0, 0, s);
state_reset_prefix(s);
} | 0 | [
"CWE-120"
] | mutt | e5ed080c00e59701ca62ef9b2a6d2612ebf765a5 | 40,419,941,611,570,327,000,000,000,000,000,000,000 | 51 | Fix uudecode buffer overflow.
mutt_decode_uuencoded() used each line's initial "length character"
without any validation. It would happily read past the end of the
input line, and with a suitable value even past the length of the
input buffer.
As I noted in ticket 404, there are several other changes that could
be added to make the parser more robust. However, to avoid
accidentally introducing another bug or regression, I'm restricting
this patch to simply addressing the overflow.
Thanks to Tavis Ormandy for reporting the issue, along with a sample
message demonstrating the problem. |
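To make the missing validation concrete: decode_byte() yields a claimed byte count of 0..63 that the loop above trusts, even when the line holds fewer encoded bytes or bufi[] is nearly full. A hedged sketch of the bound follows — BUFI_SIZE and decode_byte() are borrowed from the function's context, and the actual mutt patch may clamp differently.

#include <stdio.h>
#include <string.h>

#define BUFI_SIZE 1000   /* assumption: stands in for mutt's buffer size */

static int decode_byte(char c) { return (c - 32) & 63; }

/* Clamp the claimed length to what the line and the output buffer can
 * actually hold: 4 encoded chars carry 3 payload bytes. */
static int safe_linelen(const char *line, size_t k_used)
{
    int claimed = decode_byte(line[0]);
    size_t avail_in  = strlen(line + 1) * 3 / 4;
    size_t avail_out = BUFI_SIZE - k_used;
    if ((size_t)claimed > avail_in)  claimed = (int)avail_in;
    if ((size_t)claimed > avail_out) claimed = (int)avail_out;
    return claimed;
}

int main(void)
{
    /* "M" claims 45 payload bytes but carries none: clamped to 0. */
    printf("%d\n", safe_linelen("M", 0));
    return 0;
}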
static int fts3IncrmergeLoad(
Fts3Table *p, /* Fts3 table handle */
sqlite3_int64 iAbsLevel, /* Absolute level of input segments */
int iIdx, /* Index of candidate output segment */
const char *zKey, /* First key to write */
int nKey, /* Number of bytes in nKey */
IncrmergeWriter *pWriter /* Populate this object */
){
int rc; /* Return code */
sqlite3_stmt *pSelect = 0; /* SELECT to read %_segdir entry */
rc = fts3SqlStmt(p, SQL_SELECT_SEGDIR, &pSelect, 0);
if( rc==SQLITE_OK ){
sqlite3_int64 iStart = 0; /* Value of %_segdir.start_block */
sqlite3_int64 iLeafEnd = 0; /* Value of %_segdir.leaves_end_block */
sqlite3_int64 iEnd = 0; /* Value of %_segdir.end_block */
const char *aRoot = 0; /* Pointer to %_segdir.root buffer */
int nRoot = 0; /* Size of aRoot[] in bytes */
int rc2; /* Return code from sqlite3_reset() */
int bAppendable = 0; /* Set to true if segment is appendable */
/* Read the %_segdir entry for index iIdx absolute level (iAbsLevel+1) */
sqlite3_bind_int64(pSelect, 1, iAbsLevel+1);
sqlite3_bind_int(pSelect, 2, iIdx);
if( sqlite3_step(pSelect)==SQLITE_ROW ){
iStart = sqlite3_column_int64(pSelect, 1);
iLeafEnd = sqlite3_column_int64(pSelect, 2);
fts3ReadEndBlockField(pSelect, 3, &iEnd, &pWriter->nLeafData);
if( pWriter->nLeafData<0 ){
pWriter->nLeafData = pWriter->nLeafData * -1;
}
pWriter->bNoLeafData = (pWriter->nLeafData==0);
nRoot = sqlite3_column_bytes(pSelect, 4);
aRoot = sqlite3_column_blob(pSelect, 4);
}else{
return sqlite3_reset(pSelect);
}
/* Check for the zero-length marker in the %_segments table */
rc = fts3IsAppendable(p, iEnd, &bAppendable);
/* Check that zKey/nKey is larger than the largest key the candidate */
if( rc==SQLITE_OK && bAppendable ){
char *aLeaf = 0;
int nLeaf = 0;
rc = sqlite3Fts3ReadBlock(p, iLeafEnd, &aLeaf, &nLeaf, 0);
if( rc==SQLITE_OK ){
NodeReader reader;
for(rc = nodeReaderInit(&reader, aLeaf, nLeaf);
rc==SQLITE_OK && reader.aNode;
rc = nodeReaderNext(&reader)
){
assert( reader.aNode );
}
if( fts3TermCmp(zKey, nKey, reader.term.a, reader.term.n)<=0 ){
bAppendable = 0;
}
nodeReaderRelease(&reader);
}
sqlite3_free(aLeaf);
}
if( rc==SQLITE_OK && bAppendable ){
/* It is possible to append to this segment. Set up the IncrmergeWriter
** object to do so. */
int i;
int nHeight = (int)aRoot[0];
NodeWriter *pNode;
pWriter->nLeafEst = (int)((iEnd - iStart) + 1)/FTS_MAX_APPENDABLE_HEIGHT;
pWriter->iStart = iStart;
pWriter->iEnd = iEnd;
pWriter->iAbsLevel = iAbsLevel;
pWriter->iIdx = iIdx;
for(i=nHeight+1; i<FTS_MAX_APPENDABLE_HEIGHT; i++){
pWriter->aNodeWriter[i].iBlock = pWriter->iStart + i*pWriter->nLeafEst;
}
pNode = &pWriter->aNodeWriter[nHeight];
pNode->iBlock = pWriter->iStart + pWriter->nLeafEst*nHeight;
blobGrowBuffer(&pNode->block,
MAX(nRoot, p->nNodeSize)+FTS3_NODE_PADDING, &rc
);
if( rc==SQLITE_OK ){
memcpy(pNode->block.a, aRoot, nRoot);
pNode->block.n = nRoot;
memset(&pNode->block.a[nRoot], 0, FTS3_NODE_PADDING);
}
for(i=nHeight; i>=0 && rc==SQLITE_OK; i--){
NodeReader reader;
pNode = &pWriter->aNodeWriter[i];
if( pNode->block.a){
rc = nodeReaderInit(&reader, pNode->block.a, pNode->block.n);
while( reader.aNode && rc==SQLITE_OK ) rc = nodeReaderNext(&reader);
blobGrowBuffer(&pNode->key, reader.term.n, &rc);
if( rc==SQLITE_OK ){
memcpy(pNode->key.a, reader.term.a, reader.term.n);
pNode->key.n = reader.term.n;
if( i>0 ){
char *aBlock = 0;
int nBlock = 0;
pNode = &pWriter->aNodeWriter[i-1];
pNode->iBlock = reader.iChild;
rc = sqlite3Fts3ReadBlock(p, reader.iChild, &aBlock, &nBlock, 0);
blobGrowBuffer(&pNode->block,
MAX(nBlock, p->nNodeSize)+FTS3_NODE_PADDING, &rc
);
if( rc==SQLITE_OK ){
memcpy(pNode->block.a, aBlock, nBlock);
pNode->block.n = nBlock;
memset(&pNode->block.a[nBlock], 0, FTS3_NODE_PADDING);
}
sqlite3_free(aBlock);
}
}
}
nodeReaderRelease(&reader);
}
}
rc2 = sqlite3_reset(pSelect);
if( rc==SQLITE_OK ) rc = rc2;
}
return rc;
} | 1 | [
"CWE-787"
] | sqlite | c72f2fb7feff582444b8ffdc6c900c69847ce8a9 | 14,479,294,390,464,359,000,000,000,000,000,000,000 | 130 | More improvements to shadow table corruption detection in FTS3.
FossilOrigin-Name: 51525f9c3235967bc00a090e84c70a6400698c897aa4742e817121c725b8c99d |
PackOpenBSDElf32x86::generateElfHdr(
OutputFile *fo,
void const *proto,
unsigned const brka
)
{
cprElfHdr3 *const h3 = (cprElfHdr3 *)(void *)&elfout;
memcpy(h3, proto, sizeof(*h3)); // reads beyond, but OK
h3->ehdr.e_ident[Elf32_Ehdr::EI_OSABI] = ei_osabi;
assert(2==get_te16(&h3->ehdr.e_phnum));
set_te16(&h3->ehdr.e_phnum, 3);
assert(get_te32(&h3->ehdr.e_phoff) == sizeof(Elf32_Ehdr));
h3->ehdr.e_shoff = 0;
assert(get_te16(&h3->ehdr.e_ehsize) == sizeof(Elf32_Ehdr));
assert(get_te16(&h3->ehdr.e_phentsize) == sizeof(Elf32_Phdr));
set_te16(&h3->ehdr.e_shentsize, sizeof(Elf32_Shdr));
h3->ehdr.e_shnum = 0;
h3->ehdr.e_shstrndx = 0;
struct {
Elf32_Nhdr nhdr;
char name[8];
unsigned body;
} elfnote;
unsigned const note_offset = sizeof(*h3) - sizeof(linfo);
sz_elf_hdrs = sizeof(elfnote) + note_offset;
set_te32(&h3->phdr[2].p_type, PT_NOTE32);
set_te32(&h3->phdr[2].p_offset, note_offset);
set_te32(&h3->phdr[2].p_vaddr, note_offset);
set_te32(&h3->phdr[2].p_paddr, note_offset);
set_te32(&h3->phdr[2].p_filesz, sizeof(elfnote));
set_te32(&h3->phdr[2].p_memsz, sizeof(elfnote));
set_te32(&h3->phdr[2].p_flags, Elf32_Phdr::PF_R);
set_te32(&h3->phdr[2].p_align, 4);
// Q: Same as this->note_body[0 .. this->note_size-1] ?
set_te32(&elfnote.nhdr.namesz, 8);
set_te32(&elfnote.nhdr.descsz, OPENBSD_DESCSZ);
set_te32(&elfnote.nhdr.type, NHDR_OPENBSD_TAG);
memcpy(elfnote.name, "OpenBSD", sizeof(elfnote.name));
elfnote.body = 0;
set_te32(&h3->phdr[0].p_filesz, sz_elf_hdrs);
h3->phdr[0].p_memsz = h3->phdr[0].p_filesz;
unsigned const brkb = brka | ((0==(~page_mask & brka)) ? 0x20 : 0);
set_te32(&h3->phdr[1].p_type, PT_LOAD32); // be sure
set_te32(&h3->phdr[1].p_offset, ~page_mask & brkb);
set_te32(&h3->phdr[1].p_vaddr, brkb);
set_te32(&h3->phdr[1].p_paddr, brkb);
h3->phdr[1].p_filesz = 0;
// Too many kernels have bugs when 0==.p_memsz
set_te32(&h3->phdr[1].p_memsz, 1);
set_te32(&h3->phdr[1].p_flags, Elf32_Phdr::PF_R | Elf32_Phdr::PF_W);
if (ph.format==getFormat()) {
memset(&h3->linfo, 0, sizeof(h3->linfo));
fo->write(h3, sizeof(*h3) - sizeof(h3->linfo));
fo->write(&elfnote, sizeof(elfnote));
fo->write(&h3->linfo, sizeof(h3->linfo));
}
else {
assert(false); // unknown ph.format, PackLinuxElf32
}
} | 0 | [
"CWE-415"
] | upx | d9288213ec156dffc435566b9d393d23e87c6914 | 180,983,135,140,875,720,000,000,000,000,000,000,000 | 68 | More checking of PT_DYNAMIC and its contents.
https://github.com/upx/upx/issues/206
modified: p_lx_elf.cpp |
TEST_F(ConnectionManagerUtilityTest, PreserveExternalRequestId) {
connection_.stream_info_.downstream_address_provider_->setRemoteAddress(
std::make_shared<Network::Address::Ipv4Instance>("134.2.2.11"));
ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true));
ON_CALL(config_, preserveExternalRequestId()).WillByDefault(Return(true));
TestRequestHeaderMapImpl headers{{"x-request-id", "my-request-id"},
{"x-forwarded-for", "198.51.100.1"}};
EXPECT_CALL(*request_id_extension_, set(testing::Ref(headers), false));
EXPECT_CALL(*request_id_extension_, set(_, true)).Times(0);
EXPECT_EQ((MutateRequestRet{"134.2.2.11:0", false, Tracing::Reason::NotTraceable}),
callMutateRequestHeaders(headers, Protocol::Http2));
EXPECT_CALL(random_, uuid()).Times(0);
EXPECT_EQ("my-request-id", headers.get_("x-request-id"));
} | 0 | [
"CWE-22"
] | envoy | 5333b928d8bcffa26ab19bf018369a835f697585 | 339,771,437,610,425,580,000,000,000,000,000,000,000 | 14 | Implement handling of escaped slash characters in URL path
Fixes: CVE-2021-29492
Signed-off-by: Yan Avlasov <[email protected]> |
static int SFDGetBitmapProps(FILE *sfd,BDFFont *bdf,char *tok) {
int pcnt;
int i;
if ( getint(sfd,&pcnt)!=1 || pcnt<=0 )
return( 0 );
bdf->prop_cnt = pcnt;
bdf->props = malloc(pcnt*sizeof(BDFProperties));
for ( i=0; i<pcnt; ++i ) {
if ( getname(sfd,tok)!=1 )
break;
if ( strcmp(tok,"BDFEndProperties")==0 )
break;
bdf->props[i].name = copy(tok);
getint(sfd,&bdf->props[i].type);
switch ( bdf->props[i].type&~prt_property ) {
case prt_int: case prt_uint:
getint(sfd,&bdf->props[i].u.val);
break;
case prt_string: case prt_atom:
geteol(sfd,tok);
if ( tok[strlen(tok)-1]=='"' ) tok[strlen(tok)-1] = '\0';
bdf->props[i].u.str = copy(tok[0]=='"'?tok+1:tok);
break;
default:
break;
}
}
bdf->prop_cnt = i;
return( 1 );
} | 0 | [
"CWE-416"
] | fontforge | 048a91e2682c1a8936ae34dbc7bd70291ec05410 | 214,103,632,234,151,100,000,000,000,000,000,000,000 | 31 | Fix for #4084 Use-after-free (heap) in the SFD_GetFontMetaData() function
Fix for #4086 NULL pointer dereference in the SFDGetSpiros() function
Fix for #4088 NULL pointer dereference in the SFD_AssignLookups() function
Add empty sf->fontname string if it isn't set, fixing #4089 #4090 and many
other potential issues (many downstream calls to strlen() on the value). |
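The last point in the message — giving sf->fontname an empty string when unset so the many downstream strlen() calls are safe — reduces to a small guard. A sketch under the assumption that copy() duplicates a C string, as in FontForge's utility code; the struct below is a stand-in.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct splinefont { char *fontname; };   /* stand-in for SplineFont */

static char *copy(const char *s) { return s ? strdup(s) : NULL; }

/* Guarantee downstream strlen(sf->fontname) never sees NULL. */
static void ensure_fontname(struct splinefont *sf)
{
    if (sf->fontname == NULL)
        sf->fontname = copy("");
}

int main(void)
{
    struct splinefont sf = { NULL };
    ensure_fontname(&sf);
    printf("%zu\n", strlen(sf.fontname));  /* 0, not a crash */
    free(sf.fontname);
    return 0;
}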
sigend_vrrp(__attribute__((unused)) void *v, __attribute__((unused)) int sig)
{
if (master)
thread_add_start_terminate_event(master, start_vrrp_termination_thread);
} | 0 | [
"CWE-200"
] | keepalived | 26c8d6374db33bcfcdcd758b1282f12ceef4b94f | 255,098,525,266,062,740,000,000,000,000,000,000,000 | 5 | Disable fopen_safe() append mode by default
If a non-privileged user creates /tmp/keepalived.log and has it open
for reading (e.g. tail -f), then even though keepalived will change the
owner to root and remove all read/write permissions from non owners,
the application which already has the file open will be able to read
the added log entries.
Accordingly, opening a file in append mode is disabled by default, and
only enabled if --enable-smtp-alert-debug or --enable-log-file (which
are debugging options and unset by default) are enabled.
This should further alleviate security concerns related to CVE-2018-19046.
Signed-off-by: Quentin Armitage <[email protected]> |
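The core observation is that chown()/chmod() after open() cannot revoke access from a process that already holds a descriptor for the file. One conservative pattern — a sketch of the idea only, since fopen_safe() in keepalived has more cases to handle and O_EXCL semantics vary on some filesystems — is to refuse a pre-existing file entirely:

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

/* Create the log file fresh with owner-only permissions. If the path
 * already exists -- possibly created by another user who still holds
 * it open for reading -- O_EXCL makes open() fail instead of letting
 * that reader see what we append. */
static int open_log_exclusive(const char *path)
{
    int fd = open(path, O_WRONLY | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR);
    if (fd < 0)
        perror("open");
    return fd;
}

int main(void)
{
    int fd = open_log_exclusive("/tmp/example.log");
    if (fd >= 0) {
        if (write(fd, "ok\n", 3) < 0)
            perror("write");
        close(fd);
    }
    return 0;
}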
static bool is_inf(const unsigned char) { return false; } | 0 | [
"CWE-125"
] | CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 224,611,242,263,455,400,000,000,000,000,000,000,000 | 1 | Fix other issues in 'CImg<T>::load_bmp()'. |
cert_object_t **get_certificate_list(pkcs11_handle_t *h, int *ncerts)
{
CK_BYTE *id_value;
CK_BYTE *cert_value;
CK_OBJECT_HANDLE object;
CK_ULONG object_count;
X509 *x509;
cert_object_t **certs = NULL;
int rv;
CK_OBJECT_CLASS cert_class = CKO_CERTIFICATE;
CK_CERTIFICATE_TYPE cert_type = CKC_X_509;
CK_ATTRIBUTE cert_template[] = {
{CKA_CLASS, &cert_class, sizeof(CK_OBJECT_CLASS)}
,
{CKA_CERTIFICATE_TYPE, &cert_type, sizeof(CK_CERTIFICATE_TYPE)}
,
{CKA_ID, NULL, 0}
,
{CKA_VALUE, NULL, 0}
};
if (h->certs) {
*ncerts = h->cert_count;
return h->certs;
}
rv = h->fl->C_FindObjectsInit(h->session, cert_template, 2);
if (rv != CKR_OK) {
set_error("C_FindObjectsInit() failed: 0x%08lX", rv);
return NULL;
}
while(1) {
/* look for certificates */
rv = h->fl->C_FindObjects(h->session, &object, 1, &object_count);
if (rv != CKR_OK) {
set_error("C_FindObjects() failed: 0x%08lX", rv);
goto getlist_error;
}
if (object_count == 0) break; /* no more certs */
/* Cert found, read */
/* pass 1: get cert id */
/* retrieve cert object id length */
cert_template[2].pValue = NULL;
cert_template[2].ulValueLen = 0;
rv = h->fl->C_GetAttributeValue(h->session, object, cert_template, 3);
if (rv != CKR_OK) {
set_error("CertID length: C_GetAttributeValue() failed: 0x%08lX", rv);
goto getlist_error;
}
/* allocate enough space */
id_value = malloc(cert_template[2].ulValueLen);
if (id_value == NULL) {
set_error("CertID malloc(%d): not enough free memory available", cert_template[2].ulValueLen);
goto getlist_error;
}
/* read cert id into allocated space */
cert_template[2].pValue = id_value;
rv = h->fl->C_GetAttributeValue(h->session, object, cert_template, 3);
if (rv != CKR_OK) {
free(id_value);
set_error("CertID value: C_GetAttributeValue() failed: 0x%08lX", rv);
goto getlist_error;
}
/* pass 2: get certificate */
/* retrieve cert length */
cert_template[3].pValue = NULL;
rv = h->fl->C_GetAttributeValue(h->session, object, cert_template, 4);
if (rv != CKR_OK) {
set_error("Cert Length: C_GetAttributeValue() failed: 0x%08lX", rv);
goto getlist_error;
}
/* allocate enough space */
cert_value = malloc(cert_template[3].ulValueLen);
if (cert_value == NULL) {
set_error("Cert Length malloc(%d): not enough free memory available", cert_template[3].ulValueLen);
goto getlist_error;
}
/* read certificate into allocated space */
cert_template[3].pValue = cert_value;
rv = h->fl->C_GetAttributeValue(h->session, object, cert_template, 4);
if (rv != CKR_OK) {
free(cert_value);
set_error("Cert Value: C_GetAttributeValue() failed: 0x%08lX", rv);
goto getlist_error;
}
/* Pass 3: store certificate */
/* convert to X509 data structure */
x509 = d2i_X509(NULL, (const unsigned char **)&cert_template[3].pValue, cert_template[3].ulValueLen);
if (x509 == NULL) {
free(id_value);
free(cert_value);
set_error("d2i_x509() failed: %s", ERR_error_string(ERR_get_error(), NULL));
goto getlist_error;
}
/* finally add certificate to chain */
certs= realloc(h->certs,(h->cert_count+1) * sizeof(cert_object_t *));
if (!certs) {
free(id_value);
X509_free(x509);
set_error("realloc() not space to re-size cert table");
goto getlist_error;
}
h->certs=certs;
DBG1("Saving Certificate #%d:", h->cert_count + 1);
certs[h->cert_count] = NULL;
DBG1("- type: %02lx", cert_type);
DBG1("- id: %02x", id_value[0]);
h->certs[h->cert_count] = (cert_object_t *)calloc(sizeof(cert_object_t),1);
if (h->certs[h->cert_count] == NULL) {
free(id_value);
X509_free(x509);
set_error("malloc() not space to allocate cert object");
goto getlist_error;
}
h->certs[h->cert_count]->type = cert_type;
h->certs[h->cert_count]->id = id_value;
h->certs[h->cert_count]->id_length = cert_template[2].ulValueLen;
h->certs[h->cert_count]->x509 = x509;
h->certs[h->cert_count]->private_key = CK_INVALID_HANDLE;
h->certs[h->cert_count]->key_type = 0;
++h->cert_count;
} /* end of while(1) */
/* release FindObject Sesion */
rv = h->fl->C_FindObjectsFinal(h->session);
if (rv != CKR_OK) {
set_error("C_FindObjectsFinal() failed: 0x%08lX", rv);
free_certs(certs, h->cert_count);
certs = NULL;
h->certs = NULL;
h->cert_count = 0;
return NULL;
}
*ncerts = h->cert_count;
/* arriving here means that's all right */
DBG1("Found %d certificates in token",h->cert_count);
return h->certs;
/* some error arrived: clean as possible, and return fail */
getlist_error:
rv = h->fl->C_FindObjectsFinal(h->session);
if (rv != CKR_OK) {
set_error("C_FindObjectsFinal() failed: 0x%08lX", rv);
}
free_certs(h->certs, h->cert_count);
h->certs = NULL;
h->cert_count = 0;
return NULL;
} | 0 | [] | pam_pkcs11 | cc51b3e2720ea862d500cab2ea517518ff39a497 | 150,894,170,728,337,550,000,000,000,000,000,000,000 | 160 | verify using a nonce from the system, not the card
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting the problem. |
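The principle behind the fix: the challenge that the card signs must be produced by the verifying host, never taken from the card itself. A hedged sketch of that flow — sign_with_card() and verify_signature() are placeholder stubs, not pam_pkcs11 APIs; only RAND_bytes() is a real OpenSSL call (link with -lcrypto):

#include <openssl/rand.h>
#include <stdio.h>
#include <string.h>

#define NONCE_LEN 32

/* Stubs standing in for the PKCS#11 C_Sign call and the public-key
 * verification step; both are placeholders so the sketch compiles. */
static int sign_with_card(const unsigned char *data, size_t len,
                          unsigned char *sig, size_t *siglen)
{
    memcpy(sig, data, len); *siglen = len; return 0;
}
static int verify_signature(const unsigned char *data, size_t len,
                            const unsigned char *sig, size_t siglen)
{
    return (siglen == len && memcmp(sig, data, len) == 0) ? 0 : -1;
}

int main(void)
{
    unsigned char nonce[NONCE_LEN], sig[512];
    size_t siglen = sizeof(sig);

    if (RAND_bytes(nonce, sizeof(nonce)) != 1)  /* challenge comes from the host */
        return 1;
    if (sign_with_card(nonce, sizeof(nonce), sig, &siglen) != 0)
        return 1;
    printf("verified: %d\n",
           verify_signature(nonce, sizeof(nonce), sig, siglen) == 0);
    return 0;
}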
int ssh_dh_generate_x(ssh_session session) {
int keysize;
if (session->next_crypto->kex_type == SSH_KEX_DH_GROUP1_SHA1) {
keysize = 1023;
} else {
keysize = 2047;
}
session->next_crypto->x = bignum_new();
if (session->next_crypto->x == NULL) {
return -1;
}
#ifdef HAVE_LIBGCRYPT
bignum_rand(session->next_crypto->x, keysize);
#elif defined HAVE_LIBCRYPTO
bignum_rand(session->next_crypto->x, keysize, -1, 0);
#endif
/* not harder than this */
#ifdef DEBUG_CRYPTO
ssh_print_bignum("x", session->next_crypto->x);
#endif
return 0;
} | 0 | [
"CWE-200"
] | libssh | 4e6ff36a9a3aef72aa214f6fb267c28953b80060 | 277,505,376,479,194,040,000,000,000,000,000,000,000 | 25 | dh: Fix CVE-2016-0739
Due to a byte/bit confusion, the DH secret was too short. This file was
completely reworked and will be commited in a future version.
Signed-off-by: Aris Adamantiadis <[email protected]>
Reviewed-by: Andreas Schneider <[email protected]> |
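The "byte/bit confusion" the message names is the classic unit bug: a size in bits handed to an API expecting bytes (or vice versa) silently changes the entropy by a factor of eight. A tiny sketch that keeps the unit explicit at the call site (illustrative only; it does not reproduce libssh's internals):

#include <stdio.h>

/* A DH private exponent for a 2048-bit group needs ~2047 random *bits*,
 * i.e. 256 bytes. Passing a bit count where a byte count is expected
 * shrinks or inflates the secret by 8x without any compiler warning. */
static size_t bits_to_bytes(size_t bits) { return (bits + 7) / 8; }

int main(void)
{
    size_t secret_bits = 2047;
    printf("%zu bits -> %zu bytes of randomness\n",
           secret_bits, bits_to_bytes(secret_bits));
    return 0;
}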
Status AuthorizationSession::checkAuthForCreate(const NamespaceString& ns,
const BSONObj& cmdObj,
bool isMongos) {
if (cmdObj["capped"].trueValue() &&
!isAuthorizedForActionsOnNamespace(ns, ActionType::convertToCapped)) {
return Status(ErrorCodes::Unauthorized, "unauthorized");
}
const bool hasCreateCollectionAction =
isAuthorizedForActionsOnNamespace(ns, ActionType::createCollection);
// If attempting to create a view, check for additional required privileges.
if (cmdObj["viewOn"]) {
// You need the createCollection action on this namespace; the insert action is not
// sufficient.
if (!hasCreateCollectionAction) {
return Status(ErrorCodes::Unauthorized, "unauthorized");
}
// Parse the viewOn namespace and the pipeline. If no pipeline was specified, use the empty
// pipeline.
NamespaceString viewOnNs(ns.db(), cmdObj["viewOn"].checkAndGetStringData());
auto pipeline =
cmdObj.hasField("pipeline") ? BSONArray(cmdObj["pipeline"].Obj()) : BSONArray();
return checkAuthForCreateOrModifyView(this, ns, viewOnNs, pipeline, isMongos);
}
// To create a regular collection, ActionType::createCollection or ActionType::insert are
// both acceptable.
if (hasCreateCollectionAction || isAuthorizedForActionsOnNamespace(ns, ActionType::insert)) {
return Status::OK();
}
return Status(ErrorCodes::Unauthorized, "unauthorized");
} | 0 | [
"CWE-613"
] | mongo | db19e7ce84cfd702a4ba9983ee2ea5019f470f82 | 193,198,788,894,712,960,000,000,000,000,000,000,000 | 35 | SERVER-38984 Validate unique User ID on UserCache hit
(cherry picked from commit e55d6e2292e5dbe2f97153251d8193d1cc89f5d7) |
static X509 *find_issuer(X509_STORE_CTX *ctx, STACK_OF(X509) *sk, X509 *x)
{
int i;
X509 *issuer;
for (i = 0; i < sk_X509_num(sk); i++) {
issuer = sk_X509_value(sk, i);
if (ctx->check_issued(ctx, x, issuer))
return issuer;
}
return NULL;
} | 0 | [
"CWE-119"
] | openssl | 370ac320301e28bb615cee80124c042649c95d14 | 329,133,557,203,811,500,000,000,000,000,000,000,000 | 11 | Fix length checks in X509_cmp_time to avoid out-of-bounds reads.
Also tighten X509_cmp_time to reject more than three fractional
seconds in the time; and to reject trailing garbage after the offset.
CVE-2015-1789
Reviewed-by: Viktor Dukhovni <[email protected]>
Reviewed-by: Richard Levitte <[email protected]> |
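The two tightened rules from the message — at most three fractional-second digits, and nothing after the timezone offset — can be expressed as a small tail check. This is an illustrative standalone parser, not OpenSSL's X509_cmp_time():

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>

/* Validate the tail of an ASN.1 time string starting at the optional
 * fraction: ".d[dd]" with 1..3 digits, then 'Z' or +hhmm/-hhmm, then end. */
static bool check_time_tail(const char *p)
{
    if (*p == '.') {
        int digits = 0;
        for (p++; isdigit((unsigned char)*p); p++)
            digits++;
        if (digits < 1 || digits > 3)
            return false;
    }
    if (*p == 'Z')
        return p[1] == '\0';          /* reject trailing garbage */
    if (*p == '+' || *p == '-') {
        for (int i = 1; i <= 4; i++)
            if (!isdigit((unsigned char)p[i]))
                return false;
        return p[5] == '\0';
    }
    return false;
}

int main(void)
{
    printf("%d %d %d\n",
           check_time_tail(".123Z"),      /* 1 */
           check_time_tail(".1234Z"),     /* 0: four fractional digits */
           check_time_tail("Zjunk"));     /* 0: trailing garbage */
    return 0;
}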
win_setheight(int height)
{
win_setheight_win(height, curwin);
} | 0 | [
"CWE-416"
] | vim | ec66c41d84e574baf8009dbc0bd088d2bc5b2421 | 150,291,051,738,226,030,000,000,000,000,000,000,000 | 4 | patch 8.1.2136: using freed memory with autocmd from fuzzer
Problem: using freed memory with autocmd from fuzzer. (Dhiraj Mishra,
Dominique Pelle)
Solution: Avoid using "wp" after autocommands. (closes #5041) |
int xt_compat_target_offset(const struct xt_target *target)
{
u_int16_t csize = target->compatsize ? : target->targetsize;
return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
} | 0 | [
"CWE-119"
] | nf-next | d7591f0c41ce3e67600a982bab6989ef0f07b3ce | 184,026,197,861,864,160,000,000,000,000,000,000,000 | 5 | netfilter: x_tables: introduce and use xt_copy_counters_from_user
The three variants use the same copy&pasted code; condense this into a helper
helper and use that.
Make sure info.name is 0-terminated.
Signed-off-by: Florian Westphal <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]> |
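The safety-relevant detail in the message is forcing info.name to be NUL-terminated after the copy from user space. A userspace sketch of the pattern, with memcpy() standing in for copy_from_user() and the struct layout assumed:

#include <stdio.h>
#include <string.h>

#define XT_TABLE_MAXNAMELEN 32

struct xt_counters_info {
    char name[XT_TABLE_MAXNAMELEN];
    unsigned int num_counters;
};

/* Copy the fixed-size header, then force termination so later
 * strcmp()/printf("%s") on name cannot run past the array. */
static void copy_counters_header(struct xt_counters_info *dst,
                                 const void *user, size_t len)
{
    memcpy(dst, user, len < sizeof(*dst) ? len : sizeof(*dst));
    dst->name[sizeof(dst->name) - 1] = '\0';
}

int main(void)
{
    char raw[sizeof(struct xt_counters_info)];
    memset(raw, 'A', sizeof(raw));            /* attacker fills name, no NUL */
    struct xt_counters_info info;
    copy_counters_header(&info, raw, sizeof(raw));
    printf("%zu\n", strlen(info.name));       /* bounded: 31 */
    return 0;
}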
static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
{
if (have_hash_collision &&
(net->xfrm.state_hmask + 1) < xfrm_state_hashmax &&
net->xfrm.state_num > net->xfrm.state_hmask)
schedule_work(&net->xfrm.state_hash_work);
} | 0 | [
"CWE-416"
] | linux | dbb2483b2a46fbaf833cfb5deb5ed9cace9c7399 | 270,862,867,481,639,730,000,000,000,000,000,000,000 | 7 | xfrm: clean up xfrm protocol checks
In commit 6a53b7593233 ("xfrm: check id proto in validate_tmpl()")
I introduced a check for xfrm protocol, but according to Herbert
IPSEC_PROTO_ANY should only be used as a wildcard for lookup, so
it should be removed from validate_tmpl().
And, IPSEC_PROTO_ANY is expected to only match 3 IPSec-specific
protocols, this is why xfrm_state_flush() could still miss
IPPROTO_ROUTING, which leads that those entries are left in
net->xfrm.state_all before exit net. Fix this by replacing
IPSEC_PROTO_ANY with zero.
This patch also extracts the check from validate_tmpl() to
xfrm_id_proto_valid() and uses it in parse_ipsecrequest().
With this, no other protocols should be added into xfrm.
Fixes: 6a53b7593233 ("xfrm: check id proto in validate_tmpl()")
Reported-by: [email protected]
Cc: Steffen Klassert <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: Cong Wang <[email protected]>
Acked-by: Herbert Xu <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]> |
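The extracted helper the message describes is a whitelist of the protocols xfrm may own. A hedged sketch of such a predicate — the kernel version differs in detail (its IPv6 cases are conditional on CONFIG_IPV6), and the constants here come from netinet/in.h:

#include <netinet/in.h>
#include <stdbool.h>
#include <stdio.h>

#ifndef IPPROTO_COMP
#define IPPROTO_COMP 108      /* IP payload compression */
#endif

static bool xfrm_id_proto_valid(unsigned char proto)
{
    switch (proto) {
    case IPPROTO_AH:
    case IPPROTO_ESP:
    case IPPROTO_COMP:
    case IPPROTO_ROUTING:     /* IPv6 routing header */
    case IPPROTO_DSTOPTS:     /* IPv6 destination options */
        return true;
    default:
        return false;         /* notably, no IPSEC_PROTO_ANY wildcard here */
    }
}

int main(void)
{
    printf("%d %d\n", xfrm_id_proto_valid(IPPROTO_ESP),   /* 1 */
                      xfrm_id_proto_valid(IPPROTO_TCP));  /* 0 */
    return 0;
}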
megasas_make_sgl_skinny(struct megasas_instance *instance,
struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
{
int i;
int sge_count;
struct scatterlist *os_sgl;
sge_count = scsi_dma_map(scp);
if (sge_count) {
scsi_for_each_sg(scp, os_sgl, sge_count, i) {
mfi_sgl->sge_skinny[i].length =
cpu_to_le32(sg_dma_len(os_sgl));
mfi_sgl->sge_skinny[i].phys_addr =
cpu_to_le64(sg_dma_address(os_sgl));
mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
}
}
return sge_count;
} | 0 | [
"CWE-476"
] | linux | bcf3b67d16a4c8ffae0aa79de5853435e683945c | 44,377,536,807,882,120,000,000,000,000,000,000,000 | 20 | scsi: megaraid_sas: return error when create DMA pool failed
When creating the DMA pool for cmd frames fails, we should return -ENOMEM
instead of 0.
In some cases, in:
megasas_init_adapter_fusion()
-->megasas_alloc_cmds()
-->megasas_create_frame_pool
create DMA pool failed,
--> megasas_free_cmds() [1]
-->megasas_alloc_cmds_fusion()
failed, then goto fail_alloc_cmds.
-->megasas_free_cmds() [2]
we will call megasas_free_cmds() twice: [1] will kfree cmd_list,
and [2] will then use cmd_list. This causes a problem:
Unable to handle kernel NULL pointer dereference at virtual address
00000000
pgd = ffffffc000f70000
[00000000] *pgd=0000001fbf893003, *pud=0000001fbf893003,
*pmd=0000001fbf894003, *pte=006000006d000707
Internal error: Oops: 96000005 [#1] SMP
Modules linked in:
CPU: 18 PID: 1 Comm: swapper/0 Not tainted
task: ffffffdfb9290000 ti: ffffffdfb923c000 task.ti: ffffffdfb923c000
PC is at megasas_free_cmds+0x30/0x70
LR is at megasas_free_cmds+0x24/0x70
...
Call trace:
[<ffffffc0005b779c>] megasas_free_cmds+0x30/0x70
[<ffffffc0005bca74>] megasas_init_adapter_fusion+0x2f4/0x4d8
[<ffffffc0005b926c>] megasas_init_fw+0x2dc/0x760
[<ffffffc0005b9ab0>] megasas_probe_one+0x3c0/0xcd8
[<ffffffc0004a5abc>] local_pci_probe+0x4c/0xb4
[<ffffffc0004a5c40>] pci_device_probe+0x11c/0x14c
[<ffffffc00053a5e4>] driver_probe_device+0x1ec/0x430
[<ffffffc00053a92c>] __driver_attach+0xa8/0xb0
[<ffffffc000538178>] bus_for_each_dev+0x74/0xc8
[<ffffffc000539e88>] driver_attach+0x28/0x34
[<ffffffc000539a18>] bus_add_driver+0x16c/0x248
[<ffffffc00053b234>] driver_register+0x6c/0x138
[<ffffffc0004a5350>] __pci_register_driver+0x5c/0x6c
[<ffffffc000ce3868>] megasas_init+0xc0/0x1a8
[<ffffffc000082a58>] do_one_initcall+0xe8/0x1ec
[<ffffffc000ca7be8>] kernel_init_freeable+0x1c8/0x284
[<ffffffc0008d90b8>] kernel_init+0x1c/0xe4
Signed-off-by: Jason Yan <[email protected]>
Acked-by: Sumit Saxena <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]> |
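Two fixes follow from the message: propagate -ENOMEM instead of 0, and make a second free harmless by clearing the pointer. A simplified sketch of both (stand-in types, not the driver's code):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct instance { void **cmd_list; };

static void free_cmds(struct instance *inst)
{
    free(inst->cmd_list);
    inst->cmd_list = NULL;    /* makes a second free_cmds() call a no-op */
}

static int alloc_cmds(struct instance *inst)
{
    inst->cmd_list = calloc(16, sizeof(void *));
    if (!inst->cmd_list)
        return -ENOMEM;       /* was: returned 0, so callers kept going */
    /* ... further setup; on failure: free_cmds(inst); return -ENOMEM ... */
    return 0;
}

int main(void)
{
    struct instance inst = { 0 };
    if (alloc_cmds(&inst) == 0)
        free_cmds(&inst);
    free_cmds(&inst);         /* safe even if reached twice */
    return 0;
}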
cmdline_erase_chars(
int c,
int indent
#ifdef FEAT_SEARCH_EXTRA
, incsearch_state_T *isp
#endif
)
{
int i;
int j;
if (c == K_KDEL)
c = K_DEL;
/*
* Delete current character is the same as backspace on next
* character, except at end of line.
*/
if (c == K_DEL && ccline.cmdpos != ccline.cmdlen)
++ccline.cmdpos;
if (has_mbyte && c == K_DEL)
ccline.cmdpos += mb_off_next(ccline.cmdbuff,
ccline.cmdbuff + ccline.cmdpos);
if (ccline.cmdpos > 0)
{
char_u *p;
j = ccline.cmdpos;
p = ccline.cmdbuff + j;
if (has_mbyte)
{
p = mb_prevptr(ccline.cmdbuff, p);
if (c == Ctrl_W)
{
while (p > ccline.cmdbuff && vim_isspace(*p))
p = mb_prevptr(ccline.cmdbuff, p);
i = mb_get_class(p);
while (p > ccline.cmdbuff && mb_get_class(p) == i)
p = mb_prevptr(ccline.cmdbuff, p);
if (mb_get_class(p) != i)
p += (*mb_ptr2len)(p);
}
}
else if (c == Ctrl_W)
{
while (p > ccline.cmdbuff && vim_isspace(p[-1]))
--p;
i = vim_iswordc(p[-1]);
while (p > ccline.cmdbuff && !vim_isspace(p[-1])
&& vim_iswordc(p[-1]) == i)
--p;
}
else
--p;
ccline.cmdpos = (int)(p - ccline.cmdbuff);
ccline.cmdlen -= j - ccline.cmdpos;
i = ccline.cmdpos;
while (i < ccline.cmdlen)
ccline.cmdbuff[i++] = ccline.cmdbuff[j++];
// Truncate at the end, required for multi-byte chars.
ccline.cmdbuff[ccline.cmdlen] = NUL;
#ifdef FEAT_SEARCH_EXTRA
if (ccline.cmdlen == 0)
{
isp->search_start = isp->save_cursor;
// save view settings, so that the screen
// won't be restored at the wrong position
isp->old_viewstate = isp->init_viewstate;
}
#endif
redrawcmd();
}
else if (ccline.cmdlen == 0 && c != Ctrl_W
&& ccline.cmdprompt == NULL && indent == 0)
{
// In ex and debug mode it doesn't make sense to return.
if (exmode_active
#ifdef FEAT_EVAL
|| ccline.cmdfirstc == '>'
#endif
)
return CMDLINE_NOT_CHANGED;
VIM_CLEAR(ccline.cmdbuff); // no commandline to return
if (!cmd_silent)
{
#ifdef FEAT_RIGHTLEFT
if (cmdmsg_rl)
msg_col = Columns;
else
#endif
msg_col = 0;
msg_putchar(' '); // delete ':'
}
#ifdef FEAT_SEARCH_EXTRA
if (ccline.cmdlen == 0)
isp->search_start = isp->save_cursor;
#endif
redraw_cmdline = TRUE;
return GOTO_NORMAL_MODE;
}
return CMDLINE_CHANGED;
} | 1 | [
"CWE-122",
"CWE-787"
] | vim | ef02f16609ff0a26ffc6e20263523424980898fe | 321,553,580,368,713,900,000,000,000,000,000,000,000 | 104 | patch 8.2.4899: with latin1 encoding CTRL-W might go before the cmdline
Problem: With latin1 encoding CTRL-W might go before the start of the
command line.
Solution: Check already being at the start of the command line. |
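In the Ctrl-W branch of cmdline_erase_chars() above, the non-multibyte path reads p[-1] before confirming p is past the start of the buffer, so with latin1 encoding the scan can step before ccline.cmdbuff. A standalone sketch of the guarded loop shape (not vim's code; vim_iswordc() is approximated with isalnum()):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Find the start of the word ending at buf[pos]; never read buf[-1]. */
static int word_start(const char *buf, int pos)
{
    int p = pos;
    while (p > 0 && isspace((unsigned char)buf[p - 1]))
        p--;
    if (p == 0)        /* already at the start: nothing left to classify */
        return 0;
    int in_word = isalnum((unsigned char)buf[p - 1]) != 0;
    while (p > 0 && !isspace((unsigned char)buf[p - 1])
           && (isalnum((unsigned char)buf[p - 1]) != 0) == in_word)
        p--;
    return p;
}

int main(void)
{
    const char *cmd = "edit some file";
    printf("%d\n", word_start(cmd, (int)strlen(cmd)));  /* 10: start of "file" */
    return 0;
}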
template<typename t>
CImgList<T>& assign(const CImgList<t>& list, const bool is_shared=false) {
cimg::unused(is_shared);
assign(list._width);
cimglist_for(*this,l) _data[l].assign(list[l],false);
return *this;
} | 0 | [
"CWE-125"
] | CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 127,167,255,286,286,320,000,000,000,000,000,000,000 | 6 | Fix other issues in 'CImg<T>::load_bmp()'. |
void xgroupCommand(client *c) {
stream *s = NULL;
sds grpname = NULL;
streamCG *cg = NULL;
char *opt = c->argv[1]->ptr; /* Subcommand name. */
int mkstream = 0;
long long entries_read = SCG_INVALID_ENTRIES_READ;
robj *o;
/* Everything but the "HELP" option requires a key and group name. */
if (c->argc >= 4) {
/* Parse optional arguments for CREATE and SETID */
int i = 5;
int create_subcmd = !strcasecmp(opt,"CREATE");
int setid_subcmd = !strcasecmp(opt,"SETID");
while (i < c->argc) {
if (create_subcmd && !strcasecmp(c->argv[i]->ptr,"MKSTREAM")) {
mkstream = 1;
i++;
} else if ((create_subcmd || setid_subcmd) && !strcasecmp(c->argv[i]->ptr,"ENTRIESREAD") && i + 1 < c->argc) {
if (getLongLongFromObjectOrReply(c,c->argv[i+1],&entries_read,NULL) != C_OK)
return;
if (entries_read < 0 && entries_read != SCG_INVALID_ENTRIES_READ) {
addReplyError(c,"value for ENTRIESREAD must be positive or -1");
return;
}
i += 2;
} else {
addReplySubcommandSyntaxError(c);
return;
}
}
o = lookupKeyWrite(c->db,c->argv[2]);
if (o) {
if (checkType(c,o,OBJ_STREAM)) return;
s = o->ptr;
}
grpname = c->argv[3]->ptr;
}
/* Check for missing key/group. */
if (c->argc >= 4 && !mkstream) {
/* At this point key must exist, or there is an error. */
if (s == NULL) {
addReplyError(c,
"The XGROUP subcommand requires the key to exist. "
"Note that for CREATE you may want to use the MKSTREAM "
"option to create an empty stream automatically.");
return;
}
/* Certain subcommands require the group to exist. */
if ((cg = streamLookupCG(s,grpname)) == NULL &&
(!strcasecmp(opt,"SETID") ||
!strcasecmp(opt,"CREATECONSUMER") ||
!strcasecmp(opt,"DELCONSUMER")))
{
addReplyErrorFormat(c, "-NOGROUP No such consumer group '%s' "
"for key name '%s'",
(char*)grpname, (char*)c->argv[2]->ptr);
return;
}
}
/* Dispatch the different subcommands. */
if (c->argc == 2 && !strcasecmp(opt,"HELP")) {
const char *help[] = {
"CREATE <key> <groupname> <id|$> [option]",
" Create a new consumer group. Options are:",
" * MKSTREAM",
" Create the empty stream if it does not exist.",
" * ENTRIESREAD entries_read",
" Set the group's entries_read counter (internal use).",
"CREATECONSUMER <key> <groupname> <consumer>",
" Create a new consumer in the specified group.",
"DELCONSUMER <key> <groupname> <consumer>",
" Remove the specified consumer.",
"DESTROY <key> <groupname>",
" Remove the specified group.",
"SETID <key> <groupname> <id|$> [ENTRIESREAD entries_read]",
" Set the current group ID and entries_read counter.",
NULL
};
addReplyHelp(c, help);
} else if (!strcasecmp(opt,"CREATE") && (c->argc >= 5 && c->argc <= 8)) {
streamID id;
if (!strcmp(c->argv[4]->ptr,"$")) {
if (s) {
id = s->last_id;
} else {
id.ms = 0;
id.seq = 0;
}
} else if (streamParseStrictIDOrReply(c,c->argv[4],&id,0,NULL) != C_OK) {
return;
}
/* Handle the MKSTREAM option now that the command can no longer fail. */
if (s == NULL) {
serverAssert(mkstream);
o = createStreamObject();
dbAdd(c->db,c->argv[2],o);
s = o->ptr;
signalModifiedKey(c,c->db,c->argv[2]);
}
streamCG *cg = streamCreateCG(s,grpname,sdslen(grpname),&id,entries_read);
if (cg) {
addReply(c,shared.ok);
server.dirty++;
notifyKeyspaceEvent(NOTIFY_STREAM,"xgroup-create",
c->argv[2],c->db->id);
} else {
addReplyError(c,"-BUSYGROUP Consumer Group name already exists");
}
} else if (!strcasecmp(opt,"SETID") && (c->argc == 5 || c->argc == 7)) {
streamID id;
if (!strcmp(c->argv[4]->ptr,"$")) {
id = s->last_id;
} else if (streamParseIDOrReply(c,c->argv[4],&id,0) != C_OK) {
return;
}
cg->last_id = id;
cg->entries_read = entries_read;
addReply(c,shared.ok);
server.dirty++;
notifyKeyspaceEvent(NOTIFY_STREAM,"xgroup-setid",c->argv[2],c->db->id);
} else if (!strcasecmp(opt,"DESTROY") && c->argc == 4) {
if (cg) {
raxRemove(s->cgroups,(unsigned char*)grpname,sdslen(grpname),NULL);
streamFreeCG(cg);
addReply(c,shared.cone);
server.dirty++;
notifyKeyspaceEvent(NOTIFY_STREAM,"xgroup-destroy",
c->argv[2],c->db->id);
/* We want to unblock any XREADGROUP consumers with -NOGROUP. */
signalKeyAsReady(c->db,c->argv[2],OBJ_STREAM);
} else {
addReply(c,shared.czero);
}
} else if (!strcasecmp(opt,"CREATECONSUMER") && c->argc == 5) {
streamConsumer *created = streamCreateConsumer(cg,c->argv[4]->ptr,c->argv[2],
c->db->id,SCC_DEFAULT);
addReplyLongLong(c,created ? 1 : 0);
} else if (!strcasecmp(opt,"DELCONSUMER") && c->argc == 5) {
long long pending = 0;
streamConsumer *consumer = streamLookupConsumer(cg,c->argv[4]->ptr,SLC_NO_REFRESH);
if (consumer) {
/* Delete the consumer and returns the number of pending messages
* that were yet associated with such a consumer. */
pending = raxSize(consumer->pel);
streamDelConsumer(cg,consumer);
server.dirty++;
notifyKeyspaceEvent(NOTIFY_STREAM,"xgroup-delconsumer",
c->argv[2],c->db->id);
}
addReplyLongLong(c,pending);
} else {
addReplySubcommandSyntaxError(c);
}
} | 0 | [
"CWE-703",
"CWE-401"
] | redis | 4a7a4e42db8ff757cdf3f4a824f66426036034ef | 320,232,926,199,886,800,000,000,000,000,000,000,000 | 162 | Fix memory leak in streamGetEdgeID (#10753)
si is initialized by streamIteratorStart(), we should call
streamIteratorStop() on it when done.
regression introduced in #9127 (redis 7.0) |
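The leak pattern described — a Start call whose paired Stop is skipped on an early-return path — is commonly fixed with a single cleanup label. A sketch of the shape; streamIteratorStart()/streamIteratorStop() are stubbed here so it compiles, and do not reflect Redis' real implementation:

#include <stdio.h>
#include <stdlib.h>

typedef struct { void *internal; } streamIterator;

/* Stubs standing in for Redis' real iterator API. */
static void streamIteratorStart(streamIterator *si) { si->internal = malloc(64); }
static void streamIteratorStop(streamIterator *si)  { free(si->internal); }

static int get_edge_id(int want_first, long long *out)
{
    streamIterator si;
    int ret = -1;

    streamIteratorStart(&si);
    if (!want_first)
        goto cleanup;             /* early exits still release si */
    *out = 42;
    ret = 0;
cleanup:
    streamIteratorStop(&si);      /* the call the original code was missing */
    return ret;
}

int main(void)
{
    long long id;
    printf("%d\n", get_edge_id(1, &id));
    return 0;
}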
RZ_API char *rz_bin_dwarf_line_header_get_full_file_path(RZ_NULLABLE const RzBinDwarfDebugInfo *info, const RzBinDwarfLineHeader *header, ut64 file_index) {
rz_return_val_if_fail(header, NULL);
if (file_index >= header->file_names_count) {
return NULL;
}
RzBinDwarfLineFileEntry *file = &header->file_names[file_index];
if (!file->name) {
return NULL;
}
/*
* Dwarf standard does not seem to specify the exact separator (slash/backslash) of paths
* so apparently it is target-dependent. However we have yet to see a Windows binary that
* also contains dwarf and contains backslashes. The ones we have seen from MinGW have regular
* slashes.
* And since there seems to be no way to reliable check whether the target uses slashes
* or backslashes anyway, we will simply use slashes always here.
*/
const char *comp_dir = info ? ht_up_find(info->line_info_offset_comp_dir, header->offset, NULL) : NULL;
const char *include_dir = NULL;
char *own_str = NULL;
if (file->id_idx > 0 && file->id_idx - 1 < header->include_dirs_count) {
include_dir = header->include_dirs[file->id_idx - 1];
if (include_dir && include_dir[0] != '/' && comp_dir) {
include_dir = own_str = rz_str_newf("%s/%s/", comp_dir, include_dir);
}
} else {
include_dir = comp_dir;
}
if (!include_dir) {
include_dir = "./";
}
char *r = rz_str_newf("%s/%s", include_dir, file->name);
free(own_str);
return r;
} | 0 | [
"CWE-787"
] | rizin | aa6917772d2f32e5a7daab25a46c72df0b5ea406 | 13,772,617,185,099,572,000,000,000,000,000,000,000 | 37 | Fix oob write for dwarf with abbrev with count 0 (Fix #2083) (#2086) |
static FDrive *get_cur_drv(FDCtrl *fdctrl)
{
switch (fdctrl->cur_drv) {
case 0: return drv0(fdctrl);
case 1: return drv1(fdctrl);
#if MAX_FD == 4
case 2: return drv2(fdctrl);
case 3: return drv3(fdctrl);
#endif
default: return NULL;
}
} | 0 | [
"CWE-119"
] | qemu | e907746266721f305d67bc0718795fedee2e824c | 247,084,255,678,224,900,000,000,000,000,000,000,000 | 12 | fdc: force the fifo access to be in bounds of the allocated buffer
During processing of certain commands such as FD_CMD_READ_ID and
FD_CMD_DRIVE_SPECIFICATION_COMMAND the fifo memory access could
get out of bounds leading to memory corruption with values coming
from the guest.
Fix this by making sure that the index is always bounded by the
allocated memory.
This is CVE-2015-3456.
Signed-off-by: Petr Matousek <[email protected]>
Reviewed-by: John Snow <[email protected]>
Signed-off-by: John Snow <[email protected]> |
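The widely cited form of this patch bounds every FIFO index with the buffer length via a modulo. A minimal sketch of the idiom (FD_SECTOR_LEN is the usual 512-byte sector; the access pattern is simplified):

#include <stdint.h>
#include <stdio.h>

#define FD_SECTOR_LEN 512

static uint8_t fifo[FD_SECTOR_LEN];

/* Guest-controlled data_pos can exceed the buffer during command
 * processing; bound every access instead of trusting the counter. */
static uint8_t fifo_read(uint32_t data_pos)
{
    return fifo[data_pos % FD_SECTOR_LEN];
}

int main(void)
{
    fifo[1] = 0xAB;
    printf("%02x\n", fifo_read(FD_SECTOR_LEN + 1));  /* wraps to index 1 */
    return 0;
}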
static void ossl_trace(int direction, int ssl_ver, int content_type,
const void *buf, size_t len, SSL *ssl,
void *userp)
{
char unknown[32];
const char *verstr = NULL;
struct connectdata *conn = userp;
struct ssl_connect_data *connssl = &conn->ssl[0];
struct ssl_backend_data *backend = connssl->backend;
struct Curl_easy *data = backend->logger;
if(!conn || !data || !data->set.fdebug ||
(direction != 0 && direction != 1))
return;
switch(ssl_ver) {
#ifdef SSL2_VERSION /* removed in recent versions */
case SSL2_VERSION:
verstr = "SSLv2";
break;
#endif
#ifdef SSL3_VERSION
case SSL3_VERSION:
verstr = "SSLv3";
break;
#endif
case TLS1_VERSION:
verstr = "TLSv1.0";
break;
#ifdef TLS1_1_VERSION
case TLS1_1_VERSION:
verstr = "TLSv1.1";
break;
#endif
#ifdef TLS1_2_VERSION
case TLS1_2_VERSION:
verstr = "TLSv1.2";
break;
#endif
#ifdef TLS1_3_VERSION
case TLS1_3_VERSION:
verstr = "TLSv1.3";
break;
#endif
case 0:
break;
default:
msnprintf(unknown, sizeof(unknown), "(%x)", ssl_ver);
verstr = unknown;
break;
}
/* Log progress for interesting records only (like Handshake or Alert), skip
* all raw record headers (content_type == SSL3_RT_HEADER or ssl_ver == 0).
* For TLS 1.3, skip notification of the decrypted inner Content Type.
*/
if(ssl_ver
#ifdef SSL3_RT_INNER_CONTENT_TYPE
&& content_type != SSL3_RT_INNER_CONTENT_TYPE
#endif
) {
const char *msg_name, *tls_rt_name;
char ssl_buf[1024];
int msg_type, txt_len;
/* the info given when the version is zero is not that useful for us */
ssl_ver >>= 8; /* check the upper 8 bits only below */
/* SSLv2 doesn't seem to have TLS record-type headers, so OpenSSL
* always pass-up content-type as 0. But the interesting message-type
* is at 'buf[0]'.
*/
if(ssl_ver == SSL3_VERSION_MAJOR && content_type)
tls_rt_name = tls_rt_type(content_type);
else
tls_rt_name = "";
if(content_type == SSL3_RT_CHANGE_CIPHER_SPEC) {
msg_type = *(char *)buf;
msg_name = "Change cipher spec";
}
else if(content_type == SSL3_RT_ALERT) {
msg_type = (((char *)buf)[0] << 8) + ((char *)buf)[1];
msg_name = SSL_alert_desc_string_long(msg_type);
}
else {
msg_type = *(char *)buf;
msg_name = ssl_msg_type(ssl_ver, msg_type);
}
txt_len = msnprintf(ssl_buf, sizeof(ssl_buf), "%s (%s), %s, %s (%d):\n",
verstr, direction?"OUT":"IN",
tls_rt_name, msg_name, msg_type);
if(0 <= txt_len && (unsigned)txt_len < sizeof(ssl_buf)) {
Curl_debug(data, CURLINFO_TEXT, ssl_buf, (size_t)txt_len);
}
}
Curl_debug(data, (direction == 1) ? CURLINFO_SSL_DATA_OUT :
CURLINFO_SSL_DATA_IN, (char *)buf, len);
(void) ssl;
} | 0 | [
"CWE-290"
] | curl | b09c8ee15771c614c4bf3ddac893cdb12187c844 | 195,164,461,674,143,680,000,000,000,000,000,000,000 | 103 | vtls: add 'isproxy' argument to Curl_ssl_get/addsessionid()
To make sure we set and extract the correct session.
Reported-by: Mingtao Yang
Bug: https://curl.se/docs/CVE-2021-22890.html
CVE-2021-22890 |
static void __init iommu_exit_mempool(void)
{
kmem_cache_destroy(iommu_devinfo_cache);
kmem_cache_destroy(iommu_domain_cache);
iova_cache_put();
} | 0 | [] | linux | d8b8591054575f33237556c32762d54e30774d28 | 33,053,782,124,885,185,000,000,000,000,000,000,000 | 6 | iommu/vt-d: Disable ATS support on untrusted devices
Commit fb58fdcd295b9 ("iommu/vt-d: Do not enable ATS for untrusted
devices") disables ATS support on the devices which have been marked
as untrusted. Unfortunately this is not enough to fix the DMA attack
vulnerabiltiies because IOMMU driver allows translated requests as
long as a device advertises the ATS capability. Hence a malicious
peripheral device could use this to bypass IOMMU.
This disables the ATS support on untrusted devices by clearing the
internal per-device ATS mark. As the result, IOMMU driver will block
any translated requests from any device marked as untrusted.
Cc: Jacob Pan <[email protected]>
Cc: Mika Westerberg <[email protected]>
Suggested-by: Kevin Tian <[email protected]>
Suggested-by: Ashok Raj <[email protected]>
Fixes: fb58fdcd295b9 ("iommu/vt-d: Do not enable ATS for untrusted devices")
Signed-off-by: Lu Baolu <[email protected]>
Signed-off-by: Joerg Roedel <[email protected]> |
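The mechanism the message describes — clearing a per-device ATS mark so the IOMMU path later refuses pre-translated requests from that device — can be shown in miniature. Field and function names below are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct device_info { bool untrusted; bool ats_supported; };

/* An untrusted device never gets the ATS mark, even if its hardware
 * advertises the capability. */
static void probe_ats(struct device_info *info, bool hw_has_ats)
{
    info->ats_supported = hw_has_ats && !info->untrusted;
}

int main(void)
{
    struct device_info ext = { .untrusted = true };
    probe_ats(&ext, true);
    printf("ats: %d\n", ext.ats_supported);   /* 0: translated requests blocked */
    return 0;
}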
void FilterUtility::setUpstreamScheme(Http::HeaderMap& headers,
const Upstream::ClusterInfo& cluster) {
if (cluster.transportSocketFactory().implementsSecureTransport()) {
headers.insertScheme().value().setReference(Http::Headers::get().SchemeValues.Https);
} else {
headers.insertScheme().value().setReference(Http::Headers::get().SchemeValues.Http);
}
} | 0 | [
"CWE-400",
"CWE-703"
] | envoy | afc39bea36fd436e54262f150c009e8d72db5014 | 77,805,507,528,809,900,000,000,000,000,000,000,000 | 8 | Track byteSize of HeaderMap internally.
Introduces a cached byte size updated internally in HeaderMap. The value
is stored as an optional, and is cleared whenever a non-const pointer or
reference to a HeaderEntry is accessed. The cached value can be set with
refreshByteSize() which performs an iteration over the HeaderMap to sum
the size of each key and value in the HeaderMap.
Signed-off-by: Asra Ali <[email protected]> |
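The scheme the message describes — a memoized aggregate invalidated whenever mutable access is handed out, recomputed by one full pass — is a general pattern. A C sketch of it; Envoy's actual implementation is C++ with an optional, and all names and sizes below are illustrative:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct header { const char *key, *value; };

struct header_map {
    struct header entries[8];
    size_t count;
    size_t cached_byte_size;
    bool cache_valid;            /* plays the role of the optional<> */
};

/* Any mutable access must clear the cache. */
static struct header *entry_mut(struct header_map *m, size_t i)
{
    m->cache_valid = false;
    return &m->entries[i];
}

static size_t byte_size(struct header_map *m)
{
    if (!m->cache_valid) {       /* refreshByteSize(): one full pass */
        size_t n = 0;
        for (size_t i = 0; i < m->count; i++)
            n += strlen(m->entries[i].key) + strlen(m->entries[i].value);
        m->cached_byte_size = n;
        m->cache_valid = true;
    }
    return m->cached_byte_size;
}

int main(void)
{
    struct header_map m = { .count = 1, .entries = {{ "host", "a" }} };
    printf("%zu\n", byte_size(&m));          /* 5 */
    entry_mut(&m, 0)->value = "abc";         /* invalidates the cache */
    printf("%zu\n", byte_size(&m));          /* 7, recomputed */
    return 0;
}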
void set_extraction_flag(int flags)
{
marker &= ~EXTRACTION_MASK;
marker|= flags;
} | 0 | [
"CWE-617"
] | server | 2e7891080667c59ac80f788eef4d59d447595772 | 142,619,077,503,797,890,000,000,000,000,000,000,000 | 5 | MDEV-25635 Assertion failure when pushing from HAVING into WHERE of view
This bug could manifest itself after pushing a where condition over a
mergeable derived table / view / CTE DT into a grouping view / derived
table / CTE V whose item list contained set functions with constant
arguments such as MIN(2), SUM(1) etc. In such cases the field references
used in the condition pushed into the view V that correspond set functions
are wrapped into Item_direct_view_ref wrappers. Due to a wrong implementation
of the virtual method const_item() for the class Item_direct_view_ref the
wrapped set functions with constant arguments could be erroneously taken
for constant items. This could lead to a wrong result set returned by the
main select query in 10.2. In 10.4 where a possibility of pushing condition
from HAVING into WHERE had been added this could cause a crash.
Approved by Sergey Petrunya <[email protected]> |
int bdrv_get_backing_file_depth(BlockDriverState *bs)
{
if (!bs->drv) {
return 0;
}
if (!bs->backing_hd) {
return 0;
}
return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
} | 0 | [
"CWE-190"
] | qemu | 8f4754ede56e3f9ea3fd7207f4a7c4453e59285b | 197,453,072,393,481,270,000,000,000,000,000,000,000 | 12 | block: Limit request size (CVE-2014-0143)
Limiting the size of a single request to INT_MAX not only fixes a
direct integer overflow in bdrv_check_request() (which would only
trigger bad behaviour with ridiculously huge images, as in close to
2^64 bytes), but can also prevent overflows in all block drivers.
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]> |
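The INT_MAX cap is a validation done before any driver multiplies a sector count by the sector size. A sketch of the guarded arithmetic (BDRV_SECTOR_SIZE is QEMU's usual 512; this is not the actual bdrv_check_request()):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define BDRV_SECTOR_SIZE 512

/* Reject a request whose byte length would not fit in an int before
 * any driver computes nb_sectors * BDRV_SECTOR_SIZE. */
static int check_request(int64_t sector_num, int nb_sectors)
{
    if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE)
        return -1;
    if (sector_num < 0)
        return -1;
    return 0;
}

int main(void)
{
    printf("%d\n", check_request(0, INT_MAX / BDRV_SECTOR_SIZE + 1)); /* -1 */
    return 0;
}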
get_tc(const char *descriptor,
int modid,
int *tc_index,
struct enum_list **ep, struct range_list **rp, char **hint)
{
int i;
struct tc *tcp;
i = get_tc_index(descriptor, modid);
if (tc_index)
*tc_index = i;
if (i != -1) {
tcp = &tclist[i];
if (ep) {
free_enums(ep);
*ep = copy_enums(tcp->enums);
}
if (rp) {
free_ranges(rp);
*rp = copy_ranges(tcp->ranges);
}
if (hint) {
if (*hint)
free(*hint);
*hint = (tcp->hint ? strdup(tcp->hint) : NULL);
}
return tcp->type;
}
return LABEL;
} | 0 | [
"CWE-59",
"CWE-61"
] | net-snmp | 4fd9a450444a434a993bc72f7c3486ccce41f602 | 196,973,533,072,427,900,000,000,000,000,000,000,000 | 30 | CHANGES: snmpd: Stop reading and writing the mib_indexes/* files
Caching directory contents is something the operating system should do
and is not something Net-SNMP should do. Instead of storing a copy of
the directory contents in ${tmp_dir}/mib_indexes/${n}, always scan a
MIB directory. |
//! Return specified image channel \inplace.
CImg<T>& channel(const int c0) {
return channels(c0,c0);
} | 0 | [
"CWE-125"
] | CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 251,756,327,533,189,300,000,000,000,000,000,000,000 | 3 | Fix other issues in 'CImg<T>::load_bmp()'. |
static void udt_close_if_finished(struct unidirectional_transfer *t)
{
if (STATE_NEEDS_CLOSING(t->state) && !t->bufuse) {
t->state = SSTATE_FINISHED;
if (t->dest_is_sock)
shutdown(t->dest, SHUT_WR);
else
close(t->dest);
transfer_debug("Closed %s.", t->dest_name);
}
} | 0 | [] | git | 68061e3470210703cb15594194718d35094afdc0 | 188,947,895,272,718,140,000,000,000,000,000,000,000 | 11 | fast-import: disallow "feature export-marks" by default
The fast-import stream command "feature export-marks=<path>" lets the
stream write marks to an arbitrary path. This may be surprising if you
are running fast-import against an untrusted input (which otherwise
cannot do anything except update Git objects and refs).
Let's disallow the use of this feature by default, and provide a
command-line option to re-enable it (you can always just use the
command-line --export-marks as well, but the in-stream version provides
an easy way for exporters to control the process).
This is a backwards-incompatible change, since the default is flipping
to the new, safer behavior. However, since the main users of the
in-stream versions would be import/export-based remote helpers, and
since we trust remote helpers already (which are already running
arbitrary code), we'll pass the new option by default when reading a
remote helper's stream. This should minimize the impact.
Note that the implementation isn't totally simple, as we have to work
around the fact that fast-import doesn't parse its command-line options
until after it has read any "feature" lines from the stream. This is how
it lets command-line options override in-stream. But in our case, it's
important to parse the new --allow-unsafe-features first.
There are three options for resolving this:
1. Do a separate "early" pass over the options. This is easy for us to
do because there are no command-line options that allow the
"unstuck" form (so there's no chance of us mistaking an argument
for an option), though it does introduce a risk of incorrect
parsing later (e.g,. if we convert to parse-options).
2. Move the option parsing phase back to the start of the program, but
teach the stream-reading code never to override an existing value.
This is tricky, because stream "feature" lines override each other
(meaning we'd have to start tracking the source for every option).
3. Accept that we might parse a "feature export-marks" line that is
forbidden, as long we don't _act_ on it until after we've parsed
the command line options.
This would, in fact, work with the current code, but only because
the previous patch fixed the export-marks parser to avoid touching
the filesystem.
So while it works, it does carry risk of somebody getting it wrong
in the future in a rather subtle and unsafe way.
I've gone with option (1) here as simple, safe, and unlikely to cause
regressions.
This fixes CVE-2019-1348.
Signed-off-by: Jeff King <[email protected]> |
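Option (1) from the message — a separate early pass that looks only for the safety flag before any stream "feature" lines are read — is simple precisely because no fast-import option takes the unstuck form. A sketch of that pass; --allow-unsafe-features is the real flag, the surrounding scaffolding is illustrative:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool allow_unsafe_features;

/* Early scan: runs before any stream input is parsed, so the decision
 * is already made when a stream requests "feature export-marks". */
static void parse_early_options(int argc, char **argv)
{
    for (int i = 1; i < argc; i++)
        if (!strcmp(argv[i], "--allow-unsafe-features"))
            allow_unsafe_features = true;
}

int main(int argc, char **argv)
{
    parse_early_options(argc, argv);
    /* ... read the stream; an in-stream export-marks request is only
       honored if allow_unsafe_features is set ... */
    printf("unsafe features %s\n",
           allow_unsafe_features ? "allowed" : "forbidden");
    return 0;
}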
exif_entry_ref (ExifEntry *e)
{
if (!e) return;
e->priv->ref_count++;
} | 0 | [
"CWE-125"
] | libexif | f9bb9f263fb00f0603ecbefa8957cad24168cbff | 226,128,723,570,423,800,000,000,000,000,000,000,000 | 6 | Fix a buffer read overflow in exif_entry_get_value
While parsing EXIF_TAG_FOCAL_LENGTH it was possible to read 8 bytes past
the end of a heap buffer. This was detected by the OSS Fuzz project.
Patch from Google.
Fixes https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=7344 and
https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=14543 |
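The overflow class here — reading a fixed 8-byte EXIF RATIONAL without checking the entry's actual size — is prevented by one comparison. An illustrative sketch, not libexif's code (byte-order handling omitted):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Read an 8-byte EXIF RATIONAL only if the entry actually holds one. */
static int read_rational(const uint8_t *data, size_t size,
                         uint32_t *num, uint32_t *den)
{
    if (size < 8)
        return -1;               /* would read past the buffer otherwise */
    memcpy(num, data, 4);
    memcpy(den, data + 4, 4);
    return 0;
}

int main(void)
{
    uint8_t short_entry[4] = { 0 };
    uint32_t n, d;
    printf("%d\n", read_rational(short_entry, sizeof(short_entry), &n, &d)); /* -1 */
    return 0;
}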
void dump_data_file(const uint8_t *buf, int len, bool omit_zero_bytes,
FILE *f)
{
dump_data_cb(buf, len, omit_zero_bytes, fprintf_cb, f);
} | 0 | [] | samba | 8eae8d28bce2c3f6a323d3dc48ed10c2e6bb1ba5 | 154,536,507,367,400,080,000,000,000,000,000,000,000 | 5 | CVE-2013-4476: lib-util: add file_check_permissions()
Bug: https://bugzilla.samba.org/show_bug.cgi?id=10234
Signed-off-by: Björn Baumbach <[email protected]>
Reviewed-by: Stefan Metzmacher <[email protected]> |
int open_fdout(const char *ifilename,
IOUtil::FileWriter *writer,
bool is_embedded_jpeg,
Sirikata::Array1d<uint8_t, 2> fileid,
bool force_compressed_output,
bool *is_socket) {
if (writer != NULL) {
*is_socket = writer->is_socket();
return writer->get_fd();
}
*is_socket = false;
if (strcmp(ifilename, "-") == 0) {
return 1;
}
int retval = -1;
std::string ofilename;
// check file id, determine filetype
if (file_no + 1 < file_cnt && ofilename != ifilename) {
ofilename = filelist[file_no + 1];
} else if (is_jpeg_header(fileid) || is_embedded_jpeg || g_permissive) {
ofilename = postfix_uniq(ifilename, (ofiletype == UJG ? ".ujg" : ".lep"));
} else if ( ( ( fileid[0] == ujg_header[0] ) && ( fileid[1] == ujg_header[1] ) )
|| ( ( fileid[0] == lepton_header[0] ) && ( fileid[1] == lepton_header[1] ) )
|| ( ( fileid[0] == zlepton_header[0] ) && ( fileid[1] == zlepton_header[1] ) ) ){
if ((fileid[0] == zlepton_header[0] && fileid[1] == zlepton_header[1])
|| force_compressed_output) {
ofilename = postfix_uniq(ifilename, ".jpg.z");
} else {
ofilename = postfix_uniq(ifilename, ".jpg");
}
}
do {
retval = open(ofilename.c_str(), O_WRONLY|O_CREAT|O_TRUNC
#ifdef _WIN32
| O_BINARY
#endif
, 0
#ifdef _WIN32
| S_IREAD| S_IWRITE
#else
| S_IWUSR | S_IRUSR
#endif
);
}while (retval == -1 && errno == EINTR);
if (retval == -1) {
const char * errormessage = "Output file unable to be opened for writing:";
while(write(2, errormessage, strlen(errormessage)) == -1 && errno == EINTR) {}
while(write(2, ofilename.c_str(), ofilename.length()) == -1 && errno == EINTR) {}
while(write(2, "\n", 1) == -1 && errno == EINTR) {}
custom_exit(ExitCode::FILE_NOT_FOUND);
}
return retval;
} | 0 | [
"CWE-399",
"CWE-190"
] | lepton | 6a5ceefac1162783fffd9506a3de39c85c725761 | 147,491,754,331,938,310,000,000,000,000,000,000,000 | 53 | fix #111 |
int ParseDsdiffHeaderConfig (FILE *infile, char *infilename, char *fourcc, WavpackContext *wpc, WavpackConfig *config)
{
int64_t infilesize, total_samples;
DFFFileHeader dff_file_header;
DFFChunkHeader dff_chunk_header;
uint32_t bcount;
infilesize = DoGetFileSize (infile);
memcpy (&dff_file_header, fourcc, 4);
if ((!DoReadFile (infile, ((char *) &dff_file_header) + 4, sizeof (DFFFileHeader) - 4, &bcount) ||
bcount != sizeof (DFFFileHeader) - 4) || strncmp (dff_file_header.formType, "DSD ", 4)) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &dff_file_header, sizeof (DFFFileHeader))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
#if 1 // this might be a little too picky...
WavpackBigEndianToNative (&dff_file_header, DFFFileHeaderFormat);
if (infilesize && !(config->qmode & QMODE_IGNORE_LENGTH) &&
dff_file_header.ckDataSize && dff_file_header.ckDataSize + 1 && dff_file_header.ckDataSize + 12 != infilesize) {
error_line ("%s is not a valid .DFF file (by total size)!", infilename);
return WAVPACK_SOFT_ERROR;
}
if (debug_logging_mode)
error_line ("file header indicated length = %lld", dff_file_header.ckDataSize);
#endif
// loop through all elements of the DSDIFF header
// (until the data chunk) and copy them to the output file
while (1) {
if (!DoReadFile (infile, &dff_chunk_header, sizeof (DFFChunkHeader), &bcount) ||
bcount != sizeof (DFFChunkHeader)) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &dff_chunk_header, sizeof (DFFChunkHeader))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
WavpackBigEndianToNative (&dff_chunk_header, DFFChunkHeaderFormat);
if (debug_logging_mode)
error_line ("chunk header indicated length = %lld", dff_chunk_header.ckDataSize);
if (!strncmp (dff_chunk_header.ckID, "FVER", 4)) {
uint32_t version;
if (dff_chunk_header.ckDataSize != sizeof (version) ||
!DoReadFile (infile, &version, sizeof (version), &bcount) ||
bcount != sizeof (version)) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &version, sizeof (version))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
WavpackBigEndianToNative (&version, "L");
if (debug_logging_mode)
error_line ("dsdiff file version = 0x%08x", version);
}
else if (!strncmp (dff_chunk_header.ckID, "PROP", 4)) {
char *prop_chunk;
if (dff_chunk_header.ckDataSize < 4 || dff_chunk_header.ckDataSize > 1024) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
if (debug_logging_mode)
error_line ("got PROP chunk of %d bytes total", (int) dff_chunk_header.ckDataSize);
prop_chunk = malloc ((size_t) dff_chunk_header.ckDataSize);
if (!DoReadFile (infile, prop_chunk, (uint32_t) dff_chunk_header.ckDataSize, &bcount) ||
bcount != dff_chunk_header.ckDataSize) {
error_line ("%s is not a valid .DFF file!", infilename);
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, prop_chunk, (uint32_t) dff_chunk_header.ckDataSize)) {
error_line ("%s", WavpackGetErrorMessage (wpc));
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
if (!strncmp (prop_chunk, "SND ", 4)) {
char *cptr = prop_chunk + 4, *eptr = prop_chunk + dff_chunk_header.ckDataSize;
uint16_t numChannels = 0, chansSpecified, chanMask = 0;
uint32_t sampleRate = 0;
while (eptr - cptr >= sizeof (dff_chunk_header)) {
memcpy (&dff_chunk_header, cptr, sizeof (dff_chunk_header));
cptr += sizeof (dff_chunk_header);
WavpackBigEndianToNative (&dff_chunk_header, DFFChunkHeaderFormat);
if (dff_chunk_header.ckDataSize > 0 && dff_chunk_header.ckDataSize <= eptr - cptr) {
if (!strncmp (dff_chunk_header.ckID, "FS ", 4) && dff_chunk_header.ckDataSize == 4) {
memcpy (&sampleRate, cptr, sizeof (sampleRate));
WavpackBigEndianToNative (&sampleRate, "L");
cptr += dff_chunk_header.ckDataSize;
if (debug_logging_mode)
error_line ("got sample rate of %u Hz", sampleRate);
}
else if (!strncmp (dff_chunk_header.ckID, "CHNL", 4) && dff_chunk_header.ckDataSize >= 2) {
memcpy (&numChannels, cptr, sizeof (numChannels));
WavpackBigEndianToNative (&numChannels, "S");
cptr += sizeof (numChannels);
chansSpecified = (int)(dff_chunk_header.ckDataSize - sizeof (numChannels)) / 4;
if (numChannels < chansSpecified || numChannels < 1 || numChannels > 256) {
error_line ("%s is not a valid .DFF file!", infilename);
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
while (chansSpecified--) {
if (!strncmp (cptr, "SLFT", 4) || !strncmp (cptr, "MLFT", 4))
chanMask |= 0x1;
else if (!strncmp (cptr, "SRGT", 4) || !strncmp (cptr, "MRGT", 4))
chanMask |= 0x2;
else if (!strncmp (cptr, "LS ", 4))
chanMask |= 0x10;
else if (!strncmp (cptr, "RS ", 4))
chanMask |= 0x20;
else if (!strncmp (cptr, "C ", 4))
chanMask |= 0x4;
else if (!strncmp (cptr, "LFE ", 4))
chanMask |= 0x8;
else
if (debug_logging_mode)
error_line ("undefined channel ID %c%c%c%c", cptr [0], cptr [1], cptr [2], cptr [3]);
cptr += 4;
}
if (debug_logging_mode)
error_line ("%d channels, mask = 0x%08x", numChannels, chanMask);
}
else if (!strncmp (dff_chunk_header.ckID, "CMPR", 4) && dff_chunk_header.ckDataSize >= 4) {
if (strncmp (cptr, "DSD ", 4)) {
error_line ("DSDIFF files must be uncompressed, not \"%c%c%c%c\"!",
cptr [0], cptr [1], cptr [2], cptr [3]);
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
cptr += dff_chunk_header.ckDataSize;
}
else {
if (debug_logging_mode)
error_line ("got PROP/SND chunk type \"%c%c%c%c\" of %d bytes", dff_chunk_header.ckID [0],
dff_chunk_header.ckID [1], dff_chunk_header.ckID [2], dff_chunk_header.ckID [3], dff_chunk_header.ckDataSize);
cptr += dff_chunk_header.ckDataSize;
}
}
else {
error_line ("%s is not a valid .DFF file!", infilename);
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
}
if (chanMask && (config->channel_mask || (config->qmode & QMODE_CHANS_UNASSIGNED))) {
error_line ("this DSDIFF file already has channel order information!");
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
else if (chanMask)
config->channel_mask = chanMask;
config->bits_per_sample = 8;
config->bytes_per_sample = 1;
config->num_channels = numChannels;
config->sample_rate = sampleRate / 8;
config->qmode |= QMODE_DSD_MSB_FIRST;
}
else if (debug_logging_mode)
error_line ("got unknown PROP chunk type \"%c%c%c%c\" of %d bytes",
prop_chunk [0], prop_chunk [1], prop_chunk [2], prop_chunk [3], dff_chunk_header.ckDataSize);
free (prop_chunk);
}
else if (!strncmp (dff_chunk_header.ckID, "DSD ", 4)) {
if (!config->num_channels || !config->sample_rate) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
total_samples = dff_chunk_header.ckDataSize / config->num_channels;
break;
}
else { // just copy unknown chunks to output file
int bytes_to_copy = (int)(((dff_chunk_header.ckDataSize) + 1) & ~(int64_t)1);
char *buff;
if (bytes_to_copy < 0 || bytes_to_copy > 4194304) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
buff = malloc (bytes_to_copy);
if (debug_logging_mode)
error_line ("extra unknown chunk \"%c%c%c%c\" of %d bytes",
dff_chunk_header.ckID [0], dff_chunk_header.ckID [1], dff_chunk_header.ckID [2],
dff_chunk_header.ckID [3], dff_chunk_header.ckDataSize);
if (!DoReadFile (infile, buff, bytes_to_copy, &bcount) ||
bcount != bytes_to_copy ||
(!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, buff, bytes_to_copy))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
free (buff);
return WAVPACK_SOFT_ERROR;
}
free (buff);
}
}
if (debug_logging_mode)
error_line ("setting configuration with %lld samples", total_samples);
if (!WavpackSetConfiguration64 (wpc, config, total_samples, NULL)) {
error_line ("%s: %s", infilename, WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
return WAVPACK_NO_ERROR;
} | 1 | [
"CWE-125"
] | WavPack | 773f9d0803c6888ae7d5391878d7337f24216f4a | 56,648,364,932,811,920,000,000,000,000,000,000,000 | 251 | issue #110: sanitize DSD file types for invalid lengths |
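A hedged sketch of the length sanitization the fix title refers to: before trusting a chunk's declared ckDataSize, reject values that are negative or larger than what remains of the file. The helper below is illustrative, not WavPack's actual patch.

#include <stdint.h>

static int chunk_size_is_sane(int64_t ck_data_size,
                              int64_t file_size, int64_t bytes_consumed)
{
    if (ck_data_size < 0)
        return 0;                      /* negative size => corrupt header */
    if (file_size && ck_data_size > file_size - bytes_consumed)
        return 0;                      /* claims more bytes than remain */
    return 1;
}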
void qdisc_list_add(struct Qdisc *q)
{
if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
struct Qdisc *root = qdisc_dev(q)->qdisc;
WARN_ON_ONCE(root == &noop_qdisc);
list_add_tail(&q->list, &root->list);
}
} | 0 | [
"CWE-264"
] | net | 90f62cf30a78721641e08737bda787552428061e | 313,611,393,065,684,000,000,000,000,000,000,000,000 | 9 | net: Use netlink_ns_capable to verify the permisions of netlink messages
It is possible by passing a netlink socket to a more privileged
executable and then to fool that executable into writing to the socket
data that happens to be valid netlink message to do something that
privileged executable did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
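A hedged model of the idea in the message, using mock types rather than the kernel's: authorization must consider the capabilities captured when the socket was opened, not only those of whichever process later writes to it.

#include <stdbool.h>

struct mock_sock { unsigned opener_caps; };  /* snapshot taken at open time */
struct mock_skb  { struct mock_sock *sk; };

static bool mock_netlink_ns_capable(const struct mock_skb *skb, unsigned cap)
{
    /* the opener of the socket must also have held the capability,
     * in addition to whatever check runs against the current process */
    return (skb->sk->opener_caps & cap) != 0;
}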
_outVar(StringInfo str, const Var *node)
{
WRITE_NODE_TYPE("VAR");
WRITE_UINT_FIELD(varno);
WRITE_INT_FIELD(varattno);
WRITE_OID_FIELD(vartype);
WRITE_INT_FIELD(vartypmod);
WRITE_OID_FIELD(varcollid);
WRITE_UINT_FIELD(varlevelsup);
WRITE_UINT_FIELD(varnoold);
WRITE_INT_FIELD(varoattno);
WRITE_LOCATION_FIELD(location);
} | 0 | [
"CWE-362"
] | postgres | 5f173040e324f6c2eebb90d86cf1b0cdb5890f0a | 271,205,540,129,205,100,000,000,000,000,000,000,000 | 14 | Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062 |
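A hedged sketch of the shape of the fix described above: resolve the name to an OID once, then feed that same OID to both the permission check and the DDL action, so a concurrent rename cannot point the two phases at different tables. The names and OID value are illustrative, not PostgreSQL's code.

#include <stdio.h>

typedef unsigned int Oid;

static Oid lookup_relation(const char *relname)  /* stand-in for a catalog lookup */
{
    (void)relname;
    return 16384;                                /* illustrative OID */
}

static void create_index_checked(const char *relname)
{
    Oid relid = lookup_relation(relname);        /* single lookup, then lock */
    /* check_ownership(relid); define_index(relid); -- same relid for both */
    printf("operating on OID %u for %s\n", relid, relname);
}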
void CBINDInstallDlg::OnPaint() {
if (IsIconic()) {
CPaintDC dc(this); // device context for painting
SendMessage(WM_ICONERASEBKGND, (WPARAM) dc.GetSafeHdc(), 0);
// Center icon in client rectangle
int cxIcon = GetSystemMetrics(SM_CXICON);
int cyIcon = GetSystemMetrics(SM_CYICON);
CRect rect;
GetClientRect(&rect);
int x = (rect.Width() - cxIcon + 1) / 2;
int y = (rect.Height() - cyIcon + 1) / 2;
// Draw the icon
dc.DrawIcon(x, y, m_hIcon);
}
else {
CDialog::OnPaint();
}
} | 0 | [
"CWE-284"
] | bind9 | 967a3b9419a3c12b8c0870c86d1ee3840bcbbad7 | 100,132,888,331,259,560,000,000,000,000,000,000,000 | 21 | [master] quote service registry paths
4532. [security] The BIND installer on Windows used an unquoted
service path, which can enable privilege escalation.
(CVE-2017-3141) [RT #45229] |
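A hedged sketch of the quoting fix the message describes: an unquoted service path containing spaces lets Windows try C:\Program.exe before the intended binary, so the installer should register the path quoted. The helper below is illustrative, not BIND's installer code.

#include <stdio.h>
#include <string.h>

static void quote_service_path(const char *raw, char *out, size_t outlen)
{
    if (raw[0] != '"' && strchr(raw, ' ') != NULL)
        snprintf(out, outlen, "\"%s\"", raw);  /* wrap the path in quotes */
    else
        snprintf(out, outlen, "%s", raw);
}

int main(void)
{
    char buf[260];
    quote_service_path("C:\\Program Files\\ISC BIND\\named.exe", buf, sizeof buf);
    puts(buf);  /* prints the path wrapped in quotes */
    return 0;
}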
static int ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
ssize_t size, void *private)
{
ext4_io_end_t *io_end = private;
/* if not async direct IO just return */
if (!io_end)
return 0;
ext_debug("ext4_end_io_dio(): io_end 0x%p "
"for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
io_end, io_end->inode->i_ino, iocb, offset, size);
/*
* Error during AIO DIO. We cannot convert unwritten extents as the
* data was not written. Just clear the unwritten flag and drop io_end.
*/
if (size <= 0) {
ext4_clear_io_unwritten_flag(io_end);
size = 0;
}
io_end->offset = offset;
io_end->size = size;
ext4_put_io_end(io_end);
return 0;
} | 0 | [
"CWE-200"
] | linux | 06bd3c36a733ac27962fea7d6f47168841376824 | 306,654,129,023,589,230,000,000,000,000,000,000,000 | 27 | ext4: fix data exposure after a crash
Huang has reported that in his powerfail testing he is seeing stale
block contents in some of recently allocated blocks although he mounts
ext4 in data=ordered mode. After some investigation I have found out
that indeed when delayed allocation is used, we don't add inode to
transaction's list of inodes needing flushing before commit. Originally
we were doing that but commit f3b59291a69d removed the logic with a
flawed argument that it is not needed.
The problem is that although for delayed allocated blocks we write their
contents immediately after allocating them, there is no guarantee that
the IO scheduler or device doesn't reorder things and thus transaction
allocating blocks and attaching them to inode can reach stable storage
before actual block contents. Actually whenever we attach freshly
allocated blocks to inode using a written extent, we should add inode to
transaction's ordered inode list to make sure we properly wait for block
contents to be written before committing the transaction. So that is
what we do in this patch. This also handles other cases where stale data
exposure was possible - like filling hole via mmap in
data=ordered,nodelalloc mode.
The only exception to the above rule are extending direct IO writes where
blkdev_direct_IO() waits for IO to complete before increasing i_size and
thus stale data exposure is not possible. For now we don't complicate
the code with optimizing this special case since the overhead is pretty
low. In case this is observed to be a performance problem we can always
handle it using a special flag to ext4_map_blocks().
CC: [email protected]
Fixes: f3b59291a69d0b734be1fc8be489fef2dd846d3d
Reported-by: "HUANG Weller (CM/ESW12-CN)" <[email protected]>
Tested-by: "HUANG Weller (CM/ESW12-CN)" <[email protected]>
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]> |
ImagingNew(const char* mode, int xsize, int ysize)
{
int bytes;
Imaging im;
if (strlen(mode) == 1) {
if (mode[0] == 'F' || mode[0] == 'I')
bytes = 4;
else
bytes = 1;
} else
bytes = strlen(mode); /* close enough */
if ((int64_t) xsize * (int64_t) ysize <= THRESHOLD / bytes) {
im = ImagingNewBlock(mode, xsize, ysize);
if (im)
return im;
/* assume memory error; try allocating in array mode instead */
ImagingError_Clear();
}
return ImagingNewArray(mode, xsize, ysize);
} | 1 | [
"CWE-284"
] | Pillow | 5d8a0be45aad78c5a22c8d099118ee26ef8144af | 150,178,444,882,571,100,000,000,000,000,000,000,000 | 23 | Memory error in Storage.c when accepting negative image size arguments |
void oidc_request_state_set(request_rec *r, const char *key, const char *value) {
/* get a handle to the global state, which is a table */
apr_table_t *state = oidc_request_state(r);
/* put the name/value pair in that table */
apr_table_set(state, key, value);
} | 0 | [
"CWE-601"
] | mod_auth_openidc | 5c15dfb08106c2451c2c44ce7ace6813c216ba75 | 713,154,630,669,004,600,000,000,000,000,000,000 | 8 | improve validation of the post-logout URL; closes #449
- to avoid an open redirect; thanks AIMOTO Norihito
- release 2.4.0.1
Signed-off-by: Hans Zandbelt <[email protected]> |
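A hedged sketch of post-logout URL validation against the open redirect named in the message: accept only local paths (rejecting scheme-relative //host forms) or absolute HTTPS URLs on our own host. This is not the module's actual code.

#include <string.h>

static int logout_url_ok(const char *url, const char *our_host)
{
    if (url[0] == '/' && url[1] != '/' && url[1] != '\\')
        return 1;                      /* local path, not scheme-relative */
    if (strncmp(url, "https://", 8) == 0) {
        const char *host = url + 8;
        size_t n = strlen(our_host);
        return strncmp(host, our_host, n) == 0 &&
               (host[n] == '/' || host[n] == '\0');
    }
    return 0;                          /* anything else is rejected */
}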
static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
int err = 0;
UNIXCB(skb).pid = get_pid(scm->pid);
UNIXCB(skb).uid = scm->creds.uid;
UNIXCB(skb).gid = scm->creds.gid;
UNIXCB(skb).fp = NULL;
unix_get_secdata(scm, skb);
if (scm->fp && send_fds)
err = unix_attach_fds(scm, skb);
skb->destructor = unix_destruct_scm;
return err;
} | 0 | [] | net | 7d267278a9ece963d77eefec61630223fce08c6c | 186,974,104,178,847,500,000,000,000,000,000,000,000 | 15 | unix: avoid use-after-free in ep_remove_wait_queue
Rainer Weikusat <[email protected]> writes:
An AF_UNIX datagram socket being the client in an n:1 association with
some server socket is only allowed to send messages to the server if the
receive queue of this socket contains at most sk_max_ack_backlog
datagrams. This implies that prospective writers might be forced to go
to sleep despite none of the message presently enqueued on the server
receive queue were sent by them. In order to ensure that these will be
woken up once space becomes again available, the present unix_dgram_poll
routine does a second sock_poll_wait call with the peer_wait wait queue
of the server socket as queue argument (unix_dgram_recvmsg does a wake
up on this queue after a datagram was received). This is inherently
problematic because the server socket is only guaranteed to remain alive
for as long as the client still holds a reference to it. In case the
connection is dissolved via connect or by the dead peer detection logic
in unix_dgram_sendmsg, the server socket may be freed despite "the
polling mechanism" (in particular, epoll) still has a pointer to the
corresponding peer_wait queue. There's no way to forcibly deregister a
wait queue with epoll.
Based on an idea by Jason Baron, the patch below changes the code such
that a wait_queue_t belonging to the client socket is enqueued on the
peer_wait queue of the server whenever the peer receive queue full
condition is detected by either a sendmsg or a poll. A wake up on the
peer queue is then relayed to the ordinary wait queue of the client
socket via wake function. The connection to the peer wait queue is again
dissolved if either a wake up is about to be relayed or the client
socket reconnects or a dead peer is detected or the client socket is
itself closed. This enables removing the second sock_poll_wait from
unix_dgram_poll, thus avoiding the use-after-free, while still ensuring
that no blocked writer sleeps forever.
Signed-off-by: Rainer Weikusat <[email protected]>
Fixes: ec0d215f9420 ("af_unix: fix 'poll for write'/connected DGRAM sockets")
Reviewed-by: Jason Baron <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int cpu;
if (vmx->loaded_vmcs == vmcs)
return;
cpu = get_cpu();
vmx->loaded_vmcs = vmcs;
vmx_vcpu_put(vcpu);
vmx_vcpu_load(vcpu, cpu);
vcpu->cpu = cpu;
put_cpu();
} | 0 | [
"CWE-20",
"CWE-617"
] | linux | 3a8b0677fc6180a467e26cc32ce6b0c09a32f9bb | 290,853,104,298,548,800,000,000,000,000,000,000,000 | 15 | KVM: VMX: Do not BUG() on out-of-bounds guest IRQ
The value of the guest_irq argument to vmx_update_pi_irte() is
ultimately coming from a KVM_IRQFD API call. Do not BUG() in
vmx_update_pi_irte() if the value is out-of bounds. (Especially,
since KVM as a whole seems to hang after that.)
Instead, print a message only once if we find that we don't have a
route for a certain IRQ (which can be out-of-bounds or within the
array).
This fixes CVE-2017-1000252.
Fixes: efc644048ecde54 ("KVM: x86: Update IRTE for posted-interrupts")
Signed-off-by: Jan H. Schönherr <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
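A hedged sketch of the behavior the message describes, with mock types and sizes in place of KVM's: bounds-check the guest-supplied IRQ, warn once, and fail the call instead of BUG()ing.

#include <stdio.h>

#define MOCK_NR_ROUTES 256

static int mock_update_pi_irte(unsigned int guest_irq)
{
    static int warned;

    if (guest_irq >= MOCK_NR_ROUTES) {
        if (!warned++)                 /* print the message only once */
            fprintf(stderr, "no route for irq %u\n", guest_irq);
        return -1;                     /* fail the call, don't crash */
    }
    /* ... update the IRTE for guest_irq ... */
    return 0;
}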
Status V2UserDocumentParser::checkValidUserDocument(const BSONObj& doc) const {
auto userIdElement = doc[AuthorizationManager::USERID_FIELD_NAME];
auto userElement = doc[AuthorizationManager::USER_NAME_FIELD_NAME];
auto userDBElement = doc[AuthorizationManager::USER_DB_FIELD_NAME];
auto credentialsElement = doc[CREDENTIALS_FIELD_NAME];
auto rolesElement = doc[ROLES_FIELD_NAME];
// Validate the "userId" element.
if (!userIdElement.eoo()) {
if (!userIdElement.isBinData(BinDataType::newUUID)) {
return _badValue("User document needs 'userId' field to be a UUID", 0);
}
}
// Validate the "user" element.
if (userElement.type() != String)
return _badValue("User document needs 'user' field to be a string", 0);
if (userElement.valueStringData().empty())
return _badValue("User document needs 'user' field to be non-empty", 0);
// Validate the "db" element
if (userDBElement.type() != String || userDBElement.valueStringData().empty()) {
return _badValue("User document needs 'db' field to be a non-empty string", 0);
}
StringData userDBStr = userDBElement.valueStringData();
if (!NamespaceString::validDBName(userDBStr, NamespaceString::DollarInDbNameBehavior::Allow) &&
userDBStr != "$external") {
return _badValue(mongoutils::str::stream() << "'" << userDBStr
<< "' is not a valid value for the db field.",
0);
}
// Validate the "credentials" element
if (credentialsElement.eoo()) {
return _badValue("User document needs 'credentials' object", 0);
}
if (credentialsElement.type() != Object) {
return _badValue("User document needs 'credentials' field to be an object", 0);
}
BSONObj credentialsObj = credentialsElement.Obj();
if (credentialsObj.isEmpty()) {
return _badValue("User document needs 'credentials' field to be a non-empty object", 0);
}
if (userDBStr == "$external") {
BSONElement externalElement = credentialsObj[MONGODB_EXTERNAL_CREDENTIAL_FIELD_NAME];
if (externalElement.eoo() || externalElement.type() != Bool || !externalElement.Bool()) {
return _badValue(
"User documents for users defined on '$external' must have "
"'credentials' field set to {external: true}",
0);
}
} else {
BSONElement scramElement = credentialsObj[SCRAM_CREDENTIAL_FIELD_NAME];
BSONElement mongoCRElement = credentialsObj[MONGODB_CR_CREDENTIAL_FIELD_NAME];
if (!mongoCRElement.eoo()) {
if (mongoCRElement.type() != String || mongoCRElement.valueStringData().empty()) {
return _badValue(
"MONGODB-CR credential must to be a non-empty string"
", if present",
0);
}
} else if (!scramElement.eoo()) {
if (scramElement.type() != Object) {
return _badValue("SCRAM credential must be an object, if present", 0);
}
} else {
return _badValue(
"User document must provide credentials for all "
"non-external users",
0);
}
}
// Validate the "roles" element.
Status status = _checkV2RolesArray(rolesElement);
if (!status.isOK())
return status;
return Status::OK();
} | 0 | [
"CWE-613"
] | mongo | 64d8e9e1b12d16b54d6a592bae8110226c491b4e | 41,512,918,358,561,463,000,000,000,000,000,000,000 | 82 | SERVER-38984 Validate unique User ID on UserCache hit
(cherry picked from commit e55d6e2292e5dbe2f97153251d8193d1cc89f5d7) |
void RGWGetBucketWebsite::pre_exec()
{
rgw_bucket_object_pre_exec(s);
} | 0 | [
"CWE-770"
] | ceph | ab29bed2fc9f961fe895de1086a8208e21ddaddc | 334,743,352,147,402,800,000,000,000,000,000,000,000 | 4 | rgw: fix issues with 'enforce bounds' patch
The patch to enforce bounds on max-keys/max-uploads/max-parts had a few
issues that would prevent us from compiling it. Instead of changing the
code provided by the submitter, we're addressing them in a separate
commit to maintain the DCO.
Signed-off-by: Joao Eduardo Luis <[email protected]>
Signed-off-by: Abhishek Lekshmanan <[email protected]>
(cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a)
mimic specific fixes:
As the largeish change from master g_conf() isn't in mimic yet, use the g_conf
global structure, also make rgw_op use the value from req_info ceph context as
we do for all the requests |
static uint32_t ok_png_get_height_for_pass(const ok_png_decoder *decoder) {
const uint32_t h = decoder->png->height;
if (decoder->interlace_method == 0) {
return h;
}
switch (decoder->interlace_pass) {
case 1: return (h + 7) / 8;
case 2: return (h + 7) / 8;
case 3: return (h + 3) / 8;
case 4: return (h + 3) / 4;
case 5: return (h + 1) / 4;
case 6: return (h + 1) / 2;
case 7: return h / 2;
default: return 0;
}
} | 0 | [
"CWE-787"
] | ok-file-formats | e49cdfb84fb5eca2a6261f3c51a3c793fab9f62e | 5,230,460,672,391,671,500,000,000,000,000,000,000 | 17 | ok_png: Disallow multiple IHDR chunks (#15) |
static void* H2Malloc(size_t size, void* user_data) {
return H2Realloc(nullptr, size, user_data);
} | 0 | [
"CWE-416"
] | node | 7f178663ebffc82c9f8a5a1b6bf2da0c263a30ed | 251,370,301,880,113,300,000,000,000,000,000,000,000 | 3 | src: use unique_ptr for WriteWrap
This commit attempts to avoid a use-after-free error by using unqiue_ptr
and passing a reference to it.
CVE-ID: CVE-2020-8265
Fixes: https://github.com/nodejs-private/node-private/issues/227
PR-URL: https://github.com/nodejs-private/node-private/pull/238
Reviewed-By: Michael Dawson <[email protected]>
Reviewed-By: Tobias Nießen <[email protected]>
Reviewed-By: Richard Lau <[email protected]> |