func
stringlengths 0
484k
| target
int64 0
1
| cwe
sequencelengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
/* Enqueue a single object onto the queue, honoring the requested
 * flow-control type.  For any queue type other than DIRECT, the queue
 * mutex is taken around the enqueue (with thread cancellation disabled
 * so the thread cannot be cancelled while holding the lock).
 * NOTE(review): iCancelStateSave is written only when qType != DIRECT
 * and read only under the same condition below, so it is never used
 * uninitialized. */
qqueueEnqObj(qqueue_t *pThis, flowControl_t flowCtlType, void *pUsr)
{
	DEFiRet;
	int iCancelStateSave;
	ISOBJ_TYPE_assert(pThis, qqueue);
	/* DIRECT queues are consumed synchronously in the caller's thread,
	 * so no locking is required for them. */
	if(pThis->qType != QUEUETYPE_DIRECT) {
		pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &iCancelStateSave);
		d_pthread_mutex_lock(pThis->mut);
	}
	CHKiRet(doEnqSingleObj(pThis, flowCtlType, pUsr));
	/* persist queue state if the configured policy requires it */
	qqueueChkPersist(pThis, 1);
finalize_it:
	if(pThis->qType != QUEUETYPE_DIRECT) {
		/* make sure at least one worker is running. */
		qqueueAdviseMaxWorkers(pThis);
		/* and release the mutex */
		d_pthread_mutex_unlock(pThis->mut);
		pthread_setcancelstate(iCancelStateSave, NULL);
		DBGOPRINT((obj_t*) pThis, "EnqueueMsg advised worker start\n");
	}
	RETiRet;
} | 0 | [
"CWE-772"
] | rsyslog | dfa88369d4ca4290db56b843f9eabdae1bfe0fd5 | 280,729,780,790,118,350,000,000,000,000,000,000,000 | 28 | bugfix: memory leak when $RepeatedMsgReduction on was used
bug tracker: http://bugzilla.adiscon.com/show_bug.cgi?id=225 |
/* Propagate a Congestion Experienced (CE) mark from the outer GRE tunnel
 * IP header onto the encapsulated packet.  Inner payloads that are
 * neither IPv4 nor IPv6 are left untouched. */
static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
{
	if (!INET_ECN_is_ce(iph->tos))
		return;	/* outer header carries no CE mark: nothing to do */

	if (skb->protocol == htons(ETH_P_IP))
		IP_ECN_set_ce(ip_hdr(skb));
	else if (skb->protocol == htons(ETH_P_IPV6))
		IP6_ECN_set_ce(ipv6_hdr(skb));
} | 0 | [] | linux-2.6 | c2892f02712e9516d72841d5c019ed6916329794 | 266,562,437,947,021,700,000,000,000,000,000,000,000 | 10 | gre: fix netns vs proto registration ordering
GRE protocol receive hook can be called right after protocol addition is done.
If netns stuff is not yet initialized, we're going to oops in
net_generic().
This is remotely oopsable if ip_gre is compiled as module and packet
comes at unfortunate moment of module loading.
Signed-off-by: Alexey Dobriyan <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
/* Find or create an nfs_page request covering [offset, offset+bytes) of
 * @page.  An existing request is extended/reused when possible; otherwise
 * a new one is created and attached to the inode.  Returns the request,
 * or an ERR_PTR value from nfs_create_request() on failure. */
static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
		struct page *page, unsigned int offset, unsigned int bytes)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_page *req;

	/* First try to coalesce with a request already pending for this page. */
	req = nfs_try_to_update_request(inode, page, offset, bytes);
	if (req != NULL)
		return req;

	/* No reusable request: create a fresh one and register it. */
	req = nfs_create_request(ctx, inode, page, offset, bytes);
	if (!IS_ERR(req))
		nfs_inode_add_request(inode, req);

	/* either a valid request or the ERR_PTR from nfs_create_request() */
	return req;
} | 0 | [] | linux | c7559663e42f4294ffe31fe159da6b6a66b35d61 | 78,554,636,387,444,880,000,000,000,000,000,000,000 | 16 | NFS: Allow nfs_updatepage to extend a write under additional circumstances
Currently nfs_updatepage allows a write to be extended to cover a full
page only if we don't have a byte range lock lock on the file... but if
we have a write delegation on the file or if we have the whole file
locked for writing then we should be allowed to extend the write as
well.
Signed-off-by: Scott Mayhew <[email protected]>
[Trond: fix up call to nfs_have_delegation()]
Signed-off-by: Trond Myklebust <[email protected]> |
/* Allocate a new, zero-initialized xsltDocument wrapper for @doc and,
 * when @style is non-NULL, push it onto the front of the stylesheet's
 * docList.  Returns the new wrapper, or NULL on allocation failure
 * (after reporting a transform error). */
xsltNewStyleDocument(xsltStylesheetPtr style, xmlDocPtr doc) {
    xsltDocumentPtr ret;

    ret = (xsltDocumentPtr) xmlMalloc(sizeof(xsltDocument));
    if (ret == NULL) {
	xsltTransformError(NULL, style, (xmlNodePtr) doc,
		"xsltNewStyleDocument : malloc failed\n");
	return(NULL);
    }
    memset(ret, 0, sizeof(xsltDocument));
    ret->doc = doc;
    if (style != NULL) {
	/* link at the head of the stylesheet's document list */
	ret->next = style->docList;
	style->docList = ret;
    }
    return(ret);
} | 0 | [] | libxslt | e03553605b45c88f0b4b2980adfbbb8f6fca2fd6 | 190,836,947,349,964,850,000,000,000,000,000,000,000 | 17 | Fix security framework bypass
xsltCheckRead and xsltCheckWrite return -1 in case of error but callers
don't check for this condition and allow access. With a specially
crafted URL, xsltCheckRead could be tricked into returning an error
because of a supposedly invalid URL that would still be loaded
successfully later on.
Fixes #12.
Thanks to Felix Wilhelm for the report. |
/* Return the remote user name obtained from an RFC 1413 ident lookup,
 * or NULL when unavailable.  A value already cached on the connection
 * takes precedence; otherwise the registered ident_lookup hook (if any)
 * is consulted. */
AP_DECLARE(const char *) ap_get_remote_logname(request_rec *r)
{
    const char *cached = r->connection->remote_logname;

    if (cached != NULL) {
        return cached;
    }
    return ident_lookup ? ident_lookup(r) : NULL;
} | 0 | [
"CWE-416",
"CWE-284"
] | httpd | 4cc27823899e070268b906ca677ee838d07cf67a | 326,590,590,632,819,400,000,000,000,000,000,000,000 | 12 | core: Disallow Methods' registration at run time (.htaccess), they may be
used only if registered at init time (httpd.conf).
Calling ap_method_register() in children processes is not the right scope
since it won't be shared for all requests.
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1807655 13f79535-47bb-0310-9956-ffa450edef68 |
// Records `shape` as the inferred shape for `node`'s output `output_port`,
// merging it with any shape previously recorded for that port.
// Returns Internal if the node has no inference context, InvalidArgument if
// the port index is out of range, and propagates any merge incompatibility.
Status ShapeRefiner::SetShape(const Node* node, int output_port,
                              ShapeHandle shape) {
  auto c = GetContext(node);
  if (c == nullptr) {
    return errors::Internal("Could not find context for ", node->name());
  }

  if (output_port < 0 || output_port >= node->num_outputs()) {
    return errors::InvalidArgument(
        "output_port '", output_port, "' is out of range, ", "node '",
        node->name(), "' has ", node->num_outputs(), " outputs");
  }
  // Note: it's possible, if the node's been updated, that the shape inference
  // context doesn't have the right number of outputs.
  if (node->num_outputs() > c->num_outputs()) {
    TF_RETURN_IF_ERROR(c->ExpandOutputs(node->num_outputs()));
  }

  // Check compatibility, and merge the shapes.
  // Merge() leaves `shape` holding the combined shape on success.
  ShapeHandle existing_shape = c->output(output_port);
  TF_RETURN_IF_ERROR(c->Merge(existing_shape, shape, &shape));
  c->set_output(output_port, shape);

  // TODO(vrv): Do we need to propagate the new shape through all
  // consumers that change their outputs?  At the moment, python
  // does not do this, but this seems like a nice feature.

  // TODO(vrv): We might need to keep track of the fact that the
  // existing shape is invalidated, in case we need to propagate
  // this information to remote workers.
  return Status::OK();
} | 0 | [
"CWE-416",
"CWE-369"
] | tensorflow | ee119d4a498979525046fba1c3dd3f13a039fbb1 | 103,262,588,316,081,340,000,000,000,000,000,000,000 | 32 | Fix segmentation fault in shape inference logic.
When running shape functions, some functions (such as `MutableHashTableShape`)
produce extra output information in the form of a `ShapeAndType` struct. The
shapes embedded in this struct are owned by an inference context that is
cleaned up almost immediately; if the upstream code attempts to access this
shape information, it can trigger a segfault.
`ShapeRefiner` is mitigating this for normal output shapes by cloning them
(and thus putting the newly created shape under ownership of an inference
context that will not die), but we were not doing the same for shapes and
types. This commit fixes that by doing similar logic on output shapes and
types.
PiperOrigin-RevId: 384761124
Change-Id: I07c0c42d29dfbb55bfa13ec1f09ef825fb0a1a1d |
/* Write font `sf` to `filename` in SFD format.  When `todir` is non-zero
 * the font is written as a directory: `filename` becomes a directory and
 * the top-level properties go into "<filename>/font.props"; otherwise a
 * single SFD file is written.  For CID-keyed fonts the CID master is
 * dumped with a temporary 1:1 encoding map large enough for the biggest
 * subfont.  Returns 1 on success, 0 on any error. */
int SFDWrite(char *filename,SplineFont *sf,EncMap *map,EncMap *normal,int todir) {
    FILE *sfd;
    int i, gc;
    char *tempfilename = filename;
    int err = false;

    if ( todir ) {
	SFDirClean(filename);
	GFileMkDir(filename, 0755); /* this will fail if directory already exists. That's ok */
	/* NOTE(review): malloc result is not checked here -- TODO confirm
	 * whether upstream convention tolerates that (strcpy below would
	 * crash on OOM). */
	tempfilename = malloc(strlen(filename)+strlen("/" FONT_PROPS)+1);
	strcpy(tempfilename,filename); strcat(tempfilename,"/" FONT_PROPS);
    }
    sfd = fopen(tempfilename,"w");
    if ( tempfilename!=filename ) free(tempfilename);
    if ( sfd==NULL )
return( 0 );
    locale_t tmplocale; locale_t oldlocale; // Declare temporary locale storage.
    switch_to_c_locale(&tmplocale, &oldlocale); // Switch to the C locale temporarily and cache the old locale.
    if ( sf->cidmaster!=NULL ) {
	sf=sf->cidmaster;
	/* size the identity map to the largest subfont's glyph count */
	gc = 1;
	for ( i=0; i<sf->subfontcnt; ++i )
	    if ( sf->subfonts[i]->glyphcnt > gc )
		gc = sf->subfonts[i]->glyphcnt;
	map = EncMap1to1(gc);
	err = SFDDump(sfd,sf,map,NULL,todir,filename);
	EncMapFree(map);	/* only the locally created map is freed */
    } else
	err = SFDDump(sfd,sf,map,normal,todir,filename);
    switch_to_old_locale(&tmplocale, &oldlocale); // Switch to the cached locale.
    /* both a pending stream error and a failed close count as failure */
    if ( ferror(sfd) ) err = true;
    if ( fclose(sfd) ) err = true;
    if ( todir )
	SFFinalDirClean(filename);
return( !err );
} | 0 | [
"CWE-416"
] | fontforge | 048a91e2682c1a8936ae34dbc7bd70291ec05410 | 108,477,598,317,903,330,000,000,000,000,000,000,000 | 38 | Fix for #4084 Use-after-free (heap) in the SFD_GetFontMetaData() function
Fix for #4086 NULL pointer dereference in the SFDGetSpiros() function
Fix for #4088 NULL pointer dereference in the SFD_AssignLookups() function
Add empty sf->fontname string if it isn't set, fixing #4089 #4090 and many
other potential issues (many downstream calls to strlen() on the value). |
/* Convert an OpenSSL ASN.1 time string (UTCTIME "YYMMDDhhmmssZ" or
 * GENERALIZEDTIME "YYYYMMDDhhmmssZ") to a time_t.
 * The string is validated (type, embedded NULs, minimum length), then a
 * writable copy is parsed field-by-field from the tail towards the head,
 * truncating with '\0' after each atoi() so each call sees only one field.
 * Returns (time_t)-1 and raises an E_WARNING on any validation failure. */
static time_t asn1_time_to_time_t(ASN1_UTCTIME * timestr TSRMLS_DC) /* {{{ */
{
	/*
	This is how the time string is formatted:
	snprintf(p, sizeof(p), "%02d%02d%02d%02d%02d%02dZ",ts->tm_year%100,
	ts->tm_mon+1,ts->tm_mday,ts->tm_hour,ts->tm_min,ts->tm_sec);
	*/
	time_t ret;
	struct tm thetime;
	char * strbuf;
	char * thestr;
	long gmadjust = 0;

	if (ASN1_STRING_type(timestr) != V_ASN1_UTCTIME && ASN1_STRING_type(timestr) != V_ASN1_GENERALIZEDTIME) {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "illegal ASN1 data type for timestamp");
		return (time_t)-1;
	}

	/* reject strings with embedded NUL bytes (length mismatch) */
	if (ASN1_STRING_length(timestr) != strlen((const char*)ASN1_STRING_data(timestr))) {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "illegal length in timestamp");
		return (time_t)-1;
	}

	if (ASN1_STRING_length(timestr) < 13) {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "unable to parse time string %s correctly", timestr->data);
		return (time_t)-1;
	}

	/* GENERALIZEDTIME carries a 4-digit year, so it needs 2 more chars */
	if (ASN1_STRING_type(timestr) == V_ASN1_GENERALIZEDTIME && ASN1_STRING_length(timestr) < 15) {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "unable to parse time string %s correctly", timestr->data);
		return (time_t)-1;
	}

	strbuf = estrdup((char *)ASN1_STRING_data(timestr));

	memset(&thetime, 0, sizeof(thetime));

	/* we work backwards so that we can use atoi more easily */
	/* start at the seconds field, just before the trailing 'Z' */
	thestr = strbuf + ASN1_STRING_length(timestr) - 3;

	thetime.tm_sec = atoi(thestr);
	*thestr = '\0';
	thestr -= 2;
	thetime.tm_min = atoi(thestr);
	*thestr = '\0';
	thestr -= 2;
	thetime.tm_hour = atoi(thestr);
	*thestr = '\0';
	thestr -= 2;
	thetime.tm_mday = atoi(thestr);
	*thestr = '\0';
	thestr -= 2;
	thetime.tm_mon = atoi(thestr)-1;
	*thestr = '\0';
	if( ASN1_STRING_type(timestr) == V_ASN1_UTCTIME ) {
		thestr -= 2;
		thetime.tm_year = atoi(thestr);
		/* RFC 5280 UTCTIME pivot: years < 50 are 20xx, here <68 -> 20xx */
		if (thetime.tm_year < 68) {
			thetime.tm_year += 100;
		}
	} else if( ASN1_STRING_type(timestr) == V_ASN1_GENERALIZEDTIME ) {
		thestr -= 4;
		thetime.tm_year = atoi(thestr) - 1900;
	}

	thetime.tm_isdst = -1;
	ret = mktime(&thetime);

#if HAVE_TM_GMTOFF
	gmadjust = thetime.tm_gmtoff;
#else
	/*
	** If correcting for daylight savings time, we set the adjustment to
	** the value of timezone - 3600 seconds. Otherwise, we need to overcorrect and
	** set the adjustment to the main timezone + 3600 seconds.
	*/
	gmadjust = -(thetime.tm_isdst ? (long)timezone - 3600 : (long)timezone + 3600);
#endif
	/* mktime() interpreted the fields as local time; shift back to UTC */
	ret += gmadjust;

	efree(strbuf);

	return ret;
} | 0 | [
"CWE-754"
] | php-src | 89637c6b41b510c20d262c17483f582f115c66d6 | 182,527,147,579,154,600,000,000,000,000,000,000,000 | 90 | Fix bug #74651 - check EVP_SealInit as it can return -1 |
/* Format a single iSCSI session parameter into the sysfs buffer @buf.
 * Returns the number of bytes written, or -ENOSYS for unknown parameters.
 *
 * All formatting goes through sysfs_emit() instead of sprintf(): sysfs
 * attribute buffers are exactly one page, and several of these fields
 * (targetname, username, password, alias, ...) are set from userspace via
 * netlink, so an unbounded sprintf() could write past PAGE_SIZE (CWE-787).
 * sysfs_emit() truncates safely at the page boundary. */
int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
			    enum iscsi_param param, char *buf)
{
	struct iscsi_session *session = cls_session->dd_data;
	int len;

	switch(param) {
	case ISCSI_PARAM_FAST_ABORT:
		len = sysfs_emit(buf, "%d\n", session->fast_abort);
		break;
	case ISCSI_PARAM_ABORT_TMO:
		len = sysfs_emit(buf, "%d\n", session->abort_timeout);
		break;
	case ISCSI_PARAM_LU_RESET_TMO:
		len = sysfs_emit(buf, "%d\n", session->lu_reset_timeout);
		break;
	case ISCSI_PARAM_TGT_RESET_TMO:
		len = sysfs_emit(buf, "%d\n", session->tgt_reset_timeout);
		break;
	case ISCSI_PARAM_INITIAL_R2T_EN:
		len = sysfs_emit(buf, "%d\n", session->initial_r2t_en);
		break;
	case ISCSI_PARAM_MAX_R2T:
		len = sysfs_emit(buf, "%hu\n", session->max_r2t);
		break;
	case ISCSI_PARAM_IMM_DATA_EN:
		len = sysfs_emit(buf, "%d\n", session->imm_data_en);
		break;
	case ISCSI_PARAM_FIRST_BURST:
		len = sysfs_emit(buf, "%u\n", session->first_burst);
		break;
	case ISCSI_PARAM_MAX_BURST:
		len = sysfs_emit(buf, "%u\n", session->max_burst);
		break;
	case ISCSI_PARAM_PDU_INORDER_EN:
		len = sysfs_emit(buf, "%d\n", session->pdu_inorder_en);
		break;
	case ISCSI_PARAM_DATASEQ_INORDER_EN:
		len = sysfs_emit(buf, "%d\n", session->dataseq_inorder_en);
		break;
	case ISCSI_PARAM_DEF_TASKMGMT_TMO:
		len = sysfs_emit(buf, "%d\n", session->def_taskmgmt_tmo);
		break;
	case ISCSI_PARAM_ERL:
		len = sysfs_emit(buf, "%d\n", session->erl);
		break;
	case ISCSI_PARAM_TARGET_NAME:
		len = sysfs_emit(buf, "%s\n", session->targetname);
		break;
	case ISCSI_PARAM_TARGET_ALIAS:
		len = sysfs_emit(buf, "%s\n", session->targetalias);
		break;
	case ISCSI_PARAM_TPGT:
		len = sysfs_emit(buf, "%d\n", session->tpgt);
		break;
	case ISCSI_PARAM_USERNAME:
		len = sysfs_emit(buf, "%s\n", session->username);
		break;
	case ISCSI_PARAM_USERNAME_IN:
		len = sysfs_emit(buf, "%s\n", session->username_in);
		break;
	case ISCSI_PARAM_PASSWORD:
		len = sysfs_emit(buf, "%s\n", session->password);
		break;
	case ISCSI_PARAM_PASSWORD_IN:
		len = sysfs_emit(buf, "%s\n", session->password_in);
		break;
	case ISCSI_PARAM_IFACE_NAME:
		len = sysfs_emit(buf, "%s\n", session->ifacename);
		break;
	case ISCSI_PARAM_INITIATOR_NAME:
		len = sysfs_emit(buf, "%s\n", session->initiatorname);
		break;
	case ISCSI_PARAM_BOOT_ROOT:
		len = sysfs_emit(buf, "%s\n", session->boot_root);
		break;
	case ISCSI_PARAM_BOOT_NIC:
		len = sysfs_emit(buf, "%s\n", session->boot_nic);
		break;
	case ISCSI_PARAM_BOOT_TARGET:
		len = sysfs_emit(buf, "%s\n", session->boot_target);
		break;
	case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
		len = sysfs_emit(buf, "%u\n", session->auto_snd_tgt_disable);
		break;
	case ISCSI_PARAM_DISCOVERY_SESS:
		len = sysfs_emit(buf, "%u\n", session->discovery_sess);
		break;
	case ISCSI_PARAM_PORTAL_TYPE:
		len = sysfs_emit(buf, "%s\n", session->portal_type);
		break;
	case ISCSI_PARAM_CHAP_AUTH_EN:
		len = sysfs_emit(buf, "%u\n", session->chap_auth_en);
		break;
	case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
		len = sysfs_emit(buf, "%u\n", session->discovery_logout_en);
		break;
	case ISCSI_PARAM_BIDI_CHAP_EN:
		len = sysfs_emit(buf, "%u\n", session->bidi_chap_en);
		break;
	case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
		len = sysfs_emit(buf, "%u\n", session->discovery_auth_optional);
		break;
	case ISCSI_PARAM_DEF_TIME2WAIT:
		len = sysfs_emit(buf, "%d\n", session->time2wait);
		break;
	case ISCSI_PARAM_DEF_TIME2RETAIN:
		len = sysfs_emit(buf, "%d\n", session->time2retain);
		break;
	case ISCSI_PARAM_TSID:
		len = sysfs_emit(buf, "%u\n", session->tsid);
		break;
	case ISCSI_PARAM_ISID:
		len = sysfs_emit(buf, "%02x%02x%02x%02x%02x%02x\n",
			      session->isid[0], session->isid[1],
			      session->isid[2], session->isid[3],
			      session->isid[4], session->isid[5]);
		break;
	case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
		len = sysfs_emit(buf, "%u\n", session->discovery_parent_idx);
		break;
	case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
		if (session->discovery_parent_type)
			len = sysfs_emit(buf, "%s\n",
				      session->discovery_parent_type);
		else
			len = sysfs_emit(buf, "\n");
		break;
	default:
		return -ENOSYS;
	}

	return len;
} | 1 | [
"CWE-787"
] | linux | ec98ea7070e94cc25a422ec97d1421e28d97b7ee | 251,760,917,559,417,970,000,000,000,000,000,000,000 | 134 | scsi: iscsi: Ensure sysfs attributes are limited to PAGE_SIZE
As the iSCSI parameters are exported back through sysfs, it should be
enforcing that they never are more than PAGE_SIZE (which should be more
than enough) before accepting updates through netlink.
Change all iSCSI sysfs attributes to use sysfs_emit().
Cc: [email protected]
Reported-by: Adam Nichols <[email protected]>
Reviewed-by: Lee Duncan <[email protected]>
Reviewed-by: Greg Kroah-Hartman <[email protected]>
Reviewed-by: Mike Christie <[email protected]>
Signed-off-by: Chris Leech <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]> |
/* Keyboard handler for braille dot keys (values 1..8).
 * Dots pressed together are accumulated in `pressed` and committed as one
 * braille pattern via k_brlcommit().  With brl_timeout enabled, the
 * pattern is committed when all dots are released, and a new chord is
 * started if releases are separated by more than brl_timeout ms;
 * without a timeout, the pattern snapshot is taken on each press.
 * Only meaningful in Unicode keyboard mode (braille patterns are
 * delivered as Unicode row U+2800). */
static void k_brl(struct vc_data *vc, unsigned char value, char up_flag)
{
	/* chord state shared across invocations (single console keyboard) */
	static unsigned pressed, committing;
	static unsigned long releasestart;

	if (kbd->kbdmode != VC_UNICODE) {
		if (!up_flag)
			pr_warn("keyboard mode must be unicode for braille patterns\n");
		return;
	}

	if (!value) {
		/* value 0: emit the bare braille-row base character */
		k_unicode(vc, BRL_UC_ROW, up_flag);
		return;
	}

	if (value > 8)
		return;

	if (!up_flag) {
		pressed |= 1 << (value - 1);
		if (!brl_timeout)
			committing = pressed;
	} else if (brl_timeout) {
		/* re-snapshot the chord if this release starts a new window */
		if (!committing ||
		    time_after(jiffies,
			       releasestart + msecs_to_jiffies(brl_timeout))) {
			committing = pressed;
			releasestart = jiffies;
		}
		pressed &= ~(1 << (value - 1));
		if (!pressed && committing) {
			/* last dot released: emit the accumulated pattern */
			k_brlcommit(vc, committing, 0);
			committing = 0;
		}
	} else {
		if (committing) {
			k_brlcommit(vc, committing, 0);
			committing = 0;
		}
		pressed &= ~(1 << (value - 1));
	}
} | 0 | [
"CWE-416"
] | linux | 6ca03f90527e499dd5e32d6522909e2ad390896b | 304,758,669,291,322,980,000,000,000,000,000,000,000 | 43 | vt: keyboard, simplify vt_kdgkbsent
Use 'strlen' of the string, add one for NUL terminator and simply do
'copy_to_user' instead of the explicit 'for' loop. This makes the
KDGKBSENT case more compact.
The only thing we need to take care about is NULL 'func_table[i]'. Use
an empty string in that case.
The original check for overflow could never trigger as the func_buf
strings are always shorter or equal to 'struct kbsentry's.
Cc: <[email protected]>
Signed-off-by: Jiri Slaby <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
bool InMatchExpression::matchesSingleElement(const BSONElement& e, MatchDetails* details) const {
if (_hasNull && e.eoo()) {
return true;
}
if (contains(e)) {
return true;
}
for (auto&& regex : _regexes) {
if (regex->matchesSingleElement(e, details)) {
return true;
}
}
return false;
} | 0 | [
"CWE-190"
] | mongo | 21d8699ed6c517b45e1613e20231cd8eba894985 | 325,779,852,092,917,200,000,000,000,000,000,000,000 | 14 | SERVER-43699 $mod should not overflow for large negative values |
/* Scan all ring-collective contexts of @coll and abort any in-progress
 * context whose collective has exceeded the PMIx timeout relative to
 * timestamp @ts: the local PMIx callback is answered with
 * PMIXP_ERR_TIMEOUT, the event is logged, and the context is reset.
 * The whole scan runs under the collective's lock. */
void pmixp_coll_ring_reset_if_to(pmixp_coll_t *coll, time_t ts) {
	pmixp_coll_ring_ctx_t *coll_ctx;
	int i;

	/* lock the structure */
	slurm_mutex_lock(&coll->lock);
	for (i = 0; i < PMIXP_COLL_RING_CTX_NUM; i++) {
		coll_ctx = &coll->state.ring.ctx_array[i];
		/* idle or already-synchronized contexts cannot time out */
		if (!coll_ctx->in_use ||
		    (PMIXP_COLL_RING_SYNC == coll_ctx->state)) {
			continue;
		}
		/* NOTE(review): the age check uses the shared coll->ts, not a
		 * per-context timestamp -- confirm that is intended. */
		if (ts - coll->ts > pmixp_info_timeout()) {
			/* respond to the libpmix */
			pmixp_coll_localcb_nodata(coll, PMIXP_ERR_TIMEOUT);

			/* report the timeout event */
			PMIXP_ERROR("%p: collective timeout seq=%d",
				    coll, coll_ctx->seq);
			pmixp_coll_log(coll);
			/* drop the collective */
			_reset_coll_ring(coll_ctx);
		}
	}
	/* unlock the structure */
	slurm_mutex_unlock(&coll->lock);
} | 0 | [
"CWE-120"
] | slurm | c3142dd87e06621ff148791c3d2f298b5c0b3a81 | 51,282,935,475,251,870,000,000,000,000,000,000,000 | 27 | PMIx - fix potential buffer overflows from use of unpackmem().
CVE-2020-27745. |
/* Unmarshal a TPMS_ALG_PROPERTY (algorithm id followed by its attribute
 * flags) from the byte stream, advancing *buffer and decrementing *size.
 * Returns TPM_RC_SUCCESS, or the first failure code encountered. */
TPMS_ALG_PROPERTY_Unmarshal(TPMS_ALG_PROPERTY *target, BYTE **buffer, INT32 *size)
{
    TPM_RC rc;

    rc = TPM_ALG_ID_Unmarshal(&target->alg, buffer, size);
    if (rc == TPM_RC_SUCCESS)
	rc = TPMA_ALGORITHM_Unmarshal(&target->algProperties, buffer, size);
    return rc;
} | 0 | [
"CWE-787"
] | libtpms | 5cc98a62dc6f204dcf5b87c2ee83ac742a6a319b | 126,641,854,316,598,760,000,000,000,000,000,000,000 | 12 | tpm2: Restore original value if unmarshalled value was illegal
Restore the original value of the memory location where data from
a stream was unmarshalled and the unmarshalled value was found to
be illegal. The goal is to not keep illegal values in memory.
Signed-off-by: Stefan Berger <[email protected]> |
/* Compile the operands of a multipass opcode (context, correct,
 * pass2..pass4).  The remainder of the current table line is split into a
 * test part and an action part (separated by the first whitespace); each
 * part is translated into a sequence of pass instructions appended to
 * passRuleDots, which is finally registered via addRule().
 * Returns 1 on success, 0 after reporting a compile error. */
compilePassOpcode(const FileInfo *file, TranslationTableOpcode opcode, int noback,
		int nofor, TranslationTableHeader **table) {
	static CharsString passRuleChars;
	static CharsString passRuleDots;
	/* Compile the operands of a pass opcode */
	widechar passSubOp;
	const CharacterClass *class;
	TranslationTableRule *rule = NULL;
	int k;
	int kk = 0;
	int endTest = 0;
	widechar *passInstructions = passRuleDots.chars;
	int passIC = 0; /* Instruction counter */
	passRuleChars.length = 0;
	CharsString passHoldString;
	widechar passHoldNumber;
	CharsString passLine;
	int passLinepos = 0;
	TranslationTableCharacterAttributes passAttributes;
	passHoldString.length = 0;
	for (k = file->linepos; k < file->linelen; k++)
		passHoldString.chars[passHoldString.length++] = file->line[k];
/* SEPCHAR temporarily replaces the first whitespace character to mark the
 * boundary between the test part and the action part. */
#define SEPCHAR 0x0001
	for (k = 0; k < passHoldString.length && passHoldString.chars[k] > 32; k++)
		;
	if (k < passHoldString.length)
		passHoldString.chars[k] = SEPCHAR;
	else {
		compileError(file, "Invalid multipass operands");
		return 0;
	}
	parseChars(file, &passLine, &passHoldString);
	/* Compile test part */
	for (k = 0; k < passLine.length && passLine.chars[k] != SEPCHAR; k++)
		;
	endTest = k;
	passLine.chars[endTest] = pass_endTest;
	passLinepos = 0;
	while (passLinepos <= endTest) {
		switch ((passSubOp = passLine.chars[passLinepos])) {
		case pass_lookback:
			if (!appendInstructionChar(file, passInstructions, &passIC, pass_lookback))
				return 0;
			passLinepos++;
			passGetNumber(&passLine, &passLinepos, &passHoldNumber);
			/* a bare lookback operator defaults to distance 1 */
			if (passHoldNumber == 0) passHoldNumber = 1;
			if (!appendInstructionChar(file, passInstructions, &passIC, passHoldNumber))
				return 0;
			break;
		case pass_not:
			if (!appendInstructionChar(file, passInstructions, &passIC, pass_not))
				return 0;
			passLinepos++;
			break;
		case pass_first:
			if (!appendInstructionChar(file, passInstructions, &passIC, pass_first))
				return 0;
			passLinepos++;
			break;
		case pass_last:
			if (!appendInstructionChar(file, passInstructions, &passIC, pass_last))
				return 0;
			passLinepos++;
			break;
		case pass_search:
			if (!appendInstructionChar(file, passInstructions, &passIC, pass_search))
				return 0;
			passLinepos++;
			break;
		case pass_string:
			if (!verifyStringOrDots(file, opcode, 1, 0, nofor)) {
				return 0;
			}
			passLinepos++;
			if (!appendInstructionChar(file, passInstructions, &passIC, pass_string))
				return 0;
			passGetString(&passLine, &passLinepos, &passHoldString, file);
			if (passHoldString.length == 0) {
				compileError(file, "empty string in test part");
				return 0;
			}
			goto testDoCharsDots;
		case pass_dots:
			if (!verifyStringOrDots(file, opcode, 0, 0, nofor)) {
				return 0;
			}
			passLinepos++;
			if (!appendInstructionChar(file, passInstructions, &passIC, pass_dots))
				return 0;
			passGetDots(&passLine, &passLinepos, &passHoldString, file);
			if (passHoldString.length == 0) {
				compileError(file, "expected dot pattern after @ operand in test part");
				return 0;
			}
		/* shared tail for string/dots operands: emit length then chars */
		testDoCharsDots:
			if (passIC >= MAXSTRING) {
				compileError(
						file, "@ operand in test part of multipass operand too long");
				return 0;
			}
			if (!appendInstructionChar(
						file, passInstructions, &passIC, passHoldString.length))
				return 0;
			for (kk = 0; kk < passHoldString.length; kk++) {
				if (passIC >= MAXSTRING) {
					compileError(
							file, "@ operand in test part of multipass operand too long");
					return 0;
				}
				if (!appendInstructionChar(
							file, passInstructions, &passIC, passHoldString.chars[kk]))
					return 0;
			}
			break;
		case pass_startReplace:
			if (!appendInstructionChar(
						file, passInstructions, &passIC, pass_startReplace))
				return 0;
			passLinepos++;
			break;
		case pass_endReplace:
			if (!appendInstructionChar(file, passInstructions, &passIC, pass_endReplace))
				return 0;
			passLinepos++;
			break;
		case pass_variable:
			passLinepos++;
			if (!passGetVariableNumber(file, &passLine, &passLinepos, &passHoldNumber))
				return 0;
			switch (passLine.chars[passLinepos]) {
			case pass_eq:
				if (!appendInstructionChar(file, passInstructions, &passIC, pass_eq))
					return 0;
				goto doComp;
			case pass_lt:
				if (passLine.chars[passLinepos + 1] == pass_eq) {
					passLinepos++;
					if (!appendInstructionChar(
								file, passInstructions, &passIC, pass_lteq))
						return 0;
				} else if (!appendInstructionChar(
								   file, passInstructions, &passIC, pass_lt))
					return 0;
				goto doComp;
			case pass_gt:
				if (passLine.chars[passLinepos + 1] == pass_eq) {
					passLinepos++;
					if (!appendInstructionChar(
								file, passInstructions, &passIC, pass_gteq))
						return 0;
				} else if (!appendInstructionChar(
								   file, passInstructions, &passIC, pass_gt))
					return 0;
			/* shared tail: emit variable number and comparison value */
			doComp:
				if (!appendInstructionChar(
							file, passInstructions, &passIC, passHoldNumber))
					return 0;
				passLinepos++;
				passGetNumber(&passLine, &passLinepos, &passHoldNumber);
				if (!appendInstructionChar(
							file, passInstructions, &passIC, passHoldNumber))
					return 0;
				break;
			default:
				compileError(file, "incorrect comparison operator");
				return 0;
			}
			break;
		case pass_attributes:
			passLinepos++;
			if (!passGetAttributes(&passLine, &passLinepos, &passAttributes, file))
				return 0;
		/* encode the 64-bit attribute mask as four 16-bit instruction words */
		insertAttributes:
			if (!appendInstructionChar(file, passInstructions, &passIC, pass_attributes))
				return 0;
			if (!appendInstructionChar(
						file, passInstructions, &passIC, (passAttributes >> 48) & 0xffff))
				return 0;
			if (!appendInstructionChar(
						file, passInstructions, &passIC, (passAttributes >> 32) & 0xffff))
				return 0;
			if (!appendInstructionChar(
						file, passInstructions, &passIC, (passAttributes >> 16) & 0xffff))
				return 0;
			if (!appendInstructionChar(
						file, passInstructions, &passIC, passAttributes & 0xffff))
				return 0;
		/* parse an optional repetition range: ".." (until), n, or n-m */
		getRange:
			if (passLine.chars[passLinepos] == pass_until) {
				passLinepos++;
				if (!appendInstructionChar(file, passInstructions, &passIC, 1)) return 0;
				if (!appendInstructionChar(file, passInstructions, &passIC, 0xffff))
					return 0;
				break;
			}
			passGetNumber(&passLine, &passLinepos, &passHoldNumber);
			if (passHoldNumber == 0) {
				if (!appendInstructionChar(file, passInstructions, &passIC, 1)) return 0;
				if (!appendInstructionChar(file, passInstructions, &passIC, 1)) return 0;
				break;
			}
			if (!appendInstructionChar(file, passInstructions, &passIC, passHoldNumber))
				return 0;
			if (passLine.chars[passLinepos] != pass_hyphen) {
				if (!appendInstructionChar(
							file, passInstructions, &passIC, passHoldNumber))
					return 0;
				break;
			}
			passLinepos++;
			passGetNumber(&passLine, &passLinepos, &passHoldNumber);
			if (passHoldNumber == 0) {
				compileError(file, "invalid range");
				return 0;
			}
			if (!appendInstructionChar(file, passInstructions, &passIC, passHoldNumber))
				return 0;
			break;
		case pass_groupstart:
		case pass_groupend: {
			passLinepos++;
			passGetName(&passLine, &passLinepos, &passHoldString);
			TranslationTableOffset ruleOffset = findRuleName(&passHoldString, *table);
			if (ruleOffset)
				rule = (TranslationTableRule *)&(*table)->ruleArea[ruleOffset];
			if (rule && rule->opcode == CTO_Grouping) {
				if (!appendInstructionChar(file, passInstructions, &passIC, passSubOp))
					return 0;
				/* store the 32-bit rule offset as two 16-bit words */
				if (!appendInstructionChar(
							file, passInstructions, &passIC, ruleOffset >> 16))
					return 0;
				if (!appendInstructionChar(
							file, passInstructions, &passIC, ruleOffset & 0xffff))
					return 0;
				break;
			} else {
				compileError(file, "%s is not a grouping name",
						_lou_showString(
								&passHoldString.chars[0], passHoldString.length, 0));
				return 0;
			}
			break;
		}
		case pass_swap: {
			passLinepos++;
			passGetName(&passLine, &passLinepos, &passHoldString);
			/* the name may denote a character class... */
			if ((class = findCharacterClass(&passHoldString, *table))) {
				passAttributes = class->attribute;
				goto insertAttributes;
			}
			/* ...or a swap rule */
			TranslationTableOffset ruleOffset = findRuleName(&passHoldString, *table);
			if (ruleOffset)
				rule = (TranslationTableRule *)&(*table)->ruleArea[ruleOffset];
			if (rule &&
					(rule->opcode == CTO_SwapCc || rule->opcode == CTO_SwapCd ||
							rule->opcode == CTO_SwapDd)) {
				if (!appendInstructionChar(file, passInstructions, &passIC, pass_swap))
					return 0;
				if (!appendInstructionChar(
							file, passInstructions, &passIC, ruleOffset >> 16))
					return 0;
				if (!appendInstructionChar(
							file, passInstructions, &passIC, ruleOffset & 0xffff))
					return 0;
				goto getRange;
			}
			compileError(file, "%s is neither a class name nor a swap name.",
					_lou_showString(&passHoldString.chars[0], passHoldString.length, 0));
			return 0;
		}
		case pass_endTest:
			if (!appendInstructionChar(file, passInstructions, &passIC, pass_endTest))
				return 0;
			passLinepos++;
			break;
		default:
			compileError(file, "incorrect operator '%c ' in test part",
					passLine.chars[passLinepos]);
			return 0;
		}
	} /* Compile action part */

	/* Compile action part */
	while (passLinepos < passLine.length && passLine.chars[passLinepos] <= 32)
		passLinepos++;
	while (passLinepos < passLine.length && passLine.chars[passLinepos] > 32) {
		if (passIC >= MAXSTRING) {
			compileError(file, "Action part in multipass operand too long");
			return 0;
		}
		switch ((passSubOp = passLine.chars[passLinepos])) {
		case pass_string:
			if (!verifyStringOrDots(file, opcode, 1, 1, nofor)) {
				return 0;
			}
			passLinepos++;
			if (!appendInstructionChar(file, passInstructions, &passIC, pass_string))
				return 0;
			passGetString(&passLine, &passLinepos, &passHoldString, file);
			goto actionDoCharsDots;
		case pass_dots:
			if (!verifyStringOrDots(file, opcode, 0, 1, nofor)) {
				return 0;
			}
			passLinepos++;
			passGetDots(&passLine, &passLinepos, &passHoldString, file);
			if (!appendInstructionChar(file, passInstructions, &passIC, pass_dots))
				return 0;
			if (passHoldString.length == 0) {
				compileError(file, "expected dot pattern after @ operand in action part");
				return 0;
			}
		/* shared tail for string/dots operands: emit length then chars */
		actionDoCharsDots:
			if (passIC >= MAXSTRING) {
				compileError(
						file, "@ operand in action part of multipass operand too long");
				return 0;
			}
			if (!appendInstructionChar(
						file, passInstructions, &passIC, passHoldString.length))
				return 0;
			for (kk = 0; kk < passHoldString.length; kk++) {
				if (passIC >= MAXSTRING) {
					compileError(file,
							"@ operand in action part of multipass operand too long");
					return 0;
				}
				if (!appendInstructionChar(
							file, passInstructions, &passIC, passHoldString.chars[kk]))
					return 0;
			}
			break;
		case pass_variable:
			passLinepos++;
			if (!passGetVariableNumber(file, &passLine, &passLinepos, &passHoldNumber))
				return 0;
			switch (passLine.chars[passLinepos]) {
			case pass_eq:
				if (!appendInstructionChar(file, passInstructions, &passIC, pass_eq))
					return 0;
				if (!appendInstructionChar(
							file, passInstructions, &passIC, passHoldNumber))
					return 0;
				passLinepos++;
				passGetNumber(&passLine, &passLinepos, &passHoldNumber);
				if (!appendInstructionChar(
							file, passInstructions, &passIC, passHoldNumber))
					return 0;
				break;
			case pass_plus:
			case pass_hyphen:
				/* increment/decrement of a variable */
				if (!appendInstructionChar(file, passInstructions, &passIC,
							passLine.chars[passLinepos++]))
					return 0;
				if (!appendInstructionChar(
							file, passInstructions, &passIC, passHoldNumber))
					return 0;
				break;
			default:
				compileError(file, "incorrect variable operator in action part");
				return 0;
			}
			break;
		case pass_copy:
			if (!appendInstructionChar(file, passInstructions, &passIC, pass_copy))
				return 0;
			passLinepos++;
			break;
		case pass_omit:
			if (!appendInstructionChar(file, passInstructions, &passIC, pass_omit))
				return 0;
			passLinepos++;
			break;
		case pass_groupreplace:
		case pass_groupstart:
		case pass_groupend: {
			passLinepos++;
			passGetName(&passLine, &passLinepos, &passHoldString);
			TranslationTableOffset ruleOffset = findRuleName(&passHoldString, *table);
			if (ruleOffset)
				rule = (TranslationTableRule *)&(*table)->ruleArea[ruleOffset];
			if (rule && rule->opcode == CTO_Grouping) {
				if (!appendInstructionChar(file, passInstructions, &passIC, passSubOp))
					return 0;
				if (!appendInstructionChar(
							file, passInstructions, &passIC, ruleOffset >> 16))
					return 0;
				if (!appendInstructionChar(
							file, passInstructions, &passIC, ruleOffset & 0xffff))
					return 0;
				break;
			}
			compileError(file, "%s is not a grouping name",
					_lou_showString(&passHoldString.chars[0], passHoldString.length, 0));
			return 0;
		}
		case pass_swap: {
			passLinepos++;
			passGetName(&passLine, &passLinepos, &passHoldString);
			TranslationTableOffset ruleOffset = findRuleName(&passHoldString, *table);
			if (ruleOffset)
				rule = (TranslationTableRule *)&(*table)->ruleArea[ruleOffset];
			if (rule &&
					(rule->opcode == CTO_SwapCc || rule->opcode == CTO_SwapCd ||
							rule->opcode == CTO_SwapDd)) {
				if (!appendInstructionChar(file, passInstructions, &passIC, pass_swap))
					return 0;
				if (!appendInstructionChar(
							file, passInstructions, &passIC, ruleOffset >> 16))
					return 0;
				if (!appendInstructionChar(
							file, passInstructions, &passIC, ruleOffset & 0xffff))
					return 0;
				break;
			}
			compileError(file, "%s is not a swap name.",
					_lou_showString(&passHoldString.chars[0], passHoldString.length, 0));
			return 0;
			break;
		}
		default:
			compileError(file, "incorrect operator in action part");
			return 0;
		}
	}

	/* Analyze and add rule */
	passRuleDots.length = passIC;

	{
		/* derive the rule's character anchor from the compiled test part */
		widechar *characters;
		int length;
		int found = passFindCharacters(
				file, passInstructions, passRuleDots.length, &characters, &length);

		if (!found) return 0;

		if (characters) {
			for (k = 0; k < length; k += 1) passRuleChars.chars[k] = characters[k];
			passRuleChars.length = k;
		}
	}

	if (!addRule(file, opcode, &passRuleChars, &passRuleDots, 0, 0, NULL, NULL, noback,
				nofor, table))
		return 0;
	return 1;
} | 0 | [
"CWE-787"
] | liblouis | 2e4772befb2b1c37cb4b9d6572945115ee28630a | 312,915,430,097,009,240,000,000,000,000,000,000,000 | 449 | Prevent an invalid memory writes in compileRule
Thanks to Han Zheng for reporting it
Fixes #1214 |
rbtk_EventSetupProc(ClientData clientData, int flag)
{
Tcl_Time tcl_time;
tcl_time.sec = 0;
tcl_time.usec = 1000L * (long)no_event_tick;
Tcl_SetMaxBlockTime(&tcl_time);
} | 0 | [] | tk | ebd0fc80d62eeb7b8556522256f8d035e013eb65 | 268,187,666,943,638,700,000,000,000,000,000,000,000 | 7 | tcltklib.c: check argument
* ext/tk/tcltklib.c (ip_cancel_eval_core): check argument type and
length.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@51468 b2dd03c8-39d4-4d8f-98ff-823fe69b080e |
int ZEXPORT deflateTune(strm, good_length, max_lazy, nice_length, max_chain)
z_streamp strm;
int good_length;
int max_lazy;
int nice_length;
int max_chain;
{
deflate_state *s;
if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
s = strm->state;
s->good_match = (uInt)good_length;
s->max_lazy_match = (uInt)max_lazy;
s->nice_match = nice_length;
s->max_chain_length = (uInt)max_chain;
return Z_OK;
} | 0 | [
"CWE-284",
"CWE-787"
] | zlib | 5c44459c3b28a9bd3283aaceab7c615f8020c531 | 28,866,034,578,321,216,000,000,000,000,000,000,000 | 17 | Fix a bug that can crash deflate on some input when using Z_FIXED.
This bug was reported by Danilo Ramos of Eideticom, Inc. It has
lain in wait 13 years before being found! The bug was introduced
in zlib 1.2.2.2, with the addition of the Z_FIXED option. That
option forces the use of fixed Huffman codes. For rare inputs with
a large number of distant matches, the pending buffer into which
the compressed data is written can overwrite the distance symbol
table which it overlays. That results in corrupted output due to
invalid distances, and can result in out-of-bound accesses,
crashing the application.
The fix here combines the distance buffer and literal/length
buffers into a single symbol buffer. Now three bytes of pending
buffer space are opened up for each literal or length/distance
pair consumed, instead of the previous two bytes. This assures
that the pending buffer cannot overwrite the symbol table, since
the maximum fixed code compressed length/distance is 31 bits, and
since there are four bytes of pending space for every three bytes
of symbol space. |
static int cardos_finish(sc_card_t *card)
{
int r = 0;
if (card == NULL )
return 0;
SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE);
/* free priv data */
if (card->drv_data) { /* priv */
free(card->drv_data);
card->drv_data = NULL;
}
SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, r);
} | 0 | [] | OpenSC | 1252aca9f10771ef5ba8405e73cf2da50827958f | 244,173,873,168,509,100,000,000,000,000,000,000,000 | 17 | cardos: Correctly calculate the left bytes to avoid buffer overrun
Thanks oss-fuzz
https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=29912 |
static int getDepth(const std::shared_ptr<SerializedScope> chain) {
int depth = 0;
const SerializedScope *current = chain.get();
while (current) {
depth += 1;
current = current->parentScope.get();
}
return depth;
} | 0 | [
"CWE-125",
"CWE-787"
] | hermes | 091835377369c8fd5917d9b87acffa721ad2a168 | 141,036,847,685,002,110,000,000,000,000,000,000,000 | 9 | Correctly restore whether or not a function is an inner generator
Summary:
If a generator was large enough to be lazily compiled, we would lose
that information when reconstituting the function's context. This meant
the function was generated as a regular function instead of a generator.
#utd-hermes-ignore-android
Reviewed By: tmikov
Differential Revision: D23580247
fbshipit-source-id: af5628bf322cbdc7c7cdfbb5f8d0756328518ea1 |
FLAC__bool read_subframe_constant_(FLAC__StreamDecoder *decoder, unsigned channel, unsigned bps, FLAC__bool do_full_decode)
{
FLAC__Subframe_Constant *subframe = &decoder->private_->frame.subframes[channel].data.constant;
FLAC__int32 x;
unsigned i;
FLAC__int32 *output = decoder->private_->output[channel];
decoder->private_->frame.subframes[channel].type = FLAC__SUBFRAME_TYPE_CONSTANT;
if(!FLAC__bitreader_read_raw_int32(decoder->private_->input, &x, bps))
return false; /* read_callback_ sets the state for us */
subframe->value = x;
/* decode the subframe */
if(do_full_decode) {
for(i = 0; i < decoder->private_->frame.header.blocksize; i++)
output[i] = x;
}
return true;
} | 0 | [
"CWE-119"
] | flac | 5b3033a2b355068c11fe637e14ac742d273f076e | 15,708,358,965,292,940,000,000,000,000,000,000,000 | 22 | src/libFLAC/stream_decoder.c : Fix buffer read overflow.
This is CVE-2014-8962.
Reported-by: Michele Spagnuolo,
Google Security Team <[email protected]> |
TPM_RESULT SWTPM_NVRAM_Store_Volatile(void)
{
TPM_RESULT rc = 0;
char *name = TPM_VOLATILESTATE_NAME;
uint32_t tpm_number = 0;
unsigned char *buffer = NULL;
uint32_t buflen;
TPM_DEBUG(" SWTPM_Store_Volatile: Name %s\n", name);
if (rc == 0) {
rc = TPMLIB_VolatileAll_Store(&buffer, &buflen);
}
if (rc == 0) {
/* map name to the rooted filename */
rc = SWTPM_NVRAM_StoreData(buffer, buflen, tpm_number, name);
}
free(buffer);
return rc;
} | 0 | [] | swtpm | cae5991423826f21b11f7a5bc7f7b2b538bde2a2 | 207,318,545,930,582,670,000,000,000,000,000,000,000 | 21 | swtpm: Do not follow symlinks when opening lockfile (CVE-2020-28407)
This patch addresses CVE-2020-28407.
Prevent us from following symliks when we open the lockfile
for writing.
Signed-off-by: Stefan Berger <[email protected]> |
utf32le_mbc_to_code(const UChar* p, const UChar* end ARG_UNUSED)
{
if (end - p < 4) return 0;
return (OnigCodePoint )(((p[3] * 256 + p[2]) * 256 + p[1]) * 256 + p[0]);
} | 0 | [
"CWE-125"
] | php-src | 9d6c59eeea88a3e9d7039cb4fed5126ef704593a | 308,873,503,644,618,030,000,000,000,000,000,000 | 5 | Fix bug #77418 - Heap overflow in utf32be_mbc_to_code |
static Bool ttml_check_range(TTMLInterval *interval, s64 ts_begin, s64 ts_end)
{
//if in current interval, push node
if ((ts_begin != -1) && (ts_end != -1) && ((ts_begin>=interval->begin) && (ts_end<=interval->end))
) {
return GF_TRUE;
}
//begin not set, end set: in range if end less than interval end range
else if ((ts_begin==-1) && (ts_end != -1) && (ts_end<=interval->end)) {
return GF_TRUE;
}
//begin set, end not set: in range if begin greater than interval begin range
else if ((ts_begin!=-1) && (ts_end==-1) && (ts_begin>=interval->begin)) {
return GF_TRUE;
}
return GF_FALSE;
} | 0 | [
"CWE-276"
] | gpac | 96699aabae042f8f55cf8a85fa5758e3db752bae | 148,530,419,523,844,860,000,000,000,000,000,000,000 | 17 | fixed #2061 |
evdev_tag_tablet_touchpad(struct evdev_device *device)
{
device->tags |= EVDEV_TAG_TABLET_TOUCHPAD;
} | 0 | [
"CWE-134"
] | libinput | a423d7d3269dc32a87384f79e29bb5ac021c83d1 | 221,313,105,660,788,900,000,000,000,000,000,000,000 | 4 | evdev: strip the device name of format directives
This fixes a format string vulnerabilty.
evdev_log_message() composes a format string consisting of a fixed
prefix (including the rendered device name) and the passed-in format
buffer. This format string is then passed with the arguments to the
actual log handler, which usually and eventually ends up being printf.
If the device name contains a printf-style format directive, these ended
up in the format string and thus get interpreted correctly, e.g. for a
device "Foo%sBar" the log message vs printf invocation ends up being:
evdev_log_message(device, "some message %s", "some argument");
printf("event9 - Foo%sBar: some message %s", "some argument");
This can enable an attacker to execute malicious code with the
privileges of the process using libinput.
To exploit this, an attacker needs to be able to create a kernel device
with a malicious name, e.g. through /dev/uinput or a Bluetooth device.
To fix this, convert any potential format directives in the device name
by duplicating percentages.
Pre-rendering the device to avoid the issue altogether would be nicer
but the current log level hooks do not easily allow for this. The device
name is the only user-controlled part of the format string.
A second potential issue is the sysname of the device which is also
sanitized.
This issue was found by Albin Eldstål-Ahrens and Benjamin Svensson from
Assured AB, and independently by Lukas Lamster.
Fixes #752
Signed-off-by: Peter Hutterer <[email protected]> |
static void parse_fpe(struct pt_regs *regs)
{
int code = 0;
flush_fp_to_thread(current);
code = __parse_fpscr(current->thread.fp_state.fpscr);
_exception(SIGFPE, regs, code, regs->nip);
} | 0 | [] | linux | 5d176f751ee3c6eededd984ad409bff201f436a7 | 128,426,823,249,689,540,000,000,000,000,000,000,000 | 10 | powerpc: tm: Enable transactional memory (TM) lazily for userspace
Currently the MSR TM bit is always set if the hardware is TM capable.
This adds extra overhead as it means the TM SPRS (TFHAR, TEXASR and
TFAIR) must be swapped for each process regardless of if they use TM.
For processes that don't use TM the TM MSR bit can be turned off
allowing the kernel to avoid the expensive swap of the TM registers.
A TM unavailable exception will occur if a thread does use TM and the
kernel will enable MSR_TM and leave it so for some time afterwards.
Signed-off-by: Cyril Bur <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]> |
void kvm_free_physmem(struct kvm *kvm)
{
int i;
struct kvm_memslots *slots = kvm->memslots;
for (i = 0; i < slots->nmemslots; ++i)
kvm_free_physmem_slot(&slots->memslots[i], NULL);
kfree(kvm->memslots);
} | 0 | [
"CWE-20",
"CWE-787"
] | linux | fa3d315a4ce2c0891cdde262562e710d95fba19e | 206,230,908,987,673,300,000,000,000,000,000,000,000 | 10 | KVM: Validate userspace_addr of memslot when registered
This way, we can avoid checking the user space address many times when
we read the guest memory.
Although we can do the same for write if we check which slots are
writable, we do not care write now: reading the guest memory happens
more often than writing.
[avi: change VERIFY_READ to VERIFY_WRITE]
Signed-off-by: Takuya Yoshikawa <[email protected]>
Signed-off-by: Avi Kivity <[email protected]> |
struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
const struct sctp_sndrcvinfo *sinfo,
int data_len, __u8 flags, __u16 ssn)
{
struct sctp_chunk *retval;
struct sctp_datahdr dp;
int chunk_len;
/* We assign the TSN as LATE as possible, not here when
* creating the chunk.
*/
dp.tsn = 0;
dp.stream = htons(sinfo->sinfo_stream);
dp.ppid = sinfo->sinfo_ppid;
/* Set the flags for an unordered send. */
if (sinfo->sinfo_flags & SCTP_UNORDERED) {
flags |= SCTP_DATA_UNORDERED;
dp.ssn = 0;
} else
dp.ssn = htons(ssn);
chunk_len = sizeof(dp) + data_len;
retval = sctp_make_data(asoc, flags, chunk_len);
if (!retval)
goto nodata;
retval->subh.data_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
nodata:
return retval;
} | 0 | [
"CWE-20",
"CWE-399"
] | linux | 9de7922bc709eee2f609cd01d98aaedc4cf5ea74 | 45,836,706,730,831,180,000,000,000,000,000,000,000 | 33 | net: sctp: fix skb_over_panic when receiving malformed ASCONF chunks
Commit 6f4c618ddb0 ("SCTP : Add paramters validity check for
ASCONF chunk") added basic verification of ASCONF chunks, however,
it is still possible to remotely crash a server by sending a
special crafted ASCONF chunk, even up to pre 2.6.12 kernels:
skb_over_panic: text:ffffffffa01ea1c3 len:31056 put:30768
head:ffff88011bd81800 data:ffff88011bd81800 tail:0x7950
end:0x440 dev:<NULL>
------------[ cut here ]------------
kernel BUG at net/core/skbuff.c:129!
[...]
Call Trace:
<IRQ>
[<ffffffff8144fb1c>] skb_put+0x5c/0x70
[<ffffffffa01ea1c3>] sctp_addto_chunk+0x63/0xd0 [sctp]
[<ffffffffa01eadaf>] sctp_process_asconf+0x1af/0x540 [sctp]
[<ffffffff8152d025>] ? _read_unlock_bh+0x15/0x20
[<ffffffffa01e0038>] sctp_sf_do_asconf+0x168/0x240 [sctp]
[<ffffffffa01e3751>] sctp_do_sm+0x71/0x1210 [sctp]
[<ffffffff8147645d>] ? fib_rules_lookup+0xad/0xf0
[<ffffffffa01e6b22>] ? sctp_cmp_addr_exact+0x32/0x40 [sctp]
[<ffffffffa01e8393>] sctp_assoc_bh_rcv+0xd3/0x180 [sctp]
[<ffffffffa01ee986>] sctp_inq_push+0x56/0x80 [sctp]
[<ffffffffa01fcc42>] sctp_rcv+0x982/0xa10 [sctp]
[<ffffffffa01d5123>] ? ipt_local_in_hook+0x23/0x28 [iptable_filter]
[<ffffffff8148bdc9>] ? nf_iterate+0x69/0xb0
[<ffffffff81496d10>] ? ip_local_deliver_finish+0x0/0x2d0
[<ffffffff8148bf86>] ? nf_hook_slow+0x76/0x120
[<ffffffff81496d10>] ? ip_local_deliver_finish+0x0/0x2d0
[<ffffffff81496ded>] ip_local_deliver_finish+0xdd/0x2d0
[<ffffffff81497078>] ip_local_deliver+0x98/0xa0
[<ffffffff8149653d>] ip_rcv_finish+0x12d/0x440
[<ffffffff81496ac5>] ip_rcv+0x275/0x350
[<ffffffff8145c88b>] __netif_receive_skb+0x4ab/0x750
[<ffffffff81460588>] netif_receive_skb+0x58/0x60
This can be triggered e.g., through a simple scripted nmap
connection scan injecting the chunk after the handshake, for
example, ...
-------------- INIT[ASCONF; ASCONF_ACK] ------------->
<----------- INIT-ACK[ASCONF; ASCONF_ACK] ------------
-------------------- COOKIE-ECHO -------------------->
<-------------------- COOKIE-ACK ---------------------
------------------ ASCONF; UNKNOWN ------------------>
... where ASCONF chunk of length 280 contains 2 parameters ...
1) Add IP address parameter (param length: 16)
2) Add/del IP address parameter (param length: 255)
... followed by an UNKNOWN chunk of e.g. 4 bytes. Here, the
Address Parameter in the ASCONF chunk is even missing, too.
This is just an example and similarly-crafted ASCONF chunks
could be used just as well.
The ASCONF chunk passes through sctp_verify_asconf() as all
parameters passed sanity checks, and after walking, we ended
up successfully at the chunk end boundary, and thus may invoke
sctp_process_asconf(). Parameter walking is done with
WORD_ROUND() to take padding into account.
In sctp_process_asconf()'s TLV processing, we may fail in
sctp_process_asconf_param() e.g., due to removal of the IP
address that is also the source address of the packet containing
the ASCONF chunk, and thus we need to add all TLVs after the
failure to our ASCONF response to remote via helper function
sctp_add_asconf_response(), which basically invokes a
sctp_addto_chunk() adding the error parameters to the given
skb.
When walking to the next parameter this time, we proceed
with ...
length = ntohs(asconf_param->param_hdr.length);
asconf_param = (void *)asconf_param + length;
... instead of the WORD_ROUND()'ed length, thus resulting here
in an off-by-one that leads to reading the follow-up garbage
parameter length of 12336, and thus throwing an skb_over_panic
for the reply when trying to sctp_addto_chunk() next time,
which implicitly calls the skb_put() with that length.
Fix it by using sctp_walk_params() [ which is also used in
INIT parameter processing ] macro in the verification *and*
in ASCONF processing: it will make sure we don't spill over,
that we walk parameters WORD_ROUND()'ed. Moreover, we're being
more defensive and guard against unknown parameter types and
missized addresses.
Joint work with Vlad Yasevich.
Fixes: b896b82be4ae ("[SCTP] ADDIP: Support for processing incoming ASCONF_ACK chunks.")
Signed-off-by: Daniel Borkmann <[email protected]>
Signed-off-by: Vlad Yasevich <[email protected]>
Acked-by: Neil Horman <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static int csnmp_init(void) {
call_snmp_init_once();
return (0);
} /* int csnmp_init */ | 0 | [
"CWE-415"
] | collectd | d16c24542b2f96a194d43a73c2e5778822b9cb47 | 189,067,800,403,932,970,000,000,000,000,000,000,000 | 5 | snmp plugin: Fix double free of request PDU
snmp_sess_synch_response() always frees request PDU, in both case of request
error and success. If error condition occurs inside of `while (status == 0)`
loop, double free of `req` happens.
Issue: #2291
Signed-off-by: Florian Forster <[email protected]> |
static int hdr_validate_crypt_segment(struct crypt_device *cd,
json_object *jobj, const char *key, json_object *jobj_digests,
uint64_t offset __attribute__((unused)), uint64_t size)
{
json_object *jobj_ivoffset, *jobj_sector_size, *jobj_integrity;
uint32_t sector_size;
uint64_t ivoffset;
if (!(jobj_ivoffset = json_contains(cd, jobj, key, "Segment", "iv_tweak", json_type_string)) ||
!json_contains(cd, jobj, key, "Segment", "encryption", json_type_string) ||
!(jobj_sector_size = json_contains(cd, jobj, key, "Segment", "sector_size", json_type_int)))
return 1;
/* integrity */
if (json_object_object_get_ex(jobj, "integrity", &jobj_integrity)) {
if (!json_contains(cd, jobj, key, "Segment", "integrity", json_type_object) ||
!json_contains(cd, jobj_integrity, key, "Segment integrity", "type", json_type_string) ||
!json_contains(cd, jobj_integrity, key, "Segment integrity", "journal_encryption", json_type_string) ||
!json_contains(cd, jobj_integrity, key, "Segment integrity", "journal_integrity", json_type_string))
return 1;
}
/* enforce uint32_t type */
if (!validate_json_uint32(jobj_sector_size)) {
log_dbg(cd, "Illegal field \"sector_size\":%s.",
json_object_get_string(jobj_sector_size));
return 1;
}
sector_size = crypt_jobj_get_uint32(jobj_sector_size);
if (!sector_size || MISALIGNED_512(sector_size)) {
log_dbg(cd, "Illegal sector size: %" PRIu32, sector_size);
return 1;
}
if (!numbered(cd, "iv_tweak", json_object_get_string(jobj_ivoffset)) ||
!json_str_to_uint64(jobj_ivoffset, &ivoffset)) {
log_dbg(cd, "Illegal iv_tweak value.");
return 1;
}
if (size % sector_size) {
log_dbg(cd, "Size field has to be aligned to sector size: %" PRIu32, sector_size);
return 1;
}
return !segment_has_digest(key, jobj_digests);
} | 0 | [
"CWE-345"
] | cryptsetup | 0113ac2d889c5322659ad0596d4cfc6da53e356c | 119,870,907,509,649,260,000,000,000,000,000,000,000 | 48 | Fix CVE-2021-4122 - LUKS2 reencryption crash recovery attack
Fix possible attacks against data confidentiality through LUKS2 online
reencryption extension crash recovery.
An attacker can modify on-disk metadata to simulate decryption in
progress with crashed (unfinished) reencryption step and persistently
decrypt part of the LUKS device.
This attack requires repeated physical access to the LUKS device but
no knowledge of user passphrases.
The decryption step is performed after a valid user activates
the device with a correct passphrase and modified metadata.
There are no visible warnings for the user that such recovery happened
(except using the luksDump command). The attack can also be reversed
afterward (simulating crashed encryption from a plaintext) with
possible modification of revealed plaintext.
The problem was caused by reusing a mechanism designed for actual
reencryption operation without reassessing the security impact for new
encryption and decryption operations. While the reencryption requires
calculating and verifying both key digests, no digest was needed to
initiate decryption recovery if the destination is plaintext (no
encryption key). Also, some metadata (like encryption cipher) is not
protected, and an attacker could change it. Note that LUKS2 protects
visible metadata only when a random change occurs. It does not protect
against intentional modification but such modification must not cause
a violation of data confidentiality.
The fix introduces additional digest protection of reencryption
metadata. The digest is calculated from known keys and critical
reencryption metadata. Now an attacker cannot create correct metadata
digest without knowledge of a passphrase for used keyslots.
For more details, see LUKS2 On-Disk Format Specification version 1.1.0. |
leaving_window(win_T *win)
{
// Only matters for a prompt window.
if (!bt_prompt(win->w_buffer))
return;
// When leaving a prompt window stop Insert mode and perhaps restart
// it when entering that window again.
win->w_buffer->b_prompt_insert = restart_edit;
if (restart_edit != 0 && mode_displayed)
clear_cmdline = TRUE; /* unshow mode later */
restart_edit = NUL;
// When leaving the window (or closing the window) was done from a
// callback we need to break out of the Insert mode loop and restart Insert
// mode when entering the window again.
if (State & INSERT)
{
stop_insert_mode = TRUE;
if (win->w_buffer->b_prompt_insert == NUL)
win->w_buffer->b_prompt_insert = 'A';
}
} | 0 | [
"CWE-416"
] | vim | ec66c41d84e574baf8009dbc0bd088d2bc5b2421 | 284,800,429,554,741,300,000,000,000,000,000,000,000 | 23 | patch 8.1.2136: using freed memory with autocmd from fuzzer
Problem: using freed memory with autocmd from fuzzer. (Dhiraj Mishra,
Dominique Pelle)
Solution: Avoid using "wp" after autocommands. (closes #5041) |
nautilus_file_is_in_trash (NautilusFile *file)
{
g_assert (NAUTILUS_IS_FILE (file));
return nautilus_directory_is_in_trash (file->details->directory);
} | 0 | [] | nautilus | 7632a3e13874a2c5e8988428ca913620a25df983 | 71,826,959,305,012,230,000,000,000,000,000,000,000 | 6 | Check for trusted desktop file launchers.
2009-02-24 Alexander Larsson <[email protected]>
* libnautilus-private/nautilus-directory-async.c:
Check for trusted desktop file launchers.
* libnautilus-private/nautilus-file-private.h:
* libnautilus-private/nautilus-file.c:
* libnautilus-private/nautilus-file.h:
Add nautilus_file_is_trusted_link.
Allow unsetting of custom display name.
* libnautilus-private/nautilus-mime-actions.c:
Display dialog when trying to launch a non-trusted desktop file.
svn path=/trunk/; revision=15003 |
CURLcode Curl_posttransfer(struct Curl_easy *data)
{
#if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
/* restore the signal handler for SIGPIPE before we get back */
if(!data->set.no_signal)
signal(SIGPIPE, data->state.prev_signal);
#else
(void)data; /* unused parameter */
#endif
return CURLE_OK;
} | 0 | [
"CWE-200"
] | curl | 7214288898f5625a6cc196e22a74232eada7861c | 162,747,739,079,201,390,000,000,000,000,000,000,000 | 12 | transfer: strip credentials from the auto-referer header field
Added test 2081 to verify.
CVE-2021-22876
Bug: https://curl.se/docs/CVE-2021-22876.html |
void addReplyError(client *c, const char *err) {
addReplyErrorLength(c,err,strlen(err));
} | 0 | [
"CWE-254"
] | redis | 874804da0c014a7d704b3d285aa500098a931f50 | 309,529,162,332,573,050,000,000,000,000,000,000,000 | 3 | Security: Cross Protocol Scripting protection.
This is an attempt at mitigating problems due to cross protocol
scripting, an attack targeting services using line oriented protocols
like Redis that can accept HTTP requests as valid protocol, by
discarding the invalid parts and accepting the payloads sent, for
example, via a POST request.
For this to be effective, when we detect POST and Host: and terminate
the connection asynchronously, the networking code was modified in order
to never process further input. It was later verified that in a
pipelined request containing a POST command, the successive commands are
not executed. |
bool DecimalQuantity::hasIntegerValue() const {
return scale >= 0;
} | 0 | [
"CWE-190"
] | icu | 53d8c8f3d181d87a6aa925b449b51c4a2c922a51 | 148,085,897,714,957,800,000,000,000,000,000,000,000 | 3 | ICU-20246 Fixing another integer overflow in number parsing. |
TiledInputFile::setFrameBuffer (const FrameBuffer &frameBuffer)
{
Lock lock (*_data->_streamData);
//
// Set the frame buffer
//
//
// Check if the new frame buffer descriptor is
// compatible with the image file header.
//
const ChannelList &channels = _data->header.channels();
for (FrameBuffer::ConstIterator j = frameBuffer.begin();
j != frameBuffer.end();
++j)
{
ChannelList::ConstIterator i = channels.find (j.name());
if (i == channels.end())
continue;
if (i.channel().xSampling != j.slice().xSampling ||
i.channel().ySampling != j.slice().ySampling)
THROW (IEX_NAMESPACE::ArgExc, "X and/or y subsampling factors "
"of \"" << i.name() << "\" channel "
"of input file \"" << fileName() << "\" are "
"not compatible with the frame buffer's "
"subsampling factors.");
}
//
// Initialize the slice table for readPixels().
//
vector<TInSliceInfo> slices;
ChannelList::ConstIterator i = channels.begin();
for (FrameBuffer::ConstIterator j = frameBuffer.begin();
j != frameBuffer.end();
++j)
{
while (i != channels.end() && strcmp (i.name(), j.name()) < 0)
{
//
// Channel i is present in the file but not
// in the frame buffer; data for channel i
// will be skipped during readPixels().
//
slices.push_back (TInSliceInfo (i.channel().type,
i.channel().type,
0, // base
0, // xStride
0, // yStride
false, // fill
true, // skip
0.0)); // fillValue
++i;
}
bool fill = false;
if (i == channels.end() || strcmp (i.name(), j.name()) > 0)
{
//
// Channel i is present in the frame buffer, but not in the file.
// In the frame buffer, slice j will be filled with a default value.
//
fill = true;
}
slices.push_back (TInSliceInfo (j.slice().type,
fill? j.slice().type: i.channel().type,
j.slice().base,
j.slice().xStride,
j.slice().yStride,
fill,
false, // skip
j.slice().fillValue,
(j.slice().xTileCoords)? 1: 0,
(j.slice().yTileCoords)? 1: 0));
if (i != channels.end() && !fill)
++i;
}
while (i != channels.end())
{
//
// Channel i is present in the file but not
// in the frame buffer; data for channel i
// will be skipped during readPixels().
//
slices.push_back (TInSliceInfo (i.channel().type,
i.channel().type,
0, // base
0, // xStride
0, // yStride
false, // fill
true, // skip
0.0)); // fillValue
++i;
}
//
// Store the new frame buffer.
//
_data->frameBuffer = frameBuffer;
_data->slices = slices;
} | 0 | [
"CWE-125"
] | openexr | e79d2296496a50826a15c667bf92bdc5a05518b4 | 66,216,776,796,868,640,000,000,000,000,000,000,000 | 116 | fix memory leaks and invalid memory accesses
Signed-off-by: Peter Hillman <[email protected]> |
TEST_P(Http2UpstreamIntegrationTest, Retry) { testRetry(); } | 0 | [
"CWE-400"
] | envoy | 0e49a495826ea9e29134c1bd54fdeb31a034f40c | 54,959,061,249,776,360,000,000,000,000,000,000,000 | 1 | http/2: add stats and stream flush timeout (#139)
This commit adds a new stream flush timeout to guard against a
remote server that does not open window once an entire stream has
been buffered for flushing. Additional stats have also been added
to better understand the codecs view of active streams as well as
amount of data buffered.
Signed-off-by: Matt Klein <[email protected]> |
static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
{
__u16 *data = (__u16 *) &rq->ifr_ifru;
pegasus_t *pegasus = netdev_priv(net);
int res;
switch (cmd) {
case SIOCDEVPRIVATE:
data[0] = pegasus->phy;
case SIOCDEVPRIVATE + 1:
read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]);
res = 0;
break;
case SIOCDEVPRIVATE + 2:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
write_mii_word(pegasus, pegasus->phy, data[1] & 0x1f, &data[2]);
res = 0;
break;
default:
res = -EOPNOTSUPP;
}
return res;
} | 0 | [
"CWE-119",
"CWE-284"
] | linux | 5593523f968bc86d42a035c6df47d5e0979b5ace | 117,531,444,817,946,260,000,000,000,000,000,000,000 | 24 | pegasus: Use heap buffers for all register access
Allocating USB buffers on the stack is not portable, and no longer
works on x86_64 (with VMAP_STACK enabled as per default).
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
References: https://bugs.debian.org/852556
Reported-by: Lisandro Damián Nicanor Pérez Meyer <[email protected]>
Tested-by: Lisandro Damián Nicanor Pérez Meyer <[email protected]>
Signed-off-by: Ben Hutchings <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
bool CModule::OnBoot() { return true; } | 0 | [
"CWE-20",
"CWE-264"
] | znc | 8de9e376ce531fe7f3c8b0aa4876d15b479b7311 | 69,381,603,867,751,580,000,000,000,000,000,000,000 | 1 | Fix remote code execution and privilege escalation vulnerability.
To trigger this, need to have a user already.
Thanks for Jeriko One <[email protected]> for finding and reporting this.
CVE-2019-12816 |
explicit SSLAcceptFiberRunner(EventBase* evb) : SSLAcceptEvbRunner(evb) {} | 0 | [
"CWE-125"
] | folly | c321eb588909646c15aefde035fd3133ba32cdee | 150,165,526,721,077,710,000,000,000,000,000,000,000 | 1 | Handle close_notify as standard writeErr in AsyncSSLSocket.
Summary: Fixes CVE-2019-11934
Reviewed By: mingtaoy
Differential Revision: D18020613
fbshipit-source-id: db82bb250e53f0d225f1280bd67bc74abd417836 |
static void msusb_mspipes_free(MSUSB_PIPE_DESCRIPTOR** MsPipes, UINT32 NumberOfPipes)
{
UINT32 pnum = 0;
if (MsPipes)
{
for (pnum = 0; pnum < NumberOfPipes && MsPipes[pnum]; pnum++)
free(MsPipes[pnum]);
free(MsPipes);
}
} | 0 | [
"CWE-190"
] | FreeRDP | 9f77fc3dd2394373e1be753952b00dafa1a9b7da | 298,850,235,727,179,740,000,000,000,000,000,000,000 | 12 | Fixed int overflow in msusb_mspipes_read
Thanks to hac425 |
int nci_prop_rsp_packet(struct nci_dev *ndev, __u16 opcode,
struct sk_buff *skb)
{
return nci_op_rsp_packet(ndev, opcode, skb, ndev->ops->prop_ops,
ndev->ops->n_prop_ops);
} | 0 | [] | linux | 48b71a9e66c2eab60564b1b1c85f4928ed04e406 | 87,811,475,149,256,290,000,000,000,000,000,000,000 | 6 | NFC: add NCI_UNREG flag to eliminate the race
There are two sites that calls queue_work() after the
destroy_workqueue() and lead to possible UAF.
The first site is nci_send_cmd(), which can happen after the
nci_close_device as below
nfcmrvl_nci_unregister_dev | nfc_genl_dev_up
nci_close_device |
flush_workqueue |
del_timer_sync |
nci_unregister_device | nfc_get_device
destroy_workqueue | nfc_dev_up
nfc_unregister_device | nci_dev_up
device_del | nci_open_device
| __nci_request
| nci_send_cmd
| queue_work !!!
Another site is nci_cmd_timer, awaked by the nci_cmd_work from the
nci_send_cmd.
... | ...
nci_unregister_device | queue_work
destroy_workqueue |
nfc_unregister_device | ...
device_del | nci_cmd_work
| mod_timer
| ...
| nci_cmd_timer
| queue_work !!!
For the above two UAF, the root cause is that the nfc_dev_up can race
between the nci_unregister_device routine. Therefore, this patch
introduce NCI_UNREG flag to easily eliminate the possible race. In
addition, the mutex_lock in nci_close_device can act as a barrier.
Signed-off-by: Lin Ma <[email protected]>
Fixes: 6a2968aaf50c ("NFC: basic NCI protocol implementation")
Reviewed-by: Jakub Kicinski <[email protected]>
Reviewed-by: Krzysztof Kozlowski <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]> |
static void sha1_update(SHA_CTX *c,const void *data,size_t len)
{ const unsigned char *ptr = data;
size_t res;
if ((res = c->num)) {
res = SHA_CBLOCK-res;
if (len<res) res=len;
SHA1_Update (c,ptr,res);
ptr += res;
len -= res;
}
res = len % SHA_CBLOCK;
len -= res;
if (len) {
sha1_block_data_order(c,ptr,len/SHA_CBLOCK);
ptr += len;
c->Nh += len>>29;
c->Nl += len<<=3;
if (c->Nl<(unsigned int)len) c->Nh++;
}
if (res)
SHA1_Update(c,ptr,res);
} | 0 | [
"CWE-310"
] | openssl | 746c6f3a533b1eb50b909147b35fa1b0e5c61f59 | 70,705,834,250,778,780,000,000,000,000,000,000,000 | 27 | e_aes_cbc_hmac_sha1.c: align calculated MAC at cache line. |
// Send a locally generated (Envoy-originated) reply for this stream.
//
// The delivery path depends on how far the response has already progressed:
//  - no response headers produced yet: send the reply through the encoder
//    filter chain;
//  - headers produced but no non-informational headers encoded downstream
//    yet: bypass the filter chain and write directly to the codec so filter
//    state machines are not corrupted;
//  - non-informational headers already sent: the only safe option left is to
//    reset the stream.
// Before any of that, the currently running decode/encode filter chain is
// marked aborted so iteration stops, and filters get a chance to request an
// immediate reset via onLocalReply().
void FilterManager::sendLocalReply(
    Code code, absl::string_view body,
    const std::function<void(ResponseHeaderMap& headers)>& modify_headers,
    const absl::optional<Grpc::Status::GrpcStatus> grpc_status, absl::string_view details) {
  ASSERT(!state_.under_on_local_reply_);
  // Snapshot these flags up front; they feed the filter-chain path below.
  const bool is_head_request = state_.is_head_request_;
  const bool is_grpc_request = state_.is_grpc_request_;

  // Stop filter chain iteration if local reply was sent while filter decoding or encoding callbacks
  // are running.
  if (state_.filter_call_state_ & (FilterCallState::DecodeHeaders | FilterCallState::DecodeData |
                                   FilterCallState::DecodeTrailers)) {
    state_.decoder_filter_chain_aborted_ = true;
  } else if (state_.filter_call_state_ &
             (FilterCallState::EncodeHeaders | FilterCallState::EncodeData |
              FilterCallState::EncodeTrailers)) {
    state_.encoder_filter_chain_aborted_ = true;
  }

  stream_info_.setResponseCodeDetails(details);
  // Give every filter's onLocalReply() a look; any of them may demand an
  // immediate stream reset instead of a reply.
  StreamFilterBase::LocalReplyData data{code, details, false};
  FilterManager::onLocalReply(data);
  if (data.reset_imminent_) {
    ENVOY_STREAM_LOG(debug, "Resetting stream due to {}. onLocalReply requested reset.", *this,
                     details);
    filter_manager_callbacks_.resetStream();
    return;
  }

  if (!filter_manager_callbacks_.responseHeaders().has_value()) {
    // If the response has not started at all, send the response through the filter chain.
    sendLocalReplyViaFilterChain(is_grpc_request, code, body, modify_headers, is_head_request,
                                 grpc_status, details);
  } else if (!state_.non_100_response_headers_encoded_) {
    ENVOY_STREAM_LOG(debug, "Sending local reply with details {} directly to the encoder", *this,
                     details);
    // In this case, at least the header and possibly the body has started
    // processing through the filter chain, but no non-informational headers
    // have been sent downstream. To ensure that filters don't get their
    // state machine screwed up, bypass the filter chain and send the local
    // reply directly to the codec.
    //
    sendDirectLocalReply(code, body, modify_headers, state_.is_head_request_, grpc_status);
  } else {
    // If we land in this branch, response headers have already been sent to the client.
    // All we can do at this point is reset the stream.
    ENVOY_STREAM_LOG(debug, "Resetting stream due to {}. Prior headers have already been sent",
                     *this, details);
    // TODO(snowp): This means we increment the tx_reset stat which we weren't doing previously.
    // Intended?
    filter_manager_callbacks_.resetStream();
  }
} | 0 | [
"CWE-416"
] | envoy | fe7c69c248f4fe5a9080c7ccb35275b5218bb5ab | 260,140,322,581,318,950,000,000,000,000,000,000,000 | 53 | internal redirect: fix a lifetime bug (#785)
Signed-off-by: Alyssa Wilk <[email protected]>
Signed-off-by: Matt Klein <[email protected]>
Signed-off-by: Pradeep Rao <[email protected]> |
/* Locate the first <body> child of root, report it through out_body_node,
 * and empty every <div> found directly under it: all child nodes of each
 * such div are detached from the list and destroyed. If no <body> exists,
 * *out_body_node is left NULL. */
static void ebu_ttd_remove_samples(GF_XMLNode *root, GF_XMLNode **out_body_node)
{
	u32 i = 0;
	GF_XMLNode *child;

	*out_body_node = NULL;
	while ((child = (GF_XMLNode*)gf_list_enum(root->content, &i)) != NULL) {
		u32 j;
		GF_XMLNode *div_node;

		if (strcmp(child->name, "body")) continue;

		*out_body_node = child;
		j = 0;
		while ((div_node = (GF_XMLNode*)gf_list_enum(child->content, &j)) != NULL) {
			if (strcmp(div_node->name, "div")) continue;
			/* drain the div: always remove index 0 so the list
			 * indices stay valid while we delete */
			while (gf_list_count(div_node->content)) {
				GF_XMLNode *sample = (GF_XMLNode*)gf_list_get(div_node->content, 0);
				gf_list_rem(div_node->content, 0);
				gf_xml_dom_node_del(sample);
			}
		}
		/* only the first <body> is processed, as before */
		return;
	}
} | 0 | [
"CWE-276"
] | gpac | 96699aabae042f8f55cf8a85fa5758e3db752bae | 223,430,205,261,559,920,000,000,000,000,000,000,000 | 26 | fixed #2061 |
// Parse a single primitive Sass value at the current lexer position
// (parent ref, !important, number/percentage/dimension, string, boolean,
// null, identifier, hex color, variable, ...).
//
// NOTE: the order of the lex<> attempts below is significant -- several
// grammars overlap (hex colors vs. numbers, dimensions vs. space lists,
// quoted strings vs. schemas), so reordering them changes what matches.
// On failure css_error() reports and aborts; the trailing `return {}`
// is unreachable.
Expression_Obj Parser::parse_value()
{
  lex< css_comments >(false);
  if (lex< ampersand >())
  {
    // A bare `&&` is almost certainly a mistaken boolean operator.
    if (match< ampersand >()) {
      warning("In Sass, \"&&\" means two copies of the parent selector. You probably want to use \"and\" instead.", pstate);
    }
    return SASS_MEMORY_NEW(Parent_Reference, pstate); }

  if (lex< kwd_important >())
  { return SASS_MEMORY_NEW(String_Constant, pstate, "!important"); }

  // parse `10%4px` into separated items and not a schema
  if (lex< sequence < percentage, lookahead < number > > >())
  { return lexed_percentage(lexed); }

  if (lex< sequence < number, lookahead< sequence < op, number > > > >())
  { return lexed_number(lexed); }

  // string may be interpolated
  if (lex< sequence < quoted_string, lookahead < exactly <'-'> > > >())
  { return parse_string(); }

  if (const char* stop = peek< value_schema >())
  { return parse_value_schema(stop); }

  // string may be interpolated
  if (lex< quoted_string >())
  { return parse_string(); }

  if (lex< kwd_true >())
  { return SASS_MEMORY_NEW(Boolean, pstate, true); }

  if (lex< kwd_false >())
  { return SASS_MEMORY_NEW(Boolean, pstate, false); }

  if (lex< kwd_null >())
  { return SASS_MEMORY_NEW(Null, pstate); }

  if (lex< identifier >()) {
    // May resolve to a named color or stay a plain string.
    return color_or_string(lexed);
  }

  if (lex< percentage >())
  { return lexed_percentage(lexed); }

  // match hex number first because 0x000 looks like a number followed by an identifier
  if (lex< sequence < alternatives< hex, hex0 >, negate < exactly<'-'> > > >())
  { return lexed_hex_color(lexed); }

  if (lex< hexa >())
  { return lexed_hex_color(lexed); }

  if (lex< sequence < exactly <'#'>, identifier > >())
  { return SASS_MEMORY_NEW(String_Quoted, pstate, lexed); }

  // also handle the 10em- foo special case
  // alternatives < exactly < '.' >, .. > -- `1.5em-.75em` is split into a list, not a binary expression
  if (lex< sequence< dimension, optional< sequence< exactly<'-'>, lookahead< alternatives < space > > > > > >())
  { return lexed_dimension(lexed); }

  if (lex< sequence< static_component, one_plus< strict_identifier > > >())
  { return SASS_MEMORY_NEW(String_Constant, pstate, lexed); }

  if (lex< number >())
  { return lexed_number(lexed); }

  if (lex< variable >())
  { return SASS_MEMORY_NEW(Variable, pstate, Util::normalize_underscores(lexed)); }

  css_error("Invalid CSS", " after ", ": expected expression (e.g. 1px, bold), was ");

  // unreachable statement
  return {};
} | 0 | [
"CWE-125"
] | libsass | eb15533b07773c30dc03c9d742865604f47120ef | 121,097,306,843,634,850,000,000,000,000,000,000,000 | 76 | Fix memory leak in `parse_ie_keyword_arg`
`kwd_arg` would never get freed when there was a parse error in
`parse_ie_keyword_arg`.
Closes #2656 |
/* Create one screensaver/unlock window per monitor on @screen, configure
 * each one from the manager's current settings, hook up its signals, and
 * append it to the manager's window list. */
gs_manager_create_windows_for_screen (GSManager *manager,
                                      GdkScreen *screen)
{
        int monitor;
        int monitor_count;

        g_return_if_fail (manager != NULL);
        g_return_if_fail (GS_IS_MANAGER (manager));
        g_return_if_fail (GDK_IS_SCREEN (screen));

        /* keep both objects alive while we populate the window list */
        g_object_ref (manager);
        g_object_ref (screen);

        monitor_count = gdk_screen_get_n_monitors (screen);

        gs_debug ("Creating %d windows for screen %d", monitor_count, gdk_screen_get_number (screen));

        for (monitor = 0; monitor < monitor_count; monitor++) {
                GSWindow *window = gs_window_new (screen, monitor, manager->priv->lock_active);

                gs_window_set_user_switch_enabled (window, manager->priv->user_switch_enabled);
                gs_window_set_logout_enabled (window, manager->priv->logout_enabled);
                gs_window_set_logout_timeout (window, manager->priv->logout_timeout);
                gs_window_set_logout_command (window, manager->priv->logout_command);
                gs_window_set_keyboard_enabled (window, manager->priv->keyboard_enabled);
                gs_window_set_keyboard_command (window, manager->priv->keyboard_command);
                gs_window_set_away_message (window, manager->priv->away_message);

                connect_window_signals (manager, window);

                manager->priv->windows = g_slist_append (manager->priv->windows, window);
        }

        g_object_unref (screen);
        g_object_unref (manager);
} | 1 | [] | gnome-screensaver | f6d3defdc7080a540d7f8df15dc309a9364ae668 | 143,321,559,131,040,370,000,000,000,000,000,000,000 | 37 | Create or remove windows as number of monitors changes due to randr 1.2
2008-08-20 William Jon McCann <[email protected]>
* src/gs-manager.c (gs_manager_create_window_for_monitor),
(on_screen_monitors_changed), (gs_manager_destroy_windows),
(gs_manager_finalize), (gs_manager_create_windows_for_screen):
Create or remove windows as number of monitors changes
due to randr 1.2 goodness.
svn path=/trunk/; revision=1483 |
// Visit each expression in the list for its value; CHECK_ALIVE bails out
// of this function early if graph construction has been aborted.
void HGraphBuilder::VisitExpressions(ZoneList<Expression*>* exprs) {
  int index = 0;
  while (index < exprs->length()) {
    CHECK_ALIVE(VisitForValue(exprs->at(index)));
    ++index;
  }
} | 0 | [] | node | fd80a31e0697d6317ce8c2d289575399f4e06d21 | 188,105,361,806,318,580,000,000,000,000,000,000,000 | 5 | deps: backport 5f836c from v8 upstream
Original commit message:
Fix Hydrogen bounds check elimination
When combining bounds checks, they must all be moved before the first load/store
that they are guarding.
BUG=chromium:344186
LOG=y
[email protected]
Review URL: https://codereview.chromium.org/172093002
git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@19475 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
fix #8070 |
/*
 * Resolve host:port and write the numeric address of the first result
 * into result (buffer of at least INET6_ADDRSTRLEN bytes).
 *
 * For IPv6 link-local hosts ("fe80..."), the global linklocal_index is
 * appended as a "%<ifindex>" scope suffix before resolution.
 *
 * Returns 0 on success, -1 on resolution failure or when getaddrinfo()
 * produced no usable entry.
 *
 * NOTE(review): the first entry is converted with inet_ntop(AF_INET6, ...)
 * regardless of its actual ai_family, exactly as the original code did;
 * callers apparently only use this for IPv6 destinations -- confirm.
 */
static int connecthostport(const char * host, unsigned short port, char * result)
{
	int n;
	/* room for the address plus a "%<ifindex>" scope suffix */
	char hostname[INET6_ADDRSTRLEN + 16];
	char port_str[8];
	struct addrinfo *ai, *p;
	struct addrinfo hints;

	memset(&hints, 0, sizeof(hints));
	/* hints.ai_flags = AI_ADDRCONFIG; */
#ifdef AI_NUMERICSERV
	hints.ai_flags = AI_NUMERICSERV;
#endif
	hints.ai_socktype = SOCK_STREAM;
	hints.ai_family = AF_UNSPEC; /* AF_INET, AF_INET6 or AF_UNSPEC */
	/* hints.ai_protocol = IPPROTO_TCP; */
	snprintf(port_str, sizeof(port_str), "%hu", port);

	if(!strncmp(host, "fe80", 4))
	{
		printf("Using an linklocal address\n");
		/* bounded formatting replaces the former unchecked
		 * strcpy()/strcat() sequence, which could overflow
		 * hostname[] for an oversized host argument */
		snprintf(hostname, sizeof(hostname), "%s%%%d", host, linklocal_index);
		printf("host: %s\n", hostname);
	}
	else
	{
		snprintf(hostname, sizeof(hostname), "%s", host);
	}

	n = getaddrinfo(hostname, port_str, &hints, &ai);
	if(n != 0)
	{
		fprintf(stderr, "getaddrinfo() error : %s\n", gai_strerror(n));
		return -1;
	}
	for(p = ai; p; p = p->ai_next)
	{
#ifdef DEBUG
		char tmp_host[256];
		char tmp_service[256];
		printf("ai_family=%d ai_socktype=%d ai_protocol=%d ai_addrlen=%d\n ",
		       p->ai_family, p->ai_socktype, p->ai_protocol, p->ai_addrlen);
		getnameinfo(p->ai_addr, p->ai_addrlen, tmp_host, sizeof(tmp_host),
		            tmp_service, sizeof(tmp_service),
		            NI_NUMERICHOST | NI_NUMERICSERV);
		printf(" host=%s service=%s\n", tmp_host, tmp_service);
#endif
		inet_ntop(AF_INET6, &(((struct sockaddr_in6 *)p->ai_addr)->sin6_addr), result, INET6_ADDRSTRLEN);
		/* the original returned here without releasing the list (leak) */
		freeaddrinfo(ai);
		return 0;
	}
	/* no entries: free the list and report failure instead of falling
	 * off the end of a non-void function (undefined behavior) */
	freeaddrinfo(ai);
	return -1;
} | 0 | [
"CWE-476"
] | miniupnp | 13585f15c7f7dc28bbbba1661efb280d530d114c | 266,110,961,945,661,460,000,000,000,000,000,000,000 | 51 | GetOutboundPinholeTimeout: check args |
// Advance from pCurrEntry to the next block entry belonging to this track,
// scanning forward across cluster boundaries when necessary.
//
// Returns 0 on success (pNextEntry set to the matching entry), 1 when the
// end of the stream is reached (pNextEntry set to this track's EOS
// sentinel), E_BUFFER_NOT_FULL when the caller must (pre)load another
// cluster before retrying, or a negative error code.
long Track::GetNext(const BlockEntry* pCurrEntry,
                    const BlockEntry*& pNextEntry) const {
  assert(pCurrEntry);
  assert(!pCurrEntry->EOS());  //?

  const Block* const pCurrBlock = pCurrEntry->GetBlock();
  assert(pCurrBlock && pCurrBlock->GetTrackNumber() == m_info.number);
  if (!pCurrBlock || pCurrBlock->GetTrackNumber() != m_info.number)
    return -1;

  const Cluster* pCluster = pCurrEntry->GetCluster();
  assert(pCluster);
  assert(!pCluster->EOS());

  long status = pCluster->GetNext(pCurrEntry, pNextEntry);

  if (status < 0)  // error
    return status;

  // Outer loop walks clusters; counter i caps the search at 100 clusters
  // that contain no block for this track, so a pathological stream cannot
  // keep us searching unboundedly (see the note at the bottom).
  for (int i = 0;;) {
    // Inner loop walks entries within the current cluster.
    while (pNextEntry) {
      const Block* const pNextBlock = pNextEntry->GetBlock();
      assert(pNextBlock);

      if (pNextBlock->GetTrackNumber() == m_info.number)
        return 0;

      pCurrEntry = pNextEntry;

      status = pCluster->GetNext(pCurrEntry, pNextEntry);

      if (status < 0)  // error
        return status;
    }

    // Exhausted this cluster; move to the next one.
    pCluster = m_pSegment->GetNext(pCluster);

    if (pCluster == NULL) {
      pNextEntry = GetEOS();
      return 1;
    }

    if (pCluster->EOS()) {
      if (m_pSegment->DoneParsing()) {
        pNextEntry = GetEOS();
        return 1;
      }

      // TODO: there is a potential O(n^2) problem here: we tell the
      // caller to (pre)load another cluster, which he does, but then he
      // calls GetNext again, which repeats the same search.  This is
      // a pathological case, since the only way it can happen is if
      // there exists a long sequence of clusters none of which contain a
      // block from this track.  One way around this problem is for the
      // caller to be smarter when he loads another cluster: don't call
      // us back until you have a cluster that contains a block from this
      // track. (Of course, that's not cheap either, since our caller
      // would have to scan the each cluster as it's loaded, so that
      // would just push back the problem.)
      pNextEntry = NULL;
      return E_BUFFER_NOT_FULL;
    }

    status = pCluster->GetFirst(pNextEntry);

    if (status < 0)  // error
      return status;

    if (pNextEntry == NULL)  // empty cluster
      continue;

    ++i;
    if (i >= 100)
      break;
  }

  // NOTE: if we get here, it means that we didn't find a block with
  // a matching track number after lots of searching, so we give
  // up trying.
  pNextEntry = GetEOS();  // so we can return a non-NULL value
  return 1;
} | 0 | [
"CWE-20"
] | libvpx | 34d54b04e98dd0bac32e9aab0fbda0bf501bc742 | 152,686,065,427,382,240,000,000,000,000,000,000,000 | 85 | update libwebm to libwebm-1.0.0.27-358-gdbf1d10
changelog:
https://chromium.googlesource.com/webm/libwebm/+log/libwebm-1.0.0.27-351-g9f23fbc..libwebm-1.0.0.27-358-gdbf1d10
Change-Id: I28a6b3ae02a53fb1f2029eee11e9449afb94c8e3 |
/*
 * Determine and cache the SELinux security label for @inode.
 *
 * The label source depends on the superblock's labeling behavior (xattr,
 * task SID, type transition, mountpoint label, genfs, ...).  If the
 * superblock itself has not been labeled yet (before the initial policy
 * load), the inode is queued on sbsec->isec_head and initialization is
 * deferred to selinux_complete_init().
 *
 * @opt_dentry may be NULL when called from selinux_complete_init(); in
 * that case a dentry is looked up from the inode's alias list, since the
 * xattr and genfs paths need one.
 *
 * Returns 0 on success (including the "mark label invalid and retry
 * later" fallback) or a negative errno.
 */
static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dentry)
{
	struct superblock_security_struct *sbsec = NULL;
	struct inode_security_struct *isec = selinux_inode(inode);
	u32 task_sid, sid = 0;
	u16 sclass;
	struct dentry *dentry;
	int rc = 0;

	/* Fast path: already labeled, nothing to do. */
	if (isec->initialized == LABEL_INITIALIZED)
		return 0;

	spin_lock(&isec->lock);
	/* Re-check under the lock; another task may have labeled it. */
	if (isec->initialized == LABEL_INITIALIZED)
		goto out_unlock;

	if (isec->sclass == SECCLASS_FILE)
		isec->sclass = inode_mode_to_security_class(inode->i_mode);

	sbsec = selinux_superblock(inode->i_sb);
	if (!(sbsec->flags & SE_SBINITIALIZED)) {
		/* Defer initialization until selinux_complete_init,
		   after the initial policy is loaded and the security
		   server is ready to handle calls. */
		spin_lock(&sbsec->isec_lock);
		if (list_empty(&isec->list))
			list_add(&isec->list, &sbsec->isec_head);
		spin_unlock(&sbsec->isec_lock);
		goto out_unlock;
	}

	sclass = isec->sclass;
	task_sid = isec->task_sid;
	sid = isec->sid;
	/* Mark in-progress and drop the lock: the lookups below may sleep. */
	isec->initialized = LABEL_PENDING;
	spin_unlock(&isec->lock);

	switch (sbsec->behavior) {
	case SECURITY_FS_USE_NATIVE:
		break;
	case SECURITY_FS_USE_XATTR:
		if (!(inode->i_opflags & IOP_XATTR)) {
			sid = sbsec->def_sid;
			break;
		}
		/* Need a dentry, since the xattr API requires one.
		   Life would be simpler if we could just pass the inode. */
		if (opt_dentry) {
			/* Called from d_instantiate or d_splice_alias. */
			dentry = dget(opt_dentry);
		} else {
			/*
			 * Called from selinux_complete_init, try to find a dentry.
			 * Some filesystems really want a connected one, so try
			 * that first.  We could split SECURITY_FS_USE_XATTR in
			 * two, depending upon that...
			 */
			dentry = d_find_alias(inode);
			if (!dentry)
				dentry = d_find_any_alias(inode);
		}
		if (!dentry) {
			/*
			 * this is can be hit on boot when a file is accessed
			 * before the policy is loaded.  When we load policy we
			 * may find inodes that have no dentry on the
			 * sbsec->isec_head list.  No reason to complain as these
			 * will get fixed up the next time we go through
			 * inode_doinit with a dentry, before these inodes could
			 * be used again by userspace.
			 */
			goto out_invalid;
		}

		rc = inode_doinit_use_xattr(inode, dentry, sbsec->def_sid,
					    &sid);
		dput(dentry);
		if (rc)
			goto out;
		break;
	case SECURITY_FS_USE_TASK:
		sid = task_sid;
		break;
	case SECURITY_FS_USE_TRANS:
		/* Default to the fs SID. */
		sid = sbsec->sid;

		/* Try to obtain a transition SID. */
		rc = security_transition_sid(&selinux_state, task_sid, sid,
					     sclass, NULL, &sid);
		if (rc)
			goto out;
		break;
	case SECURITY_FS_USE_MNTPOINT:
		sid = sbsec->mntpoint_sid;
		break;
	default:
		/* Default to the fs superblock SID. */
		sid = sbsec->sid;

		if ((sbsec->flags & SE_SBGENFS) &&
		     (!S_ISLNK(inode->i_mode) ||
		      selinux_policycap_genfs_seclabel_symlinks())) {
			/* We must have a dentry to determine the label on
			 * procfs inodes */
			if (opt_dentry) {
				/* Called from d_instantiate or
				 * d_splice_alias. */
				dentry = dget(opt_dentry);
			} else {
				/* Called from selinux_complete_init, try to
				 * find a dentry.  Some filesystems really want
				 * a connected one, so try that first.
				 */
				dentry = d_find_alias(inode);
				if (!dentry)
					dentry = d_find_any_alias(inode);
			}
			/*
			 * This can be hit on boot when a file is accessed
			 * before the policy is loaded.  When we load policy we
			 * may find inodes that have no dentry on the
			 * sbsec->isec_head list.  No reason to complain as
			 * these will get fixed up the next time we go through
			 * inode_doinit() with a dentry, before these inodes
			 * could be used again by userspace.
			 */
			if (!dentry)
				goto out_invalid;
			rc = selinux_genfs_get_sid(dentry, sclass,
						   sbsec->flags, &sid);
			if (rc) {
				dput(dentry);
				goto out;
			}

			if ((sbsec->flags & SE_SBGENFS_XATTR) &&
			    (inode->i_opflags & IOP_XATTR)) {
				rc = inode_doinit_use_xattr(inode, dentry,
							    sid, &sid);
				if (rc) {
					dput(dentry);
					goto out;
				}
			}
			dput(dentry);
		}
		break;
	}

out:
	/* Publish the result, but only if nobody raced us out of PENDING. */
	spin_lock(&isec->lock);
	if (isec->initialized == LABEL_PENDING) {
		if (rc) {
			isec->initialized = LABEL_INVALID;
			goto out_unlock;
		}
		isec->initialized = LABEL_INITIALIZED;
		isec->sid = sid;
	}

out_unlock:
	spin_unlock(&isec->lock);
	return rc;

out_invalid:
	/* Could not label now (no dentry yet); retry on a later call. */
	spin_lock(&isec->lock);
	if (isec->initialized == LABEL_PENDING) {
		isec->initialized = LABEL_INVALID;
		isec->sid = sid;
	}
	spin_unlock(&isec->lock);
	return 0;
} | 0 | [
"CWE-416"
] | linux | a3727a8bac0a9e77c70820655fd8715523ba3db7 | 194,850,759,964,257,520,000,000,000,000,000,000,000 | 174 | selinux,smack: fix subjective/objective credential use mixups
Jann Horn reported a problem with commit eb1231f73c4d ("selinux:
clarify task subjective and objective credentials") where some LSM
hooks were attempting to access the subjective credentials of a task
other than the current task. Generally speaking, it is not safe to
access another task's subjective credentials and doing so can cause
a number of problems.
Further, while looking into the problem, I realized that Smack was
suffering from a similar problem brought about by a similar commit
1fb057dcde11 ("smack: differentiate between subjective and objective
task credentials").
This patch addresses this problem by restoring the use of the task's
objective credentials in those cases where the task is other than the
current executing task. Not only does this resolve the problem
reported by Jann, it is arguably the correct thing to do in these
cases.
Cc: [email protected]
Fixes: eb1231f73c4d ("selinux: clarify task subjective and objective credentials")
Fixes: 1fb057dcde11 ("smack: differentiate between subjective and objective task credentials")
Reported-by: Jann Horn <[email protected]>
Acked-by: Eric W. Biederman <[email protected]>
Acked-by: Casey Schaufler <[email protected]>
Signed-off-by: Paul Moore <[email protected]> |
/* Propagate the guest's TPR from the VMCB's V_TPR field into the emulated
 * local APIC's CR8, unless CR8 writes are being intercepted (in which case
 * CR8 is already tracked) or a nested guest with virtual interrupts is
 * active. */
static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int cr8;

	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
		return;

	if (is_cr_intercept(svm, INTERCEPT_CR8_WRITE))
		return;

	cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
	kvm_set_cr8(vcpu, cr8);
} | 0 | [] | kvm | 854e8bb1aa06c578c2c9145fa6bfe3680ef63b23 | 9,825,029,085,837,951,000,000,000,000,000,000,000 | 12 | KVM: x86: Check non-canonical addresses upon WRMSR
Upon WRMSR, the CPU should inject #GP if a non-canonical value (address) is
written to certain MSRs. The behavior is "almost" identical for AMD and Intel
(ignoring MSRs that are not implemented in either architecture since they would
anyhow #GP). However, IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
non-canonical address is written on Intel but not on AMD (which ignores the top
32-bits).
Accordingly, this patch injects a #GP on the MSRs which behave identically on
Intel and AMD. To eliminate the differences between the architecutres, the
value which is written to IA32_SYSENTER_ESP and IA32_SYSENTER_EIP is turned to
canonical value before writing instead of injecting a #GP.
Some references from Intel and AMD manuals:
According to Intel SDM description of WRMSR instruction #GP is expected on
WRMSR "If the source register contains a non-canonical address and ECX
specifies one of the following MSRs: IA32_DS_AREA, IA32_FS_BASE, IA32_GS_BASE,
IA32_KERNEL_GS_BASE, IA32_LSTAR, IA32_SYSENTER_EIP, IA32_SYSENTER_ESP."
According to AMD manual instruction manual:
LSTAR/CSTAR (SYSCALL): "The WRMSR instruction loads the target RIP into the
LSTAR and CSTAR registers. If an RIP written by WRMSR is not in canonical
form, a general-protection exception (#GP) occurs."
IA32_GS_BASE and IA32_FS_BASE (WRFSBASE/WRGSBASE): "The address written to the
base field must be in canonical form or a #GP fault will occur."
IA32_KERNEL_GS_BASE (SWAPGS): "The address stored in the KernelGSbase MSR must
be in canonical form."
This patch fixes CVE-2014-3610.
Cc: [email protected]
Signed-off-by: Nadav Amit <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
/*
 * Look up the page backing a device (devmap) PUD-sized mapping of @vma at
 * @addr and take a FOLL_GET or FOLL_PIN reference on it.
 *
 * Caller must hold the PUD lock for @pud (asserted below).  On success,
 * also stores a dev_pagemap reference in *pgmap for the caller to release.
 *
 * Returns the page on success, NULL when the entry is not a present devmap
 * PUD (or a write was requested on a non-writable entry), or an ERR_PTR:
 *   -EEXIST  caller did not request a reference (neither FOLL_GET nor
 *            FOLL_PIN set)
 *   -EFAULT  no dev_pagemap covers the pfn
 *   -ENOMEM  the page reference could not be taken
 */
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	unsigned long pfn = pud_pfn(*pud);
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;

	assert_spin_locked(pud_lockptr(mm, pud));

	if (flags & FOLL_WRITE && !pud_write(*pud))
		return NULL;

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return NULL;

	if (pud_present(*pud) && pud_devmap(*pud))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pud(vma, addr, pud, flags);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 *
	 * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here:
	 */
	if (!(flags & (FOLL_GET | FOLL_PIN)))
		return ERR_PTR(-EEXIST);

	/* Offset the pfn to the page within the PUD-sized mapping. */
	pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
	*pgmap = get_dev_pagemap(pfn, *pgmap);
	if (!*pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);
	if (!try_grab_page(page, flags))
		page = ERR_PTR(-ENOMEM);

	return page;
} | 0 | [
"CWE-362"
] | linux | c444eb564fb16645c172d550359cb3d75fe8a040 | 294,103,260,657,905,400,000,000,000,000,000,000,000 | 44 | mm: thp: make the THP mapcount atomic against __split_huge_pmd_locked()
Write protect anon page faults require an accurate mapcount to decide
if to break the COW or not. This is implemented in the THP path with
reuse_swap_page() ->
page_trans_huge_map_swapcount()/page_trans_huge_mapcount().
If the COW triggers while the other processes sharing the page are
under a huge pmd split, to do an accurate reading, we must ensure the
mapcount isn't computed while it's being transferred from the head
page to the tail pages.
reuse_swap_cache() already runs serialized by the page lock, so it's
enough to add the page lock around __split_huge_pmd_locked too, in
order to add the missing serialization.
Note: the commit in "Fixes" is just to facilitate the backporting,
because the code before such commit didn't try to do an accurate THP
mapcount calculation and it instead used the page_count() to decide if
to COW or not. Both the page_count and the pin_count are THP-wide
refcounts, so they're inaccurate if used in
reuse_swap_page(). Reverting such commit (besides the unrelated fix to
the local anon_vma assignment) would have also opened the window for
memory corruption side effects to certain workloads as documented in
such commit header.
Signed-off-by: Andrea Arcangeli <[email protected]>
Suggested-by: Jann Horn <[email protected]>
Reported-by: Jann Horn <[email protected]>
Acked-by: Kirill A. Shutemov <[email protected]>
Fixes: 6d0a07edd17c ("mm: thp: calculate the mapcount correctly for THP pages during WP faults")
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]> |
/*
 * Userspace-exit completion callback for emulated MMIO: copy the data for
 * the fragment just serviced by userspace, advance to the next fragment,
 * and either finish the emulated instruction (return 1, or re-enter the
 * emulator for reads) or set up the next KVM_EXIT_MMIO round trip
 * (return 0).
 */
static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	struct kvm_mmio_fragment *frag;
	unsigned len;

	BUG_ON(!vcpu->mmio_needed);

	/* Complete previous fragment */
	frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
	len = min(8u, frag->len);
	if (!vcpu->mmio_is_write)
		memcpy(frag->data, run->mmio.data, len);

	if (frag->len <= 8) {
		/* Switch to the next fragment. */
		frag++;
		vcpu->mmio_cur_fragment++;
	} else {
		/* Go forward to the next mmio piece. */
		frag->data += len;
		frag->gpa += len;
		frag->len -= len;
	}

	/*
	 * Must be >= rather than ==: a later emulated access can reset
	 * mmio_nr_fragments to 0 while mmio_needed is still set, so
	 * mmio_cur_fragment may already be past it.  With ==, the
	 * termination condition would never fire and mmio_cur_fragment
	 * would keep incrementing past the end of mmio_fragments[]
	 * (CVE-2014-0049).
	 */
	if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
		vcpu->mmio_needed = 0;

		/* FIXME: return into emulator if single-stepping.  */
		if (vcpu->mmio_is_write)
			return 1;
		vcpu->mmio_read_completed = 1;
		return complete_emulated_io(vcpu);
	}

	/* More fragments: hand the next piece to userspace. */
	run->exit_reason = KVM_EXIT_MMIO;
	run->mmio.phys_addr = frag->gpa;
	if (vcpu->mmio_is_write)
		memcpy(run->mmio.data, frag->data, min(8u, frag->len));
	run->mmio.len = min(8u, frag->len);
	run->mmio.is_write = vcpu->mmio_is_write;
	vcpu->arch.complete_userspace_io = complete_emulated_mmio;
	return 0;
} | 1 | [
"CWE-119",
"CWE-703",
"CWE-120"
] | linux | a08d3b3b99efd509133946056531cdf8f3a0c09b | 7,140,657,898,321,828,000,000,000,000,000,000,000 | 44 | kvm: x86: fix emulator buffer overflow (CVE-2014-0049)
The problem occurs when the guest performs a pusha with the stack
address pointing to an mmio address (or an invalid guest physical
address) to start with, but then extending into an ordinary guest
physical address. When doing repeated emulated pushes
emulator_read_write sets mmio_needed to 1 on the first one. On a
later push when the stack points to regular memory,
mmio_nr_fragments is set to 0, but mmio_is_needed is not set to 0.
As a result, KVM exits to userspace, and then returns to
complete_emulated_mmio. In complete_emulated_mmio
vcpu->mmio_cur_fragment is incremented. The termination condition of
vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments is never achieved.
The code bounces back and forth to userspace, incrementing
mmio_cur_fragment past its buffer.  If the guest does nothing else it
eventually leads to a crash on a memcpy from an invalid memory address.
However if a guest code can cause the vm to be destroyed in another
vcpu with excellent timing, then kvm_clear_async_pf_completion_queue
can be used by the guest to control the data that's pointed to by the
call to cancel_work_item, which can be used to gain execution.
Fixes: f78146b0f9230765c6315b2e14f56112513389ad
Signed-off-by: Andrew Honig <[email protected]>
Cc: [email protected] (3.5+)
Signed-off-by: Paolo Bonzini <[email protected]> |
/* Release the scheduler's per-hardware-context private data on every
 * hctx of @q.  If @exit is non-NULL it is invoked first for each hctx
 * that actually has sched_data, then the data is freed and cleared. */
void blk_mq_sched_free_hctx_data(struct request_queue *q,
				 void (*exit)(struct blk_mq_hw_ctx *))
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->sched_data)
			continue;
		if (exit)
			exit(hctx);
		kfree(hctx->sched_data);
		hctx->sched_data = NULL;
	}
} | 0 | [
"CWE-416"
] | linux | c3e2219216c92919a6bd1711f340f5faa98695e6 | 301,648,405,357,966,570,000,000,000,000,000,000,000 | 13 | block: free sched's request pool in blk_cleanup_queue
In theory, IO scheduler belongs to request queue, and the request pool
of sched tags belongs to the request queue too.
However, the current tags allocation interfaces are re-used for both
driver tags and sched tags, and driver tags is definitely host wide,
and doesn't belong to any request queue, same with its request pool.
So we need tagset instance for freeing request of sched tags.
Meantime, blk_mq_free_tag_set() often follows blk_cleanup_queue() in case
of non-BLK_MQ_F_TAG_SHARED, this way requires that request pool of sched
tags to be freed before calling blk_mq_free_tag_set().
Commit 47cdee29ef9d94e ("block: move blk_exit_queue into __blk_release_queue")
moves blk_exit_queue into __blk_release_queue for simplifying the fast
path in generic_make_request(), then causes oops during freeing requests
of sched tags in __blk_release_queue().
Fix the above issue by moving the freeing of the sched tags request pool into
blk_cleanup_queue(); this is safe because the queue has been frozen and there are
no in-queue requests at that time. Freeing sched tags has to be kept in queue's
release handler because there might be un-completed dispatch activity
which might refer to sched tags.
Cc: Bart Van Assche <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Fixes: 47cdee29ef9d94e485eb08f962c74943023a5271 ("block: move blk_exit_queue into __blk_release_queue")
Tested-by: Yi Zhang <[email protected]>
Reported-by: kernel test robot <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
// Verifies PathUtil::removeQueryAndFragment() strips everything from the
// first '?' or '#' onward, covering empty input, query-only, fragment-only,
// and mixed/duplicated delimiter orderings.
TEST_F(PathUtilityTest, RemoveQueryAndFragment) {
  // Empty and query-string cases.
  EXPECT_EQ("", PathUtil::removeQueryAndFragment(""));
  EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc"));
  EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc?"));
  EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc?param=value"));
  EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc?param=value1&param=value2"));
  EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc??"));
  EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc??param=value"));
  // Fragment-only cases.
  EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc#"));
  EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc#fragment"));
  EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc#fragment?param=value"));
  EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc##"));
  // Mixed '?' / '#' orderings.
  EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc#?"));
  EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc#?param=value"));
  EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc?#"));
  EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc?#fragment"));
  EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc?param=value#"));
  EXPECT_EQ("/abc", PathUtil::removeQueryAndFragment("/abc?param=value#fragment"));
} | 0 | [
"CWE-22"
] | envoy | 5333b928d8bcffa26ab19bf018369a835f697585 | 134,777,501,105,514,990,000,000,000,000,000,000,000 | 19 | Implement handling of escaped slash characters in URL path
Fixes: CVE-2021-29492
Signed-off-by: Yan Avlasov <[email protected]> |
/*
 * Interpret a CSI ... m (SGR, Select Graphic Rendition) control sequence
 * for console @vc: walk the collected numeric parameters in vc_par[] and
 * update the console's attribute state (intensity, italic, underline,
 * blink, reverse, character set / display-control mode, and colors), then
 * recompute the combined attribute byte via update_attr().
 */
static void csi_m(struct vc_data *vc)
{
	int i;

	for (i = 0; i <= vc->vc_npar; i++)
		switch (vc->vc_par[i]) {
		case 0:	/* all attributes off */
			default_attr(vc);
			break;
		case 1:
			vc->vc_intensity = 2;
			break;
		case 2:
			vc->vc_intensity = 0;
			break;
		case 3:
			vc->vc_italic = 1;
			break;
		case 21:
			/*
			 * No console drivers support double underline, so
			 * convert it to a single underline.
			 */
		case 4:
			vc->vc_underline = 1;
			break;
		case 5:
			vc->vc_blink = 1;
			break;
		case 7:
			vc->vc_reverse = 1;
			break;
		case 10: /* ANSI X3.64-1979 (SCO-ish?)
			  * Select primary font, don't display control chars if
			  * defined, don't set bit 8 on output.
			  */
			vc->vc_translate = set_translate(vc->vc_charset == 0
					? vc->vc_G0_charset
					: vc->vc_G1_charset, vc);
			vc->vc_disp_ctrl = 0;
			vc->vc_toggle_meta = 0;
			break;
		case 11: /* ANSI X3.64-1979 (SCO-ish?)
			  * Select first alternate font, lets chars < 32 be
			  * displayed as ROM chars.
			  */
			vc->vc_translate = set_translate(IBMPC_MAP, vc);
			vc->vc_disp_ctrl = 1;
			vc->vc_toggle_meta = 0;
			break;
		case 12: /* ANSI X3.64-1979 (SCO-ish?)
			  * Select second alternate font, toggle high bit
			  * before displaying as ROM char.
			  */
			vc->vc_translate = set_translate(IBMPC_MAP, vc);
			vc->vc_disp_ctrl = 1;
			vc->vc_toggle_meta = 1;
			break;
		case 22:	/* normal intensity */
			vc->vc_intensity = 1;
			break;
		case 23:	/* italic off */
			vc->vc_italic = 0;
			break;
		case 24:	/* underline off */
			vc->vc_underline = 0;
			break;
		case 25:	/* blink off */
			vc->vc_blink = 0;
			break;
		case 27:	/* reverse video off */
			vc->vc_reverse = 0;
			break;
		case 38:	/* extended foreground color (256/truecolor) */
			i = vc_t416_color(vc, i, rgb_foreground);
			break;
		case 48:	/* extended background color (256/truecolor) */
			i = vc_t416_color(vc, i, rgb_background);
			break;
		case 39:	/* default foreground color */
			vc->vc_color = (vc->vc_def_color & 0x0f) |
				(vc->vc_color & 0xf0);
			break;
		case 49:	/* default background color */
			vc->vc_color = (vc->vc_def_color & 0xf0) |
				(vc->vc_color & 0x0f);
			break;
		default:
			/* 90-107 are the "bright" color range: fold them onto
			 * 30-47 and bump intensity for the foreground half. */
			if (vc->vc_par[i] >= 90 && vc->vc_par[i] <= 107) {
				if (vc->vc_par[i] < 100)
					vc->vc_intensity = 2;
				vc->vc_par[i] -= 60;
			}
			if (vc->vc_par[i] >= 30 && vc->vc_par[i] <= 37)
				vc->vc_color = color_table[vc->vc_par[i] - 30]
					| (vc->vc_color & 0xf0);
			else if (vc->vc_par[i] >= 40 && vc->vc_par[i] <= 47)
				vc->vc_color = (color_table[vc->vc_par[i] - 40] << 4)
					| (vc->vc_color & 0x0f);
			break;
		}
	update_attr(vc);
} | 0 | [
"CWE-416",
"CWE-362"
] | linux | ca4463bf8438b403596edd0ec961ca0d4fbe0220 | 305,187,969,313,241,350,000,000,000,000,000,000,000 | 103 | vt: vt_ioctl: fix VT_DISALLOCATE freeing in-use virtual console
The VT_DISALLOCATE ioctl can free a virtual console while tty_release()
is still running, causing a use-after-free in con_shutdown(). This
occurs because VT_DISALLOCATE considers a virtual console's
'struct vc_data' to be unused as soon as the corresponding tty's
refcount hits 0. But actually it may be still being closed.
Fix this by making vc_data be reference-counted via the embedded
'struct tty_port'. A newly allocated virtual console has refcount 1.
Opening it for the first time increments the refcount to 2. Closing it
for the last time decrements the refcount (in tty_operations::cleanup()
so that it happens late enough), as does VT_DISALLOCATE.
Reproducer:
#include <fcntl.h>
#include <linux/vt.h>
#include <sys/ioctl.h>
#include <unistd.h>
int main()
{
if (fork()) {
for (;;)
close(open("/dev/tty5", O_RDWR));
} else {
int fd = open("/dev/tty10", O_RDWR);
for (;;)
ioctl(fd, VT_DISALLOCATE, 5);
}
}
KASAN report:
BUG: KASAN: use-after-free in con_shutdown+0x76/0x80 drivers/tty/vt/vt.c:3278
Write of size 8 at addr ffff88806a4ec108 by task syz_vt/129
CPU: 0 PID: 129 Comm: syz_vt Not tainted 5.6.0-rc2 #11
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS ?-20191223_100556-anatol 04/01/2014
Call Trace:
[...]
con_shutdown+0x76/0x80 drivers/tty/vt/vt.c:3278
release_tty+0xa8/0x410 drivers/tty/tty_io.c:1514
tty_release_struct+0x34/0x50 drivers/tty/tty_io.c:1629
tty_release+0x984/0xed0 drivers/tty/tty_io.c:1789
[...]
Allocated by task 129:
[...]
kzalloc include/linux/slab.h:669 [inline]
vc_allocate drivers/tty/vt/vt.c:1085 [inline]
vc_allocate+0x1ac/0x680 drivers/tty/vt/vt.c:1066
con_install+0x4d/0x3f0 drivers/tty/vt/vt.c:3229
tty_driver_install_tty drivers/tty/tty_io.c:1228 [inline]
tty_init_dev+0x94/0x350 drivers/tty/tty_io.c:1341
tty_open_by_driver drivers/tty/tty_io.c:1987 [inline]
tty_open+0x3ca/0xb30 drivers/tty/tty_io.c:2035
[...]
Freed by task 130:
[...]
kfree+0xbf/0x1e0 mm/slab.c:3757
vt_disallocate drivers/tty/vt/vt_ioctl.c:300 [inline]
vt_ioctl+0x16dc/0x1e30 drivers/tty/vt/vt_ioctl.c:818
tty_ioctl+0x9db/0x11b0 drivers/tty/tty_io.c:2660
[...]
Fixes: 4001d7b7fc27 ("vt: push down the tty lock so we can see what is left to tackle")
Cc: <[email protected]> # v3.4+
Reported-by: [email protected]
Acked-by: Jiri Slaby <[email protected]>
Signed-off-by: Eric Biggers <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
/* Convert an array of doubles to clipped 16-bit sample values for FLAC
** output, walking the buffer backwards (safe for in-place conversion when
** dest aliases src element-wise).
**
** normalize != 0 scales the nominal [-1.0, 1.0) input range by
** 8.0 * 0x1000 == 0x8000 so full scale maps onto the signed 16-bit range.
** The CPU_CLIPS_POSITIVE / CPU_CLIPS_NEGATIVE macros (build-time probes)
** skip the software clamp on CPUs whose float->int conversion already
** saturates.
*/
d2flac16_clip_array (const double *src, int32_t *dest, int count, int normalize)
{	double normfact, scaled_value ;

	normfact = normalize ? (8.0 * 0x1000) : 1.0 ;

	while (--count >= 0)
	{	scaled_value = src [count] * normfact ;
		/* Clamp positive overflow to the maximum 16-bit sample value. */
		if (CPU_CLIPS_POSITIVE == 0 && scaled_value >= (1.0 * 0x7FFF))
		{	dest [count] = 0x7FFF ;
			continue ;
			} ;
		/* Clamp negative overflow; 0x8000 is the 16-bit minimum once
		** interpreted as a signed 16-bit pattern downstream —
		** NOTE(review): presumably masked/sign-handled by the FLAC
		** writer, confirm against the caller. */
		if (CPU_CLIPS_NEGATIVE == 0 && scaled_value <= (-8.0 * 0x1000))
		{	dest [count] = 0x8000 ;
			continue ;
			} ;
		/* In-range value: round to nearest integer. */
		dest [count] = lrint (scaled_value) ;
		} ;

	return ;
} /* d2flac16_clip_array */
"CWE-119",
"CWE-369"
] | libsndfile | 60b234301adf258786d8b90be5c1d437fc8799e0 | 254,644,245,666,231,530,000,000,000,000,000,000,000 | 20 | src/flac.c: Improve error handling
Especially when dealing with corrupt or malicious files. |
/* Bytecode API hook: record the detection (virus) name on the current
 * bytecode context.  The pointer is stored as-is (no copy is taken), so
 * the caller must keep the name alive for the context's lifetime.
 * The len argument is accepted for API symmetry but not used.
 * Always returns 0. */
uint32_t cli_bcapi_setvirusname(struct cli_bc_ctx* ctx, const uint8_t *name, uint32_t len)
{
    const char *virname = (const char *) name;

    (void) len; /* unused: name is taken as-is */
    ctx->virname = virname;
    return 0;
}
"CWE-189"
] | clamav-devel | 3d664817f6ef833a17414a4ecea42004c35cc42f | 89,667,339,035,366,860,000,000,000,000,000,000,000 | 5 | fix recursion level crash (bb #3706).
Thanks to Stephane Chazelas for the analysis. |
/* Parse a Mach-O LC_DYSYMTAB load command located at offset `off` in the
 * binary and populate bin->dysymtab plus the three derived tables:
 * table of contents (bin->toc), module table (bin->modtab) and the
 * indirect symbol table (bin->indirectsyms).
 *
 * Every table is guarded against integer overflow (UT32_MUL) and against
 * offsets/sizes that fall outside the mapped file (bin->size) before any
 * allocation is kept.  On any failure the partially built table is freed
 * and false is returned; true is returned on success. */
static bool parse_dysymtab(struct MACH0_(obj_t) * bin, ut64 off) {
	size_t len, i;
	ut32 size_tab;
	ut8 dysym[sizeof(struct dysymtab_command)] = { 0 };
	ut8 dytoc[sizeof(struct dylib_table_of_contents)] = { 0 };
	ut8 dymod[sizeof(struct MACH0_(dylib_module))] = { 0 };
	ut8 idsyms[sizeof(ut32)] = { 0 };

	/* The whole command header must fit inside the file. */
	if (off > bin->size || off + sizeof(struct dysymtab_command) > bin->size) {
		return false;
	}

	len = rz_buf_read_at(bin->b, off, dysym, sizeof(struct dysymtab_command));
	if (len != sizeof(struct dysymtab_command)) {
		bprintf("Error: read (dysymtab)\n");
		return false;
	}

	/* Decode the dysymtab_command fields honoring the file's endianness;
	 * offsets below match the on-disk struct layout (4 bytes each). */
	bin->dysymtab.cmd = rz_read_ble32(&dysym[0], bin->big_endian);
	bin->dysymtab.cmdsize = rz_read_ble32(&dysym[4], bin->big_endian);
	bin->dysymtab.ilocalsym = rz_read_ble32(&dysym[8], bin->big_endian);
	bin->dysymtab.nlocalsym = rz_read_ble32(&dysym[12], bin->big_endian);
	bin->dysymtab.iextdefsym = rz_read_ble32(&dysym[16], bin->big_endian);
	bin->dysymtab.nextdefsym = rz_read_ble32(&dysym[20], bin->big_endian);
	bin->dysymtab.iundefsym = rz_read_ble32(&dysym[24], bin->big_endian);
	bin->dysymtab.nundefsym = rz_read_ble32(&dysym[28], bin->big_endian);
	bin->dysymtab.tocoff = rz_read_ble32(&dysym[32], bin->big_endian);
	bin->dysymtab.ntoc = rz_read_ble32(&dysym[36], bin->big_endian);
	bin->dysymtab.modtaboff = rz_read_ble32(&dysym[40], bin->big_endian);
	bin->dysymtab.nmodtab = rz_read_ble32(&dysym[44], bin->big_endian);
	bin->dysymtab.extrefsymoff = rz_read_ble32(&dysym[48], bin->big_endian);
	bin->dysymtab.nextrefsyms = rz_read_ble32(&dysym[52], bin->big_endian);
	bin->dysymtab.indirectsymoff = rz_read_ble32(&dysym[56], bin->big_endian);
	bin->dysymtab.nindirectsyms = rz_read_ble32(&dysym[60], bin->big_endian);
	bin->dysymtab.extreloff = rz_read_ble32(&dysym[64], bin->big_endian);
	bin->dysymtab.nextrel = rz_read_ble32(&dysym[68], bin->big_endian);
	bin->dysymtab.locreloff = rz_read_ble32(&dysym[72], bin->big_endian);
	bin->dysymtab.nlocrel = rz_read_ble32(&dysym[76], bin->big_endian);

	/* --- Table of contents -------------------------------------------- */
	bin->ntoc = bin->dysymtab.ntoc;
	if (bin->ntoc > 0) {
		if (!(bin->toc = calloc(bin->ntoc, sizeof(struct dylib_table_of_contents)))) {
			perror("calloc (toc)");
			return false;
		}
		/* Reject count * entry-size overflow and out-of-file tables. */
		if (!UT32_MUL(&size_tab, bin->ntoc, sizeof(struct dylib_table_of_contents))) {
			RZ_FREE(bin->toc);
			return false;
		}
		if (!size_tab) {
			RZ_FREE(bin->toc);
			return false;
		}
		if (bin->dysymtab.tocoff > bin->size || bin->dysymtab.tocoff + size_tab > bin->size) {
			RZ_FREE(bin->toc);
			return false;
		}
		for (i = 0; i < bin->ntoc; i++) {
			len = rz_buf_read_at(bin->b, bin->dysymtab.tocoff + i * sizeof(struct dylib_table_of_contents),
				dytoc, sizeof(struct dylib_table_of_contents));
			if (len != sizeof(struct dylib_table_of_contents)) {
				bprintf("Error: read (toc)\n");
				RZ_FREE(bin->toc);
				return false;
			}
			bin->toc[i].symbol_index = rz_read_ble32(&dytoc[0], bin->big_endian);
			bin->toc[i].module_index = rz_read_ble32(&dytoc[4], bin->big_endian);
		}
	}

	/* --- Module table -------------------------------------------------- */
	bin->nmodtab = bin->dysymtab.nmodtab;
	if (bin->nmodtab > 0) {
		if (!(bin->modtab = calloc(bin->nmodtab, sizeof(struct MACH0_(dylib_module))))) {
			perror("calloc (modtab)");
			return false;
		}
		if (!UT32_MUL(&size_tab, bin->nmodtab, sizeof(struct MACH0_(dylib_module)))) {
			RZ_FREE(bin->modtab);
			return false;
		}
		if (!size_tab) {
			RZ_FREE(bin->modtab);
			return false;
		}
		if (bin->dysymtab.modtaboff > bin->size ||
			bin->dysymtab.modtaboff + size_tab > bin->size) {
			RZ_FREE(bin->modtab);
			return false;
		}
		for (i = 0; i < bin->nmodtab; i++) {
			len = rz_buf_read_at(bin->b, bin->dysymtab.modtaboff + i * sizeof(struct MACH0_(dylib_module)),
				dymod, sizeof(struct MACH0_(dylib_module)));
			/* NOTE(review): here only a hard read error (-1) is treated
			 * as fatal, unlike the full-length check used for the toc
			 * above — confirm short reads are acceptable for modtab. */
			if (len == -1) {
				bprintf("Error: read (modtab)\n");
				RZ_FREE(bin->modtab);
				return false;
			}
			bin->modtab[i].module_name = rz_read_ble32(&dymod[0], bin->big_endian);
			bin->modtab[i].iextdefsym = rz_read_ble32(&dymod[4], bin->big_endian);
			bin->modtab[i].nextdefsym = rz_read_ble32(&dymod[8], bin->big_endian);
			bin->modtab[i].irefsym = rz_read_ble32(&dymod[12], bin->big_endian);
			bin->modtab[i].nrefsym = rz_read_ble32(&dymod[16], bin->big_endian);
			bin->modtab[i].ilocalsym = rz_read_ble32(&dymod[20], bin->big_endian);
			bin->modtab[i].nlocalsym = rz_read_ble32(&dymod[24], bin->big_endian);
			bin->modtab[i].iextrel = rz_read_ble32(&dymod[28], bin->big_endian);
			bin->modtab[i].nextrel = rz_read_ble32(&dymod[32], bin->big_endian);
			bin->modtab[i].iinit_iterm = rz_read_ble32(&dymod[36], bin->big_endian);
			bin->modtab[i].ninit_nterm = rz_read_ble32(&dymod[40], bin->big_endian);
/* 64-bit Mach-O swaps the order of the objc_module_info fields and widens
 * the address to 64 bits. */
#if RZ_BIN_MACH064
			bin->modtab[i].objc_module_info_size = rz_read_ble32(&dymod[44], bin->big_endian);
			bin->modtab[i].objc_module_info_addr = rz_read_ble64(&dymod[48], bin->big_endian);
#else
			bin->modtab[i].objc_module_info_addr = rz_read_ble32(&dymod[44], bin->big_endian);
			bin->modtab[i].objc_module_info_size = rz_read_ble32(&dymod[48], bin->big_endian);
#endif
		}
	}

	/* --- Indirect symbol table ----------------------------------------- */
	bin->nindirectsyms = bin->dysymtab.nindirectsyms;
	if (bin->nindirectsyms > 0) {
		if (!(bin->indirectsyms = calloc(bin->nindirectsyms, sizeof(ut32)))) {
			perror("calloc (indirectsyms)");
			return false;
		}
		if (!UT32_MUL(&size_tab, bin->nindirectsyms, sizeof(ut32))) {
			RZ_FREE(bin->indirectsyms);
			return false;
		}
		if (!size_tab) {
			RZ_FREE(bin->indirectsyms);
			return false;
		}
		if (bin->dysymtab.indirectsymoff > bin->size ||
			bin->dysymtab.indirectsymoff + size_tab > bin->size) {
			RZ_FREE(bin->indirectsyms);
			return false;
		}
		for (i = 0; i < bin->nindirectsyms; i++) {
			len = rz_buf_read_at(bin->b, bin->dysymtab.indirectsymoff + i * sizeof(ut32), idsyms, 4);
			if (len == -1) {
				bprintf("Error: read (indirect syms)\n");
				RZ_FREE(bin->indirectsyms);
				return false;
			}
			bin->indirectsyms[i] = rz_read_ble32(&idsyms[0], bin->big_endian);
		}
	}
	/* TODO extrefsyms, extrel, locrel */
	return true;
}
"CWE-787"
] | rizin | 348b1447d1452f978b69631d6de5b08dd3bdf79d | 170,567,877,698,759,940,000,000,000,000,000,000,000 | 149 | fix #2956 - oob write in mach0.c |
/* Mark the current request's client connection as aborted and disable
 * further output.  Unless the script requested ignore_user_abort, script
 * execution is terminated immediately via zend_bailout(). */
PHPAPI void php_handle_aborted_connection(void)
{
	PG(connection_status) = PHP_CONNECTION_ABORTED;
	php_output_set_status(PHP_OUTPUT_DISABLED);

	if (PG(ignore_user_abort)) {
		return;
	}
	zend_bailout();
}
/* Register the nwfilter connect driver, the shared nwfilter driver and
 * the state driver with libvirt, in that order.  Registration stops at
 * the first failure.  Returns 0 on success, -1 on error. */
int nwfilterRegister(void)
{
    int ret = -1;

    if (virRegisterConnectDriver(&nwfilterConnectDriver, false) >= 0 &&
        virSetSharedNWFilterDriver(&nwfilterDriver) >= 0 &&
        virRegisterStateDriver(&stateDriver) >= 0)
        ret = 0;

    return ret;
}
"CWE-667"
] | libvirt | a4947e8f63c3e6b7b067b444f3d6cf674c0d7f36 | 177,552,299,536,424,700,000,000,000,000,000,000,000 | 10 | nwfilter: fix crash when counting number of network filters
The virNWFilterObjListNumOfNWFilters method iterates over the
driver->nwfilters, accessing virNWFilterObj instances. As such
it needs to be protected against concurrent modification of
the driver->nwfilters object.
This API allows unprivileged users to connect, so users with
read-only access to libvirt can cause a denial of service
crash if they are able to race with a call of virNWFilterUndefine.
Since network filters are usually statically defined, this is
considered a low severity problem.
This is assigned CVE-2022-0897.
Reviewed-by: Eric Blake <[email protected]>
Signed-off-by: Daniel P. Berrangé <[email protected]> |
/* RFC 1002 first-level NetBIOS name encoding: each input byte becomes two
 * output characters, 'A' + high nibble followed by 'A' + low nibble.
 * The target buffer must be at least 2 * length bytes; no terminator is
 * written. */
static void rfc1002mangle(char *target, char *source, unsigned int length)
{
	unsigned int idx;

	for (idx = 0; idx < length; idx++) {
		unsigned char byte = (unsigned char)source[idx];

		target[2 * idx]     = 'A' + ((byte >> 4) & 0x0F);
		target[2 * idx + 1] = 'A' + (byte & 0x0F);
	}
}
"CWE-20"
] | linux | 70945643722ffeac779d2529a348f99567fa5c33 | 236,328,140,138,234,740,000,000,000,000,000,000,000 | 12 | cifs: always do is_path_accessible check in cifs_mount
Currently, we skip doing the is_path_accessible check in cifs_mount if
there is no prefixpath. I have a report of at least one server however
that allows a TREE_CONNECT to a share that has a DFS referral at its
root. The reporter in this case was using a UNC that had no prefixpath,
so the is_path_accessible check was not triggered and the box later hit
a BUG() because we were chasing a DFS referral on the root dentry for
the mount.
This patch fixes this by removing the check for a zero-length
prefixpath. That should make the is_path_accessible check be done in
this situation and should allow the client to chase the DFS referral at
mount time instead.
Cc: [email protected]
Reported-and-Tested-by: Yogesh Sharma <[email protected]>
Signed-off-by: Jeff Layton <[email protected]>
Signed-off-by: Steve French <[email protected]> |
/* Serialize this $graphLookup stage into 'array'.  The mandatory options
 * (from/as/connectToField/connectFromField/startWith) are always emitted;
 * optional options only when set.  When 'explain' is active an absorbed
 * $unwind is reported inline under "unwinding"; otherwise it is appended
 * as its own stage so the output round-trips through the parser. */
void DocumentSourceGraphLookUp::serializeToArray(
    std::vector<Value>& array, boost::optional<ExplainOptions::Verbosity> explain) const {
    // Serialize default options.
    MutableDocument spec(DOC("from" << _from.coll() << "as" << _as.fullPath() << "connectToField"
                                    << _connectToField.fullPath() << "connectFromField"
                                    << _connectFromField.fullPath() << "startWith"
                                    << _startWith->serialize(false)));

    // depthField is optional; serialize it if it was specified.
    if (_depthField) {
        spec["depthField"] = Value(_depthField->fullPath());
    }

    // maxDepth and restrictSearchWithMatch are likewise only emitted when set.
    if (_maxDepth) {
        spec["maxDepth"] = Value(*_maxDepth);
    }

    if (_additionalFilter) {
        spec["restrictSearchWithMatch"] = Value(*_additionalFilter);
    }

    // If we are explaining, include an absorbed $unwind inside the $graphLookup specification.
    if (_unwind && explain) {
        const boost::optional<FieldPath> indexPath = (*_unwind)->indexPath();
        spec["unwinding"] =
            Value(DOC("preserveNullAndEmptyArrays"
                      << (*_unwind)->preserveNullAndEmptyArrays() << "includeArrayIndex"
                      << (indexPath ? Value((*indexPath).fullPath()) : Value())));
    }

    array.push_back(Value(DOC(getSourceName() << spec.freeze())));

    // If we are not explaining, the output of this method must be parseable, so serialize our
    // $unwind into a separate stage.
    if (_unwind && !explain) {
        (*_unwind)->serializeToArray(array);
    }
}
"CWE-416"
] | mongo | d6133a3a5464fac202f512b0310dfeb200c126f9 | 127,926,943,447,775,840,000,000,000,000,000,000,000 | 38 | SERVER-43350 $lookup with no local default or user-specified collation should explicitly set the simple collation on the foreign expression context |
/* get_params handler for the LPRN printer device: first reports the
 * generic printer parameters, then the LPRN-specific ones.  The first
 * negative status from any param_write_* call is remembered and returned,
 * but every parameter is still written so the list is complete. */
lprn_get_params(gx_device * dev, gs_param_list * plist)
{
    gx_device_lprn *const lprn = (gx_device_lprn *) dev;
    int code = gdev_prn_get_params(dev, plist);
    int ncode;

    if (code < 0)
        return code;

    ncode = param_write_bool(plist, "ManualFeed", &lprn->ManualFeed);
    if (ncode < 0)
        code = ncode;

    ncode = param_write_bool(plist, "NegativePrint", &lprn->NegativePrint);
    if (ncode < 0)
        code = ncode;

    ncode = param_write_bool(plist, "Tumble", &lprn->Tumble);
    if (ncode < 0)
        code = ncode;

    ncode = param_write_bool(plist, "RITOff", &lprn->RITOff);
    if (ncode < 0)
        code = ncode;

    ncode = param_write_int(plist, "BlockLine", &lprn->BlockLine);
    if (ncode < 0)
        code = ncode;

    ncode = param_write_int(plist, "BlockWidth", &lprn->nBw);
    if (ncode < 0)
        code = ncode;

    ncode = param_write_int(plist, "BlockHeight", &lprn->nBh);
    if (ncode < 0)
        code = ncode;

    ncode = param_write_bool(plist, "ShowBubble", &lprn->ShowBubble);
    if (ncode < 0)
        code = ncode;

    return code;
}
"CWE-787"
] | ghostpdl | 450da26a76286a8342ec0864b3d113856709f8f6 | 259,823,342,956,044,660,000,000,000,000,000,000,000 | 35 | Bug 701785: fixed sanitizer heap-buffer-overflow in lprn_is_black().
In contrib/lips4/gdevlprn.c:lprn_is_black(), it seems that bpl is not
necessarily a multiple of lprn->nBw, so we need to explicitly avoid straying
into the next line's data.
This also avoids accessing beyond our buffer if we are already on the last
line, and so fixes the sanitizer error.
Fixes:
./sanbin/gs -sOutputFile=tmp -sDEVICE=lips2p ../bug-701785.pdf |
/* Try to parse and validate 'token' as a JWT (header.body.signature).
 *
 * *is_jwt_r is set to FALSE until the token has passed the structural
 * checks (three dot-separated parts, decodable JSON header); only after
 * that point does a failure mean "invalid JWT" rather than "not a JWT",
 * which lets the caller fall back to other token types.
 *
 * On success the claims are appended to 'fields' and 0 is returned;
 * on failure -1 is returned with *error_r describing the problem. */
int oauth2_try_parse_jwt(const struct oauth2_settings *set,
			 const char *token, ARRAY_TYPE(oauth2_field) *fields,
			 bool *is_jwt_r, const char **error_r)
{
	const char *const *blobs = t_strsplit(token, ".");
	int ret;

	i_assert(set->key_dict != NULL);

	/* we don't know if it's JWT token yet */
	*is_jwt_r = FALSE;

	/* A JWT is exactly header.body.signature. */
	if (str_array_length(blobs) != 3) {
		*error_r = "Not a JWT token";
		return -1;
	}

	/* attempt to decode header */
	buffer_t *header =
		t_base64url_decode_str(BASE64_DECODE_FLAG_NO_PADDING, blobs[0]);
	if (header->used == 0) {
		*error_r = "Not a JWT token";
		return -1;
	}

	struct json_tree *header_tree;
	if (oauth2_json_tree_build(header, &header_tree, error_r) < 0)
		return -1;

	/* Extract the signing algorithm and key id from the header. */
	const char *alg, *kid;
	ret = oauth2_jwt_header_process(header_tree, &alg, &kid, error_r);
	json_tree_deinit(&header_tree);
	if (ret < 0)
		return -1;

	/* it is now assumed to be a JWT token */
	*is_jwt_r = TRUE;

	/* Normalize the key id: absent -> "default"; empty is rejected;
	 * otherwise escape it before using it as a key-dict identifier. */
	if (kid == NULL)
		kid = "default";
	else if (*kid == '\0') {
		*error_r = "'kid' field is empty";
		return -1;
	} else {
		kid = escape_identifier(kid);
	}

	/* parse body */
	struct json_tree *body_tree;
	buffer_t *body =
		t_base64url_decode_str(BASE64_DECODE_FLAG_NO_PADDING, blobs[1]);
	if (oauth2_json_tree_build(body, &body_tree, error_r) == -1)
		return -1;

	/* Validate the signature and extract the claims into 'fields'. */
	ret = oauth2_jwt_body_process(set, alg, kid, fields, body_tree, blobs,
				      error_r);
	json_tree_deinit(&body_tree);
	return ret;
}
"CWE-22"
] | core | 15682a20d5589ebf5496b31c55ecf9238ff2457b | 145,172,370,476,699,340,000,000,000,000,000,000,000 | 60 | lib-oauth2: Do not escape '.'
This is not really needed and just makes things difficult. |
// Error-checked conversion entry point: delegates to the direct
// (non-validating) converter and always reports success.  The unnamed
// trailing ParmStr keeps the signature uniform with other convert_ec
// overloads that do use it.
PosibErr<void> convert_ec(const char * in0, int size,
                          CharVector & out, ParmStr) const {
  ConvDirect::convert(in0, size, out);
  return no_err;
}
"CWE-125"
] | aspell | de29341638833ba7717bd6b5e6850998454b044b | 198,899,367,277,557,140,000,000,000,000,000,000,000 | 5 | Don't allow null-terminated UCS-2/4 strings using the original API.
Detect if the encoding is UCS-2/4 and the length is -1 in affected API
functions and refuse to convert the string. If the string ends up
being converted somehow, abort with an error message in DecodeDirect
and ConvDirect. To convert a null terminated string in
Decode/ConvDirect, a negative number corresponding to the width of the
underlying character type for the encoding is expected; for example,
if the encoding is "ucs-2" then a the size is expected to be -2.
Also fix a 1-3 byte over-read in DecodeDirect when reading UCS-2/4
strings when a size is provided (found by OSS-Fuzz).
Also fix a bug in DecodeDirect that caused DocumentChecker to return
the wrong offsets when working with UCS-2/4 strings. |
/* Delete the (old) mailbox left behind by a rename.  The mailbox name is
 * copied up front because the delete releases the mailbox object; the
 * copy is used for the failure log message and freed on every path.
 * Returns the status of the delete. */
EXPORTED int mailbox_rename_cleanup(struct mailbox **mailboxptr)
{
    struct mailbox *oldmailbox = *mailboxptr;
    char *name = xstrdup(mailbox_name(oldmailbox));
    int r = mailbox_delete_internal(mailboxptr);

    if (r)
        syslog(LOG_CRIT,
               "Rename Failure during mailbox_rename_cleanup (%s), " \
               "potential leaked space (%s)", name,
               error_message(r));

    free(name);

    return r;
}
/* Real-mode setup for the VM: install the TSS area first, then the
 * identity map; the second step is skipped if the first fails.
 * Returns 1 on success, 0 on failure. */
static int init_rmode(struct kvm *kvm)
{
	return init_rmode_tss(kvm) && init_rmode_identity_map(kvm);
}
"CWE-20"
] | linux-2.6 | 16175a796d061833aacfbd9672235f2d2725df65 | 157,928,279,618,264,660,000,000,000,000,000,000,000 | 8 | KVM: VMX: Don't allow uninhibited access to EFER on i386
vmx_set_msr() does not allow i386 guests to touch EFER, but they can still
do so through the default: label in the switch. If they set EFER_LME, they
can oops the host.
Fix by having EFER access through the normal channel (which will check for
EFER_LME) even on i386.
Reported-and-tested-by: Benjamin Gilbert <[email protected]>
Cc: [email protected]
Signed-off-by: Avi Kivity <[email protected]> |
/* Perform a linefeed at the cursor position.  A linefeed is ignored while
 * the cursor is horizontally outside the left/right margins.  At the
 * bottom margin the scroll region scrolls up one line; otherwise the
 * cursor simply moves down (clamped to the last screen row).  Any pending
 * wrap state is cleared. */
write_linefeed(void)
{
  term_cursor *curs = &term.curs;

  /* Outside the horizontal margins a linefeed does nothing. */
  if (curs->x < term.marg_left || curs->x > term.marg_right)
    return;

  /* The current line can no longer be a wrap continuation. */
  clear_wrapcontd(term.lines[curs->y], curs->y);

  if (curs->y == term.marg_bot)
    /* At the bottom of the scroll region: scroll it up by one line. */
    term_do_scroll(term.marg_top, term.marg_bot, 1, true);
  else if (curs->y < term.rows - 1)
    curs->y++;

  curs->wrapnext = false;
}
"CWE-703",
"CWE-770"
] | mintty | bd52109993440b6996760aaccb66e68e782762b9 | 339,448,369,390,761,080,000,000,000,000,000,000,000 | 13 | tame some window operations, just in case |
/* Main systemd manager event loop.  Runs until something moves
 * m->exit_code away from MANAGER_RUNNING.  Each iteration drains the
 * internal work queues in fixed priority order (load, gc, cleanup,
 * cgroup, dbus) and only blocks in sd_event_run() when all of them are
 * empty.  A rate limit throttles the loop if it spins abnormally fast.
 * Returns the final exit code, or a negative errno on loop failure. */
int manager_loop(Manager *m) {
        int r;

        /* Allow at most 50000 iterations per second before throttling. */
        RATELIMIT_DEFINE(rl, 1*USEC_PER_SEC, 50000);

        assert(m);
        m->exit_code = MANAGER_RUNNING;

        /* Release the path cache */
        set_free_free(m->unit_path_cache);
        m->unit_path_cache = NULL;

        manager_check_finished(m);

        /* There might still be some zombies hanging around from
         * before we were exec()'ed. Let's reap them. */
        r = manager_dispatch_sigchld(m);
        if (r < 0)
                return r;

        while (m->exit_code == MANAGER_RUNNING) {
                usec_t wait_usec;

                /* Keep the hardware watchdog fed (system instance only). */
                if (m->runtime_watchdog > 0 && m->running_as == SYSTEMD_SYSTEM)
                        watchdog_ping();

                if (!ratelimit_test(&rl)) {
                        /* Yay, something is going seriously wrong, pause a little */
                        log_warning("Looping too fast. Throttling execution a little.");
                        sleep(1);
                        continue;
                }

                /* Drain one queue per pass; 'continue' re-runs the watchdog
                 * and rate-limit checks before the next queue is touched. */
                if (manager_dispatch_load_queue(m) > 0)
                        continue;

                if (manager_dispatch_gc_queue(m) > 0)
                        continue;

                if (manager_dispatch_cleanup_queue(m) > 0)
                        continue;

                if (manager_dispatch_cgroup_queue(m) > 0)
                        continue;

                if (manager_dispatch_dbus_queue(m) > 0)
                        continue;

                /* Sleep for half the watchdog time */
                if (m->runtime_watchdog > 0 && m->running_as == SYSTEMD_SYSTEM) {
                        wait_usec = m->runtime_watchdog / 2;
                        if (wait_usec <= 0)
                                wait_usec = 1;
                } else
                        wait_usec = (usec_t) -1;

                r = sd_event_run(m->event, wait_usec);
                if (r < 0) {
                        log_error("Failed to run event loop: %s", strerror(-r));
                        return r;
                }
        }

        return m->exit_code;
}
In some cases it is interesting to map a PID to two units at the same
time. For example, when a user logs in via a getty, which is reexeced to
/sbin/login that binary will be explicitly referenced as main pid of the
getty service, as well as implicitly referenced as part of the session
scope. |
/* Decide whether the transaction identified by @cookie has completed,
 * given the channel's last completed and last issued cookies.  The two
 * branches handle the normal case and the case where the cookie counter
 * has wrapped between last_complete and last_used. */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	int done;

	if (last_complete <= last_used)
		done = (cookie <= last_complete) || (cookie > last_used);
	else
		done = (cookie <= last_complete) && (cookie > last_used);

	return done ? DMA_COMPLETE : DMA_IN_PROGRESS;
}
Per commit "77873803363c net_dma: mark broken" net_dma is no longer used
and there is no plan to fix it.
This is the mechanical removal of bits in CONFIG_NET_DMA ifdef guards.
Reverting the remainder of the net_dma induced changes is deferred to
subsequent patches.
Marked for stable due to Roman's report of a memory leak in
dma_pin_iovec_pages():
https://lkml.org/lkml/2014/9/3/177
Cc: Dave Jiang <[email protected]>
Cc: Vinod Koul <[email protected]>
Cc: David Whipple <[email protected]>
Cc: Alexander Duyck <[email protected]>
Cc: <[email protected]>
Reported-by: Roman Gushchin <[email protected]>
Acked-by: David S. Miller <[email protected]>
Signed-off-by: Dan Williams <[email protected]> |
/* VMX back-end for guest MSR writes.  MSRs with dedicated VMCS fields or
 * per-vcpu storage are handled here; everything else falls through to
 * kvm_set_msr_common() or, for VMX-specific MSRs, vmx_set_vmx_msr().
 * Returns 0 on success, non-zero to inject #GP into the guest. */
static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct shared_msr_entry *msr;
	int ret = 0;
	u32 msr_index = msr_info->index;
	u64 data = msr_info->data;

	switch (msr_index) {
	case MSR_EFER:
		/* EFER needs the common checks (reserved bits, LME rules). */
		ret = kvm_set_msr_common(vcpu, msr_info);
		break;
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		/* Segment bases live in the VMCS; invalidate the cached copy. */
		vmx_segment_cache_clear(vmx);
		vmcs_writel(GUEST_FS_BASE, data);
		break;
	case MSR_GS_BASE:
		vmx_segment_cache_clear(vmx);
		vmcs_writel(GUEST_GS_BASE, data);
		break;
	case MSR_KERNEL_GS_BASE:
		/* Stored per-vcpu and swapped in on guest entry. */
		vmx_load_host_state(vmx);
		vmx->msr_guest_kernel_gs_base = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		vmcs_write32(GUEST_SYSENTER_CS, data);
		break;
	case MSR_IA32_SYSENTER_EIP:
		vmcs_writel(GUEST_SYSENTER_EIP, data);
		break;
	case MSR_IA32_SYSENTER_ESP:
		vmcs_writel(GUEST_SYSENTER_ESP, data);
		break;
	case MSR_IA32_TSC:
		kvm_write_tsc(vcpu, msr_info);
		break;
	case MSR_IA32_CR_PAT:
		/* Use the VMCS PAT field only when the hardware supports
		 * loading it on VM entry; otherwise fall back to common code. */
		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
			vmcs_write64(GUEST_IA32_PAT, data);
			vcpu->arch.pat = data;
			break;
		}
		ret = kvm_set_msr_common(vcpu, msr_info);
		break;
	case MSR_IA32_TSC_ADJUST:
		ret = kvm_set_msr_common(vcpu, msr_info);
		break;
	case MSR_TSC_AUX:
		/* Only valid when RDTSCP is exposed to the guest. */
		if (!vmx->rdtscp_enabled)
			return 1;
		/* Check reserved bit, higher 32 bits should be zero */
		if ((data >> 32) != 0)
			return 1;
		/* Otherwise falls through */
	default:
		/* VMX capability MSRs first, then the shared user-return MSR
		 * list, and finally the generic KVM handler. */
		if (vmx_set_vmx_msr(vcpu, msr_info))
			break;
		msr = find_msr_entry(vmx, msr_index);
		if (msr) {
			msr->data = data;
			if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
				/* MSR is live on this CPU; update the shared
				 * slot with preemption disabled so it cannot
				 * be migrated mid-update. */
				preempt_disable();
				kvm_set_shared_msr(msr->index, msr->data,
						   msr->mask);
				preempt_enable();
			}
			break;
		}
		ret = kvm_set_msr_common(vcpu, msr_info);
	}

	return ret;
}
"CWE-20"
] | linux | bfd0a56b90005f8c8a004baf407ad90045c2b11e | 5,992,821,257,388,777,000,000,000,000,000,000,000 | 75 | nEPT: Nested INVEPT
If we let L1 use EPT, we should probably also support the INVEPT instruction.
In our current nested EPT implementation, when L1 changes its EPT table
for L2 (i.e., EPT12), L0 modifies the shadow EPT table (EPT02), and in
the course of this modification already calls INVEPT. But if last level
of shadow page is unsync not all L1's changes to EPT12 are intercepted,
which means roots need to be synced when L1 calls INVEPT. Global INVEPT
should not be different since roots are synced by kvm_mmu_load() each
time EPTP02 changes.
Reviewed-by: Xiao Guangrong <[email protected]>
Signed-off-by: Nadav Har'El <[email protected]>
Signed-off-by: Jun Nakajima <[email protected]>
Signed-off-by: Xinhao Xu <[email protected]>
Signed-off-by: Yang Zhang <[email protected]>
Signed-off-by: Gleb Natapov <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
/* Print the command-line usage of this helper (log and exec modes) to
 * stdout and terminate the process with exit status 1 via my_exit(). */
void do_usage()
{
  printf("Usage:\n"
         "  %s <user> log <filename>\n"
         "  %s <user> exec <command> <args>\n",
         my_progname, my_progname);
  my_exit(1);
}
// Remove the given notifier from this config's notifier list, deleting it.
// Returns true if the notifier was found (and destroyed), false otherwise.
bool Config::remove_notifier(const Notifier * n)
{
  for (Vector<Notifier *>::iterator i = notifier_list.begin();
       i != notifier_list.end();
       ++i)
  {
    if (*i == n) {
      delete *i;
      notifier_list.erase(i);
      return true;
    }
  }
  return false;
}
"CWE-125"
] | aspell | 80fa26c74279fced8d778351cff19d1d8f44fe4e | 326,734,393,179,366,750,000,000,000,000,000,000,000 | 20 | Fix various bugs found by OSS-Fuze. |
/* Fetch the master's server_uuid over the given connection and store it
 * in mi->master_uuid.  Refuses to replicate from a master whose UUID
 * equals our own (unless replicate_same_server_id is set).
 *
 * Returns 0 on success (or when the variable is simply unknown on an old
 * master, which only warns), 1 on a fatal error that must stop the I/O
 * thread, and 2 on a transient network error that may be retried. */
static int get_master_uuid(MYSQL *mysql, Master_info *mi)
{
  const char *errmsg;
  MYSQL_RES *master_res= NULL;
  MYSQL_ROW master_row= NULL;
  int ret= 0;

  /* Debug-build sync points used by replication test cases. */
  DBUG_EXECUTE_IF("dbug.before_get_MASTER_UUID",
                  {
                    const char act[]= "now wait_for signal.get_master_uuid";
                    DBUG_ASSERT(opt_debug_sync_timeout > 0);
                    DBUG_ASSERT(!debug_sync_set_action(current_thd,
                                                       STRING_WITH_LEN(act)));
                  };);

  DBUG_EXECUTE_IF("dbug.simulate_busy_io",
                  {
                    const char act[]= "now signal Reached wait_for signal.got_stop_slave";
                    DBUG_ASSERT(opt_debug_sync_timeout > 0);
                    DBUG_ASSERT(!debug_sync_set_action(current_thd,
                                                       STRING_WITH_LEN(act)));
                  };);
  if (!mysql_real_query(mysql,
                        STRING_WITH_LEN("SHOW VARIABLES LIKE 'SERVER_UUID'")) &&
      (master_res= mysql_store_result(mysql)) &&
      (master_row= mysql_fetch_row(master_res)))
  {
    /* Row layout: master_row[0] = variable name, master_row[1] = value. */
    if (!strcmp(::server_uuid, master_row[1]) &&
        !mi->rli->replicate_same_server_id)
    {
      errmsg= "The slave I/O thread stops because master and slave have equal "
        "MySQL server UUIDs; these UUIDs must be different for "
        "replication to work.";
      mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, ER(ER_SLAVE_FATAL_ERROR),
                 errmsg);
      // Fatal error
      ret= 1;
    }
    else
    {
      /* Warn if the UUID changed since we last connected, then store it. */
      if (mi->master_uuid[0] != 0 && strcmp(mi->master_uuid, master_row[1]))
        sql_print_warning("The master's UUID has changed, although this should"
                          " not happen unless you have changed it manually."
                          " The old UUID was %s.",
                          mi->master_uuid);
      strncpy(mi->master_uuid, master_row[1], UUID_LENGTH);
      mi->master_uuid[UUID_LENGTH]= 0;
    }
  }
  else if (mysql_errno(mysql))
  {
    if (is_network_error(mysql_errno(mysql)))
    {
      /* Transient: report and let the caller retry the connection. */
      mi->report(WARNING_LEVEL, mysql_errno(mysql),
                 "Get master SERVER_UUID failed with error: %s",
                 mysql_error(mysql));
      ret= 2;
    }
    else
    {
      /* Fatal error */
      errmsg= "The slave I/O thread stops because a fatal error is encountered "
        "when it tries to get the value of SERVER_UUID variable from master.";
      mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, ER(ER_SLAVE_FATAL_ERROR),
                 errmsg);
      ret= 1;
    }
  }
  else if (!master_row && master_res)
  {
    /* Old master without SERVER_UUID support: warn but do not fail. */
    mi->report(WARNING_LEVEL, ER_UNKNOWN_SYSTEM_VARIABLE,
               "Unknown system variable 'SERVER_UUID' on master. "
               "A probable cause is that the variable is not supported on the "
               "master (version: %s), even though it is on the slave (version: %s)",
               mysql->server_version, server_version);
  }

  if (master_res)
    mysql_free_result(master_res);
  return ret;
}
"CWE-284",
"CWE-295"
] | mysql-server | 3bd5589e1a5a93f9c224badf983cd65c45215390 | 278,801,511,585,388,800,000,000,000,000,000,000,000 | 81 | WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to eunecrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options |
/* Recursive worker for splitting a whitespace-separated string into an
 * argv-style array.  's' is tokenized IN PLACE (separators are replaced
 * with NULs); 'n' counts tokens collected so far.  The recursion bottoms
 * out at end-of-string, where the array is allocated exactly once with
 * the final count, and each unwinding frame then stores its token at
 * slot n.  The argv entries point into the caller's string, so the
 * string must outlive the array. */
strtoargvsub(isc_mem_t *mctx, char *s, unsigned int *argcp,
	     char ***argvp, unsigned int n)
{
	isc_result_t result;

	/* Discard leading whitespace. */
	while (*s == ' ' || *s == '\t')
		s++;
	if (*s == '\0') {
		/* We have reached the end of the string. */
		*argcp = n;
		*argvp = isc_mem_get(mctx, n * sizeof(char *));
		if (*argvp == NULL)
			return (ISC_R_NOMEMORY);
	} else {
		char *p = s;
		/* Find the end of this token and NUL-terminate it. */
		while (*p != ' ' && *p != '\t' && *p != '\0')
			p++;
		if (*p != '\0')
			*p++ = '\0';

		/* Recurse for the rest of the string, then record this token. */
		result = strtoargvsub(mctx, p, argcp, argvp, n + 1);
		if (result != ISC_R_SUCCESS)
			return (result);
		(*argvp)[n] = s;
	}
	return (ISC_R_SUCCESS);
}
"CWE-269"
] | bind9 | e4cccf9668c7adee4724a7649ec64685f82c8677 | 48,754,042,564,353,010,000,000,000,000,000,000,000 | 29 | Update-policy 'subdomain' was incorrectly treated as 'zonesub'
resulting in names outside the specified subdomain having the wrong
restrictions for the given key. |
/* Coroutine entry point used for write-type requests on a read-only 9p
 * export: immediately completes the PDU with -EROFS. */
static void v9fs_fs_ro(void *opaque)
{
    V9fsPDU *pdu = opaque;
    pdu_complete(pdu, -EROFS);
}
"CWE-399",
"CWE-772"
] | qemu | e95c9a493a5a8d6f969e86c9f19f80ffe6587e19 | 238,312,080,752,025,000,000,000,000,000,000,000,000 | 5 | 9pfs: fix potential host memory leak in v9fs_read
In 9pfs read dispatch function, it doesn't free two QEMUIOVector
object thus causing potential memory leak. This patch avoid this.
Signed-off-by: Li Qiang <[email protected]>
Signed-off-by: Greg Kurz <[email protected]> |
/**
 * Dispatch one secondary drawing order from the update stream.
 *
 * Reads the common secondary-order header (orderLength, extraFlags,
 * orderType), validates that the advertised payload is present, invokes the
 * matching secondary-update callback, and finally re-aligns the stream to
 * the end of the order as announced by orderLength.
 *
 * Fix (CVE-2020-4032): the trailing bounds check was inverted. With
 * `if (start > end)` a handler that consumed MORE bytes than orderLength
 * advertised slipped through, so `diff = start - end` underflowed (size_t)
 * and Stream_Seek() jumped far out of bounds. The over-read case is
 * `start < end`, which must be rejected.
 *
 * @return TRUE on success, FALSE on malformed input or handler failure.
 */
static BOOL update_recv_secondary_order(rdpUpdate* update, wStream* s, BYTE flags)
{
	BOOL rc = FALSE;
	size_t start, end, diff;
	BYTE orderType;
	UINT16 extraFlags;
	UINT16 orderLength;
	rdpContext* context = update->context;
	rdpSettings* settings = context->settings;
	rdpSecondaryUpdate* secondary = update->secondary;
	const char* name;

	if (Stream_GetRemainingLength(s) < 5)
	{
		WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 5");
		return FALSE;
	}

	Stream_Read_UINT16(s, orderLength); /* orderLength (2 bytes) */
	Stream_Read_UINT16(s, extraFlags); /* extraFlags (2 bytes) */
	Stream_Read_UINT8(s, orderType); /* orderType (1 byte) */

	/* orderLength is a UINT16, so orderLength + 7U cannot overflow here. */
	if (Stream_GetRemainingLength(s) < orderLength + 7U)
	{
		WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) %" PRIuz " < %" PRIu16,
		           Stream_GetRemainingLength(s), orderLength + 7);
		return FALSE;
	}

	start = Stream_GetPosition(s);
	name = secondary_order_string(orderType);
	WLog_Print(update->log, WLOG_DEBUG, "Secondary Drawing Order %s", name);

	if (!check_secondary_order_supported(update->log, settings, orderType, name))
		return FALSE;

	switch (orderType)
	{
		case ORDER_TYPE_BITMAP_UNCOMPRESSED:
		case ORDER_TYPE_CACHE_BITMAP_COMPRESSED:
		{
			const BOOL compressed = (orderType == ORDER_TYPE_CACHE_BITMAP_COMPRESSED);
			CACHE_BITMAP_ORDER* order =
			    update_read_cache_bitmap_order(update, s, compressed, extraFlags);

			if (order)
			{
				rc = IFCALLRESULT(FALSE, secondary->CacheBitmap, context, order);
				free_cache_bitmap_order(context, order);
			}
		}
		break;

		case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2:
		case ORDER_TYPE_BITMAP_COMPRESSED_V2:
		{
			const BOOL compressed = (orderType == ORDER_TYPE_BITMAP_COMPRESSED_V2);
			CACHE_BITMAP_V2_ORDER* order =
			    update_read_cache_bitmap_v2_order(update, s, compressed, extraFlags);

			if (order)
			{
				rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV2, context, order);
				free_cache_bitmap_v2_order(context, order);
			}
		}
		break;

		case ORDER_TYPE_BITMAP_COMPRESSED_V3:
		{
			CACHE_BITMAP_V3_ORDER* order = update_read_cache_bitmap_v3_order(update, s, extraFlags);

			if (order)
			{
				rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV3, context, order);
				free_cache_bitmap_v3_order(context, order);
			}
		}
		break;

		case ORDER_TYPE_CACHE_COLOR_TABLE:
		{
			CACHE_COLOR_TABLE_ORDER* order =
			    update_read_cache_color_table_order(update, s, extraFlags);

			if (order)
			{
				rc = IFCALLRESULT(FALSE, secondary->CacheColorTable, context, order);
				free_cache_color_table_order(context, order);
			}
		}
		break;

		case ORDER_TYPE_CACHE_GLYPH:
		{
			/* The wire format of glyph cache orders depends on the negotiated
			 * glyph support level. */
			switch (settings->GlyphSupportLevel)
			{
				case GLYPH_SUPPORT_PARTIAL:
				case GLYPH_SUPPORT_FULL:
				{
					CACHE_GLYPH_ORDER* order = update_read_cache_glyph_order(update, s, extraFlags);

					if (order)
					{
						rc = IFCALLRESULT(FALSE, secondary->CacheGlyph, context, order);
						free_cache_glyph_order(context, order);
					}
				}
				break;

				case GLYPH_SUPPORT_ENCODE:
				{
					CACHE_GLYPH_V2_ORDER* order =
					    update_read_cache_glyph_v2_order(update, s, extraFlags);

					if (order)
					{
						rc = IFCALLRESULT(FALSE, secondary->CacheGlyphV2, context, order);
						free_cache_glyph_v2_order(context, order);
					}
				}
				break;

				case GLYPH_SUPPORT_NONE:
				default:
					break;
			}
		}
		break;

		case ORDER_TYPE_CACHE_BRUSH:
			/* [MS-RDPEGDI] 2.2.2.2.1.2.7 Cache Brush (CACHE_BRUSH_ORDER) */
			{
				CACHE_BRUSH_ORDER* order = update_read_cache_brush_order(update, s, extraFlags);

				if (order)
				{
					rc = IFCALLRESULT(FALSE, secondary->CacheBrush, context, order);
					free_cache_brush_order(context, order);
				}
			}
			break;

		default:
			WLog_Print(update->log, WLOG_WARN, "SECONDARY ORDER %s not supported", name);
			break;
	}

	if (!rc)
	{
		WLog_Print(update->log, WLOG_ERROR, "SECONDARY ORDER %s failed", name);
	}

	/* Expected end-of-order position as announced by the header. */
	start += orderLength + 7;
	end = Stream_GetPosition(s);

	if (start < end)
	{
		/* The handler consumed more bytes than the order advertised: reject
		 * the order instead of computing an underflowing skip below. */
		WLog_Print(update->log, WLOG_WARN, "SECONDARY_ORDER %s: read %" PRIuz "bytes too much",
		           name, end - start);
		return FALSE;
	}

	diff = start - end; /* non-negative here, safe for size_t */

	if (diff > 0)
	{
		WLog_Print(update->log, WLOG_DEBUG,
		           "SECONDARY_ORDER %s: read %" PRIuz "bytes short, skipping", name, diff);
		Stream_Seek(s, diff);
	}

	return rc;
} | 1 | [
"CWE-703",
"CWE-681"
] | FreeRDP | e7bffa64ef5ed70bac94f823e2b95262642f5296 | 17,145,863,199,436,656,000,000,000,000,000,000,000 | 169 | Fixed OOB read in update_recv_secondary_order
CVE-2020-4032 thanks to @antonio-morales for finding this. |
/* GHFunc-style callback: resolve the folder id in @key to its full name and
 * prepend it to the CamelEwsStore's update_folder_names list; ownership of
 * the name string passes to the list. The @value argument is unused. */
get_folder_names_to_update (gpointer key,
                            gpointer value,
                            gpointer user_data)
{
	CamelEwsStore *store = user_data;
	gchar *full_name;

	full_name = camel_ews_store_summary_get_folder_full_name (store->summary, (const gchar *) key, NULL);
	if (full_name == NULL)
		return;

	store->priv->update_folder_names = g_slist_prepend (store->priv->update_folder_names, full_name);
} | 0 | [
"CWE-295"
] | evolution-ews | 915226eca9454b8b3e5adb6f2fff9698451778de | 101,663,125,641,964,000,000,000,000,000,000,000,000 | 12 | I#27 - SSL Certificates are not validated
This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too.
Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27 |
Parse a set of mail headers contained in a string, and return an object similar to imap_headerinfo() */
PHP_FUNCTION(imap_rfc822_parse_headers)
{
	zend_string *headers, *defaulthost = NULL;
	ENVELOPE *en;
	int argc = ZEND_NUM_ARGS();
	/* headers is mandatory; defaulthost is an optional second string */
	if (zend_parse_parameters(argc, "S|S", &headers, &defaulthost) == FAILURE) {
		return;
	}
	/* Parse into a c-client ENVELOPE, using the caller-supplied default host
	 * when given, otherwise the literal "UNKNOWN". */
	if (argc == 2) {
		rfc822_parse_msg(&en, NULL, ZSTR_VAL(headers), ZSTR_LEN(headers), NULL, ZSTR_VAL(defaulthost), NIL);
	} else {
		rfc822_parse_msg(&en, NULL, ZSTR_VAL(headers), ZSTR_LEN(headers), NULL, "UNKNOWN", NIL);
	}
	/* call a function to parse all the text, so that we can use the
	same function no matter where the headers are from */
	_php_make_header_object(return_value, en);
	mail_free_envelope(&en); | 0 | [
"CWE-88"
] | php-src | 336d2086a9189006909ae06c7e95902d7d5ff77e | 55,314,802,216,250,140,000,000,000,000,000,000,000 | 21 | Disable rsh/ssh functionality in imap by default (bug #77153) |
/*
 * Map a virtio-crypto MAC/auth algorithm identifier to the corresponding
 * DPDK rte_crypto_auth_algorithm value.
 *
 * Returns 0 on success (with *algo set), -VIRTIO_CRYPTO_NOTSUPP for
 * identifiers that are recognised but have no mapping here, and
 * -VIRTIO_CRYPTO_BADMSG for unknown identifiers.
 */
auth_algo_transform(uint32_t virtio_auth_algo,
		enum rte_crypto_auth_algorithm *algo)
{
	switch (virtio_auth_algo) {
	case VIRTIO_CRYPTO_NO_MAC:
		*algo = RTE_CRYPTO_AUTH_NULL;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_MD5:
		*algo = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA1:
		*algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_224:
		*algo = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_256:
		*algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_384:
		*algo = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_512:
		*algo = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_CMAC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_CMAC;
		break;
	case VIRTIO_CRYPTO_MAC_KASUMI_F9:
		*algo = RTE_CRYPTO_AUTH_KASUMI_F9;
		break;
	case VIRTIO_CRYPTO_MAC_SNOW3G_UIA2:
		*algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
		break;
	case VIRTIO_CRYPTO_MAC_GMAC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_GMAC;
		break;
	case VIRTIO_CRYPTO_MAC_CBCMAC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_CBC_MAC;
		break;
	case VIRTIO_CRYPTO_MAC_XCBC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
		break;
	/* Recognised by the virtio spec but not mapped here. */
	case VIRTIO_CRYPTO_MAC_CMAC_3DES:
	case VIRTIO_CRYPTO_MAC_GMAC_TWOFISH:
	case VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9:
		return -VIRTIO_CRYPTO_NOTSUPP;
	default:
		return -VIRTIO_CRYPTO_BADMSG;
	}

	return 0;
} | 0 | [
"CWE-125"
] | dpdk | acd4c92fa693bbea695f2bb42bb93fb8567c3ca5 | 285,440,866,895,512,300,000,000,000,000,000,000,000 | 53 | vhost/crypto: validate keys lengths
transform_cipher_param() and transform_chain_param() handle
the payload data for the VHOST_USER_CRYPTO_CREATE_SESS
message. These payloads have to be validated, since it
could come from untrusted sources.
Two buffers and their lengths are defined in this payload,
one the the auth key and one for the cipher key. But above
functions do not validate the key length inputs, which could
lead to read out of bounds, as buffers have static sizes of
64 bytes for the cipher key and 512 bytes for the auth key.
This patch adds necessary checks on the key length field
before being used.
CVE-2020-10724
Fixes: e80a98708166 ("vhost/crypto: add session message handler")
Cc: [email protected]
Reported-by: Ilja Van Sprundel <[email protected]>
Signed-off-by: Maxime Coquelin <[email protected]>
Reviewed-by: Xiaolong Ye <[email protected]>
Reviewed-by: Ilja Van Sprundel <[email protected]> |
/*
 * Netdevice event handler for AF_PACKET sockets.
 *
 * Walks every packet socket in the device's netns under RCU and reacts to
 * the device's lifecycle:
 *  - NETDEV_UNREGISTER: drop multicast memberships for the device, then
 *    fall through to the DOWN handling and additionally forget the cached
 *    device/ifindex binding (releasing the held device reference).
 *  - NETDEV_DOWN: if the socket is bound to this device, unhook the
 *    protocol handler and signal ENETDOWN to the socket owner.
 *  - NETDEV_UP: re-register the protocol hook for sockets still bound
 *    (po->num set) to this device.
 */
static int packet_notifier(struct notifier_block *this,
			   unsigned long msg, void *ptr)
{
	struct sock *sk;
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	rcu_read_lock();
	sk_for_each_rcu(sk, &net->packet.sklist) {
		struct packet_sock *po = pkt_sk(sk);
		switch (msg) {
		case NETDEV_UNREGISTER:
			if (po->mclist)
				packet_dev_mclist_delete(dev, &po->mclist);
			fallthrough;
		case NETDEV_DOWN:
			if (dev->ifindex == po->ifindex) {
				/* bind_lock serialises against bind/unbind paths */
				spin_lock(&po->bind_lock);
				if (po->running) {
					__unregister_prot_hook(sk, false);
					sk->sk_err = ENETDOWN;
					if (!sock_flag(sk, SOCK_DEAD))
						sk->sk_error_report(sk);
				}
				if (msg == NETDEV_UNREGISTER) {
					packet_cached_dev_reset(po);
					po->ifindex = -1;
					if (po->prot_hook.dev)
						dev_put(po->prot_hook.dev);
					po->prot_hook.dev = NULL;
				}
				spin_unlock(&po->bind_lock);
			}
			break;
		case NETDEV_UP:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->num)
					register_prot_hook(sk);
				spin_unlock(&po->bind_lock);
			}
			break;
		}
	}
	rcu_read_unlock();
	return NOTIFY_DONE;
} | 0 | [
"CWE-787"
] | linux | acf69c946233259ab4d64f8869d4037a198c7f06 | 63,478,354,422,236,240,000,000,000,000,000,000,000 | 49 | net/packet: fix overflow in tpacket_rcv
Using tp_reserve to calculate netoff can overflow as
tp_reserve is unsigned int and netoff is unsigned short.
This may lead to macoff receving a smaller value then
sizeof(struct virtio_net_hdr), and if po->has_vnet_hdr
is set, an out-of-bounds write will occur when
calling virtio_net_hdr_from_skb.
The bug is fixed by converting netoff to unsigned int
and checking if it exceeds USHRT_MAX.
This addresses CVE-2020-14386
Fixes: 8913336a7e8d ("packet: add PACKET_RESERVE sockopt")
Signed-off-by: Or Cohen <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
/*
 * value_free - release the dynamic members of a value structure.
 *
 * Frees the data and signature buffers (if any) and zeroes the whole
 * structure so stale pointers cannot be reused or double-freed.
 * A NULL vp is tolerated and ignored.
 */
value_free(
	struct value *vp	/* value structure */
	)
{
	if (vp == NULL)
		return;

	/* free(NULL) is a no-op, so no NULL guards are needed */
	free(vp->ptr);
	free(vp->sig);
	memset(vp, 0, sizeof(struct value));
} | 0 | [
"CWE-20"
] | ntp | c4cd4aaf418f57f7225708a93bf48afb2bc9c1da | 233,715,250,624,459,500,000,000,000,000,000,000,000 | 10 | CVE-2014-9297 |
/*
 * Tear down an X.25 socket: stop its timers, unlink it, flush its queues,
 * and either drop the final reference now or arm a timer to retry while
 * buffers are still outstanding.
 */
static void __x25_destroy_socket(struct sock *sk)
{
	struct sk_buff *skb;
	x25_stop_heartbeat(sk);
	x25_stop_timer(sk);
	x25_remove_socket(sk);
	x25_clear_queues(sk);		/* Flush the queues */
	/* Any skb on the receive queue owned by a different sock is an
	 * unaccepted incoming connection: mark that sock for destruction. */
	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (skb->sk != sk) {		/* A pending connection */
			/*
			 * Queue the unaccepted socket for death
			 */
			skb->sk->sk_state = TCP_LISTEN;
			sock_set_flag(skb->sk, SOCK_DEAD);
			x25_start_heartbeat(skb->sk);
			x25_sk(skb->sk)->state = X25_STATE_0;
		}
		kfree_skb(skb);
	}
	if (sk_has_allocations(sk)) {
		/* Defer: outstanding buffers — retry destruction in 10 seconds */
		sk->sk_timer.expires = jiffies + 10 * HZ;
		sk->sk_timer.function = x25_destroy_timer;
		sk->sk_timer.data = (unsigned long)sk;
		add_timer(&sk->sk_timer);
	} else {
		/* drop last reference so sock_put will free */
		__sock_put(sk);
	}
} | 0 | [
"CWE-20",
"CWE-269"
] | linux | f3d3342602f8bcbf37d7c46641cb9bca7618eb1c | 79,772,587,699,238,880,000,000,000,000,000,000,000 | 35 | net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
/*
 * Fetch the on-card data of an IAS-ECC Security Data Object.
 *
 * First reads the SDO's class-tagged data; an SC_ERROR_INCORRECT_PARAMETERS
 * result is tolerated because 'GET DATA' fails that way when the SDO has no
 * public data (see the comment below). The DOCP part is then fetched
 * unconditionally; its status is the returned result.
 */
iasecc_sdo_get_data(struct sc_card *card, struct iasecc_sdo *sdo)
{
	struct sc_context *ctx = card->ctx;
	int rv, sdo_tag;
	LOG_FUNC_CALLED(ctx);
	sdo_tag = iasecc_sdo_tag_from_class(sdo->sdo_class);
	rv = iasecc_sdo_get_tagged_data(card, sdo_tag, sdo);
	/* When there is no public data 'GET DATA' returns error */
	if (rv != SC_ERROR_INCORRECT_PARAMETERS)
		LOG_TEST_RET(ctx, rv, "cannot parse ECC SDO data");
	rv = iasecc_sdo_get_tagged_data(card, IASECC_DOCP_TAG, sdo);
	LOG_TEST_RET(ctx, rv, "cannot parse ECC DOCP data");
	LOG_FUNC_RETURN(ctx, rv);
} | 0 | [
"CWE-125"
] | OpenSC | 8fe377e93b4b56060e5bbfb6f3142ceaeca744fa | 49,310,794,631,010,860,000,000,000,000,000,000,000 | 19 | fixed out of bounds reads
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting and suggesting security fixes. |
/*
 * Look up a schema data node by name among the children of @parent (or the
 * top level of @mod). Only nodes matching the @type mask (when non-zero)
 * and belonging to the main module of @mod are considered; the name must
 * match exactly for @nam_len characters and terminate there.
 *
 * Returns EXIT_SUCCESS and stores the node in *ret (if non-NULL) on a
 * match, EXIT_FAILURE otherwise.
 */
lys_getnext_data(const struct lys_module *mod, const struct lys_node *parent, const char *name, int nam_len,
                 LYS_NODE type, int getnext_opts, const struct lys_node **ret)
{
    const struct lys_node *iter = NULL;

    assert((mod || parent) && name);
    assert(!(type & (LYS_AUGMENT | LYS_USES | LYS_GROUPING | LYS_CHOICE | LYS_CASE | LYS_INPUT | LYS_OUTPUT)));

    if (!mod) {
        mod = lys_node_module(parent);
    }

    /* walk the schema siblings, skipping anything that cannot match */
    while ((iter = lys_getnext(iter, parent, mod, getnext_opts))) {
        if (type && !(iter->nodetype & type)) {
            continue;
        }
        /* module check */
        if (lys_node_module(iter) != lys_main_module(mod)) {
            continue;
        }
        /* exact name match: same prefix and nothing after nam_len chars */
        if (strncmp(iter->name, name, nam_len) || iter->name[nam_len]) {
            continue;
        }
        if (ret) {
            *ret = iter;
        }
        return EXIT_SUCCESS;
    }

    return EXIT_FAILURE;
} | 0 | [
"CWE-119"
] | libyang | 32fb4993bc8bb49e93e84016af3c10ea53964be5 | 279,302,476,075,258,240,000,000,000,000,000,000,000 | 33 | schema tree BUGFIX do not check features while still resolving schema
Fixes #723 |
/*
 * Serialise an ofpact_learn into a Nicira NXAST_LEARN action appended to
 * 'out'. The fixed header fields are copied from the learn action; each
 * flow_mod_spec is then emitted as a 16-bit header word followed by its
 * source (field reference or immediate) and destination parts.
 */
static void
encode_LEARN(const struct ofpact_learn *learn,
             enum ofp_version ofp_version OVS_UNUSED, struct ofpbuf *out)
{
    const struct ofpact_learn_spec *spec;
    struct nx_action_learn *nal;
    size_t start_ofs;
    start_ofs = out->size;
    nal = put_NXAST_LEARN(out);
    nal->idle_timeout = htons(learn->idle_timeout);
    nal->hard_timeout = htons(learn->hard_timeout);
    nal->fin_idle_timeout = htons(learn->fin_idle_timeout);
    nal->fin_hard_timeout = htons(learn->fin_hard_timeout);
    nal->priority = htons(learn->priority);
    nal->cookie = learn->cookie;
    nal->flags = htons(learn->flags);
    nal->table_id = learn->table_id;
    OFPACT_LEARN_SPEC_FOR_EACH (spec, learn) {
        /* spec header word: bit count plus src/dst type flags */
        put_u16(out, spec->n_bits | spec->dst_type | spec->src_type);
        if (spec->src_type == NX_LEARN_SRC_FIELD) {
            put_u32(out, nxm_header_from_mff(spec->src.field));
            put_u16(out, spec->src.ofs);
        } else {
            /* Immediate value: zero-filled to a multiple of 16 bits, with
             * the value right-justified within that space. */
            size_t n_dst_bytes = 2 * DIV_ROUND_UP(spec->n_bits, 16);
            uint8_t *bits = ofpbuf_put_zeros(out, n_dst_bytes);
            unsigned int n_bytes = DIV_ROUND_UP(spec->n_bits, 8);
            memcpy(bits + n_dst_bytes - n_bytes, ofpact_learn_spec_imm(spec),
                   n_bytes);
        }
        if (spec->dst_type == NX_LEARN_DST_MATCH ||
            spec->dst_type == NX_LEARN_DST_LOAD) {
            put_u32(out, nxm_header_from_mff(spec->dst.field));
            put_u16(out, spec->dst.ofs);
        }
    }
    /* pad the whole action out to the OpenFlow alignment requirement */
    pad_ofpat(out, start_ofs);
} | 0 | [
"CWE-125"
] | ovs | 9237a63c47bd314b807cda0bd2216264e82edbe8 | 74,713,672,640,811,635,000,000,000,000,000,000,000 | 42 | ofp-actions: Avoid buffer overread in BUNDLE action decoding.
Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9052
Signed-off-by: Ben Pfaff <[email protected]>
Acked-by: Justin Pettit <[email protected]> |
/* Clock-change handler: propagate a slew to the statistics of every source,
 * or reset them entirely on a step of unknown size (in which case the
 * reference is also made unsynchronised, since no source is selectable). */
slew_sources(struct timespec *raw, struct timespec *cooked, double dfreq,
             double doffset, LCL_ChangeType change_type, void *anything)
{
  int i, unknown_step;

  unknown_step = change_type == LCL_ChangeUnknownStep;

  for (i = 0; i < n_sources; i++) {
    if (unknown_step)
      SST_ResetInstance(sources[i]->stats);
    else
      SST_SlewSamples(sources[i]->stats, cooked, dfreq, doffset);
  }

  if (unknown_step) {
    /* After resetting no source is selectable, set reference unsynchronised */
    SRC_SelectSource(NULL);
  }
} | 0 | [
"CWE-59"
] | chrony | e18903a6b56341481a2e08469c0602010bf7bfe3 | 96,061,367,380,911,130,000,000,000,000,000,000,000 | 18 | switch to new util file functions
Replace all fopen(), rename(), and unlink() calls with the new util
functions. |
/* Queue an skb on the socket's receive queue, consuming the skb: on
 * failure the buffer is freed and the error is returned. */
static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int ret = sock_queue_rcv_skb(sk, skb);

	if (ret != 0)
		kfree_skb(skb);

	return ret;
} | 0 | [
"CWE-667"
] | linux | c518adafa39f37858697ac9309c6cf1805581446 | 219,791,797,004,441,300,000,000,000,000,000,000,000 | 10 | vsock: fix the race conditions in multi-transport support
There are multiple similar bugs implicitly introduced by the
commit c0cfa2d8a788fcf4 ("vsock: add multi-transports support") and
commit 6a2c0962105ae8ce ("vsock: prevent transport modules unloading").
The bug pattern:
[1] vsock_sock.transport pointer is copied to a local variable,
[2] lock_sock() is called,
[3] the local variable is used.
VSOCK multi-transport support introduced the race condition:
vsock_sock.transport value may change between [1] and [2].
Let's copy vsock_sock.transport pointer to local variables after
the lock_sock() call.
Fixes: c0cfa2d8a788fcf4 ("vsock: add multi-transports support")
Signed-off-by: Alexander Popov <[email protected]>
Reviewed-by: Stefano Garzarella <[email protected]>
Reviewed-by: Jorgen Hansen <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]> |
/* Decrement this CPU's perf_sched_cb_usages counter; the @pmu argument is
 * unused here. Counterpart of perf_sched_cb_inc — presumably gating the
 * context-switch callback path (confirm against the inc side). */
void perf_sched_cb_dec(struct pmu *pmu)
{
	this_cpu_dec(perf_sched_cb_usages);
} | 0 | [
"CWE-416",
"CWE-362"
] | linux | 12ca6ad2e3a896256f086497a7c7406a547ee373 | 21,975,307,481,105,946,000,000,000,000,000,000,000 | 4 | perf: Fix race in swevent hash
There's a race on CPU unplug where we free the swevent hash array
while it can still have events on. This will result in a
use-after-free which is BAD.
Simply do not free the hash array on unplug. This leaves the thing
around and no use-after-free takes place.
When the last swevent dies, we do a for_each_possible_cpu() iteration
anyway to clean these up, at which time we'll free it, so no leakage
will occur.
Reported-by: Sasha Levin <[email protected]>
Tested-by: Sasha Levin <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Stephane Eranian <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Vince Weaver <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]> |
T atN(const int pos, const int x, const int y, const int z, const int c, const T& out_value) const {
return (pos<0 || pos>=width())?out_value:(*this)(pos,x,y,z,c);
} | 0 | [
"CWE-770"
] | cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 26,182,475,480,758,464,000,000,000,000,000,000,000 | 3 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
/* Table-driven unit test for parse_range_property(): well-formed "hi:lo"
 * pairs and the literal "none" must parse; malformed strings must fail and
 * leave the output variables untouched. Also checks NULL-argument safety. */
START_TEST(range_prop_parser)
{
	struct parser_test_range {
		char *tag;		/* input string */
		bool success;		/* expected parse result */
		int hi, lo;		/* expected outputs on success */
	} tests[] = {
		{ "10:8", true, 10, 8 },
		{ "100:-1", true, 100, -1 },
		{ "-203813:-502023", true, -203813, -502023 },
		{ "238492:28210", true, 238492, 28210 },
		{ "none", true, 0, 0 },
		{ "0:0", false, 0, 0 },
		{ "", false, 0, 0 },
		{ "abcd", false, 0, 0 },
		{ "10:30:10", false, 0, 0 },
		{ NULL, false, 0, 0 }
	};
	int i;
	int hi, lo;
	bool success;
	for (i = 0; tests[i].tag != NULL; i++) {
		/* 0xad sentinel: on failure the outputs must stay untouched */
		hi = lo = 0xad;
		success = parse_range_property(tests[i].tag, &hi, &lo);
		ck_assert(success == tests[i].success);
		if (success) {
			ck_assert_int_eq(hi, tests[i].hi);
			ck_assert_int_eq(lo, tests[i].lo);
		} else {
			ck_assert_int_eq(hi, 0xad);
			ck_assert_int_eq(lo, 0xad);
		}
	}
	/* NULL input must be rejected without crashing */
	success = parse_range_property(NULL, NULL, NULL);
	ck_assert(success == false);
} | 0 | [
"CWE-134"
] | libinput | a423d7d3269dc32a87384f79e29bb5ac021c83d1 | 339,023,174,101,413,160,000,000,000,000,000,000,000 | 38 | evdev: strip the device name of format directives
This fixes a format string vulnerabilty.
evdev_log_message() composes a format string consisting of a fixed
prefix (including the rendered device name) and the passed-in format
buffer. This format string is then passed with the arguments to the
actual log handler, which usually and eventually ends up being printf.
If the device name contains a printf-style format directive, these ended
up in the format string and thus get interpreted correctly, e.g. for a
device "Foo%sBar" the log message vs printf invocation ends up being:
evdev_log_message(device, "some message %s", "some argument");
printf("event9 - Foo%sBar: some message %s", "some argument");
This can enable an attacker to execute malicious code with the
privileges of the process using libinput.
To exploit this, an attacker needs to be able to create a kernel device
with a malicious name, e.g. through /dev/uinput or a Bluetooth device.
To fix this, convert any potential format directives in the device name
by duplicating percentages.
Pre-rendering the device to avoid the issue altogether would be nicer
but the current log level hooks do not easily allow for this. The device
name is the only user-controlled part of the format string.
A second potential issue is the sysname of the device which is also
sanitized.
This issue was found by Albin Eldstål-Ahrens and Benjamin Svensson from
Assured AB, and independently by Lukas Lamster.
Fixes #752
Signed-off-by: Peter Hutterer <[email protected]> |
/*
 * Convert the camera-space image to the selected output RGB colorspace.
 *
 * Unless raw/document mode (or an out-of-range output_color) forces
 * raw_color, this builds a minimal ICC profile (oprof) for the chosen
 * output space, composes the camera->output matrix out_cam from the
 * selected out_rgb matrix and rgb_cam, applies it to every pixel, and
 * accumulates the per-channel histograms.
 */
void CLASS convert_to_rgb()
{
  int row, col, c, i, j, k;
  ushort *img;
  float out[3], out_cam[3][4];
  double num, inverse[3][3];
  /* XYZ(D50) <- sRGB matrix used when deriving the profile's colorants */
  static const double xyzd50_srgb[3][3] =
  { { 0.436083, 0.385083, 0.143055 },
    { 0.222507, 0.716888, 0.060608 },
    { 0.013930, 0.097097, 0.714022 } };
  static const double rgb_rgb[3][3] =
  { { 1,0,0 }, { 0,1,0 }, { 0,0,1 } };
  static const double adobe_rgb[3][3] =
  { { 0.715146, 0.284856, 0.000000 },
    { 0.000000, 1.000000, 0.000000 },
    { 0.000000, 0.041166, 0.958839 } };
  static const double wide_rgb[3][3] =
  { { 0.593087, 0.404710, 0.002206 },
    { 0.095413, 0.843149, 0.061439 },
    { 0.011621, 0.069091, 0.919288 } };
  static const double prophoto_rgb[3][3] =
  { { 0.529317, 0.330092, 0.140588 },
    { 0.098368, 0.873465, 0.028169 },
    { 0.016879, 0.117663, 0.865457 } };
  /* indexed by output_color-1 */
  static const double (*out_rgb[])[3] =
  { rgb_rgb, adobe_rgb, wide_rgb, prophoto_rgb, xyz_rgb };
  static const char *name[] =
  { "sRGB", "Adobe RGB (1998)", "WideGamut D65", "ProPhoto D65", "XYZ" };
  /* Skeleton ICC profile header and tag table (big-endian words) */
  static const unsigned phead[] =
  { 1024, 0, 0x2100000, 0x6d6e7472, 0x52474220, 0x58595a20, 0, 0, 0,
    0x61637370, 0, 0, 0x6e6f6e65, 0, 0, 0, 0, 0xf6d6, 0x10000, 0xd32d };
  unsigned pbody[] =
  { 10, 0x63707274, 0, 36,	/* cprt */
	0x64657363, 0, 40,	/* desc */
	0x77747074, 0, 20,	/* wtpt */
	0x626b7074, 0, 20,	/* bkpt */
	0x72545243, 0, 14,	/* rTRC */
	0x67545243, 0, 14,	/* gTRC */
	0x62545243, 0, 14,	/* bTRC */
	0x7258595a, 0, 20,	/* rXYZ */
	0x6758595a, 0, 20,	/* gXYZ */
	0x6258595a, 0, 20 };	/* bXYZ */
  static const unsigned pwhite[] = { 0xf351, 0x10000, 0x116cc };
  unsigned pcurve[] = { 0x63757276, 0, 1, 0x1000000 };
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_CONVERT_RGB,0,2);
#endif
  gamma_curve (gamm[0], gamm[1], 0, 0);
  memcpy (out_cam, rgb_cam, sizeof out_cam);
  /* raw_color disables the matrix conversion entirely */
  raw_color |= colors == 1 || document_mode ||
		output_color < 1 || output_color > 5;
  if (!raw_color) {
    /* Build the ICC profile describing the output space. */
    oprof = (unsigned *) calloc (phead[0], 1);
    merror (oprof, "convert_to_rgb()");
    memcpy (oprof, phead, sizeof phead);
    if (output_color == 5) oprof[4] = oprof[5];
    oprof[0] = 132 + 12*pbody[0];
    /* lay out tag data offsets, 4-byte aligned */
    for (i=0; i < pbody[0]; i++) {
      oprof[oprof[0]/4] = i ? (i > 1 ? 0x58595a20 : 0x64657363) : 0x74657874;
      pbody[i*3+2] = oprof[0];
      oprof[0] += (pbody[i*3+3] + 3) & -4;
    }
    memcpy (oprof+32, pbody, sizeof pbody);
    oprof[pbody[5]/4+2] = strlen(name[output_color-1]) + 1;
    memcpy ((char *)oprof+pbody[8]+8, pwhite, sizeof pwhite);
    pcurve[3] = (short)(256/gamm[5]+0.5) << 16;
    for (i=4; i < 7; i++)
      memcpy ((char *)oprof+pbody[i*3+2], pcurve, sizeof pcurve);
    /* colorants: XYZ(D50) columns of the inverse output matrix */
    pseudoinverse ((double (*)[3]) out_rgb[output_color-1], inverse, 3);
    for (i=0; i < 3; i++)
      for (j=0; j < 3; j++) {
	for (num = k=0; k < 3; k++)
	  num += xyzd50_srgb[i][k] * inverse[j][k];
	oprof[pbody[j*3+23]/4+i+2] = num * 0x10000 + 0.5;
      }
    for (i=0; i < phead[0]/4; i++)
      oprof[i] = htonl(oprof[i]);		/* profile words are big-endian */
    strcpy ((char *)oprof+pbody[2]+8, "auto-generated by dcraw");
    strcpy ((char *)oprof+pbody[5]+12, name[output_color-1]);
    /* compose camera -> output matrix */
    for (i=0; i < 3; i++)
      for (j=0; j < colors; j++)
	for (out_cam[i][j] = k=0; k < 3; k++)
	  out_cam[i][j] += out_rgb[output_color-1][i][k] * rgb_cam[k][j];
  }
#ifdef DCRAW_VERBOSE
  if (verbose)
    fprintf (stderr, raw_color ? _("Building histograms...\n") :
	_("Converting to %s colorspace...\n"), name[output_color-1]);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
  memset(histogram,0,sizeof(int)*LIBRAW_HISTOGRAM_SIZE*4);
#else
  memset (histogram, 0, sizeof histogram);
#endif
  /* per-pixel conversion and histogram accumulation */
  for (img=image[0], row=0; row < height; row++)
    for (col=0; col < width; col++, img+=4) {
      if (!raw_color) {
	out[0] = out[1] = out[2] = 0;
	FORCC {
	  out[0] += out_cam[0][c] * img[c];
	  out[1] += out_cam[1][c] * img[c];
	  out[2] += out_cam[2][c] * img[c];
	}
	FORC3 img[c] = CLIP((int) out[c]);
      }
      else if (document_mode)
	img[0] = img[FC(row,col)];
      FORCC histogram[c][img[c] >> 3]++;
    }
  if (colors == 4 && output_color) colors = 3;
  if (document_mode && filters) colors = 1;
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_CONVERT_RGB,1,2);
#endif
} | 0 | [
/* Extract the TIPC_NLA_NODE_REKEYING interval attribute into *intv.
 * Returns 0 on success, -ENODATA when the attribute is absent. */
static int tipc_nl_retrieve_rekeying(struct nlattr **attrs, u32 *intv)
{
	if (!attrs[TIPC_NLA_NODE_REKEYING])
		return -ENODATA;

	*intv = nla_get_u32(attrs[TIPC_NLA_NODE_REKEYING]);
	return 0;
} | 0 | []
Before calling tipc_aead_key_size(ptr), we need to ensure
we have enough data to dereference ptr->keylen.
We probably also want to make sure tipc_aead_key_size()
wont overflow with malicious ptr->keylen values.
Syzbot reported:
BUG: KMSAN: uninit-value in __tipc_nl_node_set_key net/tipc/node.c:2971 [inline]
BUG: KMSAN: uninit-value in tipc_nl_node_set_key+0x9bf/0x13b0 net/tipc/node.c:3023
CPU: 0 PID: 21060 Comm: syz-executor.5 Not tainted 5.11.0-rc7-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:79 [inline]
dump_stack+0x21c/0x280 lib/dump_stack.c:120
kmsan_report+0xfb/0x1e0 mm/kmsan/kmsan_report.c:118
__msan_warning+0x5f/0xa0 mm/kmsan/kmsan_instr.c:197
__tipc_nl_node_set_key net/tipc/node.c:2971 [inline]
tipc_nl_node_set_key+0x9bf/0x13b0 net/tipc/node.c:3023
genl_family_rcv_msg_doit net/netlink/genetlink.c:739 [inline]
genl_family_rcv_msg net/netlink/genetlink.c:783 [inline]
genl_rcv_msg+0x1319/0x1610 net/netlink/genetlink.c:800
netlink_rcv_skb+0x6fa/0x810 net/netlink/af_netlink.c:2494
genl_rcv+0x63/0x80 net/netlink/genetlink.c:811
netlink_unicast_kernel net/netlink/af_netlink.c:1304 [inline]
netlink_unicast+0x11d6/0x14a0 net/netlink/af_netlink.c:1330
netlink_sendmsg+0x1740/0x1840 net/netlink/af_netlink.c:1919
sock_sendmsg_nosec net/socket.c:652 [inline]
sock_sendmsg net/socket.c:672 [inline]
____sys_sendmsg+0xcfc/0x12f0 net/socket.c:2345
___sys_sendmsg net/socket.c:2399 [inline]
__sys_sendmsg+0x714/0x830 net/socket.c:2432
__compat_sys_sendmsg net/compat.c:347 [inline]
__do_compat_sys_sendmsg net/compat.c:354 [inline]
__se_compat_sys_sendmsg+0xa7/0xc0 net/compat.c:351
__ia32_compat_sys_sendmsg+0x4a/0x70 net/compat.c:351
do_syscall_32_irqs_on arch/x86/entry/common.c:79 [inline]
__do_fast_syscall_32+0x102/0x160 arch/x86/entry/common.c:141
do_fast_syscall_32+0x6a/0xc0 arch/x86/entry/common.c:166
do_SYSENTER_32+0x73/0x90 arch/x86/entry/common.c:209
entry_SYSENTER_compat_after_hwframe+0x4d/0x5c
RIP: 0023:0xf7f60549
Code: 03 74 c0 01 10 05 03 74 b8 01 10 06 03 74 b4 01 10 07 03 74 b0 01 10 08 03 74 d8 01 00 00 00 00 00 51 52 55 89 e5 0f 34 cd 80 <5d> 5a 59 c3 90 90 90 90 8d b4 26 00 00 00 00 8d b4 26 00 00 00 00
RSP: 002b:00000000f555a5fc EFLAGS: 00000296 ORIG_RAX: 0000000000000172
RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 0000000020000200
RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000
RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000000
R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000
Uninit was created at:
kmsan_save_stack_with_flags mm/kmsan/kmsan.c:121 [inline]
kmsan_internal_poison_shadow+0x5c/0xf0 mm/kmsan/kmsan.c:104
kmsan_slab_alloc+0x8d/0xe0 mm/kmsan/kmsan_hooks.c:76
slab_alloc_node mm/slub.c:2907 [inline]
__kmalloc_node_track_caller+0xa37/0x1430 mm/slub.c:4527
__kmalloc_reserve net/core/skbuff.c:142 [inline]
__alloc_skb+0x2f8/0xb30 net/core/skbuff.c:210
alloc_skb include/linux/skbuff.h:1099 [inline]
netlink_alloc_large_skb net/netlink/af_netlink.c:1176 [inline]
netlink_sendmsg+0xdbc/0x1840 net/netlink/af_netlink.c:1894
sock_sendmsg_nosec net/socket.c:652 [inline]
sock_sendmsg net/socket.c:672 [inline]
____sys_sendmsg+0xcfc/0x12f0 net/socket.c:2345
___sys_sendmsg net/socket.c:2399 [inline]
__sys_sendmsg+0x714/0x830 net/socket.c:2432
__compat_sys_sendmsg net/compat.c:347 [inline]
__do_compat_sys_sendmsg net/compat.c:354 [inline]
__se_compat_sys_sendmsg+0xa7/0xc0 net/compat.c:351
__ia32_compat_sys_sendmsg+0x4a/0x70 net/compat.c:351
do_syscall_32_irqs_on arch/x86/entry/common.c:79 [inline]
__do_fast_syscall_32+0x102/0x160 arch/x86/entry/common.c:141
do_fast_syscall_32+0x6a/0xc0 arch/x86/entry/common.c:166
do_SYSENTER_32+0x73/0x90 arch/x86/entry/common.c:209
entry_SYSENTER_compat_after_hwframe+0x4d/0x5c
Fixes: e1f32190cf7d ("tipc: add support for AEAD key setting via netlink")
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Tuong Lien <[email protected]>
Cc: Jon Maloy <[email protected]>
Cc: Ying Xue <[email protected]>
Reported-by: syzbot <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
/* No-op stub of assert_list_leaf_cfs_rq() — presumably the checking variant
 * is compiled under a different config option; confirm against the sibling
 * definition. */
static inline void assert_list_leaf_cfs_rq(struct rq *rq)
{
} | 0 | [
"CWE-400",
"CWE-703"
] | linux | de53fd7aedb100f03e5d2231cfce0e4993282425 | 221,101,149,505,420,950,000,000,000,000,000,000,000 | 3 | sched/fair: Fix low cpu usage with high throttling by removing expiration of cpu-local slices
It has been observed, that highly-threaded, non-cpu-bound applications
running under cpu.cfs_quota_us constraints can hit a high percentage of
periods throttled while simultaneously not consuming the allocated
amount of quota. This use case is typical of user-interactive non-cpu
bound applications, such as those running in kubernetes or mesos when
run on multiple cpu cores.
This has been root caused to cpu-local run queue being allocated per cpu
bandwidth slices, and then not fully using that slice within the period.
At which point the slice and quota expires. This expiration of unused
slice results in applications not being able to utilize the quota for
which they are allocated.
The non-expiration of per-cpu slices was recently fixed by
'commit 512ac999d275 ("sched/fair: Fix bandwidth timer clock drift
condition")'. Prior to that it appears that this had been broken since
at least 'commit 51f2176d74ac ("sched/fair: Fix unlocked reads of some
cfs_b->quota/period")' which was introduced in v3.16-rc1 in 2014. That
added the following conditional which resulted in slices never being
expired.
if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
/* extend local deadline, drift is bounded above by 2 ticks */
cfs_rq->runtime_expires += TICK_NSEC;
Because this was broken for nearly 5 years, and has recently been fixed
and is now being noticed by many users running kubernetes
(https://github.com/kubernetes/kubernetes/issues/67577) it is my opinion
that the mechanisms around expiring runtime should be removed
altogether.
This allows quota already allocated to per-cpu run-queues to live longer
than the period boundary. This allows threads on runqueues that do not
use much CPU to continue to use their remaining slice over a longer
period of time than cpu.cfs_period_us. However, this helps prevent the
above condition of hitting throttling while also not fully utilizing
your cpu quota.
This theoretically allows a machine to use slightly more than its
allotted quota in some periods. This overflow would be bounded by the
remaining quota left on each per-cpu runqueueu. This is typically no
more than min_cfs_rq_runtime=1ms per cpu. For CPU bound tasks this will
change nothing, as they should theoretically fully utilize all of their
quota in each period. For user-interactive tasks as described above this
provides a much better user/application experience as their cpu
utilization will more closely match the amount they requested when they
hit throttling. This means that cpu limits no longer strictly apply per
period for non-cpu bound applications, but that they are still accurate
over longer timeframes.
This greatly improves performance of high-thread-count, non-cpu bound
applications with low cfs_quota_us allocation on high-core-count
machines. In the case of an artificial testcase (10ms/100ms of quota on
80 CPU machine), this commit resulted in almost 30x performance
improvement, while still maintaining correct cpu quota restrictions.
That testcase is available at https://github.com/indeedeng/fibtest.
Fixes: 512ac999d275 ("sched/fair: Fix bandwidth timer clock drift condition")
Signed-off-by: Dave Chiluk <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Phil Auld <[email protected]>
Reviewed-by: Ben Segall <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: John Hammond <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Kyle Anderson <[email protected]>
Cc: Gabriel Munos <[email protected]>
Cc: Peter Oskolkov <[email protected]>
Cc: Cong Wang <[email protected]>
Cc: Brendan Gregg <[email protected]>
Link: https://lkml.kernel.org/r/[email protected] |
static size_t consume_s8 (ut8 *buf, ut8 *max, st8 *out, ut32 *offset) {
size_t n;
ut32 tmp;
if (!(n = consume_u32 (buf, max, &tmp, offset)) || n > 1) {
return 0;
}
*out = (st8)(tmp & 0x7f);
return 1;
} | 0 | [
"CWE-125",
"CWE-787"
] | radare2 | d2632f6483a3ceb5d8e0a5fb11142c51c43978b4 | 213,258,372,100,569,320,000,000,000,000,000,000,000 | 9 | Fix crash in fuzzed wasm r2_hoobr_consume_init_expr |
void same_file_test()
{
try
{
fclose(QUtil::safe_fopen("qutil.out", "r"));
fclose(QUtil::safe_fopen("other-file", "r"));
}
catch (std::exception const&)
{
std::cout << "same_file_test expects to have qutil.out and other-file"
" exist in the current directory\n";
return;
}
assert_same_file("qutil.out", "./qutil.out", true);
assert_same_file("qutil.out", "qutil.out", true);
assert_same_file("qutil.out", "other-file", false);
assert_same_file("qutil.out", "", false);
assert_same_file("qutil.out", 0, false);
assert_same_file("", "qutil.out", false);
} | 0 | [
"CWE-787"
] | qpdf | d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e | 296,497,994,000,919,480,000,000,000,000,000,000,000 | 20 | Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition. |
bool ValidateSHARK()
{
std::cout << "\nSHARK validation suite running...\n\n";
bool pass1 = true, pass2 = true;
SHARKEncryption enc; // 128-bit only
pass1 = SHARKEncryption::KEYLENGTH == 16 && pass1;
pass1 = enc.StaticGetValidKeyLength(8) == 16 && pass1;
pass1 = enc.StaticGetValidKeyLength(15) == 16 && pass1;
pass1 = enc.StaticGetValidKeyLength(16) == 16 && pass1;
pass1 = enc.StaticGetValidKeyLength(17) == 16 && pass1;
pass1 = enc.StaticGetValidKeyLength(32) == 16 && pass1;
SHARKDecryption dec; // 128-bit only
pass2 = SHARKDecryption::KEYLENGTH == 16 && pass2;
pass2 = dec.StaticGetValidKeyLength(8) == 16 && pass2;
pass2 = dec.StaticGetValidKeyLength(15) == 16 && pass2;
pass2 = dec.StaticGetValidKeyLength(16) == 16 && pass2;
pass2 = dec.StaticGetValidKeyLength(17) == 16 && pass2;
pass2 = dec.StaticGetValidKeyLength(32) == 16 && pass2;
std::cout << (pass1 && pass2 ? "passed:" : "FAILED:") << " Algorithm key lengths\n";
FileSource valdata(CRYPTOPP_DATA_DIR "TestData/sharkval.dat", true, new HexDecoder);
return BlockTransformationTest(FixedRoundsCipherFactory<SHARKEncryption, SHARKDecryption>(), valdata) && pass1 && pass2;
}
| 0 | [
"CWE-190",
"CWE-125"
] | cryptopp | 07dbcc3d9644b18e05c1776db2a57fe04d780965 | 259,711,068,236,894,770,000,000,000,000,000,000,000 | 25 | Add Inflator::BadDistanceErr exception (Issue 414)
The improved validation and excpetion clears the Address Sanitizer and Undefined Behavior Sanitizer findings |
glob_name_is_acceptable (name)
const char *name;
{
struct ign *p;
int flags;
/* . and .. are never matched */
if (name[0] == '.' && (name[1] == '\0' || (name[1] == '.' && name[2] == '\0')))
return (0);
flags = FNM_PATHNAME | FNMATCH_EXTFLAG | FNMATCH_NOCASEGLOB;
for (p = globignore.ignores; p->val; p++)
{
if (strmatch (p->val, (char *)name, flags) != FNM_NOMATCH)
return (0);
}
return (1);
} | 0 | [
"CWE-273",
"CWE-787"
] | bash | 951bdaad7a18cc0dc1036bba86b18b90874d39ff | 265,604,678,207,480,320,000,000,000,000,000,000,000 | 18 | commit bash-20190628 snapshot |
std::string HttpFile::getMd5() const
{
return implPtr_->getMd5();
} | 0 | [
"CWE-552"
] | drogon | 3c785326c63a34aa1799a639ae185bc9453cb447 | 296,376,835,214,619,670,000,000,000,000,000,000,000 | 4 | Prevent malformed upload path causing arbitrary write (#1174) |
gsm_xsmp_server_start (GsmXsmpServer *server)
{
GIOChannel *channel;
int i;
for (i = 0; i < server->priv->num_local_xsmp_sockets; i++) {
GsmIceConnectionData *data;
data = g_new0 (GsmIceConnectionData, 1);
data->server = server;
data->listener = server->priv->xsmp_sockets[i];
channel = g_io_channel_unix_new (IceGetListenConnectionNumber (server->priv->xsmp_sockets[i]));
g_io_add_watch_full (channel,
G_PRIORITY_DEFAULT,
G_IO_IN | G_IO_HUP | G_IO_ERR,
(GIOFunc)accept_ice_connection,
data,
(GDestroyNotify)g_free);
g_io_channel_unref (channel);
}
} | 0 | [
"CWE-125",
"CWE-835"
] | gnome-session | b0dc999e0b45355314616321dbb6cb71e729fc9d | 238,302,581,544,864,760,000,000,000,000,000,000,000 | 22 | [gsm] Delay the creation of the GsmXSMPClient until it really exists
We used to create the GsmXSMPClient before the XSMP connection is really
accepted. This can lead to some issues, though. An example is:
https://bugzilla.gnome.org/show_bug.cgi?id=598211#c19. Quoting:
"What is happening is that a new client (probably metacity in your
case) is opening an ICE connection in the GSM_MANAGER_PHASE_END_SESSION
phase, which causes a new GsmXSMPClient to be added to the client
store. The GSM_MANAGER_PHASE_EXIT phase then begins before the client
has had a chance to establish a xsmp connection, which means that
client->priv->conn will not be initialized at the point that xsmp_stop
is called on the new unregistered client."
The fix is to create the GsmXSMPClient object when there's a real XSMP
connection. This implies moving the timeout that makes sure we don't
have an empty client to the XSMP server.
https://bugzilla.gnome.org/show_bug.cgi?id=598211 |
Subsets and Splits