| func (string, 0-484k chars) | target (int64, 0-1) | cwe (sequence, 0-4 items) | project (string, 799 values) | commit_id (string, 40 chars) | hash (float64) | size (int64, 1-24k) | message (string, 0-13.3k chars) |
|---|---|---|---|---|---|---|---|
static int fts3EvalIncrPhraseNext(
Fts3Cursor *pCsr, /* FTS Cursor handle */
Fts3Phrase *p, /* Phrase object to advance to next docid */
u8 *pbEof /* OUT: Set to 1 if EOF */
){
int rc = SQLITE_OK;
Fts3Doclist *pDL = &p->doclist;
Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
u8 bEof = 0;
/* This is only called if it is guaranteed that the phrase has at least
** one incremental token, in which case the bIncr flag is set. */
assert( p->bIncr==1 );
if( p->nToken==1 ){
rc = sqlite3Fts3MsrIncrNext(pTab, p->aToken[0].pSegcsr,
&pDL->iDocid, &pDL->pList, &pDL->nList
);
if( pDL->pList==0 ) bEof = 1;
}else{
int bDescDoclist = pCsr->bDesc;
struct TokenDoclist a[MAX_INCR_PHRASE_TOKENS];
memset(a, 0, sizeof(a));
assert( p->nToken<=MAX_INCR_PHRASE_TOKENS );
assert( p->iDoclistToken<MAX_INCR_PHRASE_TOKENS );
while( bEof==0 ){
int bMaxSet = 0;
sqlite3_int64 iMax = 0; /* Largest docid for all iterators */
int i; /* Used to iterate through tokens */
/* Advance the iterator for each token in the phrase once. */
for(i=0; rc==SQLITE_OK && i<p->nToken && bEof==0; i++){
rc = incrPhraseTokenNext(pTab, p, i, &a[i], &bEof);
if( a[i].bIgnore==0 && (bMaxSet==0 || DOCID_CMP(iMax, a[i].iDocid)<0) ){
iMax = a[i].iDocid;
bMaxSet = 1;
}
}
assert( rc!=SQLITE_OK || (p->nToken>=1 && a[p->nToken-1].bIgnore==0) );
assert( rc!=SQLITE_OK || bMaxSet );
/* Keep advancing iterators until they all point to the same document */
for(i=0; i<p->nToken; i++){
while( rc==SQLITE_OK && bEof==0
&& a[i].bIgnore==0 && DOCID_CMP(a[i].iDocid, iMax)<0
){
rc = incrPhraseTokenNext(pTab, p, i, &a[i], &bEof);
if( DOCID_CMP(a[i].iDocid, iMax)>0 ){
iMax = a[i].iDocid;
i = 0;
}
}
}
/* Check if the current entries really are a phrase match */
if( bEof==0 ){
int nList = 0;
int nByte = a[p->nToken-1].nList;
char *aDoclist = sqlite3_malloc(nByte+FTS3_BUFFER_PADDING);
if( !aDoclist ) return SQLITE_NOMEM;
memcpy(aDoclist, a[p->nToken-1].pList, nByte+1);
memset(&aDoclist[nByte], 0, FTS3_BUFFER_PADDING);
for(i=0; i<(p->nToken-1); i++){
if( a[i].bIgnore==0 ){
char *pL = a[i].pList;
char *pR = aDoclist;
char *pOut = aDoclist;
int nDist = p->nToken-1-i;
int res = fts3PoslistPhraseMerge(&pOut, nDist, 0, 1, &pL, &pR);
if( res==0 ) break;
nList = (int)(pOut - aDoclist);
}
}
if( i==(p->nToken-1) ){
pDL->iDocid = iMax;
pDL->pList = aDoclist;
pDL->nList = nList;
pDL->bFreeList = 1;
break;
}
sqlite3_free(aDoclist);
}
}
}
*pbEof = bEof;
return rc;
} | 0 | [
"CWE-787"
] | sqlite | c72f2fb7feff582444b8ffdc6c900c69847ce8a9 | 4,464,369,817,073,855,500,000,000,000,000,000,000 | 91 | More improvements to shadow table corruption detection in FTS3.
FossilOrigin-Name: 51525f9c3235967bc00a090e84c70a6400698c897aa4742e817121c725b8c99d |
int qemu_target_page_bits(void)
{
return TARGET_PAGE_BITS;
} | 0 | [
"CWE-125"
] | qemu | 04bf2526ce87f21b32c9acba1c5518708c243ad0 | 98,561,063,340,393,440,000,000,000,000,000,000,000 | 4 | exec: use qemu_ram_ptr_length to access guest ram
When accessing guest's ram block during DMA operation, use
'qemu_ram_ptr_length' to get ram block pointer. It ensures
that DMA operation of given length is possible; And avoids
any OOB memory access situations.
Reported-by: Alex <[email protected]>
Signed-off-by: Prasad J Pandit <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
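The fix above replaces bare pointer lookups with a length-aware accessor. As a rough illustration of that idea (not QEMU's actual qemu_ram_ptr_length(), whose signature differs; the ram_block_t type here is hypothetical), a resolver can take the intended access length and verify the whole range fits inside the block:

#include <stddef.h>
#include <stdint.h>

typedef struct { uint8_t *host; uint64_t size; } ram_block_t;  /* illustrative */

/* Return a host pointer only if [addr, addr+len) lies inside the block. */
static void *ram_ptr_length(ram_block_t *rb, uint64_t addr, uint64_t len)
{
    if (addr > rb->size || len > rb->size - addr)
        return NULL;   /* requested DMA range escapes the RAM block */
    return rb->host + addr;
}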
xmlNodeSetContent(xmlNodePtr cur, const xmlChar *content) {
if (cur == NULL) {
#ifdef DEBUG_TREE
xmlGenericError(xmlGenericErrorContext,
"xmlNodeSetContent : node == NULL\n");
#endif
return;
}
switch (cur->type) {
case XML_DOCUMENT_FRAG_NODE:
case XML_ELEMENT_NODE:
case XML_ATTRIBUTE_NODE:
if (cur->children != NULL) xmlFreeNodeList(cur->children);
cur->children = xmlStringGetNodeList(cur->doc, content);
UPDATE_LAST_CHILD_AND_PARENT(cur)
break;
case XML_TEXT_NODE:
case XML_CDATA_SECTION_NODE:
case XML_ENTITY_REF_NODE:
case XML_ENTITY_NODE:
case XML_PI_NODE:
case XML_COMMENT_NODE:
if ((cur->content != NULL) &&
(cur->content != (xmlChar *) &(cur->properties))) {
if (!((cur->doc != NULL) && (cur->doc->dict != NULL) &&
(xmlDictOwns(cur->doc->dict, cur->content))))
xmlFree(cur->content);
}
if (cur->children != NULL) xmlFreeNodeList(cur->children);
cur->last = cur->children = NULL;
if (content != NULL) {
cur->content = xmlStrdup(content);
} else
cur->content = NULL;
cur->properties = NULL;
cur->nsDef = NULL;
break;
case XML_DOCUMENT_NODE:
case XML_HTML_DOCUMENT_NODE:
case XML_DOCUMENT_TYPE_NODE:
case XML_XINCLUDE_START:
case XML_XINCLUDE_END:
break;
case XML_NOTATION_NODE:
break;
case XML_DTD_NODE:
break;
case XML_NAMESPACE_DECL:
break;
case XML_ELEMENT_DECL:
/* TODO !!! */
break;
case XML_ATTRIBUTE_DECL:
/* TODO !!! */
break;
case XML_ENTITY_DECL:
/* TODO !!! */
break;
}
} | 0 | [
"CWE-190"
] | libxml2 | 6c283d83eccd940bcde15634ac8c7f100e3caefd | 158,232,032,226,329,340,000,000,000,000,000,000,000 | 60 | [CVE-2022-29824] Fix integer overflows in xmlBuf and xmlBuffer
In several places, the code handling string buffers didn't check for
integer overflow or used wrong types for buffer sizes. This could
result in out-of-bounds writes or other memory errors when working on
large, multi-gigabyte buffers.
Thanks to Felix Wilhelm for the report. |
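A minimal sketch of the overflow check the message describes, not the libxml2 patch itself (buf_t and grow_buffer() are hypothetical names): reject growth requests that would wrap a size_t before touching the allocator.

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct { char *data; size_t use; size_t size; } buf_t;

static int grow_buffer(buf_t *buf, size_t extra)
{
    if (extra > SIZE_MAX - buf->use)           /* use + extra would overflow */
        return -1;
    if (buf->use + extra > buf->size) {
        size_t newsize = buf->use + extra;
        if (newsize > SIZE_MAX / 2)            /* refuse pathological sizes */
            return -1;
        newsize *= 2;
        char *tmp = realloc(buf->data, newsize);
        if (tmp == NULL)
            return -1;
        buf->data = tmp;
        buf->size = newsize;
    }
    return 0;
}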
merge_selfsigs_subkey (KBNODE keyblock, KBNODE subnode)
{
PKT_public_key *mainpk = NULL, *subpk = NULL;
PKT_signature *sig;
KBNODE k;
u32 mainkid[2];
u32 sigdate = 0;
KBNODE signode;
u32 curtime = make_timestamp ();
unsigned int key_usage = 0;
u32 keytimestamp = 0;
u32 key_expire = 0;
const byte *p;
if (subnode->pkt->pkttype != PKT_PUBLIC_SUBKEY)
BUG ();
mainpk = keyblock->pkt->pkt.public_key;
if (mainpk->version < 4)
return;/* (actually this should never happen) */
keyid_from_pk (mainpk, mainkid);
subpk = subnode->pkt->pkt.public_key;
keytimestamp = subpk->timestamp;
subpk->flags.valid = 0;
subpk->main_keyid[0] = mainpk->main_keyid[0];
subpk->main_keyid[1] = mainpk->main_keyid[1];
/* Find the latest key binding self-signature. */
signode = NULL;
sigdate = 0; /* Helper to find the latest signature. */
for (k = subnode->next; k && k->pkt->pkttype != PKT_PUBLIC_SUBKEY;
k = k->next)
{
if (k->pkt->pkttype == PKT_SIGNATURE)
{
sig = k->pkt->pkt.signature;
if (sig->keyid[0] == mainkid[0] && sig->keyid[1] == mainkid[1])
{
if (check_key_signature (keyblock, k, NULL))
; /* Signature did not verify. */
else if (IS_SUBKEY_REV (sig))
{
/* Note that this means that the date on a
revocation sig does not matter - even if the
binding sig is dated after the revocation sig,
the subkey is still marked as revoked. This
seems ok, as it is just as easy to make new
subkeys rather than re-sign old ones as the
problem is in the distribution. Plus, PGP (7)
does this the same way. */
subpk->flags.revoked = 1;
sig_to_revoke_info (sig, &subpk->revoked);
/* Although we could stop now, we continue to
* figure out other information like the old expiration
* time. */
}
else if (IS_SUBKEY_SIG (sig) && sig->timestamp >= sigdate)
{
if (sig->flags.expired)
; /* Signature has expired - ignore it. */
else
{
sigdate = sig->timestamp;
signode = k;
signode->pkt->pkt.signature->flags.chosen_selfsig = 0;
}
}
}
}
}
/* No valid key binding. */
if (!signode)
return;
sig = signode->pkt->pkt.signature;
sig->flags.chosen_selfsig = 1; /* So we know which selfsig we chose later. */
key_usage = parse_key_usage (sig);
if (!key_usage)
{
/* No key flags at all: get it from the algo. */
key_usage = openpgp_pk_algo_usage (subpk->pubkey_algo);
}
else
{
/* Check that the usage matches the usage as given by the algo. */
int x = openpgp_pk_algo_usage (subpk->pubkey_algo);
if (x) /* Mask it down to the actual allowed usage. */
key_usage &= x;
}
subpk->pubkey_usage = key_usage;
p = parse_sig_subpkt (sig->hashed, SIGSUBPKT_KEY_EXPIRE, NULL);
if (p && buffer_to_u32 (p))
key_expire = keytimestamp + buffer_to_u32 (p);
else
key_expire = 0;
subpk->has_expired = key_expire >= curtime ? 0 : key_expire;
subpk->expiredate = key_expire;
/* Algo doesn't exist. */
if (openpgp_pk_test_algo (subpk->pubkey_algo))
return;
subpk->flags.valid = 1;
/* Find the most recent 0x19 embedded signature on our self-sig. */
if (!subpk->flags.backsig)
{
int seq = 0;
size_t n;
PKT_signature *backsig = NULL;
sigdate = 0;
/* We do this while() since there may be other embedded
signatures in the future. We only want 0x19 here. */
while ((p = enum_sig_subpkt (sig->hashed,
SIGSUBPKT_SIGNATURE, &n, &seq, NULL)))
if (n > 3
&& ((p[0] == 3 && p[2] == 0x19) || (p[0] == 4 && p[1] == 0x19)))
{
PKT_signature *tempsig = buf_to_sig (p, n);
if (tempsig)
{
if (tempsig->timestamp > sigdate)
{
if (backsig)
free_seckey_enc (backsig);
backsig = tempsig;
sigdate = backsig->timestamp;
}
else
free_seckey_enc (tempsig);
}
}
seq = 0;
/* It is safe to have this in the unhashed area since the 0x19
is located on the selfsig for convenience, not security. */
while ((p = enum_sig_subpkt (sig->unhashed, SIGSUBPKT_SIGNATURE,
&n, &seq, NULL)))
if (n > 3
&& ((p[0] == 3 && p[2] == 0x19) || (p[0] == 4 && p[1] == 0x19)))
{
PKT_signature *tempsig = buf_to_sig (p, n);
if (tempsig)
{
if (tempsig->timestamp > sigdate)
{
if (backsig)
free_seckey_enc (backsig);
backsig = tempsig;
sigdate = backsig->timestamp;
}
else
free_seckey_enc (tempsig);
}
}
if (backsig)
{
/* At this point, backsig contains the most recent 0x19 sig.
Let's see if it is good. */
/* 2==valid, 1==invalid, 0==didn't check */
if (check_backsig (mainpk, subpk, backsig) == 0)
subpk->flags.backsig = 2;
else
subpk->flags.backsig = 1;
free_seckey_enc (backsig);
}
}
} | 1 | [
"CWE-20"
] | gnupg | 2183683bd633818dd031b090b5530951de76f392 | 140,047,183,424,349,870,000,000,000,000,000,000,000 | 182 | Use inline functions to convert buffer data to scalars.
* common/host2net.h (buf16_to_ulong, buf16_to_uint): New.
(buf16_to_ushort, buf16_to_u16): New.
(buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New.
--
Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to
avoid all sign extension on shift problems. Hanno Böck found a case
with an invalid read due to this problem. To fix that once and for
all almost all uses of "<< 24" and "<< 8" are changed by this patch to
use an inline function from host2net.h.
Signed-off-by: Werner Koch <[email protected]> |
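A hedged sketch of the kind of helper the commit describes (the name follows the message; the exact gnupg definition may differ): widen each byte to an unsigned type before shifting, so "<< 24" can never sign-extend through int.

#include <stdint.h>

static inline uint32_t buf32_to_u32 (const void *buffer)
{
  const unsigned char *p = buffer;
  /* Each operand is already uint32_t, so the shifts are well defined. */
  return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16)
       | ((uint32_t)p[2] << 8)  | (uint32_t)p[3];
}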
h2_rx_window_update(struct worker *wrk, struct h2_sess *h2, struct h2_req *r2)
{
uint32_t wu;
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
ASSERT_RXTHR(h2);
CHECK_OBJ_ORNULL(r2, H2_REQ_MAGIC);
if (h2->rxf_len != 4)
return (H2CE_FRAME_SIZE_ERROR);
wu = vbe32dec(h2->rxf_data) & ~(1LU<<31);
if (wu == 0)
return (H2SE_PROTOCOL_ERROR);
if (r2 == NULL)
return (0);
Lck_Lock(&h2->sess->mtx);
r2->t_window += wu;
if (r2 == h2->req0)
AZ(pthread_cond_broadcast(h2->winupd_cond));
else if (r2->cond != NULL)
AZ(pthread_cond_signal(r2->cond));
Lck_Unlock(&h2->sess->mtx);
if (r2->t_window >= (1LL << 31))
return (H2SE_FLOW_CONTROL_ERROR);
return (0);
} | 0 | [
"CWE-444"
] | varnish-cache | d4c67d2a1a05304598895c24663c58a2e2932708 | 42,573,506,194,211,240,000,000,000,000,000,000,000 | 26 | Take content length into account on H/2 request bodies
When receiving H/2 data frames, make sure to take the advertised content
length into account, and fail appropriately if the combined sum of the
data frames does not match the content length. |
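A sketch of the check the message describes, with illustrative names rather than varnishd's own: accumulate DATA frame sizes and fail when the running total exceeds, or the final total misses, the advertised content length.

#include <stdint.h>

struct body_state { uint64_t content_length; uint64_t received; };

/* Returns 0 to continue, -1 on a content-length mismatch. */
static int on_data_frame(struct body_state *bs, uint64_t len, int end_stream)
{
    bs->received += len;
    if (bs->received > bs->content_length)
        return -1;                     /* more body than advertised */
    if (end_stream && bs->received != bs->content_length)
        return -1;                     /* less body than advertised */
    return 0;
}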
static int selinux_socket_unix_stream_connect(struct sock *sock,
struct sock *other,
struct sock *newsk)
{
struct sk_security_struct *sksec_sock = sock->sk_security;
struct sk_security_struct *sksec_other = other->sk_security;
struct sk_security_struct *sksec_new = newsk->sk_security;
struct common_audit_data ad;
struct lsm_network_audit net = {0,};
int err;
ad.type = LSM_AUDIT_DATA_NET;
ad.u.net = &net;
ad.u.net->sk = other;
err = avc_has_perm(&selinux_state,
sksec_sock->sid, sksec_other->sid,
sksec_other->sclass,
UNIX_STREAM_SOCKET__CONNECTTO, &ad);
if (err)
return err;
/* server child socket */
sksec_new->peer_sid = sksec_sock->sid;
err = security_sid_mls_copy(&selinux_state, sksec_other->sid,
sksec_sock->sid, &sksec_new->sid);
if (err)
return err;
/* connecting socket */
sksec_sock->peer_sid = sksec_new->sid;
return 0;
} | 0 | [
"CWE-349"
] | linux | fb73974172ffaaf57a7c42f35424d9aece1a5af6 | 321,865,973,401,632,640,000,000,000,000,000,000,000 | 34 | selinux: properly handle multiple messages in selinux_netlink_send()
Fix the SELinux netlink_send hook to properly handle multiple netlink
messages in a single sk_buff; each message is parsed and subject to
SELinux access control. Prior to this patch, SELinux only inspected
the first message in the sk_buff.
Cc: [email protected]
Reported-by: Dmitry Vyukov <[email protected]>
Reviewed-by: Stephen Smalley <[email protected]>
Signed-off-by: Paul Moore <[email protected]> |
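A minimal sketch (not the SELinux hook itself) of the loop the fix adds: walk every netlink message in the buffer with the standard NLMSG_OK/NLMSG_NEXT macros and run the check on each one, instead of inspecting only the first header. check_one() is a hypothetical per-message callback.

#include <linux/netlink.h>

static int check_all(struct nlmsghdr *nlh, int len,
                     int (*check_one)(const struct nlmsghdr *))
{
    int rc = 0;
    for (; rc == 0 && NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len))
        rc = check_one(nlh);           /* access control per message */
    return rc;
}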
static bool anal_fcn_del_bb(RCore *core, const char *input) {
ut64 addr = r_num_math (core->num, input);
if (!addr) {
addr = core->offset;
}
RAnalFunction *fcn = r_anal_get_fcn_in (core->anal, addr, -1);
if (fcn) {
if (!strcmp (input, "*")) {
while (!r_list_empty (fcn->bbs)) {
r_anal_function_remove_block (fcn, r_list_first (fcn->bbs));
}
} else {
RAnalBlock *b;
RListIter *iter;
r_list_foreach (fcn->bbs, iter, b) {
if (b->addr == addr) {
r_anal_function_remove_block (fcn, b);
return true;
}
}
eprintf ("Cannot find basic block\n");
}
} else {
eprintf ("Cannot find function\n");
}
return false;
} | 0 | [
"CWE-703",
"CWE-908"
] | radare2 | 4d3811681a80f92a53e795f6a64c4b0fc2c8dd22 | 21,988,767,542,906,450,000,000,000,000,000,000,000 | 27 | Fix segfault in adf (#16230) |
static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
u32 exit_intr_info,
unsigned long exit_qualification)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
u32 vm_inst_error = 0;
/* trying to cancel vmlaunch/vmresume is a bug */
WARN_ON_ONCE(vmx->nested.nested_run_pending);
leave_guest_mode(vcpu);
prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
exit_qualification);
if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr,
vmcs12->vm_exit_msr_store_count))
nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL);
if (unlikely(vmx->fail))
vm_inst_error = vmcs_read32(VM_INSTRUCTION_ERROR);
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
/*
* TODO: SDM says that with acknowledge interrupt on exit, bit 31 of
* the VM-exit interrupt information (valid interrupt) is always set to
* 1 on EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't need
* kvm_cpu_has_interrupt(). See the commit message for details.
*/
if (nested_exit_intr_ack_set(vcpu) &&
exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
kvm_cpu_has_interrupt(vcpu)) {
int irq = kvm_cpu_get_interrupt(vcpu);
WARN_ON(irq < 0);
vmcs12->vm_exit_intr_info = irq |
INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
}
trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
vmcs12->exit_qualification,
vmcs12->idt_vectoring_info_field,
vmcs12->vm_exit_intr_info,
vmcs12->vm_exit_intr_error_code,
KVM_ISA_VMX);
vm_entry_controls_reset_shadow(vmx);
vm_exit_controls_reset_shadow(vmx);
vmx_segment_cache_clear(vmx);
/* if no vmcs02 cache requested, remove the one we used */
if (VMCS02_POOL_SIZE == 0)
nested_free_vmcs02(vmx, vmx->nested.current_vmptr);
load_vmcs12_host_state(vcpu, vmcs12);
/* Update any VMCS fields that might have changed while L2 ran */
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
if (vmx->hv_deadline_tsc == -1)
vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
PIN_BASED_VMX_PREEMPTION_TIMER);
else
vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
PIN_BASED_VMX_PREEMPTION_TIMER);
if (kvm_has_tsc_control)
decache_tsc_multiplier(vmx);
if (vmx->nested.change_vmcs01_virtual_x2apic_mode) {
vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
vmx_set_virtual_x2apic_mode(vcpu,
vcpu->arch.apic_base & X2APIC_ENABLE);
} else if (!nested_cpu_has_ept(vmcs12) &&
nested_cpu_has2(vmcs12,
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
vmx_flush_tlb_ept_only(vcpu);
}
/* This is needed for same reason as it was needed in prepare_vmcs02 */
vmx->host_rsp = 0;
/* Unpin physical memory we referred to in vmcs02 */
if (vmx->nested.apic_access_page) {
kvm_release_page_dirty(vmx->nested.apic_access_page);
vmx->nested.apic_access_page = NULL;
}
if (vmx->nested.virtual_apic_page) {
kvm_release_page_dirty(vmx->nested.virtual_apic_page);
vmx->nested.virtual_apic_page = NULL;
}
if (vmx->nested.pi_desc_page) {
kunmap(vmx->nested.pi_desc_page);
kvm_release_page_dirty(vmx->nested.pi_desc_page);
vmx->nested.pi_desc_page = NULL;
vmx->nested.pi_desc = NULL;
}
/*
* We are now running in L2, mmu_notifier will force to reload the
* page's hpa for L2 vmcs. Need to reload it for L1 before entering L1.
*/
kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
/*
* Exiting from L2 to L1, we're now back to L1 which thinks it just
* finished a VMLAUNCH or VMRESUME instruction, so we need to set the
* success or failure flag accordingly.
*/
if (unlikely(vmx->fail)) {
vmx->fail = 0;
nested_vmx_failValid(vcpu, vm_inst_error);
} else
nested_vmx_succeed(vcpu);
if (enable_shadow_vmcs)
vmx->nested.sync_shadow_vmcs = true;
/* in case we halted in L2 */
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
} | 0 | [
"CWE-20",
"CWE-617"
] | linux | 3a8b0677fc6180a467e26cc32ce6b0c09a32f9bb | 326,818,982,839,357,250,000,000,000,000,000,000,000 | 120 | KVM: VMX: Do not BUG() on out-of-bounds guest IRQ
The value of the guest_irq argument to vmx_update_pi_irte() is
ultimately coming from a KVM_IRQFD API call. Do not BUG() in
vmx_update_pi_irte() if the value is out-of bounds. (Especially,
since KVM as a whole seems to hang after that.)
Instead, print a message only once if we find that we don't have a
route for a certain IRQ (which can be out-of-bounds or within the
array).
This fixes CVE-2017-1000252.
Fixes: efc644048ecde54 ("KVM: x86: Update IRTE for posted-interrupts")
Signed-off-by: Jan H. Schönherr <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
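A sketch of the hardening pattern in the message: bounds-check the guest-supplied index, warn once, and fail gracefully instead of aborting with BUG(). NR_ROUTES and the function name are illustrative, not KVM's.

#include <stdio.h>
#include <errno.h>

#define NR_ROUTES 256                  /* illustrative table bound */

static int lookup_route(unsigned int guest_irq)
{
    static int warned;
    if (guest_irq >= NR_ROUTES) {
        if (!warned++)                 /* print the message only once */
            fprintf(stderr, "no route for guest_irq %u\n", guest_irq);
        return -EINVAL;
    }
    return 0;                          /* index is safe to use */
}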
xmlSchemaFixupTypeAttributeUses(xmlSchemaParserCtxtPtr pctxt,
xmlSchemaTypePtr type)
{
xmlSchemaTypePtr baseType = NULL;
xmlSchemaAttributeUsePtr use;
xmlSchemaItemListPtr uses, baseUses, prohibs = NULL;
if (type->baseType == NULL) {
PERROR_INT("xmlSchemaFixupTypeAttributeUses",
"no base type");
return (-1);
}
baseType = type->baseType;
if (WXS_IS_TYPE_NOT_FIXED(baseType))
if (xmlSchemaTypeFixup(baseType, ACTXT_CAST pctxt) == -1)
return(-1);
uses = type->attrUses;
baseUses = baseType->attrUses;
/*
* Expand attribute group references. And build the 'complete'
* wildcard, i.e. intersect multiple wildcards.
* Move attribute prohibitions into a separate list.
*/
if (uses != NULL) {
if (WXS_IS_RESTRICTION(type)) {
/*
* This one will transfer all attr. prohibitions
* into pctxt->attrProhibs.
*/
if (xmlSchemaExpandAttributeGroupRefs(pctxt,
WXS_BASIC_CAST type, &(type->attributeWildcard), uses,
pctxt->attrProhibs) == -1)
{
PERROR_INT("xmlSchemaFixupTypeAttributeUses",
"failed to expand attributes");
}
if (pctxt->attrProhibs->nbItems != 0)
prohibs = pctxt->attrProhibs;
} else {
if (xmlSchemaExpandAttributeGroupRefs(pctxt,
WXS_BASIC_CAST type, &(type->attributeWildcard), uses,
NULL) == -1)
{
PERROR_INT("xmlSchemaFixupTypeAttributeUses",
"failed to expand attributes");
}
}
}
/*
* Inherit the attribute uses of the base type.
*/
if (baseUses != NULL) {
int i, j;
xmlSchemaAttributeUseProhibPtr pro;
if (WXS_IS_RESTRICTION(type)) {
int usesCount;
xmlSchemaAttributeUsePtr tmp;
if (uses != NULL)
usesCount = uses->nbItems;
else
usesCount = 0;
/* Restriction. */
for (i = 0; i < baseUses->nbItems; i++) {
use = baseUses->items[i];
if (prohibs) {
/*
* Filter out prohibited uses.
*/
for (j = 0; j < prohibs->nbItems; j++) {
pro = prohibs->items[j];
if ((WXS_ATTRUSE_DECL_NAME(use) == pro->name) &&
(WXS_ATTRUSE_DECL_TNS(use) ==
pro->targetNamespace))
{
goto inherit_next;
}
}
}
if (usesCount) {
/*
* Filter out existing uses.
*/
for (j = 0; j < usesCount; j++) {
tmp = uses->items[j];
if ((WXS_ATTRUSE_DECL_NAME(use) ==
WXS_ATTRUSE_DECL_NAME(tmp)) &&
(WXS_ATTRUSE_DECL_TNS(use) ==
WXS_ATTRUSE_DECL_TNS(tmp)))
{
goto inherit_next;
}
}
}
if (uses == NULL) {
type->attrUses = xmlSchemaItemListCreate();
if (type->attrUses == NULL)
goto exit_failure;
uses = type->attrUses;
}
xmlSchemaItemListAddSize(uses, 2, use);
inherit_next: {}
}
} else {
/* Extension. */
for (i = 0; i < baseUses->nbItems; i++) {
use = baseUses->items[i];
if (uses == NULL) {
type->attrUses = xmlSchemaItemListCreate();
if (type->attrUses == NULL)
goto exit_failure;
uses = type->attrUses;
}
xmlSchemaItemListAddSize(uses, baseUses->nbItems, use);
}
}
}
/*
* Shrink attr. uses.
*/
if (uses) {
if (uses->nbItems == 0) {
xmlSchemaItemListFree(uses);
type->attrUses = NULL;
}
/*
* TODO: We could shrink the size of the array
* to fit the actual number of items.
*/
}
/*
* Compute the complete wildcard.
*/
if (WXS_IS_EXTENSION(type)) {
if (baseType->attributeWildcard != NULL) {
/*
* (3.2.2.1) "If the `base wildcard` is non-`absent`, then
* the appropriate case among the following:"
*/
if (type->attributeWildcard != NULL) {
/*
* Union the complete wildcard with the base wildcard.
* SPEC {attribute wildcard}
* (3.2.2.1.2) "otherwise a wildcard whose {process contents}
* and {annotation} are those of the `complete wildcard`,
* and whose {namespace constraint} is the intensional union
* of the {namespace constraint} of the `complete wildcard`
* and of the `base wildcard`, as defined in Attribute
* Wildcard Union ($3.10.6)."
*/
if (xmlSchemaUnionWildcards(pctxt, type->attributeWildcard,
baseType->attributeWildcard) == -1)
goto exit_failure;
} else {
/*
* (3.2.2.1.1) "If the `complete wildcard` is `absent`,
* then the `base wildcard`."
*/
type->attributeWildcard = baseType->attributeWildcard;
}
} else {
/*
* (3.2.2.2) "otherwise (the `base wildcard` is `absent`) the
* `complete wildcard`"
* NOOP
*/
}
} else {
/*
* SPEC {attribute wildcard}
* (3.1) "If the <restriction> alternative is chosen, then the
* `complete wildcard`;"
* NOOP
*/
}
return (0);
exit_failure:
return(-1);
} | 0 | [
"CWE-134"
] | libxml2 | 4472c3a5a5b516aaf59b89be602fbce52756c3e9 | 26,711,012,897,063,973,000,000,000,000,000,000,000 | 184 | Fix some format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
Decorate every method in libxml2 with the appropriate
LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups
following the reports. |
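A sketch of what a LIBXML_ATTR_FORMAT-style macro typically expands to (the exact libxml2 definition may differ): GCC/Clang's format attribute, which makes the compiler type-check the variadic arguments against the format string at build time.

#if defined(__GNUC__)
# define ATTR_FORMAT(fmt, args) __attribute__((__format__(__printf__, fmt, args)))
#else
# define ATTR_FORMAT(fmt, args)
#endif

/* Declaration-side usage: msg is parameter 2, variadic args start at 3. */
void report_error(int code, const char *msg, ...) ATTR_FORMAT(2, 3);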
static void array_cleanup( char* arr[] , int arr_size)
{
int i=0;
for( i=0; i< arr_size; i++ ){
if( arr[i*2] ){
efree( arr[i*2]);
}
}
efree(arr);
} | 1 | [
"CWE-125"
] | php-src | 97eff7eb57fc2320c267a949cffd622c38712484 | 132,636,430,444,739,700,000,000,000,000,000,000,000 | 10 | Fix bug #72241: get_icu_value_internal out-of-bounds read |
string dotConcat(const std::string& a, const std::string &b)
{
if(a.empty() || b.empty())
return a+b;
else
return a+"."+b;
} | 0 | [
"CWE-399"
] | pdns | 881b5b03a590198d03008e4200dd00cc537712f3 | 148,342,452,824,161,850,000,000,000,000,000,000,000 | 7 | Reject qname's wirelength > 255, `chopOff()` handle dot inside labels |
eval_vars(
char_u *src, // pointer into commandline
char_u *srcstart, // beginning of valid memory for src
int *usedlen, // characters after src that are used
linenr_T *lnump, // line number for :e command, or NULL
char **errormsg, // pointer to error message
int *escaped) // return value has escaped white space (can
// be NULL)
{
int i;
char_u *s;
char_u *result;
char_u *resultbuf = NULL;
int resultlen;
buf_T *buf;
int valid = VALID_HEAD + VALID_PATH; // assume valid result
int spec_idx;
int tilde_file = FALSE;
int skip_mod = FALSE;
char_u strbuf[30];
*errormsg = NULL;
if (escaped != NULL)
*escaped = FALSE;
/*
* Check if there is something to do.
*/
spec_idx = find_cmdline_var(src, usedlen);
if (spec_idx < 0) // no match
{
*usedlen = 1;
return NULL;
}
/*
* Skip when preceded with a backslash "\%" and "\#".
* Note: In "\\%" the % is also not recognized!
*/
if (src > srcstart && src[-1] == '\\')
{
*usedlen = 0;
STRMOVE(src - 1, src); // remove backslash
return NULL;
}
/*
* word or WORD under cursor
*/
if (spec_idx == SPEC_CWORD || spec_idx == SPEC_CCWORD
|| spec_idx == SPEC_CEXPR)
{
resultlen = find_ident_under_cursor(&result,
spec_idx == SPEC_CWORD ? (FIND_IDENT | FIND_STRING)
: spec_idx == SPEC_CEXPR ? (FIND_IDENT | FIND_STRING | FIND_EVAL)
: FIND_STRING);
if (resultlen == 0)
{
*errormsg = "";
return NULL;
}
}
/*
* '#': Alternate file name
* '%': Current file name
* File name under the cursor
* File name for autocommand
* and following modifiers
*/
else
{
int off = 0;
switch (spec_idx)
{
case SPEC_PERC:
#ifdef FEAT_EVAL
if (!in_vim9script() || src[1] != '%')
#endif
{
// '%': current file
if (curbuf->b_fname == NULL)
{
result = (char_u *)"";
valid = 0; // Must have ":p:h" to be valid
}
else
{
result = curbuf->b_fname;
tilde_file = STRCMP(result, "~") == 0;
}
break;
}
#ifdef FEAT_EVAL
// "%%" alternate file
off = 1;
#endif
// FALLTHROUGH
case SPEC_HASH: // '#' or "#99": alternate file
if (off == 0 ? src[1] == '#' : src[2] == '%')
{
// "##" or "%%%": the argument list
result = arg_all();
resultbuf = result;
*usedlen = off + 2;
if (escaped != NULL)
*escaped = TRUE;
skip_mod = TRUE;
break;
}
s = src + off + 1;
if (*s == '<') // "#<99" uses v:oldfiles
++s;
i = (int)getdigits(&s);
if (s == src + off + 2 && src[off + 1] == '-')
// just a minus sign, don't skip over it
s--;
*usedlen = (int)(s - src); // length of what we expand
if (src[off + 1] == '<' && i != 0)
{
if (*usedlen < off + 2)
{
// Should we give an error message for #<text?
*usedlen = off + 1;
return NULL;
}
#ifdef FEAT_EVAL
result = list_find_str(get_vim_var_list(VV_OLDFILES),
(long)i);
if (result == NULL)
{
*errormsg = "";
return NULL;
}
#else
*errormsg = _("E809: #< is not available without the +eval feature");
return NULL;
#endif
}
else
{
if (i == 0 && src[off + 1] == '<' && *usedlen > off + 1)
*usedlen = off + 1;
buf = buflist_findnr(i);
if (buf == NULL)
{
*errormsg = _(e_no_alternate_file_name_to_substitute_for_hash);
return NULL;
}
if (lnump != NULL)
*lnump = ECMD_LAST;
if (buf->b_fname == NULL)
{
result = (char_u *)"";
valid = 0; // Must have ":p:h" to be valid
}
else
{
result = buf->b_fname;
tilde_file = STRCMP(result, "~") == 0;
}
}
break;
#ifdef FEAT_SEARCHPATH
case SPEC_CFILE: // file name under cursor
result = file_name_at_cursor(FNAME_MESS|FNAME_HYP, 1L, NULL);
if (result == NULL)
{
*errormsg = "";
return NULL;
}
resultbuf = result; // remember allocated string
break;
#endif
case SPEC_AFILE: // file name for autocommand
result = autocmd_fname;
if (result != NULL && !autocmd_fname_full)
{
// Still need to turn the fname into a full path. It is
// postponed to avoid a delay when <afile> is not used.
autocmd_fname_full = TRUE;
result = FullName_save(autocmd_fname, FALSE);
vim_free(autocmd_fname);
autocmd_fname = result;
}
if (result == NULL)
{
*errormsg = _(e_no_autocommand_file_name_to_substitute_for_afile);
return NULL;
}
result = shorten_fname1(result);
break;
case SPEC_ABUF: // buffer number for autocommand
if (autocmd_bufnr <= 0)
{
*errormsg = _(e_no_autocommand_buffer_name_to_substitute_for_abuf);
return NULL;
}
sprintf((char *)strbuf, "%d", autocmd_bufnr);
result = strbuf;
break;
case SPEC_AMATCH: // match name for autocommand
result = autocmd_match;
if (result == NULL)
{
*errormsg = _(e_no_autocommand_match_name_to_substitute_for_amatch);
return NULL;
}
break;
case SPEC_SFILE: // file name for ":so" command
case SPEC_STACK: // call stack
result = estack_sfile(spec_idx == SPEC_SFILE
? ESTACK_SFILE : ESTACK_STACK);
if (result == NULL)
{
*errormsg = spec_idx == SPEC_SFILE
? _(e_no_source_file_name_to_substitute_for_sfile)
: _(e_no_call_stack_to_substitute_for_stack);
return NULL;
}
resultbuf = result; // remember allocated string
break;
case SPEC_SLNUM: // line in file for ":so" command
if (SOURCING_NAME == NULL || SOURCING_LNUM == 0)
{
*errormsg = _("E842: no line number to use for \"<slnum>\"");
return NULL;
}
sprintf((char *)strbuf, "%ld", SOURCING_LNUM);
result = strbuf;
break;
#ifdef FEAT_EVAL
case SPEC_SFLNUM: // line in script file
if (current_sctx.sc_lnum + SOURCING_LNUM == 0)
{
*errormsg = _("E961: no line number to use for \"<sflnum>\"");
return NULL;
}
sprintf((char *)strbuf, "%ld",
(long)(current_sctx.sc_lnum + SOURCING_LNUM));
result = strbuf;
break;
case SPEC_SID:
if (current_sctx.sc_sid <= 0)
{
*errormsg = _(e_using_sid_not_in_script_context);
return NULL;
}
sprintf((char *)strbuf, "<SNR>%d_", current_sctx.sc_sid);
result = strbuf;
break;
#endif
#ifdef FEAT_CLIENTSERVER
case SPEC_CLIENT: // Source of last submitted input
sprintf((char *)strbuf, PRINTF_HEX_LONG_U,
(long_u)clientWindow);
result = strbuf;
break;
#endif
default:
result = (char_u *)""; // avoid gcc warning
break;
}
resultlen = (int)STRLEN(result); // length of new string
if (src[*usedlen] == '<') // remove the file name extension
{
++*usedlen;
if ((s = vim_strrchr(result, '.')) != NULL && s >= gettail(result))
resultlen = (int)(s - result);
}
else if (!skip_mod)
{
valid |= modify_fname(src, tilde_file, usedlen, &result, &resultbuf,
&resultlen);
if (result == NULL)
{
*errormsg = "";
return NULL;
}
}
}
if (resultlen == 0 || valid != VALID_HEAD + VALID_PATH)
{
if (valid != VALID_HEAD + VALID_PATH)
// xgettext:no-c-format
*errormsg = _(e_empty_file_name_for_percent_or_hash_only_works_with_ph);
else
*errormsg = _(e_evaluates_to_an_empty_string);
result = NULL;
}
else
result = vim_strnsave(result, resultlen);
vim_free(resultbuf);
return result;
} | 0 | [
"CWE-125"
] | vim | d3a117814d6acbf0dca3eff1a7626843b9b3734a | 334,073,266,132,418,000,000,000,000,000,000,000,000 | 309 | patch 8.2.4009: reading one byte beyond the end of the line
Problem: Reading one byte beyond the end of the line.
Solution: Check for NUL byte first. |
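A sketch of the fix class named in the message, with an illustrative two-character test rather than Vim's actual one: order the comparisons so the byte after the current one is read only when the current byte is not the terminating NUL.

/* With short-circuit &&, p[1] is never a past-the-end read, because it is
 * only evaluated when p[0] is '#' (and therefore not the terminating NUL). */
static int starts_hash_angle(const char *p)
{
    return p[0] == '#' && p[1] == '<';
}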
sdap_ad_tokengroups_initgr_posix_sids_connect_done(struct tevent_req *subreq)
{
struct sdap_ad_tokengroups_initgr_posix_state *state = NULL;
struct tevent_req *req = NULL;
int ret;
int dp_error = DP_ERR_FATAL;
req = tevent_req_callback_data(subreq, struct tevent_req);
state = tevent_req_data(req,
struct sdap_ad_tokengroups_initgr_posix_state);
ret = sdap_id_op_connect_recv(subreq, &dp_error);
talloc_zfree(subreq);
if (ret != EOK) {
tevent_req_error(req, ret);
return;
}
subreq = sdap_get_ad_tokengroups_send(state, state->ev, state->opts,
sdap_id_op_handle(state->op),
state->username, state->orig_dn,
state->timeout);
if (subreq == NULL) {
tevent_req_error(req, ENOMEM);
return;
}
tevent_req_set_callback(subreq, sdap_ad_tokengroups_initgr_posix_tg_done,
req);
return;
} | 0 | [
"CWE-264"
] | sssd | 191d7f7ce3de10d9e19eaa0a6ab3319bcd4ca95d | 117,708,736,467,364,380,000,000,000,000,000,000,000 | 34 | AD: process non-posix nested groups using tokenGroups
When initgr is performed for AD supporting tokenGroups, do not skip
non-posix groups.
Resolves:
https://fedorahosted.org/sssd/ticket/2343
Reviewed-by: Michal Židek <[email protected]>
(cherry picked from commit 4932db6258ccfb612a3a28eb6a618c2f042b9d58) |
const char *diff_unique_abbrev(const unsigned char *sha1, int len)
{
int abblen;
const char *abbrev;
if (len == 40)
return sha1_to_hex(sha1);
abbrev = find_unique_abbrev(sha1, len);
abblen = strlen(abbrev);
if (abblen < 37) {
static char hex[41];
if (len < abblen && abblen <= len + 2)
sprintf(hex, "%s%.*s", abbrev, len+3-abblen, "..");
else
sprintf(hex, "%s...", abbrev);
return hex;
}
return sha1_to_hex(sha1);
} | 0 | [
"CWE-119"
] | git | fd55a19eb1d49ae54008d932a65f79cd6fda45c9 | 91,072,146,179,195,670,000,000,000,000,000,000,000 | 19 | Fix buffer overflow in git diff
If PATH_MAX on your system is smaller than a path stored, it may cause
buffer overflow and stack corruption in diff_addremove() and diff_change()
functions when running git-diff
Signed-off-by: Dmitry Potapov <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]> |
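A sketch of the general fix class the message names, not git's actual patch: never sprintf() a path into a fixed PATH_MAX buffer; either bound the copy or allocate to fit. concat_path() is a hypothetical helper.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *concat_path(const char *base, const char *path)
{
    size_t n = strlen(base) + 1 + strlen(path) + 1;  /* base + '/' + path + NUL */
    char *buf = malloc(n);
    if (buf)
        snprintf(buf, n, "%s/%s", base, path);
    return buf;                        /* caller frees */
}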
void qemu_spice_display_init(DisplayState *ds)
{
assert(sdpy.ds == NULL);
qemu_spice_display_init_common(&sdpy, ds);
register_displaychangelistener(ds, &display_listener);
sdpy.qxl.base.sif = &dpy_interface.base;
qemu_spice_add_interface(&sdpy.qxl.base);
assert(sdpy.worker);
qemu_add_vm_change_state_handler(qemu_spice_vm_change_state_handler, &sdpy);
qemu_spice_create_host_memslot(&sdpy);
qemu_spice_create_host_primary(&sdpy);
} | 0 | [] | qemu-kvm | 5ff4e36c804157bd84af43c139f8cd3a59722db9 | 291,833,424,284,843,000,000,000,000,000,000,000,000 | 14 | qxl: async io support using new spice api
Some of the QXL port i/o commands are waiting for the spice server to
complete certain actions. Add async versions for these commands, so we
don't block the vcpu while the spice server processses the command.
Instead the qxl device will raise an IRQ when done.
The async command processing relies on an added QXLInterface::async_complete
and added QXLWorker::*_async additions, in spice server qxl >= 3.1
Signed-off-by: Gerd Hoffmann <[email protected]>
Signed-off-by: Alon Levy <[email protected]> |
selPrintToString(SEL *sel)
{
char is_center;
char *str, *strptr;
l_int32 type;
l_int32 sx, sy, cx, cy, x, y;
PROCNAME("selPrintToString");
if (!sel)
return (char *)ERROR_PTR("sel not defined", procName, NULL);
selGetParameters(sel, &sy, &sx, &cy, &cx);
if ((str = (char *)LEPT_CALLOC(1, sy * (sx + 1) + 1)) == NULL)
return (char *)ERROR_PTR("calloc fail for str", procName, NULL);
strptr = str;
for (y = 0; y < sy; ++y) {
for (x = 0; x < sx; ++x) {
selGetElement(sel, y, x, &type);
is_center = (x == cx && y == cy);
switch (type) {
case SEL_HIT:
*(strptr++) = is_center ? 'X' : 'x';
break;
case SEL_MISS:
*(strptr++) = is_center ? 'O' : 'o';
break;
case SEL_DONT_CARE:
*(strptr++) = is_center ? 'C' : ' ';
break;
}
}
*(strptr++) = '\n';
}
return str;
} | 0 | [
"CWE-119",
"CWE-787"
] | leptonica | ee301cb2029db8a6289c5295daa42bba7715e99a | 84,976,173,641,182,490,000,000,000,000,000,000,000 | 38 | Security fixes: expect final changes for release 1.75.3.
* Fixed a debian security issue with fscanf() reading a string with
possible buffer overflow.
* There were also a few similar situations with sscanf(). |
void ssl3_cbc_digest_record(
const EVP_MD *digest,
unsigned char* md_out,
size_t* md_out_size,
const unsigned char header[13],
const unsigned char *data,
size_t data_plus_mac_size,
size_t data_plus_mac_plus_padding_size,
const unsigned char *mac_secret,
unsigned mac_secret_length,
char is_sslv3)
{
unsigned char md_state[sizeof(SHA512_CTX)];
void (*md_final_raw)(void *ctx, unsigned char *md_out);
void (*md_transform)(void *ctx, const unsigned char *block);
unsigned md_size, md_block_size = 64;
unsigned sslv3_pad_length = 40, header_length, variance_blocks,
len, max_mac_bytes, num_blocks,
num_starting_blocks, k, mac_end_offset, c, index_a, index_b;
unsigned int bits; /* at most 18 bits */
unsigned char length_bytes[MAX_HASH_BIT_COUNT_BYTES];
/* hmac_pad is the masked HMAC key. */
unsigned char hmac_pad[MAX_HASH_BLOCK_SIZE];
unsigned char first_block[MAX_HASH_BLOCK_SIZE];
unsigned char mac_out[EVP_MAX_MD_SIZE];
unsigned i, j, md_out_size_u;
EVP_MD_CTX md_ctx;
/* md_length_size is the number of bytes in the length field that terminates
* the hash. */
unsigned md_length_size = 8;
/* This is a, hopefully redundant, check that allows us to forget about
* many possible overflows later in this function. */
OPENSSL_assert(data_plus_mac_plus_padding_size < 1024*1024);
switch (EVP_MD_type(digest))
{
case NID_md5:
MD5_Init((MD5_CTX*)md_state);
md_final_raw = tls1_md5_final_raw;
md_transform = (void(*)(void *ctx, const unsigned char *block)) MD5_Transform;
md_size = 16;
sslv3_pad_length = 48;
break;
case NID_sha1:
SHA1_Init((SHA_CTX*)md_state);
md_final_raw = tls1_sha1_final_raw;
md_transform = (void(*)(void *ctx, const unsigned char *block)) SHA1_Transform;
md_size = 20;
break;
case NID_sha224:
SHA224_Init((SHA256_CTX*)md_state);
md_final_raw = tls1_sha256_final_raw;
md_transform = (void(*)(void *ctx, const unsigned char *block)) SHA256_Transform;
md_size = 224/8;
break;
case NID_sha256:
SHA256_Init((SHA256_CTX*)md_state);
md_final_raw = tls1_sha256_final_raw;
md_transform = (void(*)(void *ctx, const unsigned char *block)) SHA256_Transform;
md_size = 32;
break;
case NID_sha384:
SHA384_Init((SHA512_CTX*)md_state);
md_final_raw = tls1_sha512_final_raw;
md_transform = (void(*)(void *ctx, const unsigned char *block)) SHA512_Transform;
md_size = 384/8;
md_block_size = 128;
md_length_size = 16;
break;
case NID_sha512:
SHA512_Init((SHA512_CTX*)md_state);
md_final_raw = tls1_sha512_final_raw;
md_transform = (void(*)(void *ctx, const unsigned char *block)) SHA512_Transform;
md_size = 64;
md_block_size = 128;
md_length_size = 16;
break;
default:
/* ssl3_cbc_record_digest_supported should have been
* called first to check that the hash function is
* supported. */
OPENSSL_assert(0);
if (md_out_size)
*md_out_size = -1;
return;
}
OPENSSL_assert(md_length_size <= MAX_HASH_BIT_COUNT_BYTES);
OPENSSL_assert(md_block_size <= MAX_HASH_BLOCK_SIZE);
OPENSSL_assert(md_size <= EVP_MAX_MD_SIZE);
header_length = 13;
if (is_sslv3)
{
header_length =
mac_secret_length +
sslv3_pad_length +
8 /* sequence number */ +
1 /* record type */ +
2 /* record length */;
}
/* variance_blocks is the number of blocks of the hash that we have to
* calculate in constant time because they could be altered by the
* padding value.
*
* In SSLv3, the padding must be minimal so the end of the plaintext
* varies by, at most, 15+20 = 35 bytes. (We conservatively assume that
* the MAC size varies from 0..20 bytes.) In case the 9 bytes of hash
* termination (0x80 + 64-bit length) don't fit in the final block, we
* say that the final two blocks can vary based on the padding.
*
* TLSv1 has MACs up to 48 bytes long (SHA-384) and the padding is not
* required to be minimal. Therefore we say that the final six blocks
* can vary based on the padding.
*
* Later in the function, if the message is short and there obviously
* cannot be this many blocks then variance_blocks can be reduced. */
variance_blocks = is_sslv3 ? 2 : 6;
/* From now on we're dealing with the MAC, which conceptually has 13
* bytes of `header' before the start of the data (TLS) or 71/75 bytes
* (SSLv3) */
len = data_plus_mac_plus_padding_size + header_length;
/* max_mac_bytes contains the maximum bytes of bytes in the MAC, including
* |header|, assuming that there's no padding. */
max_mac_bytes = len - md_size - 1;
/* num_blocks is the maximum number of hash blocks. */
num_blocks = (max_mac_bytes + 1 + md_length_size + md_block_size - 1) / md_block_size;
/* In order to calculate the MAC in constant time we have to handle
* the final blocks specially because the padding value could cause the
* end to appear somewhere in the final |variance_blocks| blocks and we
* can't leak where. However, |num_starting_blocks| worth of data can
* be hashed right away because no padding value can affect whether
* they are plaintext. */
num_starting_blocks = 0;
/* k is the starting byte offset into the conceptual header||data where
* we start processing. */
k = 0;
/* mac_end_offset is the index just past the end of the data to be
* MACed. */
mac_end_offset = data_plus_mac_size + header_length - md_size;
/* c is the index of the 0x80 byte in the final hash block that
* contains application data. */
c = mac_end_offset % md_block_size;
/* index_a is the hash block number that contains the 0x80 terminating
* value. */
index_a = mac_end_offset / md_block_size;
/* index_b is the hash block number that contains the 64-bit hash
* length, in bits. */
index_b = (mac_end_offset + md_length_size) / md_block_size;
/* bits is the hash-length in bits. It includes the additional hash
* block for the masked HMAC key, or whole of |header| in the case of
* SSLv3. */
/* For SSLv3, if we're going to have any starting blocks then we need
* at least two because the header is larger than a single block. */
if (num_blocks > variance_blocks + (is_sslv3 ? 1 : 0))
{
num_starting_blocks = num_blocks - variance_blocks;
k = md_block_size*num_starting_blocks;
}
bits = 8*mac_end_offset;
if (!is_sslv3)
{
/* Compute the initial HMAC block. For SSLv3, the padding and
* secret bytes are included in |header| because they take more
* than a single block. */
bits += 8*md_block_size;
memset(hmac_pad, 0, md_block_size);
OPENSSL_assert(mac_secret_length <= sizeof(hmac_pad));
memcpy(hmac_pad, mac_secret, mac_secret_length);
for (i = 0; i < md_block_size; i++)
hmac_pad[i] ^= 0x36;
md_transform(md_state, hmac_pad);
}
memset(length_bytes,0,md_length_size-4);
length_bytes[md_length_size-4] = (unsigned char)(bits>>24);
length_bytes[md_length_size-3] = (unsigned char)(bits>>16);
length_bytes[md_length_size-2] = (unsigned char)(bits>>8);
length_bytes[md_length_size-1] = (unsigned char)bits;
if (k > 0)
{
if (is_sslv3)
{
/* The SSLv3 header is larger than a single block.
* overhang is the number of bytes beyond a single
* block that the header consumes: either 7 bytes
* (SHA1) or 11 bytes (MD5). */
unsigned overhang = header_length-md_block_size;
md_transform(md_state, header);
memcpy(first_block, header + md_block_size, overhang);
memcpy(first_block + overhang, data, md_block_size-overhang);
md_transform(md_state, first_block);
for (i = 1; i < k/md_block_size - 1; i++)
md_transform(md_state, data + md_block_size*i - overhang);
}
else
{
/* k is a multiple of md_block_size. */
memcpy(first_block, header, 13);
memcpy(first_block+13, data, md_block_size-13);
md_transform(md_state, first_block);
for (i = 1; i < k/md_block_size; i++)
md_transform(md_state, data + md_block_size*i - 13);
}
}
memset(mac_out, 0, sizeof(mac_out));
/* We now process the final hash blocks. For each block, we construct
* it in constant time. If the |i==index_a| then we'll include the 0x80
* bytes and zero pad etc. For each block we selectively copy it, in
* constant time, to |mac_out|. */
for (i = num_starting_blocks; i <= num_starting_blocks+variance_blocks; i++)
{
unsigned char block[MAX_HASH_BLOCK_SIZE];
unsigned char is_block_a = constant_time_eq_8(i, index_a);
unsigned char is_block_b = constant_time_eq_8(i, index_b);
for (j = 0; j < md_block_size; j++)
{
unsigned char b = 0, is_past_c, is_past_cp1;
if (k < header_length)
b = header[k];
else if (k < data_plus_mac_plus_padding_size + header_length)
b = data[k-header_length];
k++;
is_past_c = is_block_a & constant_time_ge(j, c);
is_past_cp1 = is_block_a & constant_time_ge(j, c+1);
/* If this is the block containing the end of the
* application data, and we are at the offset for the
* 0x80 value, then overwrite b with 0x80. */
b = (b&~is_past_c) | (0x80&is_past_c);
/* If this is the block containing the end of the
* application data and we're past the 0x80 value then
* just write zero. */
b = b&~is_past_cp1;
/* If this is index_b (the final block), but not
* index_a (the end of the data), then the 64-bit
* length didn't fit into index_a and we're having to
* add an extra block of zeros. */
b &= ~is_block_b | is_block_a;
/* The final bytes of one of the blocks contains the
* length. */
if (j >= md_block_size - md_length_size)
{
/* If this is index_b, write a length byte. */
b = (b&~is_block_b) | (is_block_b&length_bytes[j-(md_block_size-md_length_size)]);
}
block[j] = b;
}
md_transform(md_state, block);
md_final_raw(md_state, block);
/* If this is index_b, copy the hash value to |mac_out|. */
for (j = 0; j < md_size; j++)
mac_out[j] |= block[j]&is_block_b;
}
EVP_MD_CTX_init(&md_ctx);
EVP_DigestInit_ex(&md_ctx, digest, NULL /* engine */);
if (is_sslv3)
{
/* We repurpose |hmac_pad| to contain the SSLv3 pad2 block. */
memset(hmac_pad, 0x5c, sslv3_pad_length);
EVP_DigestUpdate(&md_ctx, mac_secret, mac_secret_length);
EVP_DigestUpdate(&md_ctx, hmac_pad, sslv3_pad_length);
EVP_DigestUpdate(&md_ctx, mac_out, md_size);
}
else
{
/* Complete the HMAC in the standard manner. */
for (i = 0; i < md_block_size; i++)
hmac_pad[i] ^= 0x6a;
EVP_DigestUpdate(&md_ctx, hmac_pad, md_block_size);
EVP_DigestUpdate(&md_ctx, mac_out, md_size);
}
EVP_DigestFinal(&md_ctx, md_out, &md_out_size_u);
if (md_out_size)
*md_out_size = md_out_size_u;
EVP_MD_CTX_cleanup(&md_ctx);
} | 1 | [
"CWE-310"
] | openssl | 5f9345a2f0b592457fc4a619ac98ea59ffd394ba | 214,698,647,160,452,300,000,000,000,000,000,000,000 | 290 | ssl/s3_cbc.c: md_state alignment portability fix.
RISC targets are picky, and the alignment the compiler grants md_state can
be insufficient for SHA512.
(cherry picked from commit 36260233e7e3396feed884d3f501283e0453c04f) |
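A sketch of the portability fix described above: a plain char array is only char-aligned, so casting it to a hash-context pointer can fault on strict-alignment RISC targets; placing the bytes in a union with a strongly aligned member forces suitable alignment. The 216-byte size is illustrative (at least sizeof(SHA512_CTX) on common builds), not OpenSSL's exact layout.

union md_state {
    double align;                  /* forces at least 8-byte alignment */
    unsigned char bytes[216];      /* raw storage for the hash context */
};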
GF_Err iods_box_read(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 descSize;
char *desc;
GF_ObjectDescriptorBox *ptr = (GF_ObjectDescriptorBox *)s;
//use the OD codec...
descSize = (u32) (ptr->size);
desc = (char*)gf_malloc(sizeof(char) * descSize);
if (!desc) return GF_OUT_OF_MEM;
gf_bs_read_data(bs, desc, descSize);
e = gf_odf_desc_read(desc, descSize, &ptr->descriptor);
//OK, free our desc
gf_free(desc);
return e;
} | 1 | [
"CWE-476"
] | gpac | 586e817dcd531bb3e75438390f1f753cfe6e940a | 81,779,831,433,822,195,000,000,000,000,000,000,000 | 18 | fixed #2046 |
dwg_model_y_max (const Dwg_Data *dwg)
{
assert (dwg);
return dwg->header_vars.EXTMAX.y;
} | 0 | [
"CWE-787"
] | libredwg | ecf5183d8b3b286afe2a30021353b7116e0208dd | 49,176,360,847,710,405,000,000,000,000,000,000,000 | 5 | dwg_section_wtype: fix fuzzing overflow
with illegal and overlong section names. Fixes GH #349, #352
section names cannot be longer than 24 |
proc_file_lseek(struct file *file, loff_t offset, int orig)
{
lock_kernel();
switch (orig) {
case 0:
if (offset < 0)
goto out;
file->f_pos = offset;
unlock_kernel();
return(file->f_pos);
case 1:
if (offset + file->f_pos < 0)
goto out;
file->f_pos += offset;
unlock_kernel();
return(file->f_pos);
case 2:
goto out;
default:
goto out;
}
out:
unlock_kernel();
return -EINVAL;
} | 1 | [] | linux-2.6 | 8b90db0df7187a01fb7177f1f812123138f562cf | 260,607,114,478,156,540,000,000,000,000,000,000,000 | 27 | Insanity avoidance in /proc
The old /proc interfaces were never updated to use loff_t, and are just
generally broken. Now, we should be using the seq_file interface for
all of the proc files, but converting the legacy functions is more work
than most people care for and has little upside..
But at least we can make the non-LFS rules explicit, rather than just
insanely wrapping the offset or something.
Signed-off-by: Linus Torvalds <[email protected]> |
static char *phar_get_link_location(phar_entry_info *entry) /* {{{ */
{
char *p, *ret = NULL;
if (!entry->link) {
return NULL;
}
if (entry->link[0] == '/') {
return estrdup(entry->link + 1);
}
p = strrchr(entry->filename, '/');
if (p) {
*p = '\0';
spprintf(&ret, 0, "%s/%s", entry->filename, entry->link);
return ret;
}
return entry->link;
} | 0 | [
"CWE-119",
"CWE-787"
] | php-src | 0bfb970f43acd1e81d11be1154805f86655f15d5 | 302,674,865,181,092,040,000,000,000,000,000,000,000 | 17 | Fix bug #72928 - Out of bound when verify signature of zip phar in phar_parse_zipfile
(cherry picked from commit 19484ab77466f99c78fc0e677f7e03da0584d6a2) |
compressed_to_ciphertext (gnutls_session_t session,
uint8_t * cipher_data, int cipher_size,
gnutls_datum_t *compressed,
content_type_t type,
record_parameters_st * params)
{
uint8_t * tag_ptr = NULL;
uint8_t pad;
int length, length_to_encrypt, ret;
uint8_t preamble[MAX_PREAMBLE_SIZE];
int preamble_size;
int tag_size = _gnutls_auth_cipher_tag_len (¶ms->write.cipher_state);
int blocksize = gnutls_cipher_get_block_size (params->cipher_algorithm);
unsigned block_algo =
_gnutls_cipher_is_block (params->cipher_algorithm);
uint8_t *data_ptr;
int ver = gnutls_protocol_get_version (session);
int explicit_iv = _gnutls_version_has_explicit_iv (session->security_parameters.version);
int auth_cipher = _gnutls_auth_cipher_is_aead(¶ms->write.cipher_state);
int random_pad;
/* We don't use long padding if requested or if we are in DTLS.
*/
if (session->internals.priorities.no_padding == 0 && (!IS_DTLS(session)))
random_pad = 1;
else
random_pad = 0;
_gnutls_hard_log("ENC[%p]: cipher: %s, MAC: %s, Epoch: %u\n",
session, gnutls_cipher_get_name(params->cipher_algorithm), gnutls_mac_get_name(params->mac_algorithm),
(unsigned int)params->epoch);
preamble_size =
make_preamble (UINT64DATA
(params->write.sequence_number),
type, compressed->size, ver, preamble);
/* Calculate the encrypted length (padding etc.)
*/
length_to_encrypt = length =
calc_enc_length (session, compressed->size, tag_size, &pad,
random_pad, block_algo, auth_cipher, blocksize);
if (length < 0)
{
return gnutls_assert_val(length);
}
/* copy the encrypted data to cipher_data.
*/
if (cipher_size < length)
{
return gnutls_assert_val(GNUTLS_E_MEMORY_ERROR);
}
data_ptr = cipher_data;
if (explicit_iv)
{
if (block_algo == CIPHER_BLOCK)
{
/* copy the random IV.
*/
ret = _gnutls_rnd (GNUTLS_RND_NONCE, data_ptr, blocksize);
if (ret < 0)
return gnutls_assert_val(ret);
_gnutls_auth_cipher_setiv(¶ms->write.cipher_state, data_ptr, blocksize);
data_ptr += blocksize;
cipher_data += blocksize;
length_to_encrypt -= blocksize;
}
else if (auth_cipher)
{
uint8_t nonce[blocksize];
/* Values in AEAD are pretty fixed in TLS 1.2 for 128-bit block
*/
if (params->write.IV.data == NULL || params->write.IV.size != AEAD_IMPLICIT_DATA_SIZE)
return gnutls_assert_val(GNUTLS_E_INTERNAL_ERROR);
/* Instead of generating a new nonce on every packet, we use the
* write.sequence_number (It is a MAY on RFC 5288).
*/
memcpy(nonce, params->write.IV.data, params->write.IV.size);
memcpy(&nonce[AEAD_IMPLICIT_DATA_SIZE], UINT64DATA(params->write.sequence_number), 8);
_gnutls_auth_cipher_setiv(¶ms->write.cipher_state, nonce, AEAD_IMPLICIT_DATA_SIZE+AEAD_EXPLICIT_DATA_SIZE);
/* copy the explicit part */
memcpy(data_ptr, &nonce[AEAD_IMPLICIT_DATA_SIZE], AEAD_EXPLICIT_DATA_SIZE);
data_ptr += AEAD_EXPLICIT_DATA_SIZE;
cipher_data += AEAD_EXPLICIT_DATA_SIZE;
/* In AEAD ciphers we don't encrypt the tag
*/
length_to_encrypt -= AEAD_EXPLICIT_DATA_SIZE + tag_size;
}
}
else
{
/* AEAD ciphers have an explicit IV. Shouldn't be used otherwise.
*/
if (auth_cipher) return gnutls_assert_val(GNUTLS_E_INTERNAL_ERROR);
}
memcpy (data_ptr, compressed->data, compressed->size);
data_ptr += compressed->size;
if (tag_size > 0)
{
tag_ptr = data_ptr;
data_ptr += tag_size;
}
if (block_algo == CIPHER_BLOCK && pad > 0)
{
memset (data_ptr, pad - 1, pad);
}
/* add the authenticate data */
ret = _gnutls_auth_cipher_add_auth(¶ms->write.cipher_state, preamble, preamble_size);
if (ret < 0)
return gnutls_assert_val(ret);
/* Actual encryption (inplace).
*/
ret =
_gnutls_auth_cipher_encrypt2_tag (¶ms->write.cipher_state,
cipher_data, length_to_encrypt,
cipher_data, cipher_size,
tag_ptr, tag_size, compressed->size);
if (ret < 0)
return gnutls_assert_val(ret);
return length;
} | 1 | [
"CWE-310"
] | gnutls | b495740f2ff66550ca9395b3fda3ea32c3acb185 | 240,626,911,650,660,580,000,000,000,000,000,000,000 | 137 | changes in packet parsing. |
static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
{
struct block_device *bdev;
char b[BDEVNAME_SIZE];
bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
if (IS_ERR(bdev))
goto fail;
return bdev;
fail:
ext4_msg(sb, KERN_ERR, "failed to open journal device %s: %ld",
__bdevname(dev, b), PTR_ERR(bdev));
return NULL;
} | 0 | [
"CWE-362"
] | linux | ea3d7209ca01da209cda6f0dea8be9cc4b7a933b | 185,556,595,605,281,100,000,000,000,000,000,000,000 | 15 | ext4: fix races between page faults and hole punching
Currently, page faults and hole punching are completely unsynchronized.
This can result in page fault faulting in a page into a range that we
are punching after truncate_pagecache_range() has been called and thus
we can end up with a page mapped to disk blocks that will be shortly
freed. Filesystem corruption will shortly follow. Note that the same
race is avoided for truncate by checking page fault offset against
i_size but there isn't similar mechanism available for punching holes.
Fix the problem by creating new rw semaphore i_mmap_sem in inode and
grab it for writing over truncate, hole punching, and other functions
removing blocks from extent tree and for read over page faults. We
cannot easily use i_data_sem for this since that ranks below transaction
start and we need something ranking above it so that it can be held over
the whole truncate / hole punching operation. Also remove various
workarounds we had in the code to reduce race window when page fault
could have created pages with stale mapping information.
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]> |
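A sketch of the synchronization scheme in the message, using a POSIX rwlock in place of the kernel rw_semaphore: hole punching takes the lock for writing, the fault path for reading, so a fault can never map pages into a range that is concurrently being punched. Function bodies are placeholders.

#include <pthread.h>

static pthread_rwlock_t i_mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

static void punch_hole(void)           /* truncate/punch path: exclusive */
{
    pthread_rwlock_wrlock(&i_mmap_sem);
    /* ... remove blocks, invalidate the page cache range ... */
    pthread_rwlock_unlock(&i_mmap_sem);
}

static void page_fault(void)           /* fault path: shared */
{
    pthread_rwlock_rdlock(&i_mmap_sem);
    /* ... look up blocks, map the page ... */
    pthread_rwlock_unlock(&i_mmap_sem);
}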
static int mov_read_glbl(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
AVStream *st;
int ret;
if (c->fc->nb_streams < 1)
return 0;
st = c->fc->streams[c->fc->nb_streams-1];
if ((uint64_t)atom.size > (1<<30))
return AVERROR_INVALIDDATA;
if (atom.size >= 10) {
// Broken files created by legacy versions of libavformat will
// wrap a whole fiel atom inside of a glbl atom.
unsigned size = avio_rb32(pb);
unsigned type = avio_rl32(pb);
avio_seek(pb, -8, SEEK_CUR);
if (type == MKTAG('f','i','e','l') && size == atom.size)
return mov_read_default(c, pb, atom);
}
if (st->codecpar->extradata_size > 1 && st->codecpar->extradata) {
av_log(c, AV_LOG_WARNING, "ignoring multiple glbl\n");
return 0;
}
av_freep(&st->codecpar->extradata);
ret = ff_get_extradata(c->fc, st->codecpar, pb, atom.size);
if (ret < 0)
return ret;
return 0;
} | 0 | [
"CWE-399",
"CWE-834"
] | FFmpeg | 9cb4eb772839c5e1de2855d126bf74ff16d13382 | 17,089,598,105,127,095,000,000,000,000,000,000,000 | 32 | avformat/mov: Fix DoS in read_tfra()
Fixes: Missing EOF check in loop
No testcase
Found-by: Xiaohei and Wangchu from Alibaba Security Team
Signed-off-by: Michael Niedermayer <[email protected]> |
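A minimal sketch (not the read_tfra() patch itself) of the missing check the message names: bound the loop by end-of-input as well as by the declared entry count, so a hostile count cannot force reads past EOF. The entry format here is illustrative.

#include <libavformat/avio.h>
#include <libavutil/error.h>

static int read_entries(AVIOContext *pb, unsigned count)
{
    unsigned i;
    for (i = 0; i < count; i++) {
        if (avio_feof(pb))             /* truncated or lying header */
            return AVERROR_INVALIDDATA;
        avio_rb32(pb);                 /* consume one illustrative entry */
    }
    return 0;
}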
__ieee80211_beacon_get(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_mutable_offsets *offs,
bool is_template)
{
struct ieee80211_local *local = hw_to_local(hw);
struct beacon_data *beacon = NULL;
struct sk_buff *skb = NULL;
struct ieee80211_tx_info *info;
struct ieee80211_sub_if_data *sdata = NULL;
enum nl80211_band band;
struct ieee80211_tx_rate_control txrc;
struct ieee80211_chanctx_conf *chanctx_conf;
int csa_off_base = 0;
rcu_read_lock();
sdata = vif_to_sdata(vif);
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
if (!ieee80211_sdata_running(sdata) || !chanctx_conf)
goto out;
if (offs)
memset(offs, 0, sizeof(*offs));
if (sdata->vif.type == NL80211_IFTYPE_AP) {
struct ieee80211_if_ap *ap = &sdata->u.ap;
beacon = rcu_dereference(ap->beacon);
if (beacon) {
if (beacon->cntdwn_counter_offsets[0]) {
if (!is_template)
ieee80211_beacon_update_cntdwn(vif);
ieee80211_set_beacon_cntdwn(sdata, beacon);
}
/*
* headroom, head length,
* tail length and maximum TIM length
*/
skb = dev_alloc_skb(local->tx_headroom +
beacon->head_len +
beacon->tail_len + 256 +
local->hw.extra_beacon_tailroom);
if (!skb)
goto out;
skb_reserve(skb, local->tx_headroom);
skb_put_data(skb, beacon->head, beacon->head_len);
ieee80211_beacon_add_tim(sdata, &ap->ps, skb,
is_template);
if (offs) {
offs->tim_offset = beacon->head_len;
offs->tim_length = skb->len - beacon->head_len;
/* for AP the csa offsets are from tail */
csa_off_base = skb->len;
}
if (beacon->tail)
skb_put_data(skb, beacon->tail,
beacon->tail_len);
if (ieee80211_beacon_protect(skb, local, sdata) < 0)
goto out;
} else
goto out;
} else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
struct ieee80211_hdr *hdr;
beacon = rcu_dereference(ifibss->presp);
if (!beacon)
goto out;
if (beacon->cntdwn_counter_offsets[0]) {
if (!is_template)
__ieee80211_beacon_update_cntdwn(beacon);
ieee80211_set_beacon_cntdwn(sdata, beacon);
}
skb = dev_alloc_skb(local->tx_headroom + beacon->head_len +
local->hw.extra_beacon_tailroom);
if (!skb)
goto out;
skb_reserve(skb, local->tx_headroom);
skb_put_data(skb, beacon->head, beacon->head_len);
hdr = (struct ieee80211_hdr *) skb->data;
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_BEACON);
} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
beacon = rcu_dereference(ifmsh->beacon);
if (!beacon)
goto out;
if (beacon->cntdwn_counter_offsets[0]) {
if (!is_template)
/* TODO: For mesh csa_counter is in TU, so
* decrementing it by one isn't correct, but
* for now we leave it consistent with overall
* mac80211's behavior.
*/
__ieee80211_beacon_update_cntdwn(beacon);
ieee80211_set_beacon_cntdwn(sdata, beacon);
}
if (ifmsh->sync_ops)
ifmsh->sync_ops->adjust_tsf(sdata, beacon);
skb = dev_alloc_skb(local->tx_headroom +
beacon->head_len +
256 + /* TIM IE */
beacon->tail_len +
local->hw.extra_beacon_tailroom);
if (!skb)
goto out;
skb_reserve(skb, local->tx_headroom);
skb_put_data(skb, beacon->head, beacon->head_len);
ieee80211_beacon_add_tim(sdata, &ifmsh->ps, skb, is_template);
if (offs) {
offs->tim_offset = beacon->head_len;
offs->tim_length = skb->len - beacon->head_len;
}
skb_put_data(skb, beacon->tail, beacon->tail_len);
} else {
WARN_ON(1);
goto out;
}
/* CSA offsets */
if (offs && beacon) {
int i;
for (i = 0; i < IEEE80211_MAX_CNTDWN_COUNTERS_NUM; i++) {
u16 csa_off = beacon->cntdwn_counter_offsets[i];
if (!csa_off)
continue;
offs->cntdwn_counter_offs[i] = csa_off_base + csa_off;
}
}
band = chanctx_conf->def.chan->band;
info = IEEE80211_SKB_CB(skb);
info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
info->flags |= IEEE80211_TX_CTL_NO_ACK;
info->band = band;
memset(&txrc, 0, sizeof(txrc));
txrc.hw = hw;
txrc.sband = local->hw.wiphy->bands[band];
txrc.bss_conf = &sdata->vif.bss_conf;
txrc.skb = skb;
txrc.reported_rate.idx = -1;
if (sdata->beacon_rate_set && sdata->beacon_rateidx_mask[band])
txrc.rate_idx_mask = sdata->beacon_rateidx_mask[band];
else
txrc.rate_idx_mask = sdata->rc_rateidx_mask[band];
txrc.bss = true;
rate_control_get_rate(sdata, NULL, &txrc);
info->control.vif = vif;
info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT |
IEEE80211_TX_CTL_ASSIGN_SEQ |
IEEE80211_TX_CTL_FIRST_FRAGMENT;
out:
rcu_read_unlock();
return skb;
} | 0 | [
"CWE-476"
] | linux | bddc0c411a45d3718ac535a070f349be8eca8d48 | 304,912,136,531,049,400,000,000,000,000,000,000,000 | 185 | mac80211: Fix NULL ptr deref for injected rate info
The commit cb17ed29a7a5 ("mac80211: parse radiotap header when selecting Tx
queue") moved the code to validate the radiotap header from
ieee80211_monitor_start_xmit to ieee80211_parse_tx_radiotap. This made it
possible to share more code with the new Tx queue selection code for
injected frames. But at the same time, it now required the call of
ieee80211_parse_tx_radiotap at the beginning of functions which wanted to
handle the radiotap header. And this broke the rate parser in the
radiotap header parsing path.
The radiotap parser for rates is operating most of the time only on the
data in the actual radiotap header. But for the 802.11a/b/g rates, it must
also know the selected band from the chandef information. But this
information is only written to the ieee80211_tx_info at the end of the
ieee80211_monitor_start_xmit - long after ieee80211_parse_tx_radiotap was
already called. The info->band information was therefore always 0
(NL80211_BAND_2GHZ) when the parser code tried to access it.
For a 5GHz only device, injecting a frame with 802.11a rates would cause a
NULL pointer dereference because local->hw.wiphy->bands[NL80211_BAND_2GHZ]
would most likely have been NULL when the radiotap parser searched for the
correct rate index of the driver.
Cc: [email protected]
Reported-by: Ben Greear <[email protected]>
Fixes: cb17ed29a7a5 ("mac80211: parse radiotap header when selecting Tx queue")
Signed-off-by: Mathy Vanhoef <[email protected]>
[[email protected]: added commit message]
Signed-off-by: Sven Eckelmann <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Johannes Berg <[email protected]> |
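The ordering fix this implies can be sketched as a hedged fragment, not the literal upstream diff; 'chandef' stands in for the monitor interface's channel definition resolved earlier in the transmit path:

	/* assumption: 'chandef' was resolved earlier in
	 * ieee80211_monitor_start_xmit */
	info->band = chandef->chan->band;	/* set the band first... */
	if (!ieee80211_parse_tx_radiotap(skb, dev))
		goto fail;			/* ...so the rate parser can use it */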
tsize_t _tiffWriteProc(thandle_t hdata, tdata_t buf, tsize_t size) {
TIFFSTATE *state = (TIFFSTATE *)hdata;
tsize_t to_write;
TRACE(("_tiffWriteProc: %d \n", (int)size));
dump_state(state);
to_write = min(size, state->size - (tsize_t)state->loc);
if (state->flrealloc && size>to_write) {
tdata_t new_data;
tsize_t newsize=state->size;
while (newsize < (size + state->size)) {
if (newsize > INT_MAX - 64*1024){
return 0;
}
newsize += 64*1024;
// newsize*=2; // UNDONE, by 64k chunks?
}
TRACE(("Reallocing in write to %d bytes\n", (int)newsize));
/* malloc check ok, overflow checked above */
new_data = realloc(state->data, newsize);
if (!new_data) {
// fail out
return 0;
}
state->data = new_data;
state->size = newsize;
to_write = size;
}
TRACE(("to_write: %d\n", (int)to_write));
_TIFFmemcpy((UINT8 *)state->data + state->loc, buf, to_write);
state->loc += (toff_t)to_write;
state->eof = max(state->loc, state->eof);
dump_state(state);
return to_write;
} | 0 | [
"CWE-190",
"CWE-787"
] | Pillow | 4e2def2539ec13e53a82e06c4b3daf00454100c4 | 319,934,761,458,815,330,000,000,000,000,000,000,000 | 39 | Overflow checks for realloc for tiff decoding |
template<typename tp, typename tc>
CImg<T>& draw_polygon(const CImg<tp>& points,
const tc *const color, const float opacity=1) {
if (is_empty() || !points) return *this;
if (!color)
throw CImgArgumentException(_cimg_instance
"draw_polygon(): Specified color is (null).",
cimg_instance);
if (points.height()!=2)
throw CImgArgumentException(_cimg_instance
"draw_polygon(): Invalid specified point set (%u,%u,%u,%u).",
cimg_instance,
points._width,points._height,points._depth,points._spectrum);
if (points._width==1) return draw_point(cimg::uiround(points(0,0)),cimg::uiround(points(0,1)),color,opacity);
if (points._width==2) return draw_line(cimg::uiround(points(0,0)),cimg::uiround(points(0,1)),
cimg::uiround(points(1,0)),cimg::uiround(points(1,1)),color,opacity);
if (points._width==3) return draw_triangle(cimg::uiround(points(0,0)),cimg::uiround(points(0,1)),
cimg::uiround(points(1,0)),cimg::uiround(points(1,1)),
cimg::uiround(points(2,0)),cimg::uiround(points(2,1)),color,opacity);
cimg_init_scanline(opacity);
int
xmin = 0, ymin = 0,
xmax = points.get_shared_row(0).max_min(xmin),
ymax = points.get_shared_row(1).max_min(ymin);
if (xmax<0 || xmin>=width() || ymax<0 || ymin>=height()) return *this;
if (ymin==ymax) return draw_line(xmin,ymin,xmax,ymax,color,opacity);
ymin = std::max(0,ymin);
ymax = std::min(height() - 1,ymax);
CImg<intT> Xs(points._width,ymax - ymin + 1);
CImg<uintT> count(Xs._height,1,1,1,0);
unsigned int n = 0, nn = 1;
bool go_on = true;
while (go_on) {
unsigned int an = (nn + 1)%points._width;
const int
x0 = cimg::uiround(points(n,0)),
y0 = cimg::uiround(points(n,1));
if (points(nn,1)==y0) while (points(an,1)==y0) { nn = an; (an+=1)%=points._width; }
const int
x1 = cimg::uiround(points(nn,0)),
y1 = cimg::uiround(points(nn,1));
unsigned int tn = an;
while (points(tn,1)==y1) (tn+=1)%=points._width;
if (y0!=y1) {
const int
y2 = cimg::uiround(points(tn,1)),
x01 = x1 - x0, y01 = y1 - y0, y12 = y2 - y1,
step = cimg::sign(y01),
tmax = std::max(1,cimg::abs(y01)), htmax = tmax*cimg::sign(x01)/2,
tend = tmax - (step==cimg::sign(y12));
unsigned int y = (unsigned int)y0 - ymin;
for (int t = 0; t<=tend; ++t, y+=step)
if (y<Xs._height) Xs(count[y]++,y) = x0 + (t*x01 + htmax)/tmax;
}
go_on = nn>n;
n = nn;
nn = an;
}
cimg_pragma_openmp(parallel for cimg_openmp_if(Xs._height>=(cimg_openmp_sizefactor)*32))
cimg_forY(Xs,y) {
const CImg<intT> Xsy = Xs.get_shared_points(0,count[y] - 1,y).sort();
int px = width();
for (unsigned int k = 0; k<Xsy._width; k+=2) {
int x0 = Xsy[k];
const int x1 = Xsy[k + 1];
x0+=x0==px;
cimg_draw_scanline(x0,x1,y + ymin,color,opacity,1);
px = x1;
}
}
return *this; | 0 | [
"CWE-119",
"CWE-787"
] | CImg | ac8003393569aba51048c9d67e1491559877b1d1 | 326,742,390,948,155,100,000,000,000,000,000,000,000 | 75 | . |
untrusted_launcher_response_callback (GtkDialog *dialog,
int response_id,
ActivateParametersDesktop *parameters)
{
GdkScreen *screen;
char *uri;
GFile *file;
switch (response_id) {
case RESPONSE_RUN:
screen = gtk_widget_get_screen (GTK_WIDGET (parameters->parent_window));
uri = nautilus_file_get_uri (parameters->file);
nautilus_debug_log (FALSE, NAUTILUS_DEBUG_LOG_DOMAIN_USER,
"directory view activate_callback launch_desktop_file window=%p: %s",
parameters->parent_window, uri);
nautilus_launch_desktop_file (screen, uri, NULL,
parameters->parent_window);
g_free (uri);
break;
case RESPONSE_MARK_TRUSTED:
file = nautilus_file_get_location (parameters->file);
nautilus_file_mark_desktop_file_trusted (file,
parameters->parent_window,
TRUE,
NULL, NULL);
g_object_unref (file);
break;
default:
/* Just destroy dialog */
break;
}
gtk_widget_destroy (GTK_WIDGET (dialog));
activate_parameters_desktop_free (parameters);
} | 0 | [] | nautilus | 1e1c916f5537eb5e4144950f291f4a3962fc2395 | 59,553,655,479,255,890,000,000,000,000,000,000,000 | 35 | Add "interactive" argument to nautilus_file_mark_desktop_file_trusted.
2009-02-24 Alexander Larsson <[email protected]>
* libnautilus-private/nautilus-file-operations.c:
* libnautilus-private/nautilus-file-operations.h:
* libnautilus-private/nautilus-mime-actions.c:
Add "interactive" argument to
nautilus_file_mark_desktop_file_trusted.
* src/nautilus-application.c:
Mark all desktopfiles on the desktop trusted on first
run.
svn path=/trunk/; revision=15009 |
static BROTLI_INLINE uint32_t GetInsertExtra(uint16_t inscode) {
return kBrotliInsExtra[inscode];
} | 0 | [
"CWE-120"
] | brotli | 223d80cfbec8fd346e32906c732c8ede21f0cea6 | 177,642,852,641,259,560,000,000,000,000,000,000,000 | 3 | Update (#826)
* IMPORTANT: decoder: fix potential overflow when input chunk is >2GiB (see the sketch after this list)
* simplify max Huffman table size calculation
* eliminate symbol duplicates (static arrays in .h files)
* minor combing in research/ code |
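A minimal sketch of the overflow class behind the first item above, assuming nothing about Brotli's internals: accumulating chunk sizes in a signed 32-bit counter wraps once more than 2 GiB has been consumed, while size_t arithmetic does not.

#include <stddef.h>
#include <stdint.h>

static int32_t total_bad(int32_t total, int32_t chunk) {
  return total + chunk;              /* overflows once > 2 GiB consumed */
}

static size_t total_ok(size_t total, size_t chunk) {
  return total + chunk;              /* full-range accumulator */
}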
bool Item_null::send(Protocol *protocol, st_value *buffer)
{
return protocol->store_null();
} | 0 | [
"CWE-416"
] | server | c02ebf3510850ba78a106be9974c94c3b97d8585 | 323,577,905,060,630,600,000,000,000,000,000,000,000 | 4 | MDEV-24176 Preparations
1. moved fix_vcol_exprs() call to open_table()
   mysql_alter_table() doesn't do lock_tables(), so it cannot benefit from
   fix_vcol_exprs() being called there. Tests affected: main.default_session
2. Vanilla cleanups and comments. |
int
xmlHashSize(xmlHashTablePtr table) {
if (table == NULL)
return(-1);
return(table->nbElems);
} | 0 | [
"CWE-399"
] | libxml2 | 8973d58b7498fa5100a876815476b81fd1a2412a | 99,366,659,424,120,750,000,000,000,000,000,000,000 | 5 | Add hash randomization to hash and dict structures
Following http://www.ocert.org/advisories/ocert-2011-003.html
it seems that having hash randomization might be a good idea
when using XML with untrusted data
* configure.in: lookup for rand, srand and time
* dict.c: add randomization to dictionaries hash tables
* hash.c: add randomization to normal hash tables |
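The randomization the list above refers to can be sketched as mixing a per-table random seed into the string hash, so an attacker feeding untrusted XML cannot precompute colliding keys; the names below are illustrative assumptions, not libxml2's exact internals:

#include <stdlib.h>
#include <time.h>

static unsigned long
hash_string_seeded(unsigned long seed, const unsigned char *name) {
    unsigned long value = seed;        /* per-table random seed */

    while (*name != 0)
        value = value ^ ((value << 5) + (value >> 3) + *name++);
    return value;
}

/* at table creation: table->seed = (unsigned long) rand();
 * after a one-time srand((unsigned) time(NULL)) during library init */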
static bool dce83_construct(
uint8_t num_virtual_links,
struct dc *dc,
struct dce110_resource_pool *pool)
{
unsigned int i;
struct dc_context *ctx = dc->ctx;
struct dc_bios *bp;
ctx->dc_bios->regs = &bios_regs;
pool->base.res_cap = &res_cap_83;
pool->base.funcs = &dce80_res_pool_funcs;
/*************************************************
	 * Resource + asic cap hardcoding *
*************************************************/
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
pool->base.pipe_count = res_cap_83.num_timing_generator;
pool->base.timing_generator_count = res_cap_83.num_timing_generator;
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 40;
dc->caps.max_cursor_size = 128;
dc->caps.is_apu = true;
/*************************************************
* Create resources *
*************************************************/
bp = ctx->dc_bios;
if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) {
pool->base.dp_clock_source =
dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
pool->base.clock_sources[0] =
dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], false);
pool->base.clock_sources[1] =
dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false);
pool->base.clk_src_count = 2;
} else {
pool->base.dp_clock_source =
dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], true);
pool->base.clock_sources[0] =
dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false);
pool->base.clk_src_count = 1;
}
if (pool->base.dp_clock_source == NULL) {
dm_error("DC: failed to create dp clock source!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
}
for (i = 0; i < pool->base.clk_src_count; i++) {
if (pool->base.clock_sources[i] == NULL) {
dm_error("DC: failed to create clock sources!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
}
}
pool->base.dmcu = dce_dmcu_create(ctx,
&dmcu_regs,
&dmcu_shift,
&dmcu_mask);
if (pool->base.dmcu == NULL) {
dm_error("DC: failed to create dmcu!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
}
pool->base.abm = dce_abm_create(ctx,
&abm_regs,
&abm_shift,
&abm_mask);
if (pool->base.abm == NULL) {
dm_error("DC: failed to create abm!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
}
{
struct irq_service_init_data init_data;
init_data.ctx = dc->ctx;
pool->base.irqs = dal_irq_service_dce80_create(&init_data);
if (!pool->base.irqs)
goto res_create_fail;
}
for (i = 0; i < pool->base.pipe_count; i++) {
pool->base.timing_generators[i] = dce80_timing_generator_create(
ctx, i, &dce80_tg_offsets[i]);
if (pool->base.timing_generators[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create tg!\n");
goto res_create_fail;
}
pool->base.mis[i] = dce80_mem_input_create(ctx, i);
if (pool->base.mis[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create memory input!\n");
goto res_create_fail;
}
pool->base.ipps[i] = dce80_ipp_create(ctx, i);
if (pool->base.ipps[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create input pixel processor!\n");
goto res_create_fail;
}
pool->base.transforms[i] = dce80_transform_create(ctx, i);
if (pool->base.transforms[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create transform!\n");
goto res_create_fail;
}
pool->base.opps[i] = dce80_opp_create(ctx, i);
if (pool->base.opps[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create output pixel processor!\n");
goto res_create_fail;
}
}
for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
pool->base.engines[i] = dce80_aux_engine_create(ctx, i);
if (pool->base.engines[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC:failed to create aux engine!!\n");
goto res_create_fail;
}
pool->base.hw_i2cs[i] = dce80_i2c_hw_create(ctx, i);
if (pool->base.hw_i2cs[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC:failed to create i2c engine!!\n");
goto res_create_fail;
}
pool->base.sw_i2cs[i] = dce80_i2c_sw_create(ctx);
if (pool->base.sw_i2cs[i] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC:failed to create sw i2c!!\n");
goto res_create_fail;
}
}
dc->caps.max_planes = pool->base.pipe_count;
for (i = 0; i < dc->caps.max_planes; ++i)
dc->caps.planes[i] = plane_cap;
dc->caps.disable_dp_clk_share = true;
if (!resource_construct(num_virtual_links, dc, &pool->base,
&res_create_funcs))
goto res_create_fail;
/* Create hardware sequencer */
dce80_hw_sequencer_construct(dc);
return true;
res_create_fail:
destruct(pool);
return false;
} | 0 | [
"CWE-400",
"CWE-703",
"CWE-401"
] | linux | 055e547478a11a6360c7ce05e2afc3e366968a12 | 215,323,047,127,637,340,000,000,000,000,000,000,000 | 175 | drm/amd/display: memory leak
In dcn*_clock_source_create, when dcn20_clk_src_construct fails, the
allocated clk_src needs to be released.
Signed-off-by: Navid Emamdoost <[email protected]>
Signed-off-by: Alex Deucher <[email protected]> |
void perf_bp_event(struct perf_event *bp, void *data)
{
struct perf_sample_data sample;
struct pt_regs *regs = data;
perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
if (!bp->hw.state && !perf_exclude_event(bp, regs))
perf_swevent_event(bp, 1, &sample, regs);
} | 0 | [
"CWE-703",
"CWE-189"
] | linux | 8176cced706b5e5d15887584150764894e94e02f | 240,999,162,517,763,430,000,000,000,000,000,000,000 | 10 | perf: Treat attr.config as u64 in perf_swevent_init()
Trinity discovered that we fail to check all 64 bits of
attr.config passed by user space, resulting in out-of-bounds
access of the perf_swevent_enabled array in
sw_perf_event_destroy().
Introduced in commit b0a873ebb ("perf: Register PMU
implementations").
Signed-off-by: Tommi Rantala <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: [email protected]
Cc: Paul Mackerras <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]> |
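A sketch of the fix's essential check, hedged as an approximation of the real patch rather than a quote of it: treat attr.config as a full u64 and range-check it before it ever indexes perf_swevent_enabled.

	u64 event_id = attr->config;		/* keep all 64 bits */

	if (event_id >= PERF_COUNT_SW_MAX)	/* reject out-of-range ids */
		return -ENOENT;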
flatpak_proxy_add_wildcarded_policy (FlatpakProxy *proxy,
const char *name,
FlatpakPolicy policy)
{
g_hash_table_replace (proxy->wildcard_policy, g_strdup (name), GINT_TO_POINTER (policy));
} | 0 | [
"CWE-284",
"CWE-436"
] | flatpak | 52346bf187b5a7f1c0fe9075b328b7ad6abe78f6 | 141,689,717,583,904,000,000,000,000,000,000,000,000 | 6 | Fix vulnerability in dbus proxy
During the authentication all client data is directly forwarded
to the dbus daemon as is, until we detect the BEGIN command after
which we start filtering the binary dbus protocol.
Unfortunately the detection of the BEGIN command in the proxy
did not exactly match the detection in the dbus daemon. A BEGIN
followed by a space or tab was considered ok in the daemon but
not by the proxy. This could be exploited to send arbitrary
dbus messages to the host, which can be used to break out of
the sandbox.
This was noticed by Gabriel Campana of The Google Security Team.
This fix makes the detection of the authentication phase end
match the dbus code. In addition we duplicate the authentication
line validation from dbus, which includes ensuring all data is
ASCII, and limiting the size of a line to 16k. In fact, we add
some extra stringent checks, disallowing ASCII control chars and
requiring that auth lines start with a capital letter. |
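A minimal sketch of the stricter auth-line validation described above (ASCII only, no control characters, at most 16k, initial capital letter); this illustrates the stated rules and is not flatpak's actual routine:

#include <stdbool.h>
#include <stddef.h>

#define AUTH_LINE_MAX (16 * 1024)

static bool
auth_line_is_valid (const char *line, size_t len)
{
  size_t i;

  if (len == 0 || len > AUTH_LINE_MAX)
    return false;
  if (line[0] < 'A' || line[0] > 'Z')      /* must start with a capital */
    return false;
  for (i = 0; i < len; i++)
    {
      unsigned char c = (unsigned char) line[i];
      if (c < 0x20 || c > 0x7e)            /* ASCII, no control chars */
        return false;
    }
  return true;
}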
static void wdm_in_callback(struct urb *urb)
{
struct wdm_device *desc = urb->context;
int status = urb->status;
int length = urb->actual_length;
spin_lock(&desc->iuspin);
clear_bit(WDM_RESPONDING, &desc->flags);
if (status) {
switch (status) {
case -ENOENT:
dev_dbg(&desc->intf->dev,
"nonzero urb status received: -ENOENT");
goto skip_error;
case -ECONNRESET:
dev_dbg(&desc->intf->dev,
"nonzero urb status received: -ECONNRESET");
goto skip_error;
case -ESHUTDOWN:
dev_dbg(&desc->intf->dev,
"nonzero urb status received: -ESHUTDOWN");
goto skip_error;
case -EPIPE:
dev_err(&desc->intf->dev,
"nonzero urb status received: -EPIPE\n");
break;
default:
dev_err(&desc->intf->dev,
"Unexpected error %d\n", status);
break;
}
}
desc->rerr = status;
if (length + desc->length > desc->wMaxCommand) {
/* The buffer would overflow */
set_bit(WDM_OVERFLOW, &desc->flags);
} else {
/* we may already be in overflow */
if (!test_bit(WDM_OVERFLOW, &desc->flags)) {
memmove(desc->ubuf + desc->length, desc->inbuf, length);
desc->length += length;
desc->reslength = length;
}
}
skip_error:
wake_up(&desc->wait);
set_bit(WDM_READ, &desc->flags);
spin_unlock(&desc->iuspin);
} | 0 | [
"CWE-119",
"CWE-269"
] | linux | c0f5ecee4e741667b2493c742b60b6218d40b3aa | 211,145,399,598,249,400,000,000,000,000,000,000,000 | 52 | USB: cdc-wdm: fix buffer overflow
The buffer for responses must not overflow.
If this would happen, set a flag, drop the data and return
an error after user space has read all remaining data.
Signed-off-by: Oliver Neukum <[email protected]>
CC: [email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
static void filter_build_regex(struct filter_pred *pred)
{
struct regex *r = &pred->regex;
char *search;
enum regex_type type = MATCH_FULL;
if (pred->op == OP_GLOB) {
type = filter_parse_regex(r->pattern, r->len, &search, &pred->not);
r->len = strlen(search);
memmove(r->pattern, search, r->len+1);
}
switch (type) {
case MATCH_FULL:
r->match = regex_match_full;
break;
case MATCH_FRONT_ONLY:
r->match = regex_match_front;
break;
case MATCH_MIDDLE_ONLY:
r->match = regex_match_middle;
break;
case MATCH_END_ONLY:
r->match = regex_match_end;
break;
case MATCH_GLOB:
r->match = regex_match_glob;
break;
}
} | 0 | [
"CWE-787"
] | linux | 70303420b5721c38998cf987e6b7d30cc62d4ff1 | 162,929,031,952,135,470,000,000,000,000,000,000,000 | 30 | tracing: Check for no filter when processing event filters
The syzkaller detected a out-of-bounds issue with the events filter code,
specifically here:
prog[N].pred = NULL; /* #13 */
prog[N].target = 1; /* TRUE */
prog[N+1].pred = NULL;
prog[N+1].target = 0; /* FALSE */
-> prog[N-1].target = N;
prog[N-1].when_to_branch = false;
As that's the first reference to a "N-1" index, it appears that the code got
here with N = 0, which means the filter parser found no filter to parse
(which shouldn't ever happen, but apparently it did).
Add a new error to the parsing code that will check to make sure that N is
not zero before going into this part of the code. If N = 0, then -EINVAL is
returned, and a error message is added to the filter.
Cc: [email protected]
Fixes: 80765597bc587 ("tracing: Rewrite filter logic to be simpler and faster")
Reported-by: air icy <[email protected]>
bugzilla url: https://bugzilla.kernel.org/show_bug.cgi?id=200019
Signed-off-by: Steven Rostedt (VMware) <[email protected]> |
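The guard described above can be sketched directly against the snippet in the message; the surrounding structures are assumed, not quoted from the patch:

	if (N == 0)			/* parser produced no predicates */
		return -EINVAL;		/* reject instead of touching prog[-1] */

	prog[N].pred = NULL;		/* #13 */
	prog[N].target = 1;		/* TRUE */
	prog[N + 1].pred = NULL;
	prog[N + 1].target = 0;		/* FALSE */
	prog[N - 1].target = N;		/* safe now: N >= 1 */
	prog[N - 1].when_to_branch = false;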
MagickExport ssize_t ParsePixelChannelOption(const char *channels)
{
char
*q,
token[MagickPathExtent];
ssize_t
channel;
GetMagickToken(channels,NULL,token);
if ((*token == ';') || (*token == '|'))
return(RedPixelChannel);
channel=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,token);
if (channel >= 0)
return(channel);
q=(char *) token;
channel=(ssize_t) InterpretLocaleValue(token,&q);
if ((q == token) || (channel < 0) || (channel >= MaxPixelChannels))
return(-1);
return(channel);
} | 0 | [
"CWE-399"
] | ImageMagick | 6790815c75bdea0357df5564345847856e995d6b | 267,697,187,079,972,100,000,000,000,000,000,000,000 | 21 | Fixed memory leak in IsOptionMember. |
gchar *
cd_device_db_get_property (CdDeviceDb *ddb,
const gchar *device_id,
const gchar *property,
GError **error)
{
CdDeviceDbPrivate *priv = GET_PRIVATE (ddb);
gchar *error_msg = NULL;
gchar *statement;
gint rc;
gchar *value = NULL;
g_autoptr(GPtrArray) array_tmp = NULL;
g_return_val_if_fail (CD_IS_DEVICE_DB (ddb), NULL);
g_return_val_if_fail (priv->db != NULL, NULL);
g_debug ("CdDeviceDb: get property %s for %s", property, device_id);
statement = sqlite3_mprintf ("SELECT value FROM properties_v2 WHERE "
"device_id = '%q' AND "
"property = '%q' LIMIT 1;",
device_id, property);
	/* query the property value */
array_tmp = g_ptr_array_new_with_free_func (g_free);
rc = sqlite3_exec (priv->db,
statement,
cd_device_db_sqlite_cb,
array_tmp,
&error_msg);
if (rc != SQLITE_OK) {
g_set_error (error,
CD_CLIENT_ERROR,
CD_CLIENT_ERROR_INTERNAL,
"SQL error: %s",
error_msg);
sqlite3_free (error_msg);
goto out;
}
/* never set */
if (array_tmp->len == 0) {
g_set_error (error,
CD_CLIENT_ERROR,
CD_CLIENT_ERROR_INTERNAL,
"no such property %s for %s",
property, device_id);
goto out;
}
/* success */
value = g_strdup (g_ptr_array_index (array_tmp, 0));
out:
sqlite3_free (statement);
return value;
} | 0 | [
"CWE-200"
] | colord | adf41f36cf7214d7d6fa8d528b74eba47c377405 | 141,435,211,408,977,280,000,000,000,000,000,000,000 | 54 | Fix a small memory leak in sqlite3_exec()
Fixes https://github.com/hughsie/colord/issues/110 |
const gchar *
pk_transaction_get_tid (PkTransaction *transaction)
{
g_return_val_if_fail (PK_IS_TRANSACTION (transaction), NULL);
g_return_val_if_fail (transaction->priv->tid != NULL, NULL);
return transaction->priv->tid;
} | 0 | [
"CWE-287"
] | PackageKit | 7e8a7905ea9abbd1f384f05f36a4458682cd4697 | 54,715,245,070,569,510,000,000,000,000,000,000,000 | 7 | Do not set JUST_REINSTALL on any kind of auth failure
If we try to continue the auth queue when it has been cancelled (or failed)
then we fall upon the obscure JUST_REINSTALL transaction flag which only the
DNF backend actually verifies.
Many thanks to Matthias Gerstner <[email protected]> for spotting the problem. |
void auth_request_default_verify_plain_continue(struct auth_request *request,
verify_plain_callback_t *callback)
{
struct auth_passdb *passdb;
enum passdb_result result;
const char *cache_key, *error;
const char *password = request->mech_password;
i_assert(request->state == AUTH_REQUEST_STATE_MECH_CONTINUE);
if (auth_request_is_disabled_master_user(request)) {
callback(PASSDB_RESULT_USER_UNKNOWN, request);
return;
}
if (password_has_illegal_chars(password)) {
e_info(authdb_event(request),
"Attempted login with password having illegal chars");
callback(PASSDB_RESULT_USER_UNKNOWN, request);
return;
}
passdb = request->passdb;
while (passdb != NULL && auth_request_want_skip_passdb(request, passdb))
passdb = passdb->next;
request->passdb = passdb;
if (passdb == NULL) {
auth_request_log_error(request,
request->mech != NULL ? AUTH_SUBSYS_MECH : "none",
"All password databases were skipped");
callback(PASSDB_RESULT_INTERNAL_FAILURE, request);
return;
}
auth_request_passdb_lookup_begin(request);
request->private_callback.verify_plain = callback;
cache_key = passdb_cache == NULL ? NULL : passdb->cache_key;
if (passdb_cache_verify_plain(request, cache_key, password,
&result, FALSE)) {
return;
}
auth_request_set_state(request, AUTH_REQUEST_STATE_PASSDB);
/* In case this request had already done a credentials lookup (is it
even possible?), make sure wanted_credentials_scheme is cleared
so passdbs don't think we're doing a credentials lookup. */
request->wanted_credentials_scheme = NULL;
if (passdb->passdb->iface.verify_plain == NULL) {
/* we're deinitializing and just want to get rid of this
request */
auth_request_verify_plain_callback(
PASSDB_RESULT_INTERNAL_FAILURE, request);
} else if (passdb->passdb->blocking) {
passdb_blocking_verify_plain(request);
} else if (passdb_template_export(passdb->default_fields_tmpl,
request, &error) < 0) {
e_error(authdb_event(request),
"Failed to expand default_fields: %s", error);
auth_request_verify_plain_callback(
PASSDB_RESULT_INTERNAL_FAILURE, request);
} else {
passdb->passdb->iface.verify_plain(request, password,
auth_request_verify_plain_callback);
}
} | 0 | [
"CWE-284"
] | core | 7bad6a24160e34bce8f10e73dbbf9e5fbbcd1904 | 108,797,289,026,346,050,000,000,000,000,000,000,000 | 70 | auth: Fix handling passdbs with identical driver/args but different mechanisms/username_filter
The passdb was wrongly deduplicated in this situation, causing wrong
mechanisms or username_filter setting to be used. This would be a rather
unlikely configuration though.
Fixed by moving mechanisms and username_filter from struct passdb_module
to struct auth_passdb, which is where they should have been in the first
place. |
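Structurally, the fix amounts to the following (abbreviated, illustrative field sets): the per-configuration settings live on struct auth_passdb, one per listed passdb, while struct passdb_module remains shareable between identical driver/args pairs.

struct auth_passdb {
	struct auth_passdb *next;
	const char *mechanisms;        /* moved here: never deduplicated */
	const char *username_filter;   /* moved here: never deduplicated */
	struct passdb_module *passdb;  /* may be shared across entries */
};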
static xmlNodeSetPtr
xmlXPathNodeSetMergeAndClear(xmlNodeSetPtr set1, xmlNodeSetPtr set2,
int hasNullEntries)
{
if ((set1 == NULL) && (hasNullEntries == 0)) {
/*
* Note that doing a memcpy of the list, namespace nodes are
* just assigned to set1, since set2 is cleared anyway.
*/
set1 = xmlXPathNodeSetCreateSize(set2->nodeNr);
if (set1 == NULL)
return(NULL);
if (set2->nodeNr != 0) {
memcpy(set1->nodeTab, set2->nodeTab,
set2->nodeNr * sizeof(xmlNodePtr));
set1->nodeNr = set2->nodeNr;
}
} else {
int i, j, initNbSet1;
xmlNodePtr n1, n2;
if (set1 == NULL)
set1 = xmlXPathNodeSetCreate(NULL);
if (set1 == NULL)
return (NULL);
initNbSet1 = set1->nodeNr;
for (i = 0;i < set2->nodeNr;i++) {
n2 = set2->nodeTab[i];
/*
* Skip NULLed entries.
*/
if (n2 == NULL)
continue;
/*
* Skip duplicates.
*/
for (j = 0; j < initNbSet1; j++) {
n1 = set1->nodeTab[j];
if (n1 == n2) {
goto skip_node;
} else if ((n1->type == XML_NAMESPACE_DECL) &&
(n2->type == XML_NAMESPACE_DECL))
{
if ((((xmlNsPtr) n1)->next == ((xmlNsPtr) n2)->next) &&
(xmlStrEqual(((xmlNsPtr) n1)->prefix,
((xmlNsPtr) n2)->prefix)))
{
/*
* Free the namespace node.
*/
set2->nodeTab[i] = NULL;
xmlXPathNodeSetFreeNs((xmlNsPtr) n2);
goto skip_node;
}
}
}
/*
* grow the nodeTab if needed
*/
if (set1->nodeMax == 0) {
set1->nodeTab = (xmlNodePtr *) xmlMalloc(
XML_NODESET_DEFAULT * sizeof(xmlNodePtr));
if (set1->nodeTab == NULL) {
xmlXPathErrMemory(NULL, "merging nodeset\n");
return(NULL);
}
memset(set1->nodeTab, 0,
XML_NODESET_DEFAULT * (size_t) sizeof(xmlNodePtr));
set1->nodeMax = XML_NODESET_DEFAULT;
} else if (set1->nodeNr >= set1->nodeMax) {
xmlNodePtr *temp;
if (set1->nodeMax >= XPATH_MAX_NODESET_LENGTH) {
xmlXPathErrMemory(NULL, "merging nodeset hit limit\n");
return(NULL);
}
temp = (xmlNodePtr *) xmlRealloc(
set1->nodeTab, set1->nodeMax * 2 * sizeof(xmlNodePtr));
if (temp == NULL) {
xmlXPathErrMemory(NULL, "merging nodeset\n");
return(NULL);
}
set1->nodeTab = temp;
set1->nodeMax *= 2;
}
if (n2->type == XML_NAMESPACE_DECL) {
xmlNsPtr ns = (xmlNsPtr) n2;
set1->nodeTab[set1->nodeNr++] =
xmlXPathNodeSetDupNs((xmlNodePtr) ns->next, ns);
} else
set1->nodeTab[set1->nodeNr++] = n2;
skip_node:
{}
}
}
set2->nodeNr = 0;
return(set1);
} | 0 | [] | libxml2 | 03c6723043775122313f107695066e5744189a08 | 335,971,609,796,243,740,000,000,000,000,000,000,000 | 99 | Handling of XPath function arguments in error case
The XPath engine tries to guarantee that every XPath function can pop
'nargs' non-NULL values off the stack. libxslt, for example, relies on
this assumption. But the check isn't thorough enough if there are errors
during the evaluation of arguments. This can lead to segfaults:
https://mail.gnome.org/archives/xslt/2013-December/msg00005.html
This commit makes the handling of function arguments more robust.
* Bail out early when evaluation of XPath function arguments fails.
* Make sure that there are 'nargs' arguments in the current call frame. |
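The call-frame guarantee can be reduced to a small arity check — a sketch under the assumption that the evaluator tracks a value-stack depth and a per-call frame base:

static int
xpath_check_arity(int value_count, int frame_base, int nargs)
{
    /* the current call frame must hold at least 'nargs' evaluated values */
    if (value_count - frame_base < nargs)
        return -1;   /* caller raises XPATH_INVALID_OPERAND and bails out */
    return 0;
}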
LEX_USER *create_default_definer(THD *thd, bool role)
{
LEX_USER *definer;
if (! (definer= (LEX_USER*) thd->alloc(sizeof(LEX_USER))))
return 0;
thd->get_definer(definer, role);
if (role && definer->user.length == 0)
{
my_error(ER_MALFORMED_DEFINER, MYF(0));
return 0;
}
else
return definer;
} | 0 | [] | server | ba4927e520190bbad763bb5260ae154f29a61231 | 305,650,998,114,633,100,000,000,000,000,000,000,000 | 17 | MDEV-19398: Assertion `item1->type() == Item::FIELD_ITEM ...
Window Functions code tries to minimize the number of times it
needs to sort the select's resultset by finding "compatible"
OVER (PARTITION BY ... ORDER BY ...) clauses.
This employs compare_order_elements(). That function assumed that
the order expressions are Item_field-derived objects (that refer
to a temp.table). But this is not always the case: one can
construct queries whose order expressions are arbitrary item expressions.
Add handling for such expressions: sort them according to the window
specification they appeared in.
This means we cannot detect that two compatible PARTITION BY clauses
that use expressions can share the sorting step.
But at least we won't crash. |
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len)
{
struct kvm_memslots *slots = kvm_memslots(kvm);
	return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
} | 0 | [
"CWE-459"
] | linux | 683412ccf61294d727ead4a73d97397396e69a6b | 123,693,422,736,611,260,000,000,000,000,000,000,000 | 6 | KVM: SEV: add cache flush to solve SEV cache incoherency issues
Flush the CPU caches when memory is reclaimed from an SEV guest (where
reclaim also includes it being unmapped from KVM's memslots). Due to lack
of coherency for SEV encrypted memory, failure to flush results in silent
data corruption if userspace is malicious/broken and doesn't ensure SEV
guest memory is properly pinned and unpinned.
Cache coherency is not enforced across the VM boundary in SEV (AMD APM
vol.2 Section 15.34.7). Confidential cachelines, generated by confidential
VM guests have to be explicitly flushed on the host side. If a memory page
containing dirty confidential cachelines was released by VM and reallocated
to another user, the cachelines may corrupt the new user at a later time.
KVM takes a shortcut by assuming all confidential memory remain pinned
until the end of VM lifetime. Therefore, KVM does not flush cache at
mmu_notifier invalidation events. Because of this incorrect assumption and
the lack of cache flushing, malicious userspace can crash the host kernel:
create a malicious VM and continuously allocate/release unpinned
confidential memory pages while the VM is running.
Add cache flush operations to mmu_notifier operations to ensure that any
physical memory leaving the guest VM get flushed. In particular, hook
mmu_notifier_invalidate_range_start and mmu_notifier_release events and
flush cache accordingly. The hook after releasing the mmu lock to avoid
contention with other vCPUs.
Cc: [email protected]
Suggested-by: Sean Christpherson <[email protected]>
Reported-by: Mingwei Zhang <[email protected]>
Signed-off-by: Mingwei Zhang <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
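A rough sketch of the flow the message describes, with hypothetical helper names: the unmap happens under the mmu lock, and the cache flush for encrypted guests runs after the lock is dropped to avoid contention with other vCPUs.

static void mmu_notifier_invalidate_sketch(struct kvm *kvm,
					   unsigned long start,
					   unsigned long end)
{
	spin_lock(&kvm->mmu_lock);
	unmap_guest_range(kvm, start, end);          /* hypothetical unmap step */
	spin_unlock(&kvm->mmu_lock);

	if (guest_memory_is_encrypted(kvm))          /* e.g. an SEV guest */
		flush_guest_cachelines(kvm, start, end); /* hypothetical arch hook */
}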
static bgp_attr_parse_ret_t
bgp_attr_nexthop (struct bgp_attr_parser_args *args)
{
struct peer *const peer = args->peer;
struct attr *const attr = args->attr;
const bgp_size_t length = args->length;
in_addr_t nexthop_h, nexthop_n;
/* Check nexthop attribute length. */
if (length != 4)
{
zlog (peer->log, LOG_ERR, "Nexthop attribute length isn't four [%d]",
length);
return bgp_attr_malformed (args,
BGP_NOTIFY_UPDATE_ATTR_LENG_ERR,
args->total);
}
/* According to section 6.3 of RFC4271, syntactically incorrect NEXT_HOP
attribute must result in a NOTIFICATION message (this is implemented below).
At the same time, semantically incorrect NEXT_HOP is more likely to be just
logged locally (this is implemented somewhere else). The UPDATE message
gets ignored in any of these cases. */
nexthop_n = stream_get_ipv4 (peer->ibuf);
nexthop_h = ntohl (nexthop_n);
if (IPV4_NET0 (nexthop_h) || IPV4_NET127 (nexthop_h) || IPV4_CLASS_DE (nexthop_h))
{
char buf[INET_ADDRSTRLEN];
inet_ntop (AF_INET, &nexthop_h, buf, INET_ADDRSTRLEN);
zlog (peer->log, LOG_ERR, "Martian nexthop %s", buf);
return bgp_attr_malformed (args,
BGP_NOTIFY_UPDATE_INVAL_NEXT_HOP,
args->total);
}
attr->nexthop.s_addr = nexthop_n;
attr->flag |= ATTR_FLAG_BIT (BGP_ATTR_NEXT_HOP);
return BGP_ATTR_PARSE_PROCEED;
} | 0 | [] | quagga | 8794e8d229dc9fe29ea31424883433d4880ef408 | 181,059,589,998,604,960,000,000,000,000,000,000,000 | 41 | bgpd: Fix regression in args consolidation, total should be inited from args
* bgp_attr.c: (bgp_attr_unknown) total should be initialised from the args. |
static int nci_dev_up(struct nfc_dev *nfc_dev)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
return nci_open_device(ndev);
} | 0 | [] | linux | 48b71a9e66c2eab60564b1b1c85f4928ed04e406 | 323,236,840,873,713,300,000,000,000,000,000,000,000 | 6 | NFC: add NCI_UNREG flag to eliminate the race
There are two sites that call queue_work() after the
destroy_workqueue(), which can lead to a possible UAF.
The first site is nci_send_cmd(), which can happen after the
nci_close_device as below
nfcmrvl_nci_unregister_dev | nfc_genl_dev_up
nci_close_device |
flush_workqueue |
del_timer_sync |
nci_unregister_device | nfc_get_device
destroy_workqueue | nfc_dev_up
nfc_unregister_device | nci_dev_up
device_del | nci_open_device
| __nci_request
| nci_send_cmd
| queue_work !!!
Another site is nci_cmd_timer, awoken by the nci_cmd_work queued from
nci_send_cmd.
... | ...
nci_unregister_device | queue_work
destroy_workqueue |
nfc_unregister_device | ...
device_del | nci_cmd_work
| mod_timer
| ...
| nci_cmd_timer
| queue_work !!!
For both UAFs, the root cause is that nfc_dev_up can race
against the nci_unregister_device routine. Therefore, this patch
introduces an NCI_UNREG flag to eliminate the possible race. In
addition, the mutex_lock in nci_close_device can act as a barrier.
Signed-off-by: Lin Ma <[email protected]>
Fixes: 6a2968aaf50c ("NFC: basic NCI protocol implementation")
Reviewed-by: Jakub Kicinski <[email protected]>
Reviewed-by: Krzysztof Kozlowski <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]> |
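A sketch of how the NCI_UNREG flag closes the window, using the req_lock as the barrier mentioned above; the shape is illustrative rather than the literal patch:

static int nci_open_device_sketch(struct nci_dev *ndev)
{
	int rc = 0;

	mutex_lock(&ndev->req_lock);              /* barrier vs. unregister */
	if (test_bit(NCI_UNREG, &ndev->flags)) {
		rc = -ENODEV;                     /* device is being torn down */
		goto done;
	}
	/* ...normal open path; queued work is now safe... */
done:
	mutex_unlock(&ndev->req_lock);
	return rc;
}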
static int expand_mmacro(Token * tline)
{
Token *startline = tline;
Token *label = NULL;
bool dont_prepend = false;
Token **params, *t, *tt;
MMacro *m;
Line *l, *ll;
int i, *paramlen;
const char *mname;
int nparam = 0;
t = tline;
t = skip_white(t);
/* if (!tok_type(t, TOK_ID)) Lino 02/25/02 */
if (!tok_type(t, TOK_ID) && !tok_type(t, TOK_LOCAL_MACRO))
return 0;
m = is_mmacro(t, &nparam, ¶ms);
if (m) {
mname = tok_text(t);
} else {
Token *last;
/*
* We have an id which isn't a macro call. We'll assume
* it might be a label; we'll also check to see if a
* colon follows it. Then, if there's another id after
* that lot, we'll check it again for macro-hood.
*/
label = last = t;
t = t->next;
if (tok_white(t))
last = t, t = t->next;
if (tok_is(t, ':')) {
dont_prepend = true;
last = t, t = t->next;
if (tok_white(t))
last = t, t = t->next;
}
if (!tok_type(t, TOK_ID) || !(m = is_mmacro(t, &nparam, ¶ms)))
return 0;
last->next = NULL;
mname = tok_text(t);
tline = t;
}
if (unlikely(mmacro_deadman.total >= nasm_limit[LIMIT_MMACROS] ||
mmacro_deadman.levels >= nasm_limit[LIMIT_MACRO_LEVELS])) {
if (!mmacro_deadman.triggered) {
nasm_nonfatal("interminable multiline macro recursion");
mmacro_deadman.triggered = true;
}
return 0;
}
mmacro_deadman.total++;
mmacro_deadman.levels++;
/*
* Fix up the parameters: this involves stripping leading and
* trailing whitespace and stripping braces if they are present.
*/
nasm_newn(paramlen, nparam+1);
for (i = 1; (t = params[i]); i++) {
bool braced = false;
int brace = 0;
int white = 0;
bool comma = !m->plus || i < nparam;
t = skip_white(t);
if (tok_is(t, '{')) {
t = t->next;
brace = 1;
braced = true;
comma = false;
}
params[i] = t;
for (; t; t = t->next) {
if (tok_white(t)) {
white++;
continue;
}
if (t->type == TOK_OTHER && t->len == 1) {
switch (t->text.a[0]) {
case ',':
if (comma && !brace)
goto endparam;
break;
case '{':
brace++;
break;
case '}':
brace--;
if (braced && !brace) {
paramlen[i] += white;
goto endparam;
}
break;
default:
break;
}
}
paramlen[i] += white + 1;
white = 0;
}
endparam:
;
}
/*
* OK, we have a MMacro structure together with a set of
* parameters. We must now go through the expansion and push
* copies of each Line on to istk->expansion. Substitution of
* parameter tokens and macro-local tokens doesn't get done
* until the single-line macro substitution process; this is
* because delaying them allows us to change the semantics
* later through %rotate and give the right semantics for
* nested mmacros.
*
* First, push an end marker on to istk->expansion, mark this
* macro as in progress, and set up its invocation-specific
* variables.
*/
nasm_new(ll);
ll->next = istk->expansion;
ll->finishes = m;
ll->where = istk->where;
istk->expansion = ll;
/*
* Save the previous MMacro expansion in the case of
* macro recursion
*/
#if 0
if (m->max_depth && m->in_progress)
push_mmacro(m);
#endif
m->in_progress ++;
m->params = params;
m->iline = tline;
m->iname = nasm_strdup(mname);
m->nparam = nparam;
m->rotate = 0;
m->paramlen = paramlen;
m->unique = unique++;
m->condcnt = 0;
m->mstk = istk->mstk;
istk->mstk.mstk = istk->mstk.mmac = m;
list_for_each(l, m->expansion) {
nasm_new(ll);
ll->next = istk->expansion;
istk->expansion = ll;
ll->first = dup_tlist(l->first, NULL);
ll->where = l->where;
}
/*
* If we had a label, and this macro definition does not include
	 * a %00, push it on as the first line of
* the macro expansion.
*/
if (label) {
/*
* We had a label. If this macro contains an %00 parameter,
* save the value as a special parameter (which is what it
* is), otherwise push it as the first line of the macro
* expansion.
*/
if (m->capture_label) {
params[0] = dup_Token(NULL, label);
paramlen[0] = 1;
free_tlist(startline);
} else {
nasm_new(ll);
ll->finishes = NULL;
ll->next = istk->expansion;
istk->expansion = ll;
ll->first = startline;
ll->where = istk->where;
if (!dont_prepend) {
while (label->next)
label = label->next;
label->next = tt = make_tok_char(NULL, ':');
}
}
}
istk->nolist += !!(m->nolist & NL_LIST);
istk->noline += !!(m->nolist & NL_LINE);
if (!istk->nolist) {
lfmt->uplevel(LIST_MACRO, 0);
if (list_option('m'))
list_mmacro_call(m);
}
if (!istk->noline)
src_macro_push(m, istk->where);
return 1;
} | 0 | [] | nasm | 6299a3114ce0f3acd55d07de201a8ca2f0a83059 | 258,097,016,822,009,840,000,000,000,000,000,000,000 | 211 | BR 3392708: fix NULL pointer reference for invalid %stacksize
After issuing an error message for a missing %stacksize argument, the
code needs to quit rather than continue and dereference the NULL pointer.
Fold uses of tok_text() while we are at it.
Reported-by: Suhwan <[email protected]>
Signed-off-by: H. Peter Anvin (Intel) <[email protected]> |
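The pattern of the fix, sketched with nasm_nonfatal() (which also appears in the function above); the token variable name is an assumption:

    if (!tline) {
        nasm_nonfatal("`%%stacksize' missing size parameter");
        return;                 /* was: fall through and dereference NULL */
    }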
int get_anon_bdev(dev_t *p)
{
int dev;
int error;
retry:
if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
return -ENOMEM;
spin_lock(&unnamed_dev_lock);
error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
if (!error)
unnamed_dev_start = dev + 1;
spin_unlock(&unnamed_dev_lock);
if (error == -EAGAIN)
/* We raced and lost with another CPU. */
goto retry;
else if (error)
return -EAGAIN;
if (dev == (1 << MINORBITS)) {
spin_lock(&unnamed_dev_lock);
ida_remove(&unnamed_dev_ida, dev);
if (unnamed_dev_start > dev)
unnamed_dev_start = dev;
spin_unlock(&unnamed_dev_lock);
return -EMFILE;
}
*p = MKDEV(0, dev & MINORMASK);
return 0;
} | 0 | [
"CWE-17"
] | linux | eee5cc2702929fd41cce28058dc6d6717f723f87 | 240,088,001,591,228,630,000,000,000,000,000,000,000 | 30 | get rid of s_files and files_lock
The only thing we need it for is alt-sysrq-r (emergency remount r/o)
and these days we can do just as well without going through the
list of files.
Signed-off-by: Al Viro <[email protected]> |
void
_vte_table_add(struct _vte_table *table,
const char *pattern, gssize length,
const char *result, GQuark quark)
{
_vte_table_addi(table,
(const unsigned char *) pattern, length,
pattern, length,
result, quark, 0);
} | 0 | [
"CWE-119"
] | vte | feeee4b5832b17641e505b7083e0d299fdae318e | 67,146,201,302,304,290,000,000,000,000,000,000,000 | 9 | emulation: Limit integer arguments to 65535
To guard against malicious sequences containing excessively big numbers,
limit all parsed numbers to 16 bit range. Doing this here in the parsing
routine is a catch-all guard; this doesn't preclude enforcing
more stringent limits in the handlers themselves.
https://bugzilla.gnome.org/show_bug.cgi?id=676090 |
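The catch-all guard amounts to a one-line clamp at the parsing layer — a minimal sketch:

static long
clamp_sequence_param(long value)
{
	return value > 65535 ? 65535 : value;   /* 16-bit cap on parsed args */
}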
void fs_logger(const char *msg) {
FsMsg *ptr = newmsg();
ptr->msg = strdup(msg);
if (!ptr->msg)
errExit("strdup");
insertmsg(ptr);
} | 0 | [
"CWE-269",
"CWE-94"
] | firejail | 27cde3d7d1e4e16d4190932347c7151dc2a84c50 | 97,942,744,150,948,820,000,000,000,000,000,000,000 | 7 | fixing CVE-2022-31214 |
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task)
{
struct list_head *q;
struct rpc_task *t;
INIT_LIST_HEAD(&task->u.tk_wait.links);
q = &queue->tasks[task->tk_priority];
if (unlikely(task->tk_priority > queue->maxpriority))
q = &queue->tasks[queue->maxpriority];
list_for_each_entry(t, q, u.tk_wait.list) {
if (t->tk_owner == task->tk_owner) {
list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
return;
}
}
list_add_tail(&task->u.tk_wait.list, q);
} | 0 | [
"CWE-400",
"CWE-399",
"CWE-703"
] | linux | 0b760113a3a155269a3fba93a409c640031dd68f | 251,514,342,123,341,480,000,000,000,000,000,000,000 | 17 | NLM: Don't hang forever on NLM unlock requests
If the NLM daemon is killed on the NFS server, we can currently end up
hanging forever on an 'unlock' request, instead of aborting. Basically,
if the rpcbind request fails, or the server keeps returning garbage, we
really want to quit instead of retrying.
Tested-by: Vasily Averin <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
Cc: [email protected] |
void PSOutputDev::updateFlatness(GfxState *state) {
writePSFmt("{0:d} i\n", state->getFlatness());
} | 0 | [] | poppler | abf167af8b15e5f3b510275ce619e6fdb42edd40 | 281,975,758,486,128,800,000,000,000,000,000,000,000 | 3 | Implement tiling/patterns in SplashOutputDev
Fixes bug 13518 |
void dofeat(void)
{
# define FEAT "Extensions supported:" CRLF \
" UTF8" CRLF \
" EPRT" CRLF " IDLE" CRLF " MDTM" CRLF " SIZE" CRLF " MFMT" CRLF \
" REST STREAM" CRLF \
" MLST type*;size*;sizd*;modify*;UNIX.mode*;UNIX.uid*;UNIX.gid*;unique*;" CRLF \
" MLSD" CRLF \
" PRET"
# ifdef WITH_TLS
# define FEAT_TLS CRLF " AUTH TLS" CRLF " PBSZ" CRLF " PROT"
# else
# define FEAT_TLS ""
# endif
# ifdef DEBUG
# define FEAT_DEBUG CRLF " XDBG"
# else
# define FEAT_DEBUG ""
# endif
# ifdef WITH_VIRTUAL_CHROOT
# define FEAT_TVFS ""
# else
# define FEAT_TVFS CRLF " TVFS"
# endif
# define FEAT_PASV CRLF " PASV" CRLF " EPSV"
# ifdef MINIMAL
# define FEAT_ESTA ""
# define FEAT_ESTP ""
# else
# define FEAT_ESTA CRLF " ESTA"
# define FEAT_ESTP CRLF " ESTP"
# endif
char feat[] = FEAT FEAT_DEBUG FEAT_TLS FEAT_TVFS FEAT_ESTA FEAT_PASV FEAT_ESTP;
if (disallow_passive != 0) {
feat[sizeof FEAT FEAT_DEBUG FEAT_TLS FEAT_TVFS FEAT_ESTA - 1U] = 0;
}
# ifndef MINIMAL
else if (STORAGE_FAMILY(force_passive_ip) != 0) {
feat[sizeof FEAT FEAT_DEBUG FEAT_TLS FEAT_TVFS FEAT_ESTA FEAT_PASV - 1U] = 0;
}
# endif
addreply_noformat(0, feat);
addreply_noformat(211, "End.");
} | 0 | [
"CWE-434"
] | pure-ftpd | 37ad222868e52271905b94afea4fc780d83294b4 | 18,356,872,946,557,904,000,000,000,000,000,000,000 | 48 | Initialize the max upload file size when quotas are enabled
Due to an unwanted check, files causing the quota to be exceeded
were deleted after the upload, but not during the upload.
The bug was introduced in 2009 in version 1.0.23
Spotted by @DroidTest, thanks! |
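A sketch of the initialization named in the title, assuming quota fields along these lines: derive the per-upload size cap from the remaining quota so the transfer can be aborted while it runs, not deleted afterwards.

    if (user_quota_size > 0ULL) {              /* quotas enabled */
        max_filesize = user_quota_size > quota.size
            ? user_quota_size - quota.size     /* remaining quota bytes */
            : 0ULL;                            /* already over quota */
    }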
static void
formatVideoAttrs(XtermWidget xw, char *buffer, CELL *cell)
{
TScreen *screen = TScreenOf(xw);
LineData *ld = GET_LINEDATA(screen, cell->row);
*buffer = '\0';
if (ld != 0 && cell->col < (int) ld->lineSize) {
IAttr attribs = ld->attribs[cell->col];
const char *delim = "";
if (attribs & INVERSE) {
buffer += sprintf(buffer, "7");
delim = ";";
}
if (attribs & UNDERLINE) {
buffer += sprintf(buffer, "%s4", delim);
delim = ";";
}
if (attribs & BOLD) {
buffer += sprintf(buffer, "%s1", delim);
delim = ";";
}
if (attribs & BLINK) {
buffer += sprintf(buffer, "%s5", delim);
delim = ";";
}
#if OPT_ISO_COLORS
if (attribs & FG_COLOR) {
Pixel fg = extract_fg(xw, ld->color[cell->col], attribs);
if (fg < 8) {
fg += 30;
} else if (fg < 16) {
fg += 90;
} else {
buffer += sprintf(buffer, "%s38;5", delim);
delim = ";";
}
buffer += sprintf(buffer, "%s%lu", delim, fg);
delim = ";";
}
if (attribs & BG_COLOR) {
Pixel bg = extract_bg(xw, ld->color[cell->col], attribs);
if (bg < 8) {
bg += 40;
} else if (bg < 16) {
bg += 100;
} else {
buffer += sprintf(buffer, "%s48;5", delim);
delim = ";";
}
(void) sprintf(buffer, "%s%lu", delim, bg);
}
#endif
}
} | 0 | [
"CWE-399"
] | xterm-snapshots | 82ba55b8f994ab30ff561a347b82ea340ba7075c | 222,626,076,465,875,520,000,000,000,000,000,000,000 | 55 | snapshot of project "xterm", label xterm-365d |
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
unsigned long, new_len, unsigned long, flags,
unsigned long, new_addr)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long ret = -EINVAL;
unsigned long charged = 0;
bool locked = false;
struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
LIST_HEAD(uf_unmap_early);
LIST_HEAD(uf_unmap);
if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
return ret;
if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
return ret;
if (offset_in_page(addr))
return ret;
old_len = PAGE_ALIGN(old_len);
new_len = PAGE_ALIGN(new_len);
/*
* We allow a zero old-len as a special case
* for DOS-emu "duplicate shm area" thing. But
* a zero new-len is nonsensical.
*/
if (!new_len)
return ret;
if (down_write_killable(¤t->mm->mmap_sem))
return -EINTR;
if (flags & MREMAP_FIXED) {
ret = mremap_to(addr, old_len, new_addr, new_len,
&locked, &uf, &uf_unmap_early, &uf_unmap);
goto out;
}
/*
* Always allow a shrinking remap: that just unmaps
* the unnecessary pages..
* do_munmap does all the needed commit accounting
*/
if (old_len >= new_len) {
ret = do_munmap(mm, addr+new_len, old_len - new_len, &uf_unmap);
if (ret && old_len != new_len)
goto out;
ret = addr;
goto out;
}
/*
* Ok, we need to grow..
*/
vma = vma_to_resize(addr, old_len, new_len, &charged);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out;
}
/* old_len exactly to the end of the area..
*/
if (old_len == vma->vm_end - addr) {
/* can we just expand the current mapping? */
if (vma_expandable(vma, new_len - old_len)) {
int pages = (new_len - old_len) >> PAGE_SHIFT;
if (vma_adjust(vma, vma->vm_start, addr + new_len,
vma->vm_pgoff, NULL)) {
ret = -ENOMEM;
goto out;
}
vm_stat_account(mm, vma->vm_flags, pages);
if (vma->vm_flags & VM_LOCKED) {
mm->locked_vm += pages;
locked = true;
new_addr = addr;
}
ret = addr;
goto out;
}
}
/*
* We weren't able to just expand or shrink the area,
* we need to create a new one and move it..
*/
ret = -ENOMEM;
if (flags & MREMAP_MAYMOVE) {
unsigned long map_flags = 0;
if (vma->vm_flags & VM_MAYSHARE)
map_flags |= MAP_SHARED;
new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
vma->vm_pgoff +
((addr - vma->vm_start) >> PAGE_SHIFT),
map_flags);
if (offset_in_page(new_addr)) {
ret = new_addr;
goto out;
}
ret = move_vma(vma, addr, old_len, new_len, new_addr,
&locked, &uf, &uf_unmap);
}
out:
if (offset_in_page(ret)) {
vm_unacct_memory(charged);
locked = 0;
}
up_write(¤t->mm->mmap_sem);
if (locked && new_len > old_len)
mm_populate(new_addr + old_len, new_len - old_len);
userfaultfd_unmap_complete(mm, &uf_unmap_early);
mremap_userfaultfd_complete(&uf, addr, new_addr, old_len);
userfaultfd_unmap_complete(mm, &uf_unmap);
return ret;
} | 0 | [
"CWE-459"
] | linux | eb66ae030829605d61fbef1909ce310e29f78821 | 279,715,616,008,693,730,000,000,000,000,000,000,000 | 123 | mremap: properly flush TLB before releasing the page
Jann Horn points out that our TLB flushing was subtly wrong for the
mremap() case. What makes mremap() special is that we don't follow the
usual "add page to list of pages to be freed, then flush tlb, and then
free pages". No, mremap() obviously just _moves_ the page from one page
table location to another.
That matters, because mremap() thus doesn't directly control the
lifetime of the moved page with a freelist: instead, the lifetime of the
page is controlled by the page table locking, that serializes access to
the entry.
As a result, we need to flush the TLB not just before releasing the lock
for the source location (to avoid any concurrent accesses to the entry),
but also before we release the destination page table lock (to avoid the
TLB being flushed after somebody else has already done something to that
page).
This also makes the whole "need_flush" logic unnecessary, since we now
always end up flushing the TLB for every valid entry.
Reported-and-tested-by: Jann Horn <[email protected]>
Acked-by: Will Deacon <[email protected]>
Tested-by: Ingo Molnar <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
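The ordering requirement can be sketched as follows, with the locking helpers treated as hypothetical; the essential point is that flush_tlb_range() runs before the destination page-table lock is released.

static void move_ptes_sketch(struct vm_area_struct *vma,
			     unsigned long old_addr, unsigned long new_addr,
			     unsigned long len)
{
	spinlock_t *old_ptl, *new_ptl;

	lock_src_and_dst_ptls(&old_ptl, &new_ptl);      /* hypothetical helper */
	move_pte_entries(vma, old_addr, new_addr, len); /* hypothetical move */

	flush_tlb_range(vma, old_addr, old_addr + len); /* before unlocking */

	unlock_src_and_dst_ptls(old_ptl, new_ptl);      /* hypothetical helper */
}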
static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
{
return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
} | 0 | [] | linux | ff002b30181d30cdfbca316dadd099c3ca0d739c | 105,297,189,800,133,840,000,000,000,000,000,000,000 | 4 | io_uring: grab ->fs as part of async preparation
This passes it in to io-wq, so it assumes the right fs_struct when
executing async work that may need to do lookups.
Cc: [email protected] # 5.3+
Signed-off-by: Jens Axboe <[email protected]> |
Datum
has_database_privilege_name_id(PG_FUNCTION_ARGS)
{
Name username = PG_GETARG_NAME(0);
Oid databaseoid = PG_GETARG_OID(1);
text *priv_type_text = PG_GETARG_TEXT_P(2);
Oid roleid;
AclMode mode;
AclResult aclresult;
roleid = get_role_oid_or_public(NameStr(*username));
mode = convert_database_priv_string(priv_type_text);
if (!SearchSysCacheExists1(DATABASEOID, ObjectIdGetDatum(databaseoid)))
PG_RETURN_NULL();
aclresult = pg_database_aclcheck(databaseoid, roleid, mode);
PG_RETURN_BOOL(aclresult == ACLCHECK_OK);
} | 0 | [
"CWE-264"
] | postgres | fea164a72a7bfd50d77ba5fb418d357f8f2bb7d0 | 81,614,880,187,858,380,000,000,000,000,000,000,000 | 19 | Shore up ADMIN OPTION restrictions.
Granting a role without ADMIN OPTION is supposed to prevent the grantee
from adding or removing members from the granted role. Issuing SET ROLE
before the GRANT bypassed that, because the role itself had an implicit
right to add or remove members. Plug that hole by recognizing that
implicit right only when the session user matches the current role.
Additionally, do not recognize it during a security-restricted operation
or during execution of a SECURITY DEFINER function. The restriction on
SECURITY DEFINER is not security-critical. However, it seems best for a
user testing his own SECURITY DEFINER function to see the same behavior
others will see. Back-patch to 8.4 (all supported versions).
The SQL standards do not conflate roles and users as PostgreSQL does;
only SQL roles have members, and only SQL users initiate sessions. An
application using PostgreSQL users and roles as SQL users and roles will
never attempt to grant membership in the role that is the session user,
so the implicit right to add or remove members will never arise.
The security impact was mostly that a role member could revoke access
from others, contrary to the wishes of his own grantor. Unapproved role
member additions are less notable, because the member can still largely
achieve that by creating a view or a SECURITY DEFINER function.
Reviewed by Andres Freund and Tom Lane. Reported, independently, by
Jonas Sundman and Noah Misch.
Security: CVE-2014-0060 |
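In code terms the tightened rule looks roughly like this — a sketch built from PostgreSQL's existing predicates, with the helper name invented:

static bool
has_implicit_admin_option_sketch(Oid member_roleid)
{
    return member_roleid == GetUserId() &&
           !InLocalUserIdChange() &&          /* e.g. SECURITY DEFINER body */
           !InSecurityRestrictedOperation();  /* e.g. autovacuum, REINDEX */
}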
SYSCALL_DEFINE(ftruncate64)(unsigned int fd, loff_t length)
{
long ret = do_sys_ftruncate(fd, length, 0);
/* avoid REGPARM breakage on x86: */
asmlinkage_protect(2, ret, fd, length);
return ret;
} | 0 | [
"CWE-732"
] | linux-stable | e57712ebebbb9db7d8dcef216437b3171ddcf115 | 118,874,587,508,282,700,000,000,000,000,000,000,000 | 7 | merge fchmod() and fchmodat() guts, kill ancient broken kludge
The kludge in question is undocumented and doesn't work for 32bit
binaries on amd64, sparc64 and s390. Passing (mode_t)-1 as
mode had (since 0.99.14v and contrary to behaviour of any
other Unix, prescriptions of POSIX, SuS and our own manpages)
was kinda-sorta no-op. Note that any software relying on
that (and looking for examples shows none) would be visibly
broken on sparc64, where practically all userland is built
32bit. No such complaints noticed...
Signed-off-by: Al Viro <[email protected]> |
TEST_F(SingleAllowMissingInOrListTest, MissingIssToken) {
EXPECT_CALL(mock_cb_, onComplete(Status::JwtUnknownIssuer));
auto headers = Http::TestRequestHeaderMapImpl{{kExampleHeader, ES256WithoutIssToken}};
context_ = Verifier::createContext(headers, parent_span_, &mock_cb_);
verifier_->verify(context_);
EXPECT_THAT(headers, JwtOutputFailedOrIgnore(kExampleHeader));
} | 0 | [
"CWE-303",
"CWE-703"
] | envoy | ea39e3cba652bcc4b11bb0d5c62b017e584d2e5a | 154,028,196,534,117,560,000,000,000,000,000,000,000 | 7 | jwt_authn: fix a bug where JWT with wrong issuer is allowed in allow_missing case (#15194)
[jwt] When allow_missing is used inside RequiresAny, requests carrying a JWT from the wrong issuer are accepted. This is a bug: allow_missing should only allow requests without any JWT. This change fixes the issue by preserving JwtUnknownIssuer in the allow_missing case.
Signed-off-by: Wayne Zhang <[email protected]> |
void set_geometry_type(uint type)
{
Type_geometry_attributes::set_geometry_type(type);
} | 0 | [
"CWE-617"
] | server | 807945f2eb5fa22e6f233cc17b85a2e141efe2c8 | 73,418,347,600,482,135,000,000,000,000,000,000,000 | 4 | MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item. |
static my_time_t
add_interval(MYSQL_TIME *ltime, const Time_zone *time_zone,
interval_type scale, INTERVAL interval)
{
if (date_add_interval(ltime, scale, interval))
return 0;
uint not_used;
return time_zone->TIME_to_gmt_sec(ltime, ¬_used);
} | 0 | [
"CWE-284"
] | server | 0b5a5258abbeaf8a0c3a18c7e753699787fdf46e | 172,596,682,810,975,700,000,000,000,000,000,000,000 | 9 | MW-416 DDL replication moved after acl checking
The galera_events test shows a regression with the original fix for MW-416.
The reason was that Events::drop_event() can also be called from inside event
execution, where there is special treatment for an event that executes a
"DROP EVENT" statement and runs TOI replication inside the event processing body.
This resulted in executing WSREP_TO_ISOLATION twice for such a DROP EVENT statement.
The fix is to call WSREP_TO_ISOLATION_BEGIN only in Events::drop_event().
TiledInputFile::Data::Data (int numThreads):
numXTiles (0),
numYTiles (0),
partNumber (-1),
multiPartBackwardSupport(false),
numThreads(numThreads),
memoryMapped(false),
_streamData(NULL),
_deleteStream(false)
{
//
// We need at least one tileBuffer, but if threading is used,
// to keep n threads busy we need 2*n tileBuffers
//
tileBuffers.resize (max (1, 2 * numThreads));
} | 0 | [
"CWE-125"
] | openexr | e79d2296496a50826a15c667bf92bdc5a05518b4 | 266,447,539,826,292,100,000,000,000,000,000,000,000 | 17 | fix memory leaks and invalid memory accesses
Signed-off-by: Peter Hillman <[email protected]> |
read_client_connect_attrs(char **ptr, size_t *max_bytes_available,
const CHARSET_INFO *from_cs)
{
size_t length, length_length;
char *ptr_save;
/* not enough bytes to hold the length */
if (*max_bytes_available < 1)
return true;
/* read the length */
ptr_save= *ptr;
length= net_field_length_ll((uchar **) ptr);
length_length= *ptr - ptr_save;
if (*max_bytes_available < length_length)
return true;
*max_bytes_available-= length_length;
/* length says there're more data than can fit into the packet */
if (length > *max_bytes_available)
return true;
/* impose an artificial length limit of 64k */
if (length > 65535)
return true;
#ifdef HAVE_PSI_THREAD_INTERFACE
if (PSI_THREAD_CALL(set_thread_connect_attrs)(*ptr, length, from_cs) && log_warnings)
sql_print_warning("Connection attributes of length %lu were truncated",
(unsigned long) length);
#endif
return false;
} | 0 | [] | mysql-server | 25d1b7e03b9b375a243fabdf0556c063c7282361 | 259,813,115,275,674,560,000,000,000,000,000,000,000 | 33 | Bug #22722946: integer overflow may lead to wrong results in get_56_lenc_string |
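The class of overflow the bug number refers to comes from trusting a length field in pointer arithmetic; a minimal bounds-checked read with a simplified one-byte length (hypothetical helper, not the real get_56_lenc_string):
#include <stddef.h>

static int read_lenc_string(const unsigned char *p, size_t avail,
                            const unsigned char **out, size_t *out_len)
{
    if (avail < 1)
        return -1;              /* no room for the length byte */
    size_t len = p[0];          /* simplified: single-byte length */
    if (len > avail - 1)
        return -1;              /* claims more bytes than the packet holds */
    *out = p + 1;
    *out_len = len;
    return 0;
}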
PbrMetallicRoughness()
: baseColorFactor(std::vector<double>{1.0, 1.0, 1.0, 1.0}),
metallicFactor(1.0),
roughnessFactor(1.0) {} | 0 | [
"CWE-20"
] | tinygltf | 52ff00a38447f06a17eab1caa2cf0730a119c751 | 38,872,634,824,683,706,000,000,000,000,000,000,000 | 4 | Do not expand file path since it's not necessary for glTF asset path (URI) and for security reasons (`wordexp`).
static int _progress_ufwd(pmixp_coll_t *coll)
{
pmixp_coll_tree_t *tree = &coll->state.tree;
pmixp_ep_t ep[tree->chldrn_cnt];
int ep_cnt = 0;
int rc, i;
char *nodename = NULL;
pmixp_coll_cbdata_t *cbdata = NULL;
xassert(PMIXP_COLL_TREE_UPFWD == tree->state);
/* for some reason it doesn't switch to downfwd */
switch (tree->ufwd_status) {
case PMIXP_COLL_TREE_SND_FAILED:
/* something went wrong with upward send.
* notify libpmix about that and abort
* collective */
/* respond to the libpmix */
pmixp_coll_localcb_nodata(coll, SLURM_ERROR);
_reset_coll(coll);
/* Don't need to do anything else */
return false;
case PMIXP_COLL_TREE_SND_ACTIVE:
/* still waiting for the send completion */
return false;
case PMIXP_COLL_TREE_SND_DONE:
if (tree->contrib_prnt) {
/* all-set to go to the next stage */
break;
}
return false;
default:
/* Should not happen */
PMIXP_ERROR("Bad collective ufwd state=%d",
(int)tree->ufwd_status);
/* collective is spoiled, reset state */
tree->state = PMIXP_COLL_TREE_SYNC;
slurm_kill_job_step(pmixp_info_jobid(),
pmixp_info_stepid(), SIGKILL);
return false;
}
/* We now can upward part for the next collective */
_reset_coll_ufwd(coll);
/* move to the next state */
tree->state = PMIXP_COLL_TREE_DOWNFWD;
tree->dfwd_status = PMIXP_COLL_TREE_SND_ACTIVE;
if (!pmixp_info_srv_direct_conn()) {
/* only root of the tree should get here */
xassert(0 > tree->prnt_peerid);
if (tree->chldrn_cnt) {
/* We can run on just one node */
ep[ep_cnt].type = PMIXP_EP_HLIST;
ep[ep_cnt].ep.hostlist = tree->chldrn_str;
ep_cnt++;
}
} else {
for(i=0; i<tree->chldrn_cnt; i++){
ep[i].type = PMIXP_EP_NOIDEID;
ep[i].ep.nodeid = tree->chldrn_ids[i];
ep_cnt++;
}
}
/* We need to wait for ep_cnt send completions + the local callback */
tree->dfwd_cb_wait = ep_cnt;
if (ep_cnt || coll->cbfunc) {
/* allocate the callback data */
cbdata = xmalloc(sizeof(pmixp_coll_cbdata_t));
cbdata->coll = coll;
cbdata->seq = coll->seq;
cbdata->refcntr = ep_cnt;
if (coll->cbfunc) {
cbdata->refcntr++;
}
}
for(i=0; i < ep_cnt; i++){
rc = pmixp_server_send_nb(&ep[i], PMIXP_MSG_FAN_OUT, coll->seq,
tree->dfwd_buf,
_dfwd_sent_cb, cbdata);
if (SLURM_SUCCESS != rc) {
if (PMIXP_EP_NOIDEID == ep[i].type){
nodename = pmixp_info_job_host(ep[i].ep.nodeid);
PMIXP_ERROR("Cannot send data (size = %u), "
"to %s:%d",
get_buf_offset(tree->dfwd_buf),
nodename, ep[i].ep.nodeid);
xfree(nodename);
} else {
PMIXP_ERROR("Cannot send data (size = %u), "
"to %s",
get_buf_offset(tree->dfwd_buf),
ep[i].ep.hostlist);
}
tree->dfwd_status = PMIXP_COLL_TREE_SND_FAILED;
}
#ifdef PMIXP_COLL_DEBUG
if (PMIXP_EP_NOIDEID == ep[i].type) {
nodename = pmixp_info_job_host(ep[i].ep.nodeid);
PMIXP_DEBUG("%p: fwd to %s:%d, size = %u",
coll, nodename, ep[i].ep.nodeid,
get_buf_offset(tree->dfwd_buf));
xfree(nodename);
} else {
PMIXP_DEBUG("%p: fwd to %s, size = %u",
coll, ep[i].ep.hostlist,
get_buf_offset(tree->dfwd_buf));
}
#endif
}
if (coll->cbfunc) {
char *data = get_buf_data(tree->dfwd_buf) + tree->dfwd_offset;
size_t size = get_buf_offset(tree->dfwd_buf) -
tree->dfwd_offset;
tree->dfwd_cb_wait++;
pmixp_lib_modex_invoke(coll->cbfunc, SLURM_SUCCESS,
data, size, coll->cbdata,
_libpmix_cb, (void*)cbdata);
/* Clear callback info as we are not
* allowed to use it second time
*/
coll->cbfunc = NULL;
coll->cbdata = NULL;
#ifdef PMIXP_COLL_DEBUG
PMIXP_DEBUG("%p: local delivery, size = %lu",
coll, size);
#endif
}
/* events observed - need another iteration */
return true;
} | 0 | [
"CWE-120"
] | slurm | c3142dd87e06621ff148791c3d2f298b5c0b3a81 | 336,750,762,050,586,150,000,000,000,000,000,000,000 | 140 | PMIx - fix potential buffer overflows from use of unpackmem().
CVE-2020-27745. |
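The CVE stems from unpackmem() trusting an embedded length; a sketch of the bounds-checked pattern (signature hypothetical):
#include <stdint.h>
#include <string.h>

/* assumes *off <= buf_len on entry */
static int unpackmem_safe(const char *buf, size_t buf_len, size_t *off,
                          char *dst, size_t dst_size)
{
    uint32_t len;
    if (buf_len - *off < sizeof(len))
        return -1;                       /* header would run off the buffer */
    memcpy(&len, buf + *off, sizeof(len));
    *off += sizeof(len);
    if (len > buf_len - *off || len > dst_size)
        return -1;                       /* payload too big for src or dst */
    memcpy(dst, buf + *off, len);
    *off += len;
    return (int)len;
}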
int luaLogCommand(lua_State *lua) {
int j, argc = lua_gettop(lua);
int level;
sds log;
if (argc < 2) {
lua_pushstring(lua, "redis.log() requires two arguments or more.");
return lua_error(lua);
} else if (!lua_isnumber(lua,-argc)) {
lua_pushstring(lua, "First argument must be a number (log level).");
return lua_error(lua);
}
level = lua_tonumber(lua,-argc);
if (level < LL_DEBUG || level > LL_WARNING) {
lua_pushstring(lua, "Invalid debug level.");
return lua_error(lua);
}
if (level < server.verbosity) return 0;
/* Glue together all the arguments */
log = sdsempty();
for (j = 1; j < argc; j++) {
size_t len;
char *s;
s = (char*)lua_tolstring(lua,(-argc)+j,&len);
if (s) {
if (j != 1) log = sdscatlen(log," ",1);
log = sdscatlen(log,s,len);
}
}
serverLogRaw(level,log);
sdsfree(log);
return 0;
} | 0 | [
"CWE-703",
"CWE-125"
] | redis | 6ac3c0b7abd35f37201ed2d6298ecef4ea1ae1dd | 120,401,373,800,172,560,000,000,000,000,000,000,000 | 35 | Fix protocol parsing on 'ldbReplParseCommand' (CVE-2021-32672)
The protocol parsing in 'ldbReplParseCommand' (LUA debugging)
assumed protocol correctness. This means that if the following
is given:
*1
$100
test
The parser will try to read additional 94 unallocated bytes after
the client buffer.
This commit fixes this issue by validating that there are actually enough
bytes to read. It also limits the amount of data that can be sent by
the debugger client to 1M so the client will not be able to explode
the memory. |
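The `$100` example above declares 100 bytes while only 4 follow; the added validation boils down to this kind of check (helper name hypothetical):
#include <stddef.h>

static int bulk_len_ok(long declared, size_t remaining)
{
    if (declared < 0)
        return 0;
    if ((size_t)declared > remaining)
        return 0;                 /* would read past the client buffer */
    if (declared > 1024 * 1024)
        return 0;                 /* 1M cap on debugger client payloads */
    return 1;
}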
store_one(int c, char *s)
{
s[0] = (char)(c & 255);
} | 0 | [
"CWE-119",
"CWE-787"
] | t1utils | 6b9d1aafcb61a3663c883663eb19ccdbfcde8d33 | 114,555,551,530,797,000,000,000,000,000,000,000,000 | 4 | Security fixes.
- Don't overflow the small cs_start buffer (reported by Niels
Thykier via the debian tracker (Jakub Wilk), found with a
fuzzer ("American fuzzy lop")).
- Cast arguments to <ctype.h> functions to unsigned char. |
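The second bullet matters because plain char may be signed; passing a byte >= 0x80 to a <ctype.h> function is undefined behaviour. The standard idiom:
#include <ctype.h>

static int is_hex_byte(char c)
{
    return isxdigit((unsigned char)c);   /* cast prevents sign extension */
}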
vte_sequence_handler_soft_reset (VteTerminal *terminal, GValueArray *params)
{
vte_terminal_reset(terminal, FALSE, FALSE);
} | 0 | [] | vte | 58bc3a942f198a1a8788553ca72c19d7c1702b74 | 100,363,665,022,219,520,000,000,000,000,000,000,000 | 4 | fix bug #548272
svn path=/trunk/; revision=2365 |
const char *charset_name(struct smb_iconv_handle *ic, charset_t ch)
{
switch (ch) {
case CH_UTF16: return "UTF-16LE";
case CH_UNIX: return ic->unix_charset;
case CH_DOS: return ic->dos_charset;
case CH_UTF8: return "UTF8";
case CH_UTF16BE: return "UTF-16BE";
case CH_UTF16MUNGED: return "UTF16_MUNGED";
default:
return "ASCII";
}
} | 0 | [
"CWE-200"
] | samba | ba5dbda6d0174a59d221c45cca52ecd232820d48 | 223,298,157,450,624,600,000,000,000,000,000,000,000 | 13 | CVE-2015-5330: Fix handling of unicode near string endings
Until now next_codepoint_ext() and next_codepoint_handle_ext() were
using strnlen(str, 5) to determine how much string they should try to
decode. This ended up looking past the end of the string when it was not
null terminated and the final character looked like a multi-byte encoding.
The fix is to let the caller say how long the string can be.
Bug: https://bugzilla.samba.org/show_bug.cgi?id=11599
Signed-off-by: Douglas Bagnall <[email protected]>
Pair-programmed-with: Andrew Bartlett <[email protected]>
Reviewed-by: Ralph Boehme <[email protected]> |
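A sketch of the fixed contract: the caller supplies the remaining length and the decoder never reads past it (simplified lead-byte classification, not the real next_codepoint_ext()):
#include <stddef.h>

static int utf8_seq_len(const unsigned char *str, size_t len)
{
    if (len == 0)
        return -1;
    unsigned char c = str[0];
    int need = c < 0x80            ? 1
             : (c & 0xE0) == 0xC0  ? 2
             : (c & 0xF0) == 0xE0  ? 3
             : (c & 0xF8) == 0xF0  ? 4 : -1;
    if (need < 0 || (size_t)need > len)
        return -1;                 /* invalid or truncated near the end */
    return need;
}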
NTTIME smbXcli_conn_server_system_time(struct smbXcli_conn *conn)
{
if (conn->protocol >= PROTOCOL_SMB2_02) {
return conn->smb2.server.system_time;
}
return conn->smb1.server.system_time;
} | 0 | [
"CWE-20"
] | samba | a819d2b440aafa3138d95ff6e8b824da885a70e9 | 299,945,031,838,879,070,000,000,000,000,000,000,000 | 8 | CVE-2015-5296: libcli/smb: make sure we require signing when we demand encryption on a session
BUG: https://bugzilla.samba.org/show_bug.cgi?id=11536
Signed-off-by: Stefan Metzmacher <[email protected]>
Reviewed-by: Jeremy Allison <[email protected]> |
FreeAuditTimer(void)
{
if (auditTimer != NULL) {
/* Force output of pending messages */
TimerForce(auditTimer);
TimerFree(auditTimer);
auditTimer = NULL;
}
} | 0 | [
"CWE-863"
] | xserver | da15c7413916f754708c62c2089265528cd661e2 | 48,070,677,691,238,790,000,000,000,000,000,000,000 | 9 | LogFilePrep: add a comment to the unsafe format string.
CVE-2018-14665 also made it possible to exploit this to access
memory. With -logfile forbidden when running with elevated privileges
this is no longer an issue.
Signed-off-by: Matthieu Herrb <[email protected]>
Reviewed-by: Adam Jackson <[email protected]>
(cherry picked from commit 248d164eae27f1f310266d78e52f13f64362f81e) |
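The unsafe pattern the comment warns about is passing attacker-influenced text as the format itself; the safe idiom pins it to an argument:
#include <stdio.h>

static void log_path(FILE *log, const char *user_path)
{
    /* fprintf(log, user_path) would interpret %-sequences in the name */
    fprintf(log, "%s\n", user_path);   /* data stays data */
}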
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
gfp_t gfp)
{
struct sctp_association *asoc;
__u16 needed, freed;
asoc = ulpq->asoc;
if (chunk) {
needed = ntohs(chunk->chunk_hdr->length);
needed -= sizeof(sctp_data_chunk_t);
} else
needed = SCTP_DEFAULT_MAXWINDOW;
freed = 0;
if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
freed = sctp_ulpq_renege_order(ulpq, needed);
if (freed < needed) {
freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
}
}
/* If able to free enough room, accept this chunk. */
if (chunk && (freed >= needed)) {
__u32 tsn;
tsn = ntohl(chunk->subh.data_hdr->tsn);
sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
sctp_ulpq_tail_data(ulpq, chunk, gfp);
sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
}
return;
} | 0 | [] | linux-2.6 | 672e7cca17ed6036a1756ed34cf20dbd72d5e5f6 | 289,304,298,553,509,440,000,000,000,000,000,000,000 | 34 | [SCTP]: Prevent possible infinite recursion with multiple bundled DATA.
There is a rare situation that causes lksctp to go into infinite recursion
and crash the system. The trigger is a packet that contains at least the
first two DATA fragments of a message bundled together. The recursion is
triggered when the user data buffer is smaller that the full data message.
The problem is that we clone the skb for every fragment in the message.
When reassembling the full message, we try to link skbs from the "first
fragment" clone using the frag_list. However, since the frag_list is shared
between two clones in this rare situation, we end up setting the frag_list
pointer of the second fragment to point to itself. This causes
sctp_skb_pull() to potentially recurse indefinitely.
Proposed solution is to make a copy of the skb when attempting to link
things using frag_list.
Signed-off-by: Vladislav Yasevich <[email protected]>
Signed-off-by: Sridhar Samudrala <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
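A structural sketch of the fix's idea (the real code operates on struct sk_buff; names here are hypothetical): a clone that shares its sibling's frag_list must be copied before it is linked, or the list can point back at itself:
#include <stdlib.h>
#include <string.h>

struct frag { struct frag *frag_list; int shared; };

static struct frag *priv_copy(const struct frag *f)
{
    struct frag *c = malloc(sizeof(*c));
    if (c) {
        memcpy(c, f, sizeof(*c));
        c->shared = 0;            /* the copy owns its own metadata */
    }
    return c;
}

static int link_fragment(struct frag *head, struct frag *f)
{
    if (f->shared) {              /* clone shares frag_list with a sibling */
        f = priv_copy(f);
        if (!f)
            return -1;
    }
    f->frag_list = NULL;          /* never let it point back at itself */
    head->frag_list = f;
    return 0;
}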
void udta_del(GF_Box *s)
{
u32 i;
GF_UserDataMap *map;
GF_UserDataBox *ptr = (GF_UserDataBox *)s;
if (ptr == NULL) return;
i=0;
while ((map = (GF_UserDataMap *)gf_list_enum(ptr->recordList, &i))) {
gf_isom_box_array_del(map->other_boxes);
gf_free(map);
}
gf_list_del(ptr->recordList);
gf_free(ptr);
} | 0 | [
"CWE-400",
"CWE-401"
] | gpac | d2371b4b204f0a3c0af51ad4e9b491144dd1225c | 339,459,834,649,226,530,000,000,000,000,000,000,000 | 14 | prevent dref memleak on invalid input (#1183) |
GF_Err hdlr_Read(GF_Box *s, GF_BitStream *bs)
{
GF_HandlerBox *ptr = (GF_HandlerBox *)s;
ptr->reserved1 = gf_bs_read_u32(bs);
ptr->handlerType = gf_bs_read_u32(bs);
gf_bs_read_data(bs, (char*)ptr->reserved2, 12);
ISOM_DECREASE_SIZE(ptr, 20);
if (ptr->size) {
size_t len;
ptr->nameUTF8 = (char*)gf_malloc((u32) ptr->size);
if (ptr->nameUTF8 == NULL) return GF_OUT_OF_MEM;
gf_bs_read_data(bs, ptr->nameUTF8, (u32) ptr->size);
/*safety check in case the string is not null-terminated*/
if (ptr->nameUTF8[ptr->size-1]) {
char *str = (char*)gf_malloc((u32) ptr->size + 1);
memcpy(str, ptr->nameUTF8, (u32) ptr->size);
str[ptr->size] = 0;
gf_free(ptr->nameUTF8);
ptr->nameUTF8 = str;
}
//patch for old QT files
if (ptr->size > 1 && ptr->nameUTF8[0] == ptr->size-1) {
len = strlen(ptr->nameUTF8 + 1);
memmove(ptr->nameUTF8, ptr->nameUTF8+1, len );
ptr->nameUTF8[len] = 0;
ptr->store_counted_string = GF_TRUE;
}
}
return GF_OK;
} | 0 | [
"CWE-400",
"CWE-401"
] | gpac | d2371b4b204f0a3c0af51ad4e9b491144dd1225c | 270,092,450,935,669,200,000,000,000,000,000,000,000 | 33 | prevent dref memleak on invalid input (#1183) |
int OPENSSL_init_crypto(uint64_t opts, const OPENSSL_INIT_SETTINGS *settings)
{
if (stopped) {
if (!(opts & OPENSSL_INIT_BASE_ONLY))
CRYPTOerr(CRYPTO_F_OPENSSL_INIT_CRYPTO, ERR_R_INIT_FAIL);
return 0;
}
/*
* When the caller specifies OPENSSL_INIT_BASE_ONLY, that should be the
* *only* option specified. With that option we return immediately after
* doing the requested limited initialization. Note that
* err_shelve_state() called by us via ossl_init_load_crypto_nodelete()
* re-enters OPENSSL_init_crypto() with OPENSSL_INIT_BASE_ONLY, but with
* base already initialized this is a harmless NOOP.
*
* If we remain the only caller of err_shelve_state() the recursion should
* perhaps be removed, but if in doubt, it can be left in place.
*/
if (!RUN_ONCE(&base, ossl_init_base))
return 0;
if (opts & OPENSSL_INIT_BASE_ONLY)
return 1;
/*
* Now we don't always set up exit handlers, the INIT_BASE_ONLY calls
* should not have the side-effect of setting up exit handlers, and
* therefore, this code block is below the INIT_BASE_ONLY-conditioned early
* return above.
*/
if ((opts & OPENSSL_INIT_NO_ATEXIT) != 0) {
if (!RUN_ONCE_ALT(®ister_atexit, ossl_init_no_register_atexit,
ossl_init_register_atexit))
return 0;
} else if (!RUN_ONCE(®ister_atexit, ossl_init_register_atexit)) {
return 0;
}
if (!RUN_ONCE(&load_crypto_nodelete, ossl_init_load_crypto_nodelete))
return 0;
if ((opts & OPENSSL_INIT_NO_LOAD_CRYPTO_STRINGS)
&& !RUN_ONCE_ALT(&load_crypto_strings,
ossl_init_no_load_crypto_strings,
ossl_init_load_crypto_strings))
return 0;
if ((opts & OPENSSL_INIT_LOAD_CRYPTO_STRINGS)
&& !RUN_ONCE(&load_crypto_strings, ossl_init_load_crypto_strings))
return 0;
if ((opts & OPENSSL_INIT_NO_ADD_ALL_CIPHERS)
&& !RUN_ONCE_ALT(&add_all_ciphers, ossl_init_no_add_all_ciphers,
ossl_init_add_all_ciphers))
return 0;
if ((opts & OPENSSL_INIT_ADD_ALL_CIPHERS)
&& !RUN_ONCE(&add_all_ciphers, ossl_init_add_all_ciphers))
return 0;
if ((opts & OPENSSL_INIT_NO_ADD_ALL_DIGESTS)
&& !RUN_ONCE_ALT(&add_all_digests, ossl_init_no_add_all_digests,
ossl_init_add_all_digests))
return 0;
if ((opts & OPENSSL_INIT_ADD_ALL_DIGESTS)
&& !RUN_ONCE(&add_all_digests, ossl_init_add_all_digests))
return 0;
if ((opts & OPENSSL_INIT_ATFORK)
&& !openssl_init_fork_handlers())
return 0;
if ((opts & OPENSSL_INIT_NO_LOAD_CONFIG)
&& !RUN_ONCE_ALT(&config, ossl_init_no_config, ossl_init_config))
return 0;
if (opts & OPENSSL_INIT_LOAD_CONFIG) {
int ret;
CRYPTO_THREAD_write_lock(init_lock);
conf_settings = settings;
ret = RUN_ONCE(&config, ossl_init_config);
conf_settings = NULL;
CRYPTO_THREAD_unlock(init_lock);
if (ret <= 0)
return 0;
}
if ((opts & OPENSSL_INIT_ASYNC)
&& !RUN_ONCE(&async, ossl_init_async))
return 0;
#ifndef OPENSSL_NO_ENGINE
if ((opts & OPENSSL_INIT_ENGINE_OPENSSL)
&& !RUN_ONCE(&engine_openssl, ossl_init_engine_openssl))
return 0;
# if !defined(OPENSSL_NO_HW) && !defined(OPENSSL_NO_DEVCRYPTOENG)
if ((opts & OPENSSL_INIT_ENGINE_CRYPTODEV)
&& !RUN_ONCE(&engine_devcrypto, ossl_init_engine_devcrypto))
return 0;
# endif
# ifndef OPENSSL_NO_RDRAND
if ((opts & OPENSSL_INIT_ENGINE_RDRAND)
&& !RUN_ONCE(&engine_rdrand, ossl_init_engine_rdrand))
return 0;
# endif
if ((opts & OPENSSL_INIT_ENGINE_DYNAMIC)
&& !RUN_ONCE(&engine_dynamic, ossl_init_engine_dynamic))
return 0;
# ifndef OPENSSL_NO_STATIC_ENGINE
# if !defined(OPENSSL_NO_HW) && !defined(OPENSSL_NO_HW_PADLOCK)
if ((opts & OPENSSL_INIT_ENGINE_PADLOCK)
&& !RUN_ONCE(&engine_padlock, ossl_init_engine_padlock))
return 0;
# endif
# if defined(OPENSSL_SYS_WIN32) && !defined(OPENSSL_NO_CAPIENG)
if ((opts & OPENSSL_INIT_ENGINE_CAPI)
&& !RUN_ONCE(&engine_capi, ossl_init_engine_capi))
return 0;
# endif
# if !defined(OPENSSL_NO_AFALGENG)
if ((opts & OPENSSL_INIT_ENGINE_AFALG)
&& !RUN_ONCE(&engine_afalg, ossl_init_engine_afalg))
return 0;
# endif
# endif
if (opts & (OPENSSL_INIT_ENGINE_ALL_BUILTIN
| OPENSSL_INIT_ENGINE_OPENSSL
| OPENSSL_INIT_ENGINE_AFALG)) {
ENGINE_register_all_complete();
}
#endif
#ifndef OPENSSL_NO_COMP
if ((opts & OPENSSL_INIT_ZLIB)
&& !RUN_ONCE(&zlib, ossl_init_zlib))
return 0;
#endif
return 1;
} | 0 | [
"CWE-330"
] | openssl | 1b0fe00e2704b5e20334a16d3c9099d1ba2ef1be | 48,698,833,410,058,300,000,000,000,000,000,000,000 | 141 | drbg: ensure fork-safety without using a pthread_atfork handler
When the new OpenSSL CSPRNG was introduced in version 1.1.1,
it was announced in the release notes that it would be fork-safe,
which the old CSPRNG hadn't been.
The fork-safety was implemented using a fork count, which was
incremented by a pthread_atfork handler. Initially, this handler
was enabled by default. Unfortunately, the default behaviour
had to be changed for other reasons in commit b5319bdbd095, so
the new OpenSSL CSPRNG failed to keep its promise.
This commit restores the fork-safety using a different approach.
It replaces the fork count by a fork id, which coincides with
the process id on UNIX-like operating systems and is zero on other
operating systems. It is used to detect when an automatic reseed
after a fork is necessary.
To prevent a future regression, it also adds a test to verify that
the child reseeds after fork.
CVE-2019-1549
Reviewed-by: Paul Dale <[email protected]>
Reviewed-by: Matt Caswell <[email protected]>
(Merged from https://github.com/openssl/openssl/pull/9802) |
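The fork-id approach can be sketched in a few lines (names hypothetical; the real state lives inside the DRBG):
#include <unistd.h>

static pid_t drbg_fork_id;   /* pid recorded at the last (re)seed */

static int drbg_needs_reseed(void)
{
    pid_t pid = getpid();
    if (pid != drbg_fork_id) {   /* running in a freshly forked child */
        drbg_fork_id = pid;
        return 1;                /* reseed before producing any output */
    }
    return 0;
}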
void visit(Character &ope) override { found_ope = ope.shared_from_this(); } | 0 | [
"CWE-125"
] | cpp-peglib | b3b29ce8f3acf3a32733d930105a17d7b0ba347e | 285,042,036,465,956,870,000,000,000,000,000,000,000 | 1 | Fix #122 |
int imap_create_mailbox (IMAP_DATA* idata, char* mailbox)
{
char buf[LONG_STRING*2], mbox[LONG_STRING];
imap_munge_mbox_name (idata, mbox, sizeof (mbox), mailbox);
snprintf (buf, sizeof (buf), "CREATE %s", mbox);
if (imap_exec (idata, buf, 0) != 0)
{
mutt_error (_("CREATE failed: %s"), imap_cmd_trailer (idata));
return -1;
}
return 0;
} | 0 | [
"CWE-200",
"CWE-319"
] | mutt | 3e88866dc60b5fa6aaba6fd7c1710c12c1c3cd01 | 84,358,961,198,354,415,000,000,000,000,000,000,000 | 15 | Prevent possible IMAP MITM via PREAUTH response.
This is similar to CVE-2014-2567 and CVE-2020-12398. STARTTLS is not
allowed in the Authenticated state, so previously Mutt would
implicitly mark the connection as authenticated and skip any
encryption checking/enabling.
No credentials are exposed, but it does allow messages to be sent to
an attacker, via postpone or fcc'ing for instance.
Reuse the $ssl_starttls quadoption "in reverse" to prompt to abort the
connection if it is unencrypted.
Thanks very much to Damian Poddebniak and Fabian Ising from the
Münster University of Applied Sciences for reporting this issue, and
their help in testing the fix. |
rad_cvt_addr(const void *data)
{
struct in_addr value;
memcpy(&value.s_addr, data, sizeof value.s_addr);
return value;
} | 0 | [
"CWE-119",
"CWE-787"
] | php-radius | 13c149b051f82b709e8d7cc32111e84b49d57234 | 201,937,777,788,709,760,000,000,000,000,000,000,000 | 7 | Fix a security issue in radius_get_vendor_attr().
The underlying rad_get_vendor_attr() function assumed that it would always be
given valid VSA data. Indeed, the buffer length wasn't even passed in; the
assumption was that the length field within the VSA structure would be valid.
This could result in denial of service by providing a length that would be
beyond the memory limit, or potential arbitrary memory access by providing a
length greater than the actual data given.
rad_get_vendor_attr() has been changed to require the raw data length be
provided, and this is then used to check that the VSA is valid.
Conflicts:
radlib_vs.h |
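A sketch of the validation the fix adds, assuming the usual VSA layout of vendor id (4 bytes), type (1), length (1), value (length-2 bytes); helper name hypothetical:
#include <stddef.h>

static int vsa_valid(const unsigned char *data, size_t raw_len)
{
    if (raw_len < 6)
        return 0;                    /* too short for vendor + type + length */
    size_t attr_len = data[5];       /* length field inside the VSA */
    if (attr_len < 2)
        return 0;                    /* must at least cover type + length */
    if (attr_len > raw_len - 4)
        return 0;                    /* claims more data than was supplied */
    return 1;
}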
void PlayerGeneric::setPeakAutoAdjust(bool b)
{
this->autoAdjustPeak = b;
} | 0 | [
"CWE-416"
] | MilkyTracker | 7afd55c42ad80d01a339197a2d8b5461d214edaf | 272,256,525,108,257,030,000,000,000,000,000,000,000 | 4 | Fix use-after-free in PlayerGeneric destructor |
const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src,
int *dst_length, int *consumed, int length)
{
int i, si, di;
uint8_t *dst;
int bufidx;
// src[0]&0x80; // forbidden bit
h->nal_ref_idc = src[0] >> 5;
h->nal_unit_type = src[0] & 0x1F;
src++;
length--;
#define STARTCODE_TEST \
if (i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) { \
if (src[i + 2] != 3 && src[i + 2] != 0) { \
/* startcode, so we must be past the end */ \
length = i; \
} \
break; \
}
#if HAVE_FAST_UNALIGNED
#define FIND_FIRST_ZERO \
if (i > 0 && !src[i]) \
i--; \
while (src[i]) \
i++
#if HAVE_FAST_64BIT
for (i = 0; i + 1 < length; i += 9) {
if (!((~AV_RN64A(src + i) &
(AV_RN64A(src + i) - 0x0100010001000101ULL)) &
0x8000800080008080ULL))
continue;
FIND_FIRST_ZERO;
STARTCODE_TEST;
i -= 7;
}
#else
for (i = 0; i + 1 < length; i += 5) {
if (!((~AV_RN32A(src + i) &
(AV_RN32A(src + i) - 0x01000101U)) &
0x80008080U))
continue;
FIND_FIRST_ZERO;
STARTCODE_TEST;
i -= 3;
}
#endif
#else
for (i = 0; i + 1 < length; i += 2) {
if (src[i])
continue;
if (i > 0 && src[i - 1] == 0)
i--;
STARTCODE_TEST;
}
#endif
// use second escape buffer for inter data
bufidx = h->nal_unit_type == NAL_DPC ? 1 : 0;
av_fast_padded_malloc(&h->rbsp_buffer[bufidx], &h->rbsp_buffer_size[bufidx], length+MAX_MBPAIR_SIZE);
dst = h->rbsp_buffer[bufidx];
if (!dst)
return NULL;
if(i>=length-1){ //no escaped 0
*dst_length= length;
*consumed= length+1; //+1 for the header
if(h->avctx->flags2 & CODEC_FLAG2_FAST){
return src;
}else{
memcpy(dst, src, length);
return dst;
}
}
memcpy(dst, src, i);
si = di = i;
while (si + 2 < length) {
// remove escapes (very rare 1:2^22)
if (src[si + 2] > 3) {
dst[di++] = src[si++];
dst[di++] = src[si++];
} else if (src[si] == 0 && src[si + 1] == 0 && src[si + 2] != 0) {
if (src[si + 2] == 3) { // escape
dst[di++] = 0;
dst[di++] = 0;
si += 3;
continue;
} else // next start code
goto nsc;
}
dst[di++] = src[si++];
}
while (si < length)
dst[di++] = src[si++];
nsc:
memset(dst + di, 0, FF_INPUT_BUFFER_PADDING_SIZE);
*dst_length = di;
*consumed = si + 1; // +1 for the header
/* FIXME store exact number of bits in the getbitcontext
* (it is needed for decoding) */
return dst;
} | 0 | [
"CWE-703"
] | FFmpeg | e8714f6f93d1a32f4e4655209960afcf4c185214 | 234,362,454,332,010,200,000,000,000,000,000,000,000 | 112 | avcodec/h264: Clear delayed_pic on deallocation
Fixes use of freed memory
Fixes: case5_av_frame_copy_props.mp4
Found-by: Michal Zalewski <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]> |
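The fix amounts to nulling the slots when the pictures are released; a minimal sketch (bound and names hypothetical):
#include <stdlib.h>

#define MAX_DELAYED_PIC 18

static void *delayed_pic[MAX_DELAYED_PIC + 2];

static void free_delayed_pics(void)
{
    for (int i = 0; i < MAX_DELAYED_PIC + 2; i++) {
        free(delayed_pic[i]);
        delayed_pic[i] = NULL;   /* no dangling pointer for a later flush */
    }
}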
dump_keyblob (tupledesc_t tuples)
{
size_t n;
unsigned int tag;
const void *value;
log_info ("keyblob dump:\n");
tag = KEYBLOB_TAG_BLOBVERSION;
value = find_tuple (tuples, tag, &n);
while (value)
{
log_info (" tag: %-5u len: %-2u value: ", tag, (unsigned int)n);
if (tag == KEYBLOB_TAG_ENCKEY
|| tag == KEYBLOB_TAG_MACKEY)
log_printf ("[confidential]\n");
else if (!n)
log_printf ("[none]\n");
else
log_printhex ("", value, n);
value = next_tuple (tuples, &tag, &n);
}
} | 0 | [
"CWE-20"
] | gnupg | 2183683bd633818dd031b090b5530951de76f392 | 141,419,736,456,855,930,000,000,000,000,000,000,000 | 22 | Use inline functions to convert buffer data to scalars.
* common/host2net.h (buf16_to_ulong, buf16_to_uint): New.
(buf16_to_ushort, buf16_to_u16): New.
(buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New.
--
Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to
avoid all sign extension on shift problems. Hanno Böck found a case
with an invalid read due to this problem. To fix that once and for
all almost all uses of "<< 24" and "<< 8" are changed by this patch to
use an inline function from host2net.h.
Signed-off-by: Werner Koch <[email protected]> |
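The hazard: `buf[0] << 24` on a signed char smears the high bit across the int. A sketch of the inline-function style the commit describes:
#include <stdint.h>

static inline uint32_t buf32_to_u32(const void *buffer)
{
    const unsigned char *p = buffer;    /* unsigned: no sign extension */
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16)
         | ((uint32_t)p[2] <<  8) |  (uint32_t)p[3];
}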
png_set_check_for_invalid_index(png_structrp png_ptr, int allowed)
{
png_debug(1, "in png_set_check_for_invalid_index");
if (allowed > 0)
png_ptr->num_palette_max = 0;
else
png_ptr->num_palette_max = -1;
} | 0 | [
"CWE-120"
] | libpng | a901eb3ce6087e0afeef988247f1a1aa208cb54d | 50,705,072,961,310,700,000,000,000,000,000,000,000 | 10 | [libpng16] Prevent reading over-length PLTE chunk (Cosmin Truta). |
TEST_F(OwnedImplTest, ReadReserveAndCommit) {
BufferFragmentImpl frag("", 0, nullptr);
Buffer::OwnedImpl buf;
buf.add("bbbbb");
os_fd_t pipe_fds[2] = {0, 0};
auto& os_sys_calls = Api::OsSysCallsSingleton::get();
#ifdef WIN32
ASSERT_EQ(os_sys_calls.socketpair(AF_INET, SOCK_STREAM, 0, pipe_fds).rc_, 0);
#else
ASSERT_EQ(pipe(pipe_fds), 0);
#endif
Network::IoSocketHandleImpl io_handle(pipe_fds[0]);
ASSERT_EQ(os_sys_calls.setsocketblocking(pipe_fds[0], false).rc_, 0);
ASSERT_EQ(os_sys_calls.setsocketblocking(pipe_fds[1], false).rc_, 0);
const uint32_t read_length = 32768;
std::string data = "e";
const ssize_t rc = os_sys_calls.write(pipe_fds[1], data.data(), data.size()).rc_;
ASSERT_GT(rc, 0);
Api::IoCallUint64Result result = buf.read(io_handle, read_length);
ASSERT_EQ(result.rc_, static_cast<uint64_t>(rc));
ASSERT_EQ(os_sys_calls.close(pipe_fds[1]).rc_, 0);
EXPECT_EQ("bbbbbe", buf.toString());
expectSlices({{6, 4026, 4032}}, buf);
} | 0 | [
"CWE-401"
] | envoy | 5eba69a1f375413fb93fab4173f9c393ac8c2818 | 13,455,521,335,043,260,000,000,000,000,000,000,000 | 26 | [buffer] Add on-drain hook to buffer API and use it to avoid fragmentation due to tracking of H2 data and control frames in the output buffer (#144)
Signed-off-by: antonio <[email protected]> |
static int megasas_pd_get_info_submit(SCSIDevice *sdev, int lun,
MegasasCmd *cmd)
{
struct mfi_pd_info *info = cmd->iov_buf;
size_t dcmd_size = sizeof(struct mfi_pd_info);
uint64_t pd_size;
uint16_t pd_id = ((sdev->id & 0xFF) << 8) | (lun & 0xFF);
uint8_t cmdbuf[6];
SCSIRequest *req;
size_t len, resid;
if (!cmd->iov_buf) {
cmd->iov_buf = g_malloc0(dcmd_size);
info = cmd->iov_buf;
info->inquiry_data[0] = 0x7f; /* Force PQual 0x3, PType 0x1f */
info->vpd_page83[0] = 0x7f;
megasas_setup_inquiry(cmdbuf, 0, sizeof(info->inquiry_data));
req = scsi_req_new(sdev, cmd->index, lun, cmdbuf, cmd);
if (!req) {
trace_megasas_dcmd_req_alloc_failed(cmd->index,
"PD get info std inquiry");
g_free(cmd->iov_buf);
cmd->iov_buf = NULL;
return MFI_STAT_FLASH_ALLOC_FAIL;
}
trace_megasas_dcmd_internal_submit(cmd->index,
"PD get info std inquiry", lun);
len = scsi_req_enqueue(req);
if (len > 0) {
cmd->iov_size = len;
scsi_req_continue(req);
}
return MFI_STAT_INVALID_STATUS;
} else if (info->inquiry_data[0] != 0x7f && info->vpd_page83[0] == 0x7f) {
megasas_setup_inquiry(cmdbuf, 0x83, sizeof(info->vpd_page83));
req = scsi_req_new(sdev, cmd->index, lun, cmdbuf, cmd);
if (!req) {
trace_megasas_dcmd_req_alloc_failed(cmd->index,
"PD get info vpd inquiry");
return MFI_STAT_FLASH_ALLOC_FAIL;
}
trace_megasas_dcmd_internal_submit(cmd->index,
"PD get info vpd inquiry", lun);
len = scsi_req_enqueue(req);
if (len > 0) {
cmd->iov_size = len;
scsi_req_continue(req);
}
return MFI_STAT_INVALID_STATUS;
}
/* Finished, set FW state */
if ((info->inquiry_data[0] >> 5) == 0) {
if (megasas_is_jbod(cmd->state)) {
info->fw_state = cpu_to_le16(MFI_PD_STATE_SYSTEM);
} else {
info->fw_state = cpu_to_le16(MFI_PD_STATE_ONLINE);
}
} else {
info->fw_state = cpu_to_le16(MFI_PD_STATE_OFFLINE);
}
info->ref.v.device_id = cpu_to_le16(pd_id);
info->state.ddf.pd_type = cpu_to_le16(MFI_PD_DDF_TYPE_IN_VD|
MFI_PD_DDF_TYPE_INTF_SAS);
blk_get_geometry(sdev->conf.blk, &pd_size);
info->raw_size = cpu_to_le64(pd_size);
info->non_coerced_size = cpu_to_le64(pd_size);
info->coerced_size = cpu_to_le64(pd_size);
info->encl_device_id = 0xFFFF;
info->slot_number = (sdev->id & 0xFF);
info->path_info.count = 1;
info->path_info.sas_addr[0] =
cpu_to_le64(megasas_get_sata_addr(pd_id));
info->connected_port_bitmap = 0x1;
info->device_speed = 1;
info->link_speed = 1;
resid = dma_buf_read(cmd->iov_buf, dcmd_size, &cmd->qsg);
g_free(cmd->iov_buf);
cmd->iov_size = dcmd_size - resid;
cmd->iov_buf = NULL;
return MFI_STAT_OK;
} | 0 | [
"CWE-401"
] | qemu | 765a707000e838c30b18d712fe6cb3dd8e0435f3 | 222,442,367,898,323,330,000,000,000,000,000,000,000 | 82 | megasas: fix guest-triggered memory leak
If the guest sets the sglist size to a value >=2GB, megasas_handle_dcmd
will return MFI_STAT_MEMORY_NOT_AVAILABLE without freeing the memory.
Avoid this by returning only the status from map_dcmd, and loading
cmd->iov_size in the caller.
Reported-by: Li Qiang <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
longlong Field_year::val_int(void)
{
ASSERT_COLUMN_MARKED_FOR_READ;
DBUG_ASSERT(field_length == 2 || field_length == 4);
int tmp= (int) ptr[0];
if (field_length != 4)
tmp%=100; // Return last 2 char
else if (tmp)
tmp+=1900;
return (longlong) tmp;
} | 0 | [
"CWE-416",
"CWE-703"
] | server | 08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917 | 83,392,009,755,726,700,000,000,000,000,000,000,000 | 11 | MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter prematurely freed item.
When table is reopened from cache vcol_info contains stale
expression. We refresh expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]> |
up4(unsigned x)
{
return ~3u & (3+ x);
} | 0 | [
"CWE-476"
] | upx | ef336dbcc6dc8344482f8cf6c909ae96c3286317 | 87,565,762,369,288,260,000,000,000,000,000,000,000 | 4 | Protect against bad crafted input.
https://github.com/upx/upx/issues/128
modified: p_lx_elf.cpp |
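For reference, `~3u & (3 + x)` rounds x up to the next multiple of 4; a quick check:
#include <assert.h>

static unsigned up4(unsigned x) { return ~3u & (3 + x); }

int main(void)
{
    assert(up4(0) == 0);
    assert(up4(1) == 4);
    assert(up4(4) == 4);
    assert(up4(5) == 8);
    return 0;
}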
exec_normal(int was_typed, int use_vpeekc, int may_use_terminal_loop UNUSED)
{
oparg_T oa;
clear_oparg(&oa);
finish_op = FALSE;
while ((!stuff_empty()
|| ((was_typed || !typebuf_typed()) && typebuf.tb_len > 0)
|| (use_vpeekc && vpeekc() != NUL))
&& !got_int)
{
update_topline_cursor();
#ifdef FEAT_TERMINAL
if (may_use_terminal_loop && term_use_loop()
&& oa.op_type == OP_NOP && oa.regname == NUL
&& !VIsual_active)
{
/* If terminal_loop() returns OK we got a key that is handled
* in Normal model. With FAIL we first need to position the
* cursor and the screen needs to be redrawn. */
if (terminal_loop(TRUE) == OK)
normal_cmd(&oa, TRUE);
}
else
#endif
/* execute a Normal mode cmd */
normal_cmd(&oa, TRUE);
}
} | 0 | [
"CWE-78"
] | vim | 8c62a08faf89663e5633dc5036cd8695c80f1075 | 325,728,273,060,676,200,000,000,000,000,000,000,000 | 29 | patch 8.1.0881: can execute shell commands in rvim through interfaces
Problem: Can execute shell commands in rvim through interfaces.
Solution: Disable using interfaces in restricted mode. Allow for writing
file with writefile(), histadd() and a few others. |
ATExecEnableDisableTrigger(Relation rel, char *trigname,
char fires_when, bool skip_system, LOCKMODE lockmode)
{
EnableDisableTrigger(rel, trigname, fires_when, skip_system);
} | 0 | [
"CWE-362"
] | postgres | 5f173040e324f6c2eebb90d86cf1b0cdb5890f0a | 77,346,387,684,154,540,000,000,000,000,000,000,000 | 5 | Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062 |
static void ipv4_link_failure(struct sk_buff *skb)
{
struct rtable *rt;
ipv4_send_dest_unreach(skb);
rt = skb_rtable(skb);
if (rt)
dst_set_expires(&rt->dst, 0);
} | 0 | [
"CWE-327"
] | linux | aa6dd211e4b1dde9d5dc25d699d35f789ae7eeba | 57,648,618,524,499,550,000,000,000,000,000,000,000 | 10 | inet: use bigger hash table for IP ID generation
In commit 73f156a6e8c1 ("inetpeer: get rid of ip_id_count")
I used a very small hash table that could be abused
by patient attackers to reveal sensitive information.
Switch to a dynamic sizing, depending on RAM size.
Typical big hosts will now use 128x more storage (2 MB)
to get a similar increase in security and reduction
of hash collisions.
As a bonus, use of alloc_large_system_hash() spreads
allocated memory among all NUMA nodes.
Fixes: 73f156a6e8c1 ("inetpeer: get rid of ip_id_count")
Reported-by: Amit Klein <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Willy Tarreau <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
ldns_str2rdf_eui48(ldns_rdf **rd, const char *str)
{
unsigned int a, b, c, d, e, f;
uint8_t bytes[6];
int l;
if (sscanf(str, "%2x-%2x-%2x-%2x-%2x-%2x%n",
&a, &b, &c, &d, &e, &f, &l) != 6 ||
l != (int)strlen(str)) {
return LDNS_STATUS_INVALID_EUI48;
} else {
bytes[0] = a;
bytes[1] = b;
bytes[2] = c;
bytes[3] = d;
bytes[4] = e;
bytes[5] = f;
*rd = ldns_rdf_new_frm_data(LDNS_RDF_TYPE_EUI48, 6, &bytes);
}
return *rd ? LDNS_STATUS_OK : LDNS_STATUS_MEM_ERR;
} | 0 | [] | ldns | 3bdeed02505c9bbacb3b64a97ddcb1de967153b7 | 116,792,167,596,626,740,000,000,000,000,000,000,000 | 21 | bugfix #1257: Free after reallocing to 0 size
Thanks Stephan Zeisberg |
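The bug class in one helper: realloc(p, 0) may free p and return NULL, so treating NULL as failure and freeing p again is a double free. A defensive sketch:
#include <stdlib.h>

static void *shrink(void *p, size_t n)
{
    if (n == 0) {          /* make the zero case explicit */
        free(p);
        return NULL;       /* caller must not free p again */
    }
    return realloc(p, n);
}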
flatpak_dir_needs_update_for_commit_and_subpaths (FlatpakDir *self,
const char *remote,
const char *ref,
const char *target_commit,
const char **opt_subpaths)
{
g_autoptr(GVariant) deploy_data = NULL;
g_autofree const char **old_subpaths = NULL;
const char **subpaths;
g_autofree char *url = NULL;
const char *installed_commit;
const char *installed_alt_id;
g_assert (target_commit != NULL);
/* Never update from disabled remotes */
if (!ostree_repo_remote_get_url (self->repo, remote, &url, NULL))
return FALSE;
if (*url == 0)
return FALSE;
deploy_data = flatpak_dir_get_deploy_data (self, ref, FLATPAK_DEPLOY_VERSION_ANY, NULL, NULL);
if (deploy_data != NULL)
old_subpaths = flatpak_deploy_data_get_subpaths (deploy_data);
else
old_subpaths = g_new0 (const char *, 1); /* Empty strv == all subpaths*/
if (opt_subpaths)
subpaths = opt_subpaths;
else
subpaths = old_subpaths;
/* Not deployed => need update */
if (deploy_data == NULL)
return TRUE;
installed_commit = flatpak_deploy_data_get_commit (deploy_data);
installed_alt_id = flatpak_deploy_data_get_alt_id (deploy_data);
/* Different target commit than deployed => update */
if (g_strcmp0 (target_commit, installed_commit) != 0 &&
g_strcmp0 (target_commit, installed_alt_id) != 0)
return TRUE;
/* target commit is the same as current, but maybe something else that is different? */
/* Same commit, but different subpaths => update */
if (!_g_strv_equal0 ((char **) subpaths, (char **) old_subpaths))
return TRUE;
/* Same subpaths and commit, no need to update */
return FALSE;
} | 0 | [
"CWE-668"
] | flatpak | cd2142888fc4c199723a0dfca1f15ea8788a5483 | 265,446,661,957,928,820,000,000,000,000,000,000,000 | 54 | Don't expose /proc when running apply_extra
As shown by CVE-2019-5736, it is sometimes possible for the sandbox
app to access outside files using /proc/self/exe. This is not
typically an issue for flatpak as the sandbox runs as the user which
has no permissions to e.g. modify the host files.
However, when installing apps using extra-data into the system repo
we *do* actually run a sandbox as root. So, in this case we disable mounting
/proc in the sandbox, which will neuter attacks like this. |
static int do_video_get_event(unsigned int fd, unsigned int cmd, unsigned long arg)
{
struct video_event kevent;
mm_segment_t old_fs = get_fs();
int err;
set_fs(KERNEL_DS);
err = sys_ioctl(fd, cmd, (unsigned long) &kevent);
set_fs(old_fs);
if (!err) {
struct compat_video_event __user *up = compat_ptr(arg);
err = put_user(kevent.type, &up->type);
err |= put_user(kevent.timestamp, &up->timestamp);
err |= put_user(kevent.u.size.w, &up->u.size.w);
err |= put_user(kevent.u.size.h, &up->u.size.h);
err |= put_user(kevent.u.size.aspect_ratio,
&up->u.size.aspect_ratio);
if (err)
err = -EFAULT;
}
return err;
} | 0 | [] | linux-2.6 | 188f83dfe0eeecd1427d0d255cc97dbf7ef6b4b7 | 276,859,405,116,477,000,000,000,000,000,000,000,000 | 25 | [PATCH] BLOCK: Move the msdos device ioctl compat stuff to the msdos driver [try #6]
Move the msdos device ioctl compat stuff from fs/compat_ioctl.c to the msdos
driver so that the msdos header file doesn't need to be included.
Signed-Off-By: David Howells <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
static uint16_t __sungem_mii_read(SunGEMState *s, uint8_t phy_addr,
uint8_t reg_addr)
{
if (phy_addr != s->phy_addr) {
return 0xffff;
}
/* Primitive emulation of a BCM5201 to please the driver,
* ID is 0x00406210. TODO: Do a gigabit PHY like BCM5400
*/
switch (reg_addr) {
case MII_BMCR:
return 0;
case MII_PHYID1:
return 0x0040;
case MII_PHYID2:
return 0x6210;
case MII_BMSR:
if (qemu_get_queue(s->nic)->link_down) {
return MII_BMSR_100TX_FD | MII_BMSR_AUTONEG;
} else {
return MII_BMSR_100TX_FD | MII_BMSR_AN_COMP |
MII_BMSR_AUTONEG | MII_BMSR_LINK_ST;
}
case MII_ANLPAR:
case MII_ANAR:
return MII_ANLPAR_TXFD;
case 0x18: /* 5201 AUX status */
return 3; /* 100FD */
default:
return 0;
};
} | 0 | [
"CWE-835"
] | qemu | 8c92060d3c0248bd4d515719a35922cd2391b9b4 | 289,218,114,817,043,960,000,000,000,000,000,000,000 | 32 | sungem: switch to use qemu_receive_packet() for loopback
This patch switches to use qemu_receive_packet() which can detect
reentrancy and return early.
This is intended to address CVE-2021-3416.
Cc: Prasad J Pandit <[email protected]>
Cc: [email protected]
Reviewed-by: Mark Cave-Ayland <[email protected]>
Reviewed-by: Philippe Mathieu-Daudé <[email protected]>
Reviewed-by: Alistair Francis <[email protected]>
Signed-off-by: Jason Wang <[email protected]> |
void begin_t() {
if (!code_begin_t) return;
mem[_cimg_mp_slot_x] = mem[_cimg_mp_slot_y] = mem[_cimg_mp_slot_z] = mem[_cimg_mp_slot_c] = 0;
p_code_end = code_begin_t.end();
for (p_code = code_begin_t; p_code<p_code_end; ++p_code) {
opcode._data = p_code->_data;
const ulongT target = opcode[1];
mem[target] = _cimg_mp_defunc(*this);
}
p_code_end = code.end();
} | 0 | [
"CWE-770"
] | cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 249,799,681,606,557,670,000,000,000,000,000,000,000 | 11 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
static inline int ishex(int a)
{
return (a >= 'A' && a <= 'F') ||
(a >= 'a' && a <= 'f') ||
(a >= '0' && a <= '9');
} | 0 | [
"CWE-119"
] | mupdf | 60dabde18d7fe12b19da8b509bdfee9cc886aafc | 99,146,521,953,289,400,000,000,000,000,000,000,000 | 6 | Bug 694957: fix stack buffer overflow in xps_parse_color
xps_parse_color happily reads more than FZ_MAX_COLORS values out of a
ContextColor array which overflows the passed in samples array.
Limiting the number of allowed samples to FZ_MAX_COLORS and make sure
to use that constant for all callers fixes the problem.
Thanks to Jean-Jamil Khalifé for reporting and investigating the issue
and providing a sample exploit file. |
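A sketch of the bounded parse (FZ_MAX_COLORS value assumed; the real fix threads the constant through all callers):
#include <stdlib.h>

#define FZ_MAX_COLORS 32   /* assumed for this sketch */

static int parse_samples(const char *s, float *samples, int max)
{
    int n = 0;
    while (n < max) {                 /* the missing bound was the bug */
        char *end;
        float v = strtof(s, &end);
        if (end == s)
            break;                    /* no further number */
        samples[n++] = v;
        s = end;
        while (*s == ',' || *s == ' ')
            s++;                      /* skip separators */
    }
    return n;
}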
static int fts3FunctionArg(
sqlite3_context *pContext, /* SQL function call context */
const char *zFunc, /* Function name */
sqlite3_value *pVal, /* argv[0] passed to function */
Fts3Cursor **ppCsr /* OUT: Store cursor handle here */
){
int rc;
*ppCsr = (Fts3Cursor*)sqlite3_value_pointer(pVal, "fts3cursor");
if( (*ppCsr)!=0 ){
rc = SQLITE_OK;
}else{
char *zErr = sqlite3_mprintf("illegal first argument to %s", zFunc);
sqlite3_result_error(pContext, zErr, -1);
sqlite3_free(zErr);
rc = SQLITE_ERROR;
}
return rc;
} | 0 | [
"CWE-787"
] | sqlite | c72f2fb7feff582444b8ffdc6c900c69847ce8a9 | 46,432,746,742,056,110,000,000,000,000,000,000,000 | 18 | More improvements to shadow table corruption detection in FTS3.
FossilOrigin-Name: 51525f9c3235967bc00a090e84c70a6400698c897aa4742e817121c725b8c99d |
is_mbc_ambiguous(OnigCaseFoldType flag,
const UChar** pp, const UChar* end)
{
int v;
const UChar* p = *pp;
(*pp)++;
v = (EncISO_8859_7_CtypeTable[*p] & (BIT_CTYPE_UPPER | BIT_CTYPE_LOWER));
if ((v | BIT_CTYPE_LOWER) != 0) {
if (*p == 0xc0 || *p == 0xe0)
return FALSE;
else
return TRUE;
}
return (v != 0 ? TRUE : FALSE);
} | 0 | [
"CWE-125"
] | oniguruma | 65a9b1aa03c9bc2dc01b074295b9603232cb3b78 | 6,028,975,462,016,297,000,000,000,000,000,000,000 | 17 | onig-5.9.2 |
int sys_waitpid(pid_t pid,int *status,int options)
{
#ifdef HAVE_WAITPID
return waitpid(pid,status,options);
#else /* HAVE_WAITPID */
return wait4(pid, status, options, NULL);
#endif /* HAVE_WAITPID */
} | 0 | [
"CWE-20"
] | samba | d77a74237e660dd2ce9f1e14b02635f8a2569653 | 126,217,641,098,603,540,000,000,000,000,000,000,000 | 8 | s3: nmbd: Fix bug 10633 - nmbd denial of service
The Linux kernel has a bug in that it can give spurious
wakeups on a non-blocking UDP socket for a non-deliverable packet.
When nmbd was changed to use non-blocking sockets it
became vulnerable to a spurious wakeup from poll/epoll.
Fix sys_recvfile() to return on EWOULDBLOCK/EAGAIN.
CVE-2014-0244
https://bugzilla.samba.org/show_bug.cgi?id=10633
Signed-off-by: Jeremy Allison <[email protected]>
Reviewed-by: Andreas Schneider <[email protected]> |
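The fix's behaviour in miniature: on a non-blocking socket a poll() wakeup does not guarantee a deliverable packet, so EWOULDBLOCK/EAGAIN must be treated as "nothing to read":
#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>

static ssize_t recv_one(int fd, void *buf, size_t len)
{
    ssize_t n = recv(fd, buf, len, 0);
    if (n == -1 && (errno == EWOULDBLOCK || errno == EAGAIN))
        return 0;   /* spurious wakeup: no packet, not an error */
    return n;
}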
struct Line *LineBitmapRequester::Start8Lines(UBYTE c)
{
if (*m_pppImage[c] == NULL) {
struct Line **target = m_pppImage[c];
int cnt = 8;
do {
*target = new(m_pEnviron) struct Line;
(*target)->m_pData = (LONG *)m_pEnviron->AllocMem(m_pulWidth[c] * sizeof(LONG));
target = &((*target)->m_pNext);
} while(--cnt);
}
return *m_pppImage[c];
} | 0 | [
"CWE-476"
] | libjpeg | 51c3241b6da39df30f016b63f43f31c4011222c7 | 53,768,466,745,874,430,000,000,000,000,000,000,000 | 13 | Fixed a NULL-pointer access in the line-based reconstruction process
in case no valid scan was found and no data is present. |
static DexFieldId *dex_field_id_new(RzBuffer *buf, ut64 offset) {
DexFieldId *field_id = RZ_NEW0(DexFieldId);
if (!field_id) {
return NULL;
}
read_le16_or_fail(buf, field_id->class_idx, dex_field_id_new_fail);
read_le16_or_fail(buf, field_id->type_idx, dex_field_id_new_fail);
read_le32_or_fail(buf, field_id->name_idx, dex_field_id_new_fail);
field_id->offset = offset;
return field_id;
dex_field_id_new_fail:
free(field_id);
return NULL;
} | 0 | [
"CWE-787"
] | rizin | 1524f85211445e41506f98180f8f69f7bf115406 | 46,848,446,276,848,630,000,000,000,000,000,000,000 | 16 | fix #2969 - oob write (1 byte) in dex.c |
static int __swiotlb_mmap(struct device *dev,
struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
struct dma_attrs *attrs)
{
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
is_device_dma_coherent(dev));
return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
} | 0 | [
"CWE-200"
] | linux | 6829e274a623187c24f7cfc0e3d35f25d087fcc5 | 333,008,266,269,815,200,000,000,000,000,000,000,000 | 9 | arm64: dma-mapping: always clear allocated buffers
Buffers allocated by dma_alloc_coherent() are always zeroed on Alpha,
ARM (32bit), MIPS, PowerPC, x86/x86_64 and probably other architectures.
It turned out that some drivers rely on this 'feature'. Allocated buffer
might be also exposed to userspace with dma_mmap() call, so clearing it
is desired from security point of view to avoid exposing random memory
to userspace. This patch unifies dma_alloc_coherent() behavior on ARM64
architecture with other implementations by unconditionally zeroing
allocated buffer.
Cc: <[email protected]> # v3.14+
Signed-off-by: Marek Szyprowski <[email protected]>
Signed-off-by: Will Deacon <[email protected]> |
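The commit's rule in a user-space sketch (malloc standing in for the page allocator): clear every buffer unconditionally before it can be mapped out:
#include <stdlib.h>
#include <string.h>

static void *dma_alloc_sketch(size_t size)
{
    void *p = malloc(size);
    if (p)
        memset(p, 0, size);   /* never hand stale memory to userspace */
    return p;
}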