Column summary (each record below repeats these eight fields in this order; the raw func source stays on its own line and the remaining fields are labeled):

func: string, lengths 0 to 484k (function source code)
target: int64, 0 or 1 (vulnerability label)
cwe: list, 0 to 4 entries (associated CWE identifiers)
project: string, 799 distinct values (source project)
commit_id: string, length 40 (commit SHA-1)
hash: float64, range 1,215,700,430,453,689,100,000,000 to 340,281,914,521,452,260,000,000,000,000 (record hash)
size: int64, 1 to 24k (function length in lines)
message: string, lengths 0 to 13.3k (commit message)
int copy_mount_options(const void __user * data, unsigned long *where) { int i; unsigned long page; unsigned long size; *where = 0; if (!data) return 0; if (!(page = __get_free_page(GFP_KERNEL))) return -ENOMEM; /* We only care that *some* data at the address the user * gave us is valid. Just in case, we'll zero * the remainder of the page. */ /* copy_from_user cannot cross TASK_SIZE ! */ size = TASK_SIZE - (unsigned long)data; if (size > PAGE_SIZE) size = PAGE_SIZE; i = size - exact_copy_from_user((void *)page, data, size); if (!i) { free_page(page); return -EFAULT; } if (i != PAGE_SIZE) memset((char *)page + i, 0, PAGE_SIZE - i); *where = page; return 0; }
target: 0
cwe: [ "CWE-269" ]
project: linux-2.6
commit_id: ee6f958291e2a768fd727e7a67badfff0b67711a
hash: 91,332,603,080,480,560,000,000,000,000,000,000,000
size: 32
message:
check privileges before setting mount propagation There's a missing check for CAP_SYS_ADMIN in do_change_type(). Signed-off-by: Miklos Szeredi <[email protected]> Cc: Al Viro <[email protected]> Cc: Christoph Hellwig <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
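The fix described above is a single privilege gate at the top of do_change_type(). A minimal stand-alone sketch of the pattern; capable() is stubbed here, since the real helper and the CAP_SYS_ADMIN constant live in the kernel:

    #include <stdio.h>

    #define EPERM 1
    #define CAP_SYS_ADMIN 21          /* value from linux/capability.h */

    /* Stub for the kernel's capable(); pretend the caller is unprivileged. */
    static int capable(int cap) { (void)cap; return 0; }

    /* Shape of the fixed do_change_type(): check privileges before any
     * mount-propagation state is touched. */
    static int do_change_type_sketch(void)
    {
        if (!capable(CAP_SYS_ADMIN))
            return -EPERM;            /* reject unprivileged callers up front */
        /* ... change mount propagation here ... */
        return 0;
    }

    int main(void)
    {
        printf("unprivileged caller: %d\n", do_change_type_sketch());
        return 0;
    }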
static inline bool ipv6_prefix_equal(const struct in6_addr *addr1, const struct in6_addr *addr2, unsigned int prefixlen) { const __be32 *a1 = addr1->s6_addr32; const __be32 *a2 = addr2->s6_addr32; unsigned int pdw, pbi; /* check complete u32 in prefix */ pdw = prefixlen >> 5; if (pdw && memcmp(a1, a2, pdw << 2)) return false; /* check incomplete u32 in prefix */ pbi = prefixlen & 0x1f; if (pbi && ((a1[pdw] ^ a2[pdw]) & htonl((0xffffffff) << (32 - pbi)))) return false; return true; }
target: 0
cwe: [ "CWE-416", "CWE-284", "CWE-264" ]
project: linux
commit_id: 45f6fad84cc305103b28d73482b344d7f5b76f39
hash: 225,355,367,500,754,850,000,000,000,000,000,000,000
size: 20
message:
ipv6: add complete rcu protection around np->opt This patch addresses multiple problems: UDP/RAW sendmsg() need to get a stable struct ipv6_txoptions while the socket is not locked: other threads can change np->opt concurrently. Dmitry posted a syzkaller (http://github.com/google/syzkaller) program demonstrating a use-after-free. Starting with TCP/DCCP lockless listeners, tcp_v6_syn_recv_sock() and dccp_v6_request_recv_sock() also need to use RCU protection to dereference np->opt once (before calling ipv6_dup_options()). This patch adds full RCU protection to np->opt. Reported-by: Dmitry Vyukov <[email protected]> Signed-off-by: Eric Dumazet <[email protected]> Acked-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: David S. Miller <[email protected]>
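The discipline behind the fix is to dereference np->opt exactly once and work from that snapshot; the actual patch wraps this in RCU plus reference counting (txopt_get/txopt_put), which the stand-alone C11 sketch below leaves out:

    #include <stdatomic.h>
    #include <stdio.h>

    struct ipv6_txoptions { int tot_len; };   /* reduced stand-in */

    /* Shared pointer that another thread may swap concurrently (np->opt). */
    static _Atomic(struct ipv6_txoptions *) np_opt;

    static void sendmsg_path(void)
    {
        /* Load the pointer exactly once (rcu_dereference in the kernel) and
         * use only the snapshot; re-reading np->opt could observe a pointer
         * whose object was freed in between. */
        struct ipv6_txoptions *opt =
            atomic_load_explicit(&np_opt, memory_order_acquire);
        if (opt)
            printf("tot_len=%d\n", opt->tot_len);
    }

    int main(void)
    {
        struct ipv6_txoptions o = { .tot_len = 8 };
        atomic_store(&np_opt, &o);
        sendmsg_path();
        return 0;
    }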
R_API ut32 U(r_bin_java_swap_uint)(ut32 x) { const ut32 Byte0 = x & 0x000000FF; const ut32 Byte1 = x & 0x0000FF00; const ut32 Byte2 = x & 0x00FF0000; const ut32 Byte3 = x & 0xFF000000; return (Byte0 << 24) | (Byte1 << 8) | (Byte2 >> 8) | (Byte3 >> 24); }
target: 0
cwe: [ "CWE-119", "CWE-788" ]
project: radare2
commit_id: 6c4428f018d385fc80a33ecddcb37becea685dd5
hash: 174,313,683,199,790,130,000,000,000,000,000,000,000
size: 7
message:
Improve boundary checks to fix oobread segfaults ##crash * Reported by Cen Zhang via huntr.dev * Reproducer: bins/fuzzed/javaoob-havoc.class
GF_Err url_box_read(GF_Box *s, GF_BitStream *bs) { GF_DataEntryURLBox *ptr = (GF_DataEntryURLBox *)s; if (ptr->size) { ptr->location = (char*)gf_malloc((u32) ptr->size); if (! ptr->location) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->location, (u32)ptr->size); } return GF_OK; }
target: 1
cwe: [ "CWE-787" ]
project: gpac
commit_id: 388ecce75d05e11fc8496aa4857b91245007d26e
hash: 113,561,327,000,945,940,000,000,000,000,000,000,000
size: 11
message:
fixed #1587
pshash_delindex (psi) ps_index_t psi; { struct pidstat *ps; ps = &bgpids.storage[psi]; if (ps->pid == NO_PID) return; if (ps->bucket_next != NO_PID) bgpids.storage[ps->bucket_next].bucket_prev = ps->bucket_prev; if (ps->bucket_prev != NO_PID) bgpids.storage[ps->bucket_prev].bucket_next = ps->bucket_next; else *(pshash_getbucket (ps->pid)) = ps->bucket_next; }
target: 0
cwe: []
project: bash
commit_id: 955543877583837c85470f7fb8a97b7aa8d45e6c
hash: 84,577,887,630,966,700,000,000,000,000,000,000,000
size: 16
message:
bash-4.4-rc2 release
void finish(int) override { pg->agent_choose_mode_restart(); }
target: 0
cwe: [ "CWE-287", "CWE-284" ]
project: ceph
commit_id: 5ead97120e07054d80623dada90a5cc764c28468
hash: 268,463,337,409,787,740,000,000,000,000,000,000,000
size: 3
message:
auth/cephx: add authorizer challenge Allow the accepting side of a connection to reject an initial authorizer with a random challenge. The connecting side then has to respond with an updated authorizer proving they are able to decrypt the service's challenge and that the new authorizer was produced for this specific connection instance. The accepting side requires this challenge and response unconditionally if the client side advertises they have the feature bit. Servers wishing to require this improved level of authentication simply have to require the appropriate feature. Signed-off-by: Sage Weil <[email protected]> (cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b) # Conflicts: # src/auth/Auth.h # src/auth/cephx/CephxProtocol.cc # src/auth/cephx/CephxProtocol.h # src/auth/none/AuthNoneProtocol.h # src/msg/Dispatcher.h # src/msg/async/AsyncConnection.cc - const_iterator - ::decode vs decode - AsyncConnection ctor arg noise - get_random_bytes(), not cct->random()
tuplesort_set_bound(Tuplesortstate *state, int64 bound) { /* Assert we're called before loading any tuples */ Assert(state->status == TSS_INITIAL); Assert(state->memtupcount == 0); Assert(!state->bounded); #ifdef DEBUG_BOUNDED_SORT /* Honor GUC setting that disables the feature (for easy testing) */ if (!optimize_bounded_sort) return; #endif /* We want to be able to compute bound * 2, so limit the setting */ if (bound > (int64) (INT_MAX / 2)) return; state->bounded = true; state->bound = (int) bound; /* * Bounded sorts are not an effective target for abbreviated key * optimization. Disable by setting state to be consistent with no * abbreviation support. */ state->sortKeys->abbrev_converter = NULL; if (state->sortKeys->abbrev_full_comparator) state->sortKeys->comparator = state->sortKeys->abbrev_full_comparator; /* Not strictly necessary, but be tidy */ state->sortKeys->abbrev_abort = NULL; state->sortKeys->abbrev_full_comparator = NULL; }
target: 0
cwe: [ "CWE-209" ]
project: postgres
commit_id: 804b6b6db4dcfc590a468e7be390738f9f7755fb
hash: 332,985,186,049,482,900,000,000,000,000,000,000,000
size: 33
message:
Fix column-privilege leak in error-message paths While building error messages to return to the user, BuildIndexValueDescription, ExecBuildSlotValueDescription and ri_ReportViolation would happily include the entire key or entire row in the result returned to the user, even if the user didn't have access to view all of the columns being included. Instead, include only those columns which the user is providing or which the user has select rights on. If the user does not have any rights to view the table or any of the columns involved then no detail is provided and a NULL value is returned from BuildIndexValueDescription and ExecBuildSlotValueDescription. Note that, for key cases, the user must have access to all of the columns for the key to be shown; a partial key will not be returned. Further, in master only, do not return any data for cases where row security is enabled on the relation and row security should be applied for the user. This required a bit of refactoring and moving of things around related to RLS; note the addition of utils/misc/rls.c. Back-patch all the way, as column-level privileges are now in all supported versions. This has been assigned CVE-2014-8161, but since the issue and the patch have already been publicized on pgsql-hackers, there's no point in trying to hide this commit.
static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){ Index *pIdx; Index *pPk; int nPk; int nExtra; int i, j; sqlite3 *db = pParse->db; Vdbe *v = pParse->pVdbe; /* Mark every PRIMARY KEY column as NOT NULL (except for imposter tables) */ if( !db->init.imposterTable ){ for(i=0; i<pTab->nCol; i++){ if( (pTab->aCol[i].colFlags & COLFLAG_PRIMKEY)!=0 ){ pTab->aCol[i].notNull = OE_Abort; } } pTab->tabFlags |= TF_HasNotNull; } /* Convert the P3 operand of the OP_CreateBtree opcode from BTREE_INTKEY ** into BTREE_BLOBKEY. */ if( pParse->addrCrTab ){ assert( v ); sqlite3VdbeChangeP3(v, pParse->addrCrTab, BTREE_BLOBKEY); } /* Locate the PRIMARY KEY index. Or, if this table was originally ** an INTEGER PRIMARY KEY table, create a new PRIMARY KEY index. */ if( pTab->iPKey>=0 ){ ExprList *pList; Token ipkToken; sqlite3TokenInit(&ipkToken, pTab->aCol[pTab->iPKey].zName); pList = sqlite3ExprListAppend(pParse, 0, sqlite3ExprAlloc(db, TK_ID, &ipkToken, 0)); if( pList==0 ) return; if( IN_RENAME_OBJECT ){ sqlite3RenameTokenRemap(pParse, pList->a[0].pExpr, &pTab->iPKey); } pList->a[0].sortFlags = pParse->iPkSortOrder; assert( pParse->pNewTable==pTab ); pTab->iPKey = -1; sqlite3CreateIndex(pParse, 0, 0, 0, pList, pTab->keyConf, 0, 0, 0, 0, SQLITE_IDXTYPE_PRIMARYKEY); if( db->mallocFailed || pParse->nErr ) return; pPk = sqlite3PrimaryKeyIndex(pTab); assert( pPk->nKeyCol==1 ); }else{ pPk = sqlite3PrimaryKeyIndex(pTab); assert( pPk!=0 ); /* ** Remove all redundant columns from the PRIMARY KEY. For example, change ** "PRIMARY KEY(a,b,a,b,c,b,c,d)" into just "PRIMARY KEY(a,b,c,d)". Later ** code assumes the PRIMARY KEY contains no repeated columns. */ for(i=j=1; i<pPk->nKeyCol; i++){ if( isDupColumn(pPk, j, pPk, i) ){ pPk->nColumn--; }else{ testcase( hasColumn(pPk->aiColumn, j, pPk->aiColumn[i]) ); pPk->azColl[j] = pPk->azColl[i]; pPk->aSortOrder[j] = pPk->aSortOrder[i]; pPk->aiColumn[j++] = pPk->aiColumn[i]; } } pPk->nKeyCol = j; } assert( pPk!=0 ); pPk->isCovering = 1; if( !db->init.imposterTable ) pPk->uniqNotNull = 1; nPk = pPk->nColumn = pPk->nKeyCol; /* Bypass the creation of the PRIMARY KEY btree and the sqlite_master ** table entry. This is only required if currently generating VDBE ** code for a CREATE TABLE (not when parsing one as part of reading ** a database schema). */ if( v && pPk->tnum>0 ){ assert( db->init.busy==0 ); sqlite3VdbeChangeOpcode(v, pPk->tnum, OP_Goto); } /* The root page of the PRIMARY KEY is the table root page */ pPk->tnum = pTab->tnum; /* Update the in-memory representation of all UNIQUE indices by converting ** the final rowid column into one or more columns of the PRIMARY KEY. 
*/ for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ int n; if( IsPrimaryKeyIndex(pIdx) ) continue; for(i=n=0; i<nPk; i++){ if( !isDupColumn(pIdx, pIdx->nKeyCol, pPk, i) ){ testcase( hasColumn(pIdx->aiColumn, pIdx->nKeyCol, pPk->aiColumn[i]) ); n++; } } if( n==0 ){ /* This index is a superset of the primary key */ pIdx->nColumn = pIdx->nKeyCol; continue; } if( resizeIndexObject(db, pIdx, pIdx->nKeyCol+n) ) return; for(i=0, j=pIdx->nKeyCol; i<nPk; i++){ if( !isDupColumn(pIdx, pIdx->nKeyCol, pPk, i) ){ testcase( hasColumn(pIdx->aiColumn, pIdx->nKeyCol, pPk->aiColumn[i]) ); pIdx->aiColumn[j] = pPk->aiColumn[i]; pIdx->azColl[j] = pPk->azColl[i]; if( pPk->aSortOrder[i] ){ /* See ticket https://www.sqlite.org/src/info/bba7b69f9849b5bf */ pIdx->bAscKeyBug = 1; } j++; } } assert( pIdx->nColumn>=pIdx->nKeyCol+n ); assert( pIdx->nColumn>=j ); } /* Add all table columns to the PRIMARY KEY index */ nExtra = 0; for(i=0; i<pTab->nCol; i++){ if( !hasColumn(pPk->aiColumn, nPk, i) && (pTab->aCol[i].colFlags & COLFLAG_VIRTUAL)==0 ) nExtra++; } if( resizeIndexObject(db, pPk, nPk+nExtra) ) return; for(i=0, j=nPk; i<pTab->nCol; i++){ if( !hasColumn(pPk->aiColumn, j, i) && (pTab->aCol[i].colFlags & COLFLAG_VIRTUAL)==0 ){ assert( j<pPk->nColumn ); pPk->aiColumn[j] = i; pPk->azColl[j] = sqlite3StrBINARY; j++; } } assert( pPk->nColumn==j ); assert( pTab->nNVCol<=j ); recomputeColumnsNotIndexed(pPk); }
target: 0
cwe: [ "CWE-674", "CWE-787" ]
project: sqlite
commit_id: 38096961c7cd109110ac21d3ed7dad7e0cb0ae06
hash: 217,313,902,779,994,330,000,000,000,000,000,000,000
size: 143
message:
Avoid infinite recursion in the ALTER TABLE code when a view contains an unused CTE that references, directly or indirectly, the view itself. FossilOrigin-Name: 1d2e53a39b87e364685e21de137655b6eee725e4c6d27fc90865072d7c5892b5
static bool sk_skb_is_valid_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { switch (off) { case bpf_ctx_range(struct __sk_buff, tc_classid): case bpf_ctx_range(struct __sk_buff, data_meta): return false; } if (type == BPF_WRITE) { switch (off) { case bpf_ctx_range(struct __sk_buff, tc_index): case bpf_ctx_range(struct __sk_buff, priority): break; default: return false; } } switch (off) { case bpf_ctx_range(struct __sk_buff, mark): return false; case bpf_ctx_range(struct __sk_buff, data): info->reg_type = PTR_TO_PACKET; break; case bpf_ctx_range(struct __sk_buff, data_end): info->reg_type = PTR_TO_PACKET_END; break; } return bpf_skb_is_valid_access(off, size, type, prog, info); }
target: 0
cwe: [ "CWE-120" ]
project: linux
commit_id: 050fad7c4534c13c8eb1d9c2ba66012e014773cb
hash: 167,970,556,260,321,930,000,000,000,000,000,000,000
size: 34
message:
bpf: fix truncated jump targets on heavy expansions Recently during testing, I ran into the following panic: [ 207.892422] Internal error: Accessing user space memory outside uaccess.h routines: 96000004 [#1] SMP [ 207.901637] Modules linked in: binfmt_misc [...] [ 207.966530] CPU: 45 PID: 2256 Comm: test_verifier Tainted: G W 4.17.0-rc3+ #7 [ 207.974956] Hardware name: FOXCONN R2-1221R-A4/C2U4N_MB, BIOS G31FB18A 03/31/2017 [ 207.982428] pstate: 60400005 (nZCv daif +PAN -UAO) [ 207.987214] pc : bpf_skb_load_helper_8_no_cache+0x34/0xc0 [ 207.992603] lr : 0xffff000000bdb754 [ 207.996080] sp : ffff000013703ca0 [ 207.999384] x29: ffff000013703ca0 x28: 0000000000000001 [ 208.004688] x27: 0000000000000001 x26: 0000000000000000 [ 208.009992] x25: ffff000013703ce0 x24: ffff800fb4afcb00 [ 208.015295] x23: ffff00007d2f5038 x22: ffff00007d2f5000 [ 208.020599] x21: fffffffffeff2a6f x20: 000000000000000a [ 208.025903] x19: ffff000009578000 x18: 0000000000000a03 [ 208.031206] x17: 0000000000000000 x16: 0000000000000000 [ 208.036510] x15: 0000ffff9de83000 x14: 0000000000000000 [ 208.041813] x13: 0000000000000000 x12: 0000000000000000 [ 208.047116] x11: 0000000000000001 x10: ffff0000089e7f18 [ 208.052419] x9 : fffffffffeff2a6f x8 : 0000000000000000 [ 208.057723] x7 : 000000000000000a x6 : 00280c6160000000 [ 208.063026] x5 : 0000000000000018 x4 : 0000000000007db6 [ 208.068329] x3 : 000000000008647a x2 : 19868179b1484500 [ 208.073632] x1 : 0000000000000000 x0 : ffff000009578c08 [ 208.078938] Process test_verifier (pid: 2256, stack limit = 0x0000000049ca7974) [ 208.086235] Call trace: [ 208.088672] bpf_skb_load_helper_8_no_cache+0x34/0xc0 [ 208.093713] 0xffff000000bdb754 [ 208.096845] bpf_test_run+0x78/0xf8 [ 208.100324] bpf_prog_test_run_skb+0x148/0x230 [ 208.104758] sys_bpf+0x314/0x1198 [ 208.108064] el0_svc_naked+0x30/0x34 [ 208.111632] Code: 91302260 f9400001 f9001fa1 d2800001 (29500680) [ 208.117717] ---[ end trace 263cb8a59b5bf29f ]--- The program itself which caused this had a long jump over the whole instruction sequence where all of the inner instructions required heavy expansions into multiple BPF instructions. Additionally, I also had BPF hardening enabled, which once more requires rewrites of all constant values in order to blind them. Each time we rewrite insns, bpf_adj_branches() would need to potentially adjust branch targets which cross the patchlet boundary to accommodate the additional delta. Eventually that led to the case where the target offset could not fit into insn->off's upper 0x7fff limit anymore, where the offset then wraps around becoming negative (in the s16 universe), or vice versa depending on the jump direction. Therefore it becomes necessary to detect and reject any such occasions in a generic way for native eBPF and cBPF to eBPF migrations. For the latter we can simply check bounds in the bpf_convert_filter()'s BPF_EMIT_JMP helper macro and bail out once we surpass limits. The bpf_patch_insn_single() for native eBPF (and cBPF to eBPF in case of subsequent hardening) is a bit more complex in that we need to detect such truncations before hitting the bpf_prog_realloc(). Thus the latter is split into an extra pass to probe problematic offsets on the original program in order to fail early. With that in place and carefully tested I no longer hit the panic and the rewrites are rejected properly. The above example panic I've seen on bpf-next, though the issue itself is generic in that a guard against this issue in bpf seems more appropriate in this case.
Signed-off-by: Daniel Borkmann <[email protected]> Acked-by: Martin KaFai Lau <[email protected]> Signed-off-by: Alexei Starovoitov <[email protected]>
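The generic guard described above amounts to a range check on the adjusted branch target before it is stored back into the s16 insn->off field. A stand-alone sketch of that check, with illustrative names:

    #include <stdint.h>
    #include <stdio.h>

    /* After patching, a branch target may move beyond what insn->off (s16)
     * can represent; the fix is to detect that and reject the program
     * instead of letting the offset wrap. */
    static int jmp_offset_ok(long adjusted_off)
    {
        return adjusted_off >= INT16_MIN && adjusted_off <= INT16_MAX;
    }

    int main(void)
    {
        long off = 0x7fff;                         /* at the limit: fine */
        printf("%d\n", jmp_offset_ok(off));
        printf("%d\n", jmp_offset_ok(off + 1));    /* would wrap: reject */
        return 0;
    }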
static void jpeg_term_destination(j_compress_ptr cinfo) { VncState *vs = cinfo->client_data; Buffer *buffer = &vs->tight->jpeg; buffer->offset = buffer->capacity - cinfo->dest->free_in_buffer; }
target: 0
cwe: [ "CWE-401" ]
project: qemu
commit_id: 6bf21f3d83e95bcc4ba35a7a07cc6655e8b010b0
hash: 81,794,345,796,819,960,000,000,000,000,000,000,000
size: 7
message:
vnc: fix memory leak when vnc disconnect Currently when qemu receives a vnc connect, it creates a 'VncState' to represent this connection. In 'vnc_worker_thread_loop' it creates a local 'VncState'. The connection 'VncState' and local 'VncState' exchange data in 'vnc_async_encoding_start' and 'vnc_async_encoding_end'. In 'zrle_compress_data' it calls 'deflateInit2' to allocate the libz library opaque data. The 'VncState' used in 'zrle_compress_data' is the local 'VncState'. In 'vnc_zrle_clear' it calls 'deflateEnd' to free the libz library opaque data. The 'VncState' used in 'vnc_zrle_clear' is the connection 'VncState'. In the current implementation there is a memory leak when the vnc disconnects. Following is the asan output backtrace: Direct leak of 29760 byte(s) in 5 object(s) allocated from: 0 0xffffa67ef3c3 in __interceptor_calloc (/lib64/libasan.so.4+0xd33c3) 1 0xffffa65071cb in g_malloc0 (/lib64/libglib-2.0.so.0+0x571cb) 2 0xffffa5e968f7 in deflateInit2_ (/lib64/libz.so.1+0x78f7) 3 0xaaaacec58613 in zrle_compress_data ui/vnc-enc-zrle.c:87 4 0xaaaacec58613 in zrle_send_framebuffer_update ui/vnc-enc-zrle.c:344 5 0xaaaacec34e77 in vnc_send_framebuffer_update ui/vnc.c:919 6 0xaaaacec5e023 in vnc_worker_thread_loop ui/vnc-jobs.c:271 7 0xaaaacec5e5e7 in vnc_worker_thread ui/vnc-jobs.c:340 8 0xaaaacee4d3c3 in qemu_thread_start util/qemu-thread-posix.c:502 9 0xffffa544e8bb in start_thread (/lib64/libpthread.so.0+0x78bb) 10 0xffffa53965cb in thread_start (/lib64/libc.so.6+0xd55cb) This is because the opaque allocated in 'deflateInit2' is not freed in 'deflateEnd'. The reason is that 'deflateEnd' calls 'deflateStateCheck', and the latter checks whether 's->strm != strm' (libz's data structure). This check will be true, so 'deflateEnd' just returns 'Z_STREAM_ERROR' and does not free the data allocated in 'deflateInit2'. The reason this happens is that the 'VncState' contains the whole 'VncZrle', so when calling 'deflateInit2', the 's->strm' will be the local address. So 's->strm != strm' will be true. To fix this issue, we need to make 'zrle' of 'VncState' a pointer. Then the connection 'VncState' and local 'VncState' exchange mechanism will work as expected. The 'tight' of 'VncState' has the same issue; let's also turn it into a pointer. Reported-by: Ying Fang <[email protected]> Signed-off-by: Li Qiang <[email protected]> Message-id: [email protected] Signed-off-by: Gerd Hoffmann <[email protected]>
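The root cause above is that VncState objects are exchanged by struct copy, so a stream object embedded by value ends up at a different address than the one libz recorded, and the s->strm != strm test in deflateStateCheck fires. A small sketch of why the pointer indirection fixes it; all names are illustrative:

    #include <stdio.h>

    /* Mimics libz's internal back-pointer: the stream records its own address. */
    struct stream { struct stream *self; };

    struct state_embedded { struct stream z; };   /* old layout: copied by value  */
    struct state_shared   { struct stream *z; };  /* fixed layout: shared pointer */

    int main(void)
    {
        struct state_embedded a, b;
        a.z.self = &a.z;
        b = a;                            /* struct copy relocates the stream */
        printf("embedded ok? %d\n", b.z.self == &b.z);   /* 0: check fails  */

        struct stream s = { .self = &s };
        struct state_shared c = { .z = &s }, d;
        d = c;                            /* copy shares the one stream       */
        printf("pointer  ok? %d\n", d.z->self == d.z);   /* 1: check holds  */
        return 0;
    }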
static sasl_session_t *find_session(const char *uid) { sasl_session_t *p; mowgli_node_t *n; if (uid == NULL) return NULL; MOWGLI_ITER_FOREACH(n, sessions.head) { p = n->data; if(p->uid != NULL && !strcmp(p->uid, uid)) return p; } return NULL; }
target: 0
cwe: [ "CWE-288" ]
project: atheme
commit_id: de2ba3ca8f6c39b41431d989f3ac66002a487839
hash: 140,196,469,929,734,440,000,000,000,000,000,000,000
size: 17
message:
modules/saslserv/main: backport 7.3 commits for pending EID login This backports commits 4e664c75d0b280a052eb & ceb0235695e6736ce2ab from the master branch. The IRCv3.1 SASL specification contains the following wording: If the client completes registration (with CAP END, NICK, USER and any other necessary messages) while the SASL authentication is still in progress, the server SHOULD abort it and send a 906 numeric, then register the client without authentication. We were relying on this behaviour (which was our mistake; it's a SHOULD, not a MUST), which turned out to be implemented in every IRC server daemon (that supports SASL) that we are aware of. This means that if someone completes registration without having completed an SASL negotiation, the SASL session would be aborted before the client is introduced to the network. At that point, the session would not exist and the client would not be logged in. The InspIRCd developers changed this behaviour in the inspircd/inspircd@407b2e004cf66e442771 commit. It no longer aborts negotiation when a client prematurely completes registration. This means that if the client is attempting a multi-step (challenge-response) authentication mechanism, and that mechanism caches user credentials at some point before completion, the client can prematurely end negotiation and get logged in as that user. Worse still, SASL impersonation lets the attacker set the authzid to their intended victim, allowing them to login as anyone, even if they don't have a challenge-response authentication credential configured. This does not exist in version 7.1; the victim's account there has to have such a credential to be vulnerable to this attack. Vulnerable configurations are as follows: - All of: - InspIRCd 3+ - Any of: - Atheme 7.1 (any version) - Atheme 7.2 (any version before 7.2.12; this commit) - Atheme 7.3 (any version before commit 4e664c75d0b280a052eb) - Any of: - The saslserv/scram module is loaded - The saslserv/ecdh-x25519-challenge module is loaded - The saslserv/ecdsa-nist256p-challenge module is loaded This is a fix for a security vulnerability. The master (7.3) branch was already fixed in 4e664c75d0b280a052eb, but the scope of the problem was not fully known at that time. The 7.1 branch is no longer supported, is not receiving security updates, and will not be patched; users of the 7.1 series (using an IRCd that does not abort the SASL session when the client prematurely completes registration) must upgrade, or unload the `saslserv/ecdsa-nist256p-challenge` module. This problem was discovered by and reported by @edk0.
nfs4_dec_nlink_locked(struct inode *inode) { NFS_I(inode)->cache_validity |= NFS_INO_INVALID_OTHER; drop_nlink(inode); }
target: 0
cwe: [ "CWE-787" ]
project: linux
commit_id: b4487b93545214a9db8cbf32e86411677b0cca21
hash: 151,926,691,507,443,200,000,000,000,000,000,000,000
size: 5
message:
nfs: Fix getxattr kernel panic and memory overflow Move the buffer size check to decode_attr_security_label() before memcpy() Only call memcpy() if the buffer is large enough Fixes: aa9c2669626c ("NFS: Client implementation of Labeled-NFS") Signed-off-by: Jeffrey Mitchell <[email protected]> [Trond: clean up duplicate test of label->len != 0] Signed-off-by: Trond Myklebust <[email protected]>
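The fix is an order-of-operations change: validate the destination size first, and only then memcpy(). A stand-alone sketch of the pattern with illustrative names (not the kernel's):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Copy a decoded label only if it fits; before the fix the copy ran
     * before the buffer size was checked. */
    static int copy_label(char *dst, size_t dst_len,
                          const char *src, size_t src_len)
    {
        if (src_len > dst_len)
            return -ERANGE;            /* refuse; never overflow */
        memcpy(dst, src, src_len);
        return 0;
    }

    int main(void)
    {
        char buf[8];
        printf("%d\n", copy_label(buf, sizeof buf, "ok", 2));
        printf("%d\n", copy_label(buf, sizeof buf, "way too long", 12));
        return 0;
    }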
sdap_get_ad_match_rule_initgroups_step(struct tevent_req *subreq) { errno_t ret; struct tevent_req *req = tevent_req_callback_data(subreq, struct tevent_req); struct sdap_ad_match_rule_initgr_state *state = tevent_req_data(req, struct sdap_ad_match_rule_initgr_state); size_t count, i; struct sysdb_attrs **groups; char **sysdb_grouplist; ret = sdap_get_generic_recv(subreq, state, &count, &groups); talloc_zfree(subreq); if (ret != EOK) { DEBUG(SSSDBG_MINOR_FAILURE, "LDAP search failed: [%s]\n", sss_strerror(ret)); goto error; } DEBUG(SSSDBG_TRACE_LIBS, "Search for users returned %zu results\n", count); /* Add this batch of groups to the list */ if (count > 0) { state->groups = talloc_realloc(state, state->groups, struct sysdb_attrs *, state->count + count + 1); if (!state->groups) { tevent_req_error(req, ENOMEM); return; } /* Copy the new groups into the list */ for (i = 0; i < count; i++) { state->groups[state->count + i] = talloc_steal(state->groups, groups[i]); } state->count += count; state->groups[state->count] = NULL; } /* Continue checking other search bases */ state->base_iter++; if (state->search_bases[state->base_iter]) { /* There are more search bases to try */ ret = sdap_get_ad_match_rule_initgroups_next_base(req); if (ret != EOK) { goto error; } return; } /* No more search bases. Save the groups. */ if (state->count == 0) { DEBUG(SSSDBG_TRACE_LIBS, "User is not a member of any group in the search bases\n"); } /* Get the current sysdb group list for this user * so we can update it. */ ret = get_sysdb_grouplist(state, state->sysdb, state->domain, state->name, &sysdb_grouplist); if (ret != EOK) { DEBUG(SSSDBG_MINOR_FAILURE, "Could not get the list of groups for [%s] in the sysdb: " "[%s]\n", state->name, strerror(ret)); goto error; } /* The extensibleMatch search rule eliminates the need for * nested group searches, so we can just update the * memberships now. */ ret = sdap_initgr_common_store(state->sysdb, state->domain, state->opts, state->name, SYSDB_MEMBER_USER, sysdb_grouplist, state->groups, state->count); if (ret != EOK) { DEBUG(SSSDBG_MINOR_FAILURE, "Could not store groups for user [%s]: [%s]\n", state->name, strerror(ret)); goto error; } tevent_req_done(req); return; error: tevent_req_error(req, ret); }
target: 0
cwe: [ "CWE-264" ]
project: sssd
commit_id: 191d7f7ce3de10d9e19eaa0a6ab3319bcd4ca95d
hash: 197,884,846,974,650,520,000,000,000,000,000,000,000
size: 98
message:
AD: process non-posix nested groups using tokenGroups When initgr is performed for AD supporting tokenGroups, do not skip non-posix groups. Resolves: https://fedorahosted.org/sssd/ticket/2343 Reviewed-by: Michal Židek <[email protected]> (cherry picked from commit 4932db6258ccfb612a3a28eb6a618c2f042b9d58)
static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); int room; room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh; /* Check #1 */ if (room > 0 && !tcp_under_memory_pressure(sk)) { int incr; /* Check #2. Increase window, if skb with such overhead * will fit to rcvbuf in future. */ if (tcp_win_from_space(sk, skb->truesize) <= skb->len) incr = 2 * tp->advmss; else incr = __tcp_grow_window(sk, skb); if (incr) { incr = max_t(int, incr, 2 * skb->len); tp->rcv_ssthresh += min(room, incr); inet_csk(sk)->icsk_ack.quick |= 1; } } }
target: 0
cwe: [ "CWE-190" ]
project: net
commit_id: 3b4929f65b0d8249f19a50245cd88ed1a2f78cff
hash: 112,254,697,720,269,070,000,000,000,000,000,000,000
size: 26
message:
tcp: limit payload size of sacked skbs Jonathan Looney reported that TCP can trigger the following crash in tcp_shifted_skb(): BUG_ON(tcp_skb_pcount(skb) < pcount); This can happen if the remote peer has advertised the smallest MSS that linux TCP accepts: 48. An skb can hold 17 fragments, and each fragment can hold 32KB on x86, or 64KB on PowerPC. This means that the 16-bit width of TCP_SKB_CB(skb)->tcp_gso_segs can overflow. Note that tcp_sendmsg() builds skbs with less than 64KB of payload, so this problem needs SACK to be enabled. SACK blocks allow TCP to coalesce multiple skbs in the retransmit queue, thus filling the 17 fragments to maximal capacity. CVE-2019-11477 -- u16 overflow of TCP_SKB_CB(skb)->tcp_gso_segs Fixes: 832d11c5cd07 ("tcp: Try to restore large SKBs while SACK processing") Signed-off-by: Eric Dumazet <[email protected]> Reported-by: Jonathan Looney <[email protected]> Acked-by: Neal Cardwell <[email protected]> Reviewed-by: Tyler Hicks <[email protected]> Cc: Yuchung Cheng <[email protected]> Cc: Bruce Curtis <[email protected]> Cc: Jonathan Lemon <[email protected]> Signed-off-by: David S. Miller <[email protected]>
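The arithmetic hazard named above is a u16 counter: accumulating segment counts while coalescing skbs can wrap tcp_gso_segs past 65535. A stand-alone sketch of the wrap and the kind of widened check the fix implies; the exact kernel logic differs:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t gso_segs = 65000;     /* segments already in the skb      */
        uint32_t add = 1000;           /* segments a SACK shift would add  */

        /* Do the addition in a wider type so overflow is detectable. */
        uint32_t wide = (uint32_t)gso_segs + add;
        if (wide > UINT16_MAX) {
            /* the fix: refuse to coalesce rather than truncate */
            printf("would overflow: %u segments\n", wide);
            return 0;
        }
        gso_segs = (uint16_t)wide;
        printf("coalesced: %u\n", gso_segs);
        return 0;
    }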
static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb) { struct pci_dev *pdev = acb->pdev; switch (acb->adapter_type){ case ACB_ADAPTER_TYPE_A:{ acb->pmuA = ioremap(pci_resource_start(pdev,0), pci_resource_len(pdev,0)); if (!acb->pmuA) { printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no); return false; } break; } case ACB_ADAPTER_TYPE_B:{ void __iomem *mem_base0, *mem_base1; mem_base0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); if (!mem_base0) { printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no); return false; } mem_base1 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2)); if (!mem_base1) { iounmap(mem_base0); printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no); return false; } acb->mem_base0 = mem_base0; acb->mem_base1 = mem_base1; break; } case ACB_ADAPTER_TYPE_C:{ acb->pmuC = ioremap_nocache(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1)); if (!acb->pmuC) { printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no); return false; } if (readl(&acb->pmuC->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &acb->pmuC->outbound_doorbell_clear);/*clear interrupt*/ return true; } break; } case ACB_ADAPTER_TYPE_D: { void __iomem *mem_base0; unsigned long addr, range, flags; addr = (unsigned long)pci_resource_start(pdev, 0); range = pci_resource_len(pdev, 0); flags = pci_resource_flags(pdev, 0); mem_base0 = ioremap(addr, range); if (!mem_base0) { pr_notice("arcmsr%d: memory mapping region fail\n", acb->host->host_no); return false; } acb->mem_base0 = mem_base0; break; } } return true; }
target: 0
cwe: [ "CWE-119", "CWE-787" ]
project: linux
commit_id: 7bc2b55a5c030685b399bb65b6baa9ccc3d1f167
hash: 334,198,568,829,339,940,000,000,000,000,000,000,000
size: 60
message:
scsi: arcmsr: Buffer overflow in arcmsr_iop_message_xfer() We need to put an upper bound on "user_len" so the memcpy() doesn't overflow. Cc: <[email protected]> Reported-by: Marco Grassi <[email protected]> Signed-off-by: Dan Carpenter <[email protected]> Reviewed-by: Tomas Henzl <[email protected]> Signed-off-by: Martin K. Petersen <[email protected]>
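Per the message, the fix bounds user_len before the memcpy() into a fixed-size message buffer. A stand-alone sketch; the buffer-size constant is illustrative, not the driver's:

    #include <stdio.h>
    #include <string.h>

    #define API_DATA_BUFLEN 1032       /* illustrative fixed buffer size */

    static char msgbuf[API_DATA_BUFLEN];

    /* Reject attacker-controlled lengths before copying. */
    static int copy_user_data(const char *src, size_t user_len)
    {
        if (user_len > sizeof(msgbuf))
            return -1;                 /* upper bound enforced up front */
        memcpy(msgbuf, src, user_len);
        return 0;
    }

    int main(void)
    {
        char big[2048] = { 0 };
        printf("%d\n", copy_user_data(big, sizeof(big)));  /* rejected */
        printf("%d\n", copy_user_data(big, 16));           /* ok       */
        return 0;
    }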
comp_opt_exact_or_map_info(OptExactInfo* e, OptMapInfo* m) { #define COMP_EM_BASE 20 int ve, vm; if (m->value <= 0) return -1; ve = COMP_EM_BASE * e->len * (e->ignore_case ? 1 : 2); vm = COMP_EM_BASE * 5 * 2 / m->value; return comp_distance_value(&e->mmd, &m->mmd, ve, vm); }
target: 0
cwe: [ "CWE-125" ]
project: php-src
commit_id: c6e34d91b88638966662caac62c4d0e90538e317
hash: 288,105,096,801,742,750,000,000,000,000,000,000,000
size: 11
message:
Fix bug #77371 (heap buffer overflow in mb regex functions - compile_string_node)
static void dump_status(struct req_state *s, int status, const char *status_name) { s->formatter->set_status(status, status_name); try { RESTFUL_IO(s)->send_status(status, status_name); } catch (rgw::io::Exception& e) { ldout(s->cct, 0) << "ERROR: s->cio->send_status() returned err=" << e.what() << dendl; } }
target: 0
cwe: [ "CWE-770" ]
project: ceph
commit_id: ab29bed2fc9f961fe895de1086a8208e21ddaddc
hash: 97,141,961,130,966,500,000,000,000,000,000,000,000
size: 11
message:
rgw: fix issues with 'enforce bounds' patch The patch to enforce bounds on max-keys/max-uploads/max-parts had a few issues that would prevent us from compiling it. Instead of changing the code provided by the submitter, we're addressing them in a separate commit to maintain the DCO. Signed-off-by: Joao Eduardo Luis <[email protected]> Signed-off-by: Abhishek Lekshmanan <[email protected]> (cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a) mimic specific fixes: As the largeish change from master g_conf() isn't in mimic yet, use the g_conf global structure, also make rgw_op use the value from req_info ceph context as we do for all the requests
encode_winansi(unsigned long codepoint) { // Use this ugly switch statement to avoid a static, which is not // thread-safe. unsigned char ch = '\0'; switch (codepoint) { case 0x20ac: ch = 0x80; break; case 0x201a: ch = 0x82; break; case 0x192: ch = 0x83; break; case 0x201e: ch = 0x84; break; case 0x2026: ch = 0x85; break; case 0x2020: ch = 0x86; break; case 0x2021: ch = 0x87; break; case 0x2c6: ch = 0x88; break; case 0x2030: ch = 0x89; break; case 0x160: ch = 0x8a; break; case 0x2039: ch = 0x8b; break; case 0x152: ch = 0x8c; break; case 0x17d: ch = 0x8e; break; case 0x2018: ch = 0x91; break; case 0x2019: ch = 0x92; break; case 0x201c: ch = 0x93; break; case 0x201d: ch = 0x94; break; case 0x2022: ch = 0x95; break; case 0x2013: ch = 0x96; break; case 0x2014: ch = 0x97; break; case 0x303: ch = 0x98; break; case 0x2122: ch = 0x99; break; case 0x161: ch = 0x9a; break; case 0x203a: ch = 0x9b; break; case 0x153: ch = 0x9c; break; case 0x17e: ch = 0x9e; break; case 0x178: ch = 0x9f; break; case 0xa0: ch = 0xa0; break; default: break; } return ch; }
target: 0
cwe: [ "CWE-787" ]
project: qpdf
commit_id: d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
hash: 12,254,923,528,805,125,000,000,000,000,000,000,000
size: 96
message:
Fix sign and conversion warnings (major) This makes all integer type conversions that have potential data loss explicit with calls that do range checks and raise an exception. After this commit, qpdf builds with no warnings when -Wsign-conversion -Wconversion is used with gcc or clang or when -W3 -Wd4800 is used with MSVC. This significantly reduces the likelihood of potential crashes from bogus integer values. There are some parts of the code that take int when they should take size_t or an offset. Such places would make qpdf not support files with more than 2^31 of something that usually wouldn't be so large. In the event that such a file shows up and is valid, at least qpdf would raise an error in the right spot so the issue could be legitimately addressed rather than failing in some weird way because of a silent overflow condition.
static int update_prepare_order_info(rdpContext* context, ORDER_INFO* orderInfo, UINT32 orderType) { int length = 1; orderInfo->fieldFlags = 0; orderInfo->orderType = orderType; orderInfo->controlFlags = ORDER_STANDARD; orderInfo->controlFlags |= ORDER_TYPE_CHANGE; length += 1; length += get_primary_drawing_order_field_bytes(orderInfo->orderType, NULL); length += update_prepare_bounds(context, orderInfo); return length; }
target: 0
cwe: [ "CWE-20", "CWE-125" ]
project: FreeRDP
commit_id: 733ee3208306b1ea32697b356c0215180fc3f049
hash: 255,267,148,814,229,220,000,000,000,000,000,000,000
size: 12
message:
Fixed invalid access in update_recv_primary_order CVE-2020-11095 thanks @antonio-morales for finding this.
static size_t vnc_client_read_plain(VncState *vs) { size_t ret; VNC_DEBUG("Read plain %p size %zd offset %zd\n", vs->input.buffer, vs->input.capacity, vs->input.offset); buffer_reserve(&vs->input, 4096); ret = vnc_client_read_buf(vs, buffer_end(&vs->input), 4096); if (!ret) return 0; vs->input.offset += ret; return ret; }
target: 0
cwe: [ "CWE-401" ]
project: qemu
commit_id: 6bf21f3d83e95bcc4ba35a7a07cc6655e8b010b0
hash: 262,401,705,668,759,830,000,000,000,000,000,000,000
size: 12
message:
vnc: fix memory leak when vnc disconnect Currently when qemu receives a vnc connect, it creates a 'VncState' to represent this connection. In 'vnc_worker_thread_loop' it creates a local 'VncState'. The connection 'VncState' and local 'VncState' exchange data in 'vnc_async_encoding_start' and 'vnc_async_encoding_end'. In 'zrle_compress_data' it calls 'deflateInit2' to allocate the libz library opaque data. The 'VncState' used in 'zrle_compress_data' is the local 'VncState'. In 'vnc_zrle_clear' it calls 'deflateEnd' to free the libz library opaque data. The 'VncState' used in 'vnc_zrle_clear' is the connection 'VncState'. In the current implementation there is a memory leak when the vnc disconnects. Following is the asan output backtrace: Direct leak of 29760 byte(s) in 5 object(s) allocated from: 0 0xffffa67ef3c3 in __interceptor_calloc (/lib64/libasan.so.4+0xd33c3) 1 0xffffa65071cb in g_malloc0 (/lib64/libglib-2.0.so.0+0x571cb) 2 0xffffa5e968f7 in deflateInit2_ (/lib64/libz.so.1+0x78f7) 3 0xaaaacec58613 in zrle_compress_data ui/vnc-enc-zrle.c:87 4 0xaaaacec58613 in zrle_send_framebuffer_update ui/vnc-enc-zrle.c:344 5 0xaaaacec34e77 in vnc_send_framebuffer_update ui/vnc.c:919 6 0xaaaacec5e023 in vnc_worker_thread_loop ui/vnc-jobs.c:271 7 0xaaaacec5e5e7 in vnc_worker_thread ui/vnc-jobs.c:340 8 0xaaaacee4d3c3 in qemu_thread_start util/qemu-thread-posix.c:502 9 0xffffa544e8bb in start_thread (/lib64/libpthread.so.0+0x78bb) 10 0xffffa53965cb in thread_start (/lib64/libc.so.6+0xd55cb) This is because the opaque allocated in 'deflateInit2' is not freed in 'deflateEnd'. The reason is that 'deflateEnd' calls 'deflateStateCheck', and the latter checks whether 's->strm != strm' (libz's data structure). This check will be true, so 'deflateEnd' just returns 'Z_STREAM_ERROR' and does not free the data allocated in 'deflateInit2'. The reason this happens is that the 'VncState' contains the whole 'VncZrle', so when calling 'deflateInit2', the 's->strm' will be the local address. So 's->strm != strm' will be true. To fix this issue, we need to make 'zrle' of 'VncState' a pointer. Then the connection 'VncState' and local 'VncState' exchange mechanism will work as expected. The 'tight' of 'VncState' has the same issue; let's also turn it into a pointer. Reported-by: Ying Fang <[email protected]> Signed-off-by: Li Qiang <[email protected]> Message-id: [email protected] Signed-off-by: Gerd Hoffmann <[email protected]>
parse_CLONE(char *arg, const struct ofpact_parse_params *pp) { const size_t clone_offset = ofpacts_pull(pp->ofpacts); struct ofpact_nest *clone = ofpact_put_CLONE(pp->ofpacts); char *error; ofpbuf_pull(pp->ofpacts, sizeof *clone); error = ofpacts_parse_copy(arg, pp, false, OFPACT_CLONE); /* header points to the action list */ pp->ofpacts->header = ofpbuf_push_uninit(pp->ofpacts, sizeof *clone); clone = pp->ofpacts->header; if (ofpbuf_oversized(pp->ofpacts)) { free(error); return xasprintf("input too big"); } ofpact_finish_CLONE(pp->ofpacts, &clone); ofpbuf_push_uninit(pp->ofpacts, clone_offset); return error; }
target: 0
cwe: [ "CWE-416" ]
project: ovs
commit_id: 77cccc74deede443e8b9102299efc869a52b65b2
hash: 289,889,028,028,683,500,000,000,000,000,000,000,000
size: 21
message:
ofp-actions: Fix use-after-free while decoding RAW_ENCAP. While decoding RAW_ENCAP action, decode_ed_prop() might re-allocate ofpbuf if there is no enough space left. However, function 'decode_NXAST_RAW_ENCAP' continues to use old pointer to 'encap' structure leading to write-after-free and incorrect decoding. ==3549105==ERROR: AddressSanitizer: heap-use-after-free on address 0x60600000011a at pc 0x0000005f6cc6 bp 0x7ffc3a2d4410 sp 0x7ffc3a2d4408 WRITE of size 2 at 0x60600000011a thread T0 #0 0x5f6cc5 in decode_NXAST_RAW_ENCAP lib/ofp-actions.c:4461:20 #1 0x5f0551 in ofpact_decode ./lib/ofp-actions.inc2:4777:16 #2 0x5ed17c in ofpacts_decode lib/ofp-actions.c:7752:21 #3 0x5eba9a in ofpacts_pull_openflow_actions__ lib/ofp-actions.c:7791:13 #4 0x5eb9fc in ofpacts_pull_openflow_actions lib/ofp-actions.c:7835:12 #5 0x64bb8b in ofputil_decode_packet_out lib/ofp-packet.c:1113:17 #6 0x65b6f4 in ofp_print_packet_out lib/ofp-print.c:148:13 #7 0x659e3f in ofp_to_string__ lib/ofp-print.c:1029:16 #8 0x659b24 in ofp_to_string lib/ofp-print.c:1244:21 #9 0x65a28c in ofp_print lib/ofp-print.c:1288:28 #10 0x540d11 in ofctl_ofp_parse utilities/ovs-ofctl.c:2814:9 #11 0x564228 in ovs_cmdl_run_command__ lib/command-line.c:247:17 #12 0x56408a in ovs_cmdl_run_command lib/command-line.c:278:5 #13 0x5391ae in main utilities/ovs-ofctl.c:179:9 #14 0x7f6911ce9081 in __libc_start_main (/lib64/libc.so.6+0x27081) #15 0x461fed in _start (utilities/ovs-ofctl+0x461fed) Fix that by getting a new pointer before using. Credit to OSS-Fuzz. Fuzzer regression test will fail only with AddressSanitizer enabled. Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=27851 Fixes: f839892a206a ("OF support and translation of generic encap and decap") Acked-by: William Tu <[email protected]> Signed-off-by: Ilya Maximets <[email protected]>
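The decode bug and its fix reduce to one rule: after any operation that may reallocate a buffer, re-derive every pointer into it from the new base (the parse_CLONE function above does exactly this with pp->ofpacts->header). A stand-alone sketch:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        char *buf = malloc(16);
        if (!buf)
            return 1;
        memcpy(buf, "encap-header....", 16);
        char *encap = buf;               /* pointer into the buffer */

        /* Growing the buffer may move it; 'encap' may now dangle. */
        char *grown = realloc(buf, 1 << 20);
        if (!grown) { free(buf); return 1; }
        buf = grown;

        encap = buf;                     /* the fix: re-derive after realloc */
        printf("%.5s\n", encap);
        free(buf);
        return 0;
    }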
static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg) { return reg_is_pkt_pointer(reg) || reg->type == PTR_TO_PACKET_END; }
target: 0
cwe: [ "CWE-20" ]
project: linux
commit_id: c131187db2d3fa2f8bf32fdf4e9a4ef805168467
hash: 58,392,334,513,482,720,000,000,000,000,000,000,000
size: 5
message:
bpf: fix branch pruning logic When the verifier detects that a register contains a runtime constant and it's compared with another constant it will prune exploration of the branch that is guaranteed not to be taken at runtime. This is all correct, but a malicious program may be constructed in such a way that it always has a constant comparison and the other branch is never taken under any conditions. In this case such a path through the program will not be explored by the verifier. It won't be taken at run-time either, but since all instructions are JITed the malicious program may cause JITs to complain about using reserved fields, etc. To fix the issue we have to track the instructions explored by the verifier and sanitize instructions that are dead at run time with NOPs. We cannot reject such dead code, since llvm generates it for valid C code; llvm doesn't do as much data flow analysis as the verifier does. Fixes: 17a5267067f3 ("bpf: verifier (add verifier core)") Signed-off-by: Alexei Starovoitov <[email protected]> Acked-by: Daniel Borkmann <[email protected]> Signed-off-by: Daniel Borkmann <[email protected]>
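The mitigation described above records which instructions the verifier's walk reached and overwrites the unreachable ones with NOPs rather than rejecting the program. A hedged sketch over a toy instruction array:

    #include <stdbool.h>
    #include <stdio.h>

    enum { OP_NOP, OP_MOV, OP_JMP, OP_EXIT };

    int main(void)
    {
        int  insn[5] = { OP_MOV, OP_JMP, OP_MOV, OP_MOV, OP_EXIT };
        bool seen[5] = { true,   true,   false,  false,  true };

        /* Sanitize: anything the analysis never reached is dead at run
         * time; replace it so the JIT never sees the stale bytes. */
        for (int i = 0; i < 5; i++)
            if (!seen[i])
                insn[i] = OP_NOP;

        for (int i = 0; i < 5; i++)
            printf("%d ", insn[i]);
        printf("\n");
        return 0;
    }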
MagickExport Image *SepiaToneImage(const Image *image,const double threshold, ExceptionInfo *exception) { #define SepiaToneImageTag "SepiaTone/Image" CacheView *image_view, *sepia_view; Image *sepia_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Initialize sepia-toned image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); sepia_image=CloneImage(image,0,0,MagickTrue,exception); if (sepia_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(sepia_image,DirectClass,exception) == MagickFalse) { sepia_image=DestroyImage(sepia_image); return((Image *) NULL); } /* Tone each row of the image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); sepia_view=AcquireAuthenticCacheView(sepia_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,sepia_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double intensity, tone; intensity=GetPixelIntensity(image,p); tone=intensity > threshold ? (double) QuantumRange : intensity+ (double) QuantumRange-threshold; SetPixelRed(sepia_image,ClampToQuantum(tone),q); tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange : intensity+(double) QuantumRange-7.0*threshold/6.0; SetPixelGreen(sepia_image,ClampToQuantum(tone),q); tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0; SetPixelBlue(sepia_image,ClampToQuantum(tone),q); tone=threshold/7.0; if ((double) GetPixelGreen(image,q) < tone) SetPixelGreen(sepia_image,ClampToQuantum(tone),q); if ((double) GetPixelBlue(image,q) < tone) SetPixelBlue(sepia_image,ClampToQuantum(tone),q); SetPixelAlpha(sepia_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(sepia_image); } if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SepiaToneImage) #endif proceed=SetImageProgress(image,SepiaToneImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } sepia_view=DestroyCacheView(sepia_view); image_view=DestroyCacheView(image_view); (void) NormalizeImage(sepia_image,exception); (void) ContrastImage(sepia_image,MagickTrue,exception); if (status == MagickFalse) sepia_image=DestroyImage(sepia_image); return(sepia_image); }
target: 0
cwe: [ "CWE-119", "CWE-703" ]
project: ImageMagick
commit_id: 3cbfb163cff9e5b8cdeace8312e9bfee810ed02b
hash: 306,488,196,474,486,500,000,000,000,000,000,000,000
size: 118
message:
https://github.com/ImageMagick/ImageMagick/issues/296
verify (gcry_mpi_t r, gcry_mpi_t s, gcry_mpi_t input, DSA_public_key *pkey ) { gpg_err_code_t rc = 0; gcry_mpi_t w, u1, u2, v; gcry_mpi_t base[3]; gcry_mpi_t ex[3]; gcry_mpi_t hash; unsigned int nbits; if( !(mpi_cmp_ui( r, 0 ) > 0 && mpi_cmp( r, pkey->q ) < 0) ) return GPG_ERR_BAD_SIGNATURE; /* Assertion 0 < r < n failed. */ if( !(mpi_cmp_ui( s, 0 ) > 0 && mpi_cmp( s, pkey->q ) < 0) ) return GPG_ERR_BAD_SIGNATURE; /* Assertion 0 < s < n failed. */ nbits = mpi_get_nbits (pkey->q); rc = _gcry_dsa_normalize_hash (input, &hash, nbits); if (rc) return rc; w = mpi_alloc( mpi_get_nlimbs(pkey->q) ); u1 = mpi_alloc( mpi_get_nlimbs(pkey->q) ); u2 = mpi_alloc( mpi_get_nlimbs(pkey->q) ); v = mpi_alloc( mpi_get_nlimbs(pkey->p) ); /* w = s^(-1) mod q */ mpi_invm( w, s, pkey->q ); /* u1 = (hash * w) mod q */ mpi_mulm( u1, hash, w, pkey->q ); /* u2 = r * w mod q */ mpi_mulm( u2, r, w, pkey->q ); /* v = g^u1 * y^u2 mod p mod q */ base[0] = pkey->g; ex[0] = u1; base[1] = pkey->y; ex[1] = u2; base[2] = NULL; ex[2] = NULL; mpi_mulpowm( v, base, ex, pkey->p ); mpi_fdiv_r( v, v, pkey->q ); if (mpi_cmp( v, r )) { if (DBG_CIPHER) { log_mpidump (" i", input); log_mpidump (" h", hash); log_mpidump (" v", v); log_mpidump (" r", r); log_mpidump (" s", s); } rc = GPG_ERR_BAD_SIGNATURE; } mpi_free(w); mpi_free(u1); mpi_free(u2); mpi_free(v); if (hash != input) mpi_free (hash); return rc; }
target: 0
cwe: [ "CWE-203" ]
project: libgcrypt
commit_id: 7c2943309d14407b51c8166c4dcecb56a3628567
hash: 31,975,729,531,115,707,000,000,000,000,000,000,000
size: 62
message:
dsa,ecdsa: Fix use of nonce, use larger one. * cipher/dsa-common.c (_gcry_dsa_modify_k): New. * cipher/pubkey-internal.h (_gcry_dsa_modify_k): New. * cipher/dsa.c (sign): Use _gcry_dsa_modify_k. * cipher/ecc-ecdsa.c (_gcry_ecc_ecdsa_sign): Likewise. * cipher/ecc-gost.c (_gcry_ecc_gost_sign): Likewise. CVE-id: CVE-2019-13627 GnuPG-bug-id: 4626 Signed-off-by: NIIBE Yutaka <[email protected]>
void cql_server::response::serialize(const event::schema_change& event, uint8_t version) { if (version >= 3) { write_string(to_string(event.change)); write_string(to_string(event.target)); write_string(event.keyspace); switch (event.target) { case event::schema_change::target_type::KEYSPACE: break; case event::schema_change::target_type::TYPE: case event::schema_change::target_type::TABLE: write_string(event.arguments[0]); break; case event::schema_change::target_type::FUNCTION: case event::schema_change::target_type::AGGREGATE: write_string(event.arguments[0]); write_string_list(std::vector<sstring>(event.arguments.begin() + 1, event.arguments.end())); break; } } else { switch (event.target) { // FIXME: Should we handle FUNCTION and AGGREGATE the same way as type? // FIXME: How do we get here? Can a client using v2 know about UDF? case event::schema_change::target_type::TYPE: case event::schema_change::target_type::FUNCTION: case event::schema_change::target_type::AGGREGATE: // The v1/v2 protocol is unable to represent these changes. Tell the // client that the keyspace was updated instead. write_string(to_string(event::schema_change::change_type::UPDATED)); write_string(event.keyspace); write_string(""); break; case event::schema_change::target_type::TABLE: case event::schema_change::target_type::KEYSPACE: write_string(to_string(event.change)); write_string(event.keyspace); if (event.target == event::schema_change::target_type::TABLE) { write_string(event.arguments[0]); } else { write_string(""); } } } }
target: 0
cwe: []
project: scylladb
commit_id: 1c2eef384da439b0457b6d71c7e37d7268e471cb
hash: 315,743,978,349,483,970,000,000,000,000,000,000,000
size: 44
message:
transport/server.cc: Return correct size of decompressed lz4 buffer An incorrect size is returned from the function, which could lead to crashes or undefined behavior. Fix by erroring out in these cases. Fixes #11476
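A sketch of the check the message implies, written against the stock liblz4 C API rather than Scylla's wrapper: LZ4_decompress_safe() returns the decompressed byte count or a negative value on error, so error out when the result is negative or differs from the expected size instead of propagating a wrong size.

    #include <lz4.h>      /* liblz4; link with -llz4 */
    #include <stdio.h>

    /* Returns 0 on success, -1 on any decode failure or size mismatch. */
    static int decompress_exact(const char *src, int src_len,
                                char *dst, int expected_len)
    {
        int n = LZ4_decompress_safe(src, dst, src_len, expected_len);
        if (n < 0 || n != expected_len)
            return -1;     /* error out; never report a wrong size */
        return 0;
    }

    int main(void)
    {
        char src[64], dst[64];
        int c = LZ4_compress_default("hello lz4", src, 9, sizeof(src));
        printf("%d\n", c > 0 && decompress_exact(src, c, dst, 9) == 0);
        return 0;
    }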
static void family_init(QuicFamily *family, int bpc, int limit) { int l, b; for (l = 0; l < bpc; l++) { /* fill arrays indexed by code number */ int altprefixlen, altcodewords; altprefixlen = limit - bpc; if (altprefixlen > (int)(bppmask[bpc - l])) { altprefixlen = bppmask[bpc - l]; } altcodewords = bppmask[bpc] + 1 - (altprefixlen << l); family->nGRcodewords[l] = (altprefixlen << l); family->notGRsuffixlen[l] = ceil_log_2(altcodewords); /* needed for decoding only */ family->notGRcwlen[l] = altprefixlen + family->notGRsuffixlen[l]; family->notGRprefixmask[l] = bppmask[32 - altprefixlen]; /* needed for decoding only */ for (b = 0; b < 256; b++) { unsigned int code, len; golomb_coding_slow(family, b, l, &code, &len); family->golomb_code[b][l] = code; family->golomb_code_len[b][l] = len; } } decorrelate_init(family, bpc); correlate_init(family, bpc); }
target: 0
cwe: []
project: spice-common
commit_id: 762e0abae36033ccde658fd52d3235887b60862d
hash: 289,414,642,679,739,330,000,000,000,000,000,000,000
size: 30
message:
quic: Check we have some data to start decoding quic image All paths already pass some data to quic_decode_begin, but check it anyway; it's not an expensive test. Checking for not 0 is enough; all other words will potentially be read by calling more_io_words, but we need one to avoid a potential initial buffer overflow or dereferencing an invalid pointer. Signed-off-by: Frediano Ziglio <[email protected]> Acked-by: Uri Lublin <[email protected]>
_equalLoadStmt(const LoadStmt *a, const LoadStmt *b) { COMPARE_STRING_FIELD(filename); return true; }
target: 0
cwe: [ "CWE-362" ]
project: postgres
commit_id: 5f173040e324f6c2eebb90d86cf1b0cdb5890f0a
hash: 180,561,571,100,479,800,000,000,000,000,000,000,000
size: 6
message:
Avoid repeated name lookups during table and index DDL. If the name lookups come to different conclusions due to concurrent activity, we might perform some parts of the DDL on a different table than other parts. At least in the case of CREATE INDEX, this can be used to cause the permissions checks to be performed against a different table than the index creation, allowing for a privilege escalation attack. This changes the calling convention for DefineIndex, CreateTrigger, transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible (in 9.2 and newer), and AlterTable (in 9.1 and older). In addition, CheckRelationOwnership is removed in 9.2 and newer and the calling convention is changed in older branches. A field has also been added to the Constraint node (FkConstraint in 8.4). Third-party code calling these functions or using the Constraint node will require updating. Report by Andres Freund. Patch by Robert Haas and Andres Freund, reviewed by Tom Lane. Security: CVE-2014-0062
bool EbmlMaster::InsertElement(EbmlElement & element, size_t position) { std::vector<EbmlElement *>::iterator Itr = ElementList.begin(); while (Itr != ElementList.end() && position--) { ++Itr; } if ((Itr == ElementList.end()) && position) return false; ElementList.insert(Itr, &element); return true; }
target: 0
cwe: [ "CWE-703" ]
project: libebml
commit_id: 88409e2a94dd3b40ff81d08bf6d92f486d036b24
hash: 249,522,507,123,264,670,000,000,000,000,000,000,000
size: 13
message:
EbmlMaster: propagate upper level element after infinite sized one correctly When the parser encountered a deeply nested element with an infinite size then a following element of an upper level was not propagated correctly. Instead the element with the infinite size was added into the EBML element tree a second time resulting in memory access after freeing it and multiple attempts to free the same memory address during destruction. Fixes the issue reported as Cisco TALOS-CAN-0037.
proto_read_null_string (p11_rpc_message *msg, CK_UTF8CHAR_PTR *val) { const unsigned char *data; size_t n_data; assert (msg != NULL); assert (val != NULL); assert (msg->input != NULL); /* Check that we're supposed to have this at this point */ assert (!msg->signature || p11_rpc_message_verify_part (msg, "z")); if (!p11_rpc_buffer_get_byte_array (msg->input, &msg->parsed, &data, &n_data)) return PARSE_ERROR; /* Allocate a block of memory for it */ *val = p11_rpc_message_alloc_extra (msg, n_data + 1); if (*val == NULL) return CKR_DEVICE_MEMORY; memcpy (*val, data, n_data); (*val)[n_data] = 0; return CKR_OK; }
target: 0
cwe: [ "CWE-190" ]
project: p11-kit
commit_id: 5307a1d21a50cacd06f471a873a018d23ba4b963
hash: 236,315,824,351,904,160,000,000,000,000,000,000,000
size: 26
message:
Check for arithmetic overflows before allocating
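proto_read_null_string() above allocates n_data + 1 bytes, and the commit's point is to reject the addition before it can wrap. A stand-alone sketch of the guard:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* If n_data == SIZE_MAX, n_data + 1 wraps to 0 and malloc hands back a
     * zero-byte block that a later memcpy of n_data bytes would overflow. */
    static void *alloc_nul_terminated(size_t n_data)
    {
        if (n_data >= SIZE_MAX)
            return NULL;               /* refuse: n_data + 1 would overflow */
        return malloc(n_data + 1);
    }

    int main(void)
    {
        printf("%p\n", (void *)alloc_nul_terminated(SIZE_MAX));  /* (nil) */
        void *p = alloc_nul_terminated(15);
        printf("%s\n", p ? "ok" : "oom");
        free(p);
        return 0;
    }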
Field::geometry_type get_geometry_type() const { return Type_geometry_attributes::get_geometry_type(); };
target: 0
cwe: [ "CWE-617" ]
project: server
commit_id: 807945f2eb5fa22e6f233cc17b85a2e141efe2c8
hash: 139,578,382,652,606,460,000,000,000,000,000,000,000
size: 2
message:
MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order... When doing condition pushdown from HAVING into WHERE, Item_equal::create_pushable_equalities() calls item->set_extraction_flag(IMMUTABLE_FL) for constant items. Then, Item::cleanup_excluding_immutables_processor() checks for this flag to see if it should call item->cleanup() or leave the item as-is. The failure happens when a constant item has a non-constant one inside it, like: (tbl.col=0 AND impossible_cond) item->walk(cleanup_excluding_immutables_processor) works in a bottom-up way so it 1. will call Item_func_eq(tbl.col=0)->cleanup() 2. will not call Item_cond_and->cleanup (as the AND is constant) This creates an item tree where a fixed Item has an un-fixed Item inside it which eventually causes an assertion failure. Fixed by introducing this rule: instead of just calling item->set_extraction_flag(IMMUTABLE_FL); we call Item::walk() to set the flag for all sub-items of the item.
static void scrub_put_ctx(struct scrub_ctx *sctx) { if (refcount_dec_and_test(&sctx->refs)) scrub_free_ctx(sctx); }
target: 0
cwe: [ "CWE-476", "CWE-284" ]
project: linux
commit_id: 09ba3bc9dd150457c506e4661380a6183af651c1
hash: 231,187,587,214,810,400,000,000,000,000,000,000,000
size: 5
message:
btrfs: merge btrfs_find_device and find_device Both btrfs_find_device() and find_device() do the same thing, except that the latter does not take the seed device into account in the device scanning context. We can merge them. Signed-off-by: Anand Jain <[email protected]> Reviewed-by: David Sterba <[email protected]> Signed-off-by: David Sterba <[email protected]>
void iscsi_set_session_parameters( struct iscsi_sess_ops *ops, struct iscsi_param_list *param_list, int leading) { char *tmpptr; struct iscsi_param *param; pr_debug("----------------------------------------------------" "--------------\n"); list_for_each_entry(param, &param_list->param_list, p_list) { if (!IS_PSTATE_ACCEPTOR(param) && !IS_PSTATE_PROPOSER(param)) continue; if (!strcmp(param->name, INITIATORNAME)) { if (!param->value) continue; if (leading) snprintf(ops->InitiatorName, sizeof(ops->InitiatorName), "%s", param->value); pr_debug("InitiatorName: %s\n", param->value); } else if (!strcmp(param->name, INITIATORALIAS)) { if (!param->value) continue; snprintf(ops->InitiatorAlias, sizeof(ops->InitiatorAlias), "%s", param->value); pr_debug("InitiatorAlias: %s\n", param->value); } else if (!strcmp(param->name, TARGETNAME)) { if (!param->value) continue; if (leading) snprintf(ops->TargetName, sizeof(ops->TargetName), "%s", param->value); pr_debug("TargetName: %s\n", param->value); } else if (!strcmp(param->name, TARGETALIAS)) { if (!param->value) continue; snprintf(ops->TargetAlias, sizeof(ops->TargetAlias), "%s", param->value); pr_debug("TargetAlias: %s\n", param->value); } else if (!strcmp(param->name, TARGETPORTALGROUPTAG)) { ops->TargetPortalGroupTag = simple_strtoul(param->value, &tmpptr, 0); pr_debug("TargetPortalGroupTag: %s\n", param->value); } else if (!strcmp(param->name, MAXCONNECTIONS)) { ops->MaxConnections = simple_strtoul(param->value, &tmpptr, 0); pr_debug("MaxConnections: %s\n", param->value); } else if (!strcmp(param->name, INITIALR2T)) { ops->InitialR2T = !strcmp(param->value, YES); pr_debug("InitialR2T: %s\n", param->value); } else if (!strcmp(param->name, IMMEDIATEDATA)) { ops->ImmediateData = !strcmp(param->value, YES); pr_debug("ImmediateData: %s\n", param->value); } else if (!strcmp(param->name, MAXBURSTLENGTH)) { ops->MaxBurstLength = simple_strtoul(param->value, &tmpptr, 0); pr_debug("MaxBurstLength: %s\n", param->value); } else if (!strcmp(param->name, FIRSTBURSTLENGTH)) { ops->FirstBurstLength = simple_strtoul(param->value, &tmpptr, 0); pr_debug("FirstBurstLength: %s\n", param->value); } else if (!strcmp(param->name, DEFAULTTIME2WAIT)) { ops->DefaultTime2Wait = simple_strtoul(param->value, &tmpptr, 0); pr_debug("DefaultTime2Wait: %s\n", param->value); } else if (!strcmp(param->name, DEFAULTTIME2RETAIN)) { ops->DefaultTime2Retain = simple_strtoul(param->value, &tmpptr, 0); pr_debug("DefaultTime2Retain: %s\n", param->value); } else if (!strcmp(param->name, MAXOUTSTANDINGR2T)) { ops->MaxOutstandingR2T = simple_strtoul(param->value, &tmpptr, 0); pr_debug("MaxOutstandingR2T: %s\n", param->value); } else if (!strcmp(param->name, DATAPDUINORDER)) { ops->DataPDUInOrder = !strcmp(param->value, YES); pr_debug("DataPDUInOrder: %s\n", param->value); } else if (!strcmp(param->name, DATASEQUENCEINORDER)) { ops->DataSequenceInOrder = !strcmp(param->value, YES); pr_debug("DataSequenceInOrder: %s\n", param->value); } else if (!strcmp(param->name, ERRORRECOVERYLEVEL)) { ops->ErrorRecoveryLevel = simple_strtoul(param->value, &tmpptr, 0); pr_debug("ErrorRecoveryLevel: %s\n", param->value); } else if (!strcmp(param->name, SESSIONTYPE)) { ops->SessionType = !strcmp(param->value, DISCOVERY); pr_debug("SessionType: %s\n", param->value); } else if (!strcmp(param->name, RDMAEXTENSIONS)) { ops->RDMAExtensions = !strcmp(param->value, YES); pr_debug("RDMAExtensions: %s\n", param->value); } } pr_debug("----------------------------------------------------" "--------------\n"); }
0
[ "CWE-119" ]
target-pending
cea4dcfdad926a27a18e188720efe0f2c9403456
154,944,380,634,730,170,000,000,000,000,000,000,000
116
iscsi-target: fix heap buffer overflow on error If a key was larger than 64 bytes, as checked by iscsi_check_key(), the error response packet, generated by iscsi_add_notunderstood_response(), would still attempt to copy the entire key into the packet, overflowing the structure on the heap. Remote preauthentication kernel memory corruption was possible if a target was configured and listening on the network. CVE-2013-2850 Signed-off-by: Kees Cook <[email protected]> Cc: [email protected] Signed-off-by: Nicholas Bellinger <[email protected]>
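The fix class the message describes is a bounded copy into the error response. A minimal standalone model, assuming a 64-byte key limit and hypothetical structure/function names (the real iscsi-target code differs):

#include <stdio.h>
#include <string.h>

#define MAX_KEY_LEN 64  /* limit enforced by the key check, per the message */

struct notunderstood_resp { char key[MAX_KEY_LEN + 1]; };

/* Truncate instead of copying the attacker-controlled key verbatim. */
static void add_notunderstood_response(struct notunderstood_resp *r,
                                       const char *key)
{
    snprintf(r->key, sizeof(r->key), "%s", key); /* never writes past the buffer */
}

int main(void)
{
    struct notunderstood_resp r;
    char big[256];
    memset(big, 'A', sizeof(big) - 1);
    big[sizeof(big) - 1] = '\0';
    add_notunderstood_response(&r, big);
    printf("stored %zu bytes\n", strlen(r.key)); /* 64, not 255 */
    return 0;
}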
static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_trigger *trigger, struct list_head *list) { int i; u32 size = 0; for (i = 0; i < le32_to_cpu(trigger->num_regions); i++) { u32 reg_id = le32_to_cpu(trigger->data[i]), reg_type; struct iwl_fw_ini_region_cfg *reg; if (WARN_ON(reg_id >= ARRAY_SIZE(fwrt->dump.active_regs))) continue; reg = fwrt->dump.active_regs[reg_id]; if (!reg) { IWL_WARN(fwrt, "WRT: Unassigned region id %d, skipping\n", reg_id); continue; } /* currently the driver supports always on domain only */ if (le32_to_cpu(reg->domain) != IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON) continue; reg_type = le32_to_cpu(reg->region_type); if (reg_type >= ARRAY_SIZE(iwl_dump_ini_region_ops)) continue; size += iwl_dump_ini_mem(fwrt, list, reg, &iwl_dump_ini_region_ops[reg_type]); } if (size) size += iwl_dump_ini_info(fwrt, trigger, list); return size; }
0
[ "CWE-400", "CWE-401" ]
linux
b4b814fec1a5a849383f7b3886b654a13abbda7d
151,751,464,360,015,200,000,000,000,000,000,000,000
39
iwlwifi: dbg_ini: fix memory leak in alloc_sgtable In alloc_sgtable, if alloc_page fails, the allocated table should be released. Signed-off-by: Navid Emamdoost <[email protected]> Signed-off-by: Luca Coelho <[email protected]>
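The leak pattern is a mid-loop allocation failure that frees the pages allocated so far but forgets the table itself. A userspace model of the corrected error path, with malloc() standing in for alloc_page():

#include <stdlib.h>

/* Userspace model of alloc_sgtable()'s error path: on a mid-loop
 * allocation failure, release everything allocated so far. */
static void **alloc_table(size_t n)
{
    void **t = calloc(n, sizeof(*t));
    if (!t)
        return NULL;
    for (size_t i = 0; i < n; i++) {
        t[i] = malloc(4096);           /* stands in for alloc_page() */
        if (!t[i]) {                   /* failure: unwind, don't leak */
            while (i--)
                free(t[i]);
            free(t);                   /* the fix: free the table too */
            return NULL;
        }
    }
    return t;
}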
static double mp_vector_crop(_cimg_math_parser& mp) { double *const ptrd = &_mp_arg(1) + 1; const double *const ptrs = &_mp_arg(2) + 1; const longT length = (longT)mp.opcode[3], start = (longT)_mp_arg(4), sublength = (longT)mp.opcode[5]; if (start<0 || start + sublength>length) throw CImgArgumentException("[" cimg_appname "_math_parser] CImg<%s>: Value accessor '[]': " "Out-of-bounds sub-vector request " "(length: %ld, start: %ld, sub-length: %ld).", mp.imgin.pixel_type(),length,start,sublength); std::memcpy(ptrd,ptrs + start,sublength*sizeof(double)); return cimg::type<double>::nan();
0
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
279,731,169,877,032,530,000,000,000,000,000,000,000
15
Fix other issues in 'CImg<T>::load_bmp()'.
GList *completion_msg(SERVER_REC *win_server, SERVER_REC *find_server, const char *nick, const char *prefix) { GSList *tmp, *list; char *newprefix; g_return_val_if_fail(nick != NULL, NULL); if (servers == NULL) return NULL; list = NULL; if (find_server != NULL) { completion_msg_server(&list, find_server, nick, prefix); return convert_msglist(list); } completion_msg_server(&list, NULL, nick, prefix); for (tmp = servers; tmp != NULL; tmp = tmp->next) { SERVER_REC *rec = tmp->data; if (servers->next == NULL && rec == win_server) newprefix = g_strdup(prefix); else { newprefix = prefix == NULL ? g_strdup_printf("-%s", rec->tag) : g_strdup_printf("%s -%s", prefix, rec->tag); } completion_msg_server(&list, rec, nick, newprefix); g_free_not_null(newprefix); } return convert_msglist(list); }
0
[ "CWE-416" ]
irssi
36564717c9f701e3a339da362ab46d220d27e0c1
182,011,713,990,101,500,000,000,000,000,000,000,000
34
Merge branch 'security' into 'master' Security See merge request irssi/irssi!34 (cherry picked from commit b0d9cb33cd9ef9da7c331409e8b7c57a6f3aef3f)
int check_hex(char *str, int len) { int i; for (i = 0; i < len; i++) { if ((str[i] < '0' && str[i] > '9') && (str[i] < 'a' && str[i] > 'f') && (str[i] < 'A' && str[i] > 'F') ) { return 0; } } return 1; }
0
[ "CWE-119", "CWE-703", "CWE-787" ]
uwsgi
cb4636f7c0af2e97a4eef7a3cdcbd85a71247bfe
315,191,616,083,875,550,000,000,000,000,000,000,000
12
improve uwsgi_expand_path() to sanitize input, avoiding stack corruption and a potential security issue
// Count parentheses/brackets level of each character of the expression. CImg<uintT> get_level(CImg<charT>& _expr) const { bool is_escaped = false, next_is_escaped = false; unsigned int mode = 0, next_mode = 0; // { 0=normal | 1=char-string | 2=vector-string CImg<uintT> res(_expr._width - 1); unsigned int *pd = res._data; int _level = 0; for (const char *ps = _expr._data; *ps && _level>=0; ++ps) { if (!is_escaped && !next_is_escaped && *ps=='\\') next_is_escaped = true; if (!is_escaped && *ps=='\'') { // Non-escaped character if (!mode && ps>_expr._data && *(ps - 1)=='[') next_mode = mode = 2; // Start vector-string else if (mode==2 && *(ps + 1)==']') next_mode = !mode; // End vector-string else if (mode<2) next_mode = mode?(mode = 0):1; // Start/end char-string } *(pd++) = (unsigned int)(mode>=1 || is_escaped?_level + (mode==1): *ps=='(' || *ps=='['?_level++: *ps==')' || *ps==']'?--_level: _level); mode = next_mode; is_escaped = next_is_escaped; next_is_escaped = false; } if (mode) { cimg::strellipsize(_expr,64); throw CImgArgumentException("[" cimg_appname "_math_parser] " "CImg<%s>::%s: Unterminated string literal, in expression '%s'.", pixel_type(),_cimg_mp_calling_function, _expr._data); } if (_level) { cimg::strellipsize(_expr,64); throw CImgArgumentException("[" cimg_appname "_math_parser] " "CImg<%s>::%s: Unbalanced parentheses/brackets, in expression '%s'.", pixel_type(),_cimg_mp_calling_function, _expr._data); } return res;
0
[ "CWE-119", "CWE-787" ]
CImg
ac8003393569aba51048c9d67e1491559877b1d1
229,133,618,166,820,150,000,000,000,000,000,000,000
37
.
static int setup_env(pool *p, cmd_rec *cmd, char *user, char *pass) { struct passwd *pw; config_rec *c, *tmpc; char *origuser, *ourname = NULL, *anonname = NULL, *anongroup = NULL, *ugroup = NULL; char *defaulttransfermode, *defroot = NULL,*defchdir = NULL,*xferlog = NULL; const char *sess_ttyname; int aclp, i, res = 0, allow_chroot_symlinks = TRUE, showsymlinks; unsigned char *wtmp_log = NULL, *anon_require_passwd = NULL; /********************* Authenticate the user here *********************/ session.hide_password = TRUE; origuser = user; c = pr_auth_get_anon_config(p, &user, &ourname, &anonname); if (c) session.anon_config = c; if (!user) { pr_log_auth(PR_LOG_NOTICE, "USER %s: user is not a UserAlias from %s [%s] " "to %s:%i", origuser, session.c->remote_name, pr_netaddr_get_ipstr(session.c->remote_addr), pr_netaddr_get_ipstr(session.c->local_addr), session.c->local_port); goto auth_failure; } pw = pr_auth_getpwnam(p, user); if (pw == NULL && c != NULL && ourname != NULL) { /* If the client is authenticating using an alias (e.g. "AuthAliasOnly on"), * then we need to try checking using the real username, too (Bug#4255). */ pr_trace_msg("auth", 16, "no user entry found for <Anonymous> alias '%s', using '%s'", user, ourname); pw = pr_auth_getpwnam(p, ourname); } if (pw == NULL) { int auth_code = PR_AUTH_NOPWD; pr_log_auth(PR_LOG_NOTICE, "USER %s: no such user found from %s [%s] to %s:%i", user, session.c->remote_name, pr_netaddr_get_ipstr(session.c->remote_addr), pr_netaddr_get_ipstr(session.c->local_addr), session.c->local_port); pr_event_generate("mod_auth.authentication-code", &auth_code); goto auth_failure; } /* Security: other functions perform pw lookups, thus we need to make * a local copy of the user just looked up. */ pw = passwd_dup(p, pw); if (pw->pw_uid == PR_ROOT_UID) { unsigned char *root_allow = NULL; pr_event_generate("mod_auth.root-login", NULL); /* If RootLogin is set to true, we allow this... even though we * still log a warning. :) */ if ((root_allow = get_param_ptr(c ? c->subset : main_server->conf, "RootLogin", FALSE)) == NULL || *root_allow != TRUE) { if (pass) { pr_memscrub(pass, strlen(pass)); } pr_log_auth(PR_LOG_NOTICE, "SECURITY VIOLATION: Root login attempted"); return 0; } } session.user = pstrdup(p, pw->pw_name); session.group = pstrdup(p, pr_auth_gid2name(p, pw->pw_gid)); /* Set the login_uid and login_uid */ session.login_uid = pw->pw_uid; session.login_gid = pw->pw_gid; /* Check for any expandable variables in session.cwd. */ pw->pw_dir = path_subst_uservar(p, &pw->pw_dir); /* Before we check for supplemental groups, check to see if the locally * resolved name of the user, returned via auth_getpwnam(), is different * from the USER argument sent by the client. The name can change, since * auth modules can play all sorts of neat tricks on us. * * If the names differ, assume that any cached data in the session.gids * and session.groups lists are stale, and clear them out. */ if (strcmp(pw->pw_name, user) != 0) { pr_log_debug(DEBUG10, "local user name '%s' differs from client-sent " "user name '%s', clearing cached group data", pw->pw_name, user); session.gids = NULL; session.groups = NULL; } if (!session.gids && !session.groups) { /* Get the supplemental groups. Note that we only look up the * supplemental group credentials if we have not cached the group * credentials before, in session.gids and session.groups. * * Those credentials may have already been retrieved, as part of the * pr_auth_get_anon_config() call. 
*/ res = pr_auth_getgroups(p, pw->pw_name, &session.gids, &session.groups); if (res < 1) { pr_log_debug(DEBUG2, "no supplemental groups found for user '%s'", pw->pw_name); } } tmpc = find_config(main_server->conf, CONF_PARAM, "AllowChrootSymlinks", FALSE); if (tmpc != NULL) { allow_chroot_symlinks = *((int *) tmpc->argv[0]); } /* If c != NULL from this point on, we have an anonymous login */ aclp = login_check_limits(main_server->conf, FALSE, TRUE, &i); if (c) { anongroup = get_param_ptr(c->subset, "GroupName", FALSE); if (!anongroup) anongroup = get_param_ptr(main_server->conf, "GroupName",FALSE); /* Check for configured AnonRejectPasswords regex here, and fail the login * if the given password matches the regex. */ #ifdef PR_USE_REGEX if ((tmpc = find_config(c->subset, CONF_PARAM, "AnonRejectPasswords", FALSE)) != NULL) { int re_res; pr_regex_t *pw_regex = (pr_regex_t *) tmpc->argv[0]; if (pw_regex && pass && ((re_res = pr_regexp_exec(pw_regex, pass, 0, NULL, 0, 0, 0)) == 0)) { char errstr[200] = {'\0'}; pr_regexp_error(re_res, pw_regex, errstr, sizeof(errstr)); pr_log_auth(PR_LOG_NOTICE, "ANON %s: AnonRejectPasswords denies login", origuser); pr_event_generate("mod_auth.anon-reject-passwords", session.c); goto auth_failure; } } #endif if (!login_check_limits(c->subset, FALSE, TRUE, &i) || (!aclp && !i) ){ pr_log_auth(PR_LOG_NOTICE, "ANON %s (Login failed): Limit access denies " "login", origuser); goto auth_failure; } } if (c == NULL && aclp == 0) { pr_log_auth(PR_LOG_NOTICE, "USER %s (Login failed): Limit access denies login", origuser); goto auth_failure; } if (c) { anon_require_passwd = get_param_ptr(c->subset, "AnonRequirePassword", FALSE); } if (!c || (anon_require_passwd && *anon_require_passwd == TRUE)) { int auth_code; char *user_name = user; if (c && origuser && strcasecmp(user, origuser) != 0) { unsigned char *auth_using_alias = get_param_ptr(c->subset, "AuthUsingAlias", FALSE); /* If 'AuthUsingAlias' set and we're logging in under an alias, * then auth using that alias. */ if (auth_using_alias && *auth_using_alias == TRUE) { user_name = origuser; pr_log_auth(PR_LOG_INFO, "ANON AUTH: User %s, authenticating using alias %s", user, user_name); } } /* It is possible for the user to have already been authenticated during * the handling of the USER command, as by an RFC2228 mechanism. If * that had happened, we won't need to call _do_auth() here. */ if (!authenticated_without_pass) { auth_code = _do_auth(p, c ? 
c->subset : main_server->conf, user_name, pass); } else { auth_code = PR_AUTH_OK_NO_PASS; } pr_event_generate("mod_auth.authentication-code", &auth_code); if (auth_code < 0) { /* Normal authentication has failed, see if group authentication * passes */ c = _auth_group(p, user, &anongroup, &ourname, &anonname, pass); if (c != NULL) { if (c->config_type != CONF_ANON) { c = NULL; ugroup = anongroup; anongroup = NULL; } auth_code = PR_AUTH_OK; } } if (pass) pr_memscrub(pass, strlen(pass)); if (session.auth_mech) pr_log_debug(DEBUG2, "user '%s' authenticated by %s", user, session.auth_mech); switch (auth_code) { case PR_AUTH_OK_NO_PASS: auth_pass_resp_code = R_232; break; case PR_AUTH_OK: auth_pass_resp_code = R_230; break; case PR_AUTH_NOPWD: pr_log_auth(PR_LOG_NOTICE, "USER %s (Login failed): No such user found", user); goto auth_failure; case PR_AUTH_BADPWD: pr_log_auth(PR_LOG_NOTICE, "USER %s (Login failed): Incorrect password", origuser); goto auth_failure; case PR_AUTH_AGEPWD: pr_log_auth(PR_LOG_NOTICE, "USER %s (Login failed): Password expired", user); goto auth_failure; case PR_AUTH_DISABLEDPWD: pr_log_auth(PR_LOG_NOTICE, "USER %s (Login failed): Account disabled", user); goto auth_failure; default: break; }; /* Catch the case where we forgot to handle a bad auth code above. */ if (auth_code < 0) goto auth_failure; if (pw->pw_uid == PR_ROOT_UID) { pr_log_auth(PR_LOG_WARNING, "ROOT FTP login successful"); } } else if (c && (!anon_require_passwd || *anon_require_passwd == FALSE)) { session.hide_password = FALSE; } pr_auth_setgrent(p); res = pr_auth_is_valid_shell(c ? c->subset : main_server->conf, pw->pw_shell); if (res == FALSE) { pr_log_auth(PR_LOG_NOTICE, "USER %s (Login failed): Invalid shell: '%s'", user, pw->pw_shell); goto auth_failure; } res = pr_auth_banned_by_ftpusers(c ? c->subset : main_server->conf, pw->pw_name); if (res == TRUE) { pr_log_auth(PR_LOG_NOTICE, "USER %s (Login failed): User in " PR_FTPUSERS_PATH, user); goto auth_failure; } if (c) { struct group *grp = NULL; unsigned char *add_userdir = NULL; char *u, *chroot_dir; u = pr_table_get(session.notes, "mod_auth.orig-user", NULL); add_userdir = get_param_ptr(c->subset, "UserDirRoot", FALSE); /* If resolving an <Anonymous> user, make sure that user's groups * are set properly for the check of the home directory path (which * depend on those supplemental group memberships). Additionally, * temporarily switch to the new user's uid. 
*/ pr_signals_block(); PRIVS_ROOT res = set_groups(p, pw->pw_gid, session.gids); if (res < 0) { pr_log_pri(PR_LOG_WARNING, "error: unable to set groups: %s", strerror(errno)); } #ifndef PR_DEVEL_COREDUMP # ifdef __hpux if (setresuid(0, 0, 0) < 0) { pr_log_pri(PR_LOG_ERR, "unable to setresuid(): %s", strerror(errno)); } if (setresgid(0, 0, 0) < 0) { pr_log_pri(PR_LOG_ERR, "unable to setresgid(): %s", strerror(errno)); } # else if (setuid(PR_ROOT_UID) < 0) { pr_log_pri(PR_LOG_ERR, "unable to setuid(): %s", strerror(errno)); } if (setgid(PR_ROOT_GID) < 0) { pr_log_pri(PR_LOG_ERR, "unable to setgid(): %s", strerror(errno)); } # endif /* __hpux */ #endif /* PR_DEVEL_COREDUMP */ PRIVS_SETUP(pw->pw_uid, pw->pw_gid) if ((add_userdir && *add_userdir == TRUE) && strcmp(u, user) != 0) { chroot_dir = pdircat(p, c->name, u, NULL); } else { chroot_dir = c->name; } if (allow_chroot_symlinks == FALSE) { char *chroot_path, target_path[PR_TUNABLE_PATH_MAX+1]; struct stat st; chroot_path = chroot_dir; if (chroot_path[0] != '/') { if (chroot_path[0] == '~') { if (pr_fs_interpolate(chroot_path, target_path, sizeof(target_path)-1) == 0) { chroot_path = target_path; } else { chroot_path = NULL; } } } if (chroot_path != NULL) { size_t chroot_pathlen; /* Note: lstat(2) is sensitive to the presence of a trailing slash on * the path, particularly in the case of a symlink to a directory. * Thus to get the correct test, we need to remove any trailing slash * that might be present. Subtle. */ chroot_pathlen = strlen(chroot_path); if (chroot_pathlen > 1 && chroot_path[chroot_pathlen-1] == '/') { chroot_path[chroot_pathlen-1] = '\0'; } pr_fs_clear_cache(); res = pr_fsio_lstat(chroot_path, &st); if (res < 0) { int xerrno = errno; pr_log_pri(PR_LOG_WARNING, "error: unable to check %s: %s", chroot_path, strerror(xerrno)); errno = xerrno; chroot_path = NULL; } else { if (S_ISLNK(st.st_mode)) { pr_log_pri(PR_LOG_WARNING, "error: <Anonymous %s> is a symlink (denied by " "AllowChrootSymlinks config)", chroot_path); errno = EPERM; chroot_path = NULL; } } } if (chroot_path != NULL) { session.chroot_path = dir_realpath(p, chroot_dir); } else { session.chroot_path = NULL; } if (session.chroot_path == NULL) { pr_log_debug(DEBUG8, "error resolving '%s': %s", chroot_dir, strerror(errno)); } } else { session.chroot_path = dir_realpath(p, chroot_dir); if (session.chroot_path == NULL) { pr_log_debug(DEBUG8, "error resolving '%s': %s", chroot_dir, strerror(errno)); } } if (session.chroot_path && pr_fsio_access(session.chroot_path, X_OK, session.uid, session.gid, session.gids) != 0) { session.chroot_path = NULL; } else { session.chroot_path = pstrdup(session.pool, session.chroot_path); } /* Return all privileges back to that of the daemon, for now. 
*/ PRIVS_ROOT res = set_groups(p, daemon_gid, daemon_gids); if (res < 0) { pr_log_pri(PR_LOG_ERR, "error: unable to set groups: %s", strerror(errno)); } #ifndef PR_DEVEL_COREDUMP # ifdef __hpux if (setresuid(0, 0, 0) < 0) { pr_log_pri(PR_LOG_ERR, "unable to setresuid(): %s", strerror(errno)); } if (setresgid(0, 0, 0) < 0) { pr_log_pri(PR_LOG_ERR, "unable to setresgid(): %s", strerror(errno)); } # else if (setuid(PR_ROOT_UID) < 0) { pr_log_pri(PR_LOG_ERR, "unable to setuid(): %s", strerror(errno)); } if (setgid(PR_ROOT_GID) < 0) { pr_log_pri(PR_LOG_ERR, "unable to setgid(): %s", strerror(errno)); } # endif /* __hpux */ #endif /* PR_DEVEL_COREDUMP */ PRIVS_SETUP(daemon_uid, daemon_gid) pr_signals_unblock(); /* Sanity check, make sure we have daemon_uid and daemon_gid back */ #ifdef HAVE_GETEUID if (getegid() != daemon_gid || geteuid() != daemon_uid) { PRIVS_RELINQUISH pr_log_pri(PR_LOG_WARNING, "switching IDs from user %s back to daemon uid/gid failed: %s", session.user, strerror(errno)); pr_session_disconnect(&auth_module, PR_SESS_DISCONNECT_BY_APPLICATION, NULL); } #endif /* HAVE_GETEUID */ if (anon_require_passwd && *anon_require_passwd == TRUE) { session.anon_user = pstrdup(session.pool, origuser); } else { session.anon_user = pstrdup(session.pool, pass); } if (!session.chroot_path) { pr_log_pri(PR_LOG_NOTICE, "%s: Directory %s is not accessible", session.user, c->name); pr_response_add_err(R_530, _("Unable to set anonymous privileges.")); goto auth_failure; } sstrncpy(session.cwd, "/", sizeof(session.cwd)); xferlog = get_param_ptr(c->subset, "TransferLog", FALSE); if (anongroup) { grp = pr_auth_getgrnam(p, anongroup); if (grp) { pw->pw_gid = grp->gr_gid; session.group = pstrdup(p, grp->gr_name); } } } else { struct group *grp; char *homedir; if (ugroup) { grp = pr_auth_getgrnam(p, ugroup); if (grp) { pw->pw_gid = grp->gr_gid; session.group = pstrdup(p, grp->gr_name); } } /* Attempt to resolve any possible symlinks. */ PRIVS_USER homedir = dir_realpath(p, pw->pw_dir); PRIVS_RELINQUISH if (homedir) sstrncpy(session.cwd, homedir, sizeof(session.cwd)); else sstrncpy(session.cwd, pw->pw_dir, sizeof(session.cwd)); } /* Create the home directory, if need be. */ if (!c && mkhome) { if (create_home(p, session.cwd, origuser, pw->pw_uid, pw->pw_gid) < 0) { /* NOTE: should this cause the login to fail? */ goto auth_failure; } } /* Get default chdir (if any) */ defchdir = get_default_chdir(p, (c ? c->subset : main_server->conf)); if (defchdir) sstrncpy(session.cwd, defchdir, sizeof(session.cwd)); /* Check limits again to make sure deny/allow directives still permit * access. */ if (!login_check_limits((c ? c->subset : main_server->conf), FALSE, TRUE, &i)) { pr_log_auth(PR_LOG_NOTICE, "%s %s: Limit access denies login", (c != NULL) ? "ANON" : C_USER, origuser); goto auth_failure; } /* Perform a directory fixup. */ resolve_deferred_dirs(main_server); fixup_dirs(main_server, CF_DEFER); /* If running under an anonymous context, resolve all <Directory> * blocks inside it. */ if (c && c->subset) resolve_anonymous_dirs(c->subset); /* Write the login to wtmp. This must be done here because we won't * have access after we give up root. This can result in falsified * wtmp entries if an error kicks the user out before we get * through with the login process. Oh well. 
*/ sess_ttyname = pr_session_get_ttyname(p); /* Perform wtmp logging only if not turned off in <Anonymous> * or the current server */ if (c) wtmp_log = get_param_ptr(c->subset, "WtmpLog", FALSE); if (wtmp_log == NULL) wtmp_log = get_param_ptr(main_server->conf, "WtmpLog", FALSE); /* As per Bug#3482, we need to disable WtmpLog for FreeBSD 9.0, as * an interim measure. * * The issue is that some platforms update multiple files for a single * pututxline(3) call; proftpd tries to update those files manually, * do to chroots (after which a pututxline(3) call will fail). A proper * solution requires a separate process, running with the correct * privileges, which would handle wtmp logging. The proftpd session * processes would send messages to this logging daemon (via Unix domain * socket, or FIFO, or TCP socket). * * Also note that this hack to disable WtmpLog may need to be extended * to other platforms in the future. */ #if defined(HAVE_UTMPX_H) && \ defined(__FreeBSD_version) && __FreeBSD_version >= 900007 if (wtmp_log == NULL || *wtmp_log == TRUE) { wtmp_log = pcalloc(p, sizeof(unsigned char)); *wtmp_log = FALSE; pr_log_debug(DEBUG5, "WtpmLog automatically disabled; see Bug#3482 for details"); } #endif PRIVS_ROOT if (wtmp_log == NULL || *wtmp_log == TRUE) { log_wtmp(sess_ttyname, session.user, session.c->remote_name, session.c->remote_addr); session.wtmp_log = TRUE; } #ifdef PR_USE_LASTLOG if (lastlog) { log_lastlog(pw->pw_uid, session.user, sess_ttyname, session.c->remote_addr); } #endif /* PR_USE_LASTLOG */ /* Open any TransferLogs */ if (!xferlog) { if (c) xferlog = get_param_ptr(c->subset, "TransferLog", FALSE); if (!xferlog) xferlog = get_param_ptr(main_server->conf, "TransferLog", FALSE); if (!xferlog) xferlog = PR_XFERLOG_PATH; } if (strcasecmp(xferlog, "NONE") == 0) { xferlog_open(NULL); } else { xferlog_open(xferlog); } res = set_groups(p, pw->pw_gid, session.gids); if (res < 0) { pr_log_pri(PR_LOG_ERR, "error: unable to set groups: %s", strerror(errno)); } PRIVS_RELINQUISH /* Now check to see if the user has an applicable DefaultRoot */ if (c == NULL) { if (get_default_root(session.pool, allow_chroot_symlinks, &defroot) < 0) { pr_log_pri(PR_LOG_NOTICE, "error: unable to determine DefaultRoot directory"); pr_response_send(R_530, _("Login incorrect.")); pr_session_end(0); } ensure_open_passwd(p); if (defroot != NULL) { if (pr_auth_chroot(defroot) == -1) { pr_log_pri(PR_LOG_NOTICE, "error: unable to set DefaultRoot directory"); pr_response_send(R_530, _("Login incorrect.")); pr_session_end(0); } /* Re-calc the new cwd based on this root dir. If not applicable * place the user in / (of defroot) */ if (strncmp(session.cwd, defroot, strlen(defroot)) == 0) { char *newcwd = &session.cwd[strlen(defroot)]; if (*newcwd == '/') newcwd++; session.cwd[0] = '/'; sstrncpy(&session.cwd[1], newcwd, sizeof(session.cwd)); } } } if (c) ensure_open_passwd(p); if (c && pr_auth_chroot(session.chroot_path) == -1) { pr_log_pri(PR_LOG_NOTICE, "error: unable to set anonymous privileges"); pr_response_send(R_530, _("Login incorrect.")); pr_session_end(0); } /* new in 1.1.x, I gave in and we don't give up root permanently.. * sigh. 
*/ PRIVS_ROOT #ifndef PR_DEVEL_COREDUMP # ifdef __hpux if (setresuid(0, 0, 0) < 0) { pr_log_pri(PR_LOG_ERR, "unable to setresuid(): %s", strerror(errno)); } if (setresgid(0, 0, 0) < 0) { pr_log_pri(PR_LOG_ERR, "unable to setresgid(): %s", strerror(errno)); } # else if (setuid(PR_ROOT_UID) < 0) { pr_log_pri(PR_LOG_ERR, "unable to setuid(): %s", strerror(errno)); } if (setgid(PR_ROOT_GID) < 0) { pr_log_pri(PR_LOG_ERR, "unable to setgid(): %s", strerror(errno)); } # endif /* __hpux */ #endif /* PR_DEVEL_COREDUMP */ PRIVS_SETUP(pw->pw_uid, pw->pw_gid) #ifdef HAVE_GETEUID if (getegid() != pw->pw_gid || geteuid() != pw->pw_uid) { PRIVS_RELINQUISH pr_log_pri(PR_LOG_ERR, "error: %s setregid() or setreuid(): %s", session.user, strerror(errno)); pr_response_send(R_530, _("Login incorrect.")); pr_session_end(0); } #endif /* If the home directory is NULL or "", reject the login. */ if (pw->pw_dir == NULL || strncmp(pw->pw_dir, "", 1) == 0) { pr_log_pri(PR_LOG_WARNING, "error: user %s home directory is NULL or \"\"", session.user); pr_response_send(R_530, _("Login incorrect.")); pr_session_end(0); } { unsigned char *show_symlinks = get_param_ptr( c ? c->subset : main_server->conf, "ShowSymlinks", FALSE); if (!show_symlinks || *show_symlinks == TRUE) showsymlinks = TRUE; else showsymlinks = FALSE; } /* chdir to the proper directory, do this even if anonymous * to make sure we aren't outside our chrooted space. */ /* Attempt to change to the correct directory -- use session.cwd first. * This will contain the DefaultChdir directory, if configured... */ if (pr_fsio_chdir_canon(session.cwd, !showsymlinks) == -1) { /* if we've got DefaultRoot or anonymous login, ignore this error * and chdir to / */ if (session.chroot_path != NULL || defroot) { pr_log_debug(DEBUG2, "unable to chdir to %s (%s), defaulting to chroot " "directory %s", session.cwd, strerror(errno), (session.chroot_path ? session.chroot_path : defroot)); if (pr_fsio_chdir_canon("/", !showsymlinks) == -1) { pr_log_pri(PR_LOG_NOTICE, "%s chdir(\"/\") failed: %s", session.user, strerror(errno)); pr_response_send(R_530, _("Login incorrect.")); pr_session_end(0); } } else if (defchdir) { /* If we've got defchdir, failure is ok as well, simply switch to * user's homedir. */ pr_log_debug(DEBUG2, "unable to chdir to %s (%s), defaulting to home " "directory %s", session.cwd, strerror(errno), pw->pw_dir); if (pr_fsio_chdir_canon(pw->pw_dir, !showsymlinks) == -1) { pr_log_pri(PR_LOG_NOTICE, "%s chdir(\"%s\") failed: %s", session.user, session.cwd, strerror(errno)); pr_response_send(R_530, _("Login incorrect.")); pr_session_end(0); } } else { /* Unable to switch to user's real home directory, which is not * allowed. 
*/ pr_log_pri(PR_LOG_NOTICE, "%s chdir(\"%s\") failed: %s", session.user, session.cwd, strerror(errno)); pr_response_send(R_530, _("Login incorrect.")); pr_session_end(0); } } sstrncpy(session.cwd, pr_fs_getcwd(), sizeof(session.cwd)); sstrncpy(session.vwd, pr_fs_getvwd(), sizeof(session.vwd)); /* Make sure directory config pointers are set correctly */ dir_check_full(p, cmd, G_NONE, session.cwd, NULL); if (c) { if (!session.hide_password) { session.proc_prefix = pstrcat(session.pool, session.c->remote_name, ": anonymous/", pass, NULL); } else { session.proc_prefix = pstrcat(session.pool, session.c->remote_name, ": anonymous", NULL); } session.sf_flags = SF_ANON; } else { session.proc_prefix = pstrdup(session.pool, session.c->remote_name); session.sf_flags = 0; } /* While closing the pointer to the password database would avoid any * potential attempt to hijack this information, it is unfortunately needed * in a chroot()ed environment. Otherwise, mappings from UIDs to names, * among other things, would fail. */ /* pr_auth_endpwent(p); */ /* Default transfer mode is ASCII */ defaulttransfermode = (char *) get_param_ptr(main_server->conf, "DefaultTransferMode", FALSE); if (defaulttransfermode && strcasecmp(defaulttransfermode, "binary") == 0) { session.sf_flags &= (SF_ALL^SF_ASCII); } else { session.sf_flags |= SF_ASCII; } /* Authentication complete, user logged in, now kill the login * timer. */ /* Update the scoreboard entry */ pr_scoreboard_entry_update(session.pid, PR_SCORE_USER, session.user, PR_SCORE_CWD, session.cwd, NULL); pr_session_set_idle(); pr_timer_remove(PR_TIMER_LOGIN, &auth_module); /* These copies are made from the session.pool, instead of the more * volatile pool used originally, in order that the copied data maintain * its integrity for the lifetime of the session. */ session.user = pstrdup(session.pool, session.user); if (session.group) session.group = pstrdup(session.pool, session.group); if (session.gids) session.gids = copy_array(session.pool, session.gids); /* session.groups is an array of strings, so we must copy the string data * as well as the pointers. */ session.groups = copy_array_str(session.pool, session.groups); /* Resolve any deferred-resolution paths in the FS layer */ pr_resolve_fs_map(); return 1; auth_failure: if (pass) pr_memscrub(pass, strlen(pass)); session.user = session.group = NULL; session.gids = session.groups = NULL; session.wtmp_log = FALSE; return 0; }
0
[ "CWE-59", "CWE-61" ]
proftpd
ecff21e0d0e84f35c299ef91d7fda088e516d4ed
141,777,316,043,421,560,000,000,000,000,000,000,000
892
Backporting recursive handling of DefaultRoot path, when AllowChrootSymlinks is off, to 1.3.5 branch.
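The message refers to checking the DefaultRoot path recursively rather than lstat()ing only the final path, as the function above does for the <Anonymous> root. A POSIX sketch of the per-component idea, not proftpd's actual implementation:

#include <string.h>
#include <sys/stat.h>

/* Walk a chroot path one component at a time and flag the whole path
 * if ANY component is a symlink, not just the last one. Components
 * that do not exist are skipped (lstat failure is not treated as a
 * symlink here). */
static int path_has_symlink(const char *path)
{
    char buf[4096];
    struct stat st;
    size_t len = strlen(path);
    if (len >= sizeof(buf))
        return 1;                           /* treat overlong as unsafe */
    memcpy(buf, path, len + 1);
    for (char *p = strchr(buf + 1, '/'); ; p = strchr(p + 1, '/')) {
        if (p) *p = '\0';                   /* test this prefix */
        if (lstat(buf, &st) == 0 && S_ISLNK(st.st_mode))
            return 1;
        if (!p)
            return 0;                       /* full path checked, clean */
        *p = '/';                           /* restore and continue */
    }
}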
inline uint64_t make_type(const T &arg) { return MakeValue< BasicFormatter<char> >::type(arg); }
0
[ "CWE-134", "CWE-119", "CWE-787" ]
fmt
8cf30aa2be256eba07bb1cefb998c52326e846e7
307,453,937,267,810,070,000,000,000,000,000,000,000
3
Fix segfault on complex pointer formatting (#642)
int ASN1_GENERALIZEDTIME_set_string(ASN1_GENERALIZEDTIME *s, const char *str) { ASN1_GENERALIZEDTIME t; t.type=V_ASN1_GENERALIZEDTIME; t.length=strlen(str); t.data=(unsigned char *)str; if (ASN1_GENERALIZEDTIME_check(&t)) { if (s != NULL) { if (!ASN1_STRING_set((ASN1_STRING *)s, (unsigned char *)str,t.length)) return 0; s->type=V_ASN1_GENERALIZEDTIME; } return(1); } else return(0); }
0
[]
openssl
c7235be6e36c4bef84594aa3b2f0561db84b63d8
237,389,930,268,864,100,000,000,000,000,000,000,000
21
RFC 3161 compliant time stamp request creation, response generation and response verification. Submitted by: Zoltan Glozik <[email protected]> Reviewed by: Ulf Moeller
static void remove_kevent(struct inotify_device *dev, struct inotify_kernel_event *kevent) { list_del(&kevent->list); dev->event_count--; dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len; }
0
[ "CWE-399" ]
linux-2.6
3632dee2f8b8a9720329f29eeaa4ec4669a3aff8
134,945,999,827,232,410,000,000,000,000,000,000,000
8
inotify: clean up inotify_read and fix locking problems If userspace supplies an invalid pointer to a read() of an inotify instance, the inotify device's event list mutex is unlocked twice. This causes an unbalance which effectively leaves the data structure unprotected, and we can trigger oopses by accessing the inotify instance from different tasks concurrently. The best fix (contributed largely by Linus) is a total rewrite of the function in question: On Thu, Jan 22, 2009 at 7:05 AM, Linus Torvalds wrote: > The thing to notice is that: > > - locking is done in just one place, and there is no question about it > not having an unlock. > > - that whole double-while(1)-loop thing is gone. > > - use multiple functions to make nesting and error handling sane > > - do error testing after doing the things you always need to do, ie do > this: > > mutex_lock(..) > ret = function_call(); > mutex_unlock(..) > > .. test ret here .. > > instead of doing conditional exits with unlocking or freeing. > > So if the code is written in this way, it may still be buggy, but at least > it's not buggy because of subtle "forgot to unlock" or "forgot to free" > issues. > > This _always_ unlocks if it locked, and it always frees if it got a > non-error kevent. Cc: John McCutchan <[email protected]> Cc: Robert Love <[email protected]> Cc: <[email protected]> Signed-off-by: Vegard Nossum <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
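The locking shape Linus prescribes in the quoted mail (one lock site, unconditional unlock, error tests after the critical section) can be shown in a few lines. A userspace model using a pthread mutex in place of the inotify device mutex:

#include <pthread.h>

static pthread_mutex_t ev_lock = PTHREAD_MUTEX_INITIALIZER;
static int pending = 3;

/* Stand-in for pulling one event off the queue; call with ev_lock held. */
static int fetch_event(void) { return pending > 0 ? pending-- : -1; }

static int read_one_event(void)
{
    int ret;
    pthread_mutex_lock(&ev_lock);      /* the only lock site */
    ret = fetch_event();
    pthread_mutex_unlock(&ev_lock);    /* the only unlock site */
    if (ret < 0)
        return -1;                     /* errors tested after unlocking */
    return ret;
}

Because no conditional exit happens between lock and unlock, no path can leave the mutex held or release it twice, which is exactly the bug class the rewrite removed.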
sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; if (!sctp_vtag_verify_either(chunk, asoc)) return sctp_sf_pdiscard(ep, asoc, type, arg, commands); /* Make sure that the ABORT chunk has a valid length. * Since this is an ABORT chunk, we have to discard it * because of the following text: * RFC 2960, Section 3.3.7 * If an endpoint receives an ABORT with a format error or for an * association that doesn't exist, it MUST silently discard it. * Becasue the length is "invalid", we can't really discard just * as we do not know its true length. So, to be safe, discard the * packet. */ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t))) return sctp_sf_pdiscard(ep, asoc, type, arg, commands); /* Stop the T2-shutdown timer. */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); /* Stop the T5-shutdown guard timer. */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); return sctp_sf_do_9_1_abort(ep, asoc, type, arg, commands); }
0
[]
linux
bbd0d59809f923ea2b540cbd781b32110e249f6e
25,147,894,964,932,880,000,000,000,000,000,000,000
34
[SCTP]: Implement the receive and verification of AUTH chunk This patch implements the receive path needed to process authenticated chunks. Add ability to process the AUTH chunk and handle edge cases for authenticated COOKIE-ECHO as well. Signed-off-by: Vlad Yasevich <[email protected]> Signed-off-by: David S. Miller <[email protected]>
bool ldb_dn_check_special(struct ldb_dn *dn, const char *check) { if ( ! dn || dn->invalid) return false; return ! strcmp(dn->linearized, check); }
0
[ "CWE-200" ]
samba
7f51ec8c4ed9ba1f53d722e44fb6fb3cde933b72
103,942,553,030,989,870,000,000,000,000,000,000,000
5
CVE-2015-5330: ldb_dn: simplify and fix ldb_dn_escape_internal() Previously we relied on NUL terminated strings and jumped back and forth between copying escaped bytes and memcpy()ing un-escaped chunks. This simple version is easier to reason about and works with unterminated strings. It may also be faster as it avoids reading the string twice (first with strcspn, then with memcpy). Bug: https://bugzilla.samba.org/show_bug.cgi?id=11599 Signed-off-by: Douglas Bagnall <[email protected]> Pair-programmed-with: Andrew Bartlett <[email protected]> Reviewed-by: Ralph Boehme <[email protected]>
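A sketch of the single-pass approach the message describes: one loop over an explicit length, so unterminated input is safe and each byte is read exactly once. The escape set and buffer contract here are illustrative, not samba's exact rules:

#include <stddef.h>

/* Single-pass escape over an explicit length: no strcspn/memcpy
 * two-phase dance and no reliance on a terminating NUL. The escape
 * set (',', '"', '\\') is illustrative only. dst must have room for
 * 2*len + 1 bytes in the worst case. */
static size_t escape_dn_value(char *dst, const char *src, size_t len)
{
    size_t o = 0;
    for (size_t i = 0; i < len; i++) {
        char c = src[i];
        if (c == ',' || c == '"' || c == '\\')
            dst[o++] = '\\';
        dst[o++] = c;
    }
    dst[o] = '\0';
    return o;
}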
uint STDCALL mysql_errno(MYSQL *mysql) { return mysql ? mysql->net.last_errno : mysql_server_last_errno; }
0
[ "CWE-254" ]
server
f0d774d48416bb06063184380b684380ca005a41
254,825,175,681,152,620,000,000,000,000,000,000,000
4
MDEV-9212 ssl-validate-cert incorrect hostname check Reimplement ssl_verify_server_cert() using the logic from https://wiki.openssl.org/index.php/Hostname_validation The bug was discovered by Alex Gaynor.
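With OpenSSL 1.0.2 or later, the wiki's hostname-validation logic is usually delegated to X509_check_host(); a hedged sketch of that call pattern, not MariaDB's actual ssl_verify_server_cert():

#include <openssl/ssl.h>
#include <openssl/x509v3.h>

/* Returns 0 when the peer certificate matches 'host', -1 otherwise.
 * X509_check_host() applies the SAN/CN and wildcard rules described
 * on the OpenSSL hostname-validation wiki page, so no hand-rolled
 * CN string comparison is needed. */
static int verify_server_cert(SSL *ssl, const char *host)
{
    X509 *cert = SSL_get_peer_certificate(ssl);
    int ok = -1;
    if (cert) {
        if (X509_check_host(cert, host, 0, 0, NULL) == 1)
            ok = 0;
        X509_free(cert);
    }
    return ok;
}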
inline std::shared_ptr<Ope> dot() { return std::make_shared<AnyCharacter>(); }
0
[ "CWE-125" ]
cpp-peglib
b3b29ce8f3acf3a32733d930105a17d7b0ba347e
202,207,807,046,540,670,000,000,000,000,000,000,000
1
Fix #122
int git_index__changed_relative_to( git_index *index, const git_oid *checksum) { /* attempt to update index (ignoring errors) */ if (git_index_read(index, false) < 0) giterr_clear(); return !!git_oid_cmp(&index->checksum, checksum); }
0
[ "CWE-415", "CWE-190" ]
libgit2
3db1af1f370295ad5355b8f64b865a2a357bcac0
8,779,108,248,268,574,000,000,000,000,000,000,000
9
index: error out on unreasonable prefix-compressed path lengths When computing the complete path length from the encoded prefix-compressed path, we end up just allocating the complete path without ever checking what the encoded path length actually is. This can easily lead to a denial of service by just encoding an unreasonable long path name inside of the index. Git already enforces a maximum path length of 4096 bytes. As we also have that enforcement ready in some places, just make sure that the resulting path is smaller than GIT_PATH_MAX. Reported-by: Krishna Ram Prakash R <[email protected]> Reported-by: Vivek Parikh <[email protected]>
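The core of the fix is validating the reconstructed path length against a hard ceiling before allocating. A standalone sketch with assumed field names; in the real index code the strip count comes from a varint:

#include <stddef.h>

#define GIT_PATH_MAX 4096   /* limit git itself enforces, per the message */

/* Prefix-compressed entry: 'strip' removes bytes from the previous
 * path, 'suffix_len' appends new ones. Reject absurd totals before
 * allocating, instead of trusting the on-disk index. */
static int checked_path_len(size_t prev_len, size_t strip,
                            size_t suffix_len, size_t *out)
{
    if (strip > prev_len)
        return -1;                   /* can't strip more than we have */
    size_t total = prev_len - strip + suffix_len;
    if (total == 0 || total >= GIT_PATH_MAX)
        return -1;                   /* unreasonable: refuse to allocate */
    *out = total;
    return 0;
}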
cnt_pipe(struct worker *wrk, struct req *req) { struct busyobj *bo; enum req_fsm_nxt nxt; CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); CHECK_OBJ_NOTNULL(req, REQ_MAGIC); AZ(req->objcore); AZ(req->stale_oc); AN(req->vcl); wrk->stats->s_pipe++; bo = VBO_GetBusyObj(wrk, req); CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC); VSLb(bo->vsl, SLT_Begin, "bereq %u pipe", VXID(req->vsl->wid)); VSLb(req->vsl, SLT_Link, "bereq %u pipe", VXID(bo->vsl->wid)); THR_SetBusyobj(bo); bo->sp = req->sp; SES_Ref(bo->sp); HTTP_Setup(bo->bereq, bo->ws, bo->vsl, SLT_BereqMethod); http_FilterReq(bo->bereq, req->http, 0); // XXX: 0 ? http_PrintfHeader(bo->bereq, "X-Varnish: %u", VXID(req->vsl->wid)); http_ForceHeader(bo->bereq, H_Connection, "close"); if (req->want100cont) { http_SetHeader(bo->bereq, "Expect: 100-continue"); req->want100cont = 0; } VCL_pipe_method(req->vcl, wrk, req, bo, NULL); switch (wrk->handling) { case VCL_RET_SYNTH: req->req_step = R_STP_SYNTH; nxt = REQ_FSM_MORE; break; case VCL_RET_PIPE: if (V1P_Enter() == 0) { AZ(bo->req); bo->req = req; bo->wrk = wrk; SES_Close(req->sp, VDI_Http1Pipe(req, bo)); nxt = REQ_FSM_DONE; V1P_Leave(); break; } wrk->stats->pipe_limited++; /* fall through */ case VCL_RET_FAIL: req->req_step = R_STP_VCLFAIL; nxt = REQ_FSM_MORE; break; default: WRONG("Illegal return from vcl_pipe{}"); } http_Teardown(bo->bereq); SES_Rel(bo->sp); VBO_ReleaseBusyObj(wrk, &bo); THR_SetBusyobj(NULL); return (nxt); }
0
[ "CWE-212" ]
varnish-cache
bd7b3d6d47ccbb5e1747126f8e2a297f38e56b8c
159,386,209,258,983,890,000,000,000,000,000,000,000
62
Clear err_code and err_reason at start of request handling req->err_code and req->err_reason are set when going to synthetic handling. From there the resp.reason HTTP field is set from req->err_reason if set, or the generic code based on req->err_code is used if it was NULL. This patch clears these members so that a value from the handling of a previous request doesn't linger. Fixes: VSV00004
GF_Err tenc_box_dump(GF_Box *a, FILE * trace) { GF_TrackEncryptionBox *ptr = (GF_TrackEncryptionBox*) a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "TrackEncryptionBox", trace); gf_fprintf(trace, "isEncrypted=\"%d\"", ptr->isProtected); if (ptr->key_info[3]) gf_fprintf(trace, " IV_size=\"%d\" KID=\"", ptr->key_info[3]); else { gf_fprintf(trace, " constant_IV_size=\"%d\" constant_IV=\"", ptr->key_info[20]); dump_data_hex(trace, (char *) ptr->key_info+21, ptr->key_info[20]); gf_fprintf(trace, "\" KID=\""); } dump_data_hex(trace, (char *) ptr->key_info+4, 16); if (ptr->version) gf_fprintf(trace, "\" crypt_byte_block=\"%d\" skip_byte_block=\"%d", ptr->crypt_byte_block, ptr->skip_byte_block); gf_fprintf(trace, "\">\n"); if (!ptr->size) { gf_fprintf(trace, " IV_size=\"\" KID=\"\" constant_IV_size=\"\" constant_IV=\"\" crypt_byte_block=\"\" skip_byte_block=\"\">\n"); gf_fprintf(trace, "<TENCKey IV_size=\"\" KID=\"\" const_IV_size=\"\" constIV=\"\"/>\n"); } gf_isom_box_dump_done("TrackEncryptionBox", a, trace); return GF_OK; }
0
[ "CWE-787" ]
gpac
ea1eca00fd92fa17f0e25ac25652622924a9a6a0
93,847,206,658,841,030,000,000,000,000,000,000,000
28
fixed #2138
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) { unsigned long address = (unsigned long)uaddr; struct mm_struct *mm = current->mm; struct page *page; struct address_space *mapping; int err, ro = 0; /* * The futex address must be "naturally" aligned. */ key->both.offset = address % PAGE_SIZE; if (unlikely((address % sizeof(u32)) != 0)) return -EINVAL; address -= key->both.offset; if (unlikely(!access_ok(rw, uaddr, sizeof(u32)))) return -EFAULT; if (unlikely(should_fail_futex(fshared))) return -EFAULT; /* * PROCESS_PRIVATE futexes are fast. * As the mm cannot disappear under us and the 'key' only needs * virtual address, we dont even have to find the underlying vma. * Note : We do have to check 'uaddr' is a valid user address, * but access_ok() should be faster than find_vma() */ if (!fshared) { key->private.mm = mm; key->private.address = address; get_futex_key_refs(key); /* implies smp_mb(); (B) */ return 0; } again: /* Ignore any VERIFY_READ mapping (futex common case) */ if (unlikely(should_fail_futex(fshared))) return -EFAULT; err = get_user_pages_fast(address, 1, 1, &page); /* * If write access is not required (eg. FUTEX_WAIT), try * and get read-only access. */ if (err == -EFAULT && rw == VERIFY_READ) { err = get_user_pages_fast(address, 1, 0, &page); ro = 1; } if (err < 0) return err; else err = 0; /* * The treatment of mapping from this point on is critical. The page * lock protects many things but in this context the page lock * stabilizes mapping, prevents inode freeing in the shared * file-backed region case and guards against movement to swap cache. * * Strictly speaking the page lock is not needed in all cases being * considered here and page lock forces unnecessarily serialization * From this point on, mapping will be re-verified if necessary and * page lock will be acquired only if it is unavoidable */ page = compound_head(page); mapping = READ_ONCE(page->mapping); /* * If page->mapping is NULL, then it cannot be a PageAnon * page; but it might be the ZERO_PAGE or in the gate area or * in a special mapping (all cases which we are happy to fail); * or it may have been a good file page when get_user_pages_fast * found it, but truncated or holepunched or subjected to * invalidate_complete_page2 before we got the page lock (also * cases which we are happy to fail). And we hold a reference, * so refcount care in invalidate_complete_page's remove_mapping * prevents drop_caches from setting mapping to NULL beneath us. * * The case we do have to guard against is when memory pressure made * shmem_writepage move it from filecache to swapcache beneath us: * an unlikely race, but we do need to retry for page->mapping. */ if (unlikely(!mapping)) { int shmem_swizzled; /* * Page lock is required to identify which special case above * applies. If this is really a shmem page then the page lock * will prevent unexpected transitions. */ lock_page(page); shmem_swizzled = PageSwapCache(page) || page->mapping; unlock_page(page); put_page(page); if (shmem_swizzled) goto again; return -EFAULT; } /* * Private mappings are handled in a simple way. * * If the futex key is stored on an anonymous page, then the associated * object is the mm which is implicitly pinned by the calling process. * * NOTE: When userspace waits on a MAP_SHARED mapping, even if * it's a read-only handle, it's expected that futexes attach to * the object not the particular process. */ if (PageAnon(page)) { /* * A RO anonymous page will never change and thus doesn't make * sense for futex operations. 
*/ if (unlikely(should_fail_futex(fshared)) || ro) { err = -EFAULT; goto out; } key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */ key->private.mm = mm; key->private.address = address; get_futex_key_refs(key); /* implies smp_mb(); (B) */ } else { struct inode *inode; /* * The associated futex object in this case is the inode and * the page->mapping must be traversed. Ordinarily this should * be stabilised under page lock but it's not strictly * necessary in this case as we just want to pin the inode, not * update the radix tree or anything like that. * * The RCU read lock is taken as the inode is finally freed * under RCU. If the mapping still matches expectations then the * mapping->host can be safely accessed as being a valid inode. */ rcu_read_lock(); if (READ_ONCE(page->mapping) != mapping) { rcu_read_unlock(); put_page(page); goto again; } inode = READ_ONCE(mapping->host); if (!inode) { rcu_read_unlock(); put_page(page); goto again; } /* * Take a reference unless it is about to be freed. Previously * this reference was taken by ihold under the page lock * pinning the inode in place so i_lock was unnecessary. The * only way for this check to fail is if the inode was * truncated in parallel so warn for now if this happens. * * We are not calling into get_futex_key_refs() in file-backed * cases, therefore a successful atomic_inc return below will * guarantee that get_futex_key() will still imply smp_mb(); (B). */ if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) { rcu_read_unlock(); put_page(page); goto again; } /* Should be impossible but lets be paranoid for now */ if (WARN_ON_ONCE(inode->i_mapping != mapping)) { err = -EFAULT; rcu_read_unlock(); iput(inode); goto out; } key->both.offset |= FUT_OFF_INODE; /* inode-based key */ key->shared.inode = inode; key->shared.pgoff = basepage_index(page); rcu_read_unlock(); } out: put_page(page); return err; }
0
[ "CWE-416" ]
linux
65d8fc777f6dcfee12785c057a6b57f679641c90
173,169,372,514,173,120,000,000,000,000,000,000,000
197
futex: Remove requirement for lock_page() in get_futex_key() When dealing with key handling for shared futexes, we can drastically reduce the usage/need of the page lock. 1) For anonymous pages, the associated futex object is the mm_struct which does not require the page lock. 2) For inode-based keys, we can check under the RCU read lock if the page mapping is still valid and take a reference to the inode. This just leaves one rare race that requires the page lock in the slow path when examining the swapcache. Additionally, realtime users currently have a problem with the page lock being contended for unbounded periods of time during futex operations. Task A get_futex_key() lock_page() ---> preempted Now any other task trying to lock that page will have to wait until task A gets scheduled back in, which is an unbounded amount of time. With this patch, we pretty much have a lockless get_futex_key(). Experiments show that this patch can boost/speedup the hashing of shared futexes with the perf futex benchmarks (which is good for measuring such a change) by up to 45% when there are high (> 100) thread counts on a 60 core Westmere. Lower counts are pretty much in the noise range or less than 10%, but the mid range can be seen at over 30% overall throughput (hash ops/sec). This makes anon-mem shared futexes much closer to their private counterpart. Signed-off-by: Mel Gorman <[email protected]> [ Ported on top of thp refcount rework, changelog, comments, fixes. ] Signed-off-by: Davidlohr Bueso <[email protected]> Reviewed-by: Thomas Gleixner <[email protected]> Cc: Chris Mason <[email protected]> Cc: Darren Hart <[email protected]> Cc: Hugh Dickins <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Sebastian Andrzej Siewior <[email protected]> Cc: [email protected] Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
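The "take a reference unless it is about to be freed" step in the inode path relies on atomic_inc_not_zero(). A userspace model of that primitive with C11 atomics (RCU itself is not modeled here):

#include <stdatomic.h>
#include <stdbool.h>

/* Model of atomic_inc_not_zero(): acquire a reference only if the
 * object is still live (refcount > 0). A count of 0 means the object
 * is already on its way to being freed, so the lookup must retry. */
static bool ref_get_not_zero(atomic_int *refcount)
{
    int c = atomic_load(refcount);
    while (c > 0) {
        /* on failure, c is reloaded with the current value and retried */
        if (atomic_compare_exchange_weak(refcount, &c, c + 1))
            return true;   /* won the race: safe to use the object */
    }
    return false;          /* object dying: caller drops out and retries */
}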
static void nvme_rw_cb(void *opaque, int ret) { NvmeRequest *req = opaque; NvmeNamespace *ns = req->ns; BlockBackend *blk = ns->blkconf.blk; trace_pci_nvme_rw_cb(nvme_cid(req), blk_name(blk)); if (ret) { goto out; } if (ns->lbaf.ms) { NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; uint64_t slba = le64_to_cpu(rw->slba); uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1; uint64_t offset = nvme_moff(ns, slba); if (req->cmd.opcode == NVME_CMD_WRITE_ZEROES) { size_t mlen = nvme_m2b(ns, nlb); req->aiocb = blk_aio_pwrite_zeroes(blk, offset, mlen, BDRV_REQ_MAY_UNMAP, nvme_rw_complete_cb, req); return; } if (nvme_ns_ext(ns) || req->cmd.mptr) { uint16_t status; nvme_sg_unmap(&req->sg); status = nvme_map_mdata(nvme_ctrl(req), nlb, req); if (status) { ret = -EFAULT; goto out; } if (req->cmd.opcode == NVME_CMD_READ) { return nvme_blk_read(blk, offset, nvme_rw_complete_cb, req); } return nvme_blk_write(blk, offset, nvme_rw_complete_cb, req); } } out: nvme_rw_complete_cb(req, ret); }
0
[]
qemu
736b01642d85be832385063f278fe7cd4ffb5221
189,898,044,510,041,800,000,000,000,000,000,000,000
49
hw/nvme: fix CVE-2021-3929 This fixes CVE-2021-3929 "locally" by denying DMA to the iomem of the device itself. This still allows DMA to MMIO regions of other devices (e.g. doing P2P DMA to the controller memory buffer of another NVMe device). Fixes: CVE-2021-3929 Reported-by: Qiuhao Li <[email protected]> Reviewed-by: Keith Busch <[email protected]> Reviewed-by: Philippe Mathieu-Daudé <[email protected]> Signed-off-by: Klaus Jensen <[email protected]>
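The "local" fix amounts to an address-range check before starting DMA. A sketch of that predicate with illustrative fields; it uses the GCC/Clang __builtin_add_overflow() to reject wrap-around:

#include <stdbool.h>
#include <stdint.h>

/* Model of the fix: refuse any DMA whose target window overlaps the
 * controller's own register MMIO range. Field names and bounds are
 * illustrative, not QEMU's actual NVMe state layout. */
struct ctrl { uint64_t iomem_base, iomem_size; };

static bool dma_addr_allowed(const struct ctrl *n, uint64_t addr, uint64_t len)
{
    uint64_t end;
    if (__builtin_add_overflow(addr, len, &end))
        return false;                        /* wrap-around: reject */
    /* allow only if [addr, end) misses [iomem_base, iomem_base + size) */
    return end <= n->iomem_base || addr >= n->iomem_base + n->iomem_size;
}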
onigenc_mbn_mbc_case_fold(OnigEncoding enc, OnigCaseFoldType flag ARG_UNUSED, const UChar** pp, const UChar* end ARG_UNUSED, UChar* lower) { int len; const UChar *p = *pp; if (ONIGENC_IS_MBC_ASCII(p)) { *lower = ONIGENC_ASCII_CODE_TO_LOWER_CASE(*p); (*pp)++; return 1; } else { int i; len = enclen(enc, p, end); for (i = 0; i < len; i++) { *lower++ = *p++; } (*pp) += len; return len; /* return byte length of converted to lower char */ } }
0
[ "CWE-125" ]
Onigmo
d4cf99d30bd5f6a8a4ababd0b9d7b06f3a479a24
261,871,436,213,230,920,000,000,000,000,000,000,000
23
Fix out-of-bounds read in parse_char_class() (Close #139) /[\x{111111}]/ causes an out-of-bounds read when the encoding is a single-byte encoding. \x{111111} is an invalid codepoint for a single-byte encoding. Check whether it is a valid codepoint.
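The check the message calls for is tiny: in a single-byte encoding, any codepoint above 0xFF is invalid and must be rejected before it can index past an encoding table. A standalone sketch; the error constant named in the comment is Oniguruma's, mentioned only as a pointer to the real fix:

#include <stdint.h>

/* For a single-byte encoding every valid codepoint fits in one byte. */
static int single_byte_code_is_valid(uint32_t code)
{
    return code <= 0xff;
}

/* Reject \x{111111} at parse time instead of using it as an index. */
static int parse_class_codepoint(uint32_t code)
{
    if (!single_byte_code_is_valid(code))
        return -1;  /* ONIGERR_INVALID_CODE_POINT_VALUE in the real fix */
    return (int)code;
}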
static int starttls(struct transaction_t *txn, struct http_connection *conn) { int https = (txn == NULL); int result; SSL_CTX *ctx = NULL; if (!conn) conn = txn->conn; result=tls_init_serverengine("http", 5, /* depth to verify */ !https, /* can client auth? */ &ctx); if (result == -1) { syslog(LOG_ERR, "error initializing TLS"); if (txn) txn->error.desc = "Error initializing TLS"; return HTTP_SERVER_ERROR; } if (http2_enabled()) { #ifdef HAVE_TLS_ALPN /* enable TLS ALPN extension */ SSL_CTX_set_alpn_select_cb(ctx, alpn_select_cb, conn); #endif } if (!https) { /* tell client to start TLS upgrade (RFC 2817) */ response_header(HTTP_SWITCH_PROT, txn); } result=tls_start_servertls(0, /* read */ 1, /* write */ https ? 180 : httpd_timeout, &saslprops, (SSL **) &conn->tls_ctx); /* if error */ if (result == -1) { syslog(LOG_NOTICE, "starttls failed: %s", conn->clienthost); if (txn) txn->error.desc = "Error negotiating TLS"; return HTTP_BAD_REQUEST; } /* tell SASL about the negotiated layer */ result = saslprops_set_tls(&saslprops, httpd_saslconn); if (result != SASL_OK) { syslog(LOG_NOTICE, "saslprops_set_tls() failed: cmd_starttls()"); if (https == 0) { fatal("saslprops_set_tls() failed: cmd_starttls()", EX_TEMPFAIL); } else { shut_down(0); } } /* tell the prot layer about our new layers */ prot_settls(httpd_in, conn->tls_ctx); prot_settls(httpd_out, conn->tls_ctx); httpd_tls_required = 0; avail_auth_schemes |= AUTH_BASIC; return 0; }
0
[]
cyrus-imapd
602f12ed2af0a49ac4a58affbfea57d0fc23dea5
153,705,295,342,544,320,000,000,000,000,000,000,000
67
httpd.c: only allow reuse of auth creds on a persistent connection against a backend server in a Murder
static void read_conf(FILE *conffile) { char *buffer, *line, *val; buffer = loadfile(conffile); for (line = strtok(buffer, "\r\n"); line; line = strtok(NULL, "\r\n")) { if (!strncmp(line, "export ", 7)) continue; val = strchr(line, '='); if (!val) { printf("invalid configuration line\n"); break; } *val++ = '\0'; if (!strcmp(line, "JSON_INDENT")) conf.indent = atoi(val); if (!strcmp(line, "JSON_COMPACT")) conf.compact = atoi(val); if (!strcmp(line, "JSON_ENSURE_ASCII")) conf.ensure_ascii = atoi(val); if (!strcmp(line, "JSON_PRESERVE_ORDER")) conf.preserve_order = atoi(val); if (!strcmp(line, "JSON_SORT_KEYS")) conf.sort_keys = atoi(val); if (!strcmp(line, "STRIP")) conf.strip = atoi(val); if (!strcmp(line, "HASHSEED")) { conf.have_hashseed = 1; conf.hashseed = atoi(val); } else { conf.have_hashseed = 0; } } free(buffer); }
0
[ "CWE-310" ]
jansson
8f80c2d83808150724d31793e6ade92749b1faa4
118,272,487,429,679,510,000,000,000,000,000,000,000
37
CVE-2013-6401: Change hash function, randomize hashes Thanks to Florian Weimer and Eric Sesterhenn for reporting, reviewing and testing.
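A model of the mitigation: seed the hash per process from the OS entropy pool (with a weak fallback) and fold the seed into every hash value, so colliding keys cannot be precomputed. Mixing constants and fallback choice are illustrative, not jansson's:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>

static uint32_t hashseed;

/* Pick a per-process seed, preferring the OS entropy pool. */
static void seed_hash(void)
{
    FILE *f = fopen("/dev/urandom", "rb");
    if (!f || fread(&hashseed, sizeof(hashseed), 1, f) != 1)
        hashseed = (uint32_t)time(NULL) ^ (uint32_t)getpid(); /* weak fallback */
    if (f)
        fclose(f);
}

/* The seed perturbs every bucket choice, defeating precomputed collisions. */
static uint32_t hash_str(const char *s)
{
    uint32_t h = hashseed;
    while (*s)
        h = h * 31 + (unsigned char)*s++;
    return h;
}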
static bool vfswrap_brl_unlock_windows(struct vfs_handle_struct *handle, struct messaging_context *msg_ctx, struct byte_range_lock *br_lck, const struct lock_struct *plock) { SMB_ASSERT(plock->lock_flav == WINDOWS_LOCK); return brl_unlock_windows_default(msg_ctx, br_lck, plock); }
0
[ "CWE-665" ]
samba
30e724cbff1ecd90e5a676831902d1e41ec1b347
64,963,508,929,926,100,000,000,000,000,000,000,000
9
FSCTL_GET_SHADOW_COPY_DATA: Initialize output array to zero Otherwise num_volumes and the end marker can return uninitialized data to the client. Signed-off-by: Christof Schmitt <[email protected]> Reviewed-by: Jeremy Allison <[email protected]> Reviewed-by: Simo Sorce <[email protected]>
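The fix class is zero-initializing the response buffer so unwritten fields leave no heap residue. A sketch with an illustrative layout; the point is calloc()/talloc_zero-style allocation instead of plain malloc():

#include <stdlib.h>

/* Illustrative response layout, not Samba's actual structure. Any
 * field the handler never writes (num_volumes, the end marker) goes
 * out as 0 instead of leftover heap bytes. */
struct shadow_copy_data {
    unsigned num_volumes;
    char labels[4][64];
};

static struct shadow_copy_data *alloc_shadow_copy_data(void)
{
    return calloc(1, sizeof(struct shadow_copy_data)); /* not malloc() */
}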
MagickExport void ConvertRGBToHSL(const Quantum red,const Quantum green, const Quantum blue,double *hue,double *saturation,double *lightness) { double c, max, min; /* Convert RGB to HSL colorspace. */ assert(hue != (double *) NULL); assert(saturation != (double *) NULL); assert(lightness != (double *) NULL); max=MagickMax(QuantumScale*red,MagickMax(QuantumScale*green, QuantumScale*blue)); min=MagickMin(QuantumScale*red,MagickMin(QuantumScale*green, QuantumScale*blue)); c=max-min; *lightness=(max+min)/2.0; if (c <= 0.0) { *hue=0.0; *saturation=0.0; return; } if (fabs(max-QuantumScale*red) < MagickEpsilon) { *hue=(QuantumScale*green-QuantumScale*blue)/c; if ((QuantumScale*green) < (QuantumScale*blue)) *hue+=6.0; } else if (fabs(max-QuantumScale*green) < MagickEpsilon) *hue=2.0+(QuantumScale*blue-QuantumScale*red)/c; else *hue=4.0+(QuantumScale*red-QuantumScale*green)/c; *hue*=60.0/360.0; if (*lightness <= 0.5) *saturation=c/(2.0*(*lightness)); else *saturation=c/(2.0-2.0*(*lightness)); }
1
[]
ImageMagick6
64c0cc234280544dabacc2b28017521851deebde
64,316,640,286,512,380,000,000,000,000,000,000,000
43
https://github.com/ImageMagick/ImageMagick/issues/3321
status_to_message(u_int32_t status) { const char *status_messages[] = { "Success", /* SSH_FX_OK */ "End of file", /* SSH_FX_EOF */ "No such file", /* SSH_FX_NO_SUCH_FILE */ "Permission denied", /* SSH_FX_PERMISSION_DENIED */ "Failure", /* SSH_FX_FAILURE */ "Bad message", /* SSH_FX_BAD_MESSAGE */ "No connection", /* SSH_FX_NO_CONNECTION */ "Connection lost", /* SSH_FX_CONNECTION_LOST */ "Operation unsupported", /* SSH_FX_OP_UNSUPPORTED */ "Unknown error" /* Others */ }; return (status_messages[MINIMUM(status,SSH2_FX_MAX)]); }
0
[ "CWE-732", "CWE-703", "CWE-269" ]
src
a6981567e8e215acc1ef690c8dbb30f2d9b00a19
302,252,616,440,238,470,000,000,000,000,000,000,000
16
disallow creation (of empty files) in read-only mode; reported by Michal Zalewski, feedback & ok deraadt@
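The fix amounts to filtering open(2) flags in read-only mode so nothing that can create or modify a file gets through. A sketch of that predicate; the real sftp-server maps SSH2 pflags rather than raw O_* flags:

#include <fcntl.h>

/* In read-only mode, refuse any flag that could create or modify a
 * file, so open(..., O_CREAT) can no longer leave an empty file. */
static int flags_allowed_readonly(int flags)
{
    if (flags & (O_CREAT | O_TRUNC | O_APPEND))
        return 0;                         /* would write: refuse */
    return (flags & O_ACCMODE) == O_RDONLY;
}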
spool_sname(const uschar * purpose, uschar * subdir) { return string_sprintf("%s%s%s%s%s", queue_name, *queue_name ? "/" : "", purpose, *subdir ? "/" : "", subdir); }
0
[ "CWE-78" ]
exim
7ea1237c783e380d7bdb86c90b13d8203c7ecf26
113,941,964,680,245,870,000,000,000,000,000,000,000
7
Events: raise msg:fail:internal & msg:complete for -Mrm. Bug 2310
static struct page *follow_page_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, unsigned int flags) { struct mm_struct *mm = vma->vm_mm; struct dev_pagemap *pgmap = NULL; struct page *page; spinlock_t *ptl; pte_t *ptep, pte; retry: if (unlikely(pmd_bad(*pmd))) return no_page_table(vma, flags); ptep = pte_offset_map_lock(mm, pmd, address, &ptl); pte = *ptep; if (!pte_present(pte)) { swp_entry_t entry; /* * KSM's break_ksm() relies upon recognizing a ksm page * even while it is being migrated, so for that case we * need migration_entry_wait(). */ if (likely(!(flags & FOLL_MIGRATION))) goto no_page; if (pte_none(pte)) goto no_page; entry = pte_to_swp_entry(pte); if (!is_migration_entry(entry)) goto no_page; pte_unmap_unlock(ptep, ptl); migration_entry_wait(mm, pmd, address); goto retry; } if ((flags & FOLL_NUMA) && pte_protnone(pte)) goto no_page; if ((flags & FOLL_WRITE) && !pte_write(pte)) { pte_unmap_unlock(ptep, ptl); return NULL; } page = vm_normal_page(vma, address, pte); if (!page && pte_devmap(pte) && (flags & FOLL_GET)) { /* * Only return device mapping pages in the FOLL_GET case since * they are only valid while holding the pgmap reference. */ pgmap = get_dev_pagemap(pte_pfn(pte), NULL); if (pgmap) page = pte_page(pte); else goto no_page; } else if (unlikely(!page)) { if (flags & FOLL_DUMP) { /* Avoid special (like zero) pages in core dumps */ page = ERR_PTR(-EFAULT); goto out; } if (is_zero_pfn(pte_pfn(pte))) { page = pte_page(pte); } else { int ret; ret = follow_pfn_pte(vma, address, ptep, flags); page = ERR_PTR(ret); goto out; } } if (flags & FOLL_SPLIT && PageTransCompound(page)) { int ret; get_page(page); pte_unmap_unlock(ptep, ptl); lock_page(page); ret = split_huge_page(page); unlock_page(page); put_page(page); if (ret) return ERR_PTR(ret); goto retry; } if (flags & FOLL_GET) { get_page(page); /* drop the pgmap reference now that we hold the page */ if (pgmap) { put_dev_pagemap(pgmap); pgmap = NULL; } } if (flags & FOLL_TOUCH) { if ((flags & FOLL_WRITE) && !pte_dirty(pte) && !PageDirty(page)) set_page_dirty(page); /* * pte_mkyoung() would be more correct here, but atomic care * is needed to avoid losing the dirty bit: it is easier to use * mark_page_accessed(). */ mark_page_accessed(page); } if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { /* Do not mlock pte-mapped THP */ if (PageTransCompound(page)) goto out; /* * The preliminary mapping check is mainly to avoid the * pointless overhead of lock_page on the ZERO_PAGE * which might bounce very badly if there is contention. * * If the page is already locked, we don't need to * handle it now - vmscan will handle it later if and * when it attempts to reclaim the page. */ if (page->mapping && trylock_page(page)) { lru_add_drain(); /* push cached pages to LRU */ /* * Because we lock page here, and migration is * blocked by the pte's page reference, and we * know the page is still mapped, we don't even * need to check for file-cache page truncation. */ mlock_vma_page(page); unlock_page(page); } } out: pte_unmap_unlock(ptep, ptl); return page; no_page: pte_unmap_unlock(ptep, ptl); if (!pte_none(pte)) return NULL; return no_page_table(vma, flags); }
1
[ "CWE-362" ]
linux
19be0eaffa3ac7d8eb6784ad9bdbc7d67ed8e619
1,422,580,736,863,415,300,000,000,000,000,000,000
137
mm: remove gup_flags FOLL_WRITE games from __get_user_pages() This is an ancient bug that was actually attempted to be fixed once (badly) by me eleven years ago in commit 4ceb5db9757a ("Fix get_user_pages() race for write access") but that was then undone due to problems on s390 by commit f33ea7f404e5 ("fix get_user_pages bug"). In the meantime, the s390 situation has long been fixed, and we can now fix it by checking the pte_dirty() bit properly (and do it better). The s390 dirty bit was implemented in abf09bed3cce ("s390/mm: implement software dirty bits") which made it into v3.9. Earlier kernels will have to look at the page state itself. Also, the VM has become more scalable, and what used to be a purely theoretical race back then has become easier to trigger. To fix it, we introduce a new internal FOLL_COW flag to mark the "yes, we already did a COW" rather than play racy games with FOLL_WRITE that is very fundamental, and then use the pte dirty flag to validate that the FOLL_COW flag is still valid. Reported-and-tested-by: Phil "not Paul" Oester <[email protected]> Acked-by: Hugh Dickins <[email protected]> Reviewed-by: Michal Hocko <[email protected]> Cc: Andy Lutomirski <[email protected]> Cc: Kees Cook <[email protected]> Cc: Oleg Nesterov <[email protected]> Cc: Willy Tarreau <[email protected]> Cc: Nick Piggin <[email protected]> Cc: Greg Thelen <[email protected]> Cc: [email protected] Signed-off-by: Linus Torvalds <[email protected]>
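The essence of the fix above is a single predicate: a FOLL_FORCE write through a read-only pte is honoured only if the COW already happened, and the pte dirty bit is the proof. Below is a minimal compilable model of that rule in plain C; `pte_model` and its fields are stand-ins, not the kernel's types, and only the flag values mirror the real patch.

```c
#include <stdbool.h>

/* Stand-in values modelling the kernel's gup flags; FOLL_COW (0x4000)
 * is the flag the fix introduces. */
#define FOLL_WRITE 0x01u
#define FOLL_FORCE 0x10u
#define FOLL_COW   0x4000u

struct pte_model { bool writable; bool dirty; };

/* Post-fix rule: a forced write through a read-only pte is followed
 * only if we already broke COW (FOLL_COW) AND the pte is dirty, which
 * proves the private copy was not undone behind our back. */
static bool can_follow_write_pte(struct pte_model pte, unsigned int flags)
{
    return pte.writable ||
           ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte.dirty);
}
```

Roughly, a cleared dirty bit means the private copy is gone again, so the forced write must fault once more instead of being followed.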
static int qemu_chr_write_all(CharDriverState *s, const uint8_t *buf, int len) { int offset; int res; if (s->replay && replay_mode == REPLAY_MODE_PLAY) { replay_char_write_event_load(&res, &offset); assert(offset <= len); qemu_chr_fe_write_buffer(s, buf, offset, &offset); return res; } res = qemu_chr_fe_write_buffer(s, buf, len, &offset); if (s->replay && replay_mode == REPLAY_MODE_RECORD) { replay_char_write_event_save(res, offset); } if (res < 0) { return res; } return offset; }
0
[ "CWE-416" ]
qemu
a4afa548fc6dd9842ed86639b4d37d4d1c4ad480
238,918,482,396,094,780,000,000,000,000,000,000,000
23
char: move front end handlers in CharBackend Since the handlers are associated with a CharBackend, rather than the CharDriverState, it is more appropriate to store them in CharBackend. This avoids the handler copy dance in qemu_chr_fe_set_handlers() then mux_chr_update_read_handler(), by storing the CharBackend pointer directly. Also a mux CharDriver should go through mux->backends[focused], since chr->be will stay NULL. Before that, it was possible to call chr->handler by mistake with surprising results, for example through qemu_chr_be_can_write(), which would result in calling the last set handler front end, not the one with focus. Signed-off-by: Marc-André Lureau <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
prepare_error_tgs (struct kdc_request_state *state, krb5_kdc_req *request, krb5_ticket *ticket, int error, krb5_principal canon_server, krb5_data **response, const char *status, krb5_pa_data **e_data) { krb5_error errpkt; krb5_error_code retval = 0; krb5_data *scratch, *e_data_asn1 = NULL, *fast_edata = NULL; kdc_realm_t *kdc_active_realm = state->realm_data; errpkt.ctime = request->nonce; errpkt.cusec = 0; if ((retval = krb5_us_timeofday(kdc_context, &errpkt.stime, &errpkt.susec))) return(retval); errpkt.error = error; errpkt.server = request->server; if (ticket && ticket->enc_part2) errpkt.client = ticket->enc_part2->client; else errpkt.client = NULL; errpkt.text.length = strlen(status); if (!(errpkt.text.data = strdup(status))) return ENOMEM; if (!(scratch = (krb5_data *)malloc(sizeof(*scratch)))) { free(errpkt.text.data); return ENOMEM; } if (e_data != NULL) { retval = encode_krb5_padata_sequence(e_data, &e_data_asn1); if (retval) { free(scratch); free(errpkt.text.data); return retval; } errpkt.e_data = *e_data_asn1; } else errpkt.e_data = empty_data(); if (state) { retval = kdc_fast_handle_error(kdc_context, state, request, e_data, &errpkt, &fast_edata); } if (retval) { free(scratch); free(errpkt.text.data); krb5_free_data(kdc_context, e_data_asn1); return retval; } if (fast_edata) errpkt.e_data = *fast_edata; retval = krb5_mk_error(kdc_context, &errpkt, scratch); free(errpkt.text.data); krb5_free_data(kdc_context, e_data_asn1); krb5_free_data(kdc_context, fast_edata); if (retval) free(scratch); else *response = scratch; return retval; }
0
[ "CWE-20" ]
krb5
4c023ba43c16396f0d199e2df1cfa59b88b62acc
201,408,506,978,008,640,000,000,000,000,000,000,000
66
KDC null deref due to referrals [CVE-2013-1417] An authenticated remote client can cause a KDC to crash by making a valid TGS-REQ to a KDC serving a realm with a single-component name. The process_tgs_req() function dereferences a null pointer because an unusual failure condition causes a helper function to return success. While attempting to provide cross-realm referrals for host-based service principals, the find_referral_tgs() function could return a TGS principal for a zero-length realm name (indicating that the hostname in the service principal has no known realm associated with it). Subsequently, the find_alternate_tgs() function would attempt to construct a path to this empty-string realm, and return success along with a null pointer in its output parameter. This happens because krb5_walk_realm_tree() returns a list of length one when it attempts to construct a transit path between a single-component realm and the empty-string realm. This list causes a loop in find_alternate_tgs() to iterate over zero elements, resulting in the unexpected output of a null pointer, which process_tgs_req() proceeds to dereference because there is no error condition. Add an error condition to find_referral_tgs() when krb5_get_host_realm() returns an empty realm name. Also add an error condition to find_alternate_tgs() to handle the length-one output from krb5_walk_realm_tree(). The vulnerable configuration is not likely to arise in practice. (Realm names that have a single component are likely to be test realms.) Releases prior to krb5-1.11 are not vulnerable. Thanks to Sol Jerome for reporting this problem. CVSSv2: AV:N/AC:M/Au:S/C:N/I:N/A:P/E:H/RL:O/RC:C (cherry picked from commit 3c7f1c21ffaaf6c90f1045f0f5440303c766acc0) ticket: 7668 version_fixed: 1.11.4 status: resolved
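The guard the message describes is small: once krb5_get_host_realm() hands back an empty realm name, the referral path must fail instead of reporting success. A compilable sketch of that test follows; the function name and the way the realm list is passed are assumptions for illustration, not the verbatim patch.

```c
#include <stddef.h>

/* Model of the added check: a referral may only be built when the
 * host-realm lookup produced a non-empty realm name.  An empty string
 * means "realm unknown" and must be treated as an error, not success. */
static int referral_realm_usable(char *const *realms)
{
    return realms != NULL && realms[0] != NULL && realms[0][0] != '\0';
}
```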
krb5_gss_get_mic_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle, gss_qop_t qop_req, gss_iov_buffer_desc *iov, int iov_count) { OM_uint32 major_status; major_status = kg_seal_iov(minor_status, context_handle, FALSE, qop_req, NULL, iov, iov_count, KG_TOK_MIC_MSG); return major_status; }
0
[ "CWE-703" ]
krb5
82dc33da50338ac84c7b4102dc6513d897d0506a
209,088,999,487,501,600,000,000,000,000,000,000,000
14
Fix gss_process_context_token() [CVE-2014-5352] [MITKRB5-SA-2015-001] The krb5 gss_process_context_token() should not actually delete the context; that leaves the caller with a dangling pointer and no way to know that it is invalid. Instead, mark the context as terminated, and check for terminated contexts in the GSS functions which expect established contexts. Also add checks in export_sec_context and pseudo_random, and adjust t_prf.c for the pseudo_random check. ticket: 8055 (new) target_version: 1.13.1 tags: pullup
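The pattern behind the fix is a tombstone: mark the context terminated instead of freeing it, then have every per-message entry point refuse terminated contexts. A compilable model follows; the struct layout and names are placeholders, not krb5's internals.

```c
#include <stdbool.h>
#include <stddef.h>

/* Placeholder struct -- not krb5's real context layout. */
struct gss_ctx_model {
    bool established;
    bool terminated;   /* tombstone set instead of freeing the context */
};

/* Per-message entry points check the tombstone first: the peer's
 * delete token can no longer leave the caller with a dangling pointer,
 * and the caller still releases the context through the normal path. */
static bool gss_ctx_usable(const struct gss_ctx_model *ctx)
{
    return ctx != NULL && ctx->established && !ctx->terminated;
}
```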
void SpatialAvgPool(OpKernelContext* context, Tensor* output, const Tensor& input, const PoolParameters& params, const Padding& padding) { if (output->NumElements() == 0) { return; } typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> ConstEigenMatrixMap; typedef Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> EigenMatrixMap; auto in_flat = input.flat<T>(); auto out_flat = output->flat<T>(); auto shard = [&params, &in_flat, &out_flat](int64_t start, int64_t limit) { // Calculate indices for this shards chunk of work. const int64_t input_image_size = params.tensor_in_rows * params.tensor_in_cols * params.depth; const int64_t output_image_size = params.out_width * params.out_height * params.depth; const int64_t shard_batch_size = limit - start; ConstEigenMatrixMap in_mat( in_flat.data() + start * input_image_size, params.depth, params.tensor_in_cols * params.tensor_in_rows * shard_batch_size); EigenMatrixMap out_mat( out_flat.data() + start * output_image_size, params.depth, params.out_width * params.out_height * shard_batch_size); Eigen::Matrix<T, Eigen::Dynamic, 1> out_count(out_mat.cols()); out_count.setZero(); // Initializes output to zero. out_mat.setZero(); // The following code basically does the following: // 1. Flattens the input and output tensors into two dimensional arrays. // tensor_in_as_matrix: // depth by (tensor_in_cols * tensor_in_rows * tensor_in_batch) // output_as_matrix: // depth by (out_width * out_height * tensor_in_batch) // // 2. Walks through the set of columns in the flattened // tensor_in_as_matrix, // and updates the corresponding column(s) in output_as_matrix with the // average value. for (int b = 0; b < shard_batch_size; ++b) { for (int h = 0; h < params.tensor_in_rows; ++h) { for (int w = 0; w < params.tensor_in_cols; ++w) { // (h_start, h_end) * (w_start, w_end) is the range that the input // vector projects to. const int hpad = h + params.pad_top; const int wpad = w + params.pad_left; const int h_start = (hpad < params.window_rows) ? 0 : (hpad - params.window_rows) / params.row_stride + 1; const int h_end = std::min<int>(hpad / params.row_stride + 1, params.out_height); const int w_start = (wpad < params.window_cols) ? 0 : (wpad - params.window_cols) / params.col_stride + 1; const int w_end = std::min<int>(wpad / params.col_stride + 1, params.out_width); const int in_offset = (b * params.tensor_in_rows + h) * params.tensor_in_cols + w; Eigen::DSizes<Eigen::DenseIndex, 2> in_indices(0, in_offset); for (int ph = h_start; ph < h_end; ++ph) { for (int pw = w_start; pw < w_end; ++pw) { const int out_offset = (b * params.out_height + ph) * params.out_width + pw; out_mat.col(out_offset) += in_mat.col(in_offset); out_count(out_offset) += T(1); } } } } } DCHECK_GT(out_count.minCoeff(), T(0)); out_mat.array().rowwise() /= out_count.transpose().array(); }; const int64_t work_unit_size = params.tensor_in_rows * params.tensor_in_cols * params.depth; // NOTE: Constants in calculation below were estimated based on benchmarking. // Nanoseconds/work_unit for benchmarks ranged from 0.01 to 0.001, and // so the factor 0.01 (i.e. 1/100) with a max of 10000, was chosen to limit // the work unit cost to an operating range in which it empirically performed // best. const int64_t work_unit_cost = std::max(int64_t{10000}, work_unit_size / 100); const DeviceBase::CpuWorkerThreads& worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); Shard(worker_threads.num_threads, worker_threads.workers, params.tensor_in_batch, work_unit_cost, shard); }
0
[ "CWE-354" ]
tensorflow
4dddb2fd0b01cdd196101afbba6518658a2c9e07
93,816,996,596,922,280,000,000,000,000,000,000,000
96
Fix segfault in pools on empty shapes when certain dimensions were very large. Pooling ops multiply certain components of the input shape, e.g. by multiplying input.shape[1] * input.shape[2] * input.shape[3]. This multiplication could overflow an int64 value if shape[0] was 0 but shape[1], shape[2], and shape[3] were very large, e.g. by passing an input with shape (0, 2**25, 2**25, 2**25). PiperOrigin-RevId: 404644978 Change-Id: Ic79f89c970357ca2962b1f231449066db9403146
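The arithmetic in the message is worth making concrete: with shape (0, 2**25, 2**25, 2**25) the tensor is empty, yet the per-image product dims[1]*dims[2]*dims[3] is 2**75 and wraps an int64. A self-contained sketch of an overflow-checked product (an illustration, not TensorFlow's actual helper):

```c
#include <stdint.h>
#include <stdio.h>

/* Overflow-checked product of dimensions.  Returns 1 and stores the
 * product iff it fits in int64_t; note the zero test must come before
 * the division guard. */
static int checked_num_elements(const int64_t *dims, int n, int64_t *out)
{
    int64_t acc = 1;
    for (int i = 0; i < n; i++) {
        if (dims[i] < 0)
            return 0;
        if (dims[i] != 0 && acc > INT64_MAX / dims[i])
            return 0;                         /* would overflow */
        acc *= dims[i];
    }
    *out = acc;
    return 1;
}

int main(void)
{
    const int64_t shape[4] = {0, INT64_C(1) << 25, INT64_C(1) << 25,
                              INT64_C(1) << 25};
    int64_t n = 0;
    /* Whole shape is fine: the leading 0 keeps the product at 0 ... */
    printf("all dims ok: %d (n=%lld)\n",
           checked_num_elements(shape, 4, &n), (long long)n);
    /* ... but the per-image product dims[1]*dims[2]*dims[3] is 2^75
     * and must be rejected rather than silently wrapped. */
    printf("image dims ok: %d\n", checked_num_elements(shape + 1, 3, &n));
    return 0;
}
```

Checking before each multiplication, as above, also covers shapes where the zero dimension comes last.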
QPDFObjectHandle::newNull() { return QPDFObjectHandle(new QPDF_Null()); }
0
[ "CWE-835" ]
qpdf
afe0242b263a9e1a8d51dd81e42ab6de2e5127eb
251,106,360,306,852,000,000,000,000,000,000,000,000
4
Handle object ID 0 (fixes #99) This is CVE-2017-9208. The QPDF library uses object ID 0 internally as a sentinel to represent a direct object, but prior to this fix it was not blocking handling of 0 0 obj or 0 0 R as a special case. Creating an object in the file with 0 0 obj could cause various infinite loops. The PDF spec doesn't allow for object 0. Having qpdf handle object 0 might be a better fix, but changing all the places in the code that assume objid == 0 means direct would be risky.
void dumpState(std::ostream& os, int indent_level = 0) const override { HeaderMapImpl::dumpState(os, indent_level); }
0
[]
envoy
2c60632d41555ec8b3d9ef5246242be637a2db0f
274,346,657,827,517,770,000,000,000,000,000,000,000
3
http: header map security fixes for duplicate headers (#197) Previously header matching did not match on all headers for non-inline headers. This patch changes the default behavior to always logically match on all headers. Multiple individual headers will be logically concatenated with ',' similar to what is done with inline headers. This makes the behavior effectively consistent. This behavior can be temporarily reverted by setting the runtime value "envoy.reloadable_features.header_match_on_all_headers" to "false". Targeted fixes have been additionally performed on the following extensions which make them consider all duplicate headers by default as a comma concatenated list: 1) Any extension using CEL matching on headers. 2) The header to metadata filter. 3) The JWT filter. 4) The Lua filter. Like primary header matching used in routing, RBAC, etc. this behavior can be disabled by setting the runtime value "envoy.reloadable_features.header_match_on_all_headers" to false. Finally, the setCopy() header map API previously only set the first header in the case of duplicate non-inline headers. setCopy() now behaves similarly to the other set*() APIs and replaces all found headers with a single value. This may have had security implications in the extauth filter which uses this API. This behavior can be disabled by setting the runtime value "envoy.reloadable_features.http_set_copy_replace_all_headers" to false. Fixes https://github.com/envoyproxy/envoy-setec/issues/188 Signed-off-by: Matt Klein <[email protected]>
static void qxl_check_state(PCIQXLDevice *d) { QXLRam *ram = d->ram; int spice_display_running = qemu_spice_display_is_running(&d->ssd); assert(!spice_display_running || SPICE_RING_IS_EMPTY(&ram->cmd_ring)); assert(!spice_display_running || SPICE_RING_IS_EMPTY(&ram->cursor_ring)); }
0
[ "CWE-476" ]
qemu
d52680fc932efb8a2f334cc6993e705ed1e31e99
141,281,114,048,742,480,000,000,000,000,000,000,000
8
qxl: check release info object When releasing spice resources in the release_resource() routine, if the release info object 'ext.info' is null, it leads to a null pointer dereference. Add a check to avoid it. Reported-by: Bugs SysSec <[email protected]> Signed-off-by: Prasad J Pandit <[email protected]> Message-id: [email protected] Signed-off-by: Gerd Hoffmann <[email protected]>
**/ CImg<Tfloat> operator%(const char *const expression) const { return CImg<Tfloat>(*this,false)%=expression;
0
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
202,088,470,804,707,830,000,000,000,000,000,000,000
3
Fix other issues in 'CImg<T>::load_bmp()'.
bool open_and_lock_tables(THD *thd, const DDL_options_st &options, TABLE_LIST *tables, bool derived, uint flags, Prelocking_strategy *prelocking_strategy) { uint counter; MDL_savepoint mdl_savepoint= thd->mdl_context.mdl_savepoint(); DBUG_ENTER("open_and_lock_tables"); DBUG_PRINT("enter", ("derived handling: %d", derived)); if (open_tables(thd, options, &tables, &counter, flags, prelocking_strategy)) goto err; DBUG_EXECUTE_IF("sleep_open_and_lock_after_open", { const char *old_proc_info= thd->proc_info; thd->proc_info= "DBUG sleep"; my_sleep(6000000); thd->proc_info= old_proc_info;}); if (lock_tables(thd, tables, counter, flags)) goto err; (void) read_statistics_for_tables_if_needed(thd, tables); if (derived) { if (mysql_handle_derived(thd->lex, DT_INIT)) goto err; if (thd->prepare_derived_at_open && (mysql_handle_derived(thd->lex, DT_PREPARE))) goto err; } DBUG_RETURN(FALSE); err: if (! thd->in_sub_stmt) trans_rollback_stmt(thd); /* Necessary if derived handling failed. */ close_thread_tables(thd); /* Don't keep locks for a failed statement. */ thd->mdl_context.rollback_to_savepoint(mdl_savepoint); DBUG_RETURN(TRUE); }
0
[]
server
0168d1eda30dad4b517659422e347175eb89e923
93,787,756,599,495,160,000,000,000,000,000,000,000
42
MDEV-25766 Unused CTE lead to a crash in find_field_in_tables/find_order_in_list Do not assume that the subquery Item is always present.
static void reset_adv_monitors(uint16_t index) { struct mgmt_cp_remove_adv_monitor cp; DBG("sending remove Adv Monitor command with handle 0"); /* Handle 0 indicates to remove all */ cp.monitor_handle = 0; if (mgmt_send(mgmt_master, MGMT_OP_REMOVE_ADV_MONITOR, index, sizeof(cp), &cp, reset_adv_monitors_complete, NULL, NULL) > 0) { return; } error("Failed to reset Adv Monitors"); }
0
[ "CWE-862", "CWE-863" ]
bluez
b497b5942a8beb8f89ca1c359c54ad67ec843055
280,389,009,798,733,200,000,000,000,000,000,000,000
16
adapter: Fix storing discoverable setting The discoverable setting shall only be stored when changed via the Discoverable property, and not when a discovery client sets it, as that is considered temporary, just for the lifetime of the discovery.
T& atXY(const int x, const int y, const int z=0, const int c=0) { if (is_empty()) throw CImgInstanceException(_cimg_instance "atXY(): Empty instance.", cimg_instance); return _atXY(x,y,z,c); }
0
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
18,746,282,718,936,814,000,000,000,000,000,000,000
7
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that the dimensions encoded in the file do not exceed the file size.
TABLE *find_locked_table(TABLE *list, const char *db, const char *table_name) { char key[MAX_DBKEY_LENGTH]; uint key_length= tdc_create_key(key, db, table_name); for (TABLE *table= list; table ; table=table->next) { if (table->s->table_cache_key.length == key_length && !memcmp(table->s->table_cache_key.str, key, key_length)) return table; } return(0); }
0
[]
server
0168d1eda30dad4b517659422e347175eb89e923
169,112,046,288,846,400,000,000,000,000,000,000,000
13
MDEV-25766 Unused CTE lead to a crash in find_field_in_tables/find_order_in_list Do not assume that the subquery Item is always present.
toVAlign(char *oval, int *valign) { if (strcasecmp(oval, "top") == 0 || strcasecmp(oval, "baseline") == 0) *valign = VALIGN_TOP; else if (strcasecmp(oval, "bottom") == 0) *valign = VALIGN_BOTTOM; else if (strcasecmp(oval, "middle") == 0) *valign = VALIGN_MIDDLE; else return 0; return 1; }
0
[ "CWE-20", "CWE-476" ]
w3m
33509cc81ec5f2ba44eb6fd98bd5c1b5873e46bd
285,585,383,957,004,170,000,000,000,000,000,000,000
12
Fix uninitialised values for <i> and <dd> Bug-Debian: https://github.com/tats/w3m/issues/16
daemon_msg_open_req(uint8 ver, struct daemon_slpars *pars, uint32 plen, char *source, size_t sourcelen) { char errbuf[PCAP_ERRBUF_SIZE]; // buffer for network errors char errmsgbuf[PCAP_ERRBUF_SIZE]; // buffer for errors to send to the client pcap_t *fp; // pcap_t main variable int nread; char sendbuf[RPCAP_NETBUF_SIZE]; // temporary buffer in which data to be sent is buffered int sendbufidx = 0; // index which keeps the number of bytes currently buffered struct rpcap_openreply *openreply; // open reply message if (plen > sourcelen - 1) { pcap_snprintf(errmsgbuf, PCAP_ERRBUF_SIZE, "Source string too long"); goto error; } nread = sock_recv(pars->sockctrl, source, plen, SOCK_RECEIVEALL_YES|SOCK_EOF_IS_ERROR, errbuf, PCAP_ERRBUF_SIZE); if (nread == -1) { rpcapd_log(LOGPRIO_ERROR, "Read from client failed: %s", errbuf); return -1; } source[nread] = '\0'; plen -= nread; // XXX - make sure it's *not* a URL; we don't support opening // remote devices here. // Open the selected device // This is a fake open, since we do that only to get the needed parameters, then we close the device again if ((fp = pcap_open_live(source, 1500 /* fake snaplen */, 0 /* no promis */, 1000 /* fake timeout */, errmsgbuf)) == NULL) goto error; // Now, I can send a RPCAP open reply message if (sock_bufferize(NULL, sizeof(struct rpcap_header), NULL, &sendbufidx, RPCAP_NETBUF_SIZE, SOCKBUF_CHECKONLY, errmsgbuf, PCAP_ERRBUF_SIZE) == -1) goto error; rpcap_createhdr((struct rpcap_header *) sendbuf, ver, RPCAP_MSG_OPEN_REPLY, 0, sizeof(struct rpcap_openreply)); openreply = (struct rpcap_openreply *) &sendbuf[sendbufidx]; if (sock_bufferize(NULL, sizeof(struct rpcap_openreply), NULL, &sendbufidx, RPCAP_NETBUF_SIZE, SOCKBUF_CHECKONLY, errmsgbuf, PCAP_ERRBUF_SIZE) == -1) goto error; memset(openreply, 0, sizeof(struct rpcap_openreply)); openreply->linktype = htonl(pcap_datalink(fp)); openreply->tzoff = 0; /* This is always 0 for live captures */ // We're done with the pcap_t. pcap_close(fp); // Send the reply. if (sock_send(pars->sockctrl, sendbuf, sendbufidx, errbuf, PCAP_ERRBUF_SIZE) == -1) { rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf); return -1; } return 0; error: if (rpcap_senderror(pars->sockctrl, ver, PCAP_ERR_OPEN, errmsgbuf, errbuf) == -1) { // That failed; log a message and give up. rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf); return -1; } // Check if all the data has been read; if not, discard the data in excess if (rpcapd_discard(pars->sockctrl, plen) == -1) { return -1; } return 0; }
1
[ "CWE-703", "CWE-918" ]
libpcap
33834cb2a4d035b52aa2a26742f832a112e90a0a
236,360,285,436,192,250,000,000,000,000,000,000,000
84
In the open request, reject capture sources that are URLs. You shouldn't be able to ask a server to open a remote device on some *other* server; just open it yourself. This addresses Include Security issue F13: [libpcap] Remote Packet Capture Daemon Allows Opening Capture URLs.
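A compilable sketch of the kind of test this implies: before pcap_open_live(), refuse any source string that carries a URL scheme. The helper name and the exact scheme grammar here are assumptions; the real check may be stricter.

```c
#include <string.h>
#include <ctype.h>

/* Does the capture source look like "scheme://..."?  The daemon must
 * only open local devices, so such sources are rejected up front.
 * (Simplified: scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." ),
 * loosely after RFC 3986; the real check may differ in detail.) */
static int looks_like_url(const char *source)
{
    const char *p = strstr(source, "://");
    const char *q;

    if (p == NULL || p == source)
        return 0;
    if (!isalpha((unsigned char)source[0]))
        return 0;
    for (q = source; q < p; q++)
        if (!isalnum((unsigned char)*q) &&
            *q != '+' && *q != '-' && *q != '.')
            return 0;
    return 1;
}
```

daemon_msg_open_req() would then answer such sources with the usual error reply instead of opening them.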
void wait_for_device_probe(void) { /* wait for probe timeout */ wait_event(probe_timeout_waitqueue, !driver_deferred_probe_timeout); /* wait for the deferred probe workqueue to finish */ flush_work(&deferred_probe_work); /* wait for the known devices to complete their probing */ wait_event(probe_waitqueue, atomic_read(&probe_count) == 0); async_synchronize_full(); }
0
[ "CWE-787" ]
linux
aa838896d87af561a33ecefea1caa4c15a68bc47
17,829,170,913,940,864,000,000,000,000,000,000,000
12
drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions Convert the various sprintf family calls in sysfs device show functions to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety. Done with: $ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 . And cocci script: $ cat sysfs_emit_dev.cocci @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - sprintf(buf, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - snprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - scnprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; expression chr; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - strcpy(buf, chr); + sysfs_emit(buf, chr); ...> } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - sprintf(buf, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - snprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - scnprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... - len += scnprintf(buf + len, PAGE_SIZE - len, + len += sysfs_emit_at(buf, len, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; expression chr; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { ... - strcpy(buf, chr); - return strlen(buf); + return sysfs_emit(buf, chr); } Signed-off-by: Joe Perches <[email protected]> Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com Signed-off-by: Greg Kroah-Hartman <[email protected]>
~RGWPutLC() override { free(data); }
0
[ "CWE-770" ]
ceph
ab29bed2fc9f961fe895de1086a8208e21ddaddc
162,008,446,378,977,720,000,000,000,000,000,000,000
3
rgw: fix issues with 'enforce bounds' patch The patch to enforce bounds on max-keys/max-uploads/max-parts had a few issues that would prevent us from compiling it. Instead of changing the code provided by the submitter, we're addressing them in a separate commit to maintain the DCO. Signed-off-by: Joao Eduardo Luis <[email protected]> Signed-off-by: Abhishek Lekshmanan <[email protected]> (cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a) mimic-specific fixes: As the largeish g_conf() change from master isn't in mimic yet, use the g_conf global structure; also make rgw_op use the value from the req_info ceph context, as we do for all the requests.
push_render_image(Str str, int width, int limit, struct html_feed_environ *h_env) { struct readbuffer *obuf = h_env->obuf; int indent = h_env->envs[h_env->envc].indent; push_spaces(obuf, 1, (limit - width) / 2); push_str(obuf, width, str, PC_ASCII); push_spaces(obuf, 1, (limit - width + 1) / 2); if (width > 0) flushline(h_env, obuf, indent, 0, h_env->limit); }
0
[ "CWE-476" ]
w3m
59b91cd8e30c86f23476fa81ae005cabff49ebb6
272,030,003,524,599,660,000,000,000,000,000,000,000
12
Prevent segfault with malformed input type Bug-Debian: https://github.com/tats/w3m/issues/7
Decode a modified UTF-7 string */ PHP_FUNCTION(imap_utf7_decode) { /* author: Andrew Skalski <[email protected]> */ zend_string *arg; const unsigned char *in, *inp, *endp; unsigned char *out, *outp; unsigned char c; int inlen, outlen; enum { ST_NORMAL, /* printable text */ ST_DECODE0, /* encoded text rotation... */ ST_DECODE1, ST_DECODE2, ST_DECODE3 } state; if (zend_parse_parameters(ZEND_NUM_ARGS(), "S", &arg) == FAILURE) { return; } in = (const unsigned char *) ZSTR_VAL(arg); inlen = ZSTR_LEN(arg); /* validate and compute length of output string */ outlen = 0; state = ST_NORMAL; for (endp = (inp = in) + inlen; inp < endp; inp++) { if (state == ST_NORMAL) { /* process printable character */ if (SPECIAL(*inp)) { php_error_docref(NULL, E_WARNING, "Invalid modified UTF-7 character: `%c'", *inp); RETURN_FALSE; } else if (*inp != '&') { outlen++; } else if (inp + 1 == endp) { php_error_docref(NULL, E_WARNING, "Unexpected end of string"); RETURN_FALSE; } else if (inp[1] != '-') { state = ST_DECODE0; } else { outlen++; inp++; } } else if (*inp == '-') { /* return to NORMAL mode */ if (state == ST_DECODE1) { php_error_docref(NULL, E_WARNING, "Stray modified base64 character: `%c'", *--inp); RETURN_FALSE; } state = ST_NORMAL; } else if (!B64CHAR(*inp)) { php_error_docref(NULL, E_WARNING, "Invalid modified base64 character: `%c'", *inp); RETURN_FALSE; } else { switch (state) { case ST_DECODE3: outlen++; state = ST_DECODE0; break; case ST_DECODE2: case ST_DECODE1: outlen++; case ST_DECODE0: state++; case ST_NORMAL: break; } } } /* enforce end state */ if (state != ST_NORMAL) { php_error_docref(NULL, E_WARNING, "Unexpected end of string"); RETURN_FALSE; } /* allocate output buffer */ out = emalloc(outlen + 1); /* decode input string */ outp = out; state = ST_NORMAL; for (endp = (inp = in) + inlen; inp < endp; inp++) { if (state == ST_NORMAL) { if (*inp == '&' && inp[1] != '-') { state = ST_DECODE0; } else if ((*outp++ = *inp) == '&') { inp++; } } else if (*inp == '-') { state = ST_NORMAL; } else { /* decode input character */ switch (state) { case ST_DECODE0: *outp = UNB64(*inp) << 2; state = ST_DECODE1; break; case ST_DECODE1: outp[1] = UNB64(*inp); c = outp[1] >> 4; *outp++ |= c; *outp <<= 4; state = ST_DECODE2; break; case ST_DECODE2: outp[1] = UNB64(*inp); c = outp[1] >> 2; *outp++ |= c; *outp <<= 6; state = ST_DECODE3; break; case ST_DECODE3: *outp++ |= UNB64(*inp); state = ST_DECODE0; case ST_NORMAL: break; } } } *outp = 0; #if PHP_DEBUG /* warn if we computed outlen incorrectly */ if (outp - out != outlen) { php_error_docref(NULL, E_WARNING, "outp - out [%zd] != outlen [%d]", outp - out, outlen); } #endif RETURN_STRINGL((char*)out, outlen);
0
[ "CWE-88" ]
php-src
336d2086a9189006909ae06c7e95902d7d5ff77e
294,295,765,307,140,740,000,000,000,000,000,000,000
135
Disable rsh/ssh functionality in imap by default (bug #77153)
TEST_F(ZNCTest, AwayNotify) { auto znc = Run(); auto ircd = ConnectIRCd(); auto client = ConnectClient(); client.Write("CAP LS"); client.Write("PASS :hunter2"); client.Write("NICK nick"); client.Write("USER user/test x x :x"); QByteArray cap_ls; client.ReadUntilAndGet(" LS :", cap_ls); ASSERT_THAT(cap_ls.toStdString(), AllOf(HasSubstr("cap-notify"), Not(HasSubstr("away-notify")))); client.Write("CAP REQ :cap-notify"); client.ReadUntil("ACK :cap-notify"); client.Write("CAP END"); client.ReadUntil(" 001 "); ircd.ReadUntil("USER"); ircd.Write("CAP user LS :away-notify"); ircd.ReadUntil("CAP REQ :away-notify"); ircd.Write("CAP user ACK :away-notify"); ircd.ReadUntil("CAP END"); ircd.Write(":server 001 user :welcome"); client.ReadUntil("CAP user NEW :away-notify"); client.Write("CAP REQ :away-notify"); client.ReadUntil("ACK :away-notify"); ircd.Write(":x!y@z AWAY :reason"); client.ReadUntil(":x!y@z AWAY :reason"); ircd.Close(); client.ReadUntil("DEL :away-notify"); }
0
[ "CWE-476" ]
znc
d229761821da38d984a9e4098ad96842490dc001
108,078,487,648,564,600,000,000,000,000,000,000,000
30
Fix echo-message for *status Close #1705
swrite (CamelMimePart *sigpart, GCancellable *cancellable, GError **error) { CamelStream *ostream; CamelDataWrapper *wrapper; gchar *template; gint fd, ret; template = g_build_filename (g_get_tmp_dir (), "evolution-pgp.XXXXXX", NULL); if ((fd = g_mkstemp (template)) == -1) { g_free (template); return NULL; } ostream = camel_stream_fs_new_with_fd (fd); wrapper = camel_medium_get_content (CAMEL_MEDIUM (sigpart)); if (!wrapper) wrapper = CAMEL_DATA_WRAPPER (sigpart); ret = camel_data_wrapper_decode_to_stream_sync ( wrapper, ostream, cancellable, error); if (ret != -1) { ret = camel_stream_flush (ostream, cancellable, error); if (ret != -1) ret = camel_stream_close (ostream, cancellable, error); } g_object_unref (ostream); if (ret == -1) { g_unlink (template); g_free (template); return NULL; } return template; }
0
[ "CWE-200" ]
evolution-data-server
5d8b92c622f6927b253762ff9310479dd3ac627d
199,698,920,810,696,600,000,000,000,000,000,000,000
38
CamelGpgContext: Enclose email addresses in brackets. The recipient list for encrypting can be specified by either key ID or email address. Enclose email addresses in brackets to ensure an exact match, as per the gpg man page: HOW TO SPECIFY A USER ID ... By exact match on an email address. This is indicated by enclosing the email address in the usual way with left and right angles. <[email protected]> Without the brackets gpg uses a substring match, which risks selecting the wrong recipient.
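Applying the man-page rule is mechanical: wrap a bare address in angle brackets before passing it as a gpg recipient. A small self-contained sketch (plain C with malloc; the real Camel code uses GLib string helpers, and the function name here is invented):

```c
#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* A bare address becomes "<addr>" so gpg does an exact user-ID match
 * instead of a substring match; key IDs and values that are already
 * bracketed pass through unchanged. */
static char *gpg_recipient_spec(const char *recipient)
{
    size_t len = strlen(recipient);
    char *out;

    if (strchr(recipient, '@') == NULL || recipient[0] == '<')
        return strdup(recipient);

    out = malloc(len + 3);              /* '<' + addr + '>' + NUL */
    if (out != NULL)
        snprintf(out, len + 3, "<%s>", recipient);
    return out;
}
```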
static int __nci_request(struct nci_dev *ndev, void (*req)(struct nci_dev *ndev, const void *opt), const void *opt, __u32 timeout) { int rc = 0; long completion_rc; ndev->req_status = NCI_REQ_PEND; reinit_completion(&ndev->req_completion); req(ndev, opt); completion_rc = wait_for_completion_interruptible_timeout(&ndev->req_completion, timeout); pr_debug("wait_for_completion return %ld\n", completion_rc); if (completion_rc > 0) { switch (ndev->req_status) { case NCI_REQ_DONE: rc = nci_to_errno(ndev->req_result); break; case NCI_REQ_CANCELED: rc = -ndev->req_result; break; default: rc = -ETIMEDOUT; break; } } else { pr_err("wait_for_completion_interruptible_timeout failed %ld\n", completion_rc); rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc)); } ndev->req_status = ndev->req_result = 0; return rc; }
0
[]
linux
48b71a9e66c2eab60564b1b1c85f4928ed04e406
285,599,665,850,331,700,000,000,000,000,000,000,000
42
NFC: add NCI_UNREG flag to eliminate the race There are two sites that call queue_work() after the destroy_workqueue() and lead to a possible UAF. The first site is nci_send_cmd(), which can happen after nci_close_device, as below nfcmrvl_nci_unregister_dev | nfc_genl_dev_up nci_close_device | flush_workqueue | del_timer_sync | nci_unregister_device | nfc_get_device destroy_workqueue | nfc_dev_up nfc_unregister_device | nci_dev_up device_del | nci_open_device | __nci_request | nci_send_cmd | queue_work !!! Another site is nci_cmd_timer, awakened by the nci_cmd_work from the nci_send_cmd. ... | ... nci_unregister_device | queue_work destroy_workqueue | nfc_unregister_device | ... device_del | nci_cmd_work | mod_timer | ... | nci_cmd_timer | queue_work !!! For the above two UAFs, the root cause is that nfc_dev_up can race with the nci_unregister_device routine. Therefore, this patch introduces the NCI_UNREG flag to easily eliminate the possible race. In addition, the mutex_lock in nci_close_device can act as a barrier. Signed-off-by: Lin Ma <[email protected]> Fixes: 6a2968aaf50c ("NFC: basic NCI protocol implementation") Reviewed-by: Jakub Kicinski <[email protected]> Reviewed-by: Krzysztof Kozlowski <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Jakub Kicinski <[email protected]>
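The NCI_UNREG flag named in the message turns the teardown race into an explicit state check: set the flag under the mutex before destroying the workqueue, and have the open path bail out once it sees the flag. A compilable model with pthreads; the struct and function names are simplifications, only the protocol mirrors the fix.

```c
#include <pthread.h>
#include <stdbool.h>
#include <errno.h>

struct nci_dev_model {
    pthread_mutex_t lock;   /* plays the role of the nci request mutex */
    bool unreg;             /* plays the role of the NCI_UNREG bit */
};

/* Unregister path: mark the device dead under the lock.  Any open that
 * was blocked on the mutex observes the flag afterwards, so nothing is
 * queued onto a workqueue that is about to be destroyed. */
static void model_unregister(struct nci_dev_model *ndev)
{
    pthread_mutex_lock(&ndev->lock);
    ndev->unreg = true;
    pthread_mutex_unlock(&ndev->lock);
    /* destroy_workqueue() etc. can run safely from here on */
}

/* Open path: refuse to start requests once the flag is set. */
static int model_open(struct nci_dev_model *ndev)
{
    int rc = 0;
    pthread_mutex_lock(&ndev->lock);
    if (ndev->unreg)
        rc = -ENODEV;
    /* else: safe to queue work / start timers here */
    pthread_mutex_unlock(&ndev->lock);
    return rc;
}
```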
inline THD *get_thd() const { return likely(table) ? table->in_use : current_thd; }
0
[ "CWE-416", "CWE-703" ]
server
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
189,804,453,587,544,930,000,000,000,000,000,000,000
2
MDEV-24176 Server crashes after insert in the table with virtual column generated using date_format() and if() vcol_info->expr is allocated on expr_arena at the parsing stage. Since the expr item is allocated on expr_arena, all the items it contains must be allocated on expr_arena too. Otherwise fix_session_expr() will encounter a prematurely freed item. When a table is reopened from the cache, vcol_info contains a stale expression. We refresh the expression via TABLE::vcol_fix_exprs(), but first we must prepare a proper context (Vcol_expr_context) which meets some requirements: 1. As noted above, the expr update must be done on expr_arena, as new items may be created. It was a bug in fix_session_expr_for_read() and was just not reproduced because there was no second refix. Now refix is done for more cases, so it does reproduce. Tests affected: vcol.binlog 2. Also, the name resolution context must be narrowed to the single table. Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes 3. sql_mode must be clean and must not fail the expr update. sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc. must not affect the vcol expression update. If the table was created successfully, any further evaluation must not fail. Tests affected: main.func_like Reviewed by: Sergei Golubchik <[email protected]>
int blkid_parttable_set_id(blkid_parttable tab, const unsigned char *id) { if (!tab) return -1; strncpy(tab->id, (const char *) id, sizeof(tab->id)); return 0; }
0
[]
util-linux
50d1594c2e6142a3b51d2143c74027480df082e0
266,063,887,379,488,530,000,000,000,000,000,000,000
8
libblkid: avoid non-empty recursion in EBR This is an extension to patch 7164a1c34d18831ac61c6744ad14ce916d389b3f. We also need to detect non-empty recursion in the EBR chain. It's possible to create standard valid logical partitions and have the last one point back to the EBR chain. In this case all offsets will be non-empty. Unfortunately, it's valid to create logical partitions that are not in the "disk order" (sorted by start offset). So a link somewhere back is valid, but this link cannot point to an already existing partition (otherwise we will see recursion). This patch forces libblkid to ignore duplicate logical partitions; the duplicate chain segment is interpreted as a non-data segment, and after 100 iterations with non-data segments it will break the loop -- no memory is allocated in this case by the loop. Addresses: https://bugzilla.redhat.com/show_bug.cgi?id=1349536 References: http://seclists.org/oss-sec/2016/q3/40 Signed-off-by: Karel Zak <[email protected]>
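Once every link in the chain is populated, loop detection has to key on "offset already seen" rather than "offset empty". A compilable sketch of that bookkeeping; the array bound and helper name are illustrative, while the 100-segment bail-out mentioned above lives in the caller.

```c
#include <stdint.h>
#include <stddef.h>

#define MAX_SEEN 128   /* illustrative bound on tracked offsets */

/* Returns 1 if this EBR start offset was already visited, recording it
 * otherwise.  A repeated offset means the chain links back on itself;
 * the caller then treats the segment as non-data (allocating nothing)
 * and gives up after a bounded number of such segments. */
static int ebr_seen_before(uint64_t *seen, size_t *nseen, uint64_t start)
{
    for (size_t i = 0; i < *nseen; i++)
        if (seen[i] == start)
            return 1;
    if (*nseen < MAX_SEEN)
        seen[(*nseen)++] = start;
    return 0;
}
```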
\param filename Filename to write data to. \param number When positive, represents an index added to the filename. Otherwise, no number is added. \param digits Number of digits used for adding the number to the filename. **/ const CImgList<T>& save(const char *const filename, const int number=-1, const unsigned int digits=6) const { if (!filename) throw CImgArgumentException(_cimglist_instance "save(): Specified filename is (null).", cimglist_instance); // Do not test for empty instances, since .cimg format is able to manage empty instances. const bool is_stdout = *filename=='-' && (!filename[1] || filename[1]=='.'); const char *const ext = cimg::split_filename(filename); CImg<charT> nfilename(1024); const char *const fn = is_stdout?filename:number>=0?cimg::number_filename(filename,number,digits,nfilename): filename; #ifdef cimglist_save_plugin cimglist_save_plugin(fn); #endif #ifdef cimglist_save_plugin1 cimglist_save_plugin1(fn); #endif #ifdef cimglist_save_plugin2 cimglist_save_plugin2(fn); #endif #ifdef cimglist_save_plugin3 cimglist_save_plugin3(fn); #endif #ifdef cimglist_save_plugin4 cimglist_save_plugin4(fn); #endif #ifdef cimglist_save_plugin5 cimglist_save_plugin5(fn); #endif #ifdef cimglist_save_plugin6 cimglist_save_plugin6(fn); #endif #ifdef cimglist_save_plugin7 cimglist_save_plugin7(fn); #endif #ifdef cimglist_save_plugin8 cimglist_save_plugin8(fn); #endif if (!cimg::strcasecmp(ext,"cimgz")) return save_cimg(fn,true); else if (!cimg::strcasecmp(ext,"cimg") || !*ext) return save_cimg(fn,false); else if (!cimg::strcasecmp(ext,"yuv")) return save_yuv(fn,444,true); else if (!cimg::strcasecmp(ext,"avi") || !cimg::strcasecmp(ext,"mov") || !cimg::strcasecmp(ext,"asf") || !cimg::strcasecmp(ext,"divx") || !cimg::strcasecmp(ext,"flv") || !cimg::strcasecmp(ext,"mpg") || !cimg::strcasecmp(ext,"m1v") || !cimg::strcasecmp(ext,"m2v") || !cimg::strcasecmp(ext,"m4v") || !cimg::strcasecmp(ext,"mjp") || !cimg::strcasecmp(ext,"mp4") || !cimg::strcasecmp(ext,"mkv") || !cimg::strcasecmp(ext,"mpe") || !cimg::strcasecmp(ext,"movie") || !cimg::strcasecmp(ext,"ogm") || !cimg::strcasecmp(ext,"ogg") || !cimg::strcasecmp(ext,"ogv") || !cimg::strcasecmp(ext,"qt") || !cimg::strcasecmp(ext,"rm") || !cimg::strcasecmp(ext,"vob") || !cimg::strcasecmp(ext,"wmv") || !cimg::strcasecmp(ext,"xvid") || !cimg::strcasecmp(ext,"mpeg")) return save_video(fn); #ifdef cimg_use_tiff else if (!cimg::strcasecmp(ext,"tif") || !cimg::strcasecmp(ext,"tiff")) return save_tiff(fn); #endif else if (!cimg::strcasecmp(ext,"gz")) return save_gzip_external(fn); else { if (_width==1) _data[0].save(fn,-1);
0
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
40,087,132,398,793,466,000,000,000,000,000,000,000
76
Fix other issues in 'CImg<T>::load_bmp()'.
static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt, struct fib_config *cfg) { __be32 addr; int plen; memset(cfg, 0, sizeof(*cfg)); cfg->fc_nlinfo.nl_net = net; if (rt->rt_dst.sa_family != AF_INET) return -EAFNOSUPPORT; /* * Check mask for validity: * a) it must be contiguous. * b) destination must have all host bits clear. * c) if application forgot to set correct family (AF_INET), * reject request unless it is absolutely clear i.e. * both family and mask are zero. */ plen = 32; addr = sk_extract_addr(&rt->rt_dst); if (!(rt->rt_flags & RTF_HOST)) { __be32 mask = sk_extract_addr(&rt->rt_genmask); if (rt->rt_genmask.sa_family != AF_INET) { if (mask || rt->rt_genmask.sa_family) return -EAFNOSUPPORT; } if (bad_mask(mask, addr)) return -EINVAL; plen = inet_mask_len(mask); } cfg->fc_dst_len = plen; cfg->fc_dst = addr; if (cmd != SIOCDELRT) { cfg->fc_nlflags = NLM_F_CREATE; cfg->fc_protocol = RTPROT_BOOT; } if (rt->rt_metric) cfg->fc_priority = rt->rt_metric - 1; if (rt->rt_flags & RTF_REJECT) { cfg->fc_scope = RT_SCOPE_HOST; cfg->fc_type = RTN_UNREACHABLE; return 0; } cfg->fc_scope = RT_SCOPE_NOWHERE; cfg->fc_type = RTN_UNICAST; if (rt->rt_dev) { char *colon; struct net_device *dev; char devname[IFNAMSIZ]; if (copy_from_user(devname, rt->rt_dev, IFNAMSIZ-1)) return -EFAULT; devname[IFNAMSIZ-1] = 0; colon = strchr(devname, ':'); if (colon) *colon = 0; dev = __dev_get_by_name(net, devname); if (!dev) return -ENODEV; cfg->fc_oif = dev->ifindex; if (colon) { struct in_ifaddr *ifa; struct in_device *in_dev = __in_dev_get_rtnl(dev); if (!in_dev) return -ENODEV; *colon = ':'; for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) if (strcmp(ifa->ifa_label, devname) == 0) break; if (!ifa) return -ENODEV; cfg->fc_prefsrc = ifa->ifa_local; } } addr = sk_extract_addr(&rt->rt_gateway); if (rt->rt_gateway.sa_family == AF_INET && addr) { unsigned int addr_type; cfg->fc_gw = addr; addr_type = inet_addr_type_table(net, addr, cfg->fc_table); if (rt->rt_flags & RTF_GATEWAY && addr_type == RTN_UNICAST) cfg->fc_scope = RT_SCOPE_UNIVERSE; } if (cmd == SIOCDELRT) return 0; if (rt->rt_flags & RTF_GATEWAY && !cfg->fc_gw) return -EINVAL; if (cfg->fc_scope == RT_SCOPE_NOWHERE) cfg->fc_scope = RT_SCOPE_LINK; if (rt->rt_flags & (RTF_MTU | RTF_WINDOW | RTF_IRTT)) { struct nlattr *mx; int len = 0; mx = kzalloc(3 * nla_total_size(4), GFP_KERNEL); if (!mx) return -ENOMEM; if (rt->rt_flags & RTF_MTU) len = put_rtax(mx, len, RTAX_ADVMSS, rt->rt_mtu - 40); if (rt->rt_flags & RTF_WINDOW) len = put_rtax(mx, len, RTAX_WINDOW, rt->rt_window); if (rt->rt_flags & RTF_IRTT) len = put_rtax(mx, len, RTAX_RTT, rt->rt_irtt << 3); cfg->fc_mx = mx; cfg->fc_mx_len = len; } return 0; }
0
[ "CWE-399" ]
net-next
fbd40ea0180a2d328c5adc61414dc8bab9335ce2
238,236,213,906,836,250,000,000,000,000,000,000,000
130
ipv4: Don't do expensive useless work during inetdev destroy. When an inetdev is destroyed, every address assigned to the interface is removed. And in this scenario we do two pointless things which can be very expensive if the number of assigned interfaces is large: 1) Address promotion. We are deleting all addresses, so there is no point in doing this. 2) A full nf conntrack table purge for every address. We only need to do this once, as is already caught by the existing masq_dev_notifier so masq_inet_event() can skip this. Reported-by: Solar Designer <[email protected]> Signed-off-by: David S. Miller <[email protected]> Tested-by: Cyrill Gorcunov <[email protected]>
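Both wasted steps hinge on the same fact: the whole in_device is being torn down. A compilable model of the short-circuit; the `dead` field mirrors the kernel's in_dev->dead, everything else is simplified.

```c
#include <stdbool.h>

/* Placeholder device: only the `dead` flag mirrors the kernel's
 * in_dev->dead; the rest is schematic. */
struct in_device_model {
    bool dead;            /* set once when the whole device is destroyed */
    int  num_addresses;
};

/* Per-address teardown: promoting a secondary address to primary is
 * pointless when every address is about to be deleted, so skip it. */
static void model_del_ifa(struct in_device_model *in_dev)
{
    if (!in_dev->dead) {
        /* ...look for a secondary address to promote... */
    }
    /* ...unlink and free the address.  The conntrack purge is likewise
     * hoisted out of this loop and done once per device, via the
     * masquerading code's netdev notifier. */
    in_dev->num_addresses--;
}
```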
static int bell(void *user) { VTermScreen *screen = user; if(screen->callbacks && screen->callbacks->bell) return (*screen->callbacks->bell)(screen->cbdata); return 0; }
0
[ "CWE-476" ]
vim
cd929f7ba8cc5b6d6dcf35c8b34124e969fed6b8
77,076,444,038,143,230,000,000,000,000,000,000,000
9
patch 8.1.0633: crash when out of memory while opening a terminal window Problem: Crash when out of memory while opening a terminal window. Solution: Handle out-of-memory more gracefully.
static void qrtr_port_remove(struct qrtr_sock *ipc) { struct qrtr_ctrl_pkt *pkt; struct sk_buff *skb; int port = ipc->us.sq_port; struct sockaddr_qrtr to; to.sq_family = AF_QIPCRTR; to.sq_node = QRTR_NODE_BCAST; to.sq_port = QRTR_PORT_CTRL; skb = qrtr_alloc_ctrl_packet(&pkt, GFP_KERNEL); if (skb) { pkt->cmd = cpu_to_le32(QRTR_TYPE_DEL_CLIENT); pkt->client.node = cpu_to_le32(ipc->us.sq_node); pkt->client.port = cpu_to_le32(ipc->us.sq_port); skb_set_owner_w(skb, &ipc->sk); qrtr_bcast_enqueue(NULL, skb, QRTR_TYPE_DEL_CLIENT, &ipc->us, &to); } if (port == QRTR_PORT_CTRL) port = 0; __sock_put(&ipc->sk); xa_erase(&qrtr_ports, port); /* Ensure that if qrtr_port_lookup() did enter the RCU read section we * wait for it to up increment the refcount */ synchronize_rcu(); }
0
[]
net
7e78c597c3ebfd0cb329aa09a838734147e4f117
120,961,129,138,150,280,000,000,000,000,000,000,000
33
net: qrtr: fix another OOB Read in qrtr_endpoint_post This check was incomplete: it did not consider the case where size is 0: if (len != ALIGN(size, 4) + hdrlen) goto err; If size from qrtr_hdr is 0, the result of ALIGN(size, 4) will be 0. In the case of len == hdrlen and size == 0 in the header, this check won't fail, and if (cb->type == QRTR_TYPE_NEW_SERVER) { /* Remote node endpoint can bridge other distant nodes */ const struct qrtr_ctrl_pkt *pkt = data + hdrlen; qrtr_node_assign(node, le32_to_cpu(pkt->server.node)); } will also read out of bounds from data, which is an hdrlen-sized allocated block. Fixes: 194ccc88297a ("net: qrtr: Support decoding incoming v2 packets") Fixes: ad9d24c9429e ("net: qrtr: fix OOB Read in qrtr_endpoint_post") Signed-off-by: Xiaolong Huang <[email protected]> Signed-off-by: David S. Miller <[email protected]>
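The strengthened bound check is one extra clause. A compilable model; align4() stands in for the kernel's ALIGN() macro.

```c
#include <stddef.h>

/* Round up to a 4-byte boundary (models the kernel's ALIGN()). */
static size_t align4(size_t x) { return (x + 3) & ~(size_t)3; }

/* Model of the fixed bound check: a zero-sized payload must be
 * rejected outright, otherwise len == hdrlen satisfies the equality
 * and the later control-packet cast reads past the hdrlen bytes that
 * were actually allocated. */
static int qrtr_len_ok(size_t len, size_t hdrlen, size_t size)
{
    return size != 0 && len == align4(size) + hdrlen;
}
```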
GF_Err stbl_Size(GF_Box *s) { GF_Err e; GF_SampleTableBox *ptr = (GF_SampleTableBox *)s; //Mandatory boxs (but not internally :) if (ptr->SampleDescription) { e = gf_isom_box_size((GF_Box *) ptr->SampleDescription); if (e) return e; ptr->size += ptr->SampleDescription->size; } if (ptr->SampleSize) { e = gf_isom_box_size((GF_Box *) ptr->SampleSize); if (e) return e; ptr->size += ptr->SampleSize->size; } if (ptr->SampleToChunk) { e = gf_isom_box_size((GF_Box *) ptr->SampleToChunk); if (e) return e; ptr->size += ptr->SampleToChunk->size; } if (ptr->TimeToSample) { e = gf_isom_box_size((GF_Box *) ptr->TimeToSample); if (e) return e; ptr->size += ptr->TimeToSample->size; } if (ptr->ChunkOffset) { e = gf_isom_box_size(ptr->ChunkOffset); if (e) return e; ptr->size += ptr->ChunkOffset->size; } //optional boxs if (ptr->CompositionOffset) { e = gf_isom_box_size((GF_Box *) ptr->CompositionOffset); if (e) return e; ptr->size += ptr->CompositionOffset->size; } if (ptr->CompositionToDecode) { e = gf_isom_box_size((GF_Box *) ptr->CompositionToDecode); if (e) return e; ptr->size += ptr->CompositionToDecode->size; } if (ptr->DegradationPriority) { e = gf_isom_box_size((GF_Box *) ptr->DegradationPriority); if (e) return e; ptr->size += ptr->DegradationPriority->size; } if (ptr->ShadowSync) { e = gf_isom_box_size((GF_Box *) ptr->ShadowSync); if (e) return e; ptr->size += ptr->ShadowSync->size; } if (ptr->SyncSample) { e = gf_isom_box_size((GF_Box *) ptr->SyncSample); if (e) return e; ptr->size += ptr->SyncSample->size; } if (ptr->SampleDep && ptr->SampleDep->sampleCount) { e = gf_isom_box_size((GF_Box *) ptr->SampleDep); if (e) return e; ptr->size += ptr->SampleDep->size; } //padb if (ptr->PaddingBits) { e = gf_isom_box_size((GF_Box *) ptr->PaddingBits); if (e) return e; ptr->size += ptr->PaddingBits->size; } if (ptr->sub_samples) { e = gf_isom_box_array_size(s, ptr->sub_samples); if (e) return e; } if (ptr->sampleGroups) { e = gf_isom_box_array_size(s, ptr->sampleGroups); if (e) return e; } if (ptr->sampleGroupsDescription) { e = gf_isom_box_array_size(s, ptr->sampleGroupsDescription); if (e) return e; } if (ptr->sai_sizes) { e = gf_isom_box_array_size(s, ptr->sai_sizes); if (e) return e; } if (ptr->sai_offsets) { e = gf_isom_box_array_size(s, ptr->sai_offsets); if (e) return e; } return GF_OK; }
0
[ "CWE-400", "CWE-401" ]
gpac
d2371b4b204f0a3c0af51ad4e9b491144dd1225c
212,873,638,841,540,700,000,000,000,000,000,000,000
92
prevent dref memleak on invalid input (#1183)
time_t DTime::time() { return d_set.tv_sec; }
0
[ "CWE-399" ]
pdns
881b5b03a590198d03008e4200dd00cc537712f3
176,751,640,734,942,040,000,000,000,000,000,000,000
4
Reject qname's wirelength > 255, `chopOff()` handle dot inside labels
/* Send skb data on a socket. Socket must be locked. */ int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, int len) { unsigned int orig_len = len; struct sk_buff *head = skb; unsigned short fragidx; int slen, ret; do_frag_list: /* Deal with head data */ while (offset < skb_headlen(skb) && len) { struct kvec kv; struct msghdr msg; slen = min_t(int, len, skb_headlen(skb) - offset); kv.iov_base = skb->data + offset; kv.iov_len = slen; memset(&msg, 0, sizeof(msg)); ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen); if (ret <= 0) goto error; offset += ret; len -= ret; } /* All the data was skb head? */ if (!len) goto out; /* Make offset relative to start of frags */ offset -= skb_headlen(skb); /* Find where we are in frag list */ for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; if (offset < frag->size) break; offset -= frag->size; } for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; slen = min_t(size_t, len, frag->size - offset); while (slen) { ret = kernel_sendpage_locked(sk, frag->page.p, frag->page_offset + offset, slen, MSG_DONTWAIT); if (ret <= 0) goto error; len -= ret; offset += ret; slen -= ret; } offset = 0; } if (len) { /* Process any frag lists */ if (skb == head) { if (skb_has_frag_list(skb)) { skb = skb_shinfo(skb)->frag_list; goto do_frag_list; } } else if (skb->next) { skb = skb->next; goto do_frag_list; } } out: return orig_len - len; error: return orig_len == len ? ret : orig_len - len;
0
[ "CWE-20" ]
linux
2b16f048729bf35e6c28a40cbfad07239f9dcd90
207,529,037,788,762,470,000,000,000,000,000,000,000
85
net: create skb_gso_validate_mac_len() If you take a GSO skb, and split it into packets, will the MAC length (L2 + L3 + L4 headers + payload) of those packets be small enough to fit within a given length? Move skb_gso_mac_seglen() to skbuff.h with other related functions like skb_gso_network_seglen() so we can use it, and then create skb_gso_validate_mac_len to do the full calculation. Signed-off-by: Daniel Axtens <[email protected]> Signed-off-by: David S. Miller <[email protected]>
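The described arithmetic -- L2 header bytes plus the L3/L4 headers plus per-segment payload -- can be modelled without struct sk_buff. A sketch where the lengths are passed in directly; the real helpers derive them from the skb's header offsets and handle GSO corner cases this model ignores.

```c
#include <stdbool.h>

/* mac_seglen = L2 header + (L3 + L4 headers + one segment's payload). */
static unsigned int model_gso_mac_seglen(unsigned int l2_hdr_len,
                                         unsigned int l3l4_hdr_len,
                                         unsigned int gso_size)
{
    return l2_hdr_len + l3l4_hdr_len + gso_size;
}

/* Would every packet produced by segmentation fit under `limit`? */
static bool model_gso_validate_mac_len(unsigned int l2_hdr_len,
                                       unsigned int l3l4_hdr_len,
                                       unsigned int gso_size,
                                       unsigned int limit)
{
    return model_gso_mac_seglen(l2_hdr_len, l3l4_hdr_len, gso_size) <= limit;
}
```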
static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { if (!is_device_dma_coherent(dev)) __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); swiotlb_unmap_page(dev, dev_addr, size, dir, attrs); }
0
[ "CWE-200" ]
linux
6829e274a623187c24f7cfc0e3d35f25d087fcc5
1,410,979,563,341,105,700,000,000,000,000,000,000
8
arm64: dma-mapping: always clear allocated buffers Buffers allocated by dma_alloc_coherent() are always zeroed on Alpha, ARM (32bit), MIPS, PowerPC, x86/x86_64 and probably other architectures. It turned out that some drivers rely on this 'feature'. Allocated buffer might be also exposed to userspace with dma_mmap() call, so clearing it is desired from security point of view to avoid exposing random memory to userspace. This patch unifies dma_alloc_coherent() behavior on ARM64 architecture with other implementations by unconditionally zeroing allocated buffer. Cc: <[email protected]> # v3.14+ Signed-off-by: Marek Szyprowski <[email protected]> Signed-off-by: Will Deacon <[email protected]>
ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) { struct super_block *sb = ac->ac_sb; struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_prealloc_space *pa; struct ext4_group_info *grp; struct ext4_inode_info *ei; /* preallocate only when found space is larger then requested */ BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); BUG_ON(ac->ac_status != AC_STATUS_FOUND); BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS); if (pa == NULL) return -ENOMEM; if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) { int winl; int wins; int win; int offs; /* we can't allocate as much as normalizer wants. * so, found space must get proper lstart * to cover original request */ BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical); BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len); /* we're limited by original request in that * logical block must be covered any way * winl is window we can move our chunk within */ winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical; /* also, we should cover whole original request */ wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len); /* the smallest one defines real window */ win = min(winl, wins); offs = ac->ac_o_ex.fe_logical % EXT4_C2B(sbi, ac->ac_b_ex.fe_len); if (offs && offs < win) win = offs; ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - EXT4_NUM_B2C(sbi, win); BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len); } /* preallocation can change ac_b_ex, thus we store actually * allocated blocks for history */ ac->ac_f_ex = ac->ac_b_ex; pa->pa_lstart = ac->ac_b_ex.fe_logical; pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); pa->pa_len = ac->ac_b_ex.fe_len; pa->pa_free = pa->pa_len; atomic_set(&pa->pa_count, 1); spin_lock_init(&pa->pa_lock); INIT_LIST_HEAD(&pa->pa_inode_list); INIT_LIST_HEAD(&pa->pa_group_list); pa->pa_deleted = 0; pa->pa_type = MB_INODE_PA; mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa, pa->pa_pstart, pa->pa_len, pa->pa_lstart); trace_ext4_mb_new_inode_pa(ac, pa); ext4_mb_use_inode_pa(ac, pa); atomic_add(pa->pa_free, &sbi->s_mb_preallocated); ei = EXT4_I(ac->ac_inode); grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); pa->pa_obj_lock = &ei->i_prealloc_lock; pa->pa_inode = ac->ac_inode; ext4_lock_group(sb, ac->ac_b_ex.fe_group); list_add(&pa->pa_group_list, &grp->bb_prealloc_list); ext4_unlock_group(sb, ac->ac_b_ex.fe_group); spin_lock(pa->pa_obj_lock); list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list); spin_unlock(pa->pa_obj_lock); return 0; }
0
[ "CWE-416" ]
linux
8844618d8aa7a9973e7b527d038a2a589665002c
162,531,810,600,478,280,000,000,000,000,000,000,000
89
ext4: only look at the bg_flags field if it is valid The bg_flags field in the block group descriptors is only valid if the uninit_bg or metadata_csum feature is enabled. We were not consistently looking at this field; fix this. Also block group #0 must never have uninitialized allocation bitmaps, or need to be zeroed, since that's where the root inode and other special inodes are set up. Check for these conditions and mark the file system as corrupted if they are detected. This addresses CVE-2018-10876. https://bugzilla.kernel.org/show_bug.cgi?id=199403 Signed-off-by: Theodore Ts'o <[email protected]> Cc: [email protected]
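Two invariants follow from the message: bg_flags is meaningful only when the filesystem maintains it (uninit_bg/metadata_csum), and group 0 may never be marked uninitialized. A compilable model; the BG_* bit values mirror ext4's EXT4_BG_* constants, the rest is simplified.

```c
#include <stdint.h>
#include <stdbool.h>

#define BG_INODE_UNINIT 0x0001   /* mirrors EXT4_BG_INODE_UNINIT */
#define BG_BLOCK_UNINIT 0x0002   /* mirrors EXT4_BG_BLOCK_UNINIT */

/* Returns the flags the caller may act on.  Without uninit_bg or
 * metadata_csum the field is not maintained and must be ignored; and
 * group 0 holds the superblock and special inodes, so an "uninit"
 * marking there proves the descriptor is corrupted. */
static uint16_t effective_bg_flags(bool has_group_desc_csum, uint32_t group,
                                   uint16_t bg_flags, bool *corrupted)
{
    *corrupted = false;
    if (!has_group_desc_csum)
        return 0;                              /* field not meaningful */
    if (group == 0 &&
        (bg_flags & (BG_INODE_UNINIT | BG_BLOCK_UNINIT)))
        *corrupted = true;                     /* mark the fs corrupted */
    return bg_flags;
}
```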
const string unquotify(const string &item) { if(item.size()<2) return item; string::size_type bpos=0, epos=item.size(); if(item[0]=='"') bpos=1; if(item[epos-1]=='"') epos-=1; return item.substr(bpos,epos-bpos); }
0
[ "CWE-399" ]
pdns
881b5b03a590198d03008e4200dd00cc537712f3
125,916,338,374,232,570,000,000,000,000,000,000,000
15
Reject qname's wirelength > 255, `chopOff()` handle dot inside labels
static int clone_slice(H264Context *dst, H264Context *src) { memcpy(dst->block_offset, src->block_offset, sizeof(dst->block_offset)); dst->cur_pic_ptr = src->cur_pic_ptr; dst->cur_pic = src->cur_pic; dst->linesize = src->linesize; dst->uvlinesize = src->uvlinesize; dst->first_field = src->first_field; dst->prev_poc_msb = src->prev_poc_msb; dst->prev_poc_lsb = src->prev_poc_lsb; dst->prev_frame_num_offset = src->prev_frame_num_offset; dst->prev_frame_num = src->prev_frame_num; dst->short_ref_count = src->short_ref_count; memcpy(dst->short_ref, src->short_ref, sizeof(dst->short_ref)); memcpy(dst->long_ref, src->long_ref, sizeof(dst->long_ref)); memcpy(dst->default_ref_list, src->default_ref_list, sizeof(dst->default_ref_list)); memcpy(dst->dequant4_coeff, src->dequant4_coeff, sizeof(src->dequant4_coeff)); memcpy(dst->dequant8_coeff, src->dequant8_coeff, sizeof(src->dequant8_coeff)); return 0; }
0
[ "CWE-703" ]
FFmpeg
29ffeef5e73b8f41ff3a3f2242d356759c66f91f
451,506,347,534,484,200,000,000,000,000,000,000
24
avcodec/h264: do not trust last_pic_droppable when marking pictures as done This simplifies the code and fixes a deadlock Fixes Ticket2927 Signed-off-by: Michael Niedermayer <[email protected]>
static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id) { if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) IWL_ERR(priv, "ACTIVATE a non DRIVER active station id %u " "addr %pM\n", sta_id, priv->stations[sta_id].sta.sta.addr); if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) { IWL_DEBUG_ASSOC(priv, "STA id %u addr %pM already present in uCode " "(according to driver)\n", sta_id, priv->stations[sta_id].sta.sta.addr); } else { priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE; IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n", sta_id, priv->stations[sta_id].sta.sta.addr); } }
1
[ "CWE-119", "CWE-787" ]
linux
2da424b0773cea3db47e1e81db71eeebde8269d4
280,514,546,215,644,700,000,000,000,000,000,000,000
19
iwlwifi: Sanity check for sta_id In my testing, I saw some strange behavior [ 421.739708] iwlwifi 0000:01:00.0: ACTIVATE a non DRIVER active station id 148 addr 00:00:00:00:00:00 [ 421.739719] iwlwifi 0000:01:00.0: iwl_sta_ucode_activate Added STA id 148 addr 00:00:00:00:00:00 to uCode Not sure how it happened, but add a sanity check to prevent memory corruption. Signed-off-by: Wey-Yi Guy <[email protected]> Signed-off-by: John W. Linville <[email protected]>
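The log lines show an index (148) far past any plausible station table, and target=1 marks the function above as the pre-fix version. A compilable model of the missing bound check; the table size constant and names are stand-ins for the driver's.

```c
#include <stdio.h>

#define STA_TABLE_SIZE 16        /* stand-in for the driver's station count */

struct station_model { unsigned int used; };

/* Bound-check the id before indexing the table: a bogus value such as
 * the 148 seen in the log would otherwise scribble past the array. */
static int model_sta_ucode_activate(struct station_model *stations,
                                    unsigned int sta_id)
{
    if (sta_id >= STA_TABLE_SIZE) {
        fprintf(stderr, "invalid sta_id %u\n", sta_id);
        return -1;
    }
    stations[sta_id].used |= 1u;
    return 0;
}
```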
int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
{
	switch (dsaf_dev->dsaf_mode) {
	case DSAF_MODE_ENABLE_FIX:
	case DSAF_MODE_DISABLE_SP:
		return 1;

	case DSAF_MODE_DISABLE_FIX:
		return 6;

	case DSAF_MODE_ENABLE_0VM:
		return 32;

	case DSAF_MODE_DISABLE_6PORT_0VM:
	case DSAF_MODE_ENABLE_16VM:
	case DSAF_MODE_DISABLE_6PORT_2VM:
	case DSAF_MODE_DISABLE_6PORT_16VM:
	case DSAF_MODE_DISABLE_6PORT_4VM:
	case DSAF_MODE_ENABLE_8VM:
		return 96;

	case DSAF_MODE_DISABLE_2PORT_16VM:
	case DSAF_MODE_DISABLE_2PORT_8VM:
	case DSAF_MODE_ENABLE_32VM:
	case DSAF_MODE_DISABLE_2PORT_64VM:
	case DSAF_MODE_ENABLE_128VM:
		return 128;

	default:
		dev_warn(dsaf_dev->dev,
			 "get ring num fail,use default!dsaf_mode=%d\n",
			 dsaf_dev->dsaf_mode);
		return 128;
	}
}
0
[ "CWE-119", "CWE-703" ]
linux
412b65d15a7f8a93794653968308fc100f2aa87c
157,093,356,821,790,350,000,000,000,000,000,000,000
35
net: hns: fix ethtool_get_strings overflow in hns driver hns_get_sset_count() returns HNS_NET_STATS_CNT, and the data space allocated is not enough for ethtool_get_strings(), which will cause random memory corruption. When SLAB and DEBUG_SLAB are both enabled, memory corruptions like the following can be observed without this patch:
[ 43.115200] Slab corruption (Not tainted): Acpi-ParseExt start=ffff801fb0b69030, len=80
[ 43.115206] Redzone: 0x9f911029d006462/0x5f78745f31657070.
[ 43.115208] Last user: [<5f7272655f746b70>](0x5f7272655f746b70)
[ 43.115214] 010: 70 70 65 31 5f 74 78 5f 70 6b 74 00 6b 6b 6b 6b ppe1_tx_pkt.kkkk
[ 43.115217] 030: 70 70 65 31 5f 74 78 5f 70 6b 74 5f 6f 6b 00 6b ppe1_tx_pkt_ok.k
[ 43.115218] Next obj: start=ffff801fb0b69098, len=80
[ 43.115220] Redzone: 0x706d655f6f666966/0x9f911029d74e35b.
[ 43.115229] Last user: [<ffff0000084b11b0>](acpi_os_release_object+0x28/0x38)
[ 43.115231] 000: 74 79 00 6b 6b 6b 6b 6b 70 70 65 31 5f 74 78 5f ty.kkkkkppe1_tx_
[ 43.115232] 010: 70 6b 74 5f 65 72 72 5f 63 73 75 6d 5f 66 61 69 pkt_err_csum_fai
Signed-off-by: Timmy Li <[email protected]> Signed-off-by: David S. Miller <[email protected]>
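A minimal sketch of the invariant the fix restores, with invented names; the point is that the count handed to ethtool must equal the number of ETH_GSTRING_LEN entries that get_strings() later writes, because the core sizes the buffer from this answer:

/* Hypothetical shape of the pairing; the real driver keeps the MAC and
 * ring statistics on both sides so the two numbers cannot drift. */
static int sset_count_sketch(struct net_device *ndev, int stringset)
{
	if (stringset != ETH_SS_STATS)
		return -EOPNOTSUPP;
	return HNS_NET_STATS_CNT;	/* must match get_strings() output */
}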
void operator()(OpKernelContext* context, const Tensor& input_tensor,
                Tensor& output_tensor, int n, bool reverse) {
  const T* input = input_tensor.flat<T>().data();
  T* output = output_tensor.flat<T>().data();

  // Assume input_shape is [d1,d2,...dk], and output_shape is [d1,d2...dk-1],
  // then num_rows = d1*d2...dk-1, last_dim = dk.
  const int num_rows = output_tensor.NumElements();
  const int last_dim = input_tensor.dim_size(input_tensor.dims() - 1);

  // Allocate each row to different shard.
  auto SubNthElement = [&, input, output, last_dim, n](int64 start,
                                                       int64 limit) {
    // std::nth_element would rearrange the array, so we need a new buffer.
    std::vector<T> buf(last_dim);

    for (int b = start; b < limit; ++b) {
      // Copy from one row of elements to buffer
      const T* input_start = input + b * last_dim;
      const T* input_end = input + (b + 1) * last_dim;
      std::copy(input_start, input_end, buf.begin());

      std::nth_element(buf.begin(), buf.begin() + n, buf.end());
      // The element placed in the nth position is exactly the element that
      // would occur in this position if the range was fully sorted.
      output[b] = buf[n];
    }
  };

  auto worker_threads = *(context->device()->tensorflow_cpu_worker_threads());
  // The average time complexity of partition-based nth_element (BFPRT) is
  // O(n), although the worst time complexity could be O(n^2). Here, 20 is an
  // empirical factor of cost_per_unit.
  Shard(worker_threads.num_threads, worker_threads.workers, num_rows,
        20 * last_dim, SubNthElement);
}
0
[ "CWE-703", "CWE-197" ]
tensorflow
ca8c013b5e97b1373b3bb1c97ea655e69f31a575
93,561,155,282,865,970,000,000,000,000,000,000,000
36
Prevent integer truncation from 64 to 32 bits. The `tensorflow::Shard` function's last argument must be a two-argument function where both arguments are `int64` (`long long`, 64 bits). However, there are usages where code passes in a function whose arguments are `int` or `int32` (32 bits). In these cases, it is possible that the integer truncation would later cause a segfault or other unexpected behavior. PiperOrigin-RevId: 332560414 Change-Id: Ief649406babc8d4f60b3e7a9d573cbcc5ce5b767
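The truncation pitfall in miniature, as a self-contained C sketch with invented names (the real code is C++ around tensorflow::Shard): a shard-style driver hands out 64-bit bounds, and a callback that narrows them to int wraps once the range passes 2^31 - 1:

#include <stdint.h>
#include <stdio.h>

typedef void (*shard_fn)(int64_t start, int64_t limit);

static void shard_sketch(int64_t total, shard_fn work)
{
    work(0, total);   /* the real Shard splits [0, total) over threads */
}

static void worker(int64_t start, int64_t limit)
{
    /* Declaring these parameters as plain int would truncate limit. */
    printf("range [%lld, %lld)\n", (long long)start, (long long)limit);
}

int main(void)
{
    shard_sketch((int64_t)1 << 33, worker);   /* > 2^31, needs 64 bits */
    return 0;
}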
static int ModuleCompare(const void *x,const void *y)
{
  const char
    **p,
    **q;

  p=(const char **) x;
  q=(const char **) y;
  return(LocaleCompare(*p,*q));
}
0
[ "CWE-200", "CWE-362" ]
ImageMagick
01faddbe2711a4156180c4a92837e2f23683cc68
19,568,263,953,975,582,000,000,000,000,000,000,000
10
Use the correct rights.
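For context, a comparator of this shape is meant for qsort(3); a typical call over a module-name list looks like the following (the variable names are illustrative, not ImageMagick's):

/* Illustrative only: sort an array of module-name pointers
 * case-insensitively before presenting it. */
qsort((void *) modules, (size_t) number_modules, sizeof(*modules),
      ModuleCompare);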
mb_lefthalve(int row, int col)
{
    return (*mb_off2cells)(LineOffset[row] + col,
			   LineOffset[row] + screen_Columns) > 1;
}
0
[ "CWE-122", "CWE-787" ]
vim
f6d39c31d2177549a986d170e192d8351bd571e2
115,604,695,237,173,570,000,000,000,000,000,000,000
5
patch 9.0.0220: invalid memory access with for loop over NULL string Problem: Invalid memory access with for loop over NULL string. Solution: Make sure mb_ptr2len() consistently returns zero for NUL.
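A sketch of the invariant the patch enforces (the shape is assumed, not vim's actual decoder): every mb_ptr2len implementation must report zero at the NUL terminator, so a loop that advances by its result can never step past the end of a NULL or empty string:

/* Assumed shape of a byte-length probe honoring the new contract. */
static int ptr2len_sketch(const char *p)
{
    if (*p == '\0')
        return 0;       /* consistently zero for NUL, never 1 */
    /* ...otherwise return the length of the multibyte sequence... */
    return 1;
}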
static inline double MagickLog10(const double x)
{
#define Log10Epsilon  (1.0e-11)

  if (fabs(x) < Log10Epsilon)
    return(log10(Log10Epsilon));
  return(log10(fabs(x)));
}
0
[ "CWE-119", "CWE-787" ]
ImageMagick6
91e58d967a92250439ede038ccfb0913a81e59fe
209,494,753,919,192,900,000,000,000,000,000,000,000
8
https://github.com/ImageMagick/ImageMagick/issues/1615
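Why the epsilon clamp above matters, as two worked calls whose values follow directly from the definition:

MagickLog10(0.0);     /* fabs(0) < 1e-11, so log10(1e-11) = -11, not -inf */
MagickLog10(-100.0);  /* fabs gives 100, so the result is 2; sign dropped */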
set_tagfunc_option(void)
{
#ifdef FEAT_EVAL
    free_callback(&tfu_cb);
    free_callback(&curbuf->b_tfu_cb);

    if (*curbuf->b_p_tfu == NUL)
	return OK;

    if (option_set_callback_func(curbuf->b_p_tfu, &tfu_cb) == FAIL)
	return FAIL;

    copy_callback(&curbuf->b_tfu_cb, &tfu_cb);
#endif

    return OK;
}
0
[ "CWE-416" ]
vim
adce965162dd89bf29ee0e5baf53652e7515762c
210,615,668,613,465,760,000,000,000,000,000,000,000
17
patch 9.0.0246: using freed memory when 'tagfunc' deletes the buffer Problem: Using freed memory when 'tagfunc' deletes the buffer. Solution: Make a copy of the tag name.
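A fragment sketching the copy the message describes; vim_strsave() and vim_free() are vim's real helpers, but the call site shown here is assumed:

/* Copy the tag before invoking 'tagfunc': the callback may delete the
 * buffer that owns the original string (the use-after-free above). */
char_u	*tag_copy = vim_strsave(tag);

if (tag_copy != NULL)
{
    result = callback_call_sketch(tag_copy);	/* hypothetical call */
    vim_free(tag_copy);
}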
void bandwidth_del_run_file(pid_t pid) {
	char *fname;
	if (asprintf(&fname, "%s/%d-bandwidth", RUN_FIREJAIL_BANDWIDTH_DIR, (int) pid) == -1)
		errExit("asprintf");
	unlink(fname);
	free(fname);
}
0
[ "CWE-284", "CWE-269" ]
firejail
5d43fdcd215203868d440ffc42036f5f5ffc89fc
65,801,095,461,797,770,000,000,000,000,000,000,000
7
security fix