Dataset columns (viewer summary):
func: string, length 0 to 484k
target: int64, 0 or 1
cwe: list, 0 to 4 entries
project: string, 799 distinct values
commit_id: string, length 40
hash: float64, displayed range 1,215,700,430,453,689,100,000,000B to 340,281,914,521,452,260,000,000,000,000B
size: int64, 1 to 24k
message: string, length 0 to 13.3k
process_cookie(ns_client_t *client, isc_buffer_t *buf, size_t optlen) { ns_altsecret_t *altsecret; unsigned char dbuf[COOKIE_SIZE]; unsigned char *old; isc_stdtime_t now; uint32_t when; uint32_t nonce; isc_buffer_t db; /* * If we have already seen a cookie option skip this cookie option. */ if ((!client->sctx->answercookie) || (client->attributes & NS_CLIENTATTR_WANTCOOKIE) != 0) { isc_buffer_forward(buf, (unsigned int)optlen); return; } client->attributes |= NS_CLIENTATTR_WANTCOOKIE; ns_stats_increment(client->sctx->nsstats, ns_statscounter_cookiein); if (optlen != COOKIE_SIZE) { /* * Not our token. */ INSIST(optlen >= 8U); memmove(client->cookie, isc_buffer_current(buf), 8); isc_buffer_forward(buf, (unsigned int)optlen); if (optlen == 8U) { ns_stats_increment(client->sctx->nsstats, ns_statscounter_cookienew); } else { ns_stats_increment(client->sctx->nsstats, ns_statscounter_cookiebadsize); } return; } /* * Process all of the incoming buffer. */ old = isc_buffer_current(buf); memmove(client->cookie, old, 8); isc_buffer_forward(buf, 8); nonce = isc_buffer_getuint32(buf); when = isc_buffer_getuint32(buf); isc_buffer_forward(buf, 8); /* * Allow for a 5 minute clock skew between servers sharing a secret. * Only accept COOKIE if we have talked to the client in the last hour. */ isc_stdtime_get(&now); if (isc_serial_gt(when, (now + 300)) || /* In the future. */ isc_serial_lt(when, (now - 3600))) { /* In the past. */ ns_stats_increment(client->sctx->nsstats, ns_statscounter_cookiebadtime); return; } isc_buffer_init(&db, dbuf, sizeof(dbuf)); compute_cookie(client, when, nonce, client->sctx->secret, &db); if (isc_safe_memequal(old, dbuf, COOKIE_SIZE)) { ns_stats_increment(client->sctx->nsstats, ns_statscounter_cookiematch); client->attributes |= NS_CLIENTATTR_HAVECOOKIE; return; } for (altsecret = ISC_LIST_HEAD(client->sctx->altsecrets); altsecret != NULL; altsecret = ISC_LIST_NEXT(altsecret, link)) { isc_buffer_init(&db, dbuf, sizeof(dbuf)); compute_cookie(client, when, nonce, altsecret->secret, &db); if (isc_safe_memequal(old, dbuf, COOKIE_SIZE)) { ns_stats_increment(client->sctx->nsstats, ns_statscounter_cookiematch); client->attributes |= NS_CLIENTATTR_HAVECOOKIE; return; } } ns_stats_increment(client->sctx->nsstats, ns_statscounter_cookienomatch); }
0
[ "CWE-617" ]
bind9
15996f0cb15631b95a801e3e88928494a69ad6ee
69,563,406,695,543,710,000,000,000,000,000,000,000
90
ns_client_error() could assert if rcode was overridden to NOERROR The client->rcode_override was originally created to force the server to send SERVFAIL in some cases when it would normally have sent FORMERR. More recently, it was used in commit a3ba95116ed04594ea59a8124bf781b30367a7a2 (part of GL #2790) to force the sending of a TC=1 NOERROR response, triggering a retry via TCP, when a UDP packet could not be sent due to ISC_R_MAXSIZE. This ran afoul of a pre-existing INSIST in ns_client_error() when RRL was in use. The INSIST was based on the assumption that ns_client_error() could never result in a non-error rcode. As that assumption is no longer valid, the INSIST has been removed.
static Bool parse_short_term_ref_pic_set(GF_BitStream *bs, HEVC_SPS *sps, u32 idx_rps) { u32 i; Bool inter_ref_pic_set_prediction_flag = 0; if (idx_rps != 0) inter_ref_pic_set_prediction_flag = gf_bs_read_int(bs, 1); if (inter_ref_pic_set_prediction_flag ) { HEVC_ReferencePictureSets *ref_ps, *rps; u32 delta_idx_minus1 = 0; u32 ref_idx; u32 delta_rps_sign; u32 abs_delta_rps_minus1, nb_ref_pics; s32 deltaRPS; u32 k = 0, k0 = 0, k1 = 0; if (idx_rps == sps->num_short_term_ref_pic_sets) delta_idx_minus1 = bs_get_ue(bs); assert(delta_idx_minus1 <= idx_rps - 1); ref_idx = idx_rps - 1 - delta_idx_minus1; delta_rps_sign = gf_bs_read_int(bs, 1); abs_delta_rps_minus1 = bs_get_ue(bs); deltaRPS = (1 - (delta_rps_sign<<1)) * (abs_delta_rps_minus1 + 1); rps = &sps->rps[idx_rps]; ref_ps = &sps->rps[ref_idx]; nb_ref_pics = ref_ps->num_negative_pics + ref_ps->num_positive_pics; for (i=0; i<=nb_ref_pics; i++) { s32 ref_idc; s32 used_by_curr_pic_flag = gf_bs_read_int(bs, 1); ref_idc = used_by_curr_pic_flag ? 1 : 0; if ( !used_by_curr_pic_flag ) { used_by_curr_pic_flag = gf_bs_read_int(bs, 1); ref_idc = used_by_curr_pic_flag << 1; } if ((ref_idc==1) || (ref_idc== 2)) { s32 deltaPOC = deltaRPS; if (i < nb_ref_pics) deltaPOC += ref_ps->delta_poc[i]; rps->delta_poc[k] = deltaPOC; if (deltaPOC < 0) k0++; else k1++; k++; } } rps->num_negative_pics = k0; rps->num_positive_pics = k1; } else { s32 prev = 0, poc = 0; sps->rps[idx_rps].num_negative_pics = bs_get_ue(bs); sps->rps[idx_rps].num_positive_pics = bs_get_ue(bs); if (sps->rps[idx_rps].num_negative_pics>16) return GF_FALSE; if (sps->rps[idx_rps].num_positive_pics>16) return GF_FALSE; for (i=0; i<sps->rps[idx_rps].num_negative_pics; i++) { u32 delta_poc_s0_minus1 = bs_get_ue(bs); poc = prev - delta_poc_s0_minus1 - 1; prev = poc; sps->rps[idx_rps].delta_poc[i] = poc; /*used_by_curr_pic_s1_flag[ i ] = */gf_bs_read_int(bs, 1); } for (i=0; i<sps->rps[idx_rps].num_positive_pics; i++) { u32 delta_poc_s1_minus1 = bs_get_ue(bs); poc = prev + delta_poc_s1_minus1 + 1; prev = poc; sps->rps[idx_rps].delta_poc[i] = poc; /*used_by_curr_pic_s1_flag[ i ] = */gf_bs_read_int(bs, 1); } } return GF_TRUE; }
0
[ "CWE-119", "CWE-787" ]
gpac
90dc7f853d31b0a4e9441cba97feccf36d8b69a4
234,712,170,647,460,260,000,000,000,000,000,000,000
75
fix some exploitable overflows (#994, #997)
kvm_eventfd_init(struct kvm *kvm) { #ifdef CONFIG_HAVE_KVM_IRQFD spin_lock_init(&kvm->irqfds.lock); INIT_LIST_HEAD(&kvm->irqfds.items); INIT_LIST_HEAD(&kvm->irqfds.resampler_list); mutex_init(&kvm->irqfds.resampler_lock); #endif INIT_LIST_HEAD(&kvm->ioeventfds); }
0
[ "CWE-20", "CWE-617" ]
linux
36ae3c0a36b7456432fedce38ae2f7bd3e01a563
138,333,823,523,632,660,000,000,000,000,000,000,000
10
KVM: Don't accept obviously wrong gsi values via KVM_IRQFD We cannot add routes for gsi values >= KVM_MAX_IRQ_ROUTES -- see kvm_set_irq_routing(). Hence, there is no sense in accepting them via KVM_IRQFD. Prevent them from entering the system in the first place. Signed-off-by: Jan H. Schönherr <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
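A minimal C sketch of the bounds check this commit message describes; MAX_IRQ_ROUTES and irqfd_check_gsi() are placeholder names standing in for the kernel's KVM_MAX_IRQ_ROUTES and the irqfd setup path, not the actual patch:

#include <errno.h>

/* Placeholder limit; the kernel uses KVM_MAX_IRQ_ROUTES. */
#define MAX_IRQ_ROUTES 4096

/* Reject an obviously out-of-range gsi at the API boundary, before any
 * further irqfd state is set up. */
static int irqfd_check_gsi(unsigned int gsi)
{
    if (gsi >= MAX_IRQ_ROUTES)
        return -EINVAL;
    return 0;
}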
ecEncCtx* wc_ecc_ctx_new_ex(int flags, WC_RNG* rng, void* heap) { int ret = 0; ecEncCtx* ctx = (ecEncCtx*)XMALLOC(sizeof(ecEncCtx), heap, DYNAMIC_TYPE_ECC); if (ctx) { ctx->protocol = (byte)flags; ctx->heap = heap; } ret = wc_ecc_ctx_reset(ctx, rng); if (ret != 0) { wc_ecc_ctx_free(ctx); ctx = NULL; } return ctx; }
0
[ "CWE-326", "CWE-203" ]
wolfssl
1de07da61f0c8e9926dcbd68119f73230dae283f
310,745,095,536,365,560,000,000,000,000,000,000,000
19
Constant time EC map to affine for private operations For fast math, use a constant time modular inverse when mapping to affine when operation involves a private key - key gen, calc shared secret, sign.
xsltParseStylesheetImport(xsltStylesheetPtr style, xmlNodePtr cur) { int ret = -1; xmlDocPtr import = NULL; xmlChar *base = NULL; xmlChar *uriRef = NULL; xmlChar *URI = NULL; xsltStylesheetPtr res; xsltSecurityPrefsPtr sec; if ((cur == NULL) || (style == NULL)) return (ret); uriRef = xmlGetNsProp(cur, (const xmlChar *)"href", NULL); if (uriRef == NULL) { xsltTransformError(NULL, style, cur, "xsl:import : missing href attribute\n"); goto error; } base = xmlNodeGetBase(style->doc, cur); URI = xmlBuildURI(uriRef, base); if (URI == NULL) { xsltTransformError(NULL, style, cur, "xsl:import : invalid URI reference %s\n", uriRef); goto error; } res = style; while (res != NULL) { if (res->doc == NULL) break; if (xmlStrEqual(res->doc->URL, URI)) { xsltTransformError(NULL, style, cur, "xsl:import : recursion detected on imported URL %s\n", URI); goto error; } res = res->parent; } /* * Security framework check */ sec = xsltGetDefaultSecurityPrefs(); if (sec != NULL) { int secres; secres = xsltCheckRead(sec, NULL, URI); if (secres == 0) { xsltTransformError(NULL, NULL, NULL, "xsl:import: read rights for %s denied\n", URI); goto error; } } import = xsltDocDefaultLoader(URI, style->dict, XSLT_PARSE_OPTIONS, (void *) style, XSLT_LOAD_STYLESHEET); if (import == NULL) { xsltTransformError(NULL, style, cur, "xsl:import : unable to load %s\n", URI); goto error; } res = xsltParseStylesheetImportedDoc(import, style); if (res != NULL) { res->next = style->imports; style->imports = res; if (style->parent == NULL) { xsltFixImportedCompSteps(style, res); } ret = 0; } else { xmlFreeDoc(import); } error: if (uriRef != NULL) xmlFree(uriRef); if (base != NULL) xmlFree(base); if (URI != NULL) xmlFree(URI); return (ret); }
1
[]
libxslt
e03553605b45c88f0b4b2980adfbbb8f6fca2fd6
42,538,628,651,514,160,000,000,000,000,000,000,000
85
Fix security framework bypass xsltCheckRead and xsltCheckWrite return -1 in case of error, but callers don't check for this condition and allow access. With a specially crafted URL, xsltCheckRead could be tricked into returning an error because of a supposedly invalid URL that would still be loaded successfully later on. Fixes #12. Thanks to Felix Wilhelm for the report.
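A small C sketch of the pitfall this commit message describes, assuming a hypothetical check_read() that returns 1/0/-1 like xsltCheckRead; the point is that an error return must be treated as a denial rather than falling through an "== 0" test:

#include <stdio.h>

/* Hypothetical checker: 1 = allow, 0 = deny, -1 = error. */
static int check_read(const char *uri)
{
    if (uri == NULL)
        return -1;                       /* error */
    return uri[0] == '/' ? 1 : 0;        /* toy allow/deny rule */
}

int main(void)
{
    const char *uri = NULL;
    /* Treat the error case exactly like a denial; testing only "== 0"
     * would let the -1 result fall through and grant access. */
    if (check_read(uri) <= 0) {
        fprintf(stderr, "read rights denied\n");
        return 1;
    }
    puts("access granted");
    return 0;
}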
int mg_http_parse(const char *s, size_t len, struct mg_http_message *hm) { int is_response, req_len = mg_http_get_request_len((unsigned char *) s, len); const char *end = s + req_len, *qs; struct mg_str *cl; memset(hm, 0, sizeof(*hm)); if (req_len <= 0) return req_len; hm->message.ptr = hm->head.ptr = s; hm->body.ptr = end; hm->head.len = (size_t) req_len; hm->chunk.ptr = end; hm->message.len = hm->body.len = (size_t) ~0; // Set body length to infinite // Parse request line s = skip(s, end, " ", &hm->method); s = skip(s, end, " ", &hm->uri); s = skip(s, end, "\r\n", &hm->proto); // Sanity check. Allow protocol/reason to be empty if (hm->method.len == 0 || hm->uri.len == 0) return -1; // If URI contains '?' character, setup query string if ((qs = (const char *) memchr(hm->uri.ptr, '?', hm->uri.len)) != NULL) { hm->query.ptr = qs + 1; hm->query.len = (size_t) (&hm->uri.ptr[hm->uri.len] - (qs + 1)); hm->uri.len = (size_t) (qs - hm->uri.ptr); } mg_http_parse_headers(s, end, hm->headers, sizeof(hm->headers) / sizeof(hm->headers[0])); if ((cl = mg_http_get_header(hm, "Content-Length")) != NULL) { hm->body.len = (size_t) mg_to64(*cl); hm->message.len = (size_t) req_len + hm->body.len; } // mg_http_parse() is used to parse both HTTP requests and HTTP // responses. If HTTP response does not have Content-Length set, then // body is read until socket is closed, i.e. body.len is infinite (~0). // // For HTTP requests though, according to // http://tools.ietf.org/html/rfc7231#section-8.1.3, // only POST and PUT methods have defined body semantics. // Therefore, if Content-Length is not specified and methods are // not one of PUT or POST, set body length to 0. // // So, if it is HTTP request, and Content-Length is not set, // and method is not (PUT or POST) then reset body length to zero. is_response = mg_ncasecmp(hm->method.ptr, "HTTP/", 5) == 0; if (hm->body.len == (size_t) ~0 && !is_response && mg_vcasecmp(&hm->method, "PUT") != 0 && mg_vcasecmp(&hm->method, "POST") != 0) { hm->body.len = 0; hm->message.len = (size_t) req_len; } // The 204 (No content) responses also have 0 body length if (hm->body.len == (size_t) ~0 && is_response && mg_vcasecmp(&hm->uri, "204") == 0) { hm->body.len = 0; hm->message.len = (size_t) req_len; } return req_len; }
0
[ "CWE-552" ]
mongoose
c65c8fdaaa257e0487ab0aaae9e8f6b439335945
280,732,794,883,911,000,000,000,000,000,000,000,000
65
Protect against the directory traversal in mg_upload()
static void ConvertVectorsToMatrices( const OpInputList bucketized_features_list, std::vector<tensorflow::TTypes<int32>::ConstMatrix>& bucketized_features) { for (const Tensor& tensor : bucketized_features_list) { if (tensor.dims() == 1) { const auto v = tensor.vec<int32>(); bucketized_features.emplace_back( TTypes<int32>::ConstMatrix(v.data(), v.size(), 1)); } else { bucketized_features.emplace_back(tensor.matrix<int32>()); } } }
0
[ "CWE-703", "CWE-197" ]
tensorflow
ca8c013b5e97b1373b3bb1c97ea655e69f31a575
144,598,157,356,585,770,000,000,000,000,000,000,000
13
Prevent integer truncation from 64 to 32 bits. The `tensorflow::Shard` function's last argument must be a 2 argument function where both arguments are `int64` (`long long`, 64 bits). However, there are usages where code passes in a function whose arguments are `int` or `int32` (32 bits). In these cases, it is possible that the integer truncation would later cause a segfault or other unexpected behavior. PiperOrigin-RevId: 332560414 Change-Id: Ief649406babc8d4f60b3e7a9d573cbcc5ce5b767
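A short, self-contained C sketch of the truncation hazard this commit message describes (toy callbacks, not TensorFlow code): a 64-bit work range passed through 32-bit parameters silently wraps.

#include <stdint.h>
#include <stdio.h>

/* 32-bit parameters: a large 64-bit range silently wraps on the way in. */
static void shard_cb_bad(int32_t begin, int32_t end)
{
    printf("bad : begin=%d end=%d\n", begin, end);
}

/* 64-bit parameters: the range arrives intact. */
static void shard_cb_good(int64_t begin, int64_t end)
{
    printf("good: begin=%lld end=%lld\n", (long long)begin, (long long)end);
}

int main(void)
{
    int64_t total = 5000000000LL;        /* larger than INT32_MAX */
    shard_cb_bad(0, (int32_t)total);     /* truncated value */
    shard_cb_good(0, total);
    return 0;
}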
void _map_window() { Display *const dpy = cimg::X11_attr().display; bool is_exposed = false, is_mapped = false; XWindowAttributes attr; XEvent event; XMapRaised(dpy,_window); do { // Wait for the window to be mapped XWindowEvent(dpy,_window,StructureNotifyMask | ExposureMask,&event); switch (event.type) { case MapNotify : is_mapped = true; break; case Expose : is_exposed = true; break; } } while (!is_exposed || !is_mapped); do { // Wait for the window to be visible XGetWindowAttributes(dpy,_window,&attr); if (attr.map_state!=IsViewable) { XSync(dpy,0); cimg::sleep(10); } } while (attr.map_state!=IsViewable); _window_x = attr.x; _window_y = attr.y;
0
[ "CWE-119", "CWE-787" ]
CImg
ac8003393569aba51048c9d67e1491559877b1d1
58,284,481,412,935,360,000,000,000,000,000,000,000
20
.
snmp_synch_response_cb(netsnmp_session * ss, netsnmp_pdu *pdu, netsnmp_pdu **response, snmp_callback pcb) { struct synch_state lstate, *state; snmp_callback cbsav; void *cbmagsav; int numfds, count; netsnmp_large_fd_set fdset; struct timeval timeout, *tvp; int block; memset((void *) &lstate, 0, sizeof(lstate)); state = &lstate; cbsav = ss->callback; cbmagsav = ss->callback_magic; ss->callback = pcb; ss->callback_magic = (void *) state; netsnmp_large_fd_set_init(&fdset, FD_SETSIZE); if (snmp_send(ss, pdu) == 0) { snmp_free_pdu(pdu); state->status = STAT_ERROR; } else { state->reqid = pdu->reqid; state->waiting = 1; } while (state->waiting) { numfds = 0; NETSNMP_LARGE_FD_ZERO(&fdset); block = NETSNMP_SNMPBLOCK; tvp = &timeout; timerclear(tvp); snmp_sess_select_info2_flags(NULL, &numfds, &fdset, tvp, &block, NETSNMP_SELECT_NOALARMS); if (block == 1) tvp = NULL; /* block without timeout */ count = netsnmp_large_fd_set_select(numfds, &fdset, NULL, NULL, tvp); if (count > 0) { snmp_read2(&fdset); } else { switch (count) { case 0: snmp_timeout(); break; case -1: if (errno == EINTR) { continue; } else { snmp_errno = SNMPERR_GENERR; /*MTCRITICAL_RESOURCE */ /* * CAUTION! if another thread closed the socket(s) * waited on here, the session structure was freed. * It would be nice, but we can't rely on the pointer. * ss->s_snmp_errno = SNMPERR_GENERR; * ss->s_errno = errno; */ snmp_set_detail(strerror(errno)); } /* FALLTHRU */ default: state->status = STAT_ERROR; state->waiting = 0; } } if ( ss->flags & SNMP_FLAGS_RESP_CALLBACK ) { void (*cb)(void); cb = (void (*)(void))(ss->myvoid); cb(); /* Used to invoke 'netsnmp_check_outstanding_agent_requests();' on internal AgentX queries. */ } } *response = state->pdu; ss->callback = cbsav; ss->callback_magic = cbmagsav; netsnmp_large_fd_set_cleanup(&fdset); return state->status; }
0
[ "CWE-415" ]
net-snmp
5f881d3bf24599b90d67a45cae7a3eb099cd71c9
282,234,886,353,338,600,000,000,000,000,000,000,000
80
libsnmp, USM: Introduce a reference count in struct usmStateReference This patch fixes https://sourceforge.net/p/net-snmp/bugs/2956/.
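A generic C sketch of the reference-counting scheme this commit message describes; struct shared_state and its helpers are hypothetical, not the net-snmp usmStateReference API:

#include <stdlib.h>

struct shared_state {
    int refcount;
    /* ... payload ... */
};

static struct shared_state *state_new(void)
{
    struct shared_state *s = calloc(1, sizeof(*s));
    if (s)
        s->refcount = 1;                 /* creator holds the first reference */
    return s;
}

static void state_ref(struct shared_state *s)
{
    s->refcount++;                       /* each additional holder takes a reference */
}

static void state_unref(struct shared_state *s)
{
    if (s && --s->refcount == 0)         /* free only when the last holder lets go */
        free(s);
}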
TIFFWriteDirectoryTagCheckedSlong8Array(TIFF* tif, uint32* ndir, TIFFDirEntry* dir, uint16 tag, uint32 count, int64* value) { assert(count<0x20000000); assert(sizeof(int64)==8); assert(tif->tif_flags&TIFF_BIGTIFF); if (tif->tif_flags&TIFF_SWAB) TIFFSwabArrayOfLong8((uint64*)value,count); return(TIFFWriteDirectoryTagData(tif,ndir,dir,tag,TIFF_SLONG8,count,count*8,value)); }
0
[ "CWE-20" ]
libtiff
3144e57770c1e4d26520d8abee750f8ac8b75490
173,218,067,503,924,500,000,000,000,000,000,000,000
9
* libtiff/tif_dir.c, tif_dirread.c, tif_dirwrite.c: implement various clampings of double to other data types to avoid undefined behaviour if the output range isn't big enough to hold the input value. Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2643 http://bugzilla.maptools.org/show_bug.cgi?id=2642 http://bugzilla.maptools.org/show_bug.cgi?id=2646 http://bugzilla.maptools.org/show_bug.cgi?id=2647
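A minimal C sketch of the kind of clamping this ChangeLog entry describes (illustrative only, not the libtiff code): clamp the double into the destination type's range before converting.

#include <stdint.h>

/* Clamp a double into the representable range of uint32_t before the
 * conversion; a plain cast of an out-of-range value is undefined behaviour. */
static uint32_t clamp_double_to_u32(double v)
{
    if (v <= 0.0)
        return 0;
    if (v >= (double)UINT32_MAX)
        return UINT32_MAX;
    return (uint32_t)v;
}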
static struct net_bridge_mdb_entry *br_multicast_get_group( struct net_bridge *br, struct net_bridge_port *port, struct br_ip *group, int hash) { struct net_bridge_mdb_htable *mdb; struct net_bridge_mdb_entry *mp; struct hlist_node *p; unsigned count = 0; unsigned max; int elasticity; int err; mdb = rcu_dereference_protected(br->mdb, 1); hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) { count++; if (unlikely(br_ip_equal(group, &mp->addr))) return mp; } elasticity = 0; max = mdb->max; if (unlikely(count > br->hash_elasticity && count)) { if (net_ratelimit()) br_info(br, "Multicast hash table " "chain limit reached: %s\n", port ? port->dev->name : br->dev->name); elasticity = br->hash_elasticity; } if (mdb->size >= max) { max *= 2; if (unlikely(max >= br->hash_max)) { br_warn(br, "Multicast hash table maximum " "reached, disabling snooping: %s, %d\n", port ? port->dev->name : br->dev->name, max); err = -E2BIG; disable: br->multicast_disabled = 1; goto err; } } if (max > mdb->max || elasticity) { if (mdb->old) { if (net_ratelimit()) br_info(br, "Multicast hash table " "on fire: %s\n", port ? port->dev->name : br->dev->name); err = -EEXIST; goto err; } err = br_mdb_rehash(&br->mdb, max, elasticity); if (err) { br_warn(br, "Cannot rehash multicast " "hash table, disabling snooping: %s, %d, %d\n", port ? port->dev->name : br->dev->name, mdb->size, err); goto disable; } err = -EAGAIN; goto err; } return NULL; err: mp = ERR_PTR(err); return mp; }
0
[ "CWE-399" ]
linux
6b0d6a9b4296fa16a28d10d416db7a770fc03287
15,284,612,578,552,806,000,000,000,000,000,000,000
73
bridge: Fix mglist corruption that leads to memory corruption The list mp->mglist is used to indicate whether a multicast group is active on the bridge interface itself as opposed to one of the constituent interfaces in the bridge. Unfortunately the operation that adds the mp->mglist node to the list neglected to check whether it had already been added. This leads to list corruption in the form of nodes pointing to themselves. Normally this would be quite obvious as it would cause an infinite loop when walking the list. However, as this list is never actually walked (which means that we don't really need it, I'll get rid of it in a subsequent patch), this is instead hidden until we perform a delete operation on the affected nodes. As the same node may now be pointed to by more than one node, the delete operations can then cause modification of freed memory. This was observed in practice to cause corruption in 512-byte slabs, most commonly leading to crashes in jbd2. Thanks to Josef Bacik for pointing me in the right direction. Reported-by: Ian Page Hands <[email protected]> Signed-off-by: Herbert Xu <[email protected]> Signed-off-by: David S. Miller <[email protected]>
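An illustrative C sketch of the guard this commit message describes, using a hypothetical hnode list and an assumed "NULL links mean unlinked" convention rather than the kernel's hlist API: only add a node if it is not already on the list.

#include <stddef.h>
#include <stdbool.h>

struct hnode {
    struct hnode *next, *prev;
};

/* Assumed convention: NULL links mean "not currently on any list". */
static bool node_unlinked(const struct hnode *n)
{
    return n->next == NULL && n->prev == NULL;
}

static void list_add_once(struct hnode *n, struct hnode *head)
{
    if (!node_unlinked(n))
        return;          /* already linked: adding again would make it point at itself */
    n->next = head->next;
    n->prev = head;
    if (head->next)
        head->next->prev = n;
    head->next = n;
}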
static int propfind_restype(const xmlChar *name, xmlNsPtr ns, struct propfind_ctx *fctx, xmlNodePtr resp, struct propstat propstat[], void *rock __attribute__((unused))) { xmlNodePtr node = xml_add_prop(HTTP_OK, fctx->ns[NS_DAV], &propstat[PROPSTAT_OK], name, ns, NULL, 0); if (!fctx->record) { xmlNewChild(node, NULL, BAD_CAST "collection", NULL); if (fctx->req_tgt->collection) { ensure_ns(fctx->ns, NS_CALDAV, resp->parent, XML_NS_CALDAV, "C"); if (!strcmp(fctx->req_tgt->collection, SCHED_INBOX)) { xmlNewChild(node, fctx->ns[NS_CALDAV], BAD_CAST "schedule-inbox", NULL); } else if (!strcmp(fctx->req_tgt->collection, SCHED_OUTBOX)) { xmlNewChild(node, fctx->ns[NS_CALDAV], BAD_CAST "schedule-outbox", NULL); } else { xmlNewChild(node, fctx->ns[NS_CALDAV], BAD_CAST "calendar", NULL); } } } return 0; }
0
[ "CWE-787" ]
cyrus-imapd
a5779db8163b99463e25e7c476f9cbba438b65f3
77,664,367,494,400,640,000,000,000,000,000,000,000
31
HTTP: don't overrun buffer when parsing strings with sscanf()
BOOL nego_recv_response(rdpNego* nego) { int status; wStream* s; s = Stream_New(NULL, 1024); if (!s) { WLog_ERR(TAG, "Stream_New failed!"); return FALSE; } status = transport_read_pdu(nego->transport, s); if (status < 0) { Stream_Free(s, TRUE); return FALSE; } status = nego_recv(nego->transport, s, nego); Stream_Free(s, TRUE); if (status < 0) return FALSE; return TRUE; }
0
[ "CWE-125" ]
FreeRDP
6b485b146a1b9d6ce72dfd7b5f36456c166e7a16
268,050,299,352,231,380,000,000,000,000,000,000,000
28
Fixed oob read in irp_write and similar
static int nfp_flower_sriov_enable(struct nfp_app *app, int num_vfs) { struct nfp_flower_priv *priv = app->priv; if (!priv->nn) return 0; return nfp_flower_spawn_vnic_reprs(app, NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF, NFP_REPR_TYPE_VF, num_vfs); }
0
[ "CWE-400", "CWE-401" ]
linux
8572cea1461a006bce1d06c0c4b0575869125fa4
38,951,482,155,560,894,000,000,000,000,000,000,000
11
nfp: flower: prevent memory leak in nfp_flower_spawn_phy_reprs In nfp_flower_spawn_phy_reprs, in the for loop over eth_tbl, if any of the intermediate allocations or initializations fails, memory is leaked. The required releases are added. Fixes: b94524529741 ("nfp: flower: add per repr private data for LAG offload") Signed-off-by: Navid Emamdoost <[email protected]> Acked-by: Jakub Kicinski <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol, int op_flag, unsigned int size, unsigned int __user *tlv) { struct user_element *ue = kcontrol->private_data; int change = 0; void *new_data; if (op_flag > 0) { if (size > 1024 * 128) /* sane value */ return -EINVAL; new_data = memdup_user(tlv, size); if (IS_ERR(new_data)) return PTR_ERR(new_data); change = ue->tlv_data_size != size; if (!change) change = memcmp(ue->tlv_data, new_data, size); kfree(ue->tlv_data); ue->tlv_data = new_data; ue->tlv_data_size = size; } else { if (! ue->tlv_data_size || ! ue->tlv_data) return -ENXIO; if (size < ue->tlv_data_size) return -ENOSPC; if (copy_to_user(tlv, ue->tlv_data, ue->tlv_data_size)) return -EFAULT; } return change; }
1
[ "CWE-362", "CWE-399" ]
linux
07f4d9d74a04aa7c72c5dae0ef97565f28f17b92
275,738,377,376,799,750,000,000,000,000,000,000,000
32
ALSA: control: Protect user controls against concurrent access The user-control put and get handlers as well as the TLV handler do not protect against concurrent access from multiple threads. Since the state of the control is not updated atomically, it is possible that either two write operations or a write and a read operation race against each other. Both can lead to arbitrary memory disclosure. This patch introduces a new lock that protects user-controls from concurrent access. Since applications typically access controls sequentially rather than in parallel, a single lock per card should be fine. Signed-off-by: Lars-Peter Clausen <[email protected]> Acked-by: Jaroslav Kysela <[email protected]> Cc: <[email protected]> Signed-off-by: Takashi Iwai <[email protected]>
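A user-space C sketch of the serialization this commit message describes, with a POSIX mutex standing in for the kernel lock and hypothetical struct and field names: every read-modify-write of the user control's TLV data happens under one lock.

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct user_ctl {
    pthread_mutex_t lock;
    void  *tlv_data;
    size_t tlv_data_size;
};

static int user_ctl_put_tlv(struct user_ctl *ue, const void *buf, size_t size)
{
    void *copy = malloc(size);
    if (!copy)
        return -1;
    memcpy(copy, buf, size);

    /* The swap of tlv_data/tlv_data_size happens atomically with respect
     * to any other thread that takes the same lock. */
    pthread_mutex_lock(&ue->lock);
    free(ue->tlv_data);
    ue->tlv_data = copy;
    ue->tlv_data_size = size;
    pthread_mutex_unlock(&ue->lock);
    return 0;
}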
static int bpf_btf_load(const union bpf_attr *attr) { if (CHECK_ATTR(BPF_BTF_LOAD)) return -EINVAL; if (!bpf_capable()) return -EPERM; return btf_new_fd(attr); }
0
[ "CWE-307" ]
linux
350a5c4dd2452ea999cc5e1d4a8dbf12de2f97ef
289,415,696,704,985,170,000,000,000,000,000,000,000
10
bpf: Don't allow vmlinux BTF to be used in map_create and prog_load. The syzbot fuzzer got an FD for the vmlinux BTF and passed it into map_create, which caused a crash in btf_type_id_size() when it tried to access resolved_ids. The vmlinux BTF doesn't have 'resolved_ids' and 'resolved_sizes' initialized, to save memory. To avoid such issues, disallow using vmlinux BTF in prog_load and map_create commands. Fixes: 5329722057d4 ("bpf: Assign ID to vmlinux BTF and return extra info for BTF in GET_OBJ_INFO") Reported-by: [email protected] Signed-off-by: Alexei Starovoitov <[email protected]> Signed-off-by: Daniel Borkmann <[email protected]> Acked-by: Yonghong Song <[email protected]> Link: https://lore.kernel.org/bpf/[email protected]
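A hedged C sketch of the rejection this commit message describes (user-space stand-in with a hypothetical is_kernel_btf flag, not the kernel's btf API): refuse a BTF object that belongs to the kernel itself.

#include <errno.h>
#include <stdbool.h>

struct btf {
    bool is_kernel_btf;          /* hypothetical flag marking vmlinux/module BTF */
    /* ... */
};

static int check_user_btf(const struct btf *btf)
{
    if (btf->is_kernel_btf)
        return -EACCES;          /* kernel BTF cannot back a map or program */
    return 0;
}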
static void print_line(char* line) { while (*line && *line != '\n') { fputc(*line, stderr); line++; } fputc('\n', stderr); }
0
[ "CWE-284", "CWE-295" ]
mysql-server
3bd5589e1a5a93f9c224badf983cd65c45215390
277,817,600,994,043,200,000,000,000,000,000,000,000
9
WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries to mean force ssl, not try ssl and fail over to unencrypted.
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options() option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client SSL handling headers that sets all the relevant SSL options at once.
# Revamped all of the current native clients to use the new macro.
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper headers.
# If SSL is mandatory, assume that the media is secure enough for the sha256 plugin to do unencrypted password exchange even before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is specified.
# Updated test cases that require a non-default cipher to spawn a mysql command line tool binary since mysqltest has no support for specifying ciphers.
# Updated the replication slave connection code to always enforce SSL if any of the SSL config options is present.
# Test cases added and updated.
# Added a mysql_get_option() API to return mysql_options() values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401).
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option().
# Fixed some trailing whitespace issues.
# Fixed some uint/int warnings in mysql_client_test.c.
# Removed shared memory option from non-windows get_options tests.
# Moved MYSQL_OPT_LOCAL_INFILE to the uint options.
do_dcs(void) { // Implemented: // DECRQSS (Request Status String) // DECAUPSS (Assign User-Preferred Supplemental Set) // DECSIXEL // No DECUDK (User-Defined Keys) or xterm termcap/terminfo data. char *s = term.cmd_buf; if (!term.cmd_len) *s = 0; switch (term.dcs_cmd) { when CPAIR('!', 'u'): // DECAUPSS if (term.state == DCS_ESCAPE) { ushort nrc_code = 0; if (term.cmd_len == 1) nrc_code = *s; else if (term.cmd_len == 2) nrc_code = CPAIR(s[0], s[1]); term_cset cs = lookup_cset(nrc_code, 7, false); if (cs) { term.curs.decsupp = cs; term_update_cs(); return; } } when 'q': { sixel_state_t * st = (sixel_state_t *)term.imgs.parser_state; int status = -1; switch (term.state) { when DCS_PASSTHROUGH: if (!st) return; status = sixel_parser_parse(st, (unsigned char *)s, term.cmd_len); if (status < 0) { sixel_parser_deinit(st); //printf("free state 1 %p\n", term.imgs.parser_state); free(term.imgs.parser_state); term.imgs.parser_state = NULL; term.state = DCS_IGNORE; return; } when DCS_ESCAPE: if (!st) return; status = sixel_parser_parse(st, (unsigned char *)s, term.cmd_len); if (status < 0) { sixel_parser_deinit(st); //printf("free state 2 %p\n", term.imgs.parser_state); free(term.imgs.parser_state); term.imgs.parser_state = NULL; return; } unsigned char * pixels = sixel_parser_finalize(st); //printf("sixel_parser_finalize %p\n", pixels); sixel_parser_deinit(st); if (!pixels) { //printf("free state 3 %p\n", term.imgs.parser_state); free(term.imgs.parser_state); term.imgs.parser_state = NULL; return; } short left = term.curs.x; short top = term.virtuallines + (term.sixel_display ? 0: term.curs.y); int width = (st->image.width -1 ) / st->grid_width + 1; int height = (st->image.height -1 ) / st->grid_height + 1; int pixelwidth = st->image.width; int pixelheight = st->image.height; //printf("w %d/%d %d h %d/%d %d\n", pixelwidth, st->grid_width, width, pixelheight, st->grid_height, height); imglist * img; if (!winimg_new(&img, 0, pixels, 0, left, top, width, height, pixelwidth, pixelheight, false, 0, 0, 0, 0)) { free(pixels); sixel_parser_deinit(st); //printf("free state 4 %p\n", term.imgs.parser_state); free(term.imgs.parser_state); term.imgs.parser_state = NULL; return; } img->cwidth = st->max_x; img->cheight = st->max_y; fill_image_space(img); // add image to image list; // replace previous for optimisation in some cases if (term.imgs.first == NULL) { term.imgs.first = term.imgs.last = img; } else { // try some optimization: replace existing images if overwritten #ifdef debug_sixel_list printf("do_dcs checking imglist\n"); #endif #ifdef replace_images #warning do not replace images in the list anymore // with new flicker-reduce strategy of rendering overlapped images, // new images should always be added to the end of the queue; // completely overlayed images should be collected for removal // during the rendering loop (winimgs_paint), // or latest when they are scrolled out of the scrollback buffer for (imglist * cur = term.imgs.first; cur; cur = cur->next) { if (cur->pixelwidth == cur->width * st->grid_width && cur->pixelheight == cur->height * st->grid_height) { // if same size, replace if (img->top == cur->top && img->left == cur->left && img->width == cur->width && img->height == cur->height) { #ifdef debug_sixel_list printf("img replace\n"); #endif memcpy(cur->pixels, img->pixels, img->pixelwidth * img->pixelheight * 4); cur->imgi = img->imgi; winimg_destroy(img); return; } // if new image within area of previous image, ... 
#ifdef handle_overlay_images #warning this creates some crash conditions... if (img->top >= cur->top && img->left >= cur->left && img->left + img->width <= cur->left + cur->width && img->top + img->height <= cur->top + cur->height) { // inject new img into old structure; // copy img data in stripes, for unknown reason for (y = 0; y < img->pixelheight; ++y) { memcpy(cur->pixels + ((img->top - cur->top) * st->grid_height + y) * cur->pixelwidth * 4 + (img->left - cur->left) * st->grid_width * 4, img->pixels + y * img->pixelwidth * 4, img->pixelwidth * 4); } cur->imgi = img->imgi; winimg_destroy(img); return; } #endif } } #endif // append image to list img->prev = term.imgs.last; term.imgs.last->next = img; term.imgs.last = img; } otherwise: { /* parser status initialization */ colour fg = win_get_colour(FG_COLOUR_I); colour bg = win_get_colour(BG_COLOUR_I); if (!st) { st = term.imgs.parser_state = calloc(1, sizeof(sixel_state_t)); //printf("alloc state %d -> %p\n", (int)sizeof(sixel_state_t), st); sixel_parser_set_default_color(st); } status = sixel_parser_init(st, fg, bg, term.private_color_registers); if (status < 0) return; } } } when CPAIR('$', 'q'): switch (term.state) { when DCS_ESCAPE: { // DECRQSS cattr attr = term.curs.attr; if (!strcmp(s, "m")) { // SGR char buf[90], *p = buf; p += sprintf(p, "\eP1$r0"); if (attr.attr & ATTR_BOLD) p += sprintf(p, ";1"); if (attr.attr & ATTR_DIM) p += sprintf(p, ";2"); if (attr.attr & ATTR_SHADOW) p += sprintf(p, ";1:2"); if (attr.attr & ATTR_ITALIC) p += sprintf(p, ";3"); if (attr.attr & ATTR_BROKENUND) if (attr.attr & ATTR_DOUBLYUND) p += sprintf(p, ";4:5"); else p += sprintf(p, ";4:4"); else if ((attr.attr & UNDER_MASK) == ATTR_CURLYUND) p += sprintf(p, ";4:3"); else if (attr.attr & ATTR_UNDER) p += sprintf(p, ";4"); if (attr.attr & ATTR_BLINK) p += sprintf(p, ";5"); if (attr.attr & ATTR_BLINK2) p += sprintf(p, ";6"); if (attr.attr & ATTR_REVERSE) p += sprintf(p, ";7"); if (attr.attr & ATTR_INVISIBLE) p += sprintf(p, ";8"); if (attr.attr & ATTR_OVERSTRIKE) p += sprintf(p, ";8:7"); if (attr.attr & ATTR_STRIKEOUT) p += sprintf(p, ";9"); if ((attr.attr & UNDER_MASK) == ATTR_DOUBLYUND) p += sprintf(p, ";21"); if (attr.attr & ATTR_FRAMED) p += sprintf(p, ";51;52"); if (attr.attr & ATTR_OVERL) p += sprintf(p, ";53"); if (attr.attr & ATTR_SUPERSCR) p += sprintf(p, ";73"); if (attr.attr & ATTR_SUBSCR) p += sprintf(p, ";74"); if (term.curs.oem_acs) p += sprintf(p, ";%u", 10 + term.curs.oem_acs); else { uint ff = (attr.attr & FONTFAM_MASK) >> ATTR_FONTFAM_SHIFT; if (ff) p += sprintf(p, ";%u", 10 + ff); } uint fg = (attr.attr & ATTR_FGMASK) >> ATTR_FGSHIFT; if (fg != FG_COLOUR_I) { if (fg >= TRUE_COLOUR) //p += sprintf(p, ";38;2;%u;%u;%u", attr.truefg & 0xFF, // (attr.truefg >> 8) & 0xFF, (attr.truefg >> 16) & 0xFF); p += sprintf(p, ";38:2::%u:%u:%u", attr.truefg & 0xFF, (attr.truefg >> 8) & 0xFF, (attr.truefg >> 16) & 0xFF); else if (fg < 16) p += sprintf(p, ";%u", (fg < 8 ? 30 : 90) + (fg & 7)); else //p += sprintf(p, ";38;5;%u", fg); p += sprintf(p, ";38:5:%u", fg); } uint bg = (attr.attr & ATTR_BGMASK) >> ATTR_BGSHIFT; if (bg != BG_COLOUR_I) { if (bg >= TRUE_COLOUR) //p += sprintf(p, ";48;2;%u;%u;%u", attr.truebg & 0xFF, // (attr.truebg >> 8) & 0xFF, (attr.truebg >> 16) & 0xFF); p += sprintf(p, ";48:2::%u:%u:%u", attr.truebg & 0xFF, (attr.truebg >> 8) & 0xFF, (attr.truebg >> 16) & 0xFF); else if (bg < 16) p += sprintf(p, ";%u", (bg < 8 ? 
40 : 100) + (bg & 7)); else //p += sprintf(p, ";48;5;%u", bg); p += sprintf(p, ";48:5:%u", bg); } if (attr.attr & ATTR_ULCOLOUR) { p += sprintf(p, ";58:2::%u:%u:%u", attr.ulcolr & 0xFF, (attr.ulcolr >> 8) & 0xFF, (attr.ulcolr >> 16) & 0xFF); } p += sprintf(p, "m\e\\"); // m for SGR, followed by ST child_write(buf, p - buf); } else if (!strcmp(s, "r")) { // DECSTBM (scrolling region margins) child_printf("\eP1$r%u;%ur\e\\", term.marg_top + 1, term.marg_bot + 1); } else if (!strcmp(s, "s")) { // DECSLRM (left and right margins) child_printf("\eP1$r%u;%us\e\\", term.marg_left + 1, term.marg_right + 1); } else if (!strcmp(s, "\"p")) { // DECSCL (conformance level) child_printf("\eP1$r%u;%u\"p\e\\", 65, 1); // report as VT500 S7C1T } else if (!strcmp(s, "\"q")) { // DECSCA (protection attribute) child_printf("\eP1$r%u\"q\e\\", (attr.attr & ATTR_PROTECTED) != 0); } else if (!strcmp(s, " q")) { // DECSCUSR (cursor style) child_printf("\eP1$r%u q\e\\", (term.cursor_type >= 0 ? term.cursor_type * 2 : 0) + 1 + !(term.cursor_blinks & 1)); } else if (!strcmp(s, "t") && term.rows >= 24) { // DECSLPP (lines) child_printf("\eP1$r%ut\e\\", term.rows); } else if (!strcmp(s, "$|")) { // DECSCPP (columns) child_printf("\eP1$r%u$|\e\\", term.cols); } else if (!strcmp(s, "*|")) { // DECSNLS (lines) child_printf("\eP1$r%u*|\e\\", term.rows); } else { child_printf("\eP0$r%s\e\\", s); } } otherwise: return; } } }
0
[ "CWE-703", "CWE-770" ]
mintty
bd52109993440b6996760aaccb66e68e782762b9
179,499,562,271,018,370,000,000,000,000,000,000,000
292
tame some window operations, just in case
attach_pending_entry_connections_cb(mainloop_event_t *ev, void *arg) { (void)ev; (void)arg; connection_ap_attach_pending(0); }
0
[ "CWE-532" ]
tor
80c404c4b79f3bcba3fc4585d4c62a62a04f3ed9
171,846,604,982,342,160,000,000,000,000,000,000,000
6
Log warning when connecting to soon-to-be-deprecated v2 onions.
static gboolean udp4_listener_event(GIOChannel *channel, GIOCondition condition, gpointer user_data) { struct listener_data *ifdata = user_data; return udp_listener_event(channel, condition, ifdata, AF_INET, &ifdata->udp4_listener_watch); }
0
[ "CWE-119" ]
connman
5c281d182ecdd0a424b64f7698f32467f8f67b71
336,248,983,432,195,330,000,000,000,000,000,000,000
8
dnsproxy: Fix crash on malformed DNS response If the response query string is malformed, we might access memory past the end of the "name" variable in parse_response().
const Hashes& sslHashes::get_certVerify() const { return certVerify_; }
0
[ "CWE-254" ]
mysql-server
e7061f7e5a96c66cb2e0bf46bec7f6ff35801a69
245,921,591,440,358,050,000,000,000,000,000,000,000
4
Bug #22738607: YASSL FUNCTION X509_NAME_GET_INDEX_BY_NID IS NOT WORKING AS EXPECTED.
DeepScanLineInputFile::~DeepScanLineInputFile () { if (_data->_deleteStream) delete _data->_streamData->is; if (_data) { if (!_data->memoryMapped) for (size_t i = 0; i < _data->lineBuffers.size(); i++) delete [] _data->lineBuffers[i]->buffer; // // Unless this file was opened via the multipart API, delete the streamdata // object too. // (TODO) it should be "isMultiPart(data->version)", but when there is only // single part, // (see the above constructor) the version field is not set. // // (TODO) we should have a way to tell if the stream data is owned by this // file or by a parent multipart file. // if (_data->partNumber == -1 && _data->_streamData) delete _data->_streamData; delete _data; } }
1
[ "CWE-125" ]
openexr
e79d2296496a50826a15c667bf92bdc5a05518b4
294,344,434,220,212,100,000,000,000,000,000,000,000
28
fix memory leaks and invalid memory accesses Signed-off-by: Peter Hillman <[email protected]>
long long ustime(void) { struct timeval tv; long long ust; gettimeofday(&tv, NULL); ust = ((long long)tv.tv_sec)*1000000; ust += tv.tv_usec; return ust; }
0
[ "CWE-770" ]
redis
5674b0057ff2903d43eaff802017eddf37c360f8
51,894,437,171,416,790,000,000,000,000,000,000,000
9
Prevent unauthenticated client from easily consuming lots of memory (CVE-2021-32675) This change sets a low limit for multibulk and bulk length in the protocol for unauthenticated connections, so that they can't easily cause redis to allocate massive amounts of memory by sending just a few characters on the network. The new limits are 10 arguments of 16KB each (instead of 1M arguments of 512MB each).
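An illustrative C sketch of the limits this commit message describes; the constants mirror the numbers quoted above, but the helper names are hypothetical and this is not the redis parser:

#include <stdbool.h>

#define AUTH_MAX_MULTIBULK    (1024 * 1024)          /* 1M arguments   */
#define AUTH_MAX_BULK         (512LL * 1024 * 1024)  /* 512MB per bulk */
#define NOAUTH_MAX_MULTIBULK  10                     /* 10 arguments   */
#define NOAUTH_MAX_BULK       (16LL * 1024)          /* 16KB per bulk  */

static bool multibulk_len_ok(long long n, bool authenticated)
{
    return n >= 0 &&
           n <= (authenticated ? AUTH_MAX_MULTIBULK : NOAUTH_MAX_MULTIBULK);
}

static bool bulk_len_ok(long long n, bool authenticated)
{
    return n >= 0 &&
           n <= (authenticated ? AUTH_MAX_BULK : NOAUTH_MAX_BULK);
}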
xmlSchemaSAXHandleStartElementNs(void *ctx, const xmlChar * localname, const xmlChar * prefix ATTRIBUTE_UNUSED, const xmlChar * URI, int nb_namespaces, const xmlChar ** namespaces, int nb_attributes, int nb_defaulted ATTRIBUTE_UNUSED, const xmlChar ** attributes) { xmlSchemaValidCtxtPtr vctxt = (xmlSchemaValidCtxtPtr) ctx; int ret; xmlSchemaNodeInfoPtr ielem; int i, j; /* * SAX VAL TODO: What to do with nb_defaulted? */ /* * Skip elements if inside a "skip" wildcard or invalid. */ vctxt->depth++; if ((vctxt->skipDepth != -1) && (vctxt->depth >= vctxt->skipDepth)) return; /* * Push the element. */ if (xmlSchemaValidatorPushElem(vctxt) == -1) { VERROR_INT("xmlSchemaSAXHandleStartElementNs", "calling xmlSchemaValidatorPushElem()"); goto internal_error; } ielem = vctxt->inode; /* * TODO: Is this OK? */ ielem->nodeLine = xmlSAX2GetLineNumber(vctxt->parserCtxt); ielem->localName = localname; ielem->nsName = URI; ielem->flags |= XML_SCHEMA_ELEM_INFO_EMPTY; /* * Register namespaces on the elem info. */ if (nb_namespaces != 0) { /* * Although the parser builds its own namespace list, * we have no access to it, so we'll use an own one. */ for (i = 0, j = 0; i < nb_namespaces; i++, j += 2) { /* * Store prefix and namespace name. */ if (ielem->nsBindings == NULL) { ielem->nsBindings = (const xmlChar **) xmlMalloc(10 * sizeof(const xmlChar *)); if (ielem->nsBindings == NULL) { xmlSchemaVErrMemory(vctxt, "allocating namespace bindings for SAX validation", NULL); goto internal_error; } ielem->nbNsBindings = 0; ielem->sizeNsBindings = 5; } else if (ielem->sizeNsBindings <= ielem->nbNsBindings) { ielem->sizeNsBindings *= 2; ielem->nsBindings = (const xmlChar **) xmlRealloc( (void *) ielem->nsBindings, ielem->sizeNsBindings * 2 * sizeof(const xmlChar *)); if (ielem->nsBindings == NULL) { xmlSchemaVErrMemory(vctxt, "re-allocating namespace bindings for SAX validation", NULL); goto internal_error; } } ielem->nsBindings[ielem->nbNsBindings * 2] = namespaces[j]; if (namespaces[j+1][0] == 0) { /* * Handle xmlns="". */ ielem->nsBindings[ielem->nbNsBindings * 2 + 1] = NULL; } else ielem->nsBindings[ielem->nbNsBindings * 2 + 1] = namespaces[j+1]; ielem->nbNsBindings++; } } /* * Register attributes. * SAX VAL TODO: We are not adding namespace declaration * attributes yet. */ if (nb_attributes != 0) { xmlChar *value; for (j = 0, i = 0; i < nb_attributes; i++, j += 5) { /* * Duplicate the value, changing any &#38; to a literal ampersand. * * libxml2 differs from normal SAX here in that it escapes all ampersands * as &#38; instead of delivering the raw converted string. Changing the * behavior at this point would break applications that use this API, so * we are forced to work around it. There is no danger of accidentally * decoding some entity other than &#38; in this step because without * unescaped ampersands there can be no other entities in the string. */ value = xmlStringLenDecodeEntities(vctxt->parserCtxt, attributes[j+3], attributes[j+4] - attributes[j+3], XML_SUBSTITUTE_REF, 0, 0, 0); /* * TODO: Set the node line. */ ret = xmlSchemaValidatorPushAttribute(vctxt, NULL, ielem->nodeLine, attributes[j], attributes[j+2], 0, value, 1); if (ret == -1) { VERROR_INT("xmlSchemaSAXHandleStartElementNs", "calling xmlSchemaValidatorPushAttribute()"); goto internal_error; } } } /* * Validate the element. 
*/ ret = xmlSchemaValidateElem(vctxt); if (ret != 0) { if (ret == -1) { VERROR_INT("xmlSchemaSAXHandleStartElementNs", "calling xmlSchemaValidateElem()"); goto internal_error; } goto exit; } exit: return; internal_error: vctxt->err = -1; xmlStopParser(vctxt->parserCtxt); return; }
0
[ "CWE-134" ]
libxml2
4472c3a5a5b516aaf59b89be602fbce52756c3e9
182,894,037,550,523,080,000,000,000,000,000,000,000
144
Fix some format string warnings with possible format string vulnerability For https://bugzilla.gnome.org/show_bug.cgi?id=761029 Decorate every method in libxml2 with the appropriate LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups following the reports.
msg_puts_title(char *s) { msg_puts_attr(s, HL_ATTR(HLF_T)); }
0
[ "CWE-416" ]
vim
9f1a39a5d1cd7989ada2d1cb32f97d84360e050f
338,321,489,813,187,530,000,000,000,000,000,000,000
4
patch 8.2.4040: keeping track of allocated lines is too complicated Problem: Keeping track of allocated lines in user functions is too complicated. Solution: Instead of freeing individual lines keep them all until the end.
int luaRedisSha1hexCommand(lua_State *lua) { int argc = lua_gettop(lua); char digest[41]; size_t len; char *s; if (argc != 1) { lua_pushstring(lua, "wrong number of arguments"); return lua_error(lua); } s = (char*)lua_tolstring(lua,1,&len); sha1hex(digest,s,len); lua_pushstring(lua,digest); return 1; }
0
[ "CWE-703", "CWE-125" ]
redis
6ac3c0b7abd35f37201ed2d6298ecef4ea1ae1dd
131,199,443,083,203,370,000,000,000,000,000,000,000
16
Fix protocol parsing on 'ldbReplParseCommand' (CVE-2021-32672) The protocol parsing in 'ldbReplParseCommand' (Lua debugging) assumed protocol correctness. This means that if the following is given: *1 $100 test, the parser will try to read an additional 94 unallocated bytes beyond the client buffer. This commit fixes this issue by validating that there are actually enough bytes to read. It also limits the amount of data that can be sent by the debugger client to 1M, so the client will not be able to explode the memory.
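A simplified C sketch of the length validation this commit message describes (hypothetical parser helper, not the redis ldbReplParseCommand code): never trust a declared bulk length without checking it against the bytes actually present.

#include <stddef.h>

/* buf/buflen is the raw client buffer; hdr_end points just past a parsed
 * "$<len>\r\n" header. Refuse the bulk unless <len> bytes really follow. */
static int read_bulk(const char *buf, size_t buflen, size_t hdr_end,
                     long long declared_len, const char **out)
{
    if (declared_len < 0 || hdr_end > buflen)
        return -1;
    if ((unsigned long long)declared_len > (unsigned long long)(buflen - hdr_end))
        return -1;               /* would read past the end of the client buffer */
    *out = buf + hdr_end;
    return 0;
}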
reverse_nat_packet(struct dp_packet *pkt, const struct conn *conn) { char *tail = dp_packet_tail(pkt); uint8_t pad = dp_packet_l2_pad_size(pkt); struct conn_key inner_key; const char *inner_l4 = NULL; uint16_t orig_l3_ofs = pkt->l3_ofs; uint16_t orig_l4_ofs = pkt->l4_ofs; if (conn->key.dl_type == htons(ETH_TYPE_IP)) { struct ip_header *nh = dp_packet_l3(pkt); struct icmp_header *icmp = dp_packet_l4(pkt); struct ip_header *inner_l3 = (struct ip_header *) (icmp + 1); /* This call is already verified to succeed during the code path from * 'conn_key_extract()' which calls 'extract_l4_icmp()'. */ extract_l3_ipv4(&inner_key, inner_l3, tail - ((char *)inner_l3) - pad, &inner_l4, false); pkt->l3_ofs += (char *) inner_l3 - (char *) nh; pkt->l4_ofs += inner_l4 - (char *) icmp; if (conn->nat_info->nat_action & NAT_ACTION_SRC) { packet_set_ipv4_addr(pkt, &inner_l3->ip_src, conn->key.src.addr.ipv4_aligned); } else if (conn->nat_info->nat_action & NAT_ACTION_DST) { packet_set_ipv4_addr(pkt, &inner_l3->ip_dst, conn->key.dst.addr.ipv4_aligned); } reverse_pat_packet(pkt, conn); icmp->icmp_csum = 0; icmp->icmp_csum = csum(icmp, tail - (char *) icmp - pad); } else { struct ovs_16aligned_ip6_hdr *nh6 = dp_packet_l3(pkt); struct icmp6_error_header *icmp6 = dp_packet_l4(pkt); struct ovs_16aligned_ip6_hdr *inner_l3_6 = (struct ovs_16aligned_ip6_hdr *) (icmp6 + 1); /* This call is already verified to succeed during the code path from * 'conn_key_extract()' which calls 'extract_l4_icmp6()'. */ extract_l3_ipv6(&inner_key, inner_l3_6, tail - ((char *)inner_l3_6) - pad, &inner_l4); pkt->l3_ofs += (char *) inner_l3_6 - (char *) nh6; pkt->l4_ofs += inner_l4 - (char *) icmp6; if (conn->nat_info->nat_action & NAT_ACTION_SRC) { packet_set_ipv6_addr(pkt, conn->key.nw_proto, inner_l3_6->ip6_src.be32, &conn->key.src.addr.ipv6_aligned, true); } else if (conn->nat_info->nat_action & NAT_ACTION_DST) { packet_set_ipv6_addr(pkt, conn->key.nw_proto, inner_l3_6->ip6_dst.be32, &conn->key.dst.addr.ipv6_aligned, true); } reverse_pat_packet(pkt, conn); icmp6->icmp6_base.icmp6_cksum = 0; icmp6->icmp6_base.icmp6_cksum = packet_csum_upperlayer6(nh6, icmp6, IPPROTO_ICMPV6, tail - (char *) icmp6 - pad); } pkt->l3_ofs = orig_l3_ofs; pkt->l4_ofs = orig_l4_ofs; }
1
[ "CWE-400" ]
ovs
35c280072c1c3ed58202745b7d27fbbd0736999b
243,784,179,096,545,000,000,000,000,000,000,000,000
63
flow: Support extra padding length. Although not required, padding can be optionally added until the packet length is MTU bytes. A packet with extra padding currently fails sanity checks. Vulnerability: CVE-2020-35498 Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.") Reported-by: Joakim Hindersson <[email protected]> Acked-by: Ilya Maximets <[email protected]> Signed-off-by: Flavio Leitner <[email protected]> Signed-off-by: Ilya Maximets <[email protected]>
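A small C sketch of the idea this commit message describes, with hypothetical field names rather than the OVS dp_packet API: exclude trailing L2 padding when sanity-checking how many L3 payload bytes are available.

#include <stdbool.h>
#include <stddef.h>

/* frame_len: total frame bytes; l3_ofs: offset of the L3 header;
 * l2_pad: trailing Ethernet padding bytes; claimed_l3_len: length the
 * L3 header says it needs. */
static bool l3_len_ok(size_t frame_len, size_t l3_ofs, size_t l2_pad,
                      size_t claimed_l3_len)
{
    if (l3_ofs > frame_len || l2_pad > frame_len - l3_ofs)
        return false;
    /* bytes usable by L3, with the trailing padding excluded */
    size_t avail = frame_len - l3_ofs - l2_pad;
    return claimed_l3_len <= avail;
}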
TPMI_ALG_ECC_SCHEME_Unmarshal(TPMI_ALG_ECC_SCHEME *target, BYTE **buffer, INT32 *size, BOOL allowNull) { TPM_RC rc = TPM_RC_SUCCESS; TPMI_ALG_ECC_SCHEME orig_target = *target; // libtpms added if (rc == TPM_RC_SUCCESS) { rc = TPM_ALG_ID_Unmarshal(target, buffer, size); } if (rc == TPM_RC_SUCCESS) { switch (*target) { #if ALG_ECDSA case TPM_ALG_ECDSA: #endif #if ALG_SM2 case TPM_ALG_SM2: #endif #if ALG_ECDAA case TPM_ALG_ECDAA: #endif #if ALG_ECSCHNORR case TPM_ALG_ECSCHNORR: #endif #if ALG_ECDH case TPM_ALG_ECDH: #endif #if ALG_ECMQV case TPM_ALG_ECMQV: #endif break; case TPM_ALG_NULL: if (allowNull) { break; } default: rc = TPM_RC_SCHEME; *target = orig_target; // libtpms added } } return rc; }
0
[ "CWE-787" ]
libtpms
5cc98a62dc6f204dcf5b87c2ee83ac742a6a319b
61,617,175,963,765,590,000,000,000,000,000,000,000
40
tpm2: Restore original value if unmarshalled value was illegal Restore the original value of the memory location where data from a stream was unmarshalled and the unmarshalled value was found to be illegal. The goal is to not keep illegal values in memory. Signed-off-by: Stefan Berger <[email protected]>
void ConnectionImpl::onResetStreamBase(StreamResetReason reason) { ASSERT(!reset_stream_called_); reset_stream_called_ = true; onResetStream(reason); }
0
[ "CWE-400", "CWE-703" ]
envoy
afc39bea36fd436e54262f150c009e8d72db5014
220,577,967,067,008,420,000,000,000,000,000,000,000
5
Track byteSize of HeaderMap internally. Introduces a cached byte size updated internally in HeaderMap. The value is stored as an optional, and is cleared whenever a non-const pointer or reference to a HeaderEntry is accessed. The cached value can be set with refreshByteSize() which performs an iteration over the HeaderMap to sum the size of each key and value in the HeaderMap. Signed-off-by: Asra Ali <[email protected]>
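A plain C sketch of the caching scheme this commit message describes (the real change is C++ inside Envoy's HeaderMap; all names here are hypothetical): cache the byte size, invalidate it on any mutable access, and recompute by iteration on demand.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct header {
    const char *key, *value;
};

struct header_map {
    struct header *entries;
    size_t count;
    bool   size_valid;           /* the "optional" cached value */
    size_t cached_size;
};

/* Handing out a mutable entry may change a key or value, so the cache
 * is cleared here and recomputed lazily later. */
static struct header *header_map_get_mutable(struct header_map *m, size_t i)
{
    m->size_valid = false;
    return &m->entries[i];
}

/* Analogue of refreshByteSize(): sum key and value lengths on demand. */
static size_t header_map_byte_size(struct header_map *m)
{
    if (!m->size_valid) {
        size_t sum = 0;
        for (size_t i = 0; i < m->count; i++)
            sum += strlen(m->entries[i].key) + strlen(m->entries[i].value);
        m->cached_size = sum;
        m->size_valid = true;
    }
    return m->cached_size;
}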
void remove_from_page_cache(struct page *page) { struct address_space *mapping = page->mapping; BUG_ON(!PageLocked(page)); spin_lock_irq(&mapping->tree_lock); __remove_from_page_cache(page); spin_unlock_irq(&mapping->tree_lock); }
0
[ "CWE-193" ]
linux-2.6
94ad374a0751f40d25e22e036c37f7263569d24c
241,249,323,538,195,550,000,000,000,000,000,000,000
10
Fix off-by-one error in iov_iter_advance() The iov_iter_advance() function would look at the iov->iov_len entry even though it might have iterated over the whole array, and iov was pointing past the end. This would cause DEBUG_PAGEALLOC to trigger a kernel page fault if the allocation was at the end of a page, and the next page was unallocated. The quick fix is to just change the order of the tests: check that there is any iovec data left before we check the iov entry itself. Thanks to Alexey Dobriyan for finding this case, and testing the fix. Reported-and-tested-by: Alexey Dobriyan <[email protected]> Cc: Nick Piggin <[email protected]> Cc: Andrew Morton <[email protected]> Cc: <[email protected]> [2.6.25.x, 2.6.26.x] Signed-off-by: Linus Torvalds <[email protected]>
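A simplified user-space C sketch of the ordering fix this commit message describes (not the kernel iov_iter code): test the remaining segment count before dereferencing the current iovec entry.

#include <stddef.h>
#include <sys/uio.h>

/* Advance over 'bytes' bytes of iovec data. The remaining segment count is
 * tested before iov->iov_len is read, so the loop never dereferences one
 * element past the end of the array. */
static void iov_advance(const struct iovec **iovp, size_t *nr_segs,
                        size_t *skip, size_t bytes)
{
    const struct iovec *iov = *iovp;

    while (bytes) {
        size_t left;

        if (*nr_segs == 0)               /* check the count first ... */
            break;
        left = iov->iov_len - *skip;     /* ... only then touch the entry */
        if (bytes < left) {
            *skip += bytes;
            bytes = 0;
        } else {
            bytes -= left;
            iov++;
            (*nr_segs)--;
            *skip = 0;
        }
    }
    *iovp = iov;
}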
bytes_fromhex_impl(PyTypeObject *type, PyObject *string) /*[clinic end generated code: output=0973acc63661bb2e input=bf4d1c361670acd3]*/ { PyObject *newstring; char *buf; Py_ssize_t hexlen, byteslen, i, j; int top, bot; void *data; unsigned int kind; assert(PyUnicode_Check(string)); if (PyUnicode_READY(string)) return NULL; kind = PyUnicode_KIND(string); data = PyUnicode_DATA(string); hexlen = PyUnicode_GET_LENGTH(string); byteslen = hexlen/2; /* This overestimates if there are spaces */ newstring = PyBytes_FromStringAndSize(NULL, byteslen); if (!newstring) return NULL; buf = PyBytes_AS_STRING(newstring); for (i = j = 0; i < hexlen; i += 2) { /* skip over spaces in the input */ while (PyUnicode_READ(kind, data, i) == ' ') i++; if (i >= hexlen) break; top = hex_digit_to_int(PyUnicode_READ(kind, data, i)); bot = hex_digit_to_int(PyUnicode_READ(kind, data, i+1)); if (top == -1 || bot == -1) { PyErr_Format(PyExc_ValueError, "non-hexadecimal number found in " "fromhex() arg at position %zd", i); goto error; } buf[j++] = (top << 4) + bot; } if (j != byteslen && _PyBytes_Resize(&newstring, j) < 0) goto error; return newstring; error: Py_XDECREF(newstring); return NULL; }
0
[ "CWE-190" ]
cpython
fd8614c5c5466a14a945db5b059c10c0fb8f76d9
56,819,761,174,125,950,000,000,000,000,000,000,000
46
bpo-30657: Fix CVE-2017-1000158 (#4664) Fixes possible integer overflow in PyBytes_DecodeEscape. Co-Authored-By: Jay Bosamiya <[email protected]>
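A generic C sketch of the overflow guard this commit message alludes to (illustrative only, not the CPython patch): validate a size computation before using it to allocate or index.

#include <stddef.h>
#include <stdint.h>

/* Return 0 and store len * factor in *out, or -1 if the multiplication
 * would overflow size_t. */
static int scaled_size(size_t len, size_t factor, size_t *out)
{
    if (factor != 0 && len > SIZE_MAX / factor)
        return -1;
    *out = len * factor;
    return 0;
}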
Linker* PackLinuxElf32x86::newLinker() const { return new ElfLinkerX86; }
0
[ "CWE-476" ]
upx
ef336dbcc6dc8344482f8cf6c909ae96c3286317
146,174,642,242,806,260,000,000,000,000,000,000,000
4
Protect against bad crafted input. https://github.com/upx/upx/issues/128 modified: p_lx_elf.cpp
void PlayerGeneric::setMasterVolume(mp_sint32 vol) { masterVolume = vol; if (player) player->setMasterVolume(vol); }
0
[ "CWE-416" ]
MilkyTracker
7afd55c42ad80d01a339197a2d8b5461d214edaf
244,420,812,152,325,700,000,000,000,000,000,000,000
6
Fix use-after-free in PlayerGeneric destructor
int ssl3_get_client_hello(SSL *s) { int i, j, ok, al, ret = -1; unsigned int cookie_len; long n; unsigned long id; unsigned char *p, *d, *q; SSL_CIPHER *c; #ifndef OPENSSL_NO_COMP SSL_COMP *comp = NULL; #endif STACK_OF(SSL_CIPHER) *ciphers = NULL; /* * We do this so that we will respond with our native type. If we are * TLSv1 and we get SSLv3, we will respond with TLSv1, This down * switching should be handled by a different method. If we are SSLv3, we * will respond with SSLv3, even if prompted with TLSv1. */ if (s->state == SSL3_ST_SR_CLNT_HELLO_A) { s->state = SSL3_ST_SR_CLNT_HELLO_B; } s->first_packet = 1; n = s->method->ssl_get_message(s, SSL3_ST_SR_CLNT_HELLO_B, SSL3_ST_SR_CLNT_HELLO_C, SSL3_MT_CLIENT_HELLO, SSL3_RT_MAX_PLAIN_LENGTH, &ok); if (!ok) return ((int)n); s->first_packet = 0; d = p = (unsigned char *)s->init_msg; /* * 2 bytes for client version, SSL3_RANDOM_SIZE bytes for random, 1 byte * for session id length */ if (n < 2 + SSL3_RANDOM_SIZE + 1) { al = SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_LENGTH_TOO_SHORT); goto f_err; } /* * use version from inside client hello, not from record header (may * differ: see RFC 2246, Appendix E, second paragraph) */ s->client_version = (((int)p[0]) << 8) | (int)p[1]; p += 2; if ((s->version == DTLS1_VERSION && s->client_version > s->version) || (s->version != DTLS1_VERSION && s->client_version < s->version)) { SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_WRONG_VERSION_NUMBER); if ((s->client_version >> 8) == SSL3_VERSION_MAJOR) { /* * similar to ssl3_get_record, send alert using remote version * number */ s->version = s->client_version; } al = SSL_AD_PROTOCOL_VERSION; goto f_err; } /* * If we require cookies and this ClientHello doesn't contain one, just * return since we do not want to allocate any memory yet. So check * cookie length... */ if (SSL_get_options(s) & SSL_OP_COOKIE_EXCHANGE) { unsigned int session_length, cookie_length; session_length = *(p + SSL3_RANDOM_SIZE); if (p + SSL3_RANDOM_SIZE + session_length + 1 >= d + n) { al = SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_LENGTH_TOO_SHORT); goto f_err; } cookie_length = *(p + SSL3_RANDOM_SIZE + session_length + 1); if (cookie_length == 0) return 1; } /* load the client random */ memcpy(s->s3->client_random, p, SSL3_RANDOM_SIZE); p += SSL3_RANDOM_SIZE; /* get the session-id */ j = *(p++); if (p + j > d + n) { al = SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_LENGTH_TOO_SHORT); goto f_err; } s->hit = 0; /* * Versions before 0.9.7 always allow session reuse during renegotiation * (i.e. when s->new_session is true), option * SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION is new with 0.9.7. 
Maybe * this optional behaviour should always have been the default, but we * cannot safely change the default behaviour (or new applications might * be written that become totally unsecure when compiled with an earlier * library version) */ if ((s->new_session && (s->options & SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION))) { if (!ssl_get_new_session(s, 1)) goto err; } else { i = ssl_get_prev_session(s, p, j, d + n); if (i == 1) { /* previous session */ s->hit = 1; } else if (i == -1) goto err; else { /* i == 0 */ if (!ssl_get_new_session(s, 1)) goto err; } } p += j; if (s->version == DTLS1_VERSION || s->version == DTLS1_BAD_VER) { /* cookie stuff */ if (p + 1 > d + n) { al = SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_LENGTH_TOO_SHORT); goto f_err; } cookie_len = *(p++); if (p + cookie_len > d + n) { al = SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_LENGTH_TOO_SHORT); goto f_err; } /* * The ClientHello may contain a cookie even if the * HelloVerify message has not been sent--make sure that it * does not cause an overflow. */ if (cookie_len > sizeof(s->d1->rcvd_cookie)) { /* too much data */ al = SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_COOKIE_MISMATCH); goto f_err; } /* verify the cookie if appropriate option is set. */ if ((SSL_get_options(s) & SSL_OP_COOKIE_EXCHANGE) && cookie_len > 0) { memcpy(s->d1->rcvd_cookie, p, cookie_len); if (s->ctx->app_verify_cookie_cb != NULL) { if (s->ctx->app_verify_cookie_cb(s, s->d1->rcvd_cookie, cookie_len) == 0) { al = SSL_AD_HANDSHAKE_FAILURE; SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_COOKIE_MISMATCH); goto f_err; } /* else cookie verification succeeded */ } /* default verification */ else if (memcmp(s->d1->rcvd_cookie, s->d1->cookie, s->d1->cookie_len) != 0) { al = SSL_AD_HANDSHAKE_FAILURE; SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_COOKIE_MISMATCH); goto f_err; } ret = 2; } p += cookie_len; } if (p + 2 > d + n) { al = SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_LENGTH_TOO_SHORT); goto f_err; } n2s(p, i); if ((i == 0) && (j != 0)) { /* we need a cipher if we are not resuming a session */ al = SSL_AD_ILLEGAL_PARAMETER; SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_NO_CIPHERS_SPECIFIED); goto f_err; } /* i bytes of cipher data + 1 byte for compression length later */ if ((p + i + 1) > (d + n)) { /* not enough data */ al = SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_LENGTH_MISMATCH); goto f_err; } if ((i > 0) && (ssl_bytes_to_cipher_list(s, p, i, &(ciphers)) == NULL)) { goto err; } p += i; /* If it is a hit, check that the cipher is in the list */ if ((s->hit) && (i > 0)) { j = 0; id = s->session->cipher->id; #ifdef CIPHER_DEBUG printf("client sent %d ciphers\n", sk_num(ciphers)); #endif for (i = 0; i < sk_SSL_CIPHER_num(ciphers); i++) { c = sk_SSL_CIPHER_value(ciphers, i); #ifdef CIPHER_DEBUG printf("client [%2d of %2d]:%s\n", i, sk_num(ciphers), SSL_CIPHER_get_name(c)); #endif if (c->id == id) { j = 1; break; } } /* * Disabled because it can be used in a ciphersuite downgrade attack: * CVE-2010-4180. */ #if 0 if (j == 0 && (s->options & SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG) && (sk_SSL_CIPHER_num(ciphers) == 1)) { /* * Special case as client bug workaround: the previously used * cipher may not be in the current list, the client instead * might be trying to continue using a cipher that before wasn't * chosen due to server preferences. We'll have to reject the * connection if the cipher is not enabled, though. 
*/ c = sk_SSL_CIPHER_value(ciphers, 0); if (sk_SSL_CIPHER_find(SSL_get_ciphers(s), c) >= 0) { s->session->cipher = c; j = 1; } } #endif if (j == 0) { /* * we need to have the cipher in the cipher list if we are asked * to reuse it */ al = SSL_AD_ILLEGAL_PARAMETER; SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_REQUIRED_CIPHER_MISSING); goto f_err; } } /* compression */ i = *(p++); if ((p + i) > (d + n)) { /* not enough data */ al = SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_LENGTH_MISMATCH); goto f_err; } q = p; for (j = 0; j < i; j++) { if (p[j] == 0) break; } p += i; if (j >= i) { /* no compress */ al = SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_NO_COMPRESSION_SPECIFIED); goto f_err; } #ifndef OPENSSL_NO_TLSEXT /* TLS extensions */ if (s->version >= SSL3_VERSION) { if (!ssl_parse_clienthello_tlsext(s, &p, d, n, &al)) { /* 'al' set by ssl_parse_clienthello_tlsext */ SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_PARSE_TLSEXT); goto f_err; } } if (ssl_check_clienthello_tlsext_early(s) <= 0) { SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_CLIENTHELLO_TLSEXT); goto err; } /* * Check if we want to use external pre-shared secret for this handshake * for not reused session only. We need to generate server_random before * calling tls_session_secret_cb in order to allow SessionTicket * processing to use it in key derivation. */ { unsigned long Time; unsigned char *pos; Time = (unsigned long)time(NULL); /* Time */ pos = s->s3->server_random; l2n(Time, pos); if (RAND_pseudo_bytes(pos, SSL3_RANDOM_SIZE - 4) <= 0) { al = SSL_AD_INTERNAL_ERROR; goto f_err; } } if (!s->hit && s->version >= TLS1_VERSION && s->tls_session_secret_cb) { SSL_CIPHER *pref_cipher = NULL; s->session->master_key_length = sizeof(s->session->master_key); if (s->tls_session_secret_cb(s, s->session->master_key, &s->session->master_key_length, ciphers, &pref_cipher, s->tls_session_secret_cb_arg)) { s->hit = 1; s->session->ciphers = ciphers; s->session->verify_result = X509_V_OK; ciphers = NULL; /* check if some cipher was preferred by call back */ pref_cipher = pref_cipher ? pref_cipher : ssl3_choose_cipher(s, s-> session->ciphers, SSL_get_ciphers (s)); if (pref_cipher == NULL) { al = SSL_AD_HANDSHAKE_FAILURE; SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_NO_SHARED_CIPHER); goto f_err; } s->session->cipher = pref_cipher; if (s->cipher_list) sk_SSL_CIPHER_free(s->cipher_list); if (s->cipher_list_by_id) sk_SSL_CIPHER_free(s->cipher_list_by_id); s->cipher_list = sk_SSL_CIPHER_dup(s->session->ciphers); s->cipher_list_by_id = sk_SSL_CIPHER_dup(s->session->ciphers); } } #endif /* * Worst case, we will use the NULL compression, but if we have other * options, we will now look for them. We have i-1 compression * algorithms from the client, starting at q. 
*/ s->s3->tmp.new_compression = NULL; #ifndef OPENSSL_NO_COMP /* This only happens if we have a cache hit */ if (s->session->compress_meth != 0) { int m, comp_id = s->session->compress_meth; /* Perform sanity checks on resumed compression algorithm */ /* Can't disable compression */ if (s->options & SSL_OP_NO_COMPRESSION) { al = SSL_AD_INTERNAL_ERROR; SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_INCONSISTENT_COMPRESSION); goto f_err; } /* Look for resumed compression method */ for (m = 0; m < sk_SSL_COMP_num(s->ctx->comp_methods); m++) { comp = sk_SSL_COMP_value(s->ctx->comp_methods, m); if (comp_id == comp->id) { s->s3->tmp.new_compression = comp; break; } } if (s->s3->tmp.new_compression == NULL) { al = SSL_AD_INTERNAL_ERROR; SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_INVALID_COMPRESSION_ALGORITHM); goto f_err; } /* Look for resumed method in compression list */ for (m = 0; m < i; m++) { if (q[m] == comp_id) break; } if (m >= i) { al = SSL_AD_ILLEGAL_PARAMETER; SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_REQUIRED_COMPRESSSION_ALGORITHM_MISSING); goto f_err; } } else if (s->hit) comp = NULL; else if (!(s->options & SSL_OP_NO_COMPRESSION) && s->ctx->comp_methods) { /* See if we have a match */ int m, nn, o, v, done = 0; nn = sk_SSL_COMP_num(s->ctx->comp_methods); for (m = 0; m < nn; m++) { comp = sk_SSL_COMP_value(s->ctx->comp_methods, m); v = comp->id; for (o = 0; o < i; o++) { if (v == q[o]) { done = 1; break; } } if (done) break; } if (done) s->s3->tmp.new_compression = comp; else comp = NULL; } #else /* * If compression is disabled we'd better not try to resume a session * using compression. */ if (s->session->compress_meth != 0) { al = SSL_AD_INTERNAL_ERROR; SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_INCONSISTENT_COMPRESSION); goto f_err; } #endif /* * Given s->session->ciphers and SSL_get_ciphers, we must pick a cipher */ if (!s->hit) { #ifdef OPENSSL_NO_COMP s->session->compress_meth = 0; #else s->session->compress_meth = (comp == NULL) ? 0 : comp->id; #endif if (s->session->ciphers != NULL) sk_SSL_CIPHER_free(s->session->ciphers); s->session->ciphers = ciphers; if (ciphers == NULL) { al = SSL_AD_ILLEGAL_PARAMETER; SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_NO_CIPHERS_PASSED); goto f_err; } ciphers = NULL; c = ssl3_choose_cipher(s, s->session->ciphers, SSL_get_ciphers(s)); if (c == NULL) { al = SSL_AD_HANDSHAKE_FAILURE; SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_NO_SHARED_CIPHER); goto f_err; } s->s3->tmp.new_cipher = c; } else { /* Session-id reuse */ #ifdef REUSE_CIPHER_BUG STACK_OF(SSL_CIPHER) *sk; SSL_CIPHER *nc = NULL; SSL_CIPHER *ec = NULL; if (s->options & SSL_OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG) { sk = s->session->ciphers; for (i = 0; i < sk_SSL_CIPHER_num(sk); i++) { c = sk_SSL_CIPHER_value(sk, i); if (c->algorithm_enc & SSL_eNULL) nc = c; if (SSL_C_IS_EXPORT(c)) ec = c; } if (nc != NULL) s->s3->tmp.new_cipher = nc; else if (ec != NULL) s->s3->tmp.new_cipher = ec; else s->s3->tmp.new_cipher = s->session->cipher; } else #endif s->s3->tmp.new_cipher = s->session->cipher; } if (!ssl3_digest_cached_records(s)) { al = SSL_AD_INTERNAL_ERROR; goto f_err; } /*- * we now have the following setup. * client_random * cipher_list - our prefered list of ciphers * ciphers - the clients prefered list of ciphers * compression - basically ignored right now * ssl version is set - sslv3 * s->session - The ssl session has been setup. * s->hit - session reuse flag * s->tmp.new_cipher - the new cipher to use. 
*/ #ifndef OPENSSL_NO_TLSEXT /* Handles TLS extensions that we couldn't check earlier */ if (s->version >= SSL3_VERSION) { if (ssl_check_clienthello_tlsext_late(s) <= 0) { SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_CLIENTHELLO_TLSEXT); goto err; } } #endif if (ret < 0) ret = 1; if (0) { f_err: ssl3_send_alert(s, SSL3_AL_FATAL, al); } err: if (ciphers != NULL) sk_SSL_CIPHER_free(ciphers); return (ret); }
0
[]
openssl
1392c238657ec745af6a40def03d67d4ce02a082
181,428,057,863,667,530,000,000,000,000,000,000,000
526
Fix PSK handling. The PSK identity hint should be stored in the SSL_SESSION structure and not in the parent context (which will overwrite values used by other SSL structures with the same SSL_CTX). Use BUF_strndup when copying identity as it may not be null terminated. Reviewed-by: Tim Hudson <[email protected]> (cherry picked from commit 3c66a669dfc7b3792f7af0758ea26fe8502ce70c)
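The fix above hinges on two ideas: keep the PSK identity hint on the SSL_SESSION rather than the shared SSL_CTX, and copy the identity with a length-bounded duplication (BUF_strndup) because the wire data may not be NUL-terminated. A minimal sketch of that pattern; the struct and function names here are hypothetical stand-ins, and POSIX strndup() is used in place of OpenSSL's BUF_strndup().

#include <string.h>
#include <stdlib.h>

/* Hypothetical, simplified stand-in for the real per-session structure. */
struct session_st {
    char *psk_identity_hint;   /* per-session copy, not shared via the SSL_CTX */
};

/* Copy an identity hint that may not be NUL-terminated: bound the copy by
 * the wire length instead of trusting strlen()/strdup(). */
static int set_session_psk_hint(struct session_st *sess,
                                const char *hint, size_t hint_len)
{
    char *copy = strndup(hint, hint_len);   /* BUF_strndup() in OpenSSL */
    if (copy == NULL)
        return 0;
    free(sess->psk_identity_hint);
    sess->psk_identity_hint = copy;         /* stored on the session, not the ctx */
    return 1;
}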
virDomainSetTime(virDomainPtr dom, long long seconds, unsigned int nseconds, unsigned int flags) { VIR_DOMAIN_DEBUG(dom, "seconds=%lld, nseconds=%u, flags=%x", seconds, nseconds, flags); virResetLastError(); virCheckDomainReturn(dom, -1); virCheckReadOnlyGoto(dom->conn->flags, error); if (dom->conn->driver->domainSetTime) { int ret = dom->conn->driver->domainSetTime(dom, seconds, nseconds, flags); if (ret < 0) goto error; return ret; } virReportUnsupportedError(); error: virDispatchError(dom->conn); return -1; }
0
[ "CWE-254" ]
libvirt
506e9d6c2d4baaf580d489fff0690c0ff2ff588f
110,293,873,954,293,750,000,000,000,000,000,000,000
27
virDomainGetTime: Deny on RO connections We have a policy that if API may end up talking to a guest agent it should require RW connection. We don't obey the rule in virDomainGetTime(). Signed-off-by: Michal Privoznik <[email protected]>
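The message above says virDomainGetTime() must enforce the same read-only check that virDomainSetTime() (shown earlier) already performs, since it may talk to the guest agent. A sketch of how the guarded entry point would look, reusing the macros visible in the libvirt code above; the real function body is not reproduced here, so details beyond the added virCheckReadOnlyGoto() line are assumptions.

int
virDomainGetTime(virDomainPtr dom, long long *seconds,
                 unsigned int *nseconds, unsigned int flags)
{
    virResetLastError();

    virCheckDomainReturn(dom, -1);
    /* The added guard: querying guest time may talk to the guest agent,
     * so deny it on read-only connections, as virDomainSetTime does. */
    virCheckReadOnlyGoto(dom->conn->flags, error);

    if (dom->conn->driver->domainGetTime) {
        int ret = dom->conn->driver->domainGetTime(dom, seconds, nseconds, flags);
        if (ret < 0)
            goto error;
        return ret;
    }

    virReportUnsupportedError();
 error:
    virDispatchError(dom->conn);
    return -1;
}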
read_cups_files_conf(cups_file_t *fp) /* I - File to read from */ { int i, /* Looping var */ linenum; /* Current line number */ char line[HTTP_MAX_BUFFER], /* Line from file */ *value; /* Value from line */ struct group *group; /* Group */ static const char * const prohibited_env[] = { /* Prohibited environment variables */ "APPLE_LANGUAGE", "AUTH_DOMAIN", "AUTH_INFO_REQUIRED", "AUTH_NEGOTIATE", "AUTH_PASSWORD", "AUTH_UID", "AUTH_USERNAME", "CHARSET", "CLASS", "CLASSIFICATION", "CONTENT_TYPE", "CUPS_CACHEDIR", "CUPS_DATADIR", "CUPS_DOCROOT", "CUPS_FILETYPE", "CUPS_FONTPATH", "CUPS_MAX_MESSAGE", "CUPS_REQUESTROOT", "CUPS_SERVERBIN", "CUPS_SERVERROOT", "CUPS_STATEDIR", "DEVICE_URI", "FINAL_CONTENT_TYPE", "HOME", "LANG", "PPD", "PRINTER", "PRINTER_INFO", "PRINTER_LOCATION", "PRINTER_STATE_REASONS", "RIP_CACHE", "SERVER_ADMIN", "SOFTWARE", "TMPDIR", "USER" }; /* * Loop through each line in the file... */ linenum = 0; while (cupsFileGetConf(fp, line, sizeof(line), &value, &linenum)) { if (!_cups_strcasecmp(line, "FatalErrors")) FatalErrors = parse_fatal_errors(value); else if (!_cups_strcasecmp(line, "Group") && value) { /* * Group ID to run as... */ if (isdigit(value[0])) Group = (gid_t)atoi(value); else { endgrent(); group = getgrnam(value); if (group != NULL) Group = group->gr_gid; else { cupsdLogMessage(CUPSD_LOG_ERROR, "Unknown Group \"%s\" on line %d of %s.", value, linenum, CupsFilesFile); if (FatalErrors & CUPSD_FATAL_CONFIG) return (0); } } } else if (!_cups_strcasecmp(line, "PassEnv") && value) { /* * PassEnv variable [... variable] */ int valuelen; /* Length of variable name */ for (; *value;) { for (valuelen = 0; value[valuelen]; valuelen ++) if (_cups_isspace(value[valuelen]) || value[valuelen] == ',') break; if (value[valuelen]) { value[valuelen] = '\0'; valuelen ++; } for (i = 0; i < (int)(sizeof(prohibited_env) / sizeof(prohibited_env[0])); i ++) { if (!strcmp(value, prohibited_env[i])) { cupsdLogMessage(CUPSD_LOG_ERROR, "Environment variable \"%s\" cannot be passed through on line %d of %s.", value, linenum, CupsFilesFile); if (FatalErrors & CUPSD_FATAL_CONFIG) return (0); else break; } } if (i >= (int)(sizeof(prohibited_env) / sizeof(prohibited_env[0]))) cupsdSetEnv(value, NULL); for (value += valuelen; *value; value ++) if (!_cups_isspace(*value) || *value != ',') break; } } else if (!_cups_strcasecmp(line, "PrintcapFormat") && value) { /* * Format of printcap file? */ if (!_cups_strcasecmp(value, "bsd")) PrintcapFormat = PRINTCAP_BSD; else if (!_cups_strcasecmp(value, "plist")) PrintcapFormat = PRINTCAP_PLIST; else if (!_cups_strcasecmp(value, "solaris")) PrintcapFormat = PRINTCAP_SOLARIS; else { cupsdLogMessage(CUPSD_LOG_ERROR, "Unknown PrintcapFormat \"%s\" on line %d of %s.", value, linenum, CupsFilesFile); if (FatalErrors & CUPSD_FATAL_CONFIG) return (0); } } else if (!_cups_strcasecmp(line, "Sandboxing") && value) { /* * Level of sandboxing? 
*/ if (!_cups_strcasecmp(value, "off") && getuid()) { Sandboxing = CUPSD_SANDBOXING_OFF; cupsdLogMessage(CUPSD_LOG_WARN, "Disabling sandboxing is not recommended (line %d of %s)", linenum, CupsFilesFile); } else if (!_cups_strcasecmp(value, "relaxed")) Sandboxing = CUPSD_SANDBOXING_RELAXED; else if (!_cups_strcasecmp(value, "strict")) Sandboxing = CUPSD_SANDBOXING_STRICT; else { cupsdLogMessage(CUPSD_LOG_ERROR, "Unknown Sandboxing \"%s\" on line %d of %s.", value, linenum, CupsFilesFile); if (FatalErrors & CUPSD_FATAL_CONFIG) return (0); } } else if (!_cups_strcasecmp(line, "SetEnv") && value) { /* * SetEnv variable value */ char *valueptr; /* Pointer to environment variable value */ for (valueptr = value; *valueptr && !isspace(*valueptr & 255); valueptr ++); if (*valueptr) { /* * Found a value... */ while (isspace(*valueptr & 255)) *valueptr++ = '\0'; for (i = 0; i < (int)(sizeof(prohibited_env) / sizeof(prohibited_env[0])); i ++) { if (!strcmp(value, prohibited_env[i])) { cupsdLogMessage(CUPSD_LOG_ERROR, "Environment variable \"%s\" cannot be set on line %d of %s.", value, linenum, CupsFilesFile); if (FatalErrors & CUPSD_FATAL_CONFIG) return (0); else break; } } if (i >= (int)(sizeof(prohibited_env) / sizeof(prohibited_env[0]))) cupsdSetEnv(value, valueptr); } else cupsdLogMessage(CUPSD_LOG_ERROR, "Missing value for SetEnv directive on line %d of %s.", linenum, ConfigurationFile); } else if (!_cups_strcasecmp(line, "SystemGroup") && value) { /* * SystemGroup (admin) group(s)... */ if (!parse_groups(value, linenum)) { if (FatalErrors & CUPSD_FATAL_CONFIG) return (0); } } else if (!_cups_strcasecmp(line, "User") && value) { /* * User ID to run as... */ if (isdigit(value[0] & 255)) { int uid = atoi(value); if (!uid) { cupsdLogMessage(CUPSD_LOG_ERROR, "Will not use User 0 as specified on line %d of %s " "for security reasons. You must use a non-" "privileged account instead.", linenum, CupsFilesFile); if (FatalErrors & CUPSD_FATAL_CONFIG) return (0); } else User = (uid_t)atoi(value); } else { struct passwd *p; /* Password information */ endpwent(); p = getpwnam(value); if (p) { if (!p->pw_uid) { cupsdLogMessage(CUPSD_LOG_ERROR, "Will not use User %s (UID=0) as specified on line " "%d of %s for security reasons. You must use a " "non-privileged account instead.", value, linenum, CupsFilesFile); if (FatalErrors & CUPSD_FATAL_CONFIG) return (0); } else User = p->pw_uid; } else { cupsdLogMessage(CUPSD_LOG_ERROR, "Unknown User \"%s\" on line %d of %s.", value, linenum, CupsFilesFile); if (FatalErrors & CUPSD_FATAL_CONFIG) return (0); } } } else if (!_cups_strcasecmp(line, "ServerCertificate") || !_cups_strcasecmp(line, "ServerKey")) { cupsdLogMessage(CUPSD_LOG_INFO, "The \"%s\" directive on line %d of %s is no longer " "supported; this will become an error in a future " "release.", line, linenum, CupsFilesFile); } else if (!parse_variable(CupsFilesFile, linenum, line, value, sizeof(cupsfiles_vars) / sizeof(cupsfiles_vars[0]), cupsfiles_vars) && (FatalErrors & CUPSD_FATAL_CONFIG)) return (0); } return (1); }
0
[]
cups
d47f6aec436e0e9df6554436e391471097686ecc
39,830,376,392,441,490,000,000,000,000,000,000,000
293
Fix local privilege escalation to root and sandbox bypasses in scheduler (rdar://37836779, rdar://37836995, rdar://37837252, rdar://37837581)
GC_INNER void GC_setpagesize(void) { # if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP) GC_page_size = GETPAGESIZE(); if (!GC_page_size) ABORT("getpagesize failed"); # else /* It's acceptable to fake it. */ GC_page_size = HBLKSIZE; # endif }
1
[ "CWE-119" ]
bdwgc
7292c02fac2066d39dd1bcc37d1a7054fd1e32ee
48,184,684,668,835,490,000,000,000,000,000,000,000
10
Fix malloc routines to prevent size value wrap-around See issue #135 on Github. * allchblk.c (GC_allochblk, GC_allochblk_nth): Use OBJ_SZ_TO_BLOCKS_CHECKED instead of OBJ_SZ_TO_BLOCKS. * malloc.c (GC_alloc_large): Likewise. * alloc.c (GC_expand_hp_inner): Type of "bytes" local variable changed from word to size_t; cast ROUNDUP_PAGESIZE argument to size_t; prevent overflow when computing GC_heapsize+bytes > GC_max_heapsize. * dbg_mlc.c (GC_debug_malloc, GC_debug_malloc_ignore_off_page, GC_debug_malloc_atomic_ignore_off_page, GC_debug_generic_malloc, GC_debug_generic_malloc_inner, GC_debug_generic_malloc_inner_ignore_off_page, GC_debug_malloc_stubborn, GC_debug_malloc_atomic, GC_debug_malloc_uncollectable, GC_debug_malloc_atomic_uncollectable): Use SIZET_SAT_ADD (instead of "+" operator) to add extra bytes to lb value. * fnlz_mlc.c (GC_finalized_malloc): Likewise. * gcj_mlc.c (GC_debug_gcj_malloc): Likewise. * include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES, ADD_SLOP, ROUNDUP_PAGESIZE): Likewise. * include/private/gcconfig.h (GET_MEM): Likewise. * mallocx.c (GC_malloc_many, GC_memalign): Likewise. * os_dep.c (GC_wince_get_mem, GC_win32_get_mem): Likewise. * typd_mlc.c (GC_malloc_explicitly_typed, GC_malloc_explicitly_typed_ignore_off_page, GC_calloc_explicitly_typed): Likewise. * headers.c (GC_scratch_alloc): Change type of bytes_to_get from word to size_t (because ROUNDUP_PAGESIZE_IF_MMAP result type changed). * include/private/gc_priv.h: Include limits.h (unless SIZE_MAX already defined). * include/private/gc_priv.h (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Move from malloc.c file. * include/private/gc_priv.h (SIZET_SAT_ADD): New macro (defined before include gcconfig.h). * include/private/gc_priv.h (EXTRA_BYTES, GC_page_size): Change type to size_t. * os_dep.c (GC_page_size): Likewise. * include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES, ADD_SLOP, ROUNDUP_PAGESIZE): Add comment about the argument. * include/private/gcconfig.h (GET_MEM): Likewise. * include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES, ADD_SLOP, OBJ_SZ_TO_BLOCKS, ROUNDUP_PAGESIZE, ROUNDUP_PAGESIZE_IF_MMAP): Rename argument to "lb". * include/private/gc_priv.h (OBJ_SZ_TO_BLOCKS_CHECKED): New macro. * include/private/gcconfig.h (GC_win32_get_mem, GC_wince_get_mem, GC_unix_get_mem): Change argument type from word to int. * os_dep.c (GC_unix_mmap_get_mem, GC_unix_get_mem, GC_unix_sbrk_get_mem, GC_wince_get_mem, GC_win32_get_mem): Likewise. * malloc.c (GC_alloc_large_and_clear): Call OBJ_SZ_TO_BLOCKS only if no value wrap around is guaranteed. * malloc.c (GC_generic_malloc): Do not check for lb_rounded < lb case (because ROUNDED_UP_GRANULES and GRANULES_TO_BYTES guarantees no value wrap around). * mallocx.c (GC_generic_malloc_ignore_off_page): Likewise. * misc.c (GC_init_size_map): Change "i" local variable type from int to size_t. * os_dep.c (GC_write_fault_handler, catch_exception_raise): Likewise. * misc.c (GC_envfile_init): Cast len to size_t when passed to ROUNDUP_PAGESIZE_IF_MMAP. * os_dep.c (GC_setpagesize): Cast GC_sysinfo.dwPageSize and GETPAGESIZE() to size_t (when setting GC_page_size). * os_dep.c (GC_unix_mmap_get_mem, GC_unmap_start, GC_remove_protection): Expand ROUNDUP_PAGESIZE macro but without value wrap-around checking (the argument is of word type). * os_dep.c (GC_unix_mmap_get_mem): Replace -GC_page_size with ~GC_page_size+1 (because GC_page_size is unsigned); remove redundant cast to size_t. 
* os_dep.c (GC_unix_sbrk_get_mem): Add explicit cast of GC_page_size to SBRK_ARG_T. * os_dep.c (GC_wince_get_mem): Change type of res_bytes local variable to size_t. * typd_mlc.c: Do not include limits.h. * typd_mlc.c (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Remove (as defined in gc_priv.h now).
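The ChangeLog above repeatedly substitutes SIZET_SAT_ADD for a plain "+", i.e. a saturating size_t addition so that adding header or debug bytes to a requested size can never wrap around. A self-contained sketch of such a macro; the actual definition in gc_priv.h may differ in detail.

#include <stdint.h>   /* SIZE_MAX */
#include <stddef.h>   /* size_t */

/* Saturating addition: if a + b would exceed SIZE_MAX, clamp to SIZE_MAX
 * instead of wrapping.  A clamped (huge) request then simply fails to
 * allocate, rather than silently allocating a tiny buffer. */
#define SIZET_SAT_ADD(a, b) \
    ((a) < SIZE_MAX - (b) ? (a) + (b) : SIZE_MAX)

/* Typical use: total = SIZET_SAT_ADD(lb, DEBUG_BYTES); */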
int read_super_2(squashfs_operations **s_ops, void *s) { squashfs_super_block_3 *sBlk_3 = s; if(sBlk_3->s_magic != SQUASHFS_MAGIC || sBlk_3->s_major != 2 || sBlk_3->s_minor > 1) return -1; sBlk.s.s_magic = sBlk_3->s_magic; sBlk.s.inodes = sBlk_3->inodes; sBlk.s.mkfs_time = sBlk_3->mkfs_time; sBlk.s.block_size = sBlk_3->block_size; sBlk.s.fragments = sBlk_3->fragments; sBlk.s.block_log = sBlk_3->block_log; sBlk.s.flags = sBlk_3->flags; sBlk.s.s_major = sBlk_3->s_major; sBlk.s.s_minor = sBlk_3->s_minor; sBlk.s.root_inode = sBlk_3->root_inode; sBlk.s.bytes_used = sBlk_3->bytes_used_2; sBlk.s.inode_table_start = sBlk_3->inode_table_start; sBlk.s.directory_table_start = sBlk_3->directory_table_start_2; sBlk.s.fragment_table_start = sBlk_3->fragment_table_start_2; sBlk.s.inode_table_start = sBlk_3->inode_table_start_2; sBlk.no_uids = sBlk_3->no_uids; sBlk.no_guids = sBlk_3->no_guids; sBlk.uid_start = sBlk_3->uid_start_2; sBlk.guid_start = sBlk_3->guid_start_2; sBlk.s.xattr_id_table_start = SQUASHFS_INVALID_BLK; *s_ops = &ops; /* * 2.x filesystems use gzip compression. */ comp = lookup_compressor("gzip"); return TRUE; }
1
[ "CWE-200", "CWE-59", "CWE-22" ]
squashfs-tools
e0485802ec72996c20026da320650d8362f555bd
8,840,025,808,340,767,000,000,000,000,000,000,000
37
Unsquashfs: additional write outside destination directory exploit fix An issue on github (https://github.com/plougher/squashfs-tools/issues/72) showed how some specially crafted Squashfs filesystems containing invalid file names (with '/' and '..') can cause Unsquashfs to write files outside of the destination directory. Since then it has been shown that specially crafted Squashfs filesystems that contain a symbolic link pointing outside of the destination directory, coupled with an identically named file within the same directory, can cause Unsquashfs to write files outside of the destination directory. Specifically the symbolic link produces a pathname pointing outside of the destination directory, which is then followed when writing the duplicate identically named file within the directory. This commit fixes this exploit by explictly checking for duplicate filenames within a directory. As directories in v2.1, v3.x, and v4.0 filesystems are sorted, this is achieved by checking for consecutively identical filenames. Additionally directories are checked to ensure they are sorted, to avoid attempts to evade the duplicate check. Version 1.x and 2.0 filesystems (where the directories were unsorted) are sorted and then the above duplicate filename check is applied. Signed-off-by: Phillip Lougher <[email protected]>
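The fix described above rejects directories containing duplicate (or unsorted) filenames, which is what allows a symlink and an identically named file to collide outside the destination. Because entries in the affected formats are sorted, comparing consecutive names suffices; this is a generic sketch, not the actual unsquashfs code.

#include <string.h>
#include <stdio.h>

/* Return 0 if the supposedly sorted entry list is unsorted or contains
 * duplicate names, 1 if it is strictly increasing (duplicate-free). */
static int check_directory_names(char * const names[], int count)
{
    int i;

    for (i = 1; i < count; i++) {
        int cmp = strcmp(names[i - 1], names[i]);
        if (cmp > 0) {
            fprintf(stderr, "directory not sorted\n");
            return 0;
        }
        if (cmp == 0) {
            fprintf(stderr, "duplicate name \"%s\" in directory\n", names[i]);
            return 0;
        }
    }
    return 1;
}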
JSObject *create(JSContext *cx, HandleObject self, Mode mode, HandleObject owner, HandleValue initv) { RootedObject headers(cx, create(cx, self, mode, owner)); if (!headers) return nullptr; bool consumed = false; if (!maybe_consume_sequence_or_record<detail::append_header_value>(cx, initv, headers, &consumed, "Headers")) { return nullptr; } if (!consumed) { report_sequence_or_record_arg_error(cx, "Headers", ""); return nullptr; } return headers; }
0
[ "CWE-94" ]
js-compute-runtime
65524ffc962644e9fc39f4b368a326b6253912a9
63,327,196,906,957,970,000,000,000,000,000,000,000
19
use random_get instead of arc4random, as arc4random does not work correctly with wizer. Wizer causes the seed in arc4random to be the same between executions, which is not random
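The point of this message is that PRNG state captured in a wizer snapshot is replayed identically on every run, so entropy must be read at request time via WASI's random_get. A hedged sketch of that idea using getentropy(), which wasi-libc provides on top of random_get; the real runtime is C++ and may call the WASI import directly.

#include <unistd.h>   /* getentropy() on glibc and wasi-libc */
#include <stdint.h>
#include <stdio.h>

/* Fetch fresh randomness when it is needed, instead of seeding a PRNG at
 * startup (whose state would be frozen into the wizer snapshot). */
static int fresh_random_u64(uint64_t *out)
{
    if (getentropy(out, sizeof *out) != 0) {
        perror("getentropy");
        return -1;
    }
    return 0;
}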
static Image *ReadPICTImage(const ImageInfo *image_info, ExceptionInfo *exception) { #define ThrowPICTException(exception,message) \ { \ if (tile_image != (Image *) NULL) \ tile_image=DestroyImage(tile_image); \ if (read_info != (ImageInfo *) NULL) \ read_info=DestroyImageInfo(read_info); \ ThrowReaderException((exception),(message)); \ } char geometry[MaxTextExtent], header_ole[4]; Image *image, *tile_image; ImageInfo *read_info; IndexPacket index; int c, code; MagickBooleanType jpeg, status; PICTRectangle frame; PICTPixmap pixmap; register IndexPacket *indexes; register ssize_t x; register PixelPacket *q; register ssize_t i; size_t extent, length; ssize_t count, flags, j, version, y; StringInfo *profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read PICT header. */ read_info=(ImageInfo *) NULL; tile_image=(Image *) NULL; pixmap.bits_per_pixel=0; pixmap.component_count=0; /* Skip header : 512 for standard PICT and 4, ie "PICT" for OLE2 */ header_ole[0]=ReadBlobByte(image); header_ole[1]=ReadBlobByte(image); header_ole[2]=ReadBlobByte(image); header_ole[3]=ReadBlobByte(image); if (!((header_ole[0] == 0x50) && (header_ole[1] == 0x49) && (header_ole[2] == 0x43) && (header_ole[3] == 0x54))) for (i=0; i < 508; i++) if (ReadBlobByte(image) == EOF) break; (void) ReadBlobMSBShort(image); /* skip picture size */ if (ReadRectangle(image,&frame) == MagickFalse) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); while ((c=ReadBlobByte(image)) == 0) ; if (c != 0x11) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); version=(ssize_t) ReadBlobByte(image); if (version == 2) { c=ReadBlobByte(image); if (c != 0xff) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); } else if (version != 1) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); if ((frame.left < 0) || (frame.right < 0) || (frame.top < 0) || (frame.bottom < 0) || (frame.left >= frame.right) || (frame.top >= frame.bottom)) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); /* Create black canvas. */ flags=0; image->depth=8; image->columns=(unsigned int) (frame.right-frame.left); image->rows=(unsigned int) (frame.bottom-frame.top); image->x_resolution=DefaultResolution; image->y_resolution=DefaultResolution; image->units=UndefinedResolution; if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } status=SetImageExtent(image,image->columns,image->rows); if (status != MagickFalse) status=ResetImagePixels(image,exception); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } /* Interpret PICT opcodes. 
*/ jpeg=MagickFalse; for (code=0; EOFBlob(image) == MagickFalse; ) { if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; if ((version == 1) || ((TellBlob(image) % 2) != 0)) code=ReadBlobByte(image); if (version == 2) code=ReadBlobMSBSignedShort(image); if (code < 0) break; if (code == 0) continue; if (code > 0xa1) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"%04X:",code); } else { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " %04X %s: %s",code,codes[code].name,codes[code].description); switch (code) { case 0x01: { /* Clipping rectangle. */ length=ReadBlobMSBShort(image); if (length != 0x000a) { for (i=0; i < (ssize_t) (length-2); i++) if (ReadBlobByte(image) == EOF) break; break; } if (ReadRectangle(image,&frame) == MagickFalse) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); if (((frame.left & 0x8000) != 0) || ((frame.top & 0x8000) != 0)) break; image->columns=(size_t) (frame.right-frame.left); image->rows=(size_t) (frame.bottom-frame.top); status=SetImageExtent(image,image->columns,image->rows); if (status != MagickFalse) status=ResetImagePixels(image,&image->exception); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } break; } case 0x12: case 0x13: case 0x14: { ssize_t pattern; size_t height, width; /* Skip pattern definition. */ pattern=(ssize_t) ReadBlobMSBShort(image); for (i=0; i < 8; i++) if (ReadBlobByte(image) == EOF) break; if (pattern == 2) { for (i=0; i < 5; i++) if (ReadBlobByte(image) == EOF) break; break; } if (pattern != 1) ThrowPICTException(CorruptImageError,"UnknownPatternType"); length=ReadBlobMSBShort(image); if (ReadRectangle(image,&frame) == MagickFalse) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); if (ReadPixmap(image,&pixmap) == MagickFalse) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); image->depth=(size_t) pixmap.component_size; image->x_resolution=1.0*pixmap.horizontal_resolution; image->y_resolution=1.0*pixmap.vertical_resolution; image->units=PixelsPerInchResolution; (void) ReadBlobMSBLong(image); flags=(ssize_t) ReadBlobMSBShort(image); length=ReadBlobMSBShort(image); for (i=0; i <= (ssize_t) length; i++) (void) ReadBlobMSBLong(image); width=(size_t) (frame.bottom-frame.top); height=(size_t) (frame.right-frame.left); if (pixmap.bits_per_pixel <= 8) length&=0x7fff; if (pixmap.bits_per_pixel == 16) width<<=1; if (length == 0) length=width; if (length < 8) { for (i=0; i < (ssize_t) (length*height); i++) if (ReadBlobByte(image) == EOF) break; } else for (i=0; i < (ssize_t) height; i++) { if (EOFBlob(image) != MagickFalse) break; if (length > 200) { for (j=0; j < (ssize_t) ReadBlobMSBShort(image); j++) if (ReadBlobByte(image) == EOF) break; } else for (j=0; j < (ssize_t) ReadBlobByte(image); j++) if (ReadBlobByte(image) == EOF) break; } break; } case 0x1b: { /* Initialize image background color. */ image->background_color.red=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); image->background_color.green=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); image->background_color.blue=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); break; } case 0x70: case 0x71: case 0x72: case 0x73: case 0x74: case 0x75: case 0x76: case 0x77: { /* Skip polygon or region. 
*/ length=ReadBlobMSBShort(image); for (i=0; i < (ssize_t) (length-2); i++) if (ReadBlobByte(image) == EOF) break; break; } case 0x90: case 0x91: case 0x98: case 0x99: case 0x9a: case 0x9b: { ssize_t bytes_per_line; PICTRectangle source, destination; register unsigned char *p; size_t j; unsigned char *pixels; /* Pixmap clipped by a rectangle. */ bytes_per_line=0; if ((code != 0x9a) && (code != 0x9b)) bytes_per_line=(ssize_t) ReadBlobMSBShort(image); else { (void) ReadBlobMSBShort(image); (void) ReadBlobMSBShort(image); (void) ReadBlobMSBShort(image); } if (ReadRectangle(image,&frame) == MagickFalse) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); /* Initialize tile image. */ tile_image=CloneImage(image,(size_t) (frame.right-frame.left), (size_t) (frame.bottom-frame.top),MagickTrue,exception); if (tile_image == (Image *) NULL) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); if ((code == 0x9a) || (code == 0x9b) || ((bytes_per_line & 0x8000) != 0)) { if (ReadPixmap(image,&pixmap) == MagickFalse) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); tile_image->depth=(size_t) pixmap.component_size; tile_image->matte=pixmap.component_count == 4 ? MagickTrue : MagickFalse; tile_image->x_resolution=(double) pixmap.horizontal_resolution; tile_image->y_resolution=(double) pixmap.vertical_resolution; tile_image->units=PixelsPerInchResolution; if (tile_image->matte != MagickFalse) (void) SetImageAlphaChannel(tile_image,OpaqueAlphaChannel); } if ((code != 0x9a) && (code != 0x9b)) { /* Initialize colormap. */ tile_image->colors=2; if ((bytes_per_line & 0x8000) != 0) { (void) ReadBlobMSBLong(image); flags=(ssize_t) ReadBlobMSBShort(image); tile_image->colors=1UL*ReadBlobMSBShort(image)+1; } status=AcquireImageColormap(tile_image,tile_image->colors); if (status == MagickFalse) ThrowPICTException(ResourceLimitError, "MemoryAllocationFailed"); if ((bytes_per_line & 0x8000) != 0) { for (i=0; i < (ssize_t) tile_image->colors; i++) { j=ReadBlobMSBShort(image) % tile_image->colors; if ((flags & 0x8000) != 0) j=(size_t) i; tile_image->colormap[j].red=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); tile_image->colormap[j].green=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); tile_image->colormap[j].blue=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); } } else { for (i=0; i < (ssize_t) tile_image->colors; i++) { tile_image->colormap[i].red=(Quantum) (QuantumRange- tile_image->colormap[i].red); tile_image->colormap[i].green=(Quantum) (QuantumRange- tile_image->colormap[i].green); tile_image->colormap[i].blue=(Quantum) (QuantumRange- tile_image->colormap[i].blue); } } } if (EOFBlob(image) != MagickFalse) ThrowPICTException(CorruptImageError, "InsufficientImageDataInFile"); if (ReadRectangle(image,&source) == MagickFalse) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); if (ReadRectangle(image,&destination) == MagickFalse) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); (void) ReadBlobMSBShort(image); if ((code == 0x91) || (code == 0x99) || (code == 0x9b)) { /* Skip region. 
*/ length=ReadBlobMSBShort(image); for (i=0; i < (ssize_t) (length-2); i++) if (ReadBlobByte(image) == EOF) break; } if ((code != 0x9a) && (code != 0x9b) && (bytes_per_line & 0x8000) == 0) pixels=DecodeImage(image,tile_image,(size_t) bytes_per_line,1, &extent); else pixels=DecodeImage(image,tile_image,(unsigned int) bytes_per_line, (unsigned int) pixmap.bits_per_pixel,&extent); if (pixels == (unsigned char *) NULL) ThrowPICTException(CorruptImageError,"UnableToUncompressImage"); /* Convert PICT tile image to pixel packets. */ p=pixels; for (y=0; y < (ssize_t) tile_image->rows; y++) { if (p > (pixels+extent+image->columns)) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowPICTException(CorruptImageError,"NotEnoughPixelData"); } q=QueueAuthenticPixels(tile_image,0,y,tile_image->columns,1, exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(tile_image); for (x=0; x < (ssize_t) tile_image->columns; x++) { if (tile_image->storage_class == PseudoClass) { index=ConstrainColormapIndex(tile_image,*p); SetPixelIndex(indexes+x,index); SetPixelRed(q, tile_image->colormap[(ssize_t) index].red); SetPixelGreen(q, tile_image->colormap[(ssize_t) index].green); SetPixelBlue(q, tile_image->colormap[(ssize_t) index].blue); } else { if (pixmap.bits_per_pixel == 16) { i=(ssize_t) (*p++); j=(*p); SetPixelRed(q,ScaleCharToQuantum( (unsigned char) ((i & 0x7c) << 1))); SetPixelGreen(q,ScaleCharToQuantum( (unsigned char) (((i & 0x03) << 6) | ((j & 0xe0) >> 2)))); SetPixelBlue(q,ScaleCharToQuantum( (unsigned char) ((j & 0x1f) << 3))); } else if (tile_image->matte == MagickFalse) { if (p > (pixels+extent+2*image->columns)) ThrowPICTException(CorruptImageError, "NotEnoughPixelData"); SetPixelRed(q,ScaleCharToQuantum(*p)); SetPixelGreen(q,ScaleCharToQuantum( *(p+tile_image->columns))); SetPixelBlue(q,ScaleCharToQuantum( *(p+2*tile_image->columns))); } else { if (p > (pixels+extent+3*image->columns)) ThrowPICTException(CorruptImageError, "NotEnoughPixelData"); SetPixelAlpha(q,ScaleCharToQuantum(*p)); SetPixelRed(q,ScaleCharToQuantum( *(p+tile_image->columns))); SetPixelGreen(q,ScaleCharToQuantum( *(p+2*tile_image->columns))); SetPixelBlue(q,ScaleCharToQuantum( *(p+3*tile_image->columns))); } } p++; q++; } if (SyncAuthenticPixels(tile_image,exception) == MagickFalse) break; if ((tile_image->storage_class == DirectClass) && (pixmap.bits_per_pixel != 16)) { p+=(pixmap.component_count-1)*tile_image->columns; if (p < pixels) break; } status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, tile_image->rows); if (status == MagickFalse) break; } pixels=(unsigned char *) RelinquishMagickMemory(pixels); if ((jpeg == MagickFalse) && (EOFBlob(image) == MagickFalse)) if ((code == 0x9a) || (code == 0x9b) || ((bytes_per_line & 0x8000) != 0)) (void) CompositeImage(image,CopyCompositeOp,tile_image, (ssize_t) destination.left,(ssize_t) destination.top); tile_image=DestroyImage(tile_image); break; } case 0xa1: { unsigned char *info; size_t type; /* Comment. 
*/ type=ReadBlobMSBShort(image); length=ReadBlobMSBShort(image); if (length == 0) break; (void) ReadBlobMSBLong(image); length-=MagickMin(length,4); if (length == 0) break; info=(unsigned char *) AcquireQuantumMemory(length,sizeof(*info)); if (info == (unsigned char *) NULL) break; count=ReadBlob(image,length,info); if (count != (ssize_t) length) { info=(unsigned char *) RelinquishMagickMemory(info); ThrowPICTException(ResourceLimitError,"UnableToReadImageData"); } switch (type) { case 0xe0: { profile=BlobToStringInfo((const void *) NULL,length); SetStringInfoDatum(profile,info); status=SetImageProfile(image,"icc",profile); profile=DestroyStringInfo(profile); if (status == MagickFalse) { info=(unsigned char *) RelinquishMagickMemory(info); ThrowPICTException(ResourceLimitError, "MemoryAllocationFailed"); } break; } case 0x1f2: { profile=BlobToStringInfo((const void *) NULL,length); SetStringInfoDatum(profile,info); status=SetImageProfile(image,"iptc",profile); if (status == MagickFalse) { info=(unsigned char *) RelinquishMagickMemory(info); ThrowPICTException(ResourceLimitError, "MemoryAllocationFailed"); } profile=DestroyStringInfo(profile); break; } default: break; } info=(unsigned char *) RelinquishMagickMemory(info); break; } default: { /* Skip to next op code. */ if (codes[code].length == -1) (void) ReadBlobMSBShort(image); else for (i=0; i < (ssize_t) codes[code].length; i++) if (ReadBlobByte(image) == EOF) break; } } } if (code == 0xc00) { /* Skip header. */ for (i=0; i < 24; i++) if (ReadBlobByte(image) == EOF) break; continue; } if (((code >= 0xb0) && (code <= 0xcf)) || ((code >= 0x8000) && (code <= 0x80ff))) continue; if (code == 0x8200) { char filename[MaxTextExtent]; FILE *file; int unique_file; /* Embedded JPEG. */ jpeg=MagickTrue; read_info=CloneImageInfo(image_info); SetImageInfoBlob(read_info,(void *) NULL,0); file=(FILE *) NULL; unique_file=AcquireUniqueFileResource(filename); (void) FormatLocaleString(read_info->filename,MaxTextExtent,"jpeg:%s", filename); if (unique_file != -1) file=fdopen(unique_file,"wb"); if ((unique_file == -1) || (file == (FILE *) NULL)) { (void) RelinquishUniqueFileResource(read_info->filename); (void) CopyMagickString(image->filename,read_info->filename, MaxTextExtent); ThrowPICTException(FileOpenError,"UnableToCreateTemporaryFile"); } length=ReadBlobMSBLong(image); if (length > 154) { for (i=0; i < 6; i++) (void) ReadBlobMSBLong(image); if (ReadRectangle(image,&frame) == MagickFalse) { (void) fclose(file); (void) RelinquishUniqueFileResource(read_info->filename); ThrowPICTException(CorruptImageError,"ImproperImageHeader"); } for (i=0; i < 122; i++) if (ReadBlobByte(image) == EOF) break; for (i=0; i < (ssize_t) (length-154); i++) { c=ReadBlobByte(image); if (c == EOF) break; (void) fputc(c,file); } } (void) fclose(file); (void) close(unique_file); tile_image=ReadImage(read_info,exception); (void) RelinquishUniqueFileResource(filename); read_info=DestroyImageInfo(read_info); if (tile_image == (Image *) NULL) continue; (void) FormatLocaleString(geometry,MaxTextExtent,"%.20gx%.20g", (double) MagickMax(image->columns,tile_image->columns), (double) MagickMax(image->rows,tile_image->rows)); (void) SetImageExtent(image, MagickMax(image->columns,tile_image->columns), MagickMax(image->rows,tile_image->rows)); (void) TransformImageColorspace(image,tile_image->colorspace); (void) CompositeImage(image,CopyCompositeOp,tile_image,(ssize_t) frame.left,(ssize_t) frame.right); image->compression=tile_image->compression; tile_image=DestroyImage(tile_image); 
continue; } if ((code == 0xff) || (code == 0xffff)) break; if (((code >= 0xd0) && (code <= 0xfe)) || ((code >= 0x8100) && (code <= 0xffff))) { /* Skip reserved. */ length=ReadBlobMSBShort(image); for (i=0; i < (ssize_t) length; i++) if (ReadBlobByte(image) == EOF) break; continue; } if ((code >= 0x100) && (code <= 0x7fff)) { /* Skip reserved. */ length=(size_t) ((code >> 7) & 0xff); for (i=0; i < (ssize_t) length; i++) if (ReadBlobByte(image) == EOF) break; continue; } } (void) CloseBlob(image); return(GetFirstImageInList(image)); }
1
[ "CWE-252" ]
ImageMagick6
11d9dac3d991c62289d1ef7a097670166480e76c
8,549,124,370,999,496,000,000,000,000,000,000,000
724
https://github.com/ImageMagick/ImageMagick/issues/1199
static int proc_pident_readdir(struct file *file, struct dir_context *ctx, const struct pid_entry *ents, unsigned int nents) { struct task_struct *task = get_proc_task(file_inode(file)); const struct pid_entry *p; if (!task) return -ENOENT; if (!dir_emit_dots(file, ctx)) goto out; if (ctx->pos >= nents + 2) goto out; for (p = ents + (ctx->pos - 2); p < ents + nents; p++) { if (!proc_fill_cache(file, ctx, p->name, p->len, proc_pident_instantiate, task, p)) break; ctx->pos++; } out: put_task_struct(task); return 0; }
0
[ "CWE-119" ]
linux
7f7ccc2ccc2e70c6054685f5e3522efa81556830
307,987,316,801,387,220,000,000,000,000,000,000,000
25
proc: do not access cmdline nor environ from file-backed areas proc_pid_cmdline_read() and environ_read() directly access the target process' VM to retrieve the command line and environment. If this process remaps these areas onto a file via mmap(), the requesting process may experience various issues such as extra delays if the underlying device is slow to respond. Let's simply refuse to access file-backed areas in these functions. For this we add a new FOLL_ANON gup flag that is passed to all calls to access_remote_vm(). The code already takes care of such failures (including unmapped areas). Accesses via /proc/pid/mem were not changed though. This was assigned CVE-2018-1120. Note for stable backports: the patch may apply to kernels prior to 4.11 but silently miss one location; it must be checked that no call to access_remote_vm() keeps zero as the last argument. Reported-by: Qualys Security Advisory <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Andy Lutomirski <[email protected]> Cc: Oleg Nesterov <[email protected]> Cc: [email protected] Signed-off-by: Willy Tarreau <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
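The fix introduces a FOLL_ANON gup flag so the page walk refuses file-backed VMAs for these /proc reads. A simplified illustration of the kind of check that implies; the exact placement in mm/gup.c is not reproduced here, so treat this helper as an illustration only.

#include <linux/mm.h>    /* vma_is_anonymous(), FOLL_* flags */
#include <linux/err.h>   /* ERR_PTR() */

/* Illustrative only: somewhere on the GUP/follow_page path, a FOLL_ANON
 * request bails out as soon as the VMA is backed by a file. */
static struct page *reject_file_backed(struct vm_area_struct *vma,
                                       unsigned int gup_flags)
{
        if ((gup_flags & FOLL_ANON) && !vma_is_anonymous(vma))
                return ERR_PTR(-EFAULT);   /* refuse file-backed mappings */
        return NULL;                       /* caller continues the normal walk */
}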
static ssize_t acpi_table_oem_table_id_show(struct config_item *cfg, char *str) { struct acpi_table_header *h = get_header(cfg); if (!h) return -EINVAL; return sprintf(str, "%.*s\n", ACPI_OEM_TABLE_ID_SIZE, h->oem_table_id); }
0
[ "CWE-862" ]
linux
75b0cea7bf307f362057cc778efe89af4c615354
293,429,847,349,226,600,000,000,000,000,000,000,000
9
ACPI: configfs: Disallow loading ACPI tables when locked down Like other vectors already patched, this one here allows the root user to load ACPI tables, which enables arbitrary physical address writes, which in turn makes it possible to disable lockdown. Prevents this by checking the lockdown status before allowing a new ACPI table to be installed. The link in the trailer shows a PoC of how this might be used. Link: https://git.zx2c4.com/american-unsigned-language/tree/american-unsigned-language-2.sh Cc: 5.4+ <[email protected]> # 5.4+ Signed-off-by: Jason A. Donenfeld <[email protected]> Signed-off-by: Rafael J. Wysocki <[email protected]>
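The change gates table installation on the kernel lockdown state. A sketch of the guard at the top of the configfs table-write path; security_locked_down() and LOCKDOWN_ACPI_TABLES are the kernel's lockdown interfaces, while the surrounding handler is abbreviated here.

#include <linux/security.h>   /* security_locked_down(), LOCKDOWN_ACPI_TABLES */

/* Refuse to accept a user-supplied ACPI table while the kernel is locked
 * down, since loading tables allows arbitrary physical memory writes. */
static int acpi_table_write_guard(void)
{
        int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);

        if (ret)
                return ret;   /* typically -EPERM under lockdown */
        return 0;             /* proceed with installing the table */
}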
static void js_defvar(js_State *J, const char *name) { jsR_defproperty(J, J->E->variables, name, JS_DONTENUM | JS_DONTCONF, NULL, NULL, NULL); }
0
[ "CWE-476" ]
mujs
77ab465f1c394bb77f00966cd950650f3f53cb24
264,005,704,300,937,080,000,000,000,000,000,000,000
4
Fix 697401: Error when dropping extra arguments to lightweight functions.
g_file_real_set_attributes_async (GFile *file, GFileInfo *info, GFileQueryInfoFlags flags, int io_priority, GCancellable *cancellable, GAsyncReadyCallback callback, gpointer user_data) { GTask *task; SetInfoAsyncData *data; data = g_new0 (SetInfoAsyncData, 1); data->info = g_file_info_dup (info); data->flags = flags; task = g_task_new (file, cancellable, callback, user_data); g_task_set_source_tag (task, g_file_real_set_attributes_async); g_task_set_task_data (task, data, (GDestroyNotify)set_info_data_free); g_task_set_priority (task, io_priority); g_task_run_in_thread (task, set_info_async_thread); g_object_unref (task); }
0
[ "CWE-362" ]
glib
d8f8f4d637ce43f8699ba94c9b7648beda0ca174
66,043,660,217,081,490,000,000,000,000,000,000,000
23
gfile: Limit access to files when copying file_copy_fallback creates new files with default permissions and set the correct permissions after the operation is finished. This might cause that the files can be accessible by more users during the operation than expected. Use G_FILE_CREATE_PRIVATE for the new files to limit access to those files.
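The fix creates destination files with G_FILE_CREATE_PRIVATE (mode 0600) while data is being copied and applies the final permissions only afterwards. A small illustrative use of that flag through the public GIO API; it is not the internal file_copy_fallback code.

#include <gio/gio.h>

/* Create the destination privately first; the final (possibly wider)
 * permissions are applied only after the contents have been copied. */
static GFileOutputStream *
create_private_destination(GFile *dest, GCancellable *cancellable, GError **error)
{
    return g_file_create(dest, G_FILE_CREATE_PRIVATE, cancellable, error);
}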
static int process_text(ASS_Track *track, char *str) { char *p = str; while (1) { char *q; while (1) { if ((*p == '\r') || (*p == '\n')) ++p; else if (p[0] == '\xef' && p[1] == '\xbb' && p[2] == '\xbf') p += 3; // U+FFFE (BOM) else break; } for (q = p; ((*q != '\0') && (*q != '\r') && (*q != '\n')); ++q) { }; if (q == p) break; if (*q != '\0') *(q++) = '\0'; process_line(track, p); if (*q == '\0') break; p = q; } // there is no explicit end-of-font marker in ssa/ass if (track->parser_priv->fontname) decode_font(track); return 0; }
0
[ "CWE-369", "CWE-787" ]
libass
017137471d0043e0321e377ed8da48e45a3ec632
140,979,223,776,216,840,000,000,000,000,000,000,000
29
decode_font: fix subtraction broken by change to unsigned type This caused a one-byte buffer overwrite and an assertion failure. Regression in commit 910211f1c0078e37546f73e95306724358b89be2. Discovered by OSS-Fuzz. Fixes https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=26674. Fixes https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=26678.
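The regression described here is the usual hazard of an unsigned subtraction: when the subtrahend can exceed the minuend, the difference wraps to a huge value and a later bounds check passes, giving the one-byte overwrite. A generic sketch of the safe compare-before-subtract pattern; it is not the actual decode_font code.

#include <stddef.h>

/* Hazard: with unsigned types, "total - used" wraps to a huge value when
 * used > total, so a later "if (remaining >= n)" check passes and writes
 * past the buffer.  Compare before subtracting instead. */
static size_t remaining_bytes(size_t total, size_t used)
{
    if (used >= total)
        return 0;
    return total - used;
}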
ZEND_VM_HANDLER(96, ZEND_FETCH_DIM_UNSET, VAR|CV, CONST|TMPVAR|CV) { USE_OPLINE zend_free_op free_op1, free_op2; zval *container; SAVE_OPLINE(); container = GET_OP1_ZVAL_PTR_PTR_UNDEF(BP_VAR_UNSET); zend_fetch_dimension_address_UNSET(container, GET_OP2_ZVAL_PTR_UNDEF(BP_VAR_R), OP2_TYPE OPLINE_CC EXECUTE_DATA_CC); FREE_OP2(); if (OP1_TYPE == IS_VAR) { zval *result = EX_VAR(opline->result.var); FREE_VAR_PTR_AND_EXTRACT_RESULT_IF_NECESSARY(free_op1, result); } ZEND_VM_NEXT_OPCODE_CHECK_EXCEPTION(); }
0
[ "CWE-787" ]
php-src
f1ce8d5f5839cb2069ea37ff424fb96b8cd6932d
105,743,517,961,228,670,000,000,000,000,000,000,000
16
Fix #73122: Integer Overflow when concatenating strings We must avoid integer overflows in memory allocations, so we introduce an additional check in the VM, and bail out in the rare case of an overflow. Since the recent fix for bug #74960 still doesn't catch all possible overflows, we fix that right away.
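The fix adds a length check before the concatenation buffer is sized, so the size computation itself cannot overflow. A standalone C sketch of that guard; the real check lives in the Zend VM and uses engine-specific allocation and error paths.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Concatenate two buffers, refusing lengths whose sum (plus the NUL byte)
 * would wrap around size_t and silently shrink the allocation. */
static char *concat_checked(const char *a, size_t alen,
                            const char *b, size_t blen)
{
    char *out;

    if (alen > SIZE_MAX - blen)        /* alen + blen would wrap */
        return NULL;
    if (alen + blen > SIZE_MAX - 1)    /* +1 for the NUL would wrap */
        return NULL;

    out = malloc(alen + blen + 1);
    if (out == NULL)
        return NULL;
    memcpy(out, a, alen);
    memcpy(out + alen, b, blen);
    out[alen + blen] = '\0';
    return out;
}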
main (int argc, char **argv) { int last_argc = -1; gpg_error_t err; int rc; parsed_uri_t uri; uri_tuple_t r; http_t hd; int c; unsigned int my_http_flags = 0; int no_out = 0; int tls_dbg = 0; int no_crl = 0; const char *cafile = NULL; http_session_t session = NULL; unsigned int timeout = 0; gpgrt_init (); log_set_prefix (PGM, GPGRT_LOG_WITH_PREFIX | GPGRT_LOG_WITH_PID); if (argc) { argc--; argv++; } while (argc && last_argc != argc ) { last_argc = argc; if (!strcmp (*argv, "--")) { argc--; argv++; break; } else if (!strcmp (*argv, "--help")) { fputs ("usage: " PGM " URL\n" "Options:\n" " --verbose print timings etc.\n" " --debug flyswatter\n" " --tls-debug N use TLS debug level N\n" " --cacert FNAME expect CA certificate in file FNAME\n" " --timeout MS timeout for connect in MS\n" " --no-verify do not verify the certificate\n" " --force-tls use HTTP_FLAG_FORCE_TLS\n" " --force-tor use HTTP_FLAG_FORCE_TOR\n" " --no-out do not print the content\n" " --no-crl do not consuilt a CRL\n", stdout); exit (0); } else if (!strcmp (*argv, "--verbose")) { verbose++; argc--; argv++; } else if (!strcmp (*argv, "--debug")) { verbose += 2; debug++; argc--; argv++; } else if (!strcmp (*argv, "--tls-debug")) { argc--; argv++; if (argc) { tls_dbg = atoi (*argv); argc--; argv++; } } else if (!strcmp (*argv, "--cacert")) { argc--; argv++; if (argc) { cafile = *argv; argc--; argv++; } } else if (!strcmp (*argv, "--timeout")) { argc--; argv++; if (argc) { timeout = strtoul (*argv, NULL, 10); argc--; argv++; } } else if (!strcmp (*argv, "--no-verify")) { no_verify = 1; argc--; argv++; } else if (!strcmp (*argv, "--force-tls")) { my_http_flags |= HTTP_FLAG_FORCE_TLS; argc--; argv++; } else if (!strcmp (*argv, "--force-tor")) { my_http_flags |= HTTP_FLAG_FORCE_TOR; argc--; argv++; } else if (!strcmp (*argv, "--no-out")) { no_out = 1; argc--; argv++; } else if (!strcmp (*argv, "--no-crl")) { no_crl = 1; argc--; argv++; } else if (!strncmp (*argv, "--", 2)) { fprintf (stderr, PGM ": unknown option '%s'\n", *argv); exit (1); } } if (argc != 1) { fprintf (stderr, PGM ": no or too many URLS given\n"); exit (1); } if (!cafile) cafile = prepend_srcdir ("tls-ca.pem"); if (verbose) my_http_flags |= HTTP_FLAG_LOG_RESP; if (verbose || debug) http_set_verbose (verbose, debug); /* http.c makes use of the assuan socket wrapper. */ assuan_sock_init (); if ((my_http_flags & HTTP_FLAG_FORCE_TOR)) { enable_dns_tormode (1); if (assuan_sock_set_flag (ASSUAN_INVALID_FD, "tor-mode", 1)) { log_error ("error enabling Tor mode: %s\n", strerror (errno)); log_info ("(is your Libassuan recent enough?)\n"); } } #if HTTP_USE_NTBTLS log_info ("new session.\n"); err = http_session_new (&session, NULL, ((no_crl? HTTP_FLAG_NO_CRL : 0) | HTTP_FLAG_TRUST_DEF), my_http_tls_verify_cb, NULL); if (err) log_error ("http_session_new failed: %s\n", gpg_strerror (err)); ntbtls_set_debug (tls_dbg, NULL, NULL); #elif HTTP_USE_GNUTLS rc = gnutls_global_init (); if (rc) log_error ("gnutls_global_init failed: %s\n", gnutls_strerror (rc)); http_register_tls_callback (verify_callback); http_register_tls_ca (cafile); err = http_session_new (&session, NULL, ((no_crl? 
HTTP_FLAG_NO_CRL : 0) | HTTP_FLAG_TRUST_DEF), NULL, NULL); if (err) log_error ("http_session_new failed: %s\n", gpg_strerror (err)); /* rc = gnutls_dh_params_init(&dh_params); */ /* if (rc) */ /* log_error ("gnutls_dh_params_init failed: %s\n", gnutls_strerror (rc)); */ /* read_dh_params ("dh_param.pem"); */ /* rc = gnutls_certificate_set_x509_trust_file */ /* (certcred, "ca.pem", GNUTLS_X509_FMT_PEM); */ /* if (rc) */ /* log_error ("gnutls_certificate_set_x509_trust_file failed: %s\n", */ /* gnutls_strerror (rc)); */ /* gnutls_certificate_set_dh_params (certcred, dh_params); */ gnutls_global_set_log_function (my_gnutls_log); if (tls_dbg) gnutls_global_set_log_level (tls_dbg); #else (void)err; (void)tls_dbg; (void)no_crl; #endif /*HTTP_USE_GNUTLS*/ rc = http_parse_uri (&uri, *argv, 1); if (rc) { log_error ("'%s': %s\n", *argv, gpg_strerror (rc)); return 1; } printf ("Scheme: %s\n", uri->scheme); if (uri->opaque) printf ("Value : %s\n", uri->path); else { printf ("Auth : %s\n", uri->auth? uri->auth:"[none]"); printf ("Host : %s (off=%hu)\n", uri->host, uri->off_host); printf ("Port : %u\n", uri->port); printf ("Path : %s (off=%hu)\n", uri->path, uri->off_path); for (r = uri->params; r; r = r->next) { printf ("Params: %s", r->name); if (!r->no_value) { printf ("=%s", r->value); if (strlen (r->value) != r->valuelen) printf (" [real length=%d]", (int) r->valuelen); } putchar ('\n'); } for (r = uri->query; r; r = r->next) { printf ("Query : %s", r->name); if (!r->no_value) { printf ("=%s", r->value); if (strlen (r->value) != r->valuelen) printf (" [real length=%d]", (int) r->valuelen); } putchar ('\n'); } printf ("Flags :%s%s%s%s\n", uri->is_http? " http":"", uri->opaque? " opaque":"", uri->v6lit? " v6lit":"", uri->onion? " onion":""); printf ("TLS : %s\n", uri->use_tls? "yes": (my_http_flags&HTTP_FLAG_FORCE_TLS)? "forced" : "no"); printf ("Tor : %s\n", (my_http_flags&HTTP_FLAG_FORCE_TOR)? "yes" : "no"); } fflush (stdout); http_release_parsed_uri (uri); uri = NULL; if (session) http_session_set_timeout (session, timeout); rc = http_open_document (&hd, *argv, NULL, my_http_flags, NULL, session, NULL, NULL); if (rc) { log_error ("can't get '%s': %s\n", *argv, gpg_strerror (rc)); return 1; } log_info ("open_http_document succeeded; status=%u\n", http_get_status_code (hd)); { const char **names; int i; names = http_get_header_names (hd); if (!names) log_fatal ("http_get_header_names failed: %s\n", gpg_strerror (gpg_error_from_syserror ())); for (i = 0; names[i]; i++) printf ("HDR: %s: %s\n", names[i], http_get_header (hd, names[i])); xfree (names); } fflush (stdout); switch (http_get_status_code (hd)) { case 200: case 400: case 401: case 403: case 404: { unsigned long count = 0; while ((c = es_getc (http_get_read_ptr (hd))) != EOF) { count++; if (!no_out) putchar (c); } log_info ("Received bytes: %lu\n", count); } break; case 301: case 302: case 307: log_info ("Redirected to: %s\n", http_get_header (hd, "Location")); break; } http_close (hd, 0); http_session_release (session); #ifdef HTTP_USE_GNUTLS gnutls_global_deinit (); #endif /*HTTP_USE_GNUTLS*/ return 0; }
0
[ "CWE-352" ]
gnupg
4a4bb874f63741026bd26264c43bb32b1099f060
135,775,384,681,152,200,000,000,000,000,000,000,000
305
dirmngr: Avoid possible CSRF attacks via http redirects. * dirmngr/http.h (parsed_uri_s): Add fields off_host and off_path. (http_redir_info_t): New. * dirmngr/http.c (do_parse_uri): Set new fields. (same_host_p): New. (http_prepare_redirect): New. * dirmngr/t-http-basic.c: New test. * dirmngr/ks-engine-hkp.c (send_request): Use http_prepare_redirect instead of the open code. * dirmngr/ks-engine-http.c (ks_http_fetch): Ditto. -- With this change a http query will not follow a redirect unless the Location header gives the same host. If the host is different only the host and port is taken from the Location header and the original path and query parts are kept. Signed-off-by: Werner Koch <[email protected]> (cherry picked from commit fa1b1eaa4241ff3f0634c8bdf8591cbc7c464144)
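The heart of the change is a same-host test: a redirect to the same host is followed, otherwise only host and port are taken from the Location header and the original path and query are kept, which blocks redirect-driven CSRF against arbitrary URLs. A simplified standalone sketch of the host comparison; the real same_host_p() also handles literal addresses and other corner cases.

#include <string.h>
#include <strings.h>   /* strcasecmp */

/* Hostnames are compared case-insensitively; scheme, path and query
 * deliberately do not influence the decision. */
static int same_host_p(const char *orig_host, const char *redirect_host)
{
    if (orig_host == NULL || redirect_host == NULL)
        return 0;
    return strcasecmp(orig_host, redirect_host) == 0;
}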
static void k_shift(struct vc_data *vc, unsigned char value, char up_flag) { int old_state = shift_state; if (rep) return; /* * Mimic typewriter: * a CapsShift key acts like Shift but undoes CapsLock */ if (value == KVAL(K_CAPSSHIFT)) { value = KVAL(K_SHIFT); if (!up_flag) clr_vc_kbd_led(kbd, VC_CAPSLOCK); } if (up_flag) { /* * handle the case that two shift or control * keys are depressed simultaneously */ if (shift_down[value]) shift_down[value]--; } else shift_down[value]++; if (shift_down[value]) shift_state |= (1 << value); else shift_state &= ~(1 << value); /* kludge */ if (up_flag && shift_state != old_state && npadch != -1) { if (kbd->kbdmode == VC_UNICODE) to_utf8(vc, npadch); else put_queue(vc, npadch & 0xff); npadch = -1; } }
1
[ "CWE-190" ]
linux
b86dab054059b970111b5516ae548efaae5b3aae
198,264,764,006,323,200,000,000,000,000,000,000,000
40
vt: keyboard: avoid signed integer overflow in k_ascii When k_ascii is invoked several times in a row there is a potential for signed integer overflow: UBSAN: Undefined behaviour in drivers/tty/vt/keyboard.c:888:19 signed integer overflow: 10 * 1111111111 cannot be represented in type 'int' CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.6.11 #1 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 Call Trace: <IRQ> __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0xce/0x128 lib/dump_stack.c:118 ubsan_epilogue+0xe/0x30 lib/ubsan.c:154 handle_overflow+0xdc/0xf0 lib/ubsan.c:184 __ubsan_handle_mul_overflow+0x2a/0x40 lib/ubsan.c:205 k_ascii+0xbf/0xd0 drivers/tty/vt/keyboard.c:888 kbd_keycode drivers/tty/vt/keyboard.c:1477 [inline] kbd_event+0x888/0x3be0 drivers/tty/vt/keyboard.c:1495 While it can be worked around by using check_mul_overflow()/ check_add_overflow(), it is better to introduce a separate flag to signal that number pad is being used to compose a symbol, and change type of the accumulator from signed to unsigned, thus avoiding undefined behavior when it overflows. Reported-by: Kyungtae Kim <[email protected]> Signed-off-by: Dmitry Torokhov <[email protected]> Cc: stable <[email protected]> Link: https://lore.kernel.org/r/20200525232740.GA262061@dtor-ws Signed-off-by: Greg Kroah-Hartman <[email protected]>
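The fix replaces the signed npadch accumulator (with -1 as the "empty" sentinel) by an unsigned value plus an explicit flag, so repeated "value = value * 10 + digit" can no longer hit signed-overflow undefined behavior. A standalone sketch of that pattern; the kernel keeps this state in keyboard.c-local variables.

#include <stdbool.h>

/* Accumulate decimal digits typed on the keypad.  Unsigned arithmetic may
 * wrap, but wrapping is well-defined, and the separate flag replaces the
 * old "-1 means no pending value" convention. */
static unsigned int npadch_value;
static bool npadch_active;

static void k_ascii_digit(unsigned int digit)
{
    if (!npadch_active)
        npadch_value = digit;
    else
        npadch_value = npadch_value * 10 + digit;
    npadch_active = true;
}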
virtual bool maintenanceOk() const { return false; }
0
[ "CWE-20" ]
mongo
5c7c6729c37514760fd34da462b6961a2e385417
275,211,029,182,359,400,000,000,000,000,000,000,000
3
SERVER-38275 ban explain with UUID
struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, int noblock, int *err) { int error; struct sk_buff *skb; long timeo; timeo = sock_rcvtimeo(sk, noblock); pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo, MAX_SCHEDULE_TIMEOUT); do { /* Again only user level code calls this function, * so nothing interrupt level * will suddenly eat the receive_queue. * * Look at current nfs client by the way... * However, this function was correct in any case. 8) */ if (flags & MSG_PEEK) { spin_lock_bh(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); if (skb) atomic_inc(&skb->users); spin_unlock_bh(&sk->sk_receive_queue.lock); } else { skb = skb_dequeue(&sk->sk_receive_queue); } if (skb) return skb; /* Caller is allowed not to check sk->sk_err before calling. */ error = sock_error(sk); if (error) goto no_packet; if (sk->sk_shutdown & RCV_SHUTDOWN) break; if (sk_can_busy_loop(sk) && sk_busy_loop(sk, noblock)) continue; /* User doesn't want to wait. */ error = -EAGAIN; if (!timeo) goto no_packet; } while (sctp_wait_for_packet(sk, err, &timeo) == 0); return NULL; no_packet: *err = error; return NULL; }
0
[ "CWE-362", "CWE-703" ]
linux
2d45a02d0166caf2627fe91897c6ffc3b19514c4
162,629,754,220,409,710,000,000,000,000,000,000,000
57
sctp: fix ASCONF list handling ->auto_asconf_splist is per namespace and mangled by functions like sctp_setsockopt_auto_asconf() which doesn't guarantee any serialization. Also, the call to inet_sk_copy_descendant() was backing up ->auto_asconf_list through the copy but was not honoring ->do_auto_asconf, which could lead to list corruption if it was different between both sockets. This commit thus fixes the list handling by using ->addr_wq_lock spinlock to protect the list. A special handling is done upon socket creation and destruction for that. Error handling on sctp_init_sock() will never return an error after having initialized asconf, so sctp_destroy_sock() can be called without addr_wq_lock. The lock now will be taken on sctp_close_sock(), before locking the socket, so we don't do it in inverse order compared to sctp_addr_wq_timeout_handler(). Instead of taking the lock on sctp_sock_migrate() for copying and restoring the list values, it's preferred to avoid rewriting it by implementing sctp_copy_descendant(). Issue was found with a test application that kept flipping sysctl default_auto_asconf on and off, but one could trigger it by issuing simultaneous setsockopt() calls on multiple sockets or by creating/destroying sockets fast enough. This is only triggerable locally. Fixes: 9f7d653b67ae ("sctp: Add Auto-ASCONF support (core).") Reported-by: Ji Jianwen <[email protected]> Suggested-by: Neil Horman <[email protected]> Suggested-by: Hannes Frederic Sowa <[email protected]> Acked-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: Marcelo Ricardo Leitner <[email protected]> Signed-off-by: David S. Miller <[email protected]>
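The description boils down to serializing every touch of the per-namespace auto_asconf_splist with the addr_wq_lock spinlock, including the sockopt path that flips do_auto_asconf. A much-simplified sketch of that locking discipline; the real patch also reorders this lock against the socket lock and special-cases socket creation, migration and destruction.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <net/sctp/sctp.h>   /* struct sctp_sock, per-netns SCTP fields */

/* Sketch: toggle auto-ASCONF membership under the namespace-wide lock so
 * concurrent setsockopt() calls cannot corrupt the shared list. */
static void set_auto_asconf_locked(struct net *net, struct sctp_sock *sp, int val)
{
        spin_lock_bh(&net->sctp.addr_wq_lock);
        if (!val && sp->do_auto_asconf) {
                list_del(&sp->auto_asconf_list);
                sp->do_auto_asconf = 0;
        } else if (val && !sp->do_auto_asconf) {
                list_add_tail(&sp->auto_asconf_list,
                              &net->sctp.auto_asconf_splist);
                sp->do_auto_asconf = 1;
        }
        spin_unlock_bh(&net->sctp.addr_wq_lock);
}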
ImagingDestroyArray(Imaging im) { int y; if (im->image) for (y = 0; y < im->ysize; y++) if (im->image[y]) free(im->image[y]); }
0
[ "CWE-284" ]
Pillow
5d8a0be45aad78c5a22c8d099118ee26ef8144af
311,032,314,840,410,800,000,000,000,000,000,000,000
9
Memory error in Storage.c when accepting negative image size arguments
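The one-line summary refers to validating image dimensions before the allocation arithmetic in Storage.c, where a negative or oversized xsize/ysize can make the byte count wrap. A generic sketch of such a check; Pillow's real code has its own error reporting and layout-specific pixel sizes.

#include <stdint.h>   /* uint64_t, SIZE_MAX */

/* Validate requested dimensions before computing xsize * pixelsize * ysize:
 * negative values are rejected outright and the total byte count is checked
 * against overflow without ever computing a wrapped product. */
static int image_size_ok(int xsize, int ysize, int pixelsize)
{
    uint64_t rowbytes;

    if (xsize < 0 || ysize < 0 || pixelsize <= 0)
        return 0;
    rowbytes = (uint64_t)xsize * (uint64_t)pixelsize;   /* both <= INT_MAX, no wrap */
    if (ysize != 0 && rowbytes > SIZE_MAX / (uint64_t)ysize)
        return 0;                                       /* total allocation would overflow */
    return 1;
}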
void *Pipe::DelayedDelivery::entry() { Mutex::Locker locker(delay_lock); lgeneric_subdout(pipe->msgr->cct, ms, 20) << *pipe << "DelayedDelivery::entry start" << dendl; while (!stop_delayed_delivery) { if (delay_queue.empty()) { lgeneric_subdout(pipe->msgr->cct, ms, 30) << *pipe << "DelayedDelivery::entry sleeping on delay_cond because delay queue is empty" << dendl; delay_cond.Wait(delay_lock); continue; } utime_t release = delay_queue.front().first; Message *m = delay_queue.front().second; string delay_msg_type = pipe->msgr->cct->_conf->ms_inject_delay_msg_type; if (!flush_count && (release > ceph_clock_now() && (delay_msg_type.empty() || m->get_type_name() == delay_msg_type))) { lgeneric_subdout(pipe->msgr->cct, ms, 10) << *pipe << "DelayedDelivery::entry sleeping on delay_cond until " << release << dendl; delay_cond.WaitUntil(delay_lock, release); continue; } lgeneric_subdout(pipe->msgr->cct, ms, 10) << *pipe << "DelayedDelivery::entry dequeuing message " << m << " for delivery, past " << release << dendl; delay_queue.pop_front(); if (flush_count > 0) { --flush_count; active_flush = true; } if (pipe->in_q->can_fast_dispatch(m)) { if (!stop_fast_dispatching_flag) { delay_dispatching = true; delay_lock.Unlock(); pipe->in_q->fast_dispatch(m); delay_lock.Lock(); delay_dispatching = false; if (stop_fast_dispatching_flag) { // we need to let the stopping thread proceed delay_cond.Signal(); delay_lock.Unlock(); delay_lock.Lock(); } } } else { pipe->in_q->enqueue(m, m->get_priority(), pipe->conn_id); } active_flush = false; } lgeneric_subdout(pipe->msgr->cct, ms, 20) << *pipe << "DelayedDelivery::entry stop" << dendl; return NULL; }
0
[ "CWE-287", "CWE-284" ]
ceph
5ead97120e07054d80623dada90a5cc764c28468
158,706,031,177,861,240,000,000,000,000,000,000,000
49
auth/cephx: add authorizer challenge Allow the accepting side of a connection to reject an initial authorizer with a random challenge. The connecting side then has to respond with an updated authorizer proving they are able to decrypt the service's challenge and that the new authorizer was produced for this specific connection instance. The accepting side requires this challenge and response unconditionally if the client side advertises they have the feature bit. Servers wishing to require this improved level of authentication simply have to require the appropriate feature. Signed-off-by: Sage Weil <[email protected]> (cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b) # Conflicts: # src/auth/Auth.h # src/auth/cephx/CephxProtocol.cc # src/auth/cephx/CephxProtocol.h # src/auth/none/AuthNoneProtocol.h # src/msg/Dispatcher.h # src/msg/async/AsyncConnection.cc - const_iterator - ::decode vs decode - AsyncConnection ctor arg noise - get_random_bytes(), not cct->random()
static int insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, pgprot_t prot) { struct mm_struct *mm = vma->vm_mm; int retval; pte_t *pte, entry; spinlock_t *ptl; retval = -ENOMEM; pte = get_locked_pte(mm, addr, &ptl); if (!pte) goto out; retval = -EBUSY; if (!pte_none(*pte)) goto out_unlock; /* Ok, finally just insert the thing.. */ entry = pte_mkspecial(pfn_pte(pfn, prot)); set_pte_at(mm, addr, pte, entry); update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */ retval = 0; out_unlock: pte_unmap_unlock(pte, ptl); out: return retval; }
0
[ "CWE-20" ]
linux-2.6
89f5b7da2a6bad2e84670422ab8192382a5aeb9f
203,031,341,165,556,800,000,000,000,000,000,000,000
27
Reinstate ZERO_PAGE optimization in 'get_user_pages()' and fix XIP KAMEZAWA Hiroyuki and Oleg Nesterov point out that since the commit 557ed1fa2620dc119adb86b34c614e152a629a80 ("remove ZERO_PAGE") removed the ZERO_PAGE from the VM mappings, any users of get_user_pages() will generally now populate the VM with real empty pages needlessly. We used to get the ZERO_PAGE when we did the "handle_mm_fault()", but since fault handling no longer uses ZERO_PAGE for new anonymous pages, we now need to handle that special case in follow_page() instead. In particular, the removal of ZERO_PAGE effectively removed the core file writing optimization where we would skip writing pages that had not been populated at all, and increased memory pressure a lot by allocating all those useless newly zeroed pages. This reinstates the optimization by making the unmapped PTE case the same as for a non-existent page table, which already did this correctly. While at it, this also fixes the XIP case for follow_page(), where the caller could not differentiate between the case of a page that simply could not be used (because it had no "struct page" associated with it) and a page that just wasn't mapped. We do that by simply returning an error pointer for pages that could not be turned into a "struct page *". The error is arbitrarily picked to be EFAULT, since that was what get_user_pages() already used for the equivalent IO-mapped page case. [ Also removed an impossible test for pte_offset_map_lock() failing: that's not how that function works ] Acked-by: Oleg Nesterov <[email protected]> Acked-by: Nick Piggin <[email protected]> Cc: KAMEZAWA Hiroyuki <[email protected]> Cc: Hugh Dickins <[email protected]> Cc: Andrew Morton <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: Roland McGrath <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
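The commit message notes that follow_page() now returns an error pointer (arbitrarily EFAULT) so callers can tell "no usable struct page" apart from "not mapped at all". The following is a rough, self-contained sketch of that error-pointer idiom, simplified from the kernel's ERR_PTR/IS_ERR helpers and using an invented lookup_page(); it is not the patched code.

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* hypothetical lookup: NULL means "not mapped", ERR_PTR(-EFAULT)
 * means "mapped, but no struct page behind it" */
static void *lookup_page(int has_pte, int has_struct_page)
{
    static int dummy_page;

    if (!has_pte)
        return NULL;
    if (!has_struct_page)
        return ERR_PTR(-EFAULT);
    return &dummy_page;
}

int main(void)
{
    void *p = lookup_page(1, 0);

    if (IS_ERR(p))
        printf("error: %ld\n", PTR_ERR(p));   /* -EFAULT case */
    else if (!p)
        printf("not mapped\n");
    else
        printf("got a page\n");
    return 0;
}

Encoding the errno in the pointer lets a single return value carry three distinct outcomes, which is exactly the distinction the commit needed for the XIP case.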
TEST(QueryProjectionTest, InvalidPositionalProjectionDefaultPathMatchExpression) { QueryTestServiceContext serviceCtx; auto opCtx = serviceCtx.makeOperationContext(); const boost::intrusive_ptr<ExpressionContext> expCtx( new ExpressionContext(opCtx.get(), nullptr, kTestNss)); unique_ptr<MatchExpression> queryMatchExpr(new AlwaysFalseMatchExpression()); ASSERT_EQ(nullptr, queryMatchExpr->path().rawData()); BSONObj projObj = fromjson("{'a.$': 1}"); ASSERT_THROWS(projection_ast::parse( expCtx, projObj, queryMatchExpr.get(), BSONObj(), ProjectionPolicies{}), DBException); // Projecting onto empty field should fail. BSONObj emptyFieldProjObj = fromjson("{'.$': 1}"); ASSERT_THROWS( projection_ast::parse( expCtx, emptyFieldProjObj, queryMatchExpr.get(), BSONObj(), ProjectionPolicies{}), DBException); }
0
[ "CWE-732" ]
mongo
cd583b6c4d8aa2364f255992708b9bb54e110cf4
164,421,914,102,528,580,000,000,000,000,000,000,000
21
SERVER-53929 Add stricter parser checks around positional projection
RateLimit *ClientHandler::get_rlimit() { return &conn_.rlimit; }
0
[]
nghttp2
95efb3e19d174354ca50c65d5d7227d92bcd60e1
155,403,242,450,772,260,000,000,000,000,000,000,000
1
Don't read too greedily
ReportLedBadType(CompatInfo *info, LedInfo *ledi, const char *field, const char *wanted) { return ReportBadType(info->ctx, "indicator map", field, xkb_atom_text(info->ctx, ledi->led.name), wanted); }
0
[ "CWE-476" ]
libxkbcommon
96df3106d49438e442510c59acad306e94f3db4d
45,901,630,547,395,600,000,000,000,000,000,000,000
7
xkbcomp: Don't crash on no-op modmask expressions If we have an expression of the form 'l1' in an interp section, we unconditionally try to dereference its args, even if it has none. Signed-off-by: Daniel Stone <[email protected]>
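The crash described above comes from dereferencing an expression's argument list when the expression, such as a bare 'l1', has no arguments at all. Below is a minimal sketch of the defensive pattern with hypothetical types and names, not libxkbcommon's actual structures.

#include <stddef.h>
#include <stdbool.h>

struct expr {
    int op;                 /* kind of expression */
    struct expr *args;      /* NULL for a bare identifier like 'l1' */
};

/* return cleanly instead of crashing when a no-op expression
 * carries no arguments to look at */
static bool resolve_modmask(const struct expr *e, unsigned int *mask_out)
{
    if (!e)
        return false;
    if (!e->args) {          /* nothing to dereference: treat as no-op */
        *mask_out = 0;
        return true;
    }
    *mask_out = (unsigned int)e->args->op;  /* placeholder computation */
    return true;
}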
virDomainHostdevDefCheckABIStability(virDomainHostdevDefPtr src, virDomainHostdevDefPtr dst) { if (src->mode != dst->mode) { virReportError(VIR_ERR_CONFIG_UNSUPPORTED, _("Target host device mode %s does not match source %s"), virDomainHostdevModeTypeToString(dst->mode), virDomainHostdevModeTypeToString(src->mode)); return false; } if (src->mode == VIR_DOMAIN_HOSTDEV_MODE_SUBSYS && src->source.subsys.type != dst->source.subsys.type) { virReportError(VIR_ERR_CONFIG_UNSUPPORTED, _("Target host device subsystem %s does not match source %s"), virDomainHostdevSubsysTypeToString(dst->source.subsys.type), virDomainHostdevSubsysTypeToString(src->source.subsys.type)); return false; } if (!virDomainDeviceInfoCheckABIStability(src->info, dst->info)) return false; return true; }
0
[ "CWE-212" ]
libvirt
a5b064bf4b17a9884d7d361733737fb614ad8979
156,426,285,909,933,740,000,000,000,000,000,000,000
25
conf: Don't format http cookies unless VIR_DOMAIN_DEF_FORMAT_SECURE is used Starting with 3b076391befc3fe72deb0c244ac6c2b4c100b410 (v6.1.0-122-g3b076391be) we support http cookies. Since they may contain somewhat sensitive information we should not format them into the XML unless VIR_DOMAIN_DEF_FORMAT_SECURE is asserted. Reported-by: Han Han <[email protected]> Signed-off-by: Peter Krempa <[email protected]> Reviewed-by: Erik Skultety <[email protected]>
static int chmd_read_headers(struct mspack_system *sys, struct mspack_file *fh, struct mschmd_header *chm, int entire) { unsigned int section, name_len, x, errors, num_chunks; unsigned char buf[0x54], *chunk = NULL, *name, *p, *end; struct mschmd_file *fi, *link = NULL; off_t offset, length; int num_entries; /* initialise pointers */ chm->files = NULL; chm->sysfiles = NULL; chm->chunk_cache = NULL; chm->sec0.base.chm = chm; chm->sec0.base.id = 0; chm->sec1.base.chm = chm; chm->sec1.base.id = 1; chm->sec1.content = NULL; chm->sec1.control = NULL; chm->sec1.spaninfo = NULL; chm->sec1.rtable = NULL; /* read the first header */ if (sys->read(fh, &buf[0], chmhead_SIZEOF) != chmhead_SIZEOF) { return MSPACK_ERR_READ; } /* check ITSF signature */ if (EndGetI32(&buf[chmhead_Signature]) != 0x46535449) { return MSPACK_ERR_SIGNATURE; } /* check both header GUIDs */ if (mspack_memcmp(&buf[chmhead_GUID1], &guids[0], 32L) != 0) { D(("incorrect GUIDs")) return MSPACK_ERR_SIGNATURE; } chm->version = EndGetI32(&buf[chmhead_Version]); chm->timestamp = EndGetM32(&buf[chmhead_Timestamp]); chm->language = EndGetI32(&buf[chmhead_LanguageID]); if (chm->version > 3) { sys->message(fh, "WARNING; CHM version > 3"); } /* read the header section table */ if (sys->read(fh, &buf[0], chmhst3_SIZEOF) != chmhst3_SIZEOF) { return MSPACK_ERR_READ; } /* chmhst3_OffsetCS0 does not exist in version 1 or 2 CHM files. * The offset will be corrected later, once HS1 is read. */ if (read_off64(&offset, &buf[chmhst_OffsetHS0], sys, fh) || read_off64(&chm->dir_offset, &buf[chmhst_OffsetHS1], sys, fh) || read_off64(&chm->sec0.offset, &buf[chmhst3_OffsetCS0], sys, fh)) { return MSPACK_ERR_DATAFORMAT; } /* seek to header section 0 */ if (sys->seek(fh, offset, MSPACK_SYS_SEEK_START)) { return MSPACK_ERR_SEEK; } /* read header section 0 */ if (sys->read(fh, &buf[0], chmhs0_SIZEOF) != chmhs0_SIZEOF) { return MSPACK_ERR_READ; } if (read_off64(&chm->length, &buf[chmhs0_FileLen], sys, fh)) { return MSPACK_ERR_DATAFORMAT; } /* seek to header section 1 */ if (sys->seek(fh, chm->dir_offset, MSPACK_SYS_SEEK_START)) { return MSPACK_ERR_SEEK; } /* read header section 1 */ if (sys->read(fh, &buf[0], chmhs1_SIZEOF) != chmhs1_SIZEOF) { return MSPACK_ERR_READ; } chm->dir_offset = sys->tell(fh); chm->chunk_size = EndGetI32(&buf[chmhs1_ChunkSize]); chm->density = EndGetI32(&buf[chmhs1_Density]); chm->depth = EndGetI32(&buf[chmhs1_Depth]); chm->index_root = EndGetI32(&buf[chmhs1_IndexRoot]); chm->num_chunks = EndGetI32(&buf[chmhs1_NumChunks]); chm->first_pmgl = EndGetI32(&buf[chmhs1_FirstPMGL]); chm->last_pmgl = EndGetI32(&buf[chmhs1_LastPMGL]); if (chm->version < 3) { /* versions before 3 don't have chmhst3_OffsetCS0 */ chm->sec0.offset = chm->dir_offset + (chm->chunk_size * chm->num_chunks); } /* check if content offset or file size is wrong */ if (chm->sec0.offset > chm->length) { D(("content section begins after file has ended")) return MSPACK_ERR_DATAFORMAT; } /* ensure there are chunks and that chunk size is * large enough for signature and num_entries */ if (chm->chunk_size < (pmgl_Entries + 2)) { D(("chunk size not large enough")) return MSPACK_ERR_DATAFORMAT; } if (chm->num_chunks == 0) { D(("no chunks")) return MSPACK_ERR_DATAFORMAT; } /* The chunk_cache data structure is not great; large values for num_chunks * or num_chunks*chunk_size can exhaust all memory. Until a better chunk * cache is implemented, put arbitrary limits on num_chunks and chunk size. 
*/ if (chm->num_chunks > 100000) { D(("more than 100,000 chunks")) return MSPACK_ERR_DATAFORMAT; } if ((off_t)chm->chunk_size * (off_t)chm->num_chunks > chm->length) { D(("chunks larger than entire file")) return MSPACK_ERR_DATAFORMAT; } /* common sense checks on header section 1 fields */ if ((chm->chunk_size & (chm->chunk_size - 1)) != 0) { sys->message(fh, "WARNING; chunk size is not a power of two"); } if (chm->first_pmgl != 0) { sys->message(fh, "WARNING; first PMGL chunk is not zero"); } if (chm->first_pmgl > chm->last_pmgl) { D(("first pmgl chunk is after last pmgl chunk")) return MSPACK_ERR_DATAFORMAT; } if (chm->index_root != 0xFFFFFFFF && chm->index_root >= chm->num_chunks) { D(("index_root outside valid range")) return MSPACK_ERR_DATAFORMAT; } /* if we are doing a quick read, stop here! */ if (!entire) { return MSPACK_ERR_OK; } /* seek to the first PMGL chunk, and reduce the number of chunks to read */ if ((x = chm->first_pmgl) != 0) { if (sys->seek(fh,(off_t) (x * chm->chunk_size), MSPACK_SYS_SEEK_CUR)) { return MSPACK_ERR_SEEK; } } num_chunks = chm->last_pmgl - x + 1; if (!(chunk = (unsigned char *) sys->alloc(sys, (size_t)chm->chunk_size))) { return MSPACK_ERR_NOMEMORY; } /* read and process all chunks from FirstPMGL to LastPMGL */ errors = 0; while (num_chunks--) { /* read next chunk */ if (sys->read(fh, chunk, (int)chm->chunk_size) != (int)chm->chunk_size) { sys->free(chunk); return MSPACK_ERR_READ; } /* process only directory (PMGL) chunks */ if (EndGetI32(&chunk[pmgl_Signature]) != 0x4C474D50) continue; if (EndGetI32(&chunk[pmgl_QuickRefSize]) < 2) { sys->message(fh, "WARNING; PMGL quickref area is too small"); } if (EndGetI32(&chunk[pmgl_QuickRefSize]) > ((int)chm->chunk_size - pmgl_Entries)) { sys->message(fh, "WARNING; PMGL quickref area is too large"); } p = &chunk[pmgl_Entries]; end = &chunk[chm->chunk_size - 2]; num_entries = EndGetI16(end); while (num_entries--) { READ_ENCINT(name_len); if (name_len > (unsigned int) (end - p)) goto chunk_end; name = p; p += name_len; READ_ENCINT(section); READ_ENCINT(offset); READ_ENCINT(length); /* ignore blank or one-char (e.g. "/") filenames we'd return as blank */ if (name_len < 2 || !name[0] || !name[1]) continue; /* empty files and directory names are stored as a file entry at * offset 0 with length 0. We want to keep empty files, but not * directory names, which end with a "/" */ if ((offset == 0) && (length == 0)) { if ((name_len > 0) && (name[name_len-1] == '/')) continue; } if (section > 1) { sys->message(fh, "invalid section number '%u'.", section); continue; } if (!(fi = (struct mschmd_file *) sys->alloc(sys, sizeof(struct mschmd_file) + name_len + 1))) { sys->free(chunk); return MSPACK_ERR_NOMEMORY; } fi->next = NULL; fi->filename = (char *) &fi[1]; fi->section = ((section == 0) ? 
(struct mschmd_section *) (&chm->sec0) : (struct mschmd_section *) (&chm->sec1)); fi->offset = offset; fi->length = length; sys->copy(name, fi->filename, (size_t) name_len); fi->filename[name_len] = '\0'; if (name[0] == ':' && name[1] == ':') { /* system file */ if (mspack_memcmp(&name[2], &content_name[2], 31L) == 0) { if (mspack_memcmp(&name[33], &content_name[33], 8L) == 0) { chm->sec1.content = fi; } else if (mspack_memcmp(&name[33], &control_name[33], 11L) == 0) { chm->sec1.control = fi; } else if (mspack_memcmp(&name[33], &spaninfo_name[33], 8L) == 0) { chm->sec1.spaninfo = fi; } else if (mspack_memcmp(&name[33], &rtable_name[33], 72L) == 0) { chm->sec1.rtable = fi; } } fi->next = chm->sysfiles; chm->sysfiles = fi; } else { /* normal file */ if (link) link->next = fi; else chm->files = fi; link = fi; } } /* this is reached either when num_entries runs out, or if * reading data from the chunk reached a premature end of chunk */ chunk_end: if (num_entries >= 0) { D(("chunk ended before all entries could be read")) errors++; } } sys->free(chunk); return (errors > 0) ? MSPACK_ERR_DATAFORMAT : MSPACK_ERR_OK; }
0
[ "CWE-476" ]
libmspack
8759da8db6ec9e866cb8eb143313f397f925bb4f
37,544,985,868,185,216,000,000,000,000,000,000,000
260
Avoid returning CHM file entries that are "blank" because they have embedded null bytes
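The parser above already skips names it would return as blank, and the message says the goal is to avoid returning entries whose names contain embedded null bytes. A hedged, standalone way to express that check, which is not libmspack's exact code, is to scan the length-prefixed name for a NUL before accepting the entry:

#include <string.h>
#include <stdbool.h>

/* CHM names are length-prefixed, so an embedded '\0' is legal on the
 * wire but would silently truncate the name once it is treated as a
 * C string by the caller */
static bool name_is_usable(const unsigned char *name, size_t name_len)
{
    if (name_len < 2)                      /* "" or "/" style entries */
        return false;
    return memchr(name, '\0', name_len) == NULL;
}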
static bool is_class_idx_in_code_classes(RBinDexObj *bin, int class_idx) { int i; for (i = 0; i < bin->header.class_size; i++) { if (class_idx == bin->classes[i].class_id) { return true; } } return false; }
0
[ "CWE-125" ]
radare2
ead645853a63bf83d8386702cad0cf23b31d7eeb
131,816,987,618,313,970,000,000,000,000,000,000,000
9
fix #6857
static int mwifiex_write_reg(struct mwifiex_adapter *adapter, int reg, u32 data) { struct pcie_service_card *card = adapter->card; iowrite32(data, card->pci_mmap1 + reg); return 0; }
0
[ "CWE-400", "CWE-200", "CWE-401" ]
linux
d10dcb615c8e29d403a24d35f8310a7a53e3050c
107,275,804,385,202,000,000,000,000,000,000,000,000
8
mwifiex: pcie: Fix memory leak in mwifiex_pcie_init_evt_ring In mwifiex_pcie_init_evt_ring, a new skb is allocated which should be released if mwifiex_map_pci_memory() fails. The release for skb and card->evtbd_ring_vbase is added. Fixes: 0732484b47b5 ("mwifiex: separate ring initialization and ring creation routines") Signed-off-by: Navid Emamdoost <[email protected]> Acked-by: Ganapathi Bhat <[email protected]> Signed-off-by: Kalle Valo <[email protected]>
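The leak described above is the classic shape "allocate, then a later step fails, then return without freeing". The sketch below uses illustrative names only (the real driver releases an skb and the event ring buffer); it shows the missing release being added on the failing branch.

#include <stdlib.h>

struct buf { void *data; };

static int map_for_dma(struct buf *b) { (void)b; return -1; /* pretend it failed */ }

static int init_evt_ring(struct buf **out)
{
    struct buf *b = calloc(1, sizeof(*b));
    if (!b)
        return -1;

    b->data = malloc(2048);
    if (!b->data)
        goto err_free_buf;

    if (map_for_dma(b) != 0)
        goto err_free_data;        /* this release was the missing piece */

    *out = b;
    return 0;

err_free_data:
    free(b->data);
err_free_buf:
    free(b);
    return -1;
}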
void Context::onLog() { if (in_vm_context_created_ && wasm_->onLog_) { wasm_->onLog_(this, id_); } }
0
[ "CWE-476" ]
envoy
8788a3cf255b647fd14e6b5e2585abaaedb28153
249,399,499,636,023,500,000,000,000,000,000,000,000
5
1.4 - Do not call into the VM unless the VM Context has been created. (#24) * Ensure that the in-VM Context is created before onDone is called. Signed-off-by: John Plevyak <[email protected]> * Update as per offline discussion. Signed-off-by: John Plevyak <[email protected]> * Set in_vm_context_created_ in onNetworkNewConnection. Signed-off-by: John Plevyak <[email protected]> * Add guards to other network calls. Signed-off-by: John Plevyak <[email protected]> * Fix common/wasm tests. Signed-off-by: John Plevyak <[email protected]> * Patch tests. Signed-off-by: John Plevyak <[email protected]> * Remove unnecessary file from cherry-pick. Signed-off-by: John Plevyak <[email protected]>
static int wanxl_attach(struct net_device *dev, unsigned short encoding, unsigned short parity) { port_t *port = dev_to_port(dev); if (encoding != ENCODING_NRZ && encoding != ENCODING_NRZI) return -EINVAL; if (parity != PARITY_NONE && parity != PARITY_CRC32_PR1_CCITT && parity != PARITY_CRC16_PR1_CCITT && parity != PARITY_CRC32_PR0_CCITT && parity != PARITY_CRC16_PR0_CCITT) return -EINVAL; get_status(port)->encoding = encoding; get_status(port)->parity = parity; return 0; }
0
[ "CWE-399" ]
linux
2b13d06c9584b4eb773f1e80bbaedab9a1c344e1
124,717,719,172,057,000,000,000,000,000,000,000,000
20
wanxl: fix info leak in ioctl The wanxl_ioctl() code fails to initialize the two padding bytes of struct sync_serial_settings after the ->loopback member. Add an explicit memset(0) before filling the structure to avoid the info leak. Signed-off-by: Salva Peiró <[email protected]> Signed-off-by: David S. Miller <[email protected]>
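The info leak above comes from copying a struct whose padding bytes were never written, so whatever happened to be on the kernel stack travels out with it. Below is a minimal userspace illustration of the remedy named in the message, zeroing the whole struct before filling it; the struct is hypothetical, not the real sync_serial_settings.

#include <string.h>

struct line_settings {
    unsigned int clock_rate;
    unsigned short loopback;
    /* the compiler may insert padding bytes here */
    unsigned int clock_type;
};

static void fill_settings(struct line_settings *s)
{
    memset(s, 0, sizeof(*s));   /* padding is now deterministically zero */
    s->clock_rate = 64000;
    s->loopback = 0;
    s->clock_type = 1;
    /* *s can now be copied verbatim to user space without leaking
     * uninitialized bytes */
}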
static void unfold_ifblk(struct block **blk) { struct ifblock *ifblk; struct unfold_elm *ue; u_int32 a = vlabel++; u_int32 b = vlabel++; u_int32 c = vlabel++; /* * the virtual labels represent the three points of an if block: * * if (conds) { * a -> * ... * jmp c; * b -> * } else { * ... * } * c -> * * if the conds are true, jump to 'a' * if the conds are false, jump to 'b' * 'c' is used to skip the else if the conds were true */ /* the progress bar */ ef_debug(1, "#"); /* cast the if block */ ifblk = (*blk)->un.ifb; /* compile the conditions */ unfold_conds(ifblk->conds, a, b); /* if the conditions are match, jump here */ SAFE_CALLOC(ue, 1, sizeof(struct unfold_elm)); ue->label = a; TAILQ_INSERT_TAIL(&unfolded_tree, ue, next); /* check if the block is empty. i.e. { } */ if (ifblk->blk != NULL) { /* recursively compile the main block */ unfold_blk(&ifblk->blk); } /* * if there is the else block, we have to skip it * if the condition was true */ if (ifblk->elseblk != NULL) { SAFE_CALLOC(ue, 1, sizeof(struct unfold_elm)); ue->fop.opcode = FOP_JMP; ue->fop.op.jmp = c; TAILQ_INSERT_TAIL(&unfolded_tree, ue, next); } /* if the conditions are NOT match, jump here (after the block) */ SAFE_CALLOC(ue, 1, sizeof(struct unfold_elm)); ue->label = b; TAILQ_INSERT_TAIL(&unfolded_tree, ue, next); /* recursively compile the else block */ if (ifblk->elseblk != NULL) { unfold_blk(&ifblk->elseblk); /* this is the label to skip the else if the condition was true */ SAFE_CALLOC(ue, 1, sizeof(struct unfold_elm)); ue->label = c; TAILQ_INSERT_TAIL(&unfolded_tree, ue, next); } }
0
[ "CWE-703", "CWE-125" ]
ettercap
626dc56686f15f2dda13c48f78c2a666cb6d8506
164,094,646,373,499,750,000,000,000,000,000,000,000
72
Exit gracefully in case of corrupted filters (Closes issue #782)
ebb_ews_gal_store_contact (EContact *contact, goffset offset, const gchar *sha1, guint percent, gpointer user_data, GCancellable *cancellable, GError **error) { struct _db_data *data = (struct _db_data *) user_data; if (contact) { const gchar *uid = e_contact_get_const (contact, E_CONTACT_UID); EBookMetaBackendInfo *nfo; ebews_populate_rev (contact, NULL); e_vcard_util_set_x_attribute (E_VCARD (contact), X_EWS_GAL_SHA1, sha1); if (data->fetch_gal_photos && !g_cancellable_is_cancelled (cancellable)) { GError *local_error = NULL; if (!ebb_ews_fetch_gal_photo_sync (data->bbews, contact, cancellable, &local_error)) ebb_ews_store_photo_check_date (contact, NULL); /* The server can kick-off the flood of photo requests, then better stop it */ if (g_error_matches (local_error, EWS_CONNECTION_ERROR, EWS_CONNECTION_ERROR_SERVERBUSY)) data->fetch_gal_photos = FALSE; g_clear_error (&local_error); } nfo = e_book_meta_backend_info_new (uid, e_contact_get_const (contact, E_CONTACT_REV), NULL, NULL); nfo->object = e_vcard_to_string (E_VCARD (contact), EVC_FORMAT_VCARD_30); if (g_hash_table_remove (data->uids, uid)) { data->changed++; data->modified_objects = g_slist_prepend (data->modified_objects, nfo); } else { data->added++; data->created_objects = g_slist_prepend (data->created_objects, nfo); } } if (data->percent != percent) { data->percent = percent; d (printf ("GAL processing contacts, %d%% complete (%d added, %d changed, %d unchanged\n", percent, data->added, data->changed, data->unchanged)); } }
0
[ "CWE-295" ]
evolution-ews
915226eca9454b8b3e5adb6f2fff9698451778de
113,375,346,797,309,600,000,000,000,000,000,000,000
49
I#27 - SSL Certificates are not validated This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too. Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27
inline SegmentReader::SegmentReader(Arena* arena, SegmentId id, kj::ArrayPtr<const word> ptr, ReadLimiter* readLimiter) : arena(arena), id(id), ptr(ptr), readLimiter(readLimiter) {}
0
[ "CWE-400", "CWE-703" ]
capnproto
104870608fde3c698483fdef6b97f093fc15685d
32,338,081,387,954,930,000,000,000,000,000,000,000
3
SECURITY: CPU usage amplification attack. Details: https://github.com/sandstorm-io/capnproto/tree/master/security-advisories/2014-03-02-0-all-cpu-amplification.md
inline void FurnaceGUI::patternRow(int i, bool isPlaying, float lineHeight, int chans, int ord, const DivPattern** patCache) { static char id[32]; bool selectedRow=(i>=sel1.y && i<=sel2.y); ImGui::TableNextRow(0,lineHeight); ImGui::TableNextColumn(); float cursorPosY=ImGui::GetCursorPos().y-ImGui::GetScrollY(); // check if the row is visible if (cursorPosY<-lineHeight || cursorPosY>ImGui::GetWindowSize().y) { return; } // check if we are in range if (ord<0 || ord>=e->song.ordersLen) { return; } if (i<0 || i>=e->song.patLen) { return; } bool isPushing=false; ImVec4 activeColor=uiColors[GUI_COLOR_PATTERN_ACTIVE]; ImVec4 inactiveColor=uiColors[GUI_COLOR_PATTERN_INACTIVE]; ImVec4 rowIndexColor=uiColors[GUI_COLOR_PATTERN_ROW_INDEX]; if (e->song.hilightB>0 && !(i%e->song.hilightB)) { activeColor=uiColors[GUI_COLOR_PATTERN_ACTIVE_HI2]; inactiveColor=uiColors[GUI_COLOR_PATTERN_INACTIVE_HI2]; rowIndexColor=uiColors[GUI_COLOR_PATTERN_ROW_INDEX_HI2]; } else if (e->song.hilightA>0 && !(i%e->song.hilightA)) { activeColor=uiColors[GUI_COLOR_PATTERN_ACTIVE_HI1]; inactiveColor=uiColors[GUI_COLOR_PATTERN_INACTIVE_HI1]; rowIndexColor=uiColors[GUI_COLOR_PATTERN_ROW_INDEX_HI1]; } // check overflow highlight if (settings.overflowHighlight) { if (edit && cursor.y==i) { ImGui::TableSetBgColor(ImGuiTableBgTarget_RowBg0,ImGui::GetColorU32(uiColors[GUI_COLOR_EDITING])); } else if (isPlaying && oldRow==i) { ImGui::TableSetBgColor(ImGuiTableBgTarget_RowBg0,ImGui::GetColorU32(uiColors[GUI_COLOR_PATTERN_PLAY_HEAD])); } else if (e->song.hilightB>0 && !(i%e->song.hilightB)) { ImGui::TableSetBgColor(ImGuiTableBgTarget_RowBg0,ImGui::GetColorU32(uiColors[GUI_COLOR_PATTERN_HI_2])); } else if (e->song.hilightA>0 && !(i%e->song.hilightA)) { ImGui::TableSetBgColor(ImGuiTableBgTarget_RowBg0,ImGui::GetColorU32(uiColors[GUI_COLOR_PATTERN_HI_1])); } } else { isPushing=true; if (edit && cursor.y==i) { ImGui::PushStyleColor(ImGuiCol_Header,ImGui::GetColorU32(uiColors[GUI_COLOR_EDITING])); } else if (isPlaying && oldRow==i) { ImGui::PushStyleColor(ImGuiCol_Header,ImGui::GetColorU32(uiColors[GUI_COLOR_PATTERN_PLAY_HEAD])); } else if (e->song.hilightB>0 && !(i%e->song.hilightB)) { ImGui::PushStyleColor(ImGuiCol_Header,ImGui::GetColorU32(uiColors[GUI_COLOR_PATTERN_HI_2])); } else if (e->song.hilightA>0 && !(i%e->song.hilightA)) { ImGui::PushStyleColor(ImGuiCol_Header,ImGui::GetColorU32(uiColors[GUI_COLOR_PATTERN_HI_1])); } else { isPushing=false; } } // row number if (settings.patRowsBase==1) { ImGui::TextColored(rowIndexColor," %.2X ",i); } else { ImGui::TextColored(rowIndexColor,"%3d ",i); } // for each column for (int j=0; j<chans; j++) { // check if channel is not hidden if (!e->song.chanShow[j]) { patChanX[j]=ImGui::GetCursorPosX(); continue; } int chanVolMax=e->getMaxVolumeChan(j); if (chanVolMax<1) chanVolMax=1; const DivPattern* pat=patCache[j]; ImGui::TableNextColumn(); patChanX[j]=ImGui::GetCursorPosX(); // selection highlight flags int sel1XSum=sel1.xCoarse*32+sel1.xFine; int sel2XSum=sel2.xCoarse*32+sel2.xFine; int j32=j*32; bool selectedNote=selectedRow && (j32>=sel1XSum && j32<=sel2XSum); bool selectedIns=selectedRow && (j32+1>=sel1XSum && j32+1<=sel2XSum); bool selectedVol=selectedRow && (j32+2>=sel1XSum && j32+2<=sel2XSum); bool cursorNote=(cursor.y==i && cursor.xCoarse==j && cursor.xFine==0); bool cursorIns=(cursor.y==i && cursor.xCoarse==j && cursor.xFine==1); bool cursorVol=(cursor.y==i && cursor.xCoarse==j && cursor.xFine==2); // note sprintf(id,"%s##PN_%d_%d",noteName(pat->data[i][0],pat->data[i][1]),i,j); if 
(pat->data[i][0]==0 && pat->data[i][1]==0) { ImGui::PushStyleColor(ImGuiCol_Text,inactiveColor); } else { ImGui::PushStyleColor(ImGuiCol_Text,activeColor); } if (cursorNote) { ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_CURSOR]); ImGui::PushStyleColor(ImGuiCol_HeaderActive,uiColors[GUI_COLOR_PATTERN_CURSOR_ACTIVE]); ImGui::PushStyleColor(ImGuiCol_HeaderHovered,uiColors[GUI_COLOR_PATTERN_CURSOR_HOVER]); ImGui::Selectable(id,true,ImGuiSelectableFlags_NoPadWithHalfSpacing,threeChars); demandX=ImGui::GetCursorPosX(); ImGui::PopStyleColor(3); } else { if (selectedNote) ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_SELECTION]); ImGui::Selectable(id,isPushing || selectedNote,ImGuiSelectableFlags_NoPadWithHalfSpacing,threeChars); if (selectedNote) ImGui::PopStyleColor(); } if (ImGui::IsItemClicked()) { startSelection(j,0,i); } if (ImGui::IsItemHovered(ImGuiHoveredFlags_AllowWhenBlockedByActiveItem)) { updateSelection(j,0,i); } ImGui::PopStyleColor(); // the following is only visible when the channel is not collapsed if (!e->song.chanCollapse[j]) { // instrument if (pat->data[i][2]==-1) { ImGui::PushStyleColor(ImGuiCol_Text,inactiveColor); sprintf(id,"..##PI_%d_%d",i,j); } else { if (pat->data[i][2]<0 || pat->data[i][2]>=e->song.insLen) { ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_INS_ERROR]); } else { DivInstrumentType t=e->song.ins[pat->data[i][2]]->type; if (t!=DIV_INS_AMIGA && t!=e->getPreferInsType(j)) { ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_INS_WARN]); } else { ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_INS]); } } sprintf(id,"%.2X##PI_%d_%d",pat->data[i][2],i,j); } ImGui::SameLine(0.0f,0.0f); if (cursorIns) { ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_CURSOR]); ImGui::PushStyleColor(ImGuiCol_HeaderActive,uiColors[GUI_COLOR_PATTERN_CURSOR_ACTIVE]); ImGui::PushStyleColor(ImGuiCol_HeaderHovered,uiColors[GUI_COLOR_PATTERN_CURSOR_HOVER]); ImGui::Selectable(id,true,ImGuiSelectableFlags_NoPadWithHalfSpacing,twoChars); demandX=ImGui::GetCursorPosX(); ImGui::PopStyleColor(3); } else { if (selectedIns) ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_SELECTION]); ImGui::Selectable(id,isPushing || selectedIns,ImGuiSelectableFlags_NoPadWithHalfSpacing,twoChars); if (selectedIns) ImGui::PopStyleColor(); } if (ImGui::IsItemClicked()) { startSelection(j,1,i); } if (ImGui::IsItemHovered(ImGuiHoveredFlags_AllowWhenBlockedByActiveItem)) { updateSelection(j,1,i); } ImGui::PopStyleColor(); // volume if (pat->data[i][3]==-1) { sprintf(id,"..##PV_%d_%d",i,j); ImGui::PushStyleColor(ImGuiCol_Text,inactiveColor); } else { int volColor=(pat->data[i][3]*127)/chanVolMax; if (volColor>127) volColor=127; if (volColor<0) volColor=0; sprintf(id,"%.2X##PV_%d_%d",pat->data[i][3],i,j); ImGui::PushStyleColor(ImGuiCol_Text,volColors[volColor]); } ImGui::SameLine(0.0f,0.0f); if (cursorVol) { ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_CURSOR]); ImGui::PushStyleColor(ImGuiCol_HeaderActive,uiColors[GUI_COLOR_PATTERN_CURSOR_ACTIVE]); ImGui::PushStyleColor(ImGuiCol_HeaderHovered,uiColors[GUI_COLOR_PATTERN_CURSOR_HOVER]); ImGui::Selectable(id,true,ImGuiSelectableFlags_NoPadWithHalfSpacing,twoChars); demandX=ImGui::GetCursorPosX(); ImGui::PopStyleColor(3); } else { if (selectedVol) ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_SELECTION]); ImGui::Selectable(id,isPushing || selectedVol,ImGuiSelectableFlags_NoPadWithHalfSpacing,twoChars); if (selectedVol) 
ImGui::PopStyleColor(); } if (ImGui::IsItemClicked()) { startSelection(j,2,i); } if (ImGui::IsItemHovered(ImGuiHoveredFlags_AllowWhenBlockedByActiveItem)) { updateSelection(j,2,i); } ImGui::PopStyleColor(); // effects for (int k=0; k<e->song.pat[j].effectRows; k++) { int index=4+(k<<1); bool selectedEffect=selectedRow && (j32+index-1>=sel1XSum && j32+index-1<=sel2XSum); bool selectedEffectVal=selectedRow && (j32+index>=sel1XSum && j32+index<=sel2XSum); bool cursorEffect=(cursor.y==i && cursor.xCoarse==j && cursor.xFine==index-1); bool cursorEffectVal=(cursor.y==i && cursor.xCoarse==j && cursor.xFine==index); // effect if (pat->data[i][index]==-1) { sprintf(id,"..##PE%d_%d_%d",k,i,j); ImGui::PushStyleColor(ImGuiCol_Text,inactiveColor); } else { sprintf(id,"%.2X##PE%d_%d_%d",pat->data[i][index],k,i,j); if (pat->data[i][index]<0x10) { ImGui::PushStyleColor(ImGuiCol_Text,uiColors[fxColors[pat->data[i][index]]]); } else if (pat->data[i][index]<0x20) { ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_EFFECT_SYS_PRIMARY]); } else if (pat->data[i][index]<0x30) { ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_EFFECT_SYS_SECONDARY]); } else if (pat->data[i][index]<0x48) { ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_EFFECT_SYS_PRIMARY]); } else if (pat->data[i][index]<0x90) { ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_EFFECT_INVALID]); } else if (pat->data[i][index]<0xa0) { ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_EFFECT_MISC]); } else if (pat->data[i][index]<0xc0) { ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_EFFECT_INVALID]); } else if (pat->data[i][index]<0xd0) { ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_EFFECT_SPEED]); } else if (pat->data[i][index]<0xe0) { ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_EFFECT_INVALID]); } else { ImGui::PushStyleColor(ImGuiCol_Text,uiColors[extFxColors[pat->data[i][index]-0xe0]]); } } ImGui::SameLine(0.0f,0.0f); if (cursorEffect) { ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_CURSOR]); ImGui::PushStyleColor(ImGuiCol_HeaderActive,uiColors[GUI_COLOR_PATTERN_CURSOR_ACTIVE]); ImGui::PushStyleColor(ImGuiCol_HeaderHovered,uiColors[GUI_COLOR_PATTERN_CURSOR_HOVER]); ImGui::Selectable(id,true,ImGuiSelectableFlags_NoPadWithHalfSpacing,twoChars); demandX=ImGui::GetCursorPosX(); ImGui::PopStyleColor(3); } else { if (selectedEffect) ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_SELECTION]); ImGui::Selectable(id,isPushing || selectedEffect,ImGuiSelectableFlags_NoPadWithHalfSpacing,twoChars); if (selectedEffect) ImGui::PopStyleColor(); } if (ImGui::IsItemClicked()) { startSelection(j,index-1,i); } if (ImGui::IsItemHovered(ImGuiHoveredFlags_AllowWhenBlockedByActiveItem)) { updateSelection(j,index-1,i); } // effect value if (pat->data[i][index+1]==-1) { sprintf(id,"..##PF%d_%d_%d",k,i,j); } else { sprintf(id,"%.2X##PF%d_%d_%d",pat->data[i][index+1],k,i,j); } ImGui::SameLine(0.0f,0.0f); if (cursorEffectVal) { ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_CURSOR]); ImGui::PushStyleColor(ImGuiCol_HeaderActive,uiColors[GUI_COLOR_PATTERN_CURSOR_ACTIVE]); ImGui::PushStyleColor(ImGuiCol_HeaderHovered,uiColors[GUI_COLOR_PATTERN_CURSOR_HOVER]); ImGui::Selectable(id,true,ImGuiSelectableFlags_NoPadWithHalfSpacing,twoChars); demandX=ImGui::GetCursorPosX(); ImGui::PopStyleColor(3); } else { if (selectedEffectVal) ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_SELECTION]); 
ImGui::Selectable(id,isPushing || selectedEffectVal,ImGuiSelectableFlags_NoPadWithHalfSpacing,twoChars); if (selectedEffectVal) ImGui::PopStyleColor(); } if (ImGui::IsItemClicked()) { startSelection(j,index,i); } if (ImGui::IsItemHovered(ImGuiHoveredFlags_AllowWhenBlockedByActiveItem)) { updateSelection(j,index,i); } ImGui::PopStyleColor(); } } } if (isPushing) { ImGui::PopStyleColor(); } ImGui::TableNextColumn(); patChanX[chans]=ImGui::GetCursorPosX(); }
1
[ "CWE-703" ]
furnace
0eb02422d5161767e9983bdaa5c429762d3477ce
136,686,899,264,564,970,000,000,000,000,000,000,000
275
fix possible pattern crash issue #325
int ncp_dirhandle_free(struct ncp_server* server, __u8 dirhandle) { int result; ncp_init_request_s(server, 20); ncp_add_byte(server, dirhandle); result = ncp_request(server, 22); ncp_unlock_server(server); return result; }
0
[ "CWE-119" ]
staging
4c41aa24baa4ed338241d05494f2c595c885af8f
169,165,948,226,001,030,000,000,000,000,000,000,000
9
staging: ncpfs: memory corruption in ncp_read_kernel() If the server is malicious then *bytes_read could be larger than the size of the "target" buffer. It would lead to memory corruption when we do the memcpy(). Reported-by: Dr Silvio Cesare of InfoSect <Silvio Cesare <[email protected]> Signed-off-by: Dan Carpenter <[email protected]> Cc: stable <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
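The corruption happens because a length reported by a potentially malicious server is trusted and fed straight into memcpy() against a fixed-size caller buffer. A hedged sketch of the guard, with generic names rather than the ncpfs code, looks like this:

#include <string.h>
#include <errno.h>

/* reply_len comes from the remote side and must be capped by the
 * size of the buffer the local caller actually provided */
static int copy_reply(void *target, size_t target_size,
                      const void *reply, size_t reply_len)
{
    if (reply_len > target_size)
        return -EIO;                 /* reject instead of overflowing */
    memcpy(target, reply, reply_len);
    return (int)reply_len;
}

The design choice is to treat every remotely supplied length as untrusted input and validate it against locally known bounds before any copy.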
static void sdl_serialize_key(zend_string *key, smart_str *out) { if (key) { WSDL_CACHE_PUT_INT(ZSTR_LEN(key), out); WSDL_CACHE_PUT_N(ZSTR_VAL(key), ZSTR_LEN(key), out); } else { WSDL_CACHE_PUT_INT(WSDL_NO_STRING_MARKER, out); } }
0
[ "CWE-476" ]
php-src
3c939e3f69955d087e0bb671868f7267dfb2a502
239,233,758,368,533,930,000,000,000,000,000,000,000
9
Fix bug #80672 - Null Dereference in SoapClient
PHP_FUNCTION(utf8_decode) { char *arg; size_t arg_len; zend_string *decoded; if (zend_parse_parameters(ZEND_NUM_ARGS(), "s", &arg, &arg_len) == FAILURE) { return; } decoded = xml_utf8_decode((XML_Char*)arg, arg_len, (XML_Char*)"ISO-8859-1"); if (decoded == NULL) { RETURN_FALSE; } RETURN_STR(decoded); }
0
[ "CWE-190" ]
php-src
57b997ebf99e0eb9a073e0dafd2ab100bd4a112d
208,309,066,478,322,300,000,000,000,000,000,000,000
16
Fix bug #71637: Multiple Heap Overflow due to integer overflows
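The bug class referenced above is a size computation that wraps around before allocation, leaving the buffer smaller than the data later written into it. The standalone sketch below shows the defensive check only in outline; it is illustrative, and the actual PHP fix touches several call sites rather than one helper like this.

#include <stdint.h>
#include <stdlib.h>

/* worst case assumed here: every input byte may expand to 4 output
 * bytes, so refuse inputs where len * 4 would not fit in size_t */
static char *alloc_expanded(size_t len)
{
    if (len > (SIZE_MAX - 1) / 4)
        return NULL;                 /* the multiply would overflow */
    return malloc(len * 4 + 1);
}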
inline MetadataType StreamType2MetadataType(StreamType type) { return static_cast<MetadataType>(type); }
0
[ "CWE-476" ]
envoy
8788a3cf255b647fd14e6b5e2585abaaedb28153
186,733,749,645,562,750,000,000,000,000,000,000,000
3
1.4 - Do not call into the VM unless the VM Context has been created. (#24) * Ensure that the in-VM Context is created before onDone is called. Signed-off-by: John Plevyak <[email protected]> * Update as per offline discussion. Signed-off-by: John Plevyak <[email protected]> * Set in_vm_context_created_ in onNetworkNewConnection. Signed-off-by: John Plevyak <[email protected]> * Add guards to other network calls. Signed-off-by: John Plevyak <[email protected]> * Fix common/wasm tests. Signed-off-by: John Plevyak <[email protected]> * Patch tests. Signed-off-by: John Plevyak <[email protected]> * Remove unnecessary file from cherry-pick. Signed-off-by: John Plevyak <[email protected]>
gss_sign (minor_status, context_handle, qop_req, message_buffer, msg_token) OM_uint32 * minor_status; gss_ctx_id_t context_handle; int qop_req; gss_buffer_t message_buffer; gss_buffer_t msg_token; { return (gss_get_mic(minor_status, context_handle, (gss_qop_t) qop_req, message_buffer, msg_token)); }
0
[ "CWE-415" ]
krb5
56f7b1bc95a2a3eeb420e069e7655fb181ade5cf
9,033,848,565,011,629,000,000,000,000,000,000,000
16
Preserve GSS context on init/accept failure After gss_init_sec_context() or gss_accept_sec_context() has created a context, don't delete the mechglue context on failures from subsequent calls, even if the mechanism deletes the mech-specific context (which is allowed by RFC 2744 but not preferred). Check for union contexts with no mechanism context in each GSS function which accepts a gss_ctx_id_t. CVE-2017-11462: RFC 2744 permits a GSS-API implementation to delete an existing security context on a second or subsequent call to gss_init_sec_context() or gss_accept_sec_context() if the call results in an error. This API behavior has been found to be dangerous, leading to the possibility of memory errors in some callers. For safety, GSS-API implementations should instead preserve existing security contexts on error until the caller deletes them. All versions of MIT krb5 prior to this change may delete acceptor contexts on error. Versions 1.13.4 through 1.13.7, 1.14.1 through 1.14.5, and 1.15 through 1.15.1 may also delete initiator contexts on error. ticket: 8598 (new) target_version: 1.15-next target_version: 1.14-next tags: pullup
**/ static const CImg<Tuchar>& lines_LUT256() { static const unsigned char pal[] = { 0,255,255,0,0,28,125,125,235,210,186,182,36,0,125,255, 53,32,255,210,89,186,65,45,125,210,210,97,130,194,0,125, 206,53,190,89,255,146,20,190,154,73,255,36,130,215,0,138, 101,210,61,194,206,0,77,45,255,154,174,0,190,239,89,125, 16,36,158,223,117,0,97,69,223,255,40,239,0,0,255,0, 97,170,93,255,138,40,117,210,0,170,53,158,186,255,0,121, 227,121,186,40,20,190,89,255,77,57,130,142,255,73,186,85, 210,8,32,166,243,130,210,40,255,45,61,142,223,49,121,255, 20,162,158,73,89,255,53,138,210,190,57,235,36,73,255,49, 210,0,210,85,57,97,255,121,85,174,40,255,162,178,0,121, 166,125,53,146,166,255,97,121,65,89,235,231,12,170,36,190, 85,255,166,97,198,77,20,146,109,166,255,28,40,202,121,81, 247,0,210,255,49,0,65,255,36,166,93,77,255,85,251,0, 170,178,0,182,255,0,162,16,154,142,162,223,223,0,0,81, 215,4,215,162,215,125,77,206,121,36,125,231,101,16,255,121, 0,57,190,215,65,125,89,142,255,101,73,53,146,223,125,125, 0,255,0,255,0,206,93,138,49,255,0,202,154,85,45,219, 251,53,0,255,40,130,219,158,16,117,186,130,202,49,65,239, 89,202,49,28,247,134,150,0,255,117,202,4,215,81,186,57, 202,89,73,210,40,93,45,251,206,28,223,142,40,134,162,125, 32,247,97,170,0,255,57,134,73,247,162,0,251,40,142,142, 8,166,206,81,154,194,93,89,125,243,28,109,227,0,190,65, 194,186,0,255,53,45,109,186,186,0,255,130,49,170,69,210, 154,0,109,227,45,255,125,105,81,81,255,0,219,134,170,85, 146,28,170,89,223,97,8,210,255,158,49,40,125,174,174,125, 0,227,166,28,219,130,0,93,239,0,85,255,81,178,125,49, 89,255,53,206,73,113,146,255,0,150,36,219,162,0,210,125, 69,134,255,85,40,89,235,49,215,121,0,206,36,223,174,69, 40,182,178,130,69,45,255,210,85,77,215,0,231,146,0,194, 125,174,0,255,40,89,121,206,57,0,206,170,231,150,81,0, 125,255,4,174,4,190,121,255,4,166,109,130,49,239,170,93, 16,174,210,0,255,16,105,158,93,255,0,125,0,255,158,85, 0,255,0,0,255,170,166,61,121,28,198,215,45,243,61,97, 255,53,81,130,109,255,8,117,235,121,40,178,174,0,182,49, 162,121,255,69,206,0,219,125,0,101,255,239,121,32,210,130, 36,231,32,125,81,142,215,158,4,178,255,0,40,251,125,125, 219,89,130,0,166,255,24,65,194,125,255,125,77,125,93,125, 202,24,138,174,178,32,255,85,194,40,85,36,174,174,125,210, 85,255,53,16,93,206,40,130,170,202,93,255,0,24,117,255, 97,113,105,81,255,186,194,57,69,206,57,53,223,190,4,255, 85,97,130,255,85,0,125,223,85,219,0,215,146,77,40,239, 89,36,142,154,227,0,255,85,162,0,162,0,235,178,45,166, 0,247,255,20,69,210,89,142,53,255,40,146,166,255,69,0, 174,154,142,130,162,0,215,255,0,89,40,255,166,61,146,69, 162,40,255,32,121,255,117,178,0,186,206,0,57,215,215,81, 158,77,166,210,77,89,210,0,24,202,150,186,0,255,20,97, 57,170,235,251,16,73,142,251,93,0,202,0,255,121,219,4, 73,219,8,162,206,16,219,93,117,0,255,8,130,174,223,45 }; static const CImg<Tuchar> colormap(pal,1,256,1,3,false); return colormap;
0
[ "CWE-119", "CWE-787" ]
CImg
ac8003393569aba51048c9d67e1491559877b1d1
284,560,327,455,937,440,000,000,000,000,000,000,000
53
.
bool has_int_value() const { return state == SHORT_DATA_VALUE && value.type_handler()->cmp_type() == INT_RESULT; }
0
[ "CWE-617" ]
server
807945f2eb5fa22e6f233cc17b85a2e141efe2c8
205,905,049,443,590,330,000,000,000,000,000,000,000
5
MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order... When doing condition pushdown from HAVING into WHERE, Item_equal::create_pushable_equalities() calls item->set_extraction_flag(IMMUTABLE_FL) for constant items. Then, Item::cleanup_excluding_immutables_processor() checks for this flag to see if it should call item->cleanup() or leave the item as-is. The failure happens when a constant item has a non-constant one inside it, like: (tbl.col=0 AND impossible_cond) item->walk(cleanup_excluding_immutables_processor) works in a bottom-up way so it 1. will call Item_func_eq(tbl.col=0)->cleanup() 2. will not call Item_cond_and->cleanup (as the AND is constant) This creates an item tree where a fixed Item has an un-fixed Item inside it which eventually causes an assertion failure. Fixed by introducing this rule: instead of just calling item->set_extraction_flag(IMMUTABLE_FL); we call Item::walk() to set the flag for all sub-items of the item.
static void php_stream_apply_filter_list(php_stream *stream, char *filterlist, int read_chain, int write_chain TSRMLS_DC) /* {{{ */ { char *p, *token; php_stream_filter *temp_filter; p = php_strtok_r(filterlist, "|", &token); while (p) { php_url_decode(p, strlen(p)); if (read_chain) { if ((temp_filter = php_stream_filter_create(p, NULL, php_stream_is_persistent(stream) TSRMLS_CC))) { php_stream_filter_append(&stream->readfilters, temp_filter); } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to create filter (%s)", p); } } if (write_chain) { if ((temp_filter = php_stream_filter_create(p, NULL, php_stream_is_persistent(stream) TSRMLS_CC))) { php_stream_filter_append(&stream->writefilters, temp_filter); } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to create filter (%s)", p); } } p = php_strtok_r(NULL, "|", &token); } }
0
[]
php-src
2438490addfbfba51e12246a74588b2382caa08a
160,027,849,059,151,310,000,000,000,000,000,000,000
25
slim post data
static void init_gro_hash(struct napi_struct *napi) { int i; for (i = 0; i < GRO_HASH_BUCKETS; i++) { INIT_LIST_HEAD(&napi->gro_hash[i].list); napi->gro_hash[i].count = 0; } napi->gro_bitmask = 0;
0
[ "CWE-416" ]
linux
a4270d6795b0580287453ea55974d948393e66ef
225,770,147,666,052,800,000,000,000,000,000,000,000
10
net-gro: fix use-after-free read in napi_gro_frags() If a network driver provides to napi_gro_frags() an skb with a page fragment of exactly 14 bytes, the call to gro_pull_from_frag0() will 'consume' the fragment by calling skb_frag_unref(skb, 0), and the page might be freed and reused. Reading eth->h_proto at the end of napi_frags_skb() might read mangled data, or crash under specific debugging features. BUG: KASAN: use-after-free in napi_frags_skb net/core/dev.c:5833 [inline] BUG: KASAN: use-after-free in napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841 Read of size 2 at addr ffff88809366840c by task syz-executor599/8957 CPU: 1 PID: 8957 Comm: syz-executor599 Not tainted 5.2.0-rc1+ #32 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0x172/0x1f0 lib/dump_stack.c:113 print_address_description.cold+0x7c/0x20d mm/kasan/report.c:188 __kasan_report.cold+0x1b/0x40 mm/kasan/report.c:317 kasan_report+0x12/0x20 mm/kasan/common.c:614 __asan_report_load_n_noabort+0xf/0x20 mm/kasan/generic_report.c:142 napi_frags_skb net/core/dev.c:5833 [inline] napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841 tun_get_user+0x2f3c/0x3ff0 drivers/net/tun.c:1991 tun_chr_write_iter+0xbd/0x156 drivers/net/tun.c:2037 call_write_iter include/linux/fs.h:1872 [inline] do_iter_readv_writev+0x5f8/0x8f0 fs/read_write.c:693 do_iter_write fs/read_write.c:970 [inline] do_iter_write+0x184/0x610 fs/read_write.c:951 vfs_writev+0x1b3/0x2f0 fs/read_write.c:1015 do_writev+0x15b/0x330 fs/read_write.c:1058 Fixes: a50e233c50db ("net-gro: restore frag0 optimization") Signed-off-by: Eric Dumazet <[email protected]> Reported-by: syzbot <[email protected]> Signed-off-by: David S. Miller <[email protected]>
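The syzbot report above boils down to reading the Ethernet header after the fragment that held it may already have been unreferenced and recycled. The snippet below is a deliberately simplified illustration of the safe ordering, copying the bytes that are still needed before releasing the backing buffer; the types and names are invented, and this is not the net-gro code.

#include <stdint.h>
#include <string.h>
#include <stdlib.h>

struct frag { unsigned char *page; size_t len; };

static void frag_unref(struct frag *f) { free(f->page); f->page = NULL; }

/* read h_proto while the fragment is still guaranteed to be alive,
 * then release it; never touch f->page afterwards */
static uint16_t take_proto_then_release(struct frag *f)
{
    uint16_t proto = 0;                      /* stays in network byte order */

    if (f->len >= 14)                        /* Ethernet header size */
        memcpy(&proto, f->page + 12, 2);     /* bytes 12..13 hold h_proto */
    frag_unref(f);
    return proto;
}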
int LUKS2_keyslot_for_segment(struct luks2_hdr *hdr, int keyslot, int segment) { int r = -EINVAL; /* no need to check anything */ if (segment == CRYPT_ANY_SEGMENT) return 0; /* ok */ if (segment == CRYPT_DEFAULT_SEGMENT) { segment = LUKS2_get_default_segment(hdr); if (segment < 0) return segment; } r = _keyslot_for_segment(hdr, keyslot, segment); if (r < 0) return r; return r >= 1 ? 0 : -ENOENT; }
0
[ "CWE-345" ]
cryptsetup
0113ac2d889c5322659ad0596d4cfc6da53e356c
55,227,386,064,018,400,000,000,000,000,000,000,000
19
Fix CVE-2021-4122 - LUKS2 reencryption crash recovery attack Fix possible attacks against data confidentiality through LUKS2 online reencryption extension crash recovery. An attacker can modify on-disk metadata to simulate decryption in progress with crashed (unfinished) reencryption step and persistently decrypt part of the LUKS device. This attack requires repeated physical access to the LUKS device but no knowledge of user passphrases. The decryption step is performed after a valid user activates the device with a correct passphrase and modified metadata. There are no visible warnings for the user that such recovery happened (except using the luksDump command). The attack can also be reversed afterward (simulating crashed encryption from a plaintext) with possible modification of revealed plaintext. The problem was caused by reusing a mechanism designed for actual reencryption operation without reassessing the security impact for new encryption and decryption operations. While the reencryption requires calculating and verifying both key digests, no digest was needed to initiate decryption recovery if the destination is plaintext (no encryption key). Also, some metadata (like encryption cipher) is not protected, and an attacker could change it. Note that LUKS2 protects visible metadata only when a random change occurs. It does not protect against intentional modification but such modification must not cause a violation of data confidentiality. The fix introduces additional digest protection of reencryption metadata. The digest is calculated from known keys and critical reencryption metadata. Now an attacker cannot create correct metadata digest without knowledge of a passphrase for used keyslots. For more details, see LUKS2 On-Disk Format Specification version 1.1.0.
static int addr_contains(IPAddressOrRanges *parent, IPAddressOrRanges *child, int length) { unsigned char p_min[ADDR_RAW_BUF_LEN], p_max[ADDR_RAW_BUF_LEN]; unsigned char c_min[ADDR_RAW_BUF_LEN], c_max[ADDR_RAW_BUF_LEN]; int p, c; if (child == NULL || parent == child) return 1; if (parent == NULL) return 0; p = 0; for (c = 0; c < sk_IPAddressOrRange_num(child); c++) { if (!extract_min_max(sk_IPAddressOrRange_value(child, c), c_min, c_max, length)) return -1; for (;; p++) { if (p >= sk_IPAddressOrRange_num(parent)) return 0; if (!extract_min_max(sk_IPAddressOrRange_value(parent, p), p_min, p_max, length)) return 0; if (memcmp(p_max, c_max, length) < 0) continue; if (memcmp(p_min, c_min, length) > 0) return 0; break; } } return 1; }
0
[ "CWE-119", "CWE-787" ]
openssl
068b963bb7afc57f5bdd723de0dd15e7795d5822
74,487,212,512,330,440,000,000,000,000,000,000,000
33
Avoid out-of-bounds read Fixes CVE 2017-3735 Reviewed-by: Kurt Roeckx <[email protected]> (Merged from https://github.com/openssl/openssl/pull/4276) (cherry picked from commit b23171744b01e473ebbfd6edad70c1c3825ffbcd)
int ssl3_get_server_certificate(SSL *s) { int al,i,ok,ret= -1; unsigned long n,nc,llen,l; X509 *x=NULL; const unsigned char *q,*p; unsigned char *d; STACK_OF(X509) *sk=NULL; SESS_CERT *sc; EVP_PKEY *pkey=NULL; int need_cert = 1; /* VRS: 0=> will allow null cert if auth == KRB5 */ n=s->method->ssl_get_message(s, SSL3_ST_CR_CERT_A, SSL3_ST_CR_CERT_B, -1, s->max_cert_list, &ok); if (!ok) return((int)n); if ((s->s3->tmp.message_type == SSL3_MT_SERVER_KEY_EXCHANGE) || ((s->s3->tmp.new_cipher->algorithm_auth & SSL_aKRB5) && (s->s3->tmp.message_type == SSL3_MT_SERVER_DONE))) { s->s3->tmp.reuse_message=1; return(1); } if (s->s3->tmp.message_type != SSL3_MT_CERTIFICATE) { al=SSL_AD_UNEXPECTED_MESSAGE; SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,SSL_R_BAD_MESSAGE_TYPE); goto f_err; } p=d=(unsigned char *)s->init_msg; if ((sk=sk_X509_new_null()) == NULL) { SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,ERR_R_MALLOC_FAILURE); goto err; } n2l3(p,llen); if (llen+3 != n) { al=SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,SSL_R_LENGTH_MISMATCH); goto f_err; } for (nc=0; nc<llen; ) { n2l3(p,l); if ((l+nc+3) > llen) { al=SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,SSL_R_CERT_LENGTH_MISMATCH); goto f_err; } q=p; x=d2i_X509(NULL,&q,l); if (x == NULL) { al=SSL_AD_BAD_CERTIFICATE; SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,ERR_R_ASN1_LIB); goto f_err; } if (q != (p+l)) { al=SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,SSL_R_CERT_LENGTH_MISMATCH); goto f_err; } if (!sk_X509_push(sk,x)) { SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,ERR_R_MALLOC_FAILURE); goto err; } x=NULL; nc+=l+3; p=q; } i=ssl_verify_cert_chain(s,sk); if ((s->verify_mode != SSL_VERIFY_NONE) && (i <= 0) #ifndef OPENSSL_NO_KRB5 && !((s->s3->tmp.new_cipher->algorithm_mkey & SSL_kKRB5) && (s->s3->tmp.new_cipher->algorithm_auth & SSL_aKRB5)) #endif /* OPENSSL_NO_KRB5 */ ) { al=ssl_verify_alarm_type(s->verify_result); SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,SSL_R_CERTIFICATE_VERIFY_FAILED); goto f_err; } ERR_clear_error(); /* but we keep s->verify_result */ sc=ssl_sess_cert_new(); if (sc == NULL) goto err; if (s->session->sess_cert) ssl_sess_cert_free(s->session->sess_cert); s->session->sess_cert=sc; sc->cert_chain=sk; /* Inconsistency alert: cert_chain does include the peer's * certificate, which we don't include in s3_srvr.c */ x=sk_X509_value(sk,0); sk=NULL; /* VRS 19990621: possible memory leak; sk=null ==> !sk_pop_free() @end*/ pkey=X509_get_pubkey(x); /* VRS: allow null cert if auth == KRB5 */ need_cert = ((s->s3->tmp.new_cipher->algorithm_mkey & SSL_kKRB5) && (s->s3->tmp.new_cipher->algorithm_auth & SSL_aKRB5)) ? 0 : 1; #ifdef KSSL_DEBUG printf("pkey,x = %p, %p\n", pkey,x); printf("ssl_cert_type(x,pkey) = %d\n", ssl_cert_type(x,pkey)); printf("cipher, alg, nc = %s, %lx, %lx, %d\n", s->s3->tmp.new_cipher->name, s->s3->tmp.new_cipher->algorithm_mkey, s->s3->tmp.new_cipher->algorithm_auth, need_cert); #endif /* KSSL_DEBUG */ if (need_cert && ((pkey == NULL) || EVP_PKEY_missing_parameters(pkey))) { x=NULL; al=SSL3_AL_FATAL; SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE, SSL_R_UNABLE_TO_FIND_PUBLIC_KEY_PARAMETERS); goto f_err; } i=ssl_cert_type(x,pkey); if (need_cert && i < 0) { x=NULL; al=SSL3_AL_FATAL; SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE, SSL_R_UNKNOWN_CERTIFICATE_TYPE); goto f_err; } if (need_cert) { sc->peer_cert_type=i; CRYPTO_add(&x->references,1,CRYPTO_LOCK_X509); /* Why would the following ever happen? * We just created sc a couple of lines ago. 
*/ if (sc->peer_pkeys[i].x509 != NULL) X509_free(sc->peer_pkeys[i].x509); sc->peer_pkeys[i].x509=x; sc->peer_key= &(sc->peer_pkeys[i]); if (s->session->peer != NULL) X509_free(s->session->peer); CRYPTO_add(&x->references,1,CRYPTO_LOCK_X509); s->session->peer=x; } else { sc->peer_cert_type=i; sc->peer_key= NULL; if (s->session->peer != NULL) X509_free(s->session->peer); s->session->peer=NULL; } s->session->verify_result = s->verify_result; x=NULL; ret=1; if (0) { f_err: ssl3_send_alert(s,SSL3_AL_FATAL,al); } err: EVP_PKEY_free(pkey); X509_free(x); sk_X509_pop_free(sk,X509_free); return(ret); }
0
[]
openssl
ee2ffc279417f15fef3b1073c7dc81a908991516
40,966,642,144,390,030,000,000,000,000,000,000,000
185
Add Next Protocol Negotiation.
static int fsl_lpspi_dma_configure(struct spi_controller *controller) { int ret; enum dma_slave_buswidth buswidth; struct dma_slave_config rx = {}, tx = {}; struct fsl_lpspi_data *fsl_lpspi = spi_controller_get_devdata(controller); switch (fsl_lpspi_bytes_per_word(fsl_lpspi->config.bpw)) { case 4: buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; break; case 2: buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; break; case 1: buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; break; default: return -EINVAL; } tx.direction = DMA_MEM_TO_DEV; tx.dst_addr = fsl_lpspi->base_phys + IMX7ULP_TDR; tx.dst_addr_width = buswidth; tx.dst_maxburst = 1; ret = dmaengine_slave_config(controller->dma_tx, &tx); if (ret) { dev_err(fsl_lpspi->dev, "TX dma configuration failed with %d\n", ret); return ret; } rx.direction = DMA_DEV_TO_MEM; rx.src_addr = fsl_lpspi->base_phys + IMX7ULP_RDR; rx.src_addr_width = buswidth; rx.src_maxburst = 1; ret = dmaengine_slave_config(controller->dma_rx, &rx); if (ret) { dev_err(fsl_lpspi->dev, "RX dma configuration failed with %d\n", ret); return ret; } return 0; }
0
[ "CWE-400", "CWE-401" ]
linux
057b8945f78f76d0b04eeb5c27cd9225e5e7ad86
156,241,198,000,015,860,000,000,000,000,000,000,000
46
spi: lpspi: fix memory leak in fsl_lpspi_probe In fsl_lpspi_probe an SPI controller is allocated either via spi_alloc_slave or spi_alloc_master. In all but one error case this controller is put by going to the error handling code. This commit fixes the remaining case, when pm_runtime_get_sync fails and the probe should go to the error handling path. Fixes: 944c01a889d9 ("spi: lpspi: enable runtime pm for lpspi") Signed-off-by: Navid Emamdoost <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Mark Brown <[email protected]>
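The message says one error branch returned without ever releasing the controller that probe had allocated. The sketch below is only an outline of the rule that every failure after the allocation must converge on the same release call; the function and type names are illustrative, not the real driver's.

#include <stdlib.h>

struct controller { int refs; };

static struct controller *ctrl_alloc(void) { return calloc(1, sizeof(struct controller)); }
static void ctrl_put(struct controller *c) { free(c); }
static int enable_runtime_pm(struct controller *c) { (void)c; return -1; }
static int do_register(struct controller *c)      { (void)c; return 0; }

static int probe(void)
{
    int ret;
    struct controller *c = ctrl_alloc();
    if (!c)
        return -1;

    ret = enable_runtime_pm(c);
    if (ret)
        goto out_put;          /* previously this path just returned */

    ret = do_register(c);
    if (ret)
        goto out_put;

    return 0;

out_put:
    ctrl_put(c);
    return ret;
}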
static void kvm_machine_check(void) { #if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64) struct pt_regs regs = { .cs = 3, /* Fake ring 3 no matter what the guest ran on */ .flags = X86_EFLAGS_IF, }; do_machine_check(&regs, 0); #endif }
0
[ "CWE-400" ]
linux-2.6
9581d442b9058d3699b4be568b6e5eae38a41493
265,567,153,254,354,500,000,000,000,000,000,000,000
11
KVM: Fix fs/gs reload oops with invalid ldt kvm reloads the host's fs and gs blindly, however the underlying segment descriptors may be invalid due to the user modifying the ldt after loading them. Fix by using the safe accessors (loadsegment() and load_gs_index()) instead of home grown unsafe versions. This is CVE-2010-3698. KVM-Stable-Tag. Signed-off-by: Avi Kivity <[email protected]> Signed-off-by: Marcelo Tosatti <[email protected]>
Status V2UserDocumentParser::parseRoleVector(const BSONArray& rolesArray, std::vector<RoleName>* result) { std::vector<RoleName> roles; for (BSONObjIterator it(rolesArray); it.more(); it.next()) { if ((*it).type() != Object) { return Status(ErrorCodes::TypeMismatch, "Roles must be objects."); } RoleName role; Status status = parseRoleName((*it).Obj(), &role); if (!status.isOK()) return status; roles.push_back(role); } std::swap(*result, roles); return Status::OK(); }
0
[ "CWE-613" ]
mongo
e55d6e2292e5dbe2f97153251d8193d1cc89f5d7
97,682,726,723,940,820,000,000,000,000,000,000,000
16
SERVER-38984 Validate unique User ID on UserCache hit
static int oidc_handle_logout(request_rec *r, oidc_cfg *c, oidc_session_t *session) { oidc_provider_t *provider = NULL; /* pickup the command or URL where the user wants to go after logout */ char *url = NULL; char *error_str = NULL; char *error_description = NULL; oidc_util_get_request_parameter(r, OIDC_REDIRECT_URI_REQUEST_LOGOUT, &url); oidc_debug(r, "enter (url=%s)", url); if (oidc_is_front_channel_logout(url)) { return oidc_handle_logout_request(r, c, session, url); } else if (oidc_is_back_channel_logout(url)) { return oidc_handle_logout_backchannel(r, c); } if ((url == NULL) || (apr_strnatcmp(url, "") == 0)) { url = c->default_slo_url; } else { /* do input validation on the logout parameter value */ if (oidc_validate_redirect_url(r, c, url, TRUE, &error_str, &error_description) == FALSE) { return oidc_util_html_send_error(r, c->error_template, error_str, error_description, HTTP_BAD_REQUEST); } } oidc_get_provider_from_session(r, c, session, &provider); if ((provider != NULL) && (provider->end_session_endpoint != NULL)) { const char *id_token_hint = oidc_session_get_idtoken(r, session); char *logout_request = apr_pstrdup(r->pool, provider->end_session_endpoint); if (id_token_hint != NULL) { logout_request = apr_psprintf(r->pool, "%s%sid_token_hint=%s", logout_request, strchr(logout_request ? logout_request : "", OIDC_CHAR_QUERY) != NULL ? OIDC_STR_AMP : OIDC_STR_QUERY, oidc_util_escape_string(r, id_token_hint)); } if (url != NULL) { logout_request = apr_psprintf(r->pool, "%s%spost_logout_redirect_uri=%s", logout_request, strchr(logout_request ? logout_request : "", OIDC_CHAR_QUERY) != NULL ? OIDC_STR_AMP : OIDC_STR_QUERY, oidc_util_escape_string(r, url)); } url = logout_request; } return oidc_handle_logout_request(r, c, session, url); }
0
[ "CWE-79" ]
mod_auth_openidc
55ea0a085290cd2c8cdfdd960a230cbc38ba8b56
254,763,731,751,258,450,000,000,000,000,000,000,000
65
Add a function to escape Javascript characters
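The CWE-79 entry above is addressed in the project by adding a helper that escapes characters before they are echoed into a JavaScript context. The following is a rough standalone sketch of such an escaper, not mod_auth_openidc's implementation; it handles only a handful of characters and allocates with plain malloc.

#include <stdlib.h>
#include <string.h>

/* escape the characters most relevant to breaking out of a JS string:
 * backslash, quotes, newlines, and angle brackets */
static char *escape_js(const char *in)
{
    size_t i, j = 0, len = strlen(in);
    char *out = malloc(len * 2 + 1);     /* worst case: every char escaped */

    if (!out)
        return NULL;
    for (i = 0; i < len; i++) {
        switch (in[i]) {
        case '\\': case '"': case '\'':
            out[j++] = '\\'; out[j++] = in[i]; break;
        case '\n':
            out[j++] = '\\'; out[j++] = 'n'; break;
        case '<': case '>':
            /* drop angle brackets so "</script>" cannot close the block */
            break;
        default:
            out[j++] = in[i];
        }
    }
    out[j] = '\0';
    return out;
}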
int search_binary_handler(struct linux_binprm *bprm) { unsigned int depth = bprm->recursion_depth; int try,retval; struct linux_binfmt *fmt; pid_t old_pid, old_vpid; /* This allows 4 levels of binfmt rewrites before failing hard. */ if (depth > 5) return -ELOOP; retval = security_bprm_check(bprm); if (retval) return retval; retval = audit_bprm(bprm); if (retval) return retval; /* Need to fetch pid before load_binary changes it */ old_pid = current->pid; rcu_read_lock(); old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent)); rcu_read_unlock(); retval = -ENOENT; for (try=0; try<2; try++) { read_lock(&binfmt_lock); list_for_each_entry(fmt, &formats, lh) { int (*fn)(struct linux_binprm *) = fmt->load_binary; if (!fn) continue; if (!try_module_get(fmt->module)) continue; read_unlock(&binfmt_lock); bprm->recursion_depth = depth + 1; retval = fn(bprm); bprm->recursion_depth = depth; if (retval >= 0) { if (depth == 0) { trace_sched_process_exec(current, old_pid, bprm); ptrace_event(PTRACE_EVENT_EXEC, old_vpid); } put_binfmt(fmt); allow_write_access(bprm->file); if (bprm->file) fput(bprm->file); bprm->file = NULL; current->did_exec = 1; proc_exec_connector(current); return retval; } read_lock(&binfmt_lock); put_binfmt(fmt); if (retval != -ENOEXEC || bprm->mm == NULL) break; if (!bprm->file) { read_unlock(&binfmt_lock); return retval; } } read_unlock(&binfmt_lock); #ifdef CONFIG_MODULES if (retval != -ENOEXEC || bprm->mm == NULL) { break; } else { #define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e)) if (printable(bprm->buf[0]) && printable(bprm->buf[1]) && printable(bprm->buf[2]) && printable(bprm->buf[3])) break; /* -ENOEXEC */ if (try) break; /* -ENOEXEC */ request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2])); } #else break; #endif } return retval; }
0
[ "CWE-200" ]
linux-2.6
b66c5984017533316fd1951770302649baf1aa33
129,248,715,759,501,120,000,000,000,000,000,000,000
82
exec: do not leave bprm->interp on stack If a series of scripts are executed, each triggering module loading via unprintable bytes in the script header, kernel stack contents can leak into the command line. Normally execution of binfmt_script and binfmt_misc happens recursively. However, when modules are enabled, and unprintable bytes exist in the bprm->buf, execution will restart after attempting to load matching binfmt modules. Unfortunately, the logic in binfmt_script and binfmt_misc does not expect to get restarted. They leave bprm->interp pointing to their local stack. This means on restart bprm->interp is left pointing into unused stack memory which can then be copied into the userspace argv areas. After additional study, it seems that both recursion and restart remains the desirable way to handle exec with scripts, misc, and modules. As such, we need to protect the changes to interp. This changes the logic to require allocation for any changes to the bprm->interp. To avoid adding a new kmalloc to every exec, the default value is left as-is. Only when passing through binfmt_script or binfmt_misc does an allocation take place. For a proof of concept, see DoTest.sh from: http://www.halfdog.net/Security/2012/LinuxKernelBinfmtScriptStackDataDisclosure/ Signed-off-by: Kees Cook <[email protected]> Cc: halfdog <[email protected]> Cc: P J P <[email protected]> Cc: Alexander Viro <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
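A sketch of the approach the message describes — any change to bprm->interp goes through an allocation, so it can never alias a caller's stack buffer. The struct layout, the name binprm_change_interp and the error convention are assumptions, not the kernel's implementation; strdup stands in for kstrdup.

#include <stdlib.h>
#include <string.h>

struct binprm {
    const char *interp;       /* what will be executed as the interpreter */
    char *interp_alloc;       /* heap copy owned by this binprm, if any */
};

/* Replace interp with a heap-allocated copy of the new value. */
static int binprm_change_interp(const char *interp, struct binprm *bprm)
{
    char *copy = strdup(interp);

    if (copy == NULL)
        return -1;
    free(bprm->interp_alloc);  /* drop any previous copy we owned */
    bprm->interp_alloc = copy;
    bprm->interp = copy;
    return 0;
}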
static void nft_fwd_netdev_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { struct nft_fwd_netdev *priv = nft_expr_priv(expr); int oif = regs->data[priv->sreg_dev]; struct sk_buff *skb = pkt->skb; /* This is used by ifb only. */ skb->skb_iif = skb->dev->ifindex; skb_set_redirected(skb, nft_hook(pkt) == NF_NETDEV_INGRESS); nf_fwd_netdev_egress(pkt, oif); regs->verdict.code = NF_STOLEN; }
0
[ "CWE-269" ]
nf
b1a5983f56e371046dcf164f90bfaf704d2b89f6
202,565,483,239,757,800,000,000,000,000,000,000,000
15
netfilter: nf_tables_offload: incorrect flow offload action array size The immediate verdict expression needs to allocate one slot in the flow offload action array; the immediate data expression does not. The fwd and dup expressions also need to allocate one slot, but this was missing. Add a new offload_action interface to report whether an expression needs to allocate one slot in the flow offload action array. Fixes: be2861dc36d7 ("netfilter: nft_{fwd,dup}_netdev: add offload support") Reported-and-tested-by: Nick Gregory <[email protected]> Signed-off-by: Pablo Neira Ayuso <[email protected]>
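A minimal model of the offload_action interface the message describes: each expression reports whether it consumes one slot in the flow offload action array, and the array is sized by summing those reports. The type and field names below are assumptions, not nf_tables definitions.

#include <stdbool.h>
#include <stddef.h>

struct expr_ops {
    /* Does this expression emit an entry in the flow offload action array? */
    bool (*offload_action)(const void *priv);
};

struct expr {
    const struct expr_ops *ops;
    const void *priv;
};

/* Count the slots needed before allocating the action array. */
static size_t count_offload_actions(const struct expr *exprs, size_t n)
{
    size_t slots = 0;
    size_t i;

    for (i = 0; i < n; i++) {
        if (exprs[i].ops->offload_action != NULL &&
            exprs[i].ops->offload_action(exprs[i].priv))
            slots++;   /* verdict-immediate, fwd and dup would report true */
    }
    return slots;
}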
static void sas_scsi_clear_queue_port(struct list_head *error_q, struct asd_sas_port *port) { struct scsi_cmnd *cmd, *n; list_for_each_entry_safe(cmd, n, error_q, eh_entry) { struct domain_device *dev = cmd_to_domain_dev(cmd); struct asd_sas_port *x = dev->port; if (x == port) sas_eh_finish_cmd(cmd); } }
0
[]
linux
318aaf34f1179b39fa9c30fa0f3288b645beee39
52,232,986,869,252,900,000,000,000,000,000,000,000
13
scsi: libsas: defer ata device eh commands to libata When ata device doing EH, some commands still attached with tasks are not passed to libata when abort failed or recover failed, so libata did not handle these commands. After these commands done, sas task is freed, but ata qc is not freed. This will cause ata qc leak and trigger a warning like below: WARNING: CPU: 0 PID: 28512 at drivers/ata/libata-eh.c:4037 ata_eh_finish+0xb4/0xcc CPU: 0 PID: 28512 Comm: kworker/u32:2 Tainted: G W OE 4.14.0#1 ...... Call trace: [<ffff0000088b7bd0>] ata_eh_finish+0xb4/0xcc [<ffff0000088b8420>] ata_do_eh+0xc4/0xd8 [<ffff0000088b8478>] ata_std_error_handler+0x44/0x8c [<ffff0000088b8068>] ata_scsi_port_error_handler+0x480/0x694 [<ffff000008875fc4>] async_sas_ata_eh+0x4c/0x80 [<ffff0000080f6be8>] async_run_entry_fn+0x4c/0x170 [<ffff0000080ebd70>] process_one_work+0x144/0x390 [<ffff0000080ec100>] worker_thread+0x144/0x418 [<ffff0000080f2c98>] kthread+0x10c/0x138 [<ffff0000080855dc>] ret_from_fork+0x10/0x18 If ata qc leaked too many, ata tag allocation will fail and io blocked for ever. As suggested by Dan Williams, defer ata device commands to libata and merge sas_eh_finish_cmd() with sas_eh_defer_cmd(). libata will handle ata qcs correctly after this. Signed-off-by: Jason Yan <[email protected]> CC: Xiaofei Tan <[email protected]> CC: John Garry <[email protected]> CC: Dan Williams <[email protected]> Reviewed-by: Dan Williams <[email protected]> Signed-off-by: Martin K. Petersen <[email protected]>
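A sketch of the merged finish/defer decision described above: commands attached to ATA devices are handed to libata's error handler so their qc is completed there, while everything else is finished directly. The types and names are stand-ins, not libsas code.

#include <stdbool.h>

struct device { bool is_ata; };
struct scsi_cmd { struct device *dev; };

static void complete_cmd(struct scsi_cmd *cmd) { (void)cmd; /* free task, finish command */ }
static void defer_to_libata(struct scsi_cmd *cmd) { (void)cmd; /* queue for libata EH */ }

/* One finish routine: ATA commands are deferred, everything else completes here. */
static void eh_finish_cmd(struct scsi_cmd *cmd)
{
    if (cmd->dev->is_ata)
        defer_to_libata(cmd);
    else
        complete_cmd(cmd);
}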
static void fixup_inode_flags(struct inode *dir, struct inode *inode) { struct btrfs_inode *b_dir = BTRFS_I(dir); struct btrfs_inode *b_inode = BTRFS_I(inode); if (b_dir->flags & BTRFS_INODE_NODATACOW) b_inode->flags |= BTRFS_INODE_NODATACOW; else b_inode->flags &= ~BTRFS_INODE_NODATACOW; if (b_dir->flags & BTRFS_INODE_COMPRESS) { b_inode->flags |= BTRFS_INODE_COMPRESS; b_inode->flags &= ~BTRFS_INODE_NOCOMPRESS; } else { b_inode->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS); } }
0
[ "CWE-310" ]
linux-2.6
9c52057c698fb96f8f07e7a4bcf4801a092bda89
201,290,330,167,052,600,000,000,000,000,000,000,000
18
Btrfs: fix hash overflow handling The handling for directory crc hash overflows was fairly obscure, split_leaf returns EOVERFLOW when we try to extend the item and that is supposed to bubble up to userland. For a while it did so, but along the way we added better handling of errors and forced the FS readonly if we hit IO errors during the directory insertion. Along the way, we started testing only for EEXIST and the EOVERFLOW case was dropped. The end result is that we may force the FS readonly if we catch a directory hash bucket overflow. This fixes a few problem spots. First I add tests for EOVERFLOW in the places where we can safely just return the error up the chain. btrfs_rename is harder though, because it tries to insert the new directory item only after it has already unlinked anything the rename was going to overwrite. Rather than adding very complex logic, I added a helper to test for the hash overflow case early while it is still safe to bail out. Snapshot and subvolume creation had a similar problem, so they are using the new helper now too. Signed-off-by: Chris Mason <[email protected]> Reported-by: Pascal Junod <[email protected]>
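To illustrate the early-check idea in the message — detect a directory name-hash collision before anything has been unlinked and fail with EOVERFLOW up front — here is a toy standalone model; the hash function, the flat array of existing hashes and the helper name are all assumptions, not btrfs structures.

#include <stddef.h>
#include <stdint.h>

struct dir {
    const uint64_t *name_hashes;   /* hashes of entries already in the directory */
    size_t count;
};

static uint64_t name_hash(const char *name)
{
    uint64_t h = 14695981039346656037ULL;   /* FNV-1a, just for the sketch */

    for (; *name; name++)
        h = (h ^ (unsigned char)*name) * 1099511628211ULL;
    return h;
}

/* Refuse the create/rename up front if the target name's hash bucket is taken. */
static int check_dir_item_collision(const struct dir *dir, const char *name)
{
    uint64_t h = name_hash(name);
    size_t i;

    for (i = 0; i < dir->count; i++)
        if (dir->name_hashes[i] == h)
            return -75;   /* -EOVERFLOW */
    return 0;
}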
int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len) { size_t off; ssize_t n; int npages, i; n = iov_iter_get_pages(iter, sgl->pages, len, ALG_MAX_PAGES, &off); if (n < 0) return n; npages = (off + n + PAGE_SIZE - 1) >> PAGE_SHIFT; if (WARN_ON(npages == 0)) return -EINVAL; /* Add one extra for linking */ sg_init_table(sgl->sg, npages + 1); for (i = 0, len = n; i < npages; i++) { int plen = min_t(int, len, PAGE_SIZE - off); sg_set_page(sgl->sg + i, sgl->pages[i], plen, off); off = 0; len -= plen; } sg_mark_end(sgl->sg + npages - 1); sgl->npages = npages; return n; }
0
[ "CWE-416" ]
linux
9060cb719e61b685ec0102574e10337fa5f445ea
96,856,754,790,956,920,000,000,000,000,000,000,000
29
net: crypto set sk to NULL when af_alg_release. KASAN has found use-after-free in sockfs_setattr. The existed commit 6d8c50dcb029 ("socket: close race condition between sock_close() and sockfs_setattr()") is to fix this simillar issue, but it seems to ignore that crypto module forgets to set the sk to NULL after af_alg_release. KASAN report details as below: BUG: KASAN: use-after-free in sockfs_setattr+0x120/0x150 Write of size 4 at addr ffff88837b956128 by task syz-executor0/4186 CPU: 2 PID: 4186 Comm: syz-executor0 Not tainted xxx + #1 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014 Call Trace: dump_stack+0xca/0x13e print_address_description+0x79/0x330 ? vprintk_func+0x5e/0xf0 kasan_report+0x18a/0x2e0 ? sockfs_setattr+0x120/0x150 sockfs_setattr+0x120/0x150 ? sock_register+0x2d0/0x2d0 notify_change+0x90c/0xd40 ? chown_common+0x2ef/0x510 chown_common+0x2ef/0x510 ? chmod_common+0x3b0/0x3b0 ? __lock_is_held+0xbc/0x160 ? __sb_start_write+0x13d/0x2b0 ? __mnt_want_write+0x19a/0x250 do_fchownat+0x15c/0x190 ? __ia32_sys_chmod+0x80/0x80 ? trace_hardirqs_on_thunk+0x1a/0x1c __x64_sys_fchownat+0xbf/0x160 ? lockdep_hardirqs_on+0x39a/0x5e0 do_syscall_64+0xc8/0x580 entry_SYSCALL_64_after_hwframe+0x49/0xbe RIP: 0033:0x462589 Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48 RSP: 002b:00007fb4b2c83c58 EFLAGS: 00000246 ORIG_RAX: 0000000000000104 RAX: ffffffffffffffda RBX: 000000000072bfa0 RCX: 0000000000462589 RDX: 0000000000000000 RSI: 00000000200000c0 RDI: 0000000000000007 RBP: 0000000000000005 R08: 0000000000001000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 00007fb4b2c846bc R13: 00000000004bc733 R14: 00000000006f5138 R15: 00000000ffffffff Allocated by task 4185: kasan_kmalloc+0xa0/0xd0 __kmalloc+0x14a/0x350 sk_prot_alloc+0xf6/0x290 sk_alloc+0x3d/0xc00 af_alg_accept+0x9e/0x670 hash_accept+0x4a3/0x650 __sys_accept4+0x306/0x5c0 __x64_sys_accept4+0x98/0x100 do_syscall_64+0xc8/0x580 entry_SYSCALL_64_after_hwframe+0x49/0xbe Freed by task 4184: __kasan_slab_free+0x12e/0x180 kfree+0xeb/0x2f0 __sk_destruct+0x4e6/0x6a0 sk_destruct+0x48/0x70 __sk_free+0xa9/0x270 sk_free+0x2a/0x30 af_alg_release+0x5c/0x70 __sock_release+0xd3/0x280 sock_close+0x1a/0x20 __fput+0x27f/0x7f0 task_work_run+0x136/0x1b0 exit_to_usermode_loop+0x1a7/0x1d0 do_syscall_64+0x461/0x580 entry_SYSCALL_64_after_hwframe+0x49/0xbe Syzkaller reproducer: r0 = perf_event_open(&(0x7f0000000000)={0x0, 0x70, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, @perf_config_ext}, 0x0, 0x0, 0xffffffffffffffff, 0x0) r1 = socket$alg(0x26, 0x5, 0x0) getrusage(0x0, 0x0) bind(r1, &(0x7f00000001c0)=@alg={0x26, 'hash\x00', 0x0, 0x0, 'sha256-ssse3\x00'}, 0x80) r2 = accept(r1, 0x0, 0x0) r3 = accept4$unix(r2, 0x0, 0x0, 0x0) r4 = dup3(r3, r0, 0x0) fchownat(r4, &(0x7f00000000c0)='\x00', 0x0, 0x0, 0x1000) Fixes: 6d8c50dcb029 ("socket: close race condition between sock_close() and sockfs_setattr()") Signed-off-by: Mao Wenan <[email protected]> Signed-off-by: David S. Miller <[email protected]>
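The fix the message describes boils down to clearing the socket's sk pointer once the protocol state has been released, so later operations (such as setattr on the socket inode) test for NULL instead of touching freed memory. A standalone sketch with stand-in types:

#include <stddef.h>

struct sk { int refcnt; };
struct socket { struct sk *sk; };

static void sk_put(struct sk *sk) { (void)sk; /* drop the reference, possibly freeing sk */ }

/* Release the algorithm socket and make the dangling pointer impossible. */
static void alg_release(struct socket *sock)
{
    if (sock->sk != NULL) {
        sk_put(sock->sk);
        sock->sk = NULL;   /* the missing step in the original bug */
    }
}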
static void extract_nonewprivs(ProcessHandle sandbox) { struct stat s; if (process_rootfs_stat(sandbox, RUN_NONEWPRIVS_CFG, &s) == 0) arg_nonewprivs = 1; }
0
[ "CWE-269", "CWE-94" ]
firejail
27cde3d7d1e4e16d4190932347c7151dc2a84c50
163,570,683,727,811,400,000,000,000,000,000,000,000
6
fixing CVE-2022-31214
IOBuf::IOBuf(CreateOp, std::size_t capacity) : next_(this), prev_(this), data_(nullptr), length_(0), flagsAndSharedInfo_(0) { SharedInfo* info; allocExtBuffer(capacity, &buf_, &info, &capacity_); setSharedInfo(info); data_ = buf_; }
0
[ "CWE-787" ]
folly
4f304af1411e68851bdd00ef6140e9de4616f7d3
59,633,191,805,228,320,000,000,000,000,000,000,000
11
[folly] Add additional overflow checks to IOBuf - CVE-2021-24036 Summary: As per title CVE-2021-24036 Reviewed By: jan Differential Revision: D27938605 fbshipit-source-id: 7481c54ae6fbb7b67b15b3631d5357c2f7043f9c
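The message only states that additional overflow checks were added; a generic example of the kind of check meant here — validating a size computation before it is used for an allocation instead of letting it wrap — is sketched below in plain C. This is not folly's implementation; an allocator front-end would call it before allocating and treat false as an allocation failure.

#include <stdbool.h>
#include <stddef.h>

/* Add two sizes, reporting failure instead of wrapping around. */
static bool checked_add(size_t a, size_t b, size_t *out)
{
    if (a > (size_t)-1 - b)
        return false;   /* a + b would overflow SIZE_MAX */
    *out = a + b;
    return true;
}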
static bool checkreturn pb_dec_varint(pb_istream_t *stream, const pb_field_t *field, void *dest) { uint64_t value; if (!pb_decode_varint(stream, &value)) return false; switch (field->data_size) { case 1: *(int8_t*)dest = (int8_t)value; break; case 2: *(int16_t*)dest = (int16_t)value; break; case 4: *(int32_t*)dest = (int32_t)value; break; case 8: *(int64_t*)dest = (int64_t)value; break; default: PB_RETURN_ERROR(stream, "invalid data_size"); } return true; }
0
[ "CWE-125" ]
nanopb
7b396821ddd06df8e39143f16e1dc0a4645b89a3
83,107,385,871,670,190,000,000,000,000,000,000,000
17
Fix invalid free() after failed realloc() (GHSA-gcx3-7m76-287p)
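The underlying pattern of the fix is the classic safe-realloc idiom: assign realloc's result to a temporary and only overwrite the original pointer on success, so a failure never loses the block or frees it twice. A minimal sketch:

#include <stdlib.h>

/* Grow *buf to new_cap bytes; on failure *buf stays valid and untouched. */
static int grow_buffer(char **buf, size_t *cap, size_t new_cap)
{
    char *tmp = realloc(*buf, new_cap);

    if (tmp == NULL)
        return -1;   /* caller still owns *buf and frees it exactly once */
    *buf = tmp;
    *cap = new_cap;
    return 0;
}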
void HGraphBuilder::GenerateIsRegExp(CallRuntime* call) { ASSERT(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); HHasInstanceTypeAndBranch* result = new(zone()) HHasInstanceTypeAndBranch(value, JS_REGEXP_TYPE); return ast_context()->ReturnControl(result, call->id()); }
0
[]
node
fd80a31e0697d6317ce8c2d289575399f4e06d21
124,976,755,026,110,160,000,000,000,000,000,000,000
8
deps: backport 5f836c from v8 upstream Original commit message: Fix Hydrogen bounds check elimination When combining bounds checks, they must all be moved before the first load/store that they are guarding. BUG=chromium:344186 LOG=y [email protected] Review URL: https://codereview.chromium.org/172093002 git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@19475 ce2b1a6d-e550-0410-aec6-3dcde31c8c00 fix #8070
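A plain-C illustration of the rule stated in the message (this is not V8 code): when the checks guarding two accesses are merged into one test, that combined test has to execute before the first guarded access, never between the accesses.

#include <stddef.h>

/* The single, combined bounds check is hoisted above both loads it guards. */
static int sum_pair(const int *a, size_t len, size_t i)
{
    if (len < 3 || i > len - 3)
        return -1;   /* covers both a[i] and a[i + 2] */
    return a[i] + a[i + 2];
}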
static void vmx_inject_nmi(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (!enable_vnmi) { /* * Tracking the NMI-blocked state in software is built upon * finding the next open IRQ window. This, in turn, depends on * well-behaving guests: They have to keep IRQs disabled at * least as long as the NMI handler runs. Otherwise we may * cause NMI nesting, maybe breaking the guest. But as this is * highly unlikely, we can live with the residual risk. */ vmx->loaded_vmcs->soft_vnmi_blocked = 1; vmx->loaded_vmcs->vnmi_blocked_time = 0; } ++vcpu->stat.nmi_injections; vmx->loaded_vmcs->nmi_known_unmasked = false; if (vmx->rmode.vm86_active) { kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0); return; } vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); vmx_clear_hlt(vcpu); }
0
[ "CWE-787" ]
linux
04c4f2ee3f68c9a4bf1653d15f1a9a435ae33f7a
92,266,066,420,362,380,000,000,000,000,000,000,000
30
KVM: VMX: Don't use vcpu->run->internal.ndata as an array index __vmx_handle_exit() uses vcpu->run->internal.ndata as an index for an array access. Since vcpu->run is (can be) mapped to a user address space with a writer permission, the 'ndata' could be updated by the user process at anytime (the user process can set it to outside the bounds of the array). So, it is not safe that __vmx_handle_exit() uses the 'ndata' that way. Fixes: 1aa561b1a4c0 ("kvm: x86: Add "last CPU" to some KVM_EXIT information") Signed-off-by: Reiji Watanabe <[email protected]> Reviewed-by: Jim Mattson <[email protected]> Message-Id: <[email protected]> Cc: [email protected] Signed-off-by: Paolo Bonzini <[email protected]>
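The hardening the message describes can be modelled as: snapshot the user-writable index once, bounds-check the snapshot, and only then use it — never re-reading the shared field between check and use. A standalone sketch; the structure and limit names are assumptions.

#include <stdint.h>

#define NDATA_MAX 16u

/* Stand-in for a run structure mapped writable into userspace. */
struct shared_run {
    volatile uint32_t ndata;
    uint64_t data[NDATA_MAX];
};

static int append_exit_info(struct shared_run *run, uint64_t value)
{
    uint32_t n = run->ndata;   /* single snapshot of the shared field */

    if (n >= NDATA_MAX)
        return -1;             /* refuse out-of-bounds writes */
    run->data[n] = value;
    run->ndata = n + 1;
    return 0;
}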
static ssize_t mon_text_read_t(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct mon_reader_text *rp = file->private_data; struct mon_event_text *ep; struct mon_text_ptr ptr; ssize_t ret; mutex_lock(&rp->printf_lock); if (rp->printf_togo == 0) { ep = mon_text_read_wait(rp, file); if (IS_ERR(ep)) { mutex_unlock(&rp->printf_lock); return PTR_ERR(ep); } ptr.cnt = 0; ptr.pbuf = rp->printf_buf; ptr.limit = rp->printf_size; mon_text_read_head_t(rp, &ptr, ep); mon_text_read_statset(rp, &ptr, ep); ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, " %d", ep->length); mon_text_read_data(rp, &ptr, ep); rp->printf_togo = ptr.cnt; rp->printf_offset = 0; kmem_cache_free(rp->e_slab, ep); } ret = mon_text_copy_to_user(rp, buf, nbytes); mutex_unlock(&rp->printf_lock); return ret; }
0
[ "CWE-787" ]
linux
a5f596830e27e15f7a0ecd6be55e433d776986d8
226,882,409,573,894,800,000,000,000,000,000,000,000
37
usb: usbmon: Read text within supplied buffer size This change fixes buffer overflows and silent data corruption with the usbmon device driver text file read operations. Signed-off-by: Fredrik Noring <[email protected]> Signed-off-by: Pete Zaitcev <[email protected]> Cc: stable <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
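The fix amounts to bounded text formatting: track how much of the supplied buffer is already used and clamp what snprintf reports, so a long record can never overrun it. A sketch of that discipline; the struct and helper names are assumptions, not usbmon's.

#include <stdio.h>
#include <stddef.h>

struct textbuf {
    char *buf;
    size_t limit;   /* total capacity of buf */
    size_t cnt;     /* bytes used so far */
};

/* Append a formatted integer without ever writing past limit. */
static void buf_append_int(struct textbuf *t, int value)
{
    size_t space;
    int n;

    if (t->cnt >= t->limit)
        return;                        /* buffer already full */
    space = t->limit - t->cnt;
    n = snprintf(t->buf + t->cnt, space, " %d", value);
    if (n < 0)
        return;
    t->cnt += ((size_t)n < space) ? (size_t)n : space - 1;   /* count only what fit */
}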
static int airspy_set_mixer_gain(struct airspy *s) { int ret; u8 u8tmp; dev_dbg(s->dev, "mixer auto=%d->%d val=%d->%d\n", s->mixer_gain_auto->cur.val, s->mixer_gain_auto->val, s->mixer_gain->cur.val, s->mixer_gain->val); ret = airspy_ctrl_msg(s, CMD_SET_MIXER_AGC, 0, s->mixer_gain_auto->val, &u8tmp, 1); if (ret) goto err; if (s->mixer_gain_auto->val == false) { ret = airspy_ctrl_msg(s, CMD_SET_MIXER_GAIN, 0, s->mixer_gain->val, &u8tmp, 1); if (ret) goto err; } err: if (ret) dev_dbg(s->dev, "failed=%d\n", ret); return ret; }
0
[ "CWE-119" ]
media_tree
aa93d1fee85c890a34f2510a310e55ee76a27848
78,759,142,985,736,010,000,000,000,000,000,000,000
26
media: fix airspy usb probe error path Fix a memory leak on probe error of the airspy usb device driver. The problem is triggered when more than 64 usb devices register with v4l2 of type VFL_TYPE_SDR or VFL_TYPE_SUBDEV. The memory leak is caused by the probe function of the airspy driver mishandling errors and not freeing the corresponding control structures when an error occurs registering the device to the v4l2 core. A badusb device can emulate 64 of these devices, and then through continual emulated connect/disconnect of the 65th device, cause the kernel to run out of RAM and crash, thus causing a local DoS vulnerability. Fixes CVE-2016-5400 Signed-off-by: James Patrick-Evans <[email protected]> Reviewed-by: Kees Cook <[email protected]> Cc: [email protected] # 3.17+ Signed-off-by: Linus Torvalds <[email protected]>
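The leak class described here is the standard probe error-path problem: a later registration step fails and the resources set up by earlier steps are never released. A generic goto-unwind sketch with stand-in names (this is not the airspy driver's code):

#include <stdlib.h>

struct ctrl_handler { int initialized; };
struct dev_state { struct ctrl_handler ctrls; };

static int ctrl_handler_init(struct ctrl_handler *h) { h->initialized = 1; return 0; }
static void ctrl_handler_free(struct ctrl_handler *h) { h->initialized = 0; }
static int register_device(struct dev_state *s) { (void)s; return -1; /* may fail */ }

/* Every step that can fail unwinds everything set up before it. */
static int probe(void)
{
    struct dev_state *s = calloc(1, sizeof(*s));
    int ret;

    if (s == NULL)
        return -12;                 /* -ENOMEM */
    ret = ctrl_handler_init(&s->ctrls);
    if (ret)
        goto err_free_state;
    ret = register_device(s);
    if (ret)
        goto err_free_ctrls;        /* the step the buggy version skipped */
    return 0;

err_free_ctrls:
    ctrl_handler_free(&s->ctrls);
err_free_state:
    free(s);
    return ret;
}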
RAND_DRBG *RAND_DRBG_get0_master(void) { if (!RUN_ONCE(&rand_drbg_init, do_rand_drbg_init)) return NULL; return master_drbg; }
0
[ "CWE-330" ]
openssl
1b0fe00e2704b5e20334a16d3c9099d1ba2ef1be
22,985,913,611,822,387,000,000,000,000,000,000,000
7
drbg: ensure fork-safety without using a pthread_atfork handler When the new OpenSSL CSPRNG was introduced in version 1.1.1, it was announced in the release notes that it would be fork-safe, which the old CSPRNG hadn't been. The fork-safety was implemented using a fork count, which was incremented by a pthread_atfork handler. Initially, this handler was enabled by default. Unfortunately, the default behaviour had to be changed for other reasons in commit b5319bdbd095, so the new OpenSSL CSPRNG failed to keep its promise. This commit restores the fork-safety using a different approach. It replaces the fork count by a fork id, which coincides with the process id on UNIX-like operating systems and is zero on other operating systems. It is used to detect when an automatic reseed after a fork is necessary. To prevent a future regression, it also adds a test to verify that the child reseeds after fork. CVE-2019-1549 Reviewed-by: Paul Dale <[email protected]> Reviewed-by: Matt Caswell <[email protected]> (Merged from https://github.com/openssl/openssl/pull/9802)
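A minimal sketch of the fork-id scheme the message describes: record the process id when the DRBG is (re)seeded, and reseed before generating whenever the current pid no longer matches. The struct and function names are assumptions, not OpenSSL's internals.

#include <sys/types.h>
#include <unistd.h>

struct drbg_state {
    pid_t fork_id;   /* pid recorded at the last (re)seed */
};

static void drbg_reseed(struct drbg_state *d)
{
    /* ...mix in fresh entropy here... */
    d->fork_id = getpid();
}

/* A child produced by fork() sees a pid mismatch and reseeds automatically. */
static void drbg_generate(struct drbg_state *d /* , unsigned char *out, size_t len */)
{
    if (d->fork_id != getpid())
        drbg_reseed(d);
    /* ...produce the requested random bytes... */
}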
static inline int do_tuner_callback(struct dvb_frontend *fe, int cmd, int arg) { struct xc2028_data *priv = fe->tuner_priv; /* analog side (tuner-core) uses i2c_adap->algo_data. * digital side is not guaranteed to have algo_data defined. * * digital side will always have fe->dvb defined. * analog side (tuner-core) doesn't (yet) define fe->dvb. */ return (!fe->callback) ? -EINVAL : fe->callback(((fe->dvb) && (fe->dvb->priv)) ? fe->dvb->priv : priv->i2c_props.adap->algo_data, DVB_FRONTEND_COMPONENT_TUNER, cmd, arg); }
0
[ "CWE-416", "CWE-284" ]
linux
8dfbcc4351a0b6d2f2d77f367552f48ffefafe18
276,834,652,686,565,400,000,000,000,000,000,000,000
16
[media] xc2028: avoid use after free If struct xc2028_config is passed without a firmware name, the following trouble may happen: [11009.907205] xc2028 5-0061: type set to XCeive xc2028/xc3028 tuner [11009.907491] ================================================================== [11009.907750] BUG: KASAN: use-after-free in strcmp+0x96/0xb0 at addr ffff8803bd78ab40 [11009.907992] Read of size 1 by task modprobe/28992 [11009.907994] ============================================================================= [11009.907997] BUG kmalloc-16 (Tainted: G W ): kasan: bad access detected [11009.907999] ----------------------------------------------------------------------------- [11009.908008] INFO: Allocated in xhci_urb_enqueue+0x214/0x14c0 [xhci_hcd] age=0 cpu=3 pid=28992 [11009.908012] ___slab_alloc+0x581/0x5b0 [11009.908014] __slab_alloc+0x51/0x90 [11009.908017] __kmalloc+0x27b/0x350 [11009.908022] xhci_urb_enqueue+0x214/0x14c0 [xhci_hcd] [11009.908026] usb_hcd_submit_urb+0x1e8/0x1c60 [11009.908029] usb_submit_urb+0xb0e/0x1200 [11009.908032] usb_serial_generic_write_start+0xb6/0x4c0 [11009.908035] usb_serial_generic_write+0x92/0xc0 [11009.908039] usb_console_write+0x38a/0x560 [11009.908045] call_console_drivers.constprop.14+0x1ee/0x2c0 [11009.908051] console_unlock+0x40d/0x900 [11009.908056] vprintk_emit+0x4b4/0x830 [11009.908061] vprintk_default+0x1f/0x30 [11009.908064] printk+0x99/0xb5 [11009.908067] kasan_report_error+0x10a/0x550 [11009.908070] __asan_report_load1_noabort+0x43/0x50 [11009.908074] INFO: Freed in xc2028_set_config+0x90/0x630 [tuner_xc2028] age=1 cpu=3 pid=28992 [11009.908077] __slab_free+0x2ec/0x460 [11009.908080] kfree+0x266/0x280 [11009.908083] xc2028_set_config+0x90/0x630 [tuner_xc2028] [11009.908086] xc2028_attach+0x310/0x8a0 [tuner_xc2028] [11009.908090] em28xx_attach_xc3028.constprop.7+0x1f9/0x30d [em28xx_dvb] [11009.908094] em28xx_dvb_init.part.3+0x8e4/0x5cf4 [em28xx_dvb] [11009.908098] em28xx_dvb_init+0x81/0x8a [em28xx_dvb] [11009.908101] em28xx_register_extension+0xd9/0x190 [em28xx] [11009.908105] em28xx_dvb_register+0x10/0x1000 [em28xx_dvb] [11009.908108] do_one_initcall+0x141/0x300 [11009.908111] do_init_module+0x1d0/0x5ad [11009.908114] load_module+0x6666/0x9ba0 [11009.908117] SyS_finit_module+0x108/0x130 [11009.908120] entry_SYSCALL_64_fastpath+0x16/0x76 [11009.908123] INFO: Slab 0xffffea000ef5e280 objects=25 used=25 fp=0x (null) flags=0x2ffff8000004080 [11009.908126] INFO: Object 0xffff8803bd78ab40 @offset=2880 fp=0x0000000000000001 [11009.908130] Bytes b4 ffff8803bd78ab30: 01 00 00 00 2a 07 00 00 9d 28 00 00 01 00 00 00 ....*....(...... [11009.908133] Object ffff8803bd78ab40: 01 00 00 00 00 00 00 00 b0 1d c3 6a 00 88 ff ff ...........j.... [11009.908137] CPU: 3 PID: 28992 Comm: modprobe Tainted: G B W 4.5.0-rc1+ #43 [11009.908140] Hardware name: /NUC5i7RYB, BIOS RYBDWi35.86A.0350.2015.0812.1722 08/12/2015 [11009.908142] ffff8803bd78a000 ffff8802c273f1b8 ffffffff81932007 ffff8803c6407a80 [11009.908148] ffff8802c273f1e8 ffffffff81556759 ffff8803c6407a80 ffffea000ef5e280 [11009.908153] ffff8803bd78ab40 dffffc0000000000 ffff8802c273f210 ffffffff8155ccb4 [11009.908158] Call Trace: [11009.908162] [<ffffffff81932007>] dump_stack+0x4b/0x64 [11009.908165] [<ffffffff81556759>] print_trailer+0xf9/0x150 [11009.908168] [<ffffffff8155ccb4>] object_err+0x34/0x40 [11009.908171] [<ffffffff8155f260>] kasan_report_error+0x230/0x550 [11009.908175] [<ffffffff81237d71>] ? trace_hardirqs_off_caller+0x21/0x290 [11009.908179] [<ffffffff8155e926>] ? 
kasan_unpoison_shadow+0x36/0x50 [11009.908182] [<ffffffff8155f5c3>] __asan_report_load1_noabort+0x43/0x50 [11009.908185] [<ffffffff8155ea00>] ? __asan_register_globals+0x50/0xa0 [11009.908189] [<ffffffff8194cea6>] ? strcmp+0x96/0xb0 [11009.908192] [<ffffffff8194cea6>] strcmp+0x96/0xb0 [11009.908196] [<ffffffffa13ba4ac>] xc2028_set_config+0x15c/0x630 [tuner_xc2028] [11009.908200] [<ffffffffa13bac90>] xc2028_attach+0x310/0x8a0 [tuner_xc2028] [11009.908203] [<ffffffff8155ea78>] ? memset+0x28/0x30 [11009.908206] [<ffffffffa13ba980>] ? xc2028_set_config+0x630/0x630 [tuner_xc2028] [11009.908211] [<ffffffffa157a59a>] em28xx_attach_xc3028.constprop.7+0x1f9/0x30d [em28xx_dvb] [11009.908215] [<ffffffffa157aa2a>] ? em28xx_dvb_init.part.3+0x37c/0x5cf4 [em28xx_dvb] [11009.908219] [<ffffffffa157a3a1>] ? hauppauge_hvr930c_init+0x487/0x487 [em28xx_dvb] [11009.908222] [<ffffffffa01795ac>] ? lgdt330x_attach+0x1cc/0x370 [lgdt330x] [11009.908226] [<ffffffffa01793e0>] ? i2c_read_demod_bytes.isra.2+0x210/0x210 [lgdt330x] [11009.908230] [<ffffffff812e87d0>] ? ref_module.part.15+0x10/0x10 [11009.908233] [<ffffffff812e56e0>] ? module_assert_mutex_or_preempt+0x80/0x80 [11009.908238] [<ffffffffa157af92>] em28xx_dvb_init.part.3+0x8e4/0x5cf4 [em28xx_dvb] [11009.908242] [<ffffffffa157a6ae>] ? em28xx_attach_xc3028.constprop.7+0x30d/0x30d [em28xx_dvb] [11009.908245] [<ffffffff8195222d>] ? string+0x14d/0x1f0 [11009.908249] [<ffffffff8195381f>] ? symbol_string+0xff/0x1a0 [11009.908253] [<ffffffff81953720>] ? uuid_string+0x6f0/0x6f0 [11009.908257] [<ffffffff811a775e>] ? __kernel_text_address+0x7e/0xa0 [11009.908260] [<ffffffff8104b02f>] ? print_context_stack+0x7f/0xf0 [11009.908264] [<ffffffff812e9846>] ? __module_address+0xb6/0x360 [11009.908268] [<ffffffff8137fdc9>] ? is_ftrace_trampoline+0x99/0xe0 [11009.908271] [<ffffffff811a775e>] ? __kernel_text_address+0x7e/0xa0 [11009.908275] [<ffffffff81240a70>] ? debug_check_no_locks_freed+0x290/0x290 [11009.908278] [<ffffffff8104a24b>] ? dump_trace+0x11b/0x300 [11009.908282] [<ffffffffa13e8143>] ? em28xx_register_extension+0x23/0x190 [em28xx] [11009.908285] [<ffffffff81237d71>] ? trace_hardirqs_off_caller+0x21/0x290 [11009.908289] [<ffffffff8123ff56>] ? trace_hardirqs_on_caller+0x16/0x590 [11009.908292] [<ffffffff812404dd>] ? trace_hardirqs_on+0xd/0x10 [11009.908296] [<ffffffffa13e8143>] ? em28xx_register_extension+0x23/0x190 [em28xx] [11009.908299] [<ffffffff822dcbb0>] ? mutex_trylock+0x400/0x400 [11009.908302] [<ffffffff810021a1>] ? do_one_initcall+0x131/0x300 [11009.908306] [<ffffffff81296dc7>] ? call_rcu_sched+0x17/0x20 [11009.908309] [<ffffffff8159e708>] ? put_object+0x48/0x70 [11009.908314] [<ffffffffa1579f11>] em28xx_dvb_init+0x81/0x8a [em28xx_dvb] [11009.908317] [<ffffffffa13e81f9>] em28xx_register_extension+0xd9/0x190 [em28xx] [11009.908320] [<ffffffffa0150000>] ? 0xffffffffa0150000 [11009.908324] [<ffffffffa0150010>] em28xx_dvb_register+0x10/0x1000 [em28xx_dvb] [11009.908327] [<ffffffff810021b1>] do_one_initcall+0x141/0x300 [11009.908330] [<ffffffff81002070>] ? try_to_run_init_process+0x40/0x40 [11009.908333] [<ffffffff8123ff56>] ? trace_hardirqs_on_caller+0x16/0x590 [11009.908337] [<ffffffff8155e926>] ? kasan_unpoison_shadow+0x36/0x50 [11009.908340] [<ffffffff8155e926>] ? kasan_unpoison_shadow+0x36/0x50 [11009.908343] [<ffffffff8155e926>] ? kasan_unpoison_shadow+0x36/0x50 [11009.908346] [<ffffffff8155ea37>] ? 
__asan_register_globals+0x87/0xa0 [11009.908350] [<ffffffff8144da7b>] do_init_module+0x1d0/0x5ad [11009.908353] [<ffffffff812f2626>] load_module+0x6666/0x9ba0 [11009.908356] [<ffffffff812e9c90>] ? symbol_put_addr+0x50/0x50 [11009.908361] [<ffffffffa1580037>] ? em28xx_dvb_init.part.3+0x5989/0x5cf4 [em28xx_dvb] [11009.908366] [<ffffffff812ebfc0>] ? module_frob_arch_sections+0x20/0x20 [11009.908369] [<ffffffff815bc940>] ? open_exec+0x50/0x50 [11009.908374] [<ffffffff811671bb>] ? ns_capable+0x5b/0xd0 [11009.908377] [<ffffffff812f5e58>] SyS_finit_module+0x108/0x130 [11009.908379] [<ffffffff812f5d50>] ? SyS_init_module+0x1f0/0x1f0 [11009.908383] [<ffffffff81004044>] ? lockdep_sys_exit_thunk+0x12/0x14 [11009.908394] [<ffffffff822e6936>] entry_SYSCALL_64_fastpath+0x16/0x76 [11009.908396] Memory state around the buggy address: [11009.908398] ffff8803bd78aa00: 00 00 fc fc fc fc fc fc fc fc fc fc fc fc fc fc [11009.908401] ffff8803bd78aa80: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc [11009.908403] >ffff8803bd78ab00: fc fc fc fc fc fc fc fc 00 00 fc fc fc fc fc fc [11009.908405] ^ [11009.908407] ffff8803bd78ab80: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc [11009.908409] ffff8803bd78ac00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc [11009.908411] ================================================================== In order to avoid it, let's set the cached value of the firmware name to NULL after freeing it. While here, return an error if the memory allocation fails. Signed-off-by: Mauro Carvalho Chehab <[email protected]>
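Stripped of the driver specifics, the fix pattern is: free the cached firmware name, immediately reset the pointer to NULL, and report an allocation failure when taking the new copy. A standalone sketch where strdup stands in for kstrdup and the names are assumptions:

#include <stdlib.h>
#include <string.h>

struct tuner_state {
    char *fname;   /* cached firmware name, heap-allocated or NULL */
};

static int set_firmware_name(struct tuner_state *st, const char *name)
{
    free(st->fname);
    st->fname = NULL;              /* never leave a dangling pointer behind */
    if (name == NULL)
        return 0;                  /* caller explicitly cleared the name */
    st->fname = strdup(name);
    return st->fname ? 0 : -12;    /* -ENOMEM */
}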
static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div) { cputime_t res = cputime_div(time, div); return max_t(cputime_t, res, 1); }
0
[ "CWE-189" ]
linux
f8bd2258e2d520dff28c855658bd24bdafb5102d
297,885,766,771,935,800,000,000,000,000,000,000,000
6
remove div_long_long_rem x86 is currently the only arch that provides an optimized div_long_long_rem, and it has the downside that one has to be very careful that the divide doesn't overflow. The API is a little awkward, as the arguments for the unsigned divide are signed. The signed version also doesn't handle a negative divisor and produces worse code on 64bit archs. There is little incentive to keep this API alive, so this converts the few users to the new API. Signed-off-by: Roman Zippel <[email protected]> Cc: Ralf Baechle <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: john stultz <[email protected]> Cc: Christoph Lameter <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static int check_certificate_expiration (X509 *peercert, int silent) { if (option (OPTSSLVERIFYDATES) != MUTT_NO) { if (X509_cmp_current_time (X509_get0_notBefore (peercert)) >= 0) { if (!silent) { dprint (2, (debugfile, "Server certificate is not yet valid\n")); mutt_error (_("Server certificate is not yet valid")); mutt_sleep (2); } return 0; } if (X509_cmp_current_time (X509_get0_notAfter (peercert)) <= 0) { if (!silent) { dprint (2, (debugfile, "Server certificate has expired\n")); mutt_error (_("Server certificate has expired")); mutt_sleep (2); } return 0; } } return 1; }
0
[ "CWE-74" ]
mutt
c547433cdf2e79191b15c6932c57f1472bfb5ff4
336,165,923,395,599,670,000,000,000,000,000,000,000
28
Fix STARTTLS response injection attack. Thanks again to Damian Poddebniak and Fabian Ising from the Münster University of Applied Sciences for reporting this issue. Their summary in ticket 248 states the issue clearly: We found another STARTTLS-related issue in Mutt. Unfortunately, it affects SMTP, POP3 and IMAP. When the server responds with its "let's do TLS now message", e.g. A OK begin TLS\r\n in IMAP or +OK begin TLS\r\n in POP3, Mutt will also read any data after the \r\n and save it into some internal buffer for later processing. This is problematic, because a MITM attacker can inject arbitrary responses. There is a nice blogpost by Wietse Venema about a "command injection" in postfix (http://www.postfix.org/CVE-2011-0411.html). What we have here is the problem in reverse, i.e. not a command injection, but a "response injection." This commit fixes the issue by clearing the CONNECTION input buffer in mutt_ssl_starttls(). To make backporting this fix easier, the new functions only clear the top-level CONNECTION buffer; they don't handle nested buffering in mutt_zstrm.c or mutt_sasl.c. However both of those wrap the connection *after* STARTTLS, so this is currently okay. mutt_tunnel.c occurs before connecting, but it does not perform any nesting.
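The defence described above is simply to discard any bytes that were read ahead into the connection's input buffer before the TLS handshake starts, so injected plaintext can never be parsed later as a post-STARTTLS response. A sketch with assumed field names (not Mutt's CONNECTION struct):

#include <stddef.h>

struct connection {
    char inbuf[4096];
    size_t available;   /* bytes read from the wire but not yet consumed */
    size_t bufpos;      /* index of the next unconsumed byte */
};

static void conn_clear_input(struct connection *conn)
{
    conn->available = 0;
    conn->bufpos = 0;
}

/* Called right before negotiating TLS on a connection upgraded via STARTTLS. */
static int start_tls(struct connection *conn)
{
    conn_clear_input(conn);
    /* ...perform the TLS handshake here... */
    return 0;
}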
measured_bw_line_apply(measured_bw_line_t *parsed_line, smartlist_t *routerstatuses) { vote_routerstatus_t *rs = NULL; if (!routerstatuses) return 0; rs = smartlist_bsearch(routerstatuses, parsed_line->node_id, compare_digest_to_vote_routerstatus_entry); if (rs) { rs->has_measured_bw = 1; rs->measured_bw_kb = (uint32_t)parsed_line->bw_kb; } else { log_info(LD_DIRSERV, "Node ID %s not found in routerstatus list", parsed_line->node_hex); } return rs != NULL; }
0
[]
tor
02e05bd74dbec614397b696cfcda6525562a4675
107,891,025,617,698,880,000,000,000,000,000,000,000
20
When examining descriptors as a dirserver, reject ones with bad versions This is an extra fix for bug 21278: it ensures that these descriptors and platforms will never be listed in a legit consensus.
static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { int error = 0; struct inode *target; /* * If we are going to change the parent - check write permissions, * we'll need to flip '..'. */ if (new_dir != old_dir) { error = permission(old_dentry->d_inode, MAY_WRITE, NULL); if (error) return error; } error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry); if (error) return error; target = new_dentry->d_inode; if (target) { mutex_lock(&target->i_mutex); dentry_unhash(new_dentry); } if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry)) error = -EBUSY; else error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry); if (target) { if (!error) target->i_flags |= S_DEAD; mutex_unlock(&target->i_mutex); if (d_unhashed(new_dentry)) d_rehash(new_dentry); dput(new_dentry); } if (!error) if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) d_move(old_dentry,new_dentry); return error; }
0
[ "CWE-120" ]
linux-2.6
d70b67c8bc72ee23b55381bd6a884f4796692f77
143,741,278,809,206,520,000,000,000,000,000,000,000
42
[patch] vfs: fix lookup on deleted directory Lookup can install a child dentry for a deleted directory. This keeps the directory dentry alive, and the inode pinned in the cache and on disk, even after all external references have gone away. This isn't a big problem normally, since memory pressure or umount will clear out the directory dentry and its children, releasing the inode. But for UBIFS this causes problems because its orphan area can overflow. Fix this by returning ENOENT for all lookups on a S_DEAD directory before creating a child dentry. Thanks to Zoltan Sogor for noticing this while testing UBIFS, and Artem for the excellent analysis of the problem and testing. Reported-by: Artem Bityutskiy <[email protected]> Tested-by: Artem Bityutskiy <[email protected]> Signed-off-by: Miklos Szeredi <[email protected]> Signed-off-by: Al Viro <[email protected]>
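The core of the fix is a single early test: if the parent directory is already dead, return -ENOENT before a child dentry is ever created, so the dead directory cannot be pinned by new children. A standalone sketch with stand-in types; the flag and helper names are assumptions, not the VFS's.

#include <stdbool.h>
#include <stddef.h>

struct inode { unsigned int flags; };
struct dentry { struct inode *d_inode; };

#define S_DEAD_FLAG 0x10u

static bool dir_is_dead(const struct inode *dir)
{
    return (dir->flags & S_DEAD_FLAG) != 0;
}

/* Bail out before allocating a child dentry for a deleted directory. */
static int lookup_in_dir(struct inode *dir, const char *name, struct dentry **out)
{
    (void)name;
    *out = NULL;
    if (dir_is_dead(dir))
        return -2;   /* -ENOENT */
    /* ...allocate the child dentry and ask the filesystem to look it up... */
    return 0;
}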