Schema (column: dtype, observed min to max):
func: string, lengths 0 to 484k
target: int64, 0 to 1
cwe: sequence, lengths 0 to 4
project: string, 799 classes
commit_id: string, lengths 40 to 40
hash: float64, 1,215,700,430,453,689,100,000,000B to 340,281,914,521,452,260,000,000,000,000B
size: int64, 1 to 24k
message: string, lengths 0 to 13.3k
char * get_fru_area_str(uint8_t * data, uint32_t * offset) { static const char bcd_plus[] = "0123456789 -.:,_"; char * str; int len, off, size, i, j, k, typecode, char_idx; union { uint32_t bits; char chars[4]; } u; size = 0; off = *offset; /* bits 6:7 contain format */ typecode = ((data[off] & 0xC0) >> 6); // printf("Typecode:%i\n", typecode); /* bits 0:5 contain length */ len = data[off++]; len &= 0x3f; switch (typecode) { case 0: /* 00b: binary/unspecified */ case 1: /* 01b: BCD plus */ /* hex dump or BCD -> 2x length */ size = (len * 2); break; case 2: /* 10b: 6-bit ASCII */ /* 4 chars per group of 1-3 bytes */ size = (((len * 4 + 2) / 3) & ~3); break; case 3: /* 11b: 8-bit ASCII */ /* no length adjustment */ size = len; break; } if (size < 1) { *offset = off; return NULL; } str = malloc(size+1); if (!str) return NULL; memset(str, 0, size+1); if (size == 0) { str[0] = '\0'; *offset = off; return str; } switch (typecode) { case 0: /* Binary */ strncpy(str, buf2str(&data[off], len), size); break; case 1: /* BCD plus */ for (k = 0; k < size; k++) str[k] = bcd_plus[((data[off + k / 2] >> ((k % 2) ? 0 : 4)) & 0x0f)]; str[k] = '\0'; break; case 2: /* 6-bit ASCII */ for (i = j = 0; i < len; i += 3) { u.bits = 0; k = ((len - i) < 3 ? (len - i) : 3); #if WORDS_BIGENDIAN u.chars[3] = data[off+i]; u.chars[2] = (k > 1 ? data[off+i+1] : 0); u.chars[1] = (k > 2 ? data[off+i+2] : 0); char_idx = 3; #else memcpy((void *)&u.bits, &data[off+i], k); char_idx = 0; #endif for (k=0; k<4; k++) { str[j++] = ((u.chars[char_idx] & 0x3f) + 0x20); u.bits >>= 6; } } str[j] = '\0'; break; case 3: memcpy(str, &data[off], size); str[size] = '\0'; break; } off += len; *offset = off; return str; }
0
[ "CWE-120", "CWE-787" ]
ipmitool
e824c23316ae50beb7f7488f2055ac65e8b341f2
285,415,390,883,413,900,000,000,000,000,000,000,000
95
fru: Fix buffer overflow vulnerabilities Partial fix for CVE-2020-5208, see https://github.com/ipmitool/ipmitool/security/advisories/GHSA-g659-9qxw-p7cp The `read_fru_area_section` function only performs size validation of requested read size, and falsely assumes that the IPMI message will not respond with more than the requested amount of data; it uses the unvalidated response size to copy into `frubuf`. If the response is larger than the request, this can result in overflowing the buffer. The same issue affects the `read_fru_area` function.
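The message above pins the bug on trusting the response's own length field. A minimal sketch of the missing clamp, with invented names (fru_read_resp, fru_copy_checked) rather than ipmitool's actual patch:

```c
/* Hedged sketch: clamp an untrusted response length to the requested
 * read size before copying. Names are illustrative, not ipmitool's. */
#include <stdint.h>
#include <string.h>

struct fru_read_resp {
    uint8_t count;          /* length the device claims it returned */
    uint8_t data[255];
};

static size_t fru_copy_checked(uint8_t *frubuf, size_t requested,
                               const struct fru_read_resp *rsp)
{
    size_t n = rsp->count;
    if (n > requested)      /* response larger than request: clamp it */
        n = requested;
    memcpy(frubuf, rsp->data, n);
    return n;
}
```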
static int wait_while_ack(gitno_buffer *buf) { int error; git_pkt_ack *pkt = NULL; while (1) { git__free(pkt); if ((error = recv_pkt((git_pkt **)&pkt, buf)) < 0) return error; if (pkt->type == GIT_PKT_NAK) break; if (pkt->type == GIT_PKT_ACK && (pkt->status != GIT_ACK_CONTINUE && pkt->status != GIT_ACK_COMMON)) { git__free(pkt); return 0; } } git__free(pkt); return 0; }
0
[ "CWE-476", "CWE-119" ]
libgit2
2fdef641fd0dd2828bd948234ae86de75221a11a
92,859,474,581,297,050,000,000,000,000,000,000,000
25
smart_pkt: treat empty packet lines as error The Git protocol does not specify what should happen in the case of an empty packet line (that is a packet line "0004"). We currently indicate success, but do not return a packet in the case where we hit an empty line. The smart protocol was not prepared to handle such packets in all cases, though, resulting in a `NULL` pointer dereference. Fix the issue by returning an error instead. As such kind of packets is not even specified by upstream, this is the right thing to do.
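The rule the message describes, that an empty "0004" packet line is undefined and should fail instead of signalling success without a packet, can be sketched as below; the names are invented and this is not libgit2's actual parser:

```c
/* Hedged sketch of rejecting an empty pkt-line. A pkt-line's 4-hex-digit
 * length prefix counts itself, so a value of 4 means "no payload at all". */
enum { PKT_OK = 0, PKT_ERROR = -1 };

static int check_pkt_len(unsigned long len)
{
    if (len < 4)            /* cannot even contain its own header */
        return PKT_ERROR;
    if (len == 4)           /* "0004": empty line, undefined by the protocol */
        return PKT_ERROR;   /* error out instead of returning no packet */
    return PKT_OK;
}
```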
static int mov_write_sv3d_tag(AVFormatContext *s, AVIOContext *pb, AVSphericalMapping *spherical_mapping) { int64_t sv3d_pos, svhd_pos, proj_pos; const char* metadata_source = s->flags & AVFMT_FLAG_BITEXACT ? "Lavf" : LIBAVFORMAT_IDENT; if (spherical_mapping->projection != AV_SPHERICAL_EQUIRECTANGULAR && spherical_mapping->projection != AV_SPHERICAL_EQUIRECTANGULAR_TILE && spherical_mapping->projection != AV_SPHERICAL_CUBEMAP) { av_log(pb, AV_LOG_WARNING, "Unsupported projection %d. sv3d not written.\n", spherical_mapping->projection); return 0; } sv3d_pos = avio_tell(pb); avio_wb32(pb, 0); /* size */ ffio_wfourcc(pb, "sv3d"); svhd_pos = avio_tell(pb); avio_wb32(pb, 0); /* size */ ffio_wfourcc(pb, "svhd"); avio_wb32(pb, 0); /* version = 0 & flags = 0 */ avio_put_str(pb, metadata_source); update_size(pb, svhd_pos); proj_pos = avio_tell(pb); avio_wb32(pb, 0); /* size */ ffio_wfourcc(pb, "proj"); avio_wb32(pb, 24); /* size */ ffio_wfourcc(pb, "prhd"); avio_wb32(pb, 0); /* version = 0 & flags = 0 */ avio_wb32(pb, spherical_mapping->yaw); avio_wb32(pb, spherical_mapping->pitch); avio_wb32(pb, spherical_mapping->roll); switch (spherical_mapping->projection) { case AV_SPHERICAL_EQUIRECTANGULAR: case AV_SPHERICAL_EQUIRECTANGULAR_TILE: avio_wb32(pb, 28); /* size */ ffio_wfourcc(pb, "equi"); avio_wb32(pb, 0); /* version = 0 & flags = 0 */ avio_wb32(pb, spherical_mapping->bound_top); avio_wb32(pb, spherical_mapping->bound_bottom); avio_wb32(pb, spherical_mapping->bound_left); avio_wb32(pb, spherical_mapping->bound_right); break; case AV_SPHERICAL_CUBEMAP: avio_wb32(pb, 20); /* size */ ffio_wfourcc(pb, "cbmp"); avio_wb32(pb, 0); /* version = 0 & flags = 0 */ avio_wb32(pb, 0); /* layout */ avio_wb32(pb, spherical_mapping->padding); /* padding */ break; } update_size(pb, proj_pos); return update_size(pb, sv3d_pos); }
0
[ "CWE-125" ]
FFmpeg
95556e27e2c1d56d9e18f5db34d6f756f3011148
252,508,449,634,740,930,000,000,000,000,000,000,000
57
avformat/movenc: Do not pass AVCodecParameters in avpriv_request_sample Fixes: out of array read Fixes: ffmpeg_crash_8.avi Found-by: Thuan Pham, Marcel Böhme, Andrew Santosa and Alexandru Razvan Caciulescu with AFLSmart Signed-off-by: Michael Niedermayer <[email protected]>
ofpacts_pull_openflow_actions(struct ofpbuf *openflow, unsigned int actions_len, enum ofp_version version, const struct vl_mff_map *vl_mff_map, uint64_t *ofpacts_tlv_bitmap, struct ofpbuf *ofpacts) { return ofpacts_pull_openflow_actions__(openflow, actions_len, version, 1u << OVSINST_OFPIT11_APPLY_ACTIONS, ofpacts, 0, vl_mff_map, ofpacts_tlv_bitmap); }
0
[ "CWE-125" ]
ovs
9237a63c47bd314b807cda0bd2216264e82edbe8
10,787,457,862,517,170,000,000,000,000,000,000,000
12
ofp-actions: Avoid buffer overread in BUNDLE action decoding. Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9052 Signed-off-by: Ben Pfaff <[email protected]> Acked-by: Justin Pettit <[email protected]>
getpattern (value, quoted, expandpat) char *value; int quoted, expandpat; { char *pat, *tword; WORD_LIST *l; #if 0 int i; #endif /* There is a problem here: how to handle single or double quotes in the pattern string when the whole expression is between double quotes? POSIX.2 says that enclosing double quotes do not cause the pattern to be quoted, but does that leave us a problem with @ and array[@] and their expansions inside a pattern? */ #if 0 if (expandpat && (quoted & (Q_HERE_DOCUMENT|Q_DOUBLE_QUOTES)) && *tword) { i = 0; pat = string_extract_double_quoted (tword, &i, 1); free (tword); tword = pat; } #endif /* expand_string_for_rhs () leaves WORD quoted and does not perform word splitting. */ l = *value ? expand_string_for_rhs (value, (quoted & (Q_HERE_DOCUMENT|Q_DOUBLE_QUOTES)) ? Q_PATQUOTE : quoted, (int *)NULL, (int *)NULL) : (WORD_LIST *)0; pat = string_list (l); dispose_words (l); if (pat) { tword = quote_string_for_globbing (pat, QGLOB_CVTNULL); free (pat); pat = tword; } return (pat); }
1
[]
bash
955543877583837c85470f7fb8a97b7aa8d45e6c
118,580,553,315,232,570,000,000,000,000,000,000,000
40
bash-4.4-rc2 release
int ssl3_new(SSL *s) { SSL3_STATE *s3; if ((s3=OPENSSL_malloc(sizeof *s3)) == NULL) goto err; memset(s3,0,sizeof *s3); EVP_MD_CTX_init(&s3->finish_dgst1); EVP_MD_CTX_init(&s3->finish_dgst2); pq_64bit_init(&(s3->rrec.seq_num)); pq_64bit_init(&(s3->wrec.seq_num)); s->s3=s3; s->method->ssl_clear(s); return(1); err: return(0); }
0
[ "CWE-310" ]
openssl
c6a876473cbff0fd323c8abcaace98ee2d21863d
216,344,751,293,597,700,000,000,000,000,000,000,000
18
Support TLS_FALLBACK_SCSV. Reviewed-by: Stephen Henson <[email protected]>
struct sk_buff *pep_read(struct sock *sk) { struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue); if (sk->sk_state == TCP_ESTABLISHED) pipe_grant_credits(sk, GFP_ATOMIC); return skb; }
0
[ "CWE-200" ]
net
bcd0f93353326954817a4f9fa55ec57fb38acbb0
53,151,706,976,004,390,000,000,000,000,000,000,000
8
phonet: refcount leak in pep_sock_accept sock_hold(sk) is invoked in pep_sock_accept(), but __sock_put(sk) is not invoked in subsequent failure branches (pep_accept_conn() != 0). Signed-off-by: Hangyu Hua <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Jakub Kicinski <[email protected]>
bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return irqchip_in_kernel(vcpu->kvm) == lapic_in_kernel(vcpu); }
0
[ "CWE-369" ]
linux
0185604c2d82c560dab2f2933a18f797e74ab5a8
61,440,665,645,817,390,000,000,000,000,000,000,000
4
KVM: x86: Reload pit counters for all channels when restoring state Currently if userspace restores the pit counters with a count of 0 on channels 1 or 2 and the guest attempts to read the count on those channels, then KVM will perform a mod of 0 and crash. This will ensure that 0 values are converted to 65536 as per the spec. This is CVE-2015-7513. Signed-off-by: Andy Honig <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
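The crash described here is a modulo by a restored count of 0; per the i8254 convention, a programmed count of 0 means the maximum, 65536. A hedged sketch of that conversion (illustrative name, not the kvm code):

```c
#include <stdint.h>

/* A programmed i8254 count of 0 conventionally means 65536; converting
 * up front avoids a later "mod 0" when the guest reads the counter back. */
static uint32_t pit_effective_count(uint16_t programmed)
{
    return programmed ? programmed : 65536u;
}
```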
void recordLatestDataFilter(const typename FilterList<T>::iterator current_filter, T*& latest_filter, const FilterList<T>& filters) { // If this is the first time we're calling onData, just record the current filter. if (latest_filter == nullptr) { latest_filter = current_filter->get(); return; } // We want to keep this pointing at the latest filter in the filter list that has received the // onData callback. To do so, we compare the current latest with the *previous* filter. If they // match, then we must be processing a new filter for the first time. We omit this check if we're // the first filter, since the above check handles that case. // // We compare against the previous filter to avoid multiple filter iterations from reseting the // pointer: If we just set latest to current, then the first onData filter iteration would // correctly iterate over the filters and set latest, but on subsequent onData iterations // we'd start from the beginning again, potentially allowing filter N to modify the buffer even // though filter M > N was the filter that inserted data into the buffer. if (current_filter != filters.begin() && latest_filter == std::prev(current_filter)->get()) { latest_filter = current_filter->get(); } }
0
[ "CWE-400", "CWE-703" ]
envoy
afc39bea36fd436e54262f150c009e8d72db5014
193,123,026,659,815,100,000,000,000,000,000,000,000
22
Track byteSize of HeaderMap internally. Introduces a cached byte size updated internally in HeaderMap. The value is stored as an optional, and is cleared whenever a non-const pointer or reference to a HeaderEntry is accessed. The cached value can be set with refreshByteSize() which performs an iteration over the HeaderMap to sum the size of each key and value in the HeaderMap. Signed-off-by: Asra Ali <[email protected]>
Number* Parser::lexed_dimension(const ParserState& pstate, const std::string& parsed) { size_t L = parsed.length(); size_t num_pos = parsed.find_first_not_of(" \n\r\t"); if (num_pos == std::string::npos) num_pos = L; size_t unit_pos = parsed.find_first_not_of("-+0123456789.", num_pos); if (parsed[unit_pos] == 'e' && is_number(parsed[unit_pos+1]) ) { unit_pos = parsed.find_first_not_of("-+0123456789.", ++ unit_pos); } if (unit_pos == std::string::npos) unit_pos = L; const std::string& num = parsed.substr(num_pos, unit_pos - num_pos); Number* nr = SASS_MEMORY_NEW(Number, pstate, sass_strtod(num.c_str()), Token(number(parsed.c_str())), number_has_zero(parsed)); nr->is_interpolant(false); nr->is_delayed(true); return nr; }
0
[ "CWE-674" ]
libsass
f2db04883e5fff4e03777dcc1eb60d4373c45be1
257,500,565,028,567,370,000,000,000,000,000,000,000
20
Make `parse_css_variable_value` non-recursive Fixes #2658 stack overflow
void omap_iommu_debugfs_remove(struct omap_iommu *obj) { if (!obj->debug_dir) return; debugfs_remove_recursive(obj->debug_dir); }
0
[]
linux
e203db293863fa15b4b1917d4398fb5bd63c4e88
182,318,064,757,528,100,000,000,000,000,000,000,000
7
iommu/omap: Fix debug_read_tlb() to use seq_printf() The debug_read_tlb() uses the sprintf() functions directly on the buffer allocated by buf = kmalloc(count), without taking into account the size of the buffer, with the consequence corrupting the heap, depending on the count requested by the user. The patch fixes the issue replacing sprintf() by seq_printf(). Signed-off-by: Salva Peiró <[email protected]> Signed-off-by: Joerg Roedel <[email protected]>
end_unicast_message(struct neighbour *neigh, int type, int bytes) { assert(unicast_neighbour == neigh && unicast_buffered >= bytes + 2 && unicast_buffer[unicast_buffered - bytes - 2] == type && unicast_buffer[unicast_buffered - bytes - 1] == bytes); schedule_unicast_flush(jitter(babel_get_if_nfo(neigh->ifp), 0)); }
0
[ "CWE-787" ]
frr
c3793352a8d76d2eee1edc38a9a16c1c8a6573f4
30,723,402,315,496,632,000,000,000,000,000,000,000
7
babeld: fix #10502 #10503 by repairing the checks on length This patch repairs the checking conditions on length in four functions: babel_packet_examin, parse_hello_subtlv, parse_ihu_subtlv, and parse_update_subtlv Signed-off-by: qingkaishi <[email protected]>
// Evaluation procedure. double operator()(const double x, const double y, const double z, const double c) { mem[_cimg_mp_slot_x] = x; mem[_cimg_mp_slot_y] = y; mem[_cimg_mp_slot_z] = z; mem[_cimg_mp_slot_c] = c; for (p_code = code; p_code<p_code_end; ++p_code) { opcode._data = p_code->_data; const ulongT target = opcode[1]; mem[target] = _cimg_mp_defunc(*this); } return *result;
0
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
176,083,751,326,972,140,000,000,000,000,000,000,000
9
Fix other issues in 'CImg<T>::load_bmp()'.
f_balloon_split(typval_T *argvars, typval_T *rettv UNUSED) { if (rettv_list_alloc(rettv) == OK) { char_u *msg = tv_get_string_chk(&argvars[0]); if (msg != NULL) { pumitem_T *array; int size = split_message(msg, &array); int i; /* Skip the first and last item, they are always empty. */ for (i = 1; i < size - 1; ++i) list_append_string(rettv->vval.v_list, array[i].pum_text, -1); while (size > 0) vim_free(array[--size].pum_text); vim_free(array); } } }
0
[ "CWE-78" ]
vim
8c62a08faf89663e5633dc5036cd8695c80f1075
335,714,384,485,547,850,000,000,000,000,000,000,000
21
patch 8.1.0881: can execute shell commands in rvim through interfaces Problem: Can execute shell commands in rvim through interfaces. Solution: Disable using interfaces in restricted mode. Allow for writing file with writefile(), histadd() and a few others.
bool r_pkcs7_parse_digestalgorithmidentifier (RPKCS7DigestAlgorithmIdentifiers *dai, RASN1Object *object) { ut32 i; if (!dai && !object) { return false; } if (object->list.length > 0) { dai->elements = (RX509AlgorithmIdentifier **) calloc (object->list.length, sizeof (RX509AlgorithmIdentifier*)); if (!dai->elements) { return false; } dai->length = object->list.length; for (i = 0; i < dai->length; ++i) { // r_x509_parse_algorithmidentifier returns bool, // so i have to allocate before calling the function dai->elements[i] = (RX509AlgorithmIdentifier *) malloc (sizeof (RX509AlgorithmIdentifier)); //should i handle invalid memory? the function checks the pointer //or it should return if dai->elements[i] == NULL ? if (dai->elements[i]) { //Memset is needed to initialize to 0 the structure and avoid garbage. memset (dai->elements[i], 0, sizeof (RX509AlgorithmIdentifier)); r_x509_parse_algorithmidentifier (dai->elements[i], object->list.objects[i]); } } } return true; }
0
[ "CWE-476" ]
radare2
7ab66cca5bbdf6cb2d69339ef4f513d95e532dbf
161,362,603,553,187,300,000,000,000,000,000,000,000
26
Fix #7152 - Null deref in cms
int ssl3_read_bytes(SSL *s, int type, int *recvd_type, unsigned char *buf, int len, int peek) { int al, i, j, ret; unsigned int n, curr_rec, num_recs, read_bytes; SSL3_RECORD *rr; SSL3_BUFFER *rbuf; void (*cb) (const SSL *ssl, int type2, int val) = NULL; rbuf = &s->rlayer.rbuf; if (!SSL3_BUFFER_is_initialised(rbuf)) { /* Not initialized yet */ if (!ssl3_setup_read_buffer(s)) return (-1); } if ((type && (type != SSL3_RT_APPLICATION_DATA) && (type != SSL3_RT_HANDSHAKE)) || (peek && (type != SSL3_RT_APPLICATION_DATA))) { SSLerr(SSL_F_SSL3_READ_BYTES, ERR_R_INTERNAL_ERROR); return -1; } if ((type == SSL3_RT_HANDSHAKE) && (s->rlayer.handshake_fragment_len > 0)) /* (partially) satisfy request from storage */ { unsigned char *src = s->rlayer.handshake_fragment; unsigned char *dst = buf; unsigned int k; /* peek == 0 */ n = 0; while ((len > 0) && (s->rlayer.handshake_fragment_len > 0)) { *dst++ = *src++; len--; s->rlayer.handshake_fragment_len--; n++; } /* move any remaining fragment bytes: */ for (k = 0; k < s->rlayer.handshake_fragment_len; k++) s->rlayer.handshake_fragment[k] = *src++; if (recvd_type != NULL) *recvd_type = SSL3_RT_HANDSHAKE; return n; } /* * Now s->rlayer.handshake_fragment_len == 0 if type == SSL3_RT_HANDSHAKE. */ if (!ossl_statem_get_in_handshake(s) && SSL_in_init(s)) { /* type == SSL3_RT_APPLICATION_DATA */ i = s->handshake_func(s); if (i < 0) return (i); if (i == 0) { SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_SSL_HANDSHAKE_FAILURE); return (-1); } } start: s->rwstate = SSL_NOTHING; /*- * For each record 'i' up to |num_recs] * rr[i].type - is the type of record * rr[i].data, - data * rr[i].off, - offset into 'data' for next read * rr[i].length, - number of bytes. */ rr = s->rlayer.rrec; num_recs = RECORD_LAYER_get_numrpipes(&s->rlayer); do { /* get new records if necessary */ if (num_recs == 0) { ret = ssl3_get_record(s); if (ret <= 0) return (ret); num_recs = RECORD_LAYER_get_numrpipes(&s->rlayer); if (num_recs == 0) { /* Shouldn't happen */ al = SSL_AD_INTERNAL_ERROR; SSLerr(SSL_F_SSL3_READ_BYTES, ERR_R_INTERNAL_ERROR); goto f_err; } } /* Skip over any records we have already read */ for (curr_rec = 0; curr_rec < num_recs && SSL3_RECORD_is_read(&rr[curr_rec]); curr_rec++) ; if (curr_rec == num_recs) { RECORD_LAYER_set_numrpipes(&s->rlayer, 0); num_recs = 0; curr_rec = 0; } } while (num_recs == 0); rr = &rr[curr_rec]; /* * Reset the count of consecutive warning alerts if we've got a non-empty * record that isn't an alert. */ if (SSL3_RECORD_get_type(rr) != SSL3_RT_ALERT && SSL3_RECORD_get_length(rr) != 0) s->rlayer.alert_count = 0; /* we now have a packet which can be read and processed */ if (s->s3->change_cipher_spec /* set when we receive ChangeCipherSpec, * reset by ssl3_get_finished */ && (SSL3_RECORD_get_type(rr) != SSL3_RT_HANDSHAKE)) { al = SSL_AD_UNEXPECTED_MESSAGE; SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_DATA_BETWEEN_CCS_AND_FINISHED); goto f_err; } /* * If the other end has shut down, throw anything we read away (even in * 'peek' mode) */ if (s->shutdown & SSL_RECEIVED_SHUTDOWN) { SSL3_RECORD_set_length(rr, 0); s->rwstate = SSL_NOTHING; return (0); } if (type == SSL3_RECORD_get_type(rr) || (SSL3_RECORD_get_type(rr) == SSL3_RT_CHANGE_CIPHER_SPEC && type == SSL3_RT_HANDSHAKE && recvd_type != NULL)) { /* * SSL3_RT_APPLICATION_DATA or * SSL3_RT_HANDSHAKE or * SSL3_RT_CHANGE_CIPHER_SPEC */ /* * make sure that we are not getting application data when we are * doing a handshake for the first time */ if (SSL_in_init(s) && (type == SSL3_RT_APPLICATION_DATA) && (s->enc_read_ctx == NULL)) { al = SSL_AD_UNEXPECTED_MESSAGE; SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_APP_DATA_IN_HANDSHAKE); goto f_err; } if (type == SSL3_RT_HANDSHAKE && SSL3_RECORD_get_type(rr) == SSL3_RT_CHANGE_CIPHER_SPEC && s->rlayer.handshake_fragment_len > 0) { al = SSL_AD_UNEXPECTED_MESSAGE; SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_CCS_RECEIVED_EARLY); goto f_err; } if (recvd_type != NULL) *recvd_type = SSL3_RECORD_get_type(rr); if (len <= 0) return (len); read_bytes = 0; do { if ((unsigned int)len - read_bytes > SSL3_RECORD_get_length(rr)) n = SSL3_RECORD_get_length(rr); else n = (unsigned int)len - read_bytes; memcpy(buf, &(rr->data[rr->off]), n); buf += n; if (!peek) { SSL3_RECORD_sub_length(rr, n); SSL3_RECORD_add_off(rr, n); if (SSL3_RECORD_get_length(rr) == 0) { s->rlayer.rstate = SSL_ST_READ_HEADER; SSL3_RECORD_set_off(rr, 0); SSL3_RECORD_set_read(rr); } } if (SSL3_RECORD_get_length(rr) == 0 || (peek && n == SSL3_RECORD_get_length(rr))) { curr_rec++; rr++; } read_bytes += n; } while (type == SSL3_RT_APPLICATION_DATA && curr_rec < num_recs && read_bytes < (unsigned int)len); if (read_bytes == 0) { /* We must have read empty records. Get more data */ goto start; } if (!peek && curr_rec == num_recs && (s->mode & SSL_MODE_RELEASE_BUFFERS) && SSL3_BUFFER_get_left(rbuf) == 0) ssl3_release_read_buffer(s); return read_bytes; } /* * If we get here, then type != rr->type; if we have a handshake message, * then it was unexpected (Hello Request or Client Hello) or invalid (we * were actually expecting a CCS). */ /* * Lets just double check that we've not got an SSLv2 record */ if (rr->rec_version == SSL2_VERSION) { /* * Should never happen. ssl3_get_record() should only give us an SSLv2 * record back if this is the first packet and we are looking for an * initial ClientHello. Therefore |type| should always be equal to * |rr->type|. If not then something has gone horribly wrong */ al = SSL_AD_INTERNAL_ERROR; SSLerr(SSL_F_SSL3_READ_BYTES, ERR_R_INTERNAL_ERROR); goto f_err; } if (s->method->version == TLS_ANY_VERSION && (s->server || rr->type != SSL3_RT_ALERT)) { /* * If we've got this far and still haven't decided on what version * we're using then this must be a client side alert we're dealing with * (we don't allow heartbeats yet). We shouldn't be receiving anything * other than a ClientHello if we are a server. */ s->version = rr->rec_version; al = SSL_AD_UNEXPECTED_MESSAGE; SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_UNEXPECTED_MESSAGE); goto f_err; } /* * In case of record types for which we have 'fragment' storage, fill * that so that we can process the data at a fixed place. */ { unsigned int dest_maxlen = 0; unsigned char *dest = NULL; unsigned int *dest_len = NULL; if (SSL3_RECORD_get_type(rr) == SSL3_RT_HANDSHAKE) { dest_maxlen = sizeof s->rlayer.handshake_fragment; dest = s->rlayer.handshake_fragment; dest_len = &s->rlayer.handshake_fragment_len; } else if (SSL3_RECORD_get_type(rr) == SSL3_RT_ALERT) { dest_maxlen = sizeof s->rlayer.alert_fragment; dest = s->rlayer.alert_fragment; dest_len = &s->rlayer.alert_fragment_len; } if (dest_maxlen > 0) { n = dest_maxlen - *dest_len; /* available space in 'dest' */ if (SSL3_RECORD_get_length(rr) < n) n = SSL3_RECORD_get_length(rr); /* available bytes */ /* now move 'n' bytes: */ while (n-- > 0) { dest[(*dest_len)++] = SSL3_RECORD_get_data(rr)[SSL3_RECORD_get_off(rr)]; SSL3_RECORD_add_off(rr, 1); SSL3_RECORD_add_length(rr, -1); } if (*dest_len < dest_maxlen) { SSL3_RECORD_set_read(rr); goto start; /* fragment was too small */ } } } /*- * s->rlayer.handshake_fragment_len == 4 iff rr->type == SSL3_RT_HANDSHAKE; * s->rlayer.alert_fragment_len == 2 iff rr->type == SSL3_RT_ALERT. * (Possibly rr is 'empty' now, i.e. rr->length may be 0.) */ /* If we are a client, check for an incoming 'Hello Request': */ if ((!s->server) && (s->rlayer.handshake_fragment_len >= 4) && (s->rlayer.handshake_fragment[0] == SSL3_MT_HELLO_REQUEST) && (s->session != NULL) && (s->session->cipher != NULL)) { s->rlayer.handshake_fragment_len = 0; if ((s->rlayer.handshake_fragment[1] != 0) || (s->rlayer.handshake_fragment[2] != 0) || (s->rlayer.handshake_fragment[3] != 0)) { al = SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_BAD_HELLO_REQUEST); goto f_err; } if (s->msg_callback) s->msg_callback(0, s->version, SSL3_RT_HANDSHAKE, s->rlayer.handshake_fragment, 4, s, s->msg_callback_arg); if (SSL_is_init_finished(s) && !(s->s3->flags & SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS) && !s->s3->renegotiate) { ssl3_renegotiate(s); if (ssl3_renegotiate_check(s)) { i = s->handshake_func(s); if (i < 0) return (i); if (i == 0) { SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_SSL_HANDSHAKE_FAILURE); return (-1); } if (!(s->mode & SSL_MODE_AUTO_RETRY)) { if (SSL3_BUFFER_get_left(rbuf) == 0) { /* no read-ahead left? */ BIO *bio; /* * In the case where we try to read application data, * but we trigger an SSL handshake, we return -1 with * the retry option set. Otherwise renegotiation may * cause nasty problems in the blocking world */ s->rwstate = SSL_READING; bio = SSL_get_rbio(s); BIO_clear_retry_flags(bio); BIO_set_retry_read(bio); return (-1); } } } } /* * we either finished a handshake or ignored the request, now try * again to obtain the (application) data we were asked for */ goto start; } /* * If we are a server and get a client hello when renegotiation isn't * allowed send back a no renegotiation alert and carry on. WARNING: * experimental code, needs reviewing (steve) */ if (s->server && SSL_is_init_finished(s) && !s->s3->send_connection_binding && (s->version > SSL3_VERSION) && (s->rlayer.handshake_fragment_len >= 4) && (s->rlayer.handshake_fragment[0] == SSL3_MT_CLIENT_HELLO) && (s->session != NULL) && (s->session->cipher != NULL) && !(s->ctx->options & SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION)) { SSL3_RECORD_set_length(rr, 0); SSL3_RECORD_set_read(rr); ssl3_send_alert(s, SSL3_AL_WARNING, SSL_AD_NO_RENEGOTIATION); goto start; } if (s->rlayer.alert_fragment_len >= 2) { int alert_level = s->rlayer.alert_fragment[0]; int alert_descr = s->rlayer.alert_fragment[1]; s->rlayer.alert_fragment_len = 0; if (s->msg_callback) s->msg_callback(0, s->version, SSL3_RT_ALERT, s->rlayer.alert_fragment, 2, s, s->msg_callback_arg); if (s->info_callback != NULL) cb = s->info_callback; else if (s->ctx->info_callback != NULL) cb = s->ctx->info_callback; if (cb != NULL) { j = (alert_level << 8) | alert_descr; cb(s, SSL_CB_READ_ALERT, j); } if (alert_level == SSL3_AL_WARNING) { s->s3->warn_alert = alert_descr; SSL3_RECORD_set_read(rr); s->rlayer.alert_count++; if (s->rlayer.alert_count == MAX_WARN_ALERT_COUNT) { al = SSL_AD_UNEXPECTED_MESSAGE; SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_TOO_MANY_WARN_ALERTS); goto f_err; } if (alert_descr == SSL_AD_CLOSE_NOTIFY) { s->shutdown |= SSL_RECEIVED_SHUTDOWN; return (0); } /* * This is a warning but we receive it if we requested * renegotiation and the peer denied it. Terminate with a fatal * alert because if application tried to renegotiate it * presumably had a good reason and expects it to succeed. In * future we might have a renegotiation where we don't care if * the peer refused it where we carry on. */ else if (alert_descr == SSL_AD_NO_RENEGOTIATION) { al = SSL_AD_HANDSHAKE_FAILURE; SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_NO_RENEGOTIATION); goto f_err; } #ifdef SSL_AD_MISSING_SRP_USERNAME else if (alert_descr == SSL_AD_MISSING_SRP_USERNAME) return (0); #endif } else if (alert_level == SSL3_AL_FATAL) { char tmp[16]; s->rwstate = SSL_NOTHING; s->s3->fatal_alert = alert_descr; SSLerr(SSL_F_SSL3_READ_BYTES, SSL_AD_REASON_OFFSET + alert_descr); BIO_snprintf(tmp, sizeof tmp, "%d", alert_descr); ERR_add_error_data(2, "SSL alert number ", tmp); s->shutdown |= SSL_RECEIVED_SHUTDOWN; SSL3_RECORD_set_read(rr); SSL_CTX_remove_session(s->session_ctx, s->session); return (0); } else { al = SSL_AD_ILLEGAL_PARAMETER; SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_UNKNOWN_ALERT_TYPE); goto f_err; } goto start; } if (s->shutdown & SSL_SENT_SHUTDOWN) { /* but we have not received a * shutdown */ s->rwstate = SSL_NOTHING; SSL3_RECORD_set_length(rr, 0); SSL3_RECORD_set_read(rr); return (0); } if (SSL3_RECORD_get_type(rr) == SSL3_RT_CHANGE_CIPHER_SPEC) { al = SSL_AD_UNEXPECTED_MESSAGE; SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_CCS_RECEIVED_EARLY); goto f_err; } /* * Unexpected handshake message (Client Hello, or protocol violation) */ if ((s->rlayer.handshake_fragment_len >= 4) && !ossl_statem_get_in_handshake(s)) { if (SSL_is_init_finished(s) && !(s->s3->flags & SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS)) { ossl_statem_set_in_init(s, 1); s->renegotiate = 1; s->new_session = 1; } i = s->handshake_func(s); if (i < 0) return (i); if (i == 0) { SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_SSL_HANDSHAKE_FAILURE); return (-1); } if (!(s->mode & SSL_MODE_AUTO_RETRY)) { if (SSL3_BUFFER_get_left(rbuf) == 0) { /* no read-ahead left? */ BIO *bio; /* * In the case where we try to read application data, but we * trigger an SSL handshake, we return -1 with the retry * option set. Otherwise renegotiation may cause nasty * problems in the blocking world */ s->rwstate = SSL_READING; bio = SSL_get_rbio(s); BIO_clear_retry_flags(bio); BIO_set_retry_read(bio); return (-1); } } goto start; } switch (SSL3_RECORD_get_type(rr)) { default: /* * TLS up to v1.1 just ignores unknown message types: TLS v1.2 give * an unexpected message alert. */ if (s->version >= TLS1_VERSION && s->version <= TLS1_1_VERSION) { SSL3_RECORD_set_length(rr, 0); SSL3_RECORD_set_read(rr); goto start; } al = SSL_AD_UNEXPECTED_MESSAGE; SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_UNEXPECTED_RECORD); goto f_err; case SSL3_RT_CHANGE_CIPHER_SPEC: case SSL3_RT_ALERT: case SSL3_RT_HANDSHAKE: /* * we already handled all of these, with the possible exception of * SSL3_RT_HANDSHAKE when ossl_statem_get_in_handshake(s) is true, but * that should not happen when type != rr->type */ al = SSL_AD_UNEXPECTED_MESSAGE; SSLerr(SSL_F_SSL3_READ_BYTES, ERR_R_INTERNAL_ERROR); goto f_err; case SSL3_RT_APPLICATION_DATA: /* * At this point, we were expecting handshake data, but have * application data. If the library was running inside ssl3_read() * (i.e. in_read_app_data is set) and it makes sense to read * application data at this point (session renegotiation not yet * started), we will indulge it. */ if (ossl_statem_app_data_allowed(s)) { s->s3->in_read_app_data = 2; return (-1); } else { al = SSL_AD_UNEXPECTED_MESSAGE; SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_UNEXPECTED_RECORD); goto f_err; } } /* not reached */ f_err: ssl3_send_alert(s, SSL3_AL_FATAL, al); return (-1); }
1
[ "CWE-20" ]
openssl
63658103d4441924f8dbfc517b99bb54758a98b9
229,842,592,254,180,100,000,000,000,000,000,000,000
532
Fix a hang with SSL_peek() If while calling SSL_peek() we read an empty record then we go into an infinite loop, continually trying to read data from the empty record and never making any progress. This could be exploited by a malicious peer in a Denial Of Service attack. CVE-2016-6305 GitHub Issue #1563 Reviewed-by: Rich Salz <[email protected]>
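The hang arises because a zero-length record never advances the read loop. One way to sketch the repair, under invented names rather than OpenSSL's actual diff, is to mark an empty record as consumed so the loop fetches fresh data instead of retrying it forever:

```c
/* Hedged sketch: never re-poll a record that cannot make progress. */
struct record {
    unsigned long length;   /* payload bytes left in this record */
    int read;               /* consumed flag, like SSL3_RECORD_set_read() */
};

static int record_usable(struct record *r)
{
    if (r->length == 0) {
        r->read = 1;        /* empty record: consume it so we don't spin */
        return 0;           /* caller goes and fetches new records */
    }
    return 1;
}
```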
static void urlParseQueryString(struct HashMap *hashmap, const char *query, int len) { const char *key = query; const char *value = NULL; for (const char *ampersand = query; len-- >= 0; ampersand++) { char ch = len >= 0 ? *ampersand : '\000'; if (ch == '=' && !value) { value = ampersand + 1; } else if (ch == '&' || len < 0) { int kl = (value ? value-1 : ampersand) - key; int vl = value ? ampersand - value : 0; if (kl) { char *k = urlMakeString(key, kl); urlUnescape(k); char *v = NULL; if (value) { v = urlMakeString(value, vl); urlUnescape(v); } addToHashMap(hashmap, k, v); } key = ampersand + 1; value = NULL; } if (!ch) { break; } } }
0
[ "CWE-400", "CWE-703", "CWE-835" ]
shellinabox
4f0ecc31ac6f985e0dd3f5a52cbfc0e9251f6361
26,023,007,428,824,003,000,000,000,000,000,000,000
28
Rolling code for version 2.21
htc_request_check_host_hdr(struct http *hp) { int u; int seen_host = 0; for (u = HTTP_HDR_FIRST; u < hp->nhd; u++) { if (hp->hd[u].b == NULL) continue; AN(hp->hd[u].b); AN(hp->hd[u].e); if (http_IsHdr(&hp->hd[u], H_Host)) { if (seen_host) { return (400); } seen_host = 1; } } return (0); }
1
[]
Varnish-Cache
29870c8fe95e4e8a672f6f28c5fbe692bea09e9c
29,849,788,972,958,780,000,000,000,000,000,000,000
18
Check for duplicate Content-Length headers in requests If a duplicate CL header is in the request, we fail the request with a 400 (Bad Request) Fix a test case that was sending duplicate CL by mistake and would not fail because of that.
template<typename t> CImg<T>& operator<<=(const t value) { if (is_empty()) return *this; cimg_openmp_for(*this,((longT)*ptr) << (int)value,65536); return *this;
0
[ "CWE-119", "CWE-787" ]
CImg
ac8003393569aba51048c9d67e1491559877b1d1
326,438,639,339,481,840,000,000,000,000,000,000,000
5
.
valid_civil_p(VALUE y, int m, int d, double sg, VALUE *nth, int *ry, int *rm, int *rd, int *rjd, int *ns) { double style = guess_style(y, sg); int r; if (style == 0) { int jd; r = c_valid_civil_p(FIX2INT(y), m, d, sg, rm, rd, &jd, ns); if (!r) return 0; decode_jd(INT2FIX(jd), nth, rjd); if (f_zero_p(*nth)) *ry = FIX2INT(y); else { VALUE nth2; decode_year(y, *ns ? -1 : +1, &nth2, ry); } } else { decode_year(y, style, nth, ry); if (style < 0) r = c_valid_gregorian_p(*ry, m, d, rm, rd); else r = c_valid_julian_p(*ry, m, d, rm, rd); if (!r) return 0; c_civil_to_jd(*ry, *rm, *rd, style, rjd, ns); } return r; }
0
[]
date
3959accef8da5c128f8a8e2fd54e932a4fb253b0
130,058,601,847,042,560,000,000,000,000,000,000,000
34
Add length limit option for methods that parses date strings `Date.parse` now raises an ArgumentError when a given date string is longer than 128. You can configure the limit by giving `limit` keyword arguments like `Date.parse(str, limit: 1000)`. If you pass `limit: nil`, the limit is disabled. Not only `Date.parse` but also the following methods are changed. * Date._parse * Date.parse * DateTime.parse * Date._iso8601 * Date.iso8601 * DateTime.iso8601 * Date._rfc3339 * Date.rfc3339 * DateTime.rfc3339 * Date._xmlschema * Date.xmlschema * DateTime.xmlschema * Date._rfc2822 * Date.rfc2822 * DateTime.rfc2822 * Date._rfc822 * Date.rfc822 * DateTime.rfc822 * Date._jisx0301 * Date.jisx0301 * DateTime.jisx0301
SCM_DEFINE (scm_chown, "chown", 3, 0, 0, (SCM object, SCM owner, SCM group), "Change the ownership and group of the file referred to by @var{object} to\n" "the integer values @var{owner} and @var{group}. @var{object} can be\n" "a string containing a file name or, if the platform\n" "supports fchown, a port or integer file descriptor\n" "which is open on the file. The return value\n" "is unspecified.\n\n" "If @var{object} is a symbolic link, either the\n" "ownership of the link or the ownership of the referenced file will be\n" "changed depending on the operating system (lchown is\n" "unsupported at present). If @var{owner} or @var{group} is specified\n" "as @code{-1}, then that ID is not changed.") #define FUNC_NAME s_scm_chown { int rv; object = SCM_COERCE_OUTPORT (object); #ifdef HAVE_FCHOWN if (scm_is_integer (object) || (SCM_OPFPORTP (object))) { int fdes = (SCM_OPFPORTP (object)? SCM_FPORT_FDES (object) : scm_to_int (object)); SCM_SYSCALL (rv = fchown (fdes, scm_to_int (owner), scm_to_int (group))); } else #endif { STRING_SYSCALL (object, c_object, rv = chown (c_object, scm_to_int (owner), scm_to_int (group))); } if (rv == -1) SCM_SYSERROR; return SCM_UNSPECIFIED; }
0
[]
guile
245608911698adb3472803856019bdd5670b6614
44,907,589,533,616,700,000,000,000,000,000,000,000
38
Remove 'umask' calls from 'mkdir'. Fixes <http://bugs.gnu.org/24659>. * libguile/filesys.c (SCM_DEFINE): Remove calls to 'umask' when MODE is unbound; instead, use 0777 as the mode. Update docstring to clarify this. * doc/ref/posix.texi (File System): Adjust accordingly. * NEWS: Mention it.
static inline int put_words(OHCIState *ohci, dma_addr_t addr, uint16_t *buf, int num) { int i; addr += ohci->localmem_base; for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) { uint16_t tmp = cpu_to_le16(*buf); if (dma_memory_write(ohci->as, addr, &tmp, sizeof(tmp))) { return -1; } } return 0; }
0
[ "CWE-835" ]
qemu
95ed56939eb2eaa4e2f349fe6dcd13ca4edfd8fb
45,924,928,270,257,550,000,000,000,000,000,000,000
16
usb: ohci: limit the number of link eds The guest may builds an infinite loop with link eds. This patch limit the number of linked ed to avoid this. Signed-off-by: Li Qiang <[email protected]> Message-id: [email protected] Signed-off-by: Gerd Hoffmann <[email protected]>
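The fix described here bounds a guest-controlled linked-list walk. A sketch of the idea with invented names (ED_LINK_LIMIT is an assumed cap; qemu's constant and types differ):

```c
#include <stdint.h>

#define ED_LINK_LIMIT 32    /* assumed cap; the real value may differ */

/* Walk a chain of endpoint descriptors whose "next" pointers the guest
 * controls; abort once the chain is implausibly long (possible loop). */
static int walk_ed_list(uint32_t head, uint32_t (*next_of)(uint32_t))
{
    int count = 0;
    for (uint32_t cur = head; cur != 0; cur = next_of(cur)) {
        if (++count > ED_LINK_LIMIT)
            return -1;      /* refuse the loop instead of spinning forever */
        /* ... process the ED at 'cur' here ... */
    }
    return 0;
}
```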
tiff_error_handler (const char *mod, const char *fmt, va_list ap) { if (global_error) { /* Blah, loader called us twice */ return; } global_error = g_strdup_vprintf (fmt, ap); }
0
[ "CWE-20" ]
gdk-pixbuf
3bac204e0d0241a0d68586ece7099e6acf0e9bea
6,054,889,614,893,061,000,000,000,000,000,000,000
9
Initial stab at getting the focus code to work. Fri Jun 1 18:54:47 2001 Jonathan Blandford <[email protected]> * gtk/gtktreeview.c: (gtk_tree_view_focus): Initial stab at getting the focus code to work. (gtk_tree_view_class_init): Add a bunch of keybindings. * gtk/gtktreeviewcolumn.c (gtk_tree_view_column_set_cell_data_func): s/GtkCellDataFunc/GtkTreeCellDataFunc. (_gtk_tree_view_column_set_tree_view): Use "notify::model" instead of "properties_changed" to help justify the death of the latter signal. (-: * tests/testtreefocus.c (main): Let some columns be focussable to test focus better.
static void vnc_desktop_resize(VncState *vs) { DisplayState *ds = vs->ds; if (vs->csock == -1 || !vnc_has_feature(vs, VNC_FEATURE_RESIZE)) { return; } if (vs->client_width == ds_get_width(ds) && vs->client_height == ds_get_height(ds)) { return; } vs->client_width = ds_get_width(ds); vs->client_height = ds_get_height(ds); vnc_lock_output(vs); vnc_write_u8(vs, VNC_MSG_SERVER_FRAMEBUFFER_UPDATE); vnc_write_u8(vs, 0); vnc_write_u16(vs, 1); /* number of rects */ vnc_framebuffer_update(vs, 0, 0, vs->client_width, vs->client_height, VNC_ENCODING_DESKTOPRESIZE); vnc_unlock_output(vs); vnc_flush(vs); }
0
[ "CWE-125" ]
qemu
9f64916da20eea67121d544698676295bbb105a7
95,842,844,172,701,800,000,000,000,000,000,000,000
22
pixman/vnc: use pixman images in vnc. The vnc code uses *three* DisplaySurfaces: First is the surface of the actual QemuConsole, usually the guest screen, but could also be a text console (monitor/serial reachable via Ctrl-Alt-<nr> keys). This is left as-is. Second is the current server's view of the screen content. The vnc code uses this to figure which parts of the guest screen did _really_ change to reduce the amount of updates sent to the vnc clients. It is also used as data source when sending out the updates to the clients. This surface gets replaced by a pixman image. The format changes too, instead of using the guest screen format we'll use fixed 32bit rgb framebuffer and convert the pixels on the fly when comparing and updating the server framebuffer. Third surface carries the format expected by the vnc client. That isn't used to store image data. This surface is switched to PixelFormat and a boolean for bigendian byte order. Signed-off-by: Gerd Hoffmann <[email protected]>
int udev_util_replace_whitespace(const char *str, char *to, size_t len) { size_t i, j; /* strip trailing whitespace */ len = strnlen(str, len); while (len && isspace(str[len-1])) len--; /* strip leading whitespace */ i = 0; while (isspace(str[i]) && (i < len)) i++; j = 0; while (i < len) { /* substitute multiple whitespace with a single '_' */ if (isspace(str[i])) { while (isspace(str[i])) i++; to[j++] = '_'; } to[j++] = str[i++]; } to[j] = '\0'; return 0; }
0
[ "CWE-120" ]
udev
662c3110803bd8c1aedacc36788e6fd028944314
88,884,199,138,926,270,000,000,000,000,000,000,000
27
path_encode: fix max length calculation Sebastian Krahmer wrote: > it should reserve 4 times not 3 times len :)
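The quoted fix is a worst-case sizing rule: if every input byte can expand to a four-byte escape such as \xNN, the output buffer must reserve 4*len, not 3*len. A hedged sketch of that rule, not udev's path_encode itself:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Escape bytes that are unsafe in a path. Worst case, every byte
 * becomes "\xNN" (4 chars), so reserve len * 4 + 1, not len * 3 + 1. */
static char *path_escape(const char *s)
{
    size_t len = strlen(s);
    char *out = malloc(len * 4 + 1);
    if (!out)
        return NULL;
    char *w = out;
    for (; *s != '\0'; s++) {
        unsigned char c = (unsigned char)*s;
        if (c == '/' || c < 0x20)
            w += sprintf(w, "\\x%02x", c);  /* 4 output bytes */
        else
            *w++ = (char)c;
    }
    *w = '\0';
    return out;
}
```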
static inline void clear_file(struct inode *inode, int type) { F2FS_I(inode)->i_advise &= ~type; f2fs_mark_inode_dirty_sync(inode, true); }
0
[ "CWE-476" ]
linux
4969c06a0d83c9c3dc50b8efcdc8eeedfce896f6
172,733,504,257,119,500,000,000,000,000,000,000,000
5
f2fs: support swap file w/ DIO Signed-off-by: Jaegeuk Kim <[email protected]>
static void phar_do_404(phar_archive_data *phar, char *fname, int fname_len, char *f404, int f404_len, char *entry, int entry_len TSRMLS_DC) /* {{{ */ { sapi_header_line ctr = {0}; phar_entry_info *info; if (phar && f404_len) { info = phar_get_entry_info(phar, f404, f404_len, NULL, 1 TSRMLS_CC); if (info) { phar_file_action(phar, info, "text/html", PHAR_MIME_PHP, f404, f404_len, fname, NULL, NULL, 0 TSRMLS_CC); return; } } ctr.response_code = 404; ctr.line_len = sizeof("HTTP/1.0 404 Not Found")-1; ctr.line = "HTTP/1.0 404 Not Found"; sapi_header_op(SAPI_HEADER_REPLACE, &ctr TSRMLS_CC); sapi_send_headers(TSRMLS_C); PHPWRITE("<html>\n <head>\n <title>File Not Found</title>\n </head>\n <body>\n <h1>404 - File ", sizeof("<html>\n <head>\n <title>File Not Found</title>\n </head>\n <body>\n <h1>404 - File ") - 1); PHPWRITE(entry, entry_len); PHPWRITE(" Not Found</h1>\n </body>\n</html>", sizeof(" Not Found</h1>\n </body>\n</html>") - 1); }
1
[ "CWE-79" ]
php-src
6e64aba47f4e41d97c4d010024c68320c0855f45
283,501,678,353,022,400,000,000,000,000,000,000,000
23
Fix #76129 - remove more potential unfiltered outputs for phar
static const char *md_config_set_names_old(cmd_parms *cmd, void *dc, int argc, char *const argv[]) { ap_log_error( APLOG_MARK, APLOG_WARNING, 0, cmd->server, "mod_md: directive 'ManagedDomain' is deprecated, replace with 'MDomain'."); return md_config_set_names(cmd, dc, argc, argv); }
0
[ "CWE-476" ]
mod_md
e71001955809247b3aa4d269e1e0741b4fe0fc3d
143,002,902,061,334,460,000,000,000,000,000,000,000
7
v1.1.12, notifycmd improvements
const char* ExpressionRound::getOpName() const { return "$round"; }
0
[]
mongo
1772b9a0393b55e6a280a35e8f0a1f75c014f301
224,432,259,582,466,270,000,000,000,000,000,000,000
3
SERVER-49404 Enforce additional checks in $arrayToObject
static tjhandle _tjInitCompress(tjinstance *this) { static unsigned char buffer[1]; unsigned char *buf = buffer; unsigned long size = 1; /* This is also straight out of example.txt */ this->cinfo.err = jpeg_std_error(&this->jerr.pub); this->jerr.pub.error_exit = my_error_exit; this->jerr.pub.output_message = my_output_message; this->jerr.emit_message = this->jerr.pub.emit_message; this->jerr.pub.emit_message = my_emit_message; this->jerr.pub.addon_message_table = turbojpeg_message_table; this->jerr.pub.first_addon_message = JMSG_FIRSTADDONCODE; this->jerr.pub.last_addon_message = JMSG_LASTADDONCODE; if (setjmp(this->jerr.setjmp_buffer)) { /* If we get here, the JPEG code has signaled an error. */ if (this) free(this); return NULL; } jpeg_create_compress(&this->cinfo); /* Make an initial call so it will create the destination manager */ jpeg_mem_dest_tj(&this->cinfo, &buf, &size, 0); this->init |= COMPRESS; return (tjhandle)this; }
0
[ "CWE-787" ]
libjpeg-turbo
3d9c64e9f8aa1ee954d1d0bb3390fc894bb84da3
90,082,465,233,716,180,000,000,000,000,000,000,000
29
tjLoadImage(): Fix int overflow/segfault w/big BMP Fixes #304
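The segfault referenced here comes from an integer overflow when computing a large image's buffer size. A hedged sketch of an overflow-checked allocation (illustrative names, not libjpeg-turbo's):

```c
#include <limits.h>
#include <stdlib.h>

/* Allocate width * height * pixelsize bytes, failing cleanly if the
 * multiplication would overflow instead of wrapping to a small value. */
static void *alloc_image(unsigned long w, unsigned long h, unsigned long ps)
{
    if (w == 0 || h == 0 || ps == 0)
        return NULL;
    if (w > ULONG_MAX / ps)           /* w * ps would overflow */
        return NULL;
    if (h > ULONG_MAX / (w * ps))     /* (w * ps) * h would overflow */
        return NULL;
    return malloc(w * ps * h);
}
```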
addRule(FileInfo *nested, TranslationTableOpcode opcode, CharsString *ruleChars, CharsString *ruleDots, TranslationTableCharacterAttributes after, TranslationTableCharacterAttributes before, TranslationTableOffset *newRuleOffset, TranslationTableRule **newRule, int noback, int nofor, TranslationTableHeader **table) { /* Add a rule to the table, using the hash function to find the start of * chains and chaining both the chars and dots strings */ int ruleSize = sizeof(TranslationTableRule) - (DEFAULTRULESIZE * CHARSIZE); if (ruleChars) ruleSize += CHARSIZE * ruleChars->length; if (ruleDots) ruleSize += CHARSIZE * ruleDots->length; if (!allocateSpaceInTable(nested, newRuleOffset, ruleSize, table)) return 0; TranslationTableRule *rule = (TranslationTableRule *)&(*table)->ruleArea[*newRuleOffset]; *newRule = rule; rule->opcode = opcode; rule->after = after; rule->before = before; if (ruleChars) memcpy(&rule->charsdots[0], &ruleChars->chars[0], CHARSIZE * (rule->charslen = ruleChars->length)); else rule->charslen = 0; if (ruleDots) memcpy(&rule->charsdots[rule->charslen], &ruleDots->chars[0], CHARSIZE * (rule->dotslen = ruleDots->length)); else rule->dotslen = 0; if (!charactersDefined(nested, rule, *table)) return 0; /* link new rule into table. */ if (opcode == CTO_SwapCc || opcode == CTO_SwapCd || opcode == CTO_SwapDd) return 1; if (opcode >= CTO_Context && opcode <= CTO_Pass4) if (!(opcode == CTO_Context && rule->charslen > 0)) { if (!nofor) if (!addForwardPassRule(newRuleOffset, rule, *table)) return 0; if (!noback) if (!addBackwardPassRule(newRuleOffset, rule, *table)) return 0; return 1; } if (!nofor) { if (rule->charslen == 1) addForwardRuleWithSingleChar(nested, newRuleOffset, rule, *table); else if (rule->charslen > 1) addForwardRuleWithMultipleChars(newRuleOffset, rule, *table); } if (!noback) { widechar *cells; int count; if (rule->opcode == CTO_Context) { cells = &rule->charsdots[0]; count = rule->charslen; } else { cells = &rule->charsdots[rule->charslen]; count = rule->dotslen; } if (count == 1) addBackwardRuleWithSingleCell(nested, *cells, newRuleOffset, rule, *table); else if (count > 1) addBackwardRuleWithMultipleCells(cells, count, newRuleOffset, rule, *table); } return 1; }
0
[ "CWE-787" ]
liblouis
fb2bfce4ed49ac4656a8f7e5b5526e4838da1dde
239,499,538,888,719,220,000,000,000,000,000,000,000
64
Fix yet another buffer overflow in the braille table parser Reported by Henri Salo Fixes #592
static zend_object_value php_zip_object_new(zend_class_entry *class_type TSRMLS_DC) /* {{{ */ { ze_zip_object *intern; zval *tmp; zend_object_value retval; intern = emalloc(sizeof(ze_zip_object)); memset(&intern->zo, 0, sizeof(zend_object)); intern->za = NULL; intern->buffers = NULL; intern->filename = NULL; intern->buffers_cnt = 0; intern->prop_handler = &zip_prop_handlers; #if ((PHP_MAJOR_VERSION == 5 && PHP_MINOR_VERSION > 1) || (PHP_MAJOR_VERSION == 5 && PHP_MINOR_VERSION == 1 && PHP_RELEASE_VERSION > 2)) zend_object_std_init(&intern->zo, class_type TSRMLS_CC); #else ALLOC_HASHTABLE(intern->zo.properties); zend_hash_init(intern->zo.properties, 0, NULL, ZVAL_PTR_DTOR, 0); intern->zo.ce = class_type; #endif zend_hash_copy(intern->zo.properties, &class_type->default_properties, (copy_ctor_func_t) zval_add_ref, (void *) &tmp, sizeof(zval *)); retval.handle = zend_objects_store_put(intern, NULL, (zend_objects_free_object_storage_t) php_zip_object_free_storage, NULL TSRMLS_CC); retval.handlers = (zend_object_handlers *) & zip_object_handlers; return retval; }
0
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
148,912,251,013,772,420,000,000,000,000,000,000,000
35
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs). Patch by Rasmus
static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; u32 tmp; card->sleep_cookie_vbase = pci_alloc_consistent(card->dev, sizeof(u32), &card->sleep_cookie_pbase); if (!card->sleep_cookie_vbase) { mwifiex_dbg(adapter, ERROR, "pci_alloc_consistent failed!\n"); return -ENOMEM; } /* Init val of Sleep Cookie */ tmp = FW_AWAKE_COOKIE; put_unaligned(tmp, card->sleep_cookie_vbase); mwifiex_dbg(adapter, INFO, "alloc_scook: sleep cookie=0x%x\n", get_unaligned(card->sleep_cookie_vbase)); return 0; }
0
[ "CWE-400", "CWE-200", "CWE-401" ]
linux
d10dcb615c8e29d403a24d35f8310a7a53e3050c
73,316,775,988,856,290,000,000,000,000,000,000,000
22
mwifiex: pcie: Fix memory leak in mwifiex_pcie_init_evt_ring In mwifiex_pcie_init_evt_ring, a new skb is allocated which should be released if mwifiex_map_pci_memory() fails. The release for skb and card->evtbd_ring_vbase is added. Fixes: 0732484b47b5 ("mwifiex: separate ring initialization and ring creation routines") Signed-off-by: Navid Emamdoost <[email protected]> Acked-by: Ganapathi Bhat <[email protected]> Signed-off-by: Kalle Valo <[email protected]>
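The leak described is the classic acquire-then-fail pattern: a buffer allocated earlier in the init path is not released when a later mapping call fails. A hedged sketch with stand-in names, not the mwifiex code:

```c
#include <stdlib.h>

struct ring { void *buf; };

/* stand-in for a mapping step that can fail */
static int map_memory(struct ring *r) { return r->buf == NULL; }

static int init_evt_ring(struct ring *r)
{
    r->buf = malloc(2048);
    if (r->buf == NULL)
        return -1;
    if (map_memory(r) != 0) {   /* failure: release what we acquired */
        free(r->buf);
        r->buf = NULL;
        return -1;
    }
    return 0;
}
```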
static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc, __u8 flags, int paylen) { return _sctp_make_chunk(asoc, SCTP_CID_DATA, flags, paylen); }
0
[ "CWE-20", "CWE-399" ]
linux
9de7922bc709eee2f609cd01d98aaedc4cf5ea74
252,531,933,404,267,680,000,000,000,000,000,000,000
5
net: sctp: fix skb_over_panic when receiving malformed ASCONF chunks Commit 6f4c618ddb0 ("SCTP : Add paramters validity check for ASCONF chunk") added basic verification of ASCONF chunks, however, it is still possible to remotely crash a server by sending a special crafted ASCONF chunk, even up to pre 2.6.12 kernels: skb_over_panic: text:ffffffffa01ea1c3 len:31056 put:30768 head:ffff88011bd81800 data:ffff88011bd81800 tail:0x7950 end:0x440 dev:<NULL> ------------[ cut here ]------------ kernel BUG at net/core/skbuff.c:129! [...] Call Trace: <IRQ> [<ffffffff8144fb1c>] skb_put+0x5c/0x70 [<ffffffffa01ea1c3>] sctp_addto_chunk+0x63/0xd0 [sctp] [<ffffffffa01eadaf>] sctp_process_asconf+0x1af/0x540 [sctp] [<ffffffff8152d025>] ? _read_unlock_bh+0x15/0x20 [<ffffffffa01e0038>] sctp_sf_do_asconf+0x168/0x240 [sctp] [<ffffffffa01e3751>] sctp_do_sm+0x71/0x1210 [sctp] [<ffffffff8147645d>] ? fib_rules_lookup+0xad/0xf0 [<ffffffffa01e6b22>] ? sctp_cmp_addr_exact+0x32/0x40 [sctp] [<ffffffffa01e8393>] sctp_assoc_bh_rcv+0xd3/0x180 [sctp] [<ffffffffa01ee986>] sctp_inq_push+0x56/0x80 [sctp] [<ffffffffa01fcc42>] sctp_rcv+0x982/0xa10 [sctp] [<ffffffffa01d5123>] ? ipt_local_in_hook+0x23/0x28 [iptable_filter] [<ffffffff8148bdc9>] ? nf_iterate+0x69/0xb0 [<ffffffff81496d10>] ? ip_local_deliver_finish+0x0/0x2d0 [<ffffffff8148bf86>] ? nf_hook_slow+0x76/0x120 [<ffffffff81496d10>] ? ip_local_deliver_finish+0x0/0x2d0 [<ffffffff81496ded>] ip_local_deliver_finish+0xdd/0x2d0 [<ffffffff81497078>] ip_local_deliver+0x98/0xa0 [<ffffffff8149653d>] ip_rcv_finish+0x12d/0x440 [<ffffffff81496ac5>] ip_rcv+0x275/0x350 [<ffffffff8145c88b>] __netif_receive_skb+0x4ab/0x750 [<ffffffff81460588>] netif_receive_skb+0x58/0x60 This can be triggered e.g., through a simple scripted nmap connection scan injecting the chunk after the handshake, for example, ... -------------- INIT[ASCONF; ASCONF_ACK] -------------> <----------- INIT-ACK[ASCONF; ASCONF_ACK] ------------ -------------------- COOKIE-ECHO --------------------> <-------------------- COOKIE-ACK --------------------- ------------------ ASCONF; UNKNOWN ------------------> ... where ASCONF chunk of length 280 contains 2 parameters ... 1) Add IP address parameter (param length: 16) 2) Add/del IP address parameter (param length: 255) ... followed by an UNKNOWN chunk of e.g. 4 bytes. Here, the Address Parameter in the ASCONF chunk is even missing, too. This is just an example and similarly-crafted ASCONF chunks could be used just as well. The ASCONF chunk passes through sctp_verify_asconf() as all parameters passed sanity checks, and after walking, we ended up successfully at the chunk end boundary, and thus may invoke sctp_process_asconf(). Parameter walking is done with WORD_ROUND() to take padding into account. In sctp_process_asconf()'s TLV processing, we may fail in sctp_process_asconf_param() e.g., due to removal of the IP address that is also the source address of the packet containing the ASCONF chunk, and thus we need to add all TLVs after the failure to our ASCONF response to remote via helper function sctp_add_asconf_response(), which basically invokes a sctp_addto_chunk() adding the error parameters to the given skb. When walking to the next parameter this time, we proceed with ... length = ntohs(asconf_param->param_hdr.length); asconf_param = (void *)asconf_param + length; ... instead of the WORD_ROUND()'ed length, thus resulting here in an off-by-one that leads to reading the follow-up garbage parameter length of 12336, and thus throwing an skb_over_panic for the reply when trying to sctp_addto_chunk() next time, which implicitly calls the skb_put() with that length. Fix it by using sctp_walk_params() [ which is also used in INIT parameter processing ] macro in the verification *and* in ASCONF processing: it will make sure we don't spill over, that we walk parameters WORD_ROUND()'ed. Moreover, we're being more defensive and guard against unknown parameter types and missized addresses. Joint work with Vlad Yasevich. Fixes: b896b82be4ae ("[SCTP] ADDIP: Support for processing incoming ASCONF_ACK chunks.") Signed-off-by: Daniel Borkmann <[email protected]> Signed-off-by: Vlad Yasevich <[email protected]> Acked-by: Neil Horman <[email protected]> Signed-off-by: David S. Miller <[email protected]>
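The off-by-one described above comes from advancing by the raw parameter length instead of the word-rounded (padded) one. A hedged sketch of a bounded, padded TLV walk; the real fix uses the sctp_walk_params() macro, and these names are illustrative:

```c
#include <stddef.h>
#include <stdint.h>

#define WORD_ROUND(n) (((n) + 3) & ~(size_t)3)   /* pad to 4-byte boundary */

/* Walk TLV parameters: the big-endian length includes the 4-byte header;
 * advance by the padded length and never read past 'total'. */
static int walk_params(const uint8_t *p, size_t total)
{
    size_t off = 0;
    while (total - off >= 4) {
        size_t len = ((size_t)p[off + 2] << 8) | p[off + 3];
        if (len < 4 || len > total - off)
            return -1;                 /* truncated or missized parameter */
        size_t adv = WORD_ROUND(len);
        if (adv > total - off)         /* final padding may be absent */
            adv = total - off;
        off += adv;                    /* padded advance, not raw length */
    }
    return 0;
}
```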
findmust(p, g) struct parse *p; register struct re_guts *g; { register sop *scan; sop *UNINIT_VAR(start); register sop *UNINIT_VAR(newstart); register sopno newlen; register sop s; register char *cp; register sopno i; /* avoid making error situations worse */ if (p->error != 0) return; /* find the longest OCHAR sequence in strip */ newlen = 0; scan = g->strip + 1; do { s = *scan++; switch (OP(s)) { case OCHAR: /* sequence member */ if (newlen == 0) /* new sequence */ newstart = scan - 1; newlen++; break; case OPLUS_: /* things that don't break one */ case OLPAREN: case ORPAREN: break; case OQUEST_: /* things that must be skipped */ case OCH_: scan--; do { scan += OPND(s); s = *scan; /* assert() interferes w debug printouts */ if (OP(s) != O_QUEST && OP(s) != O_CH && OP(s) != OOR2) { g->iflags |= BAD; return; } } while (OP(s) != O_QUEST && OP(s) != O_CH); /* fallthrough */ default: /* things that break a sequence */ if (newlen > g->mlen) { /* ends one */ start = newstart; g->mlen = newlen; } newlen = 0; break; } } while (OP(s) != OEND); if (g->mlen == 0) /* there isn't one */ return; /* turn it into a character string */ g->must = malloc((size_t)g->mlen + 1); if (g->must == NULL) { /* argh; just forget it */ g->mlen = 0; return; } cp = g->must; scan = start; for (i = g->mlen; i > 0; i--) { while (OP(s = *scan++) != OCHAR) continue; assert(cp < g->must + g->mlen); *cp++ = (char)OPND(s); } assert(cp == g->must + g->mlen); *cp++ = '\0'; /* just on general principles */ }
0
[ "CWE-190" ]
mysql-server
dc45e408250c582eb532417a42cef5b5a8e2fe77
203,452,294,913,835,160,000,000,000,000,000,000,000
75
Bug#20642505: HENRY SPENCER REGULAR EXPRESSIONS (REGEX) LIBRARY The MySQL server uses Henry Spencer's library for regular expressions to support the REGEXP/RLIKE string operator. This changeset adapts a recent fix from the upstream for better 32-bit compatiblity. (Note that we cannot simply use the current upstream version as a drop-in replacement for the version used by the server as the latter has been extended to understand MySQL charsets etc.)
static int vqa_decode_chunk(VqaContext *s, AVFrame *frame) { unsigned int chunk_type; unsigned int chunk_size; int byte_skip; unsigned int index = 0; int i; unsigned char r, g, b; int index_shift; int res; int cbf0_chunk = -1; int cbfz_chunk = -1; int cbp0_chunk = -1; int cbpz_chunk = -1; int cpl0_chunk = -1; int cplz_chunk = -1; int vptz_chunk = -1; int x, y; int lines = 0; int pixel_ptr; int vector_index = 0; int lobyte = 0; int hibyte = 0; int lobytes = 0; int hibytes = s->decode_buffer_size / 2; /* first, traverse through the frame and find the subchunks */ while (bytestream2_get_bytes_left(&s->gb) >= 8) { chunk_type = bytestream2_get_be32u(&s->gb); index = bytestream2_tell(&s->gb); chunk_size = bytestream2_get_be32u(&s->gb); switch (chunk_type) { case CBF0_TAG: cbf0_chunk = index; break; case CBFZ_TAG: cbfz_chunk = index; break; case CBP0_TAG: cbp0_chunk = index; break; case CBPZ_TAG: cbpz_chunk = index; break; case CPL0_TAG: cpl0_chunk = index; break; case CPLZ_TAG: cplz_chunk = index; break; case VPTZ_TAG: vptz_chunk = index; break; default: av_log(s->avctx, AV_LOG_ERROR, "Found unknown chunk type: %s (%08X)\n", av_fourcc2str(av_bswap32(chunk_type)), chunk_type); break; } byte_skip = chunk_size & 0x01; bytestream2_skip(&s->gb, chunk_size + byte_skip); } /* next, deal with the palette */ if ((cpl0_chunk != -1) && (cplz_chunk != -1)) { /* a chunk should not have both chunk types */ av_log(s->avctx, AV_LOG_ERROR, "problem: found both CPL0 and CPLZ chunks\n"); return AVERROR_INVALIDDATA; } /* decompress the palette chunk */ if (cplz_chunk != -1) { /* yet to be handled */ } /* convert the RGB palette into the machine's endian format */ if (cpl0_chunk != -1) { bytestream2_seek(&s->gb, cpl0_chunk, SEEK_SET); chunk_size = bytestream2_get_be32(&s->gb); /* sanity check the palette size */ if (chunk_size / 3 > 256 || chunk_size > bytestream2_get_bytes_left(&s->gb)) { av_log(s->avctx, AV_LOG_ERROR, "problem: found a palette chunk with %d colors\n", chunk_size / 3); return AVERROR_INVALIDDATA; } for (i = 0; i < chunk_size / 3; i++) { /* scale by 4 to transform 6-bit palette -> 8-bit */ r = bytestream2_get_byteu(&s->gb) * 4; g = bytestream2_get_byteu(&s->gb) * 4; b = bytestream2_get_byteu(&s->gb) * 4; s->palette[i] = 0xFFU << 24 | r << 16 | g << 8 | b; s->palette[i] |= s->palette[i] >> 6 & 0x30303; } } /* next, look for a full codebook */ if ((cbf0_chunk != -1) && (cbfz_chunk != -1)) { /* a chunk should not have both chunk types */ av_log(s->avctx, AV_LOG_ERROR, "problem: found both CBF0 and CBFZ chunks\n"); return AVERROR_INVALIDDATA; } /* decompress the full codebook chunk */ if (cbfz_chunk != -1) { bytestream2_seek(&s->gb, cbfz_chunk, SEEK_SET); chunk_size = bytestream2_get_be32(&s->gb); if ((res = decode_format80(s, chunk_size, s->codebook, s->codebook_size, 0)) < 0) return res; } /* copy a full codebook */ if (cbf0_chunk != -1) { bytestream2_seek(&s->gb, cbf0_chunk, SEEK_SET); chunk_size = bytestream2_get_be32(&s->gb); /* sanity check the full codebook size */ if (chunk_size > MAX_CODEBOOK_SIZE) { av_log(s->avctx, AV_LOG_ERROR, "problem: CBF0 chunk too large (0x%X bytes)\n", chunk_size); return AVERROR_INVALIDDATA; } bytestream2_get_buffer(&s->gb, s->codebook, chunk_size); } /* decode the frame */ if (vptz_chunk == -1) { /* something is wrong if there is no VPTZ chunk */ av_log(s->avctx, AV_LOG_ERROR, "problem: no VPTZ chunk found\n"); return AVERROR_INVALIDDATA; } bytestream2_seek(&s->gb, vptz_chunk, SEEK_SET); chunk_size = bytestream2_get_be32(&s->gb); if ((res = decode_format80(s, chunk_size, s->decode_buffer, s->decode_buffer_size, 1)) < 0) return res; /* render the final PAL8 frame */ if (s->vector_height == 4) index_shift = 4; else index_shift = 3; for (y = 0; y < s->height; y += s->vector_height) { for (x = 0; x < s->width; x += 4, lobytes++, hibytes++) { pixel_ptr = y * frame->linesize[0] + x; /* get the vector index, the method for which varies according to * VQA file version */ switch (s->vqa_version) { case 1: lobyte = s->decode_buffer[lobytes * 2]; hibyte = s->decode_buffer[(lobytes * 2) + 1]; vector_index = ((hibyte << 8) | lobyte) >> 3; vector_index <<= index_shift; lines = s->vector_height; /* uniform color fill - a quick hack */ if (hibyte == 0xFF) { while (lines--) { frame->data[0][pixel_ptr + 0] = 255 - lobyte; frame->data[0][pixel_ptr + 1] = 255 - lobyte; frame->data[0][pixel_ptr + 2] = 255 - lobyte; frame->data[0][pixel_ptr + 3] = 255 - lobyte; pixel_ptr += frame->linesize[0]; } lines=0; } break; case 2: lobyte = s->decode_buffer[lobytes]; hibyte = s->decode_buffer[hibytes]; vector_index = (hibyte << 8) | lobyte; vector_index <<= index_shift; lines = s->vector_height; break; case 3: /* not implemented yet */ lines = 0; break; } while (lines--) { frame->data[0][pixel_ptr + 0] = s->codebook[vector_index++]; frame->data[0][pixel_ptr + 1] = s->codebook[vector_index++]; frame->data[0][pixel_ptr + 2] = s->codebook[vector_index++]; frame->data[0][pixel_ptr + 3] = s->codebook[vector_index++]; pixel_ptr += frame->linesize[0]; } } } /* handle partial codebook */ if ((cbp0_chunk != -1) && (cbpz_chunk != -1)) { /* a chunk should not have both chunk types */ av_log(s->avctx, AV_LOG_ERROR, "problem: found both CBP0 and CBPZ chunks\n"); return AVERROR_INVALIDDATA; } if (cbp0_chunk != -1) { bytestream2_seek(&s->gb, cbp0_chunk, SEEK_SET); chunk_size = bytestream2_get_be32(&s->gb); if (chunk_size > MAX_CODEBOOK_SIZE - s->next_codebook_buffer_index) { av_log(s->avctx, AV_LOG_ERROR, "cbp0 chunk too large (%u bytes)\n", chunk_size); return AVERROR_INVALIDDATA; } /* accumulate partial codebook */ bytestream2_get_buffer(&s->gb, &s->next_codebook_buffer[s->next_codebook_buffer_index], chunk_size); s->next_codebook_buffer_index += chunk_size; s->partial_countdown--; if (s->partial_countdown <= 0) { /* time to replace codebook */ memcpy(s->codebook, s->next_codebook_buffer, s->next_codebook_buffer_index); /* reset accounting */ s->next_codebook_buffer_index = 0; s->partial_countdown = s->partial_count; } } if (cbpz_chunk != -1) { bytestream2_seek(&s->gb, cbpz_chunk, SEEK_SET); chunk_size = bytestream2_get_be32(&s->gb); if (chunk_size > MAX_CODEBOOK_SIZE - s->next_codebook_buffer_index) { av_log(s->avctx, AV_LOG_ERROR, "cbpz chunk too large (%u bytes)\n", chunk_size); return AVERROR_INVALIDDATA; } /* accumulate partial codebook */ bytestream2_get_buffer(&s->gb, &s->next_codebook_buffer[s->next_codebook_buffer_index], chunk_size); s->next_codebook_buffer_index += chunk_size; s->partial_countdown--; if (s->partial_countdown <= 0) { bytestream2_init(&s->gb, s->next_codebook_buffer, s->next_codebook_buffer_index); /* decompress codebook */ if ((res = decode_format80(s, s->next_codebook_buffer_index, s->codebook, s->codebook_size, 0)) < 0) return res; /* reset accounting */ s->next_codebook_buffer_index = 0; s->partial_countdown = s->partial_count; } } return 0; }
0
[ "CWE-20", "CWE-129", "CWE-787" ]
FFmpeg
02f909dc24b1f05cfbba75077c7707b905e63cd2
303,893,863,622,869,220,000,000,000,000,000,000,000
282
avcodec/vqavideo: Set video size Fixes: out of array access Fixes: 15919/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_VQA_fuzzer-5657368257363968 Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg Signed-off-by: Michael Niedermayer <[email protected]>
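The fix named in the message validates the video dimensions up front so that later writes stay inside the allocated frame. Below is a minimal sketch of that idea, not FFmpeg's actual fix: the struct fields, the 4096 limit, and the function name are all illustrative assumptions.

#include <errno.h>

/* Hedged sketch: validate container-supplied dimensions before any decode
 * buffer is sized from them. Limits and field names are made up. */
struct vqa_dims {
    int width, height;
    int vector_width;    /* always 4 in VQA streams */
    int vector_height;   /* 2 or 4 */
};

static int vqa_validate_dimensions(struct vqa_dims *s, int w, int h)
{
    if (w <= 0 || h <= 0 || w > 4096 || h > 4096)
        return -EINVAL;                   /* reject absurd sizes */
    if (w % s->vector_width || h % s->vector_height)
        return -EINVAL;                   /* must tile evenly into vectors */
    s->width = w;
    s->height = h;
    return 0;
}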
static int rds_release(struct socket *sock) { struct sock *sk = sock->sk; struct rds_sock *rs; if (!sk) goto out; rs = rds_sk_to_rs(sk); sock_orphan(sk); /* Note - rds_clear_recv_queue grabs rs_recv_lock, so * that ensures the recv path has completed messing * with the socket. */ rds_clear_recv_queue(rs); rds_cong_remove_socket(rs); rds_remove_bound(rs); rds_send_drop_to(rs, NULL); rds_rdma_drop_keys(rs); rds_notify_queue_get(rs, NULL); spin_lock_bh(&rds_sock_lock); list_del_init(&rs->rs_item); rds_sock_count--; spin_unlock_bh(&rds_sock_lock); rds_trans_put(rs->rs_transport); sock->sk = NULL; sock_put(sk); out: return 0; }
0
[ "CWE-787" ]
linux
780e982905bef61d13496d9af5310bf4af3a64d3
163,675,534,786,259,260,000,000,000,000,000,000,000
35
RDS: validate the requested traces user input against max supported A larger-than-supported value can lead to an array read/write overflow. Reported-by: Colin Ian King <[email protected]> Signed-off-by: Santosh Shilimkar <[email protected]> Signed-off-by: David S. Miller <[email protected]>
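The rule in the message, clamp a user-supplied count against what the fixed-size array can hold before using it, looks roughly like this sketch; the struct, the cap, and the names are illustrative assumptions, not the RDS code.

#include <errno.h>
#include <string.h>

#define TRACE_MAX 8    /* illustrative cap, not the kernel's constant */

struct trace_opts {
    unsigned int rx_traces;            /* user-requested count */
    unsigned int rx_trace[TRACE_MAX];
};

/* Hedged sketch: reject an over-large count before it becomes a loop
 * bound or an array index. */
static int set_rx_traces(struct trace_opts *dst, const struct trace_opts *user)
{
    if (user->rx_traces > TRACE_MAX)
        return -EFAULT;                /* refuse rather than clamp */
    dst->rx_traces = user->rx_traces;
    memcpy(dst->rx_trace, user->rx_trace,
           user->rx_traces * sizeof(dst->rx_trace[0]));
    return 0;
}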
EC_Group::~EC_Group() { // shared_ptr possibly freed here }
0
[ "CWE-200" ]
botan
48fc8df51d99f9d8ba251219367b3d629cc848e3
124,869,680,310,286,300,000,000,000,000,000,000,000
4
Address DSA/ECDSA side channel
HttpTransact::is_request_cache_lookupable(State* s) { // ummm, someone has already decided that proxy should tunnel if (s->current.mode == TUNNELLING_PROXY) { return false; } // don't bother with remaining checks if we already did a cache lookup if (s->cache_info.lookup_count > 0) { return true; } // is cache turned on? if (!s->txn_conf->cache_http) { SET_VIA_STRING(VIA_DETAIL_TUNNEL, VIA_DETAIL_TUNNEL_CACHE_OFF); return false; } // GET, HEAD, POST, DELETE, and PUT are all cache lookupable if (!HttpTransactHeaders::is_method_cache_lookupable(s->method) && s->api_req_cacheable == false) { SET_VIA_STRING(VIA_DETAIL_TUNNEL, VIA_DETAIL_TUNNEL_METHOD); return false; } // don't cache page if URL "looks dynamic" and this filter is enabled // We can do the check in is_response_cacheable() or here. // It may be more efficient if we are not going to cache dynamic looking urls // (the default config?) since we don't even need to do cache lookup. // So for the time being, it'll be left here. // If url looks dynamic but a ttl is set, request is cache lookupable if ((!s->txn_conf->cache_urls_that_look_dynamic) && url_looks_dynamic(s->hdr_info.client_request.url_get()) && (s->cache_control.ttl_in_cache <= 0)) { // We do not want to forward the request for a dynamic URL onto the // origin server if the value of the Max-Forwards header is zero. int max_forwards = -1; if (s->hdr_info.client_request.presence(MIME_PRESENCE_MAX_FORWARDS)) { MIMEField *max_forwards_f = s->hdr_info.client_request.field_find(MIME_FIELD_MAX_FORWARDS, MIME_LEN_MAX_FORWARDS); if (max_forwards_f) max_forwards = max_forwards_f->value_get_int(); } if (max_forwards != 0) { SET_VIA_STRING(VIA_DETAIL_TUNNEL, VIA_DETAIL_TUNNEL_URL); return false; } } // Don't look in cache if it's a RANGE request but the cache is not enabled for RANGE. if (!s->txn_conf->cache_range_lookup && s->hdr_info.client_request.presence(MIME_PRESENCE_RANGE)) { SET_VIA_STRING(VIA_DETAIL_TUNNEL, VIA_DETAIL_TUNNEL_HEADER_FIELD); return false; } // Even with "no-cache" directive, we want to do a cache lookup // because we need to update our cached copy. // Client request "no-cache" directive is handle elsewhere: // update_cache_control_information_from_config() return true; }
0
[ "CWE-119" ]
trafficserver
8b5f0345dade6b2822d9b52c8ad12e63011a5c12
151,551,422,465,381,860,000,000,000,000,000,000,000
58
Fix the internal buffer sizing. Thanks to Sudheer for helping isolate this bug
AsyncPipeOp(async_op_t op, HANDLE pipe, LPVOID buffer, DWORD size, DWORD count, LPHANDLE events) { int i; BOOL success; HANDLE io_event; DWORD res, bytes = 0; OVERLAPPED overlapped; LPHANDLE handles = NULL; io_event = InitOverlapped(&overlapped); if (!io_event) { goto out; } handles = malloc((count + 1) * sizeof(HANDLE)); if (!handles) { goto out; } if (op == write) { success = WriteFile(pipe, buffer, size, NULL, &overlapped); } else { success = ReadFile(pipe, buffer, size, NULL, &overlapped); } if (!success && GetLastError() != ERROR_IO_PENDING && GetLastError() != ERROR_MORE_DATA) { goto out; } handles[0] = io_event; for (i = 0; i < count; i++) { handles[i + 1] = events[i]; } res = WaitForMultipleObjects(count + 1, handles, FALSE, op == peek ? INFINITE : IO_TIMEOUT); if (res != WAIT_OBJECT_0) { CancelIo(pipe); goto out; } if (op == peek) { PeekNamedPipe(pipe, NULL, 0, NULL, &bytes, NULL); } else { GetOverlappedResult(pipe, &overlapped, &bytes, TRUE); } out: CloseHandleEx(&io_event); free(handles); return bytes; }
0
[ "CWE-415" ]
openvpn
1394192b210cb3c6624a7419bcf3ff966742e79b
186,726,517,987,100,940,000,000,000,000,000,000,000
62
Fix potential double-free() in Interactive Service (CVE-2018-9336) Malformed input data on the service pipe towards the OpenVPN interactive service (normally used by the OpenVPN GUI to request openvpn instances from the service) can result in a double free() in the error handling code. This usually only leads to a process crash (DoS by an unprivileged local account) but since it could possibly lead to memory corruption if it happens while multiple other threads are active at the same time, CVE-2018-9336 has been assigned to acknowledge this risk. Fix by ensuring that sud->directory is set to NULL in GetStartUpData() for all error cases (thus not being free()ed in FreeStartupData()). Rewrite control flow to use an explicit error label for error exit. Discovered and reported by Jacob Baines <[email protected]>. CVE: 2018-9336 Signed-off-by: Gert Doering <[email protected]> Acked-by: Selva Nair <[email protected]> Message-Id: <[email protected]> URL: https://www.mail-archive.com/search?l=mid&[email protected] Signed-off-by: Gert Doering <[email protected]>
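The fix pattern the message describes is a classic one: zero the pointers first, exit through a single error label, and make sure the cleanup path only ever sees a valid pointer or NULL. A minimal sketch under those assumptions; the struct and function names are illustrative, not OpenVPN's.

#include <stdlib.h>
#include <string.h>

struct startup_data {
    char *directory;
    char *options;
};

static void free_startup_data(struct startup_data *sud)
{
    free(sud->directory);   /* free(NULL) is a no-op, so always safe */
    free(sud->options);
}

/* Hedged sketch: one error label, pointers zeroed up front, and the
 * struct re-zeroed after cleanup so no stale pointer survives. */
static int get_startup_data(struct startup_data *sud,
                            const char *dir, const char *opts)
{
    memset(sud, 0, sizeof(*sud));       /* all pointers start as NULL */

    sud->directory = strdup(dir);
    if (!sud->directory)
        goto err;
    sud->options = strdup(opts);
    if (!sud->options)
        goto err;
    return 0;

err:
    free_startup_data(sud);
    memset(sud, 0, sizeof(*sud));       /* leave nothing dangling */
    return -1;
}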
static int ZEND_FASTCALL ZEND_CONCAT_SPEC_TMP_TMP_HANDLER(ZEND_OPCODE_HANDLER_ARGS) { zend_op *opline = EX(opline); zend_free_op free_op1, free_op2; concat_function(&EX_T(opline->result.u.var).tmp_var, _get_zval_ptr_tmp(&opline->op1, EX(Ts), &free_op1 TSRMLS_CC), _get_zval_ptr_tmp(&opline->op2, EX(Ts), &free_op2 TSRMLS_CC) TSRMLS_CC); zval_dtor(free_op1.var); zval_dtor(free_op2.var); ZEND_VM_NEXT_OPCODE(); }
0
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
125,928,023,668,019,400,000,000,000,000,000,000,000
12
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs). Patch by Rasmus
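Detecting an embedded NUL in a path comes down to checking the counted length against where the first '\0' occurs; PHP's actual fix added its own macro, so the helper below is only a hedged illustration of the check.

#include <string.h>

/* Hedged sketch: a counted buffer holds an embedded '\0' exactly when
 * memchr() finds one inside the reported length. Such a path must be
 * rejected before any C filesystem API silently truncates it. */
static int path_contains_nul(const char *path, size_t len)
{
    return memchr(path, '\0', len) != NULL;
}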
static RBinObject *r_bin_file_object_find_by_id(RBinFile *binfile, ut32 binobj_id) { RBinObject *obj; RListIter *iter; if (binfile) { r_list_foreach (binfile->objs, iter, obj) { if (obj->id == binobj_id) { return obj; } } } return NULL; }
0
[ "CWE-125" ]
radare2
d31c4d3cbdbe01ea3ded16a584de94149ecd31d9
39,150,974,374,296,024,000,000,000,000,000,000,000
13
Fix #8748 - Fix oobread on string search
static void ieee80211_tx_latency_start_msrmnt(struct ieee80211_local *local, struct sk_buff *skb) { struct timespec skb_arv; struct ieee80211_tx_latency_bin_ranges *tx_latency; tx_latency = rcu_dereference(local->tx_latency); if (!tx_latency) return; ktime_get_ts(&skb_arv); skb->tstamp = ktime_set(skb_arv.tv_sec, skb_arv.tv_nsec); }
0
[ "CWE-362" ]
linux
1d147bfa64293b2723c4fec50922168658e613ba
117,586,795,337,251,320,000,000,000,000,000,000,000
13
mac80211: fix AP powersave TX vs. wakeup race There is a race between the TX path and the STA wakeup: while a station is sleeping, mac80211 buffers frames until it wakes up, then the frames are transmitted. However, the RX and TX paths are concurrent, so the packet indicating wakeup can be processed while a packet is being transmitted. This can lead to a situation where the buffered frames list is emptied on one side, while a frame is being added on the other side, as the station is still seen as sleeping in the TX path. As a result, the newly added frame will not be sent anytime soon. It might be sent much later (and out of order) when the station goes to sleep and wakes up the next time. Additionally, it can lead to the crash below. Fix all this by synchronising both paths with a new lock. Both paths are not fastpaths since they handle PS situations. In a later patch we'll remove the extra skb queue locks to reduce locking overhead. BUG: unable to handle kernel NULL pointer dereference at 000000b0 IP: [<ff6f1791>] ieee80211_report_used_skb+0x11/0x3e0 [mac80211] *pde = 00000000 Oops: 0000 [#1] SMP DEBUG_PAGEALLOC EIP: 0060:[<ff6f1791>] EFLAGS: 00210282 CPU: 1 EIP is at ieee80211_report_used_skb+0x11/0x3e0 [mac80211] EAX: e5900da0 EBX: 00000000 ECX: 00000001 EDX: 00000000 ESI: e41d00c0 EDI: e5900da0 EBP: ebe458e4 ESP: ebe458b0 DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068 CR0: 8005003b CR2: 000000b0 CR3: 25a78000 CR4: 000407d0 DR0: 00000000 DR1: 00000000 DR2: 00000000 DR3: 00000000 DR6: ffff0ff0 DR7: 00000400 Process iperf (pid: 3934, ti=ebe44000 task=e757c0b0 task.ti=ebe44000) iwlwifi 0000:02:00.0: I iwl_pcie_enqueue_hcmd Sending command LQ_CMD (#4e), seq: 0x0903, 92 bytes at 3[3]:9 Stack: e403b32c ebe458c4 00200002 00200286 e403b338 ebe458cc c10960bb e5900da0 ff76a6ec ebe458d8 00000000 e41d00c0 e5900da0 ebe458f0 ff6f1b75 e403b210 ebe4598c ff723dc1 00000000 ff76a6ec e597c978 e403b758 00000002 00000002 Call Trace: [<ff6f1b75>] ieee80211_free_txskb+0x15/0x20 [mac80211] [<ff723dc1>] invoke_tx_handlers+0x1661/0x1780 [mac80211] [<ff7248a5>] ieee80211_tx+0x75/0x100 [mac80211] [<ff7249bf>] ieee80211_xmit+0x8f/0xc0 [mac80211] [<ff72550e>] ieee80211_subif_start_xmit+0x4fe/0xe20 [mac80211] [<c149ef70>] dev_hard_start_xmit+0x450/0x950 [<c14b9aa9>] sch_direct_xmit+0xa9/0x250 [<c14b9c9b>] __qdisc_run+0x4b/0x150 [<c149f732>] dev_queue_xmit+0x2c2/0xca0 Cc: [email protected] Reported-by: Yaara Rozenblum <[email protected]> Signed-off-by: Emmanuel Grumbach <[email protected]> Reviewed-by: Stanislaw Gruszka <[email protected]> [reword commit log, use a separate lock] Signed-off-by: Johannes Berg <[email protected]>
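The shape of the fix, one lock shared by the TX-buffering path and the wakeup path so the sleeping check and the queue flush cannot interleave, is sketched below with a pthread mutex; the kernel uses a spinlock on the station struct, and all names here are illustrative.

#include <pthread.h>
#include <stdbool.h>

struct frame { struct frame *next; };

struct sta {
    pthread_mutex_t ps_lock;    /* serializes buffering vs. wakeup */
    bool sleeping;
    struct frame *buffered;     /* frames held while the STA sleeps */
};

/* TX path: the sleep check and the enqueue are one critical section,
 * so a concurrent wakeup cannot drain the list between the two. */
static bool buffer_if_sleeping(struct sta *sta, struct frame *f)
{
    bool buffered = false;

    pthread_mutex_lock(&sta->ps_lock);
    if (sta->sleeping) {
        f->next = sta->buffered;
        sta->buffered = f;
        buffered = true;
    }
    pthread_mutex_unlock(&sta->ps_lock);
    return buffered;            /* false: caller transmits immediately */
}

/* Wakeup path: flip the flag and detach the list atomically. */
static struct frame *wake_and_take_frames(struct sta *sta)
{
    struct frame *list;

    pthread_mutex_lock(&sta->ps_lock);
    sta->sleeping = false;
    list = sta->buffered;
    sta->buffered = NULL;
    pthread_mutex_unlock(&sta->ps_lock);
    return list;                /* caller flushes the drained frames */
}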
UTI_CreateDirAndParents(const char *path, mode_t mode, uid_t uid, gid_t gid) { char *p; int i, j, k, last; /* Don't try to create current directory */ if (!strcmp(path, ".")) return 1; p = (char *)Malloc(1 + strlen(path)); i = k = 0; while (1) { p[i++] = path[k++]; if (path[k] == '/' || !path[k]) { /* Check whether its end of string, a trailing / or group of / */ last = 1; j = k; while (path[j]) { if (path[j] != '/') { /* Pick up a / into p[] thru the assignment at the top of the loop */ k = j - 1; last = 0; break; } j++; } p[i] = 0; if (!create_dir(p, last ? mode : 0755, last ? uid : 0, last ? gid : 0)) { Free(p); return 0; } if (last) break; } if (!path[k]) break; } Free(p); return 1; }
0
[ "CWE-59" ]
chrony
7a4c396bba8f92a3ee8018620983529152050c74
248,945,075,384,653,170,000,000,000,000,000,000,000
47
util: add functions for common file operations Add a function to open a file for reading, writing, or appending. In uppercase modes errors are handled as fatal, i.e. the caller doesn't need to check for NULL. To avoid string manipulations in the callers, the function accepts an optional directory and suffix. New files are created with specified permissions, which will be needed for saving keys. The O_EXCL flag is used in the writing mode to make sure a new file is created (on filesystems that support it). Also, add a function to rename a temporary file by changing its suffix, and a function to remove a file. All functions log all errors, at least as debug messages.
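As a usage-level sketch of the API the message outlines: fatal-on-error open, directory/suffix assembly, O_EXCL for newly written files, and rename-by-suffix. Everything below is an approximation inferred from the message, not chrony's code.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>

/* Hedged sketch: create "<dir>/<name><suffix>" with O_EXCL so an
 * existing file (or a symlink planted there) is never overwritten. */
static int create_file_fatal(const char *dir, const char *name,
                             const char *suffix, mode_t perms)
{
    char path[4096];
    int fd;

    snprintf(path, sizeof(path), "%s/%s%s", dir, name, suffix);
    fd = open(path, O_WRONLY | O_CREAT | O_EXCL, perms);
    if (fd < 0) {
        perror(path);
        exit(1);        /* "fatal" mode: callers never check for errors */
    }
    return fd;
}

/* Rename a temporary file by swapping its suffix, e.g. ".tmp" -> "". */
static int rename_suffix(const char *dir, const char *name,
                         const char *old_sfx, const char *new_sfx)
{
    char from[4096], to[4096];

    snprintf(from, sizeof(from), "%s/%s%s", dir, name, old_sfx);
    snprintf(to, sizeof(to), "%s/%s%s", dir, name, new_sfx);
    return rename(from, to);
}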
static int tcm_loop_make_nexus( struct tcm_loop_tpg *tl_tpg, const char *name) { struct se_portal_group *se_tpg; struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; struct tcm_loop_nexus *tl_nexus; int ret = -ENOMEM; if (tl_tpg->tl_hba->tl_nexus) { printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n"); return -EEXIST; } se_tpg = &tl_tpg->tl_se_tpg; tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL); if (!tl_nexus) { printk(KERN_ERR "Unable to allocate struct tcm_loop_nexus\n"); return -ENOMEM; } /* * Initialize the struct se_session pointer */ tl_nexus->se_sess = transport_init_session(); if (IS_ERR(tl_nexus->se_sess)) { ret = PTR_ERR(tl_nexus->se_sess); goto out; } /* * Since we are running in 'demo mode' this call with generate a * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI * Initiator port name of the passed configfs group 'name'. */ tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl( se_tpg, (unsigned char *)name); if (!tl_nexus->se_sess->se_node_acl) { transport_free_session(tl_nexus->se_sess); goto out; } /* * Now, register the SAS I_T Nexus as active with the call to * transport_register_session() */ __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl, tl_nexus->se_sess, tl_nexus); tl_tpg->tl_hba->tl_nexus = tl_nexus; printk(KERN_INFO "TCM_Loop_ConfigFS: Established I_T Nexus to emulated" " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), name); return 0; out: kfree(tl_nexus); return ret; }
0
[ "CWE-119", "CWE-787" ]
linux
12f09ccb4612734a53e47ed5302e0479c10a50f8
265,289,047,090,621,370,000,000,000,000,000,000,000
55
loopback: off by one in tcm_loop_make_naa_tpg() There is an off-by-one 'tgpt' check in tcm_loop_make_naa_tpg() that could result in memory corruption. Signed-off-by: Dan Carpenter <[email protected]> Signed-off-by: Nicholas A. Bellinger <[email protected]>
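An off-by-one on an index bound is a one-character bug: '>' admits an index equal to the array size. A minimal illustration, with the array size and names made up for the sketch:

#include <stddef.h>

#define TPGS_PER_HBA 32                   /* illustrative array size */

struct tpg;                               /* opaque for the sketch */
static struct tpg *tpgs[TPGS_PER_HBA];

/* Hedged sketch: with '>', an index equal to TPGS_PER_HBA slips through
 * and tpgs[idx] touches one slot past the array; '>=' fixes it. */
static struct tpg *lookup_tpg(unsigned long idx)
{
    if (idx >= TPGS_PER_HBA)              /* was: idx > TPGS_PER_HBA */
        return NULL;
    return tpgs[idx];
}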
static void detach_mnt(struct mount *mnt, struct path *old_path) { old_path->dentry = mnt->mnt_mountpoint; old_path->mnt = &mnt->mnt_parent->mnt; mnt->mnt_parent = mnt; mnt->mnt_mountpoint = mnt->mnt.mnt_root; list_del_init(&mnt->mnt_child); list_del_init(&mnt->mnt_hash); dentry_reset_mounted(old_path->dentry); }
0
[ "CWE-284", "CWE-264" ]
linux
3151527ee007b73a0ebd296010f1c0454a919c7d
149,889,340,947,601,300,000,000,000,000,000,000,000
10
userns: Don't allow creation if the user is chrooted Guarantee that the policy of which files may be accessed that is established by setting the root directory will not be violated by user namespaces by verifying that the root directory points to the root of the mount namespace at the time of user namespace creation. Changing the root is a privileged operation, and as a matter of policy it serves to limit unprivileged processes to files below the current root directory. For reasons of simplicity and comprehensibility the privilege to change the root directory is gated solely on the CAP_SYS_CHROOT capability in the user namespace. Therefore when creating a user namespace we must ensure that the policy of which files may be accessed cannot be violated by changing the root directory. Anyone who runs a process in a chroot and would like to use user namespaces can set up the same view of filesystems with a mount namespace instead. As a result, this is not a practical limitation for using user namespaces. Cc: [email protected] Acked-by: Serge Hallyn <[email protected]> Reported-by: Andy Lutomirski <[email protected]> Signed-off-by: "Eric W. Biederman" <[email protected]>
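The enforcement point described above reduces to one check at namespace-creation time: refuse if the caller's root differs from the root of its mount namespace. A hedged sketch; the helper is a stub standing in for the kernel's actual comparison.

#include <errno.h>
#include <stdbool.h>

/* Stub for the sketch; the real check compares the task's root
 * directory against the root of its mount namespace. */
static bool task_is_chrooted(void)
{
    return false;
}

/* Hedged sketch: gate user-namespace creation on the chroot check so
 * the file-access policy a chroot establishes cannot be escaped. */
static int create_user_ns_checked(void)
{
    if (task_is_chrooted())
        return -EPERM;
    /* ... proceed with the actual namespace setup ... */
    return 0;
}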
static void skfp_ctl_set_multicast_list(struct net_device *dev) { struct s_smc *smc = netdev_priv(dev); skfddi_priv *bp = &smc->os; unsigned long Flags; spin_lock_irqsave(&bp->DriverLock, Flags); skfp_ctl_set_multicast_list_wo_lock(dev); spin_unlock_irqrestore(&bp->DriverLock, Flags); return; } // skfp_ctl_set_multicast_list
0
[ "CWE-264" ]
linux-2.6
c25b9abbc2c2c0da88e180c3933d6e773245815a
94,407,343,372,598,520,000,000,000,000,000,000,000
11
drivers/net/skfp: if !capable(CAP_NET_ADMIN): inverted logic Fix inverted logic Signed-off-by: Roel Kluin <[email protected]> Signed-off-by: David S. Miller <[email protected]>
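The bug class here is a privilege check with the condition flipped, rejecting exactly the callers who are allowed. Reduced to a sketch, with a stub in place of capable(CAP_NET_ADMIN):

#include <errno.h>
#include <stdbool.h>

static bool capable_net_admin(void)
{
    return true;    /* stub; the kernel asks capable(CAP_NET_ADMIN) */
}

static int privileged_op_checked(void)
{
    /* Buggy form: if (capable_net_admin()) return -EPERM;
     * which locks out precisely the privileged callers. */
    if (!capable_net_admin())
        return -EPERM;          /* corrected: reject the unprivileged */
    /* ... perform the privileged operation ... */
    return 0;
}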
repodata_unset(Repodata *data, Id solvid, Id keyname) { Repokey key; key.name = keyname; key.type = REPOKEY_TYPE_DELETED; key.size = 0; key.storage = KEY_STORAGE_INCORE; repodata_set(data, solvid, &key, 0); }
0
[ "CWE-125" ]
libsolv
fdb9c9c03508990e4583046b590c30d958f272da
146,338,794,506,937,880,000,000,000,000,000,000,000
9
repodata_schema2id: fix heap-buffer-overflow in memcmp When the length of the last schema in data->schemadata is less than the length of the input schema, we get a read overflow in the ASan test. Signed-off-by: Zhipeng Xie <[email protected]>
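The fix shape implied by the message: never memcmp two schemas without first confirming they are the same length, so the comparison stays inside the shorter buffer. A hedged sketch over a flat id array; the layout is illustrative, not libsolv's.

#include <string.h>

/* Hedged sketch: compare lengths before bytes, so memcmp never reads
 * past the end of a stored schema shorter than the input. */
static int schema_equal(const int *stored, size_t stored_len,
                        const int *input, size_t input_len)
{
    if (stored_len != input_len)
        return 0;                  /* different lengths cannot match */
    return memcmp(stored, input, input_len * sizeof(*input)) == 0;
}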
int sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; int val; int valbool; struct linger ling; int ret = 0; /* * Options without arguments */ if (optname == SO_BINDTODEVICE) return sock_bindtodevice(sk, optval, optlen); if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; valbool = val ? 1 : 0; lock_sock(sk); switch (optname) { case SO_DEBUG: if (val && !capable(CAP_NET_ADMIN)) ret = -EACCES; else sock_valbool_flag(sk, SOCK_DBG, valbool); break; case SO_REUSEADDR: sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE); break; case SO_TYPE: case SO_PROTOCOL: case SO_DOMAIN: case SO_ERROR: ret = -ENOPROTOOPT; break; case SO_DONTROUTE: sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool); break; case SO_BROADCAST: sock_valbool_flag(sk, SOCK_BROADCAST, valbool); break; case SO_SNDBUF: /* Don't error on this BSD doesn't and if you think * about it this is right. Otherwise apps have to * play 'guess the biggest size' games. RCVBUF/SNDBUF * are treated in BSD as hints */ val = min_t(u32, val, sysctl_wmem_max); set_sndbuf: sk->sk_userlocks |= SOCK_SNDBUF_LOCK; sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF); /* Wake up sending tasks if we upped the value. */ sk->sk_write_space(sk); break; case SO_SNDBUFFORCE: if (!capable(CAP_NET_ADMIN)) { ret = -EPERM; break; } goto set_sndbuf; case SO_RCVBUF: /* Don't error on this BSD doesn't and if you think * about it this is right. Otherwise apps have to * play 'guess the biggest size' games. RCVBUF/SNDBUF * are treated in BSD as hints */ val = min_t(u32, val, sysctl_rmem_max); set_rcvbuf: sk->sk_userlocks |= SOCK_RCVBUF_LOCK; /* * We double it on the way in to account for * "struct sk_buff" etc. overhead. Applications * assume that the SO_RCVBUF setting they make will * allow that much actual data to be received on that * socket. * * Applications are unaware that "struct sk_buff" and * other overheads allocate from the receive buffer * during socket buffer allocation. * * And after considering the possible alternatives, * returning the value we actually used in getsockopt * is the most desirable behavior. 
*/ sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF); break; case SO_RCVBUFFORCE: if (!capable(CAP_NET_ADMIN)) { ret = -EPERM; break; } goto set_rcvbuf; case SO_KEEPALIVE: #ifdef CONFIG_INET if (sk->sk_protocol == IPPROTO_TCP && sk->sk_type == SOCK_STREAM) tcp_set_keepalive(sk, valbool); #endif sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); break; case SO_OOBINLINE: sock_valbool_flag(sk, SOCK_URGINLINE, valbool); break; case SO_NO_CHECK: sk->sk_no_check = valbool; break; case SO_PRIORITY: if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN)) sk->sk_priority = val; else ret = -EPERM; break; case SO_LINGER: if (optlen < sizeof(ling)) { ret = -EINVAL; /* 1003.1g */ break; } if (copy_from_user(&ling, optval, sizeof(ling))) { ret = -EFAULT; break; } if (!ling.l_onoff) sock_reset_flag(sk, SOCK_LINGER); else { #if (BITS_PER_LONG == 32) if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ) sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT; else #endif sk->sk_lingertime = (unsigned int)ling.l_linger * HZ; sock_set_flag(sk, SOCK_LINGER); } break; case SO_BSDCOMPAT: sock_warn_obsolete_bsdism("setsockopt"); break; case SO_PASSCRED: if (valbool) set_bit(SOCK_PASSCRED, &sock->flags); else clear_bit(SOCK_PASSCRED, &sock->flags); break; case SO_TIMESTAMP: case SO_TIMESTAMPNS: if (valbool) { if (optname == SO_TIMESTAMP) sock_reset_flag(sk, SOCK_RCVTSTAMPNS); else sock_set_flag(sk, SOCK_RCVTSTAMPNS); sock_set_flag(sk, SOCK_RCVTSTAMP); sock_enable_timestamp(sk, SOCK_TIMESTAMP); } else { sock_reset_flag(sk, SOCK_RCVTSTAMP); sock_reset_flag(sk, SOCK_RCVTSTAMPNS); } break; case SO_TIMESTAMPING: if (val & ~SOF_TIMESTAMPING_MASK) { ret = -EINVAL; break; } sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE, val & SOF_TIMESTAMPING_TX_HARDWARE); sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE, val & SOF_TIMESTAMPING_TX_SOFTWARE); sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE, val & SOF_TIMESTAMPING_RX_HARDWARE); if (val & SOF_TIMESTAMPING_RX_SOFTWARE) sock_enable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE); else sock_disable_timestamp(sk, (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)); sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE, val & SOF_TIMESTAMPING_SOFTWARE); sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE, val & SOF_TIMESTAMPING_SYS_HARDWARE); sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE, val & SOF_TIMESTAMPING_RAW_HARDWARE); break; case SO_RCVLOWAT: if (val < 0) val = INT_MAX; sk->sk_rcvlowat = val ? 
: 1; break; case SO_RCVTIMEO: ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen); break; case SO_SNDTIMEO: ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen); break; case SO_ATTACH_FILTER: ret = -EINVAL; if (optlen == sizeof(struct sock_fprog)) { struct sock_fprog fprog; ret = -EFAULT; if (copy_from_user(&fprog, optval, sizeof(fprog))) break; ret = sk_attach_filter(&fprog, sk); } break; case SO_DETACH_FILTER: ret = sk_detach_filter(sk); break; case SO_PASSSEC: if (valbool) set_bit(SOCK_PASSSEC, &sock->flags); else clear_bit(SOCK_PASSSEC, &sock->flags); break; case SO_MARK: if (!capable(CAP_NET_ADMIN)) ret = -EPERM; else sk->sk_mark = val; break; /* We implement the SO_SNDLOWAT etc to not be settable (1003.1g 5.3) */ case SO_RXQ_OVFL: sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool); break; case SO_WIFI_STATUS: sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool); break; case SO_PEEK_OFF: if (sock->ops->set_peek_off) sock->ops->set_peek_off(sk, val); else ret = -EOPNOTSUPP; break; case SO_NOFCS: sock_valbool_flag(sk, SOCK_NOFCS, valbool); break; default: ret = -ENOPROTOOPT; break; } release_sock(sk); return ret; }
0
[ "CWE-284", "CWE-264" ]
linux
3e10986d1d698140747fcfc2761ec9cb64c1d582
59,866,883,534,030,990,000,000,000,000,000,000,000
272
net: guard tcp_set_keepalive() to tcp sockets It's possible to use RAW sockets to get a crash in tcp_set_keepalive() / sk_reset_timer() Fix is to make sure the socket is a SOCK_STREAM one. Reported-by: Dave Jones <[email protected]> Signed-off-by: Eric Dumazet <[email protected]> Signed-off-by: David S. Miller <[email protected]>
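The guard is visible in the function above; reduced to its core, the point is that the protocol number alone does not identify a TCP socket, because a raw socket can carry IPPROTO_TCP too. A toy restatement with made-up constants:

#include <stdbool.h>

struct sock_info { int protocol; int type; };
enum { PROTO_TCP = 6, TYPE_STREAM = 1, TYPE_RAW = 3 };

/* Hedged sketch: TCP-only state may be touched only when both the
 * protocol AND the socket type say this really is a TCP stream. */
static bool may_touch_tcp_state(const struct sock_info *sk)
{
    return sk->protocol == PROTO_TCP && sk->type == TYPE_STREAM;
}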
CmdCreateUser() : BasicCommand("createUser") {}
0
[ "CWE-613" ]
mongo
e55d6e2292e5dbe2f97153251d8193d1cc89f5d7
122,677,715,070,815,800,000,000,000,000,000,000,000
1
SERVER-38984 Validate unique User ID on UserCache hit
void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c) { unsigned int r; if (a->empty || b->empty) { snd_interval_none(c); return; } c->empty = 0; c->min = div32(a->min, b->max, &r); c->openmin = (r || a->openmin || b->openmax); if (b->min > 0) { c->max = div32(a->max, b->min, &r); if (r) { c->max++; c->openmax = 1; } else c->openmax = (a->openmax || b->openmin); } else { c->max = UINT_MAX; c->openmax = 0; } c->integer = 0; }
0
[ "CWE-416", "CWE-362" ]
linux
3aa02cb664c5fb1042958c8d1aa8c35055a2ebc4
247,569,068,539,639,300,000,000,000,000,000,000,000
23
ALSA: pcm : Call kill_fasync() in stream lock Currently kill_fasync() is called outside the stream lock in snd_pcm_period_elapsed(). This is potentially racy, since the stream may get released even while the irq handler is running. Although snd_pcm_release_substream() calls snd_pcm_drop(), this doesn't guarantee that the irq handler finishes, thus the kill_fasync() call outside the stream spin lock may be invoked after the substream is detached, as recently reported by KASAN. As a quick workaround, move the kill_fasync() call inside the stream lock. The fasync interface is rarely used, so this shouldn't have a big impact from the performance POV. Ideally, we should implement some sync mechanism for the proper finish of stream and irq handler. But this oneliner should suffice for most cases, so far. Reported-by: Baozeng Ding <[email protected]> Signed-off-by: Takashi Iwai <[email protected]>
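The one-line fix pattern from the message, moving the notification inside the stream lock so a concurrent release cannot free the stream mid-notification, in pthread terms; all names are illustrative.

#include <pthread.h>

struct stream {
    pthread_mutex_t lock;
    void (*notify)(struct stream *);   /* stand-in for kill_fasync() */
};

/* Hedged sketch: with the callback inside the lock, a release path
 * that also takes the lock cannot free the stream while the
 * notification is still running. */
static void period_elapsed(struct stream *s)
{
    pthread_mutex_lock(&s->lock);
    /* ... update period bookkeeping ... */
    if (s->notify)
        s->notify(s);   /* previously called after unlock: racy */
    pthread_mutex_unlock(&s->lock);
}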
lyd_free_value(lyd_val value, LY_DATA_TYPE value_type, uint8_t value_flags, struct lys_type *type, lyd_val *old_val, LY_DATA_TYPE *old_val_type, uint8_t *old_val_flags) { if (old_val) { *old_val = value; *old_val_type = value_type; *old_val_flags = value_flags; /* we only backup the values for now */ return; } /* otherwise the value is correctly freed */ if (value_flags & LY_VALUE_USER) { assert(type->der && type->der->module); lytype_free(type->der->module, type->der->name, value); } else { switch (value_type) { case LY_TYPE_BITS: if (value.bit) { free(value.bit); } break; case LY_TYPE_INST: if (!(value_flags & LY_VALUE_UNRES)) { break; } /* fallthrough */ case LY_TYPE_UNION: /* unresolved union leaf */ lydict_remove(type->parent->module->ctx, value.string); break; default: break; } } }
0
[ "CWE-119" ]
libyang
32fb4993bc8bb49e93e84016af3c10ea53964be5
134,797,039,815,980,760,000,000,000,000,000,000,000
36
schema tree BUGFIX do not check features while still resolving schema Fixes #723
long ssl3_ctx_callback_ctrl(SSL_CTX *ctx, int cmd, void (*fp)(void)) { CERT *cert; cert=ctx->cert; switch (cmd) { #ifndef OPENSSL_NO_RSA case SSL_CTRL_SET_TMP_RSA_CB: { cert->rsa_tmp_cb = (RSA *(*)(SSL *, int, int))fp; } break; #endif #ifndef OPENSSL_NO_DH case SSL_CTRL_SET_TMP_DH_CB: { cert->dh_tmp_cb = (DH *(*)(SSL *, int, int))fp; } break; #endif #ifndef OPENSSL_NO_ECDH case SSL_CTRL_SET_TMP_ECDH_CB: { cert->ecdh_tmp_cb = (EC_KEY *(*)(SSL *, int, int))fp; } break; #endif #ifndef OPENSSL_NO_TLSEXT case SSL_CTRL_SET_TLSEXT_SERVERNAME_CB: ctx->tlsext_servername_callback=(int (*)(SSL *,int *,void *))fp; break; #ifdef TLSEXT_TYPE_opaque_prf_input case SSL_CTRL_SET_TLSEXT_OPAQUE_PRF_INPUT_CB: ctx->tlsext_opaque_prf_input_callback = (int (*)(SSL *,void *, size_t, void *))fp; break; #endif case SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB: ctx->tlsext_status_cb=(int (*)(SSL *,void *))fp; break; case SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB: ctx->tlsext_ticket_key_cb=(int (*)(SSL *,unsigned char *, unsigned char *, EVP_CIPHER_CTX *, HMAC_CTX *, int))fp; break; #ifndef OPENSSL_NO_SRP case SSL_CTRL_SET_SRP_VERIFY_PARAM_CB: ctx->srp_ctx.srp_Mask|=SSL_kSRP; ctx->srp_ctx.SRP_verify_param_callback=(int (*)(SSL *,void *))fp; break; case SSL_CTRL_SET_TLS_EXT_SRP_USERNAME_CB: ctx->srp_ctx.srp_Mask|=SSL_kSRP; ctx->srp_ctx.TLS_ext_srp_username_callback=(int (*)(SSL *,int *,void *))fp; break; case SSL_CTRL_SET_SRP_GIVE_CLIENT_PWD_CB: ctx->srp_ctx.srp_Mask|=SSL_kSRP; ctx->srp_ctx.SRP_give_srp_client_pwd_callback=(char *(*)(SSL *,void *))fp; break; #endif #endif default: return(0); } return(1); }
0
[ "CWE-310" ]
openssl
6bfe55380abbf7528e04e59f18921bd6c896af1c
250,760,782,668,224,500,000,000,000,000,000,000,000
72
Support TLS_FALLBACK_SCSV. Reviewed-by: Rich Salz <[email protected]>
static void php_mysqlnd_stats_free_mem(void * _packet) { MYSQLND_PACKET_STATS *p= (MYSQLND_PACKET_STATS *) _packet; if (p->message.s) { mnd_efree(p->message.s); p->message.s = NULL; } }
0
[ "CWE-120" ]
php-src
58006537fc5f133ae8549efe5118cde418b3ace9
195,035,971,650,827,540,000,000,000,000,000,000,000
8
Fix bug #81719: mysqlnd/pdo password buffer overflow
static ut64 bin_obj_va2pa(ut64 p, ut32 *offset, ut32 *left, RBinFile *bf) { if (!bf || !bf->o || !bf->o->bin_obj) { return 0; } RDyldCache *cache = (RDyldCache*) ((struct MACH0_(obj_t)*)bf->o->bin_obj)->user; if (!cache) { return 0; } ut64 slide = rebase_infos_get_slide (cache); ut64 res = va2pa (p, cache->n_maps, cache->maps, cache->buf, slide, offset, left); if (res == UT64_MAX) { res = 0; } return res; }
0
[ "CWE-787" ]
radare2
c84b7232626badd075caf3ae29661b609164bac6
220,393,137,997,586,000,000,000,000,000,000,000,000
17
Fix heap buffer overflow in dyldcache parser ##crash * Reported by: Lazymio via huntr.dev * Reproducer: dyldovf
void rpc_exit_task(struct rpc_task *task) { task->tk_action = NULL; if (task->tk_ops->rpc_call_done != NULL) { task->tk_ops->rpc_call_done(task, task->tk_calldata); if (task->tk_action != NULL) { WARN_ON(RPC_ASSASSINATED(task)); /* Always release the RPC slot and buffer memory */ xprt_release(task); } } }
0
[ "CWE-400", "CWE-399", "CWE-703" ]
linux
0b760113a3a155269a3fba93a409c640031dd68f
33,849,960,518,954,783,000,000,000,000,000,000,000
12
NLM: Don't hang forever on NLM unlock requests If the NLM daemon is killed on the NFS server, we can currently end up hanging forever on an 'unlock' request, instead of aborting. Basically, if the rpcbind request fails, or the server keeps returning garbage, we really want to quit instead of retrying. Tested-by: Vasily Averin <[email protected]> Signed-off-by: Trond Myklebust <[email protected]> Cc: [email protected]
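The behavioral change, aborting instead of retrying forever when the peer keeps failing, is at heart a bounded-retry loop. The sketch below is a generic illustration of that idea, not the SUNRPC mechanism the actual patch uses.

#include <stdbool.h>

/* Hedged sketch: cap the attempts so a dead or garbage-spewing server
 * makes the request fail instead of hanging the caller forever. */
static int send_with_retry(bool (*try_once)(void *), void *arg, int max_tries)
{
    for (int i = 0; i < max_tries; i++) {
        if (try_once(arg))
            return 0;
    }
    return -1;          /* give up: caller reports the unlock failed */
}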
Bool gf_dom_event_fire_ex(GF_Node *node, GF_DOM_Event *event, GF_List *use_stack) { GF_SceneGraph *sg; GF_List *prev_use_stack; Bool prev_bub; GF_DOMEventTarget cur_target; u32 cur_par_idx; Bool can_bubble = GF_FALSE; if (!node || !event) return GF_FALSE; GF_LOG(GF_LOG_DEBUG, GF_LOG_INTERACT, ("[DOM Events ] Graph %p Time %f - Firing event %s.%s\n", gf_node_get_graph(node), gf_node_get_scene_time(node), gf_node_get_log_name(node), gf_dom_event_get_name(event->type))); /*flush any pending add_listener see "determine the current target's candidate event listeners" in http://www.w3.org/TR/DOM-Level-3-Events/events.html */ gf_dom_listener_process_add(node->sgprivate->scenegraph); event->consumed = 0; event->target = node; event->target_type = GF_DOM_EVENT_TARGET_NODE; if (node->sgprivate->interact && node->sgprivate->interact->dom_evt) { event->currentTarget = node->sgprivate->interact->dom_evt; } else { cur_target.ptr_type = GF_DOM_EVENT_TARGET_NODE; cur_target.ptr = node; cur_target.listeners = NULL; event->currentTarget = &cur_target; } /*capture phase - not 100% sure, the actual capture phase should be determined by the std using the DOM events SVGT doesn't use this phase, so we don't add it for now.*/ #if 0 if ((0)) { Bool aborted = GF_FALSE; u32 i, count; GF_List *parents; event->event_phase = GF_DOM_EVENT_PHASE_CAPTURE; parents = gf_list_new(); /*get all parents to top*/ gf_sg_dom_stack_parents(gf_node_get_parent(node, 0), parents); count = gf_list_count(parents); for (i=0; i<count; i++) { GF_Node *n = (GF_Node *)gf_list_get(parents, i); if (n->sgprivate->interact) gf_sg_fire_dom_event(n->sgprivate->interact->dom_evt, event, node->sgprivate->scenegraph, n); /*event has been canceled*/ if (event->event_phase & (GF_DOM_EVENT_PHASE_CANCEL|GF_DOM_EVENT_PHASE_CANCEL_ALL) ) { aborted = GF_TRUE; break; } } gf_list_del(parents); if (aborted) { event->currentTarget = NULL; return GF_TRUE; } } #endif /*target phase*/ event->event_phase = GF_DOM_EVENT_PHASE_AT_TARGET; cur_par_idx = 0; if (use_stack) { cur_par_idx = gf_list_count(use_stack); if (cur_par_idx) cur_par_idx--; } sg = node->sgprivate->scenegraph; prev_use_stack = sg->use_stack ; prev_bub = sg->abort_bubbling; sg->use_stack = use_stack; sg->abort_bubbling = GF_FALSE; if (node->sgprivate->interact) { can_bubble = gf_sg_fire_dom_event(node->sgprivate->interact->dom_evt, event, node->sgprivate->scenegraph, node); } if ( (!node->sgprivate->interact || can_bubble) && event->bubbles) { /*bubbling phase*/ event->event_phase = GF_DOM_EVENT_PHASE_BUBBLE; gf_sg_dom_event_bubble(node, event, use_stack, cur_par_idx); } sg->use_stack = prev_use_stack; sg->abort_bubbling = prev_bub; event->currentTarget = NULL; return event->consumed ? GF_TRUE : GF_FALSE; }
0
[ "CWE-416" ]
gpac
9723dd0955894f2cb7be13b94cf7a47f2754b893
270,218,871,852,433,430,000,000,000,000,000,000,000
86
fixed #2109
bool InstanceKlass::is_override(const methodHandle& super_method, Handle targetclassloader, Symbol* targetclassname, TRAPS) { // Private methods can not be overridden if (super_method->is_private()) { return false; } // If super method is accessible, then override if ((super_method->is_protected()) || (super_method->is_public())) { return true; } // Package-private methods are not inherited outside of package assert(super_method->is_package_private(), "must be package private"); return(is_same_class_package(targetclassloader(), targetclassname)); }
0
[]
jdk11u-dev
41825fa33d605f8501164f9296572e4378e8183b
195,351,098,527,670,270,000,000,000,000,000,000,000
14
8270386: Better verification of scan methods Reviewed-by: mbaesken Backport-of: ac329cef45979bd0159ecd1347e36f7129bb2ce4
ip_rb_replaceSlaveTkCmdsCommand(clientData, interp, objc, objv) ClientData clientData; Tcl_Interp *interp; int objc; char *objv[]; #endif { char *slave_name; Tcl_Interp *slave; Tk_Window mainWin; if (objc != 2) { #ifdef Tcl_WrongNumArgs Tcl_WrongNumArgs(interp, 1, objv, "slave_name"); #else char *nameString; #if TCL_MAJOR_VERSION >= 8 nameString = Tcl_GetStringFromObj(objv[0], (int*)NULL); #else /* TCL_MAJOR_VERSION < 8 */ nameString = objv[0]; #endif Tcl_AppendResult(interp, "wrong number of arguments: should be \"", nameString, " slave_name\"", (char *) NULL); #endif } #if TCL_MAJOR_VERSION >= 8 slave_name = Tcl_GetStringFromObj(objv[1], (int*)NULL); #else slave_name = objv[1]; #endif slave = Tcl_GetSlave(interp, slave_name); if (slave == NULL) { Tcl_AppendResult(interp, "cannot find slave \"", slave_name, "\"", (char *)NULL); return TCL_ERROR; } mainWin = Tk_MainWindow(slave); /* replace 'exit' command --> 'interp_exit' command */ #if TCL_MAJOR_VERSION >= 8 DUMP1("Tcl_CreateObjCommand(\"exit\") --> \"interp_exit\""); Tcl_CreateObjCommand(slave, "exit", ip_InterpExitObjCmd, (ClientData)mainWin, (Tcl_CmdDeleteProc *)NULL); #else /* TCL_MAJOR_VERSION < 8 */ DUMP1("Tcl_CreateCommand(\"exit\") --> \"interp_exit\""); Tcl_CreateCommand(slave, "exit", ip_InterpExitCommand, (ClientData)mainWin, (Tcl_CmdDeleteProc *)NULL); #endif /* replace vwait and tkwait */ ip_replace_wait_commands(slave, mainWin); return TCL_OK; }
0
[]
tk
ebd0fc80d62eeb7b8556522256f8d035e013eb65
122,842,105,423,285,750,000,000,000,000,000,000,000
56
tcltklib.c: check argument * ext/tk/tcltklib.c (ip_cancel_eval_core): check argument type and length. git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@51468 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
static int nagios_core_worker(const char *path) { int sd, ret; char response[128]; is_worker = 1; set_loadctl_defaults(); sd = nsock_unix(path, NSOCK_TCP | NSOCK_CONNECT); if (sd < 0) { printf("Failed to connect to query socket '%s': %s: %s\n", path, nsock_strerror(sd), strerror(errno)); return 1; } ret = nsock_printf_nul(sd, "@wproc register name=Core Worker %ld;pid=%ld", (long)getpid(), (long)getpid()); if (ret < 0) { printf("Failed to register as worker.\n"); return 1; } ret = read(sd, response, 3); if (ret != 3) { printf("Failed to read response from wproc manager\n"); return 1; } if (memcmp(response, "OK", 3)) { read(sd, response + 3, sizeof(response) - 4); response[sizeof(response) - 2] = 0; printf("Failed to register with wproc manager: %s\n", response); return 1; } enter_worker(sd, start_cmd); return 0; }
0
[ "CWE-665", "CWE-284" ]
nagioscore
1b197346d490df2e2d3b1dcce5ac6134ad0c8752
270,363,596,144,049,680,000,000,000,000,000,000,000
37
halfway revert hack/configure changes - switch order of daemon_init/drop_privileges
static void nft_setelem_catchall_remove(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem) { struct nft_set_elem_catchall *catchall, *next; list_for_each_entry_safe(catchall, next, &set->catchall_list, list) { if (catchall->elem == elem->priv) { list_del_rcu(&catchall->list); kfree_rcu(catchall, rcu); break; } } }
0
[]
net
520778042ccca019f3ffa136dd0ca565c486cedd
285,279,658,421,908,600,000,000,000,000,000,000,000
14
netfilter: nf_tables: disallow non-stateful expression in sets earlier Since 3e135cd499bf ("netfilter: nft_dynset: dynamic stateful expression instantiation"), it is possible to attach stateful expressions to set elements. cd5125d8f518 ("netfilter: nf_tables: split set destruction in deactivate and destroy phase") introduces conditional destruction on the object to accommodate transaction semantics. nft_expr_init() calls expr->ops->init() first, then checks for NFT_STATEFUL_EXPR; this still allows initializing a non-stateful lookup expression that points to a set, which might lead to a UAF since the set is not properly detached from the set->binding for this case. Anyway, this combination is nonsense from the nf_tables perspective. This patch fixes this problem by checking for NFT_STATEFUL_EXPR before expr->ops->init() is called. The reporter provides a KASAN splat and a poc reproducer (similar to those autogenerated by syzbot to report use-after-free errors). It is unknown to me if they are using syzbot or if they use a similar automated tool to locate the bug that they are reporting. For the record, this is the KASAN splat. [ 85.431824] ================================================================== [ 85.432901] BUG: KASAN: use-after-free in nf_tables_bind_set+0x81b/0xa20 [ 85.433825] Write of size 8 at addr ffff8880286f0e98 by task poc/776 [ 85.434756] [ 85.434999] CPU: 1 PID: 776 Comm: poc Tainted: G W 5.18.0+ #2 [ 85.436023] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014 Fixes: 0b2d8a7b638b ("netfilter: nf_tables: add helper functions for expression handling") Reported-and-tested-by: Aaron Adams <[email protected]> Signed-off-by: Pablo Neira Ayuso <[email protected]>
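The ordering fix the message describes, validating the expression's flags before running its init callback, matters because init may already bind resources (here, the set binding) that the error path will not unwind. Sketched with illustrative types:

#include <errno.h>

#define EXPR_STATEFUL 0x1

struct expr_ops {
    unsigned int flags;
    int (*init)(void *expr);
};

/* Hedged sketch: checking EXPR_STATEFUL *before* ops->init() means a
 * disallowed expression never gets the chance to acquire state that a
 * later error path would leave dangling. */
static int expr_init_checked(const struct expr_ops *ops, void *expr,
                             int must_be_stateful)
{
    if (must_be_stateful && !(ops->flags & EXPR_STATEFUL))
        return -EOPNOTSUPP;     /* reject before any side effects */
    return ops->init(expr);
}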
function_using_block_scopes(ufunc_T *fp, cstack_T *cstack) { if (cstack != NULL && cstack->cs_idx >= 0) { int count = cstack->cs_idx + 1; int i; fp->uf_block_ids = ALLOC_MULT(int, count); if (fp->uf_block_ids != NULL) { mch_memmove(fp->uf_block_ids, cstack->cs_block_id, sizeof(int) * count); fp->uf_block_depth = count; } // Set flag in each block to indicate a function was defined. This // is used to keep the variable when leaving the block, see // hide_script_var(). for (i = 0; i <= cstack->cs_idx; ++i) cstack->cs_flags[i] |= CSF_FUNC_DEF; } }
0
[ "CWE-416" ]
vim
9c23f9bb5fe435b28245ba8ac65aa0ca6b902c04
264,018,939,586,902,900,000,000,000,000,000,000,000
22
patch 8.2.3902: Vim9: double free with nested :def function Problem: Vim9: double free with nested :def function. Solution: Pass "line_to_free" from compile_def_function() and make sure cmdlinep is valid.
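The double-free shape behind this message is two call frames both believing they own the same line buffer; threading a single 'line_to_free' slot through the chain gives each allocation exactly one owner. A hedged sketch of that handoff, with names and behavior assumed from the message:

#include <stdlib.h>
#include <string.h>

/* Hedged sketch: when a callee replaces the current line, it parks the
 * old allocation in *line_to_free instead of freeing it, so only one
 * frame ever frees a given buffer. */
static void replace_line(char **line, char **line_to_free)
{
    char *newline = strdup("rewritten line");
    if (!newline)
        return;
    free(*line_to_free);    /* at most one buffer is ever pending */
    *line_to_free = *line;  /* old line now owned by the caller chain */
    *line = newline;
}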
void MoveIndexIfNecessary(HValue* index_raw, HBoundsCheck* insert_before, HInstruction* end_of_scan_range) { if (!index_raw->IsAdd() && !index_raw->IsSub()) { // index_raw can be HAdd(index_base, offset), HSub(index_base, offset), // or index_base directly. In the latter case, no need to move anything. return; } HBinaryOperation* index = HArithmeticBinaryOperation::cast(index_raw); HValue* left_input = index->left(); HValue* right_input = index->right(); bool must_move_index = false; bool must_move_left_input = false; bool must_move_right_input = false; for (HInstruction* cursor = end_of_scan_range; cursor != insert_before;) { if (cursor == left_input) must_move_left_input = true; if (cursor == right_input) must_move_right_input = true; if (cursor == index) must_move_index = true; if (cursor->previous() == NULL) { cursor = cursor->block()->dominator()->end(); } else { cursor = cursor->previous(); } } // The BCE algorithm only selects mergeable bounds checks that share // the same "index_base", so we'll only ever have to move constants. if (must_move_left_input) { HConstant::cast(left_input)->Unlink(); HConstant::cast(left_input)->InsertBefore(index); } if (must_move_right_input) { HConstant::cast(right_input)->Unlink(); HConstant::cast(right_input)->InsertBefore(index); } }
0
[]
node
3122e0eae64c5ab494b29d0a9cadef902d93f1f9
335,832,632,724,202,640,000,000,000,000,000,000,000
37
deps: fix up v8 after fd80a3 fd80a31e0697d6317ce8c2d289575399f4e06d21 introduced a segfault during redundant boundary check elimination (#8208). The problem consists of two parts: 1. Absence of an instruction iterator in `EliminateRedundantBoundsChecks`. It was present in recent v8, but wasn't considered important at the time of the backport. However, since the function changes the order of instructions in a block, it is important not to rely on `i->next()` at the end of the loop. 2. Too strict an ASSERT in `MoveIndexIfNecessary`. It is essentially a backport of a45c96ab from v8's upstream. See https://github.com/v8/v8/commit/a45c96ab for details. fix #8208
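The first half of the fix, never reading the successor after the loop body may have relinked instructions, is the standard capture-next-before-mutation idiom. Over a plain C linked list:

struct insn { struct insn *next; };

/* Stand-in for a pass that may unlink or move 'cur' in the list. */
static void maybe_relink(struct insn *cur) { (void)cur; }

/* Hedged sketch: grab the successor before the body runs, so the walk
 * stays valid even if the body moves or removes the current node. */
static void walk_block(struct insn *head)
{
    for (struct insn *cur = head; cur; ) {
        struct insn *next = cur->next;  /* capture BEFORE mutation */
        maybe_relink(cur);              /* may change cur->next */
        cur = next;
    }
}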
static struct btrfs_block_group *btrfs_create_block_group_cache( struct btrfs_fs_info *fs_info, u64 start) { struct btrfs_block_group *cache; cache = kzalloc(sizeof(*cache), GFP_NOFS); if (!cache) return NULL; cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), GFP_NOFS); if (!cache->free_space_ctl) { kfree(cache); return NULL; } cache->start = start; cache->fs_info = fs_info; cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start); cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED; refcount_set(&cache->refs, 1); spin_lock_init(&cache->lock); init_rwsem(&cache->data_rwsem); INIT_LIST_HEAD(&cache->list); INIT_LIST_HEAD(&cache->cluster_list); INIT_LIST_HEAD(&cache->bg_list); INIT_LIST_HEAD(&cache->ro_list); INIT_LIST_HEAD(&cache->discard_list); INIT_LIST_HEAD(&cache->dirty_list); INIT_LIST_HEAD(&cache->io_list); btrfs_init_free_space_ctl(cache, cache->free_space_ctl); atomic_set(&cache->frozen, 0); mutex_init(&cache->free_space_lock); btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root); return cache; }
0
[ "CWE-703", "CWE-667" ]
linux
1cb3db1cf383a3c7dbda1aa0ce748b0958759947
171,976,931,938,594,800,000,000,000,000,000,000,000
40
btrfs: fix deadlock with concurrent chunk allocations involving system chunks When a task attempting to allocate a new chunk verifies that there is not currently enough free space in the system space_info and there is another task that allocated a new system chunk but has not yet finished the creation of the respective block group, it waits for that other task to finish creating the block group. This is to avoid exhaustion of the system chunk array in the superblock, which is limited, when we have a thundering herd of tasks allocating new chunks. This problem was described and fixed by commit eafa4fd0ad0607 ("btrfs: fix exhaustion of the system chunk array due to concurrent allocations"). However there are two very similar scenarios where this can lead to a deadlock: 1) Task B allocated a new system chunk and task A is waiting on task B to finish creation of the respective system block group. However before task B ends its transaction handle and finishes the creation of the system block group, it attempts to allocate another chunk (like a data chunk for an fallocate operation for a very large range). Task B will be unable to progress and allocate the new chunk, because task A set space_info->chunk_alloc to 1 and therefore it loops at btrfs_chunk_alloc() waiting for task A to finish its chunk allocation and set space_info->chunk_alloc to 0, but task A is waiting on task B to finish creation of the new system block group, therefore resulting in a deadlock; 2) Task B allocated a new system chunk and task A is waiting on task B to finish creation of the respective system block group. By the time task B enters the final phase of block group allocation, which happens at btrfs_create_pending_block_groups(), when it modifies the extent tree, the device tree or the chunk tree to insert the items for some new block group, it needs to allocate a new chunk, so it ends up at btrfs_chunk_alloc() and keeps looping there because task A has set space_info->chunk_alloc to 1, but task A is waiting for task B to finish creation of the new system block group and release the reserved system space, therefore resulting in a deadlock. In short, the problem is if a task B needs to allocate a new chunk after it previously allocated a new system chunk and if another task A is currently waiting for task B to complete the allocation of the new system chunk. Unfortunately this deadlock scenario introduced by the previous fix for the system chunk array exhaustion problem does not have a simple and short fix, and requires a big change to rework the chunk allocation code so that chunk btree updates are all made in the first phase of chunk allocation. And since this deadlock regression is being frequently hit on zoned filesystems and the system chunk array exhaustion problem is triggered in more extreme cases (originally observed on PowerPC with a node size of 64K when running the fallocate tests from stress-ng), revert the changes from that commit. The next patch in the series, with a subject of "btrfs: rework chunk allocation to avoid exhaustion of the system chunk array", makes the necessary changes to fix the system chunk array exhaustion problem.
Reported-by: Naohiro Aota <[email protected]> Link: https://lore.kernel.org/linux-btrfs/20210621015922.ewgbffxuawia7liz@naota-xeon/ Fixes: eafa4fd0ad0607 ("btrfs: fix exhaustion of the system chunk array due to concurrent allocations") CC: [email protected] # 5.12+ Tested-by: Shin'ichiro Kawasaki <[email protected]> Tested-by: Naohiro Aota <[email protected]> Signed-off-by: Filipe Manana <[email protected]> Tested-by: David Sterba <[email protected]> Signed-off-by: David Sterba <[email protected]>
static s32 gf_vvc_read_sps_bs_internal(GF_BitStream *bs, VVCState *vvc, u8 layer_id, u32 *vui_flag_pos) { s32 vps_id, sps_id; u32 i, CtbSizeY; VVC_SPS *sps; u8 sps_ptl_dpb_hrd_params_present_flag; if (vui_flag_pos) *vui_flag_pos = 0; sps_id = gf_bs_read_int_log(bs, 4, "sps_id"); if ((sps_id<0) || (sps_id >= 16)) { return -1; } vps_id = gf_bs_read_int_log(bs, 4, "vps_id"); if ((vps_id<0) || (vps_id >= 16)) { return -1; } if (!vps_id && !vvc->vps[0].state) { vvc->vps[0].state = 1; vvc->vps[0].num_ptl = 1; vvc->vps[0].max_layers = 1; vvc->vps[0].all_layers_independent = 1; } sps = &vvc->sps[sps_id]; if (!sps->state) { sps->state = 1; sps->id = sps_id; sps->vps_id = vps_id; } sps->max_sublayers = 1 + gf_bs_read_int_log(bs, 3, "max_sublayers_minus1"); sps->chroma_format_idc = gf_bs_read_int_log(bs, 2, "chroma_format_idc"); sps->log2_ctu_size = 5 + gf_bs_read_int_log(bs, 2, "log2_ctu_size_minus5"); CtbSizeY = 1<<sps->log2_ctu_size; sps_ptl_dpb_hrd_params_present_flag = gf_bs_read_int_log(bs, 1, "sps_ptl_dpb_hrd_params_present_flag"); if (sps_ptl_dpb_hrd_params_present_flag) { VVC_ProfileTierLevel ptl, *p_ptl; if (sps->vps_id) { p_ptl = &ptl; } else { p_ptl = &vvc->vps[0].ptl[0]; } memset(p_ptl, 0, sizeof(VVC_ProfileTierLevel)); p_ptl->pt_present = 1; p_ptl->ptl_max_tid = sps->max_sublayers-1; vvc_profile_tier_level(bs, p_ptl, 0); } sps->gdr_enabled = gf_bs_read_int_log(bs, 1, "gdr_enabled"); sps->ref_pic_resampling = gf_bs_read_int_log(bs, 1, "ref_pic_resampling"); if (sps->ref_pic_resampling) sps->res_change_in_clvs = gf_bs_read_int_log(bs, 1, "res_change_in_clvs"); sps->width = gf_bs_read_ue_log(bs, "width"); sps->height = gf_bs_read_ue_log(bs, "height"); sps->conf_window = gf_bs_read_int_log(bs, 1, "conformance_window_present_flag"); if (sps->conf_window) { u32 SubWidthC, SubHeightC; sps->cw_left = gf_bs_read_ue_log(bs, "conformance_window_left"); sps->cw_right = gf_bs_read_ue_log(bs, "conformance_window_right"); sps->cw_top = gf_bs_read_ue_log(bs, "conformance_window_top"); sps->cw_bottom = gf_bs_read_ue_log(bs, "conformance_window_bottom"); if (sps->chroma_format_idc == 1) { SubWidthC = SubHeightC = 2; } else if (sps->chroma_format_idc == 2) { SubWidthC = 2; SubHeightC = 1; } else { SubWidthC = SubHeightC = 1; } sps->width -= SubWidthC * (sps->cw_left + sps->cw_right); sps->height -= SubHeightC * (sps->cw_top + sps->cw_bottom); } sps->subpic_info_present = gf_bs_read_int_log(bs, 1, "subpic_info_present"); if (sps->subpic_info_present) { sps->nb_subpics = 1 + gf_bs_read_ue_log(bs, "nb_subpics_minus1"); if (sps->nb_subpics>1) { u32 tmpWidthVal, tmpHeightVal; sps->independent_subpic_flags = gf_bs_read_int_log(bs, 1, "independent_subpic_flags"); sps->subpic_same_size = gf_bs_read_int_log(bs, 1, "subpic_same_size"); tmpWidthVal = (sps->width + CtbSizeY-1) / CtbSizeY; tmpWidthVal = gf_get_bit_size(tmpWidthVal); tmpHeightVal = (sps->height + CtbSizeY-1) / CtbSizeY; tmpHeightVal = gf_get_bit_size(tmpHeightVal); for (i=0; i<sps->nb_subpics; i++) { if( !sps->subpic_same_size || !i) { if (i && (sps->width > CtbSizeY)) gf_bs_read_int_log(bs, tmpWidthVal, "subpic_ctu_top_left_x"); if (i && (sps->height > CtbSizeY)) gf_bs_read_int_log(bs, tmpHeightVal, "subpic_ctu_top_left_y"); if ((i+1 < sps->nb_subpics) && (sps->width > CtbSizeY)) gf_bs_read_int_log(bs, tmpWidthVal, "subpic_width_minus1"); if ((i+1 < sps->nb_subpics) && (sps->height > CtbSizeY)) gf_bs_read_int_log(bs, tmpHeightVal, "subpic_height_minus1"); } if (!sps->independent_subpic_flags) { gf_bs_read_int_log(bs, 1, "subpic_treated_as_pic_flag"); 
gf_bs_read_int_log(bs, 1, "loop_filter_across_subpic_enabled_flag"); } } sps->subpicid_len = gf_bs_read_ue_log(bs, "subpic_id_len_minus1") + 1; sps->subpicid_mapping_explicit = gf_bs_read_int_log(bs, 1, "subpic_id_mapping_explicitly_signalled_flag"); if (sps->subpicid_mapping_explicit) { sps->subpicid_mapping_present = gf_bs_read_int_log(bs, 1, "subpic_id_mapping_present_flag"); if (sps->subpicid_mapping_present) { for (i=0; i<sps->nb_subpics; i++) { gf_bs_read_ue_log(bs, "subpic_id"); } } } } } else { sps->nb_subpics = 1; } sps->bitdepth = gf_bs_read_ue_log(bs, "bitdepth_minus8") + 8; sps->entropy_coding_sync_enabled_flag = gf_bs_read_int_log(bs, 1, "entropy_coding_sync_enabled_flag"); sps->entry_point_offsets_present_flag = gf_bs_read_int_log(bs, 1, "entry_point_offsets_present_flag"); sps->log2_max_poc_lsb = 4 + gf_bs_read_int_log(bs, 4, "log2_max_poc_lsb_minus4"); if ((sps->poc_msb_cycle_flag = gf_bs_read_int_log(bs, 1, "poc_msb_cycle_flag"))) sps->poc_msb_cycle_len = 1 + gf_bs_read_ue_log(bs, "poc_msb_cycle_len_minus1"); u8 sps_num_extra_ph_bits = 8 * gf_bs_read_int_log(bs, 2, "sps_num_extra_ph_bytes"); for (i=0; i<sps_num_extra_ph_bits; i++) { if (gf_bs_read_int_log_idx(bs, 1, "extra_ph_bit_present_flag", 1)) sps->ph_num_extra_bits++; } u8 sps_num_extra_sh_bits = 8 * gf_bs_read_int_log(bs, 2, "num_extra_sh_bytes"); for (i=0; i<sps_num_extra_sh_bits; i++) { if (gf_bs_read_int_log_idx(bs, 1, "extra_sh_bit_present_flag", i)) sps->sh_num_extra_bits++; } if (sps_ptl_dpb_hrd_params_present_flag) { u8 sps_sublayer_dpb_params_flag = 0; if (sps->max_sublayers>1) { sps_sublayer_dpb_params_flag = gf_bs_read_int_log(bs, 1, "sps_sublayer_dpb_params_flag"); } for (i=(sps_sublayer_dpb_params_flag ? 0 : sps->max_sublayers-1); i < sps->max_sublayers; i++ ) { gf_bs_read_ue_log_idx(bs, "dpb_max_dec_pic_buffering_minus1", i); gf_bs_read_ue_log_idx(bs, "dpb_max_num_reorder_pics", i); gf_bs_read_ue_log_idx(bs, "dpb_max_latency_increase_plus1", i); } } gf_bs_read_ue_log(bs, "sps_log2_min_luma_coding_block_size_minus2"); sps->partition_constraints_override_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_partition_constraints_override_enabled_flag"); gf_bs_read_ue_log(bs, "sps_log2_min_luma_coding_block_size_minus2"); u8 sps_max_mtt_hierarchy_depth_intra_slice_luma = gf_bs_read_ue_log(bs, "sps_max_mtt_hierarchy_depth_intra_slice_luma"); if (sps_max_mtt_hierarchy_depth_intra_slice_luma != 0) { gf_bs_read_ue_log(bs, "sps_log2_diff_max_bt_min_qt_intra_slice_luma"); gf_bs_read_ue_log(bs, "sps_log2_diff_max_tt_min_qt_intra_slice_luma"); } u8 sps_qtbtt_dual_tree_intra_flag = 0; if (sps->chroma_format_idc) { sps_qtbtt_dual_tree_intra_flag = gf_bs_read_int_log(bs, 1, "sps_qtbtt_dual_tree_intra_flag"); } if (sps_qtbtt_dual_tree_intra_flag) { gf_bs_read_ue_log(bs, "sps_log2_diff_min_qt_min_cb_intra_slice_chroma"); u8 sps_max_mtt_hierarchy_depth_intra_slice_chroma = gf_bs_read_ue_log(bs, "sps_max_mtt_hierarchy_depth_intra_slice_chroma"); if( sps_max_mtt_hierarchy_depth_intra_slice_chroma != 0) { gf_bs_read_ue_log(bs, "sps_log2_diff_max_bt_min_qt_intra_slice_chroma"); gf_bs_read_ue_log(bs, "sps_log2_diff_max_tt_min_qt_intra_slice_chroma"); } } gf_bs_read_ue_log(bs, "sps_log2_diff_min_qt_min_cb_inter_slice"); u8 sps_max_mtt_hierarchy_depth_inter_slice = gf_bs_read_ue_log(bs, "sps_max_mtt_hierarchy_depth_inter_slice"); if (sps_max_mtt_hierarchy_depth_inter_slice != 0) { gf_bs_read_ue_log(bs, "sps_log2_diff_max_bt_min_qt_inter_slice"); gf_bs_read_ue_log(bs, "sps_log2_diff_max_tt_min_qt_inter_slice"); } u8 
max_luma_transform_size_64_flag = 0; if (CtbSizeY > 32) { max_luma_transform_size_64_flag = gf_bs_read_int_log(bs, 1, "sps_max_luma_transform_size_64_flag"); } sps->transform_skip_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_transform_skip_enabled_flag"); if (sps->transform_skip_enabled_flag) { gf_bs_read_ue_log(bs, "sps_log2_transform_skip_max_size_minus2"); gf_bs_read_int_log(bs, 1, "sps_bdpcm_enabled_flag"); } if (gf_bs_read_int_log(bs, 1, "sps_mts_enabled_flag")) { gf_bs_read_int_log(bs, 1, "sps_explicit_mts_intra_enabled_flag"); gf_bs_read_int_log(bs, 1, "sps_explicit_mts_inter_enabled_flag"); } Bool lfnst_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_lfnst_enabled_flag"); sps->joint_cbcr_enabled_flag = 0; if (sps->chroma_format_idc) { sps->joint_cbcr_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_joint_cbcr_enabled_flag"); u8 sps_same_qp_table_for_chroma_flag = gf_bs_read_int_log(bs, 1, "sps_same_qp_table_for_chroma_flag"); u32 numQpTables = sps_same_qp_table_for_chroma_flag ? 1 : (sps->joint_cbcr_enabled_flag ? 3 : 2); for (i=0; i<numQpTables; i++) { gf_bs_read_se_log_idx(bs, "sps_qp_table_start_minus26", i); u32 j, sps_num_points_in_qp_table = 1 + gf_bs_read_ue_log_idx(bs, "sps_num_points_in_qp_table_minus1", i); for (j=0; j<sps_num_points_in_qp_table; j++) { gf_bs_read_ue_log_idx2(bs, "sps_delta_qp_in_val_minus1", i, j); gf_bs_read_ue_log_idx2(bs, "sps_delta_qp_diff_val", i, j); } } } sps->sao_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_sao_enabled_flag"); sps->alf_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_alf_enabled_flag"); if (sps->alf_enabled_flag && sps->chroma_format_idc) { sps->ccalf_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_ccalf_enabled_flag"); } sps->lmcs_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_lmcs_enabled_flag"); sps->weighted_pred_flag = gf_bs_read_int_log(bs, 1, "sps_weighted_pred_flag"); sps->weighted_bipred_flag = gf_bs_read_int_log(bs, 1, "sps_weighted_bipred_flag"); sps->long_term_ref_pics_flag = gf_bs_read_int_log(bs, 1, "sps_long_term_ref_pics_flag"); if (sps->vps_id>0) sps->inter_layer_prediction_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_inter_layer_prediction_enabled_flag"); sps->idr_rpl_present_flag = gf_bs_read_int_log(bs, 1, "sps_idr_rpl_present_flag"); u32 sps_rpl1_same_as_rpl0 = gf_bs_read_int_log(bs, 1, "sps_rpl1_same_as_rpl0_flag") ? 
1: 2; for (i=0; i<sps_rpl1_same_as_rpl0; i++) { u32 j; sps->num_ref_pic_lists[i] = gf_bs_read_ue_log_idx(bs, "sps_num_ref_pic_lists", i); for (j=0; j<sps->num_ref_pic_lists[i]; j++) { s32 res = vvc_parse_ref_pic_list_struct(bs, sps, i, j, &sps->rps[i][j]); if (res<0) return res; } } gf_bs_read_int_log(bs, 1, "sps_ref_wraparound_enabled_flag"); sps->temporal_mvp_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_temporal_mvp_enabled_flag"); if (sps->temporal_mvp_enabled_flag) { gf_bs_read_int_log(bs, 1, "sps_sbtmvp_enabled_flag"); } Bool amvr_enabled = gf_bs_read_int_log(bs, 1, "sps_amvr_enabled_flag"); sps->bdof_control_present_in_ph_flag = 0; if (gf_bs_read_int_log(bs, 1, "sps_bdof_enabled_flag")) { sps->bdof_control_present_in_ph_flag = gf_bs_read_int_log(bs, 1, "sps_bdof_control_present_in_ph_flag"); } gf_bs_read_int_log(bs, 1, "sps_smvd_enabled_flag"); sps->dmvr_control_present_in_ph_flag = 0; if (gf_bs_read_int_log(bs, 1, "sps_dmvr_enabled_flag")) { sps->dmvr_control_present_in_ph_flag = gf_bs_read_int_log(bs, 1, "sps_dmvr_control_present_in_ph_flag"); } sps->mmvd_fullpel_only_enabled_flag = 0; if (gf_bs_read_int_log(bs, 1, "sps_mmvd_enabled_flag")) { sps->mmvd_fullpel_only_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_mmvd_fullpel_only_enabled_flag"); } u32 MaxNumMergeCand = 6 - gf_bs_read_ue_log(bs, "sps_six_minus_max_num_merge_cand"); sps->prof_control_present_in_ph_flag = 0; gf_bs_read_int_log(bs, 1, "sps_sbt_enabled_flag"); if (gf_bs_read_int_log(bs, 1, "sps_affine_enabled_flag")) { gf_bs_read_ue_log(bs, "sps_five_minus_max_num_subblock_merge_cand"); gf_bs_read_int_log(bs, 1, "sps_6param_affine_enabled_flag"); if (amvr_enabled) { gf_bs_read_int_log(bs, 1, "sps_affine_amvr_enabled_flag"); } if (gf_bs_read_int_log(bs, 1, "sps_affine_prof_enabled_flag")) { sps->prof_control_present_in_ph_flag = gf_bs_read_int_log(bs, 1, "sps_prof_control_present_in_ph_flag"); } } gf_bs_read_int_log(bs, 1, "sps_bcw_enabled_flag"); gf_bs_read_int_log(bs, 1, "sps_ciip_enabled_flag"); if (MaxNumMergeCand >= 2) { Bool gpm_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_gpm_enabled_flag"); if (gpm_enabled_flag && (MaxNumMergeCand >= 3)) { gf_bs_read_ue_log(bs, "sps_max_num_merge_cand_minus_max_num_gpm_cand"); } } gf_bs_read_ue_log(bs, "sps_log2_parallel_merge_level_minus2"); gf_bs_read_int_log(bs, 1, "sps_isp_enabled_flag"); gf_bs_read_int_log(bs, 1, "sps_mrl_enabled_flag"); gf_bs_read_int_log(bs, 1, "sps_mip_enabled_flag"); if (sps->chroma_format_idc != 0) { gf_bs_read_int_log(bs, 1, "sps_cclm_enabled_flag"); } if (sps->chroma_format_idc == 1) { gf_bs_read_int_log(bs, 1, "sps_chroma_horizontal_collocated_flag"); gf_bs_read_int_log(bs, 1, "sps_chroma_vertical_collocated_flag"); } Bool act_enabled_flag = GF_FALSE; Bool palette_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_palette_enabled_flag"); if ((sps->chroma_format_idc == 3) && !max_luma_transform_size_64_flag) { act_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_act_enabled_flag"); } if (sps->transform_skip_enabled_flag || palette_enabled_flag) { gf_bs_read_ue_log(bs, "sps_min_qp_prime_ts"); } if (gf_bs_read_int_log(bs, 1, "sps_ibc_enabled_flag")) { gf_bs_read_ue_log(bs, "sps_six_minus_max_num_ibc_merge_cand"); } if (gf_bs_read_int_log(bs, 1, "sps_ladf_enabled_flag")) { u32 num_ladf_intervals_minus2 = gf_bs_read_int_log(bs, 2, "sps_num_ladf_intervals_minus2"); gf_bs_read_se_log(bs, "sps_ladf_lowest_interval_qp_offset"); for (i=0; i<num_ladf_intervals_minus2+1; i++) { gf_bs_read_se_log_idx(bs, "sps_ladf_qp_offset", i); gf_bs_read_ue_log_idx(bs, 
"sps_ladf_delta_threshold_minus1", i); } } sps->explicit_scaling_list_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_explicit_scaling_list_enabled_flag"); if (lfnst_enabled_flag && sps->explicit_scaling_list_enabled_flag) { gf_bs_read_int_log(bs, 1, "sps_scaling_matrix_for_lfnst_disabled_flag"); } Bool scaling_matrix_for_alternative_colour_space_disabled_flag = 0; if (act_enabled_flag && sps->explicit_scaling_list_enabled_flag) { scaling_matrix_for_alternative_colour_space_disabled_flag = gf_bs_read_int_log(bs, 1, "sps_scaling_matrix_for_alternative_colour_space_disabled_flag"); } if (scaling_matrix_for_alternative_colour_space_disabled_flag) { gf_bs_read_int_log(bs, 1, "sps_scaling_matrix_designated_colour_space_flag"); } sps->dep_quant_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_dep_quant_enabled_flag"); sps->sign_data_hiding_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_sign_data_hiding_enabled_flag"); sps->virtual_boundaries_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_virtual_boundaries_enabled_flag"); if (sps->virtual_boundaries_enabled_flag) { sps->virtual_boundaries_present_flag = gf_bs_read_int_log(bs, 1, "sps_virtual_boundaries_present_flag"); if (sps->virtual_boundaries_present_flag) { u32 num_virtual_boundaries = gf_bs_read_ue_log(bs, "sps_num_ver_virtual_boundaries"); for (i=0; i<num_virtual_boundaries; i++) { gf_bs_read_ue_log_idx(bs, "sps_virtual_boundary_pos_x_minus1", i); } num_virtual_boundaries = gf_bs_read_ue_log(bs, "sps_num_hor_virtual_boundaries"); for (i=0; i<num_virtual_boundaries; i++) { gf_bs_read_ue_log_idx(bs, "sps_virtual_boundary_pos_y_minus1", i); } } } if (sps_ptl_dpb_hrd_params_present_flag) { if (gf_bs_read_int_log(bs, 1, "sps_timing_hrd_params_present_flag")) { Bool general_nal_hrd_params_present_flag, general_vcl_hrd_params_present_flag, general_du_hrd_params_present_flag; u32 hrd_cpb_cnt_minus1=0; u32 sublayer_cpb_params_present_flag = 0; vvc_parse_general_timing_hrd_parameters(bs, sps, NULL, &general_nal_hrd_params_present_flag, &general_vcl_hrd_params_present_flag, &general_du_hrd_params_present_flag, &hrd_cpb_cnt_minus1); if (sps->max_sublayers > 1) { sublayer_cpb_params_present_flag = gf_bs_read_int_log(bs, 1, "sps_sublayer_cpb_params_present_flag"); } u32 firstSubLayer = sublayer_cpb_params_present_flag ? 0 : sps->max_sublayers - 1; vvc_parse_ols_timing_hrd_parameters(bs, firstSubLayer, sps->max_sublayers-1, general_nal_hrd_params_present_flag, general_vcl_hrd_params_present_flag, general_du_hrd_params_present_flag, hrd_cpb_cnt_minus1); } } gf_bs_read_int_log(bs, 1, "sps_field_seq_flag"); if (vui_flag_pos) { *vui_flag_pos = (u32)gf_bs_get_bit_offset(bs); } //all this to get to VUI !!! 
if (gf_bs_read_int_log(bs, 1, "sps_vui_parameters_present_flag")) { gf_bs_read_ue_log(bs, "sps_vui_payload_size_minus1"); while (!gf_bs_is_align(bs)) { gf_bs_read_int_log(bs, 1, "sps_vui_alignment_zero_bit"); } //vui parameters Bool vui_progressive_source_flag = gf_bs_read_int_log(bs, 1, "vui_progressive_source_flag"); Bool vui_interlaced_source_flag = gf_bs_read_int_log(bs, 1, "vui_interlaced_source_flag"); gf_bs_read_int_log(bs, 1, "vui_non_packed_constraint_flag"); gf_bs_read_int_log(bs, 1, "vui_non_projected_constraint_flag"); sps->aspect_ratio_info_present_flag = gf_bs_read_int_log(bs, 1, "vui_aspect_ratio_info_present_flag"); if (sps->aspect_ratio_info_present_flag) { gf_bs_read_int_log(bs, 1, "vui_aspect_ratio_constant_flag"); sps->sar_idc = gf_bs_read_int_log(bs, 8, "vui_aspect_ratio_idc"); if (sps->sar_idc== 0xFF) { sps->sar_width = gf_bs_read_int_log(bs, 16, "vui_sar_width"); sps->sar_height = gf_bs_read_int_log(bs, 16, "vui_sar_height"); } } sps->overscan_info_present_flag = gf_bs_read_int_log(bs, 1, "vui_overscan_info_present_flag"); if (sps->overscan_info_present_flag) { gf_bs_read_int_log(bs, 1, "vui_overscan_appropriate_flag"); } sps->colour_description_present_flag = gf_bs_read_int_log(bs, 1, "vui_colour_description_present_flag"); if (sps->colour_description_present_flag) { sps->colour_primaries = gf_bs_read_int_log(bs, 8, "vui_colour_primaries"); sps->transfer_characteristics = gf_bs_read_int_log(bs, 8, "vui_transfer_characteristics"); sps->matrix_coefficients = gf_bs_read_int_log(bs, 8, "vui_matrix_coeffs"); sps->video_full_range_flag = gf_bs_read_int_log(bs, 1, "vui_full_range_flag"); } if (gf_bs_read_int_log(bs, 1, " vui_chroma_loc_info_present_flag")) { if (vui_progressive_source_flag && !vui_interlaced_source_flag) { gf_bs_read_ue_log(bs, "vui_chroma_sample_loc_type_frame"); } else { gf_bs_read_ue_log(bs, "vui_chroma_sample_loc_type_top_field"); gf_bs_read_ue_log(bs, "vui_chroma_sample_loc_type_bottom_field"); } } //WE DON'T PARSE vui_payload_bit_equal_to_one because we dont parse the rest (sps extensions) //if needed, see rewrite_vui code } return sps_id; }
0
[ "CWE-190" ]
gpac
0cd19f4db70615d707e0e6202933c2ea0c1d36df
280,318,984,793,872,900,000,000,000,000,000,000,000
388
fixed #2067
xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
				seqcount_spinlock_t *count,
				const xfrm_address_t *addr, u16 family)
{
	const struct rb_node *parent;
	int seq;

again:
	seq = read_seqcount_begin(count);

	parent = rcu_dereference_raw(r->rb_node);
	while (parent) {
		struct xfrm_pol_inexact_node *node;
		int delta;

		node = rb_entry(parent, struct xfrm_pol_inexact_node, node);
		delta = xfrm_policy_addr_delta(addr, &node->addr,
					       node->prefixlen, family);
		if (delta < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			continue;
		} else if (delta > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
			continue;
		}

		return node;
	}

	if (read_seqcount_retry(count, seq))
		goto again;

	return NULL;
}
0
[ "CWE-703" ]
linux
f85daf0e725358be78dfd208dea5fd665d8cb901
36,817,527,012,711,465,000,000,000,000,000,000,000
35
xfrm: xfrm_policy: fix a possible double xfrm_pols_put() in xfrm_bundle_lookup() xfrm_policy_lookup() will call xfrm_pol_hold_rcu() to get a refcount of pols[0]. This refcount can be dropped in xfrm_expand_policies() when xfrm_expand_policies() returns an error; pols[0]'s refcount is balanced there. But xfrm_bundle_lookup() will also call xfrm_pols_put() with num_pols == 1 to drop this refcount when xfrm_expand_policies() returns an error. This patch also fixes an illegal address access: pols[0] will hold an error pointer when xfrm_policy_lookup() fails, which leads xfrm_pols_put() to dereference an illegal address in xfrm_bundle_lookup()'s error path. Fix both problems by setting num_pols = 0 in xfrm_expand_policies()'s error path. Fixes: 80c802f3073e ("xfrm: cache bundles instead of policies for outgoing flows") Signed-off-by: Hangyu Hua <[email protected]> Signed-off-by: Steffen Klassert <[email protected]>
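The refcount imbalance this commit describes is easy to model outside the kernel. Below is a minimal standalone C analogue; all names here (struct obj, expand(), held[]) are invented for illustration, not xfrm APIs. The key move of the fix: when the callee drops the reference it was handed, it must also report zero held objects so the caller's generic cleanup cannot put() the same object twice.

    #include <assert.h>
    #include <stdio.h>

    /* Toy refcounted object standing in for a policy. */
    struct obj { int refs; };

    static void get(struct obj *o) { o->refs++; }
    static void put(struct obj *o) { assert(o->refs > 0); o->refs--; }

    /* Mirrors the fix: on failure, drop our reference AND tell the
     * caller we hold nothing, so its cleanup path stays balanced. */
    static int expand(struct obj **held, int *num_held)
    {
        int err = -1; /* pretend expansion failed */
        if (err) {
            put(held[0]);  /* balance the reference taken by the lookup */
            *num_held = 0; /* the fix: caller must not put() it again */
            return err;
        }
        return 0;
    }

    int main(void)
    {
        struct obj o = { .refs = 0 };
        struct obj *held[1] = { &o };
        int num_held = 1;

        get(&o); /* lookup took a reference */
        if (expand(held, &num_held) < 0) {
            /* generic error path: drop whatever we still hold */
            for (int i = 0; i < num_held; i++)
                put(held[i]);
        }
        printf("refs=%d\n", o.refs); /* 0, not -1 */
        return 0;
    }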
boost::optional<DocumentSource::DistributedPlanLogic> DocumentSourceGroup::distributedPlanLogic() {
    intrusive_ptr<DocumentSourceGroup> mergingGroup(new DocumentSourceGroup(pExpCtx));
    mergingGroup->setDoingMerge(true);

    VariablesParseState vps = pExpCtx->variablesParseState;
    /* the merger will use the same grouping key */
    mergingGroup->setIdExpression(ExpressionFieldPath::parse(pExpCtx, "$$ROOT._id", vps));

    for (auto&& accumulatedField : _accumulatedFields) {
        // The merger's output field names will be the same, as will the accumulator factories.
        // However, for some accumulators, the expression to be accumulated will be different. The
        // original accumulator may be collecting an expression based on a field expression or
        // constant. Here, we accumulate the output of the same name from the prior group.
        auto copiedAccumulatedField = accumulatedField;
        copiedAccumulatedField.expr.argument =
            ExpressionFieldPath::parse(pExpCtx, "$$ROOT." + copiedAccumulatedField.fieldName, vps);
        mergingGroup->addAccumulator(copiedAccumulatedField);
    }

    // {shardsStage, mergingStage, sortPattern}
    return DistributedPlanLogic{this, mergingGroup, boost::none};
}
0
[]
mongo
07b8851825836911265e909d6842d4586832f9bb
264,574,960,936,225,760,000,000,000,000,000,000,000
22
SERVER-60218-44: SERVER-60218 add initialize helper function for document_source_group (cherry picked from commit 867f52afbb79bc00e35c70f8e0681b7d602f97b2)
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              bool is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write,
                             MEMTXATTRS_UNSPECIFIED);
}
0
[ "CWE-787" ]
qemu
4bfb024bc76973d40a359476dc0291f46e435442
183,535,990,214,683,100,000,000,000,000,000,000,000
7
memory: clamp cached translation in case it points to an MMIO region In using the address_space_translate_internal API, address_space_cache_init forgot one piece of advice that can be found in the code for address_space_translate_internal: /* MMIO registers can be expected to perform full-width accesses based only * on their address, without considering adjacent registers that could * decode to completely different MemoryRegions. When such registers * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO * regions overlap wildly. For this reason we cannot clamp the accesses * here. * * If the length is small (as is the case for address_space_ldl/stl), * everything works fine. If the incoming length is large, however, * the caller really has to do the clamping through memory_access_size. */ address_space_cache_init is exactly one such case where "the incoming length is large", therefore we need to clamp the resulting length---not to memory_access_size though, since we are not doing an access yet, but to the size of the resulting section. This ensures that subsequent accesses to the cached MemoryRegionSection will be in range. With this patch, the enclosed testcase notices that the used ring does not fit into the MSI-X table and prints a "qemu-system-x86_64: Cannot map used" error. Signed-off-by: Paolo Bonzini <[email protected]>
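The clamping idea in this commit generalizes: once an address has resolved into a region, any cached length must be cut down to what remains of that region, so later accesses through the cache cannot run past it. A minimal standalone C sketch of the arithmetic (struct region and clamp_len are illustrative names, not QEMU's types):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical region: 'size' bytes starting at 'base'. */
    struct region { uint64_t base; uint64_t size; };

    /* Clamp a requested [addr, addr+len) window to the region 'addr'
     * resolved to; the caller guarantees addr lies inside the region. */
    static uint64_t clamp_len(const struct region *r, uint64_t addr, uint64_t len)
    {
        uint64_t offset = addr - r->base;
        uint64_t avail = r->size - offset;
        return len < avail ? len : avail;
    }

    int main(void)
    {
        struct region mmio = { .base = 0x1000, .size = 0x100 };
        /* A 4096-byte request starting 0xf0 into a 0x100-byte region
         * is clamped to the 16 bytes that actually remain. */
        printf("%llu\n", (unsigned long long)clamp_len(&mmio, 0x10f0, 4096));
        return 0;
    }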
void pre_sev_run(struct vcpu_svm *svm, int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
	int asid = sev_get_asid(svm->vcpu.kvm);

	/* Assign the asid allocated with this SEV guest */
	svm->vmcb->control.asid = asid;

	/*
	 * Flush guest TLB:
	 *
	 * 1) when different VMCB for the same ASID is to be run on the same host CPU.
	 * 2) or this VMCB was executed on different host CPU in previous VMRUNs.
	 */
	if (sd->sev_vmcbs[asid] == svm->vmcb &&
	    svm->vcpu.arch.last_vmentry_cpu == cpu)
		return;

	sd->sev_vmcbs[asid] = svm->vmcb;
	svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
	vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
}
0
[]
linux
7be74942f184fdfba34ddd19a0d995deb34d4a03
82,306,258,198,673,680,000,000,000,000,000,000,000
22
KVM: SVM: Periodically schedule when unregistering regions on destroy There may be many encrypted regions that need to be unregistered when a SEV VM is destroyed. This can lead to soft lockups. For example, on a host running 4.15: watchdog: BUG: soft lockup - CPU#206 stuck for 11s! [t_virtual_machi:194348] CPU: 206 PID: 194348 Comm: t_virtual_machi RIP: 0010:free_unref_page_list+0x105/0x170 ... Call Trace: [<0>] release_pages+0x159/0x3d0 [<0>] sev_unpin_memory+0x2c/0x50 [kvm_amd] [<0>] __unregister_enc_region_locked+0x2f/0x70 [kvm_amd] [<0>] svm_vm_destroy+0xa9/0x200 [kvm_amd] [<0>] kvm_arch_destroy_vm+0x47/0x200 [<0>] kvm_put_kvm+0x1a8/0x2f0 [<0>] kvm_vm_release+0x25/0x30 [<0>] do_exit+0x335/0xc10 [<0>] do_group_exit+0x3f/0xa0 [<0>] get_signal+0x1bc/0x670 [<0>] do_signal+0x31/0x130 Although the CLFLUSH is no longer issued on every encrypted region to be unregistered, there are no other changes that can prevent soft lockups for very large SEV VMs in the latest kernel. Periodically schedule if necessary. This still holds kvm->lock across the resched, but since this only happens when the VM is destroyed this is assumed to be acceptable. Signed-off-by: David Rientjes <[email protected]> Message-Id: <alpine.DEB.2.23.453.2008251255240.2987727@chino.kir.corp.google.com> Signed-off-by: Paolo Bonzini <[email protected]>
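The general shape of the fix, yield periodically inside a very long teardown loop, can be shown with a userspace stand-in. In the kernel the yield point would be cond_resched(); the sketch below approximates it with sched_yield(), and unregister_all() plus the batch mask are invented for illustration:

    #include <sched.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel pattern: when tearing down a
     * very long list, yield the CPU periodically instead of spinning
     * for many seconds and tripping the soft-lockup watchdog. */
    static void unregister_all(int nregions)
    {
        for (int i = 0; i < nregions; i++) {
            /* ... release one region ... */
            if ((i & 0xFFF) == 0xFFF)
                sched_yield(); /* kernel code would use cond_resched() here */
        }
    }

    int main(void)
    {
        unregister_all(1 << 20);
        puts("done");
        return 0;
    }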
int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
{
    int result = 0;
    ltv_t *pLtv;
    bool_t ltvAllocated = FALSE;
    ENCSTRCT sEncryption;
    size_t len;

#ifdef USE_WDS
    hcf_16 hcfPort = HCF_PORT_0;
#endif /* USE_WDS */
    /*------------------------------------------------------------------------*/
    DBG_FUNC("wvlan_uil_put_info");
    DBG_ENTER(DbgInfo);

    if (urq->hcfCtx == &(lp->hcfCtx)) {
        if (capable(CAP_NET_ADMIN)) {
            if ((urq->data != NULL) && (urq->len != 0)) {
                /* Make sure that we have at least a command and length to send. */
                if (urq->len < (sizeof(hcf_16) * 2)) {
                    urq->len = sizeof(lp->ltvRecord);
                    urq->result = UIL_ERR_LEN;
                    DBG_ERROR(DbgInfo, "No Length/Type in LTV!!!\n");
                    DBG_ERROR(DbgInfo, "UIL_ERR_LEN\n");
                    DBG_LEAVE(DbgInfo);
                    return result;
                }

                /* Verify the user buffer */
                result = verify_area(VERIFY_READ, urq->data, urq->len);
                if (result != 0) {
                    urq->result = UIL_FAILURE;
                    DBG_ERROR(DbgInfo, "verify_area(), VERIFY_READ FAILED\n");
                    DBG_LEAVE(DbgInfo);
                    return result;
                }

                /* Get only the command and length information. */
                copy_from_user(&(lp->ltvRecord), urq->data, sizeof(hcf_16) * 2);

                /* Make sure the incoming LTV record length is within the bounds of
                   the IOCTL length */
                if (((lp->ltvRecord.len + 1) * sizeof(hcf_16)) > urq->len) {
                    urq->len = sizeof(lp->ltvRecord);
                    urq->result = UIL_ERR_LEN;
                    DBG_ERROR(DbgInfo, "UIL_ERR_LEN\n");
                    DBG_LEAVE(DbgInfo);
                    return result;
                }

                /* If the requested length is greater than the size of our local
                   LTV record, try to allocate it from the kernel stack.
                   Otherwise, we just use our local LTV record. */
                if (urq->len > sizeof(lp->ltvRecord)) {
                    pLtv = kmalloc(urq->len, GFP_KERNEL);
                    if (pLtv != NULL) {
                        ltvAllocated = TRUE;
                    } else {
                        DBG_ERROR(DbgInfo, "Alloc FAILED\n");
                        urq->len = sizeof(lp->ltvRecord);
                        urq->result = UIL_ERR_LEN;
                        result = -ENOMEM;
                        DBG_LEAVE(DbgInfo);
                        return result;
                    }
                } else {
                    pLtv = &(lp->ltvRecord);
                }

                /* Copy the data from the user's buffer into the local LTV
                   record data area. */
                copy_from_user(pLtv, urq->data, urq->len);

                /* We need to snoop the commands to see if there is anything we
                   need to store for the purposes of a reset or start/stop
                   sequence. Perform endian translation as needed */
                switch (pLtv->typ) {
                case CFG_CNF_PORT_TYPE:
                    lp->PortType = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
                case CFG_CNF_OWN_MAC_ADDR:
                    /* TODO: determine if we are going to store anything based on this */
                    break;
                case CFG_CNF_OWN_CHANNEL:
                    lp->Channel = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
                /* CFG_CNF_OWN_SSID currently same as CNF_DESIRED_SSID. Do we
                   need separate storage for this? */
                /* case CFG_CNF_OWN_SSID: */
                case CFG_CNF_OWN_ATIM_WINDOW:
                    lp->atimWindow = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
                case CFG_CNF_SYSTEM_SCALE:
                    lp->DistanceBetweenAPs = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                case CFG_CNF_MAX_DATA_LEN:
                    /* TODO: determine if we are going to store anything based on this */
                    break;
                case CFG_CNF_PM_ENABLED:
                    lp->PMEnabled = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
                case CFG_CNF_MCAST_RX:
                    lp->MulticastReceive = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
                case CFG_CNF_MAX_SLEEP_DURATION:
                    lp->MaxSleepDuration = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
                case CFG_CNF_HOLDOVER_DURATION:
                    lp->holdoverDuration = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
                case CFG_CNF_OWN_NAME:
                    memset(lp->StationName, 0, sizeof(lp->StationName));
                    len = min_t(size_t, pLtv->u.u16[0], sizeof(lp->StationName));
                    strlcpy(lp->StationName, &pLtv->u.u8[2], len);
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
                case CFG_CNF_LOAD_BALANCING:
                    lp->loadBalancing = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
                case CFG_CNF_MEDIUM_DISTRIBUTION:
                    lp->mediumDistribution = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
#ifdef WARP
                case CFG_CNF_TX_POW_LVL:
                    lp->txPowLevel = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
                /* case CFG_CNF_SHORT_RETRY_LIMIT: */    /* Short Retry Limit */
                /* case 0xFC33: */    /* Long Retry Limit */
                case CFG_SUPPORTED_RATE_SET_CNTL:    /* Supported Rate Set Control */
                    lp->srsc[0] = pLtv->u.u16[0];
                    lp->srsc[1] = pLtv->u.u16[1];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    pLtv->u.u16[1] = CNV_INT_TO_LITTLE(pLtv->u.u16[1]);
                    break;
                case CFG_BASIC_RATE_SET_CNTL:    /* Basic Rate Set Control */
                    lp->brsc[0] = pLtv->u.u16[0];
                    lp->brsc[1] = pLtv->u.u16[1];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    pLtv->u.u16[1] = CNV_INT_TO_LITTLE(pLtv->u.u16[1]);
                    break;
                case CFG_CNF_CONNECTION_CNTL:
                    lp->connectionControl = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
                /* case CFG_PROBE_DATA_RATE: */
#endif /* HERMES25 */
#if 1 /* ;? (HCF_TYPE) & HCF_TYPE_AP */
      /* ;?should we restore this to allow smaller memory footprint */
                case CFG_CNF_OWN_DTIM_PERIOD:
                    lp->DTIMPeriod = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
#ifdef WARP
                case CFG_CNF_OWN_BEACON_INTERVAL:    /* Own Beacon Interval */
                    lp->ownBeaconInterval = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
#endif /* WARP */
                case CFG_COEXISTENSE_BEHAVIOUR:    /* Coexistence behavior */
                    lp->coexistence = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
#ifdef USE_WDS
                case CFG_CNF_WDS_ADDR1:
                    memcpy(&lp->wds_port[0].wdsAddress, &pLtv->u.u8[0], ETH_ALEN);
                    hcfPort = HCF_PORT_1;
                    break;
                case CFG_CNF_WDS_ADDR2:
                    memcpy(&lp->wds_port[1].wdsAddress, &pLtv->u.u8[0], ETH_ALEN);
                    hcfPort = HCF_PORT_2;
                    break;
                case CFG_CNF_WDS_ADDR3:
                    memcpy(&lp->wds_port[2].wdsAddress, &pLtv->u.u8[0], ETH_ALEN);
                    hcfPort = HCF_PORT_3;
                    break;
                case CFG_CNF_WDS_ADDR4:
                    memcpy(&lp->wds_port[3].wdsAddress, &pLtv->u.u8[0], ETH_ALEN);
                    hcfPort = HCF_PORT_4;
                    break;
                case CFG_CNF_WDS_ADDR5:
                    memcpy(&lp->wds_port[4].wdsAddress, &pLtv->u.u8[0], ETH_ALEN);
                    hcfPort = HCF_PORT_5;
                    break;
                case CFG_CNF_WDS_ADDR6:
                    memcpy(&lp->wds_port[5].wdsAddress, &pLtv->u.u8[0], ETH_ALEN);
                    hcfPort = HCF_PORT_6;
                    break;
#endif /* USE_WDS */
                case CFG_CNF_MCAST_PM_BUF:
                    lp->multicastPMBuffering = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
                case CFG_CNF_REJECT_ANY:
                    lp->RejectAny = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
#endif
                case CFG_CNF_ENCRYPTION:
                    lp->EnableEncryption = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
                case CFG_CNF_AUTHENTICATION:
                    lp->authentication = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
#if 1 /* ;? (HCF_TYPE) & HCF_TYPE_AP */
      /* ;?should we restore this to allow smaller memory footprint */
                /* case CFG_CNF_EXCL_UNENCRYPTED:
                    lp->ExcludeUnencrypted = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break; */
                case CFG_CNF_MCAST_RATE:
                    /* TODO: determine if we are going to store anything based on this */
                    break;
                case CFG_CNF_INTRA_BSS_RELAY:
                    lp->intraBSSRelay = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
#endif
                case CFG_CNF_MICRO_WAVE:
                    /* TODO: determine if we are going to store anything based on this */
                    break;
                /*case CFG_CNF_LOAD_BALANCING:*/
                    /* TODO: determine if we are going to store anything based on this */
                    /* break; */
                /* case CFG_CNF_MEDIUM_DISTRIBUTION: */
                    /* TODO: determine if we are going to store anything based on this */
                    /* break; */
                /* case CFG_CNF_RX_ALL_GROUP_ADDRESS: */
                    /* TODO: determine if we are going to store anything based on this */
                    /* break; */
                /* case CFG_CNF_COUNTRY_INFO: */
                    /* TODO: determine if we are going to store anything based on this */
                    /* break; */
                case CFG_CNF_OWN_SSID:
                /* case CNF_DESIRED_SSID: */
                case CFG_DESIRED_SSID:
                    memset(lp->NetworkName, 0, sizeof(lp->NetworkName));
                    memcpy((void *)lp->NetworkName, (void *)&pLtv->u.u8[2],
                           (size_t)pLtv->u.u16[0]);
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    /* take care of the special network name "ANY" case */
                    if ((strlen(&pLtv->u.u8[2]) == 0) ||
                        (strcmp(&pLtv->u.u8[2], "ANY") == 0) ||
                        (strcmp(&pLtv->u.u8[2], "any") == 0)) {
                        /* set the SSID_STRCT llen field (u16[0]) to zero, and
                           effectually null the string u8[2] */
                        pLtv->u.u16[0] = 0;
                        pLtv->u.u8[2] = 0;
                    }
                    break;
                case CFG_GROUP_ADDR:
                    /* TODO: determine if we are going to store anything based on this */
                    break;
                case CFG_CREATE_IBSS:
                    lp->CreateIBSS = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
                case CFG_RTS_THRH:
                    lp->RTSThreshold = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
                case CFG_TX_RATE_CNTL:
                    lp->TxRateControl[0] = pLtv->u.u16[0];
                    lp->TxRateControl[1] = pLtv->u.u16[1];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    pLtv->u.u16[1] = CNV_INT_TO_LITTLE(pLtv->u.u16[1]);
                    break;
                case CFG_PROMISCUOUS_MODE:
                    /* TODO: determine if we are going to store anything based on this */
                    break;
                /* case CFG_WAKE_ON_LAN: */
                    /* TODO: determine if we are going to store anything based on this */
                    /* break; */
#if 1 /* ;? #if (HCF_TYPE) & HCF_TYPE_AP */
      /* ;?should we restore this to allow smaller memory footprint */
                case CFG_RTS_THRH0:
                    lp->RTSThreshold = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
                case CFG_TX_RATE_CNTL0:
                    /*;?no idea what this should be, get going so comment it out
                    lp->TxRateControl = pLtv->u.u16[0];*/
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
#ifdef USE_WDS
                case CFG_RTS_THRH1:
                    lp->wds_port[0].rtsThreshold = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    hcfPort = HCF_PORT_1;
                    break;
                case CFG_RTS_THRH2:
                    lp->wds_port[1].rtsThreshold = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    hcfPort = HCF_PORT_2;
                    break;
                case CFG_RTS_THRH3:
                    lp->wds_port[2].rtsThreshold = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    hcfPort = HCF_PORT_3;
                    break;
                case CFG_RTS_THRH4:
                    lp->wds_port[3].rtsThreshold = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    hcfPort = HCF_PORT_4;
                    break;
                case CFG_RTS_THRH5:
                    lp->wds_port[4].rtsThreshold = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    hcfPort = HCF_PORT_5;
                    break;
                case CFG_RTS_THRH6:
                    lp->wds_port[5].rtsThreshold = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    hcfPort = HCF_PORT_6;
                    break;
                case CFG_TX_RATE_CNTL1:
                    lp->wds_port[0].txRateCntl = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    hcfPort = HCF_PORT_1;
                    break;
                case CFG_TX_RATE_CNTL2:
                    lp->wds_port[1].txRateCntl = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    hcfPort = HCF_PORT_2;
                    break;
                case CFG_TX_RATE_CNTL3:
                    lp->wds_port[2].txRateCntl = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    hcfPort = HCF_PORT_3;
                    break;
                case CFG_TX_RATE_CNTL4:
                    lp->wds_port[3].txRateCntl = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    hcfPort = HCF_PORT_4;
                    break;
                case CFG_TX_RATE_CNTL5:
                    lp->wds_port[4].txRateCntl = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    hcfPort = HCF_PORT_5;
                    break;
                case CFG_TX_RATE_CNTL6:
                    lp->wds_port[5].txRateCntl = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    hcfPort = HCF_PORT_6;
                    break;
#endif /* USE_WDS */
#endif /* (HCF_TYPE) & HCF_TYPE_AP */
                case CFG_DEFAULT_KEYS:
                    {
                        CFG_DEFAULT_KEYS_STRCT *pKeys = (CFG_DEFAULT_KEYS_STRCT *)pLtv;

                        pKeys->key[0].len = CNV_INT_TO_LITTLE(pKeys->key[0].len);
                        pKeys->key[1].len = CNV_INT_TO_LITTLE(pKeys->key[1].len);
                        pKeys->key[2].len = CNV_INT_TO_LITTLE(pKeys->key[2].len);
                        pKeys->key[3].len = CNV_INT_TO_LITTLE(pKeys->key[3].len);

                        memcpy((void *)&(lp->DefaultKeys), (void *)pKeys,
                               sizeof(CFG_DEFAULT_KEYS_STRCT));
                    }
                    break;
                case CFG_TX_KEY_ID:
                    lp->TransmitKeyID = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
                case CFG_SCAN_SSID:
                    /* TODO: determine if we are going to store anything based on this */
                    break;
                case CFG_TICK_TIME:
                    /* TODO: determine if we are going to store anything based on this */
                    break;
                /* these RIDS are Info RIDs, and should they be allowed for puts??? */
                case CFG_MAX_LOAD_TIME:
                case CFG_DL_BUF:
                /* case CFG_HSI_SUP_RANGE: */
                case CFG_NIC_SERIAL_NUMBER:
                case CFG_NIC_IDENTITY:
                case CFG_NIC_MFI_SUP_RANGE:
                case CFG_NIC_CFI_SUP_RANGE:
                case CFG_NIC_TEMP_TYPE:
                case CFG_NIC_PROFILE:
                case CFG_FW_IDENTITY:
                case CFG_FW_SUP_RANGE:
                case CFG_MFI_ACT_RANGES_STA:
                case CFG_CFI_ACT_RANGES_STA:
                case CFG_PORT_STAT:
                case CFG_CUR_SSID:
                case CFG_CUR_BSSID:
                case CFG_COMMS_QUALITY:
                case CFG_CUR_TX_RATE:
                case CFG_CUR_BEACON_INTERVAL:
                case CFG_CUR_SCALE_THRH:
                case CFG_PROTOCOL_RSP_TIME:
                case CFG_CUR_SHORT_RETRY_LIMIT:
                case CFG_CUR_LONG_RETRY_LIMIT:
                case CFG_MAX_TX_LIFETIME:
                case CFG_MAX_RX_LIFETIME:
                case CFG_CF_POLLABLE:
                case CFG_AUTHENTICATION_ALGORITHMS:
                case CFG_PRIVACY_OPT_IMPLEMENTED:
                /* case CFG_CURRENT_REMOTE_RATES: */
                /* case CFG_CURRENT_USED_RATES: */
                /* case CFG_CURRENT_SYSTEM_SCALE: */
                /* case CFG_CURRENT_TX_RATE1: */
                /* case CFG_CURRENT_TX_RATE2: */
                /* case CFG_CURRENT_TX_RATE3: */
                /* case CFG_CURRENT_TX_RATE4: */
                /* case CFG_CURRENT_TX_RATE5: */
                /* case CFG_CURRENT_TX_RATE6: */
                case CFG_NIC_MAC_ADDR:
                case CFG_PCF_INFO:
                /* case CFG_CURRENT_COUNTRY_INFO: */
                case CFG_PHY_TYPE:
                case CFG_CUR_CHANNEL:
                /* case CFG_CURRENT_POWER_STATE: */
                /* case CFG_CCAMODE: */
                case CFG_SUPPORTED_DATA_RATES:
                    break;
                case CFG_AP_MODE:
                    /*;? lp->DownloadFirmware = (pLtv->u.u16[0]) + 1; */
                    DBG_ERROR(DbgInfo, "set CFG_AP_MODE no longer supported\n");
                    break;
                case CFG_ENCRYPT_STRING:
                    /* TODO: ENDIAN TRANSLATION HERE??? */
                    memset(lp->szEncryption, 0, sizeof(lp->szEncryption));
                    memcpy((void *)lp->szEncryption, (void *)&pLtv->u.u8[0],
                           (pLtv->len * sizeof(hcf_16)));
                    wl_wep_decode(CRYPT_CODE, &sEncryption, lp->szEncryption);

                    /* the Linux driver likes to use 1-4 for the key IDs, and then
                       convert to 0-3 when sending to the card. The Windows code
                       base used 0-3 in the API DLL, which was ported to Linux. For
                       the sake of the user experience, we decided to keep 0-3 as
                       the numbers used in the DLL; and will perform the +1
                       conversion here. We could have converted the entire Linux
                       driver, but this is less obtrusive. This may be a "todo" to
                       convert the whole driver */
                    lp->TransmitKeyID = sEncryption.wTxKeyID + 1;
                    lp->EnableEncryption = sEncryption.wEnabled;

                    memcpy(&lp->DefaultKeys, &sEncryption.EncStr,
                           sizeof(CFG_DEFAULT_KEYS_STRCT));
                    break;
                /*case CFG_COUNTRY_STRING:
                    memset(lp->countryString, 0, sizeof(lp->countryString));
                    memcpy((void *)lp->countryString, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0]);
                    break;
                */
                case CFG_DRIVER_ENABLE:
                    lp->driverEnable = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
                case CFG_WOLAS_ENABLE:
                    lp->wolasEnable = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
                case CFG_SET_WPA_AUTH_KEY_MGMT_SUITE:
                    lp->AuthKeyMgmtSuite = pLtv->u.u16[0];
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
                case CFG_DISASSOCIATE_ADDR:
                    pLtv->u.u16[ETH_ALEN / 2] = CNV_INT_TO_LITTLE(pLtv->u.u16[ETH_ALEN / 2]);
                    break;
                case CFG_ADD_TKIP_DEFAULT_KEY:
                case CFG_REMOVE_TKIP_DEFAULT_KEY:
                    /* Endian convert the Tx Key Information */
                    pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                    break;
                case CFG_ADD_TKIP_MAPPED_KEY:
                    break;
                case CFG_REMOVE_TKIP_MAPPED_KEY:
                    break;
                /* some RIDs just can't be put */
                case CFG_MB_INFO:
                case CFG_IFB:
                default:
                    break;
                }

                /* This code will prevent Static Configuration Entities from
                   being sent to the card, as they require a call to
                   UIL_ACT_APPLY to take effect. Dynamic Entities will be sent
                   immediately */
                switch (pLtv->typ) {
                case CFG_CNF_PORT_TYPE:
                case CFG_CNF_OWN_MAC_ADDR:
                case CFG_CNF_OWN_CHANNEL:
                case CFG_CNF_OWN_SSID:
                case CFG_CNF_OWN_ATIM_WINDOW:
                case CFG_CNF_SYSTEM_SCALE:
                case CFG_CNF_MAX_DATA_LEN:
                case CFG_CNF_PM_ENABLED:
                case CFG_CNF_MCAST_RX:
                case CFG_CNF_MAX_SLEEP_DURATION:
                case CFG_CNF_HOLDOVER_DURATION:
                case CFG_CNF_OWN_NAME:
                case CFG_CNF_LOAD_BALANCING:
                case CFG_CNF_MEDIUM_DISTRIBUTION:
#ifdef WARP
                case CFG_CNF_TX_POW_LVL:
                case CFG_CNF_CONNECTION_CNTL:
                /*case CFG_PROBE_DATA_RATE: */
#endif /* HERMES25 */
#if 1 /*;? (HCF_TYPE) & HCF_TYPE_AP */
      /*;?should we restore this to allow smaller memory footprint */
                case CFG_CNF_OWN_DTIM_PERIOD:
#ifdef WARP
                case CFG_CNF_OWN_BEACON_INTERVAL:    /* Own Beacon Interval */
#endif /* WARP */
#ifdef USE_WDS
                case CFG_CNF_WDS_ADDR1:
                case CFG_CNF_WDS_ADDR2:
                case CFG_CNF_WDS_ADDR3:
                case CFG_CNF_WDS_ADDR4:
                case CFG_CNF_WDS_ADDR5:
                case CFG_CNF_WDS_ADDR6:
#endif
                case CFG_CNF_MCAST_PM_BUF:
                case CFG_CNF_REJECT_ANY:
#endif
                case CFG_CNF_ENCRYPTION:
                case CFG_CNF_AUTHENTICATION:
#if 1 /* ;? (HCF_TYPE) & HCF_TYPE_AP */
      /* ;?should we restore this to allow smaller memory footprint */
                case CFG_CNF_EXCL_UNENCRYPTED:
                case CFG_CNF_MCAST_RATE:
                case CFG_CNF_INTRA_BSS_RELAY:
#endif
                case CFG_CNF_MICRO_WAVE:
                /* case CFG_CNF_LOAD_BALANCING: */
                /* case CFG_CNF_MEDIUM_DISTRIBUTION: */
                /* case CFG_CNF_RX_ALL_GROUP_ADDRESS: */
                /* case CFG_CNF_COUNTRY_INFO: */
                /* case CFG_COUNTRY_STRING: */
                case CFG_AP_MODE:
                case CFG_ENCRYPT_STRING:
                /* case CFG_DRIVER_ENABLE: */
                case CFG_WOLAS_ENABLE:
                case CFG_MB_INFO:
                case CFG_IFB:
                    break;
                /* Deal with this dynamic MSF RID, as it's required for WPA */
                case CFG_DRIVER_ENABLE:
                    if (lp->driverEnable) {
                        hcf_cntl(&(lp->hcfCtx), HCF_CNTL_ENABLE | HCF_PORT_0);
                        hcf_cntl(&(lp->hcfCtx), HCF_CNTL_CONNECT);
                    } else {
                        hcf_cntl(&(lp->hcfCtx), HCF_CNTL_DISABLE | HCF_PORT_0);
                        hcf_cntl(&(lp->hcfCtx), HCF_CNTL_DISCONNECT);
                    }
                    break;
                default:
                    wl_act_int_off(lp);
                    urq->result = hcf_put_info(&(lp->hcfCtx), (LTVP) pLtv);
                    wl_act_int_on(lp);
                    break;
                }

                if (ltvAllocated)
                    kfree(pLtv);
            } else {
                urq->result = UIL_FAILURE;
            }
        } else {
            DBG_ERROR(DbgInfo, "EPERM\n");
            urq->result = UIL_FAILURE;
            result = -EPERM;
        }
    } else {
        DBG_ERROR(DbgInfo, "UIL_ERR_WRONG_IFB\n");
        urq->result = UIL_ERR_WRONG_IFB;
    }

    DBG_LEAVE(DbgInfo);
    return result;
} /* wvlan_uil_put_info */
0
[ "CWE-119", "CWE-787" ]
linux
b5e2f339865fb443107e5b10603e53bbc92dc054
141,593,053,337,897,410,000,000,000,000,000,000,000
600
staging: wlags49_h2: buffer overflow setting station name We need to check the length parameter before doing the memcpy(). I've actually changed it to strlcpy() as well so that it's NUL terminated. You need CAP_NET_ADMIN to trigger these so it's not the end of the world. Reported-by: Nico Golde <[email protected]> Reported-by: Fabian Yamaguchi <[email protected]> Signed-off-by: Dan Carpenter <[email protected]> Cc: [email protected] Signed-off-by: Linus Torvalds <[email protected]>
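The pattern of this fix, clamp an attacker-controlled length to the destination buffer and guarantee NUL termination, is worth isolating. Below is a standalone C sketch; the buffer name, sizes, and payload are all invented for illustration, not driver code:

    #include <stdio.h>
    #include <string.h>

    #define NAME_MAX_LEN 32

    int main(void)
    {
        char station[NAME_MAX_LEN];
        /* Pretend these came in through an ioctl: a length field and a
         * payload, both attacker-controlled. */
        unsigned short claimed_len = 200;
        const char payload[] = "a-station-name-much-longer-than-the-32-byte-buffer";

        /* The fix: never trust claimed_len; clamp it to the destination
         * size (and to the payload actually present), then copy with a
         * guaranteed NUL terminator. */
        size_t len = claimed_len;
        if (len > sizeof(station) - 1)
            len = sizeof(station) - 1;
        if (len > strlen(payload))
            len = strlen(payload);
        memset(station, 0, sizeof(station));
        memcpy(station, payload, len);

        printf("%s\n", station); /* truncated, in-bounds, NUL-terminated */
        return 0;
    }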
NTSTATUS dsdb_get_extended_dn_uint32(struct ldb_dn *dn,
				     uint32_t *val,
				     const char *component_name)
{
	const struct ldb_val *v;
	int error = 0;

	v = ldb_dn_get_extended_component(dn, component_name);
	if (v == NULL) {
		return NT_STATUS_OBJECT_NAME_NOT_FOUND;
	}

	/* Just check we don't allow the caller to fill our stack */
	if (v->length >= 32) {
		return NT_STATUS_INVALID_PARAMETER;
	} else {
		char s[v->length + 1];
		memcpy(s, v->data, v->length);
		s[v->length] = 0;

		*val = smb_strtoul(s, NULL, 0, &error, SMB_STR_STANDARD);
		if (error != 0) {
			return NT_STATUS_INVALID_PARAMETER;
		}
	}
	return NT_STATUS_OK;
}
0
[ "CWE-200" ]
samba
0a3aa5f908e351201dc9c4d4807b09ed9eedff77
152,376,615,412,320,630,000,000,000,000,000,000,000
25
CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message This aims to minimise usage of the error-prone pattern of searching for a just-added message element in order to make modifications to it (and potentially finding the wrong element). BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009 Signed-off-by: Joseph Sutton <[email protected]>
ClientHttpRequest::assignRequest(HttpRequest *newRequest)
{
    assert(newRequest);
    assert(!request);
    const_cast<HttpRequest *&>(request) = newRequest;
    HTTPMSGLOCK(request);
    setLogUriToRequestUri();
}
0
[ "CWE-116" ]
squid
7024fb734a59409889e53df2257b3fc817809fb4
85,316,794,945,134,830,000,000,000,000,000,000,000
8
Handle more Range requests (#790) Also removed some effectively unused code.
static bool match_by_seid(const void *data, const void *user_data)
{
	const struct avdtp_local_sep *sep = data;
	uint8_t seid = PTR_TO_UINT(user_data);

	return sep->info.seid == seid;
}
0
[ "CWE-703" ]
bluez
7a80d2096f1b7125085e21448112aa02f49f5e9a
189,431,482,335,047,940,000,000,000,000,000,000,000
7
avdtp: Fix accepting invalid/malformed capabilities Check if capabilities are valid before attempting to copy them.
alt_reduce_in_look_behind(Node* node, regex_t* reg, ScanEnv* env)
{
  int r;

  switch (NODE_TYPE(node)) {
  case NODE_ALT:
    do {
      r = list_reduce_in_look_behind(NODE_CAR(node));
    } while (r == 0 && IS_NOT_NULL(node = NODE_CDR(node)));
    break;

  default:
    r = list_reduce_in_look_behind(node);
    break;
  }

  return r;
}
0
[ "CWE-787" ]
oniguruma
cbe9f8bd9cfc6c3c87a60fbae58fa1a85db59df0
99,626,149,549,695,770,000,000,000,000,000,000,000
18
#207: Out-of-bounds write
plugin_can_extents (struct backend *b, struct connection *conn)
{
  struct backend_plugin *p = container_of (b, struct backend_plugin, backend);

  assert (connection_get_handle (conn, 0));

  if (p->plugin.can_extents)
    return p->plugin.can_extents (connection_get_handle (conn, 0));
  else
    return p->plugin.extents != NULL;
}
0
[ "CWE-406" ]
nbdkit
a6b88b195a959b17524d1c8353fd425d4891dc5f
222,056,088,453,999,070,000,000,000,000,000,000,000
11
server: Fix regression for NBD_OPT_INFO before NBD_OPT_GO Most known NBD clients do not bother with NBD_OPT_INFO (except for clients like 'qemu-nbd --list' that don't ever intend to connect), but go straight to NBD_OPT_GO. However, it's not too hard to hack up qemu to add in an extra client step (whether info on the same name, or more interestingly, info on a different name), as a patch against qemu commit 6f214b30445: | diff --git i/nbd/client.c w/nbd/client.c | index f6733962b49b..425292ac5ea9 100644 | --- i/nbd/client.c | +++ w/nbd/client.c | @@ -1038,6 +1038,14 @@ int nbd_receive_negotiate(AioContext *aio_context, QIOChannel *ioc, | * TLS). If it is not available, fall back to | * NBD_OPT_LIST for nicer error messages about a missing | * export, then use NBD_OPT_EXPORT_NAME. */ | + if (getenv ("HACK")) | + info->name[0]++; | + result = nbd_opt_info_or_go(ioc, NBD_OPT_INFO, info, errp); | + if (getenv ("HACK")) | + info->name[0]--; | + if (result < 0) { | + return -EINVAL; | + } | result = nbd_opt_info_or_go(ioc, NBD_OPT_GO, info, errp); | if (result < 0) { | return -EINVAL; This works just fine in 1.14.0, where we call .open only once (so the INFO and GO repeat calls into the same plugin handle), but in 1.14.1 it regressed into causing an assertion failure: we are now calling .open a second time on a connection that is already opened: $ nbdkit -rfv null & $ hacked-qemu-io -f raw -r nbd://localhost -c quit ... nbdkit: null[1]: debug: null: open readonly=1 nbdkit: backend.c:179: backend_open: Assertion `h->handle == NULL' failed. Worse, on the mainline development, we have recently made it possible for plugins to actively report different information for different export names; for example, a plugin may choose to report different answers for .can_write on export A than for export B; but if we share cached handles, then an NBD_OPT_INFO on one export prevents correct answers for NBD_OPT_GO on the second export name. (The HACK envvar in my qemu modifications can be used to demonstrate cross-name requests, which are even less likely in a real client). The solution is to call .close after NBD_OPT_INFO, coupled with enough glue logic to reset cached connection handles back to the state expected by .open. This in turn means factoring out another backend_* function, but also gives us an opportunity to change backend_set_handle to no longer accept NULL. The assertion failure is, to some extent, a possible denial of service attack (one client can force nbdkit to exit by merely sending OPT_INFO before OPT_GO, preventing the next client from connecting), although this is mitigated by using TLS to weed out untrusted clients. Still, the fact that we introduced a potential DoS attack while trying to fix a traffic amplification security bug is not very nice. Sadly, as there are no known clients that easily trigger this mode of operation (OPT_INFO before OPT_GO), there is no easy way to cover this via a testsuite addition. I may end up hacking something into libnbd. Fixes: c05686f957 Signed-off-by: Eric Blake <[email protected]>
static int add_array_entry(const char* loc_name, zval* hash_arr, char* key_name TSRMLS_DC)
{
	char*	key_value	= NULL;
	char*	cur_key_name	= NULL;
	char*	token		= NULL;
	char*	last_ptr	= NULL;

	int	result		= 0;
	int	cur_result	= 0;
	int	cnt		= 0;

	if( strcmp(key_name , LOC_PRIVATE_TAG)==0 ){
		key_value = get_private_subtags( loc_name );
		result = 1;
	} else {
		key_value = get_icu_value_internal( loc_name , key_name , &result,1 );
	}
	if( (strcmp(key_name , LOC_PRIVATE_TAG)==0) ||
	    ( strcmp(key_name , LOC_VARIANT_TAG)==0) ){
		if( result > 0 && key_value){
			/* Tokenize on the "_" or "-" */
			token = php_strtok_r( key_value , DELIMITER ,&last_ptr);
			if( cur_key_name ){
				efree( cur_key_name);
			}
			cur_key_name = (char*)ecalloc( 25, 25);
			sprintf( cur_key_name , "%s%d", key_name , cnt++);
			add_assoc_string( hash_arr, cur_key_name , token ,TRUE );
			/* tokenize on the "_" or "-" and stop at singleton if any */
			while( (token = php_strtok_r(NULL , DELIMITER , &last_ptr)) && (strlen(token)>1) ){
				sprintf( cur_key_name , "%s%d", key_name , cnt++);
				add_assoc_string( hash_arr, cur_key_name , token , TRUE );
			}
			/*
			if( strcmp(key_name, LOC_PRIVATE_TAG) == 0 ){
			}
			*/
		}
	} else {
		if( result == 1 ){
			add_assoc_string( hash_arr, key_name , key_value , TRUE );
			cur_result = 1;
		}
	}

	if( cur_key_name ){
		efree( cur_key_name);
	}
	/*if( key_name != LOC_PRIVATE_TAG && key_value){*/
	if( key_value){
		efree(key_value);
	}
	return cur_result;
}
0
[ "CWE-125" ]
php-src
97eff7eb57fc2320c267a949cffd622c38712484
294,768,619,077,180,850,000,000,000,000,000,000,000
55
Fix bug #72241: get_icu_value_internal out-of-bounds read
static unsigned int tweak_transfer_flags(unsigned int flags)
{
	flags &= ~URB_NO_TRANSFER_DMA_MAP;
	return flags;
}
0
[ "CWE-200", "CWE-119" ]
linux
b348d7dddb6c4fbfc810b7a0626e8ec9e29f7cbb
129,756,346,073,017,860,000,000,000,000,000,000,000
5
USB: usbip: fix potential out-of-bounds write Fix potential out-of-bounds write to urb->transfer_buffer usbip handles network communication directly in the kernel. When receiving a packet from its peer, usbip code parses headers according to protocol. As part of this parsing urb->actual_length is filled. Since the input for urb->actual_length comes from the network, it should be treated as untrusted. Any entity controlling the network may put any value in the input and the preallocated urb->transfer_buffer may not be large enough to hold the data. Thus, the malicious entity is able to write arbitrary data to kernel memory. Signed-off-by: Ignat Korchagin <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
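The rule this commit applies, a length parsed from the network is untrusted until it has been checked against the preallocated buffer, sketched as standalone C (struct xfer and receive_payload are invented names, not usbip code):

    #include <stdio.h>
    #include <string.h>

    struct xfer {
        char buf[64];       /* preallocated transfer buffer */
        size_t buf_len;     /* its capacity */
    };

    /* Reject a claimed length that does not fit the preallocated buffer
     * instead of writing past the end of it. */
    static int receive_payload(struct xfer *x, const char *data, size_t claimed_len)
    {
        if (claimed_len > x->buf_len)
            return -1;      /* malformed or malicious packet */
        memcpy(x->buf, data, claimed_len);
        return 0;
    }

    int main(void)
    {
        struct xfer x = { .buf_len = sizeof(x.buf) };
        char packet[128] = "hello";
        printf("%d\n", receive_payload(&x, packet, 5));   /* 0  */
        printf("%d\n", receive_payload(&x, packet, 128)); /* -1 */
        return 0;
    }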
bool insert_precheck(THD *thd, TABLE_LIST *tables)
{
  LEX *lex= thd->lex;
  DBUG_ENTER("insert_precheck");

  /*
    Check that we have modify privileges for the first table and
    select privileges for the rest
  */
  ulong privilege= (INSERT_ACL |
                    (lex->duplicates == DUP_REPLACE ? DELETE_ACL : 0) |
                    (lex->value_list.elements ? UPDATE_ACL : 0));

  if (check_one_table_access(thd, privilege, tables))
    DBUG_RETURN(TRUE);

  if (lex->update_list.elements != lex->value_list.elements)
  {
    my_message(ER_WRONG_VALUE_COUNT, ER_THD(thd, ER_WRONG_VALUE_COUNT), MYF(0));
    DBUG_RETURN(TRUE);
  }
  DBUG_RETURN(FALSE);
}
0
[]
server
ba4927e520190bbad763bb5260ae154f29a61231
73,811,636,474,253,480,000,000,000,000,000,000,000
23
MDEV-19398: Assertion `item1->type() == Item::FIELD_ITEM ... Window Functions code tries to minimize the number of times it needs to sort the select's resultset by finding "compatible" OVER (PARTITION BY ... ORDER BY ...) clauses. This employs compare_order_elements(). That function assumed that the order expressions are Item_field-derived objects (that refer to a temp.table). But this is not always the case: one can construct queries where the order expressions are arbitrary item expressions. Add handling for such expressions: sort them according to the window specification they appeared in. This means we cannot detect that two compatible PARTITION BY clauses that use expressions can share the sorting step. But at least we won't crash.
string RewriteOptimizedNodesGroup(const OptimizedNodesGroup& group) override {
    VLOG(2) << "Collapse Add/AddN: root=" << group.root_node->name()
            << " op=" << group.root_node->op()
            << " num_optimized_nodes=" << group.optimized_nodes.size()
            << " num_inputs=" << group.inputs.size();

    // Do not optimize any of the nodes that are part of this group.
    MarkAllMembersWithTag(group, kAddOpsRewriteTag);

    // All new nodes will be placed under the scope of a root node.
    auto root_scope_and_name = ParseNodeScopeAndName(group.root_node->name());

    // Find what shapes are present in the inputs of absorbed nodes.
    std::unordered_map<string, std::vector<InputAndShape>> shape_sig_to_inputs;
    for (const auto& input : group.inputs) {
      shape_sig_to_inputs[ShapeSignature(input.shape)].push_back(input);
    }

    using SigKV = decltype(shape_sig_to_inputs)::value_type;
    VLOG(3) << "Add/AddN group has " << shape_sig_to_inputs.size()
            << " unique shapes: "
            << absl::StrJoin(shape_sig_to_inputs, ", ",
                             [](string* out, SigKV p) {
                               strings::StrAppend(out, p.first);
                             });

    // Collect all the shapes from representative elements.
    std::vector<TensorShapeProto> shapes;
    shapes.reserve(shape_sig_to_inputs.size());
    for (const auto& el : shape_sig_to_inputs)
      shapes.push_back(el.second[0].shape);

    // If all inputs have the same shape, rewrite whole group with a single AddN
    if (shapes.size() == 1) {
      string node_name = UniqueOptimizedNodeName(root_scope_and_name);
      AddInputsOfSymbolicallyEqualShape(*group.root_node, node_name,
                                        group.inputs);
      return node_name;
    }

    // For inputs of different shapes:
    // 1. Rewrite inputs of the same shape using AddN (leaf nodes)
    // 2. Build a tree of Add nodes, minimizing cost of broadcast
    std::sort(shapes.begin(), shapes.end(),
              [](const TensorShapeProto& left, const TensorShapeProto& right) {
                return CompareSymbolicallyShapedTensorSizes(left, right);
              });

    // optimized name for leaf AddN nodes
    auto leaf_node_name = [&root_scope_and_name, this](int i) {
      return UniqueOptimizedNodeName(root_scope_and_name,
                                     strings::StrCat("Leaf_", i));
    };
    // optimized name for internal nodes of a tree built up from AddN leaves
    auto internal_node_name = [&root_scope_and_name, this](int i) {
      return UniqueOptimizedNodeName(root_scope_and_name,
                                     strings::StrCat("Internal_", i));
    };

    // Add/AddN nodes that must be added to the tree
    std::deque<InputAndShape> add_ops;

    // Prepare leaf AddN nodes for inputs of equal shape
    for (int i = 0, end = shapes.size(); i < end; ++i) {
      const auto node_name = leaf_node_name(i);
      const auto& inputs = shape_sig_to_inputs[ShapeSignature(shapes[i])];
      add_ops.push_back(AddInputsOfSymbolicallyEqualShape(*group.root_node,
                                                          node_name, inputs));
    }

    // Build up a tree of Add ops
    int internal_nodes = 0;
    do {
      const InputAndShape lhs = add_ops.front();
      add_ops.pop_front();
      const InputAndShape rhs = add_ops.front();
      add_ops.pop_front();
      string name = add_ops.empty()
                        ? UniqueOptimizedNodeName(root_scope_and_name)
                        : internal_node_name(internal_nodes++);
      InputAndShape add = AddAggregatedInputs(*group.root_node, name, lhs, rhs);
      add_ops.push_front(add);
    } while (add_ops.size() > 1);

    InputAndShape optimized_root_node = add_ops.front();
    return optimized_root_node.input;
}
0
[ "CWE-476" ]
tensorflow
e6340f0665d53716ef3197ada88936c2a5f7a2d3
185,588,662,530,349,900,000,000,000,000,000,000,000
87
Handle a special grappler case resulting in crash. It might happen that a malformed input could be used to trick Grappler into trying to optimize a node with no inputs. This, in turn, would produce a null pointer dereference and a segfault. PiperOrigin-RevId: 369242852 Change-Id: I2e5cbe7aec243d34a6d60220ac8ac9b16f136f6b
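The guard this commit describes, refuse to optimize a malformed node with no inputs rather than dereference a null pointer, can be shown in a standalone C sketch. The names (struct node, first_input) are invented, since the real fix lives in C++ Grappler code:

    #include <stdio.h>

    struct node {
        const char *name;
        int num_inputs;
        const char **inputs;
    };

    /* Bail out before touching inputs[0] when a malformed graph hands
     * us a node with no inputs at all. */
    static const char *first_input(const struct node *n)
    {
        if (n->num_inputs == 0)
            return NULL; /* refuse to optimize instead of crashing */
        return n->inputs[0];
    }

    int main(void)
    {
        struct node bad = { .name = "AddN", .num_inputs = 0, .inputs = NULL };
        const char *in = first_input(&bad);
        printf("%s\n", in ? in : "(rejected)");
        return 0;
    }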
SecureElementStatus_t SecureElementInit( SecureElementNvmEvent seNvmCtxChanged )
{
    // Assign callback
    if( seNvmCtxChanged != 0 )
    {
        SeNvmCtxChanged = seNvmCtxChanged;
    }
    else
    {
        SeNvmCtxChanged = DummyCB;
    }

#if !defined( SECURE_ELEMENT_PRE_PROVISIONED )
#if( STATIC_DEVICE_EUI == 0 )
    // Get a DevEUI from MCU unique ID
    SoftSeHalGetUniqueId( SeNvmCtx.DevEui );
#endif
#endif

    SeNvmCtxChanged( );

    return SECURE_ELEMENT_SUCCESS;
}
0
[ "CWE-120", "CWE-787" ]
LoRaMac-node
e3063a91daa7ad8a687223efa63079f0c24568e4
318,150,782,210,093,870,000,000,000,000,000,000,000
23
Added received buffer size checks.
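The one-line message points at a recurring embedded pattern: validate the received size against the destination before copying radio payload into a fixed buffer. A minimal standalone sketch (store_key and the error value are assumptions for illustration, not the LoRaMac-node API):

    #include <stdio.h>
    #include <string.h>

    #define KEY_SIZE 16

    /* Check the received size before copying into a fixed buffer. */
    static int store_key(unsigned char *dst, size_t dst_size,
                         const unsigned char *src, size_t src_size)
    {
        if (src_size > dst_size)
            return -1; /* reject oversized input instead of overflowing */
        memcpy(dst, src, src_size);
        return 0;
    }

    int main(void)
    {
        unsigned char key[KEY_SIZE];
        unsigned char rx[64] = { 0 };
        printf("%d\n", store_key(key, sizeof(key), rx, 16)); /* 0  */
        printf("%d\n", store_key(key, sizeof(key), rx, 64)); /* -1 */
        return 0;
    }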
static int esp6_output_encap(struct xfrm_state *x, struct sk_buff *skb,
			     struct esp_info *esp)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct ip_esp_hdr *esph;
	__be16 sport, dport;
	int encap_type;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	encap_type = encap->encap_type;
	spin_unlock_bh(&x->lock);

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
	case UDP_ENCAP_ESPINUDP_NON_IKE:
		esph = esp6_output_udp_encap(skb, encap_type, esp, sport, dport);
		break;
	case TCP_ENCAP_ESPINTCP:
		esph = esp6_output_tcp_encap(x, skb, esp);
		break;
	}

	if (IS_ERR(esph))
		return PTR_ERR(esph);

	esp->esph = esph;

	return 0;
}
0
[ "CWE-787" ]
linux
ebe48d368e97d007bfeb76fcb065d6cfc4c96645
194,543,550,058,034,800,000,000,000,000,000,000,000
32
esp: Fix possible buffer overflow in ESP transformation The maximum message size that can be sent is bigger than the maximum size that skb_page_frag_refill can allocate. So it is possible to write beyond the allocated buffer. Fix this by falling back to COW in that case. v2: Avoid get_order() costs as suggested by Linus Torvalds. Fixes: cac2661c53f3 ("esp4: Avoid skb_cow_data whenever possible") Fixes: 03e2a30f6a27 ("esp6: Avoid skb_cow_data whenever possible") Reported-by: valis <[email protected]> Signed-off-by: Steffen Klassert <[email protected]>
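The decision the fix makes reduces to a size test: if the message needs more room than the fast fragment allocator can hand out, take the slower copy-on-write path instead of writing past the fragment. A standalone sketch with an invented threshold name (the real limit depends on page-frag internals):

    #include <stdio.h>

    #define FRAG_ALLOC_MAX 32768 /* stand-in for the fast path's limit */

    /* Pick the safe path when the fast path cannot hold the message. */
    static const char *choose_path(size_t needed)
    {
        if (needed > FRAG_ALLOC_MAX)
            return "cow-fallback"; /* skb_cow_data in the real code */
        return "fast-frag";
    }

    int main(void)
    {
        printf("%s\n", choose_path(1500));  /* fast-frag */
        printf("%s\n", choose_path(65536)); /* cow-fallback */
        return 0;
    }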
static int vvalue_tvb_get1(tvbuff_t *tvb, int offset, void *val)
{
	guint8 *ui1 = (guint8 *)val;
	*ui1 = tvb_get_guint8(tvb, offset);
	return 1;
}
0
[ "CWE-770" ]
wireshark
b7a0650e061b5418ab4a8f72c6e4b00317aff623
108,752,328,840,698,270,000,000,000,000,000,000,000
6
MS-WSP: Don't allocate huge amounts of memory. Add a couple of memory allocation sanity checks, one of which fixes #17331.
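The sanity check the commit adds follows a standard dissector rule: a length read from the capture is untrusted, so cap it before using it as an allocation size. Standalone sketch (SANE_ALLOC_MAX and alloc_for_field are illustrative; Wireshark has its own allocation and exception machinery):

    #include <stdio.h>
    #include <stdlib.h>

    #define SANE_ALLOC_MAX (1u << 20) /* arbitrary cap for illustration */

    /* Refuse absurd allocation sizes coming from packet data. */
    static void *alloc_for_field(unsigned int len_from_packet)
    {
        if (len_from_packet == 0 || len_from_packet > SANE_ALLOC_MAX)
            return NULL; /* flag the packet as malformed instead */
        return malloc(len_from_packet);
    }

    int main(void)
    {
        void *p = alloc_for_field(0xFFFFFFFFu);
        printf("%s\n", p ? "allocated" : "rejected");
        free(p);
        return 0;
    }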
f_complete_info(typval_T *argvars, typval_T *rettv)
{
    list_T	*what_list = NULL;

    if (rettv_dict_alloc(rettv) == FAIL)
	return;

    if (in_vim9script() && check_for_opt_list_arg(argvars, 0) == FAIL)
	return;

    if (argvars[0].v_type != VAR_UNKNOWN)
    {
	if (check_for_list_arg(argvars, 0) == FAIL)
	    return;
	what_list = argvars[0].vval.v_list;
    }
    get_complete_info(what_list, rettv->vval.v_dict);
}
0
[ "CWE-416" ]
vim
0ff01835a40f549c5c4a550502f62a2ac9ac447c
25,674,089,917,583,960,000,000,000,000,000,000,000
18
patch 9.0.0579: using freed memory when 'tagfunc' wipes out buffer Problem: Using freed memory when 'tagfunc' wipes out buffer that holds 'complete'. Solution: Make a copy of the option. Make sure cursor position is valid.
static void kvm_init_msr_list(void)
{
	struct x86_pmu_capability x86_pmu;
	u32 dummy[2];
	unsigned i;

	BUILD_BUG_ON_MSG(KVM_PMC_MAX_FIXED != 3,
			 "Please update the fixed PMCs in msrs_to_saved_all[]");

	perf_get_x86_pmu_capability(&x86_pmu);

	num_msrs_to_save = 0;
	num_emulated_msrs = 0;
	num_msr_based_features = 0;

	for (i = 0; i < ARRAY_SIZE(msrs_to_save_all); i++) {
		if (rdmsr_safe(msrs_to_save_all[i], &dummy[0], &dummy[1]) < 0)
			continue;

		/*
		 * Even MSRs that are valid in the host may not be exposed
		 * to the guests in some cases.
		 */
		switch (msrs_to_save_all[i]) {
		case MSR_IA32_BNDCFGS:
			if (!kvm_mpx_supported())
				continue;
			break;
		case MSR_TSC_AUX:
			if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP) &&
			    !kvm_cpu_cap_has(X86_FEATURE_RDPID))
				continue;
			break;
		case MSR_IA32_UMWAIT_CONTROL:
			if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG))
				continue;
			break;
		case MSR_IA32_RTIT_CTL:
		case MSR_IA32_RTIT_STATUS:
			if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT))
				continue;
			break;
		case MSR_IA32_RTIT_CR3_MATCH:
			if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
			    !intel_pt_validate_hw_cap(PT_CAP_cr3_filtering))
				continue;
			break;
		case MSR_IA32_RTIT_OUTPUT_BASE:
		case MSR_IA32_RTIT_OUTPUT_MASK:
			if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
			    (!intel_pt_validate_hw_cap(PT_CAP_topa_output) &&
			     !intel_pt_validate_hw_cap(PT_CAP_single_range_output)))
				continue;
			break;
		case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
			if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
			    msrs_to_save_all[i] - MSR_IA32_RTIT_ADDR0_A >=
			    intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2)
				continue;
			break;
		case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR0 + 17:
			if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
			    min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
				continue;
			break;
		case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL0 + 17:
			if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
			    min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
				continue;
			break;
		case MSR_IA32_XFD:
		case MSR_IA32_XFD_ERR:
			if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
				continue;
			break;
		default:
			break;
		}

		msrs_to_save[num_msrs_to_save++] = msrs_to_save_all[i];
	}

	for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) {
		if (!static_call(kvm_x86_has_emulated_msr)(NULL, emulated_msrs_all[i]))
			continue;

		emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i];
	}

	for (i = 0; i < ARRAY_SIZE(msr_based_features_all); i++) {
		struct kvm_msr_entry msr;

		msr.index = msr_based_features_all[i];
		if (kvm_get_msr_feature(&msr))
			continue;

		msr_based_features[num_msr_based_features++] = msr_based_features_all[i];
	}
}
0
[ "CWE-459" ]
linux
683412ccf61294d727ead4a73d97397396e69a6b
287,229,731,734,091,850,000,000,000,000,000,000,000
99
KVM: SEV: add cache flush to solve SEV cache incoherency issues Flush the CPU caches when memory is reclaimed from an SEV guest (where reclaim also includes it being unmapped from KVM's memslots). Due to lack of coherency for SEV encrypted memory, failure to flush results in silent data corruption if userspace is malicious/broken and doesn't ensure SEV guest memory is properly pinned and unpinned. Cache coherency is not enforced across the VM boundary in SEV (AMD APM vol.2 Section 15.34.7). Confidential cachelines generated by confidential VM guests have to be explicitly flushed on the host side. If a memory page containing dirty confidential cachelines was released by a VM and reallocated to another user, the cachelines may corrupt the new user at a later time. KVM takes a shortcut by assuming all confidential memory remains pinned until the end of VM lifetime. Therefore, KVM does not flush caches at mmu_notifier invalidation events. Because of this incorrect assumption and the lack of cache flushing, malicious userspace can crash the host kernel by creating a malicious VM and continuously allocating/releasing unpinned confidential memory pages while the VM is running. Add cache flush operations to mmu_notifier operations to ensure that any physical memory leaving the guest VM gets flushed. In particular, hook the mmu_notifier_invalidate_range_start and mmu_notifier_release events and flush caches accordingly. The flush is done after releasing the mmu lock to avoid contention with other vCPUs. Cc: [email protected] Suggested-by: Sean Christpherson <[email protected]> Reported-by: Mingwei Zhang <[email protected]> Signed-off-by: Mingwei Zhang <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
JVM_ClassDepth(JNIEnv *env, jstring name)
{
	jint result;

	Trc_SC_ClassDepth_Entry(env, name);

	result = (*env)->CallStaticIntMethod(env, jlClass, classDepthMID, name);

	/* CMVC 95169: ensure that the result is a well defined error value if an exception occurred */
	if ((*env)->ExceptionCheck(env)) {
		result = -1;
	}

	Trc_SC_ClassDepth_Exit(env, result);

	return result;
}
0
[ "CWE-119" ]
openj9
0971f22d88f42cf7332364ad7430e9bd8681c970
159,434,533,501,801,160,000,000,000,000,000,000,000
17
Clean up jio_snprintf and jio_vfprintf Fixes https://bugs.eclipse.org/bugs/show_bug.cgi?id=543659 Signed-off-by: Peter Bain <[email protected]>
static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct cp2112_device *dev = gpiochip_get_data(gc);

	INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);

	cp2112_gpio_direction_input(gc, d->hwirq);

	if (!dev->gpio_poll) {
		dev->gpio_poll = true;
		schedule_delayed_work(&dev->gpio_poll_worker, 0);
	}

	cp2112_gpio_irq_unmask(d);
	return 0;
}
0
[ "CWE-388" ]
linux
8e9faa15469ed7c7467423db4c62aeed3ff4cae3
47,990,421,051,503,280,000,000,000,000,000,000,000
17
HID: cp2112: fix gpio-callback error handling In case of a zero-length report, the gpio direction_input callback would currently return success instead of an errno. Fixes: 1ffb3c40ffb5 ("HID: cp2112: make transfer buffers DMA capable") Cc: stable <[email protected]> # 4.9 Signed-off-by: Johan Hovold <[email protected]> Reviewed-by: Benjamin Tissoires <[email protected]> Signed-off-by: Jiri Kosina <[email protected]>
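The fix pattern in miniature, propagate the callee's error code instead of unconditionally reporting success, as standalone C with invented names:

    #include <stdio.h>

    /* Stand-in for a transfer that can fail, e.g. a zero-length report. */
    static int set_direction_input(int report_len)
    {
        if (report_len == 0)
            return -5; /* -EIO in the kernel */
        return 0;
    }

    /* Check and propagate the callee's error rather than returning 0. */
    static int irq_startup(int report_len)
    {
        int ret = set_direction_input(report_len);
        if (ret < 0)
            return ret;
        /* ... unmask the interrupt only on success ... */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", irq_startup(0)); /* -5, not silently 0 */
        return 0;
    }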
void Image::clearIccProfile()
{
    iccProfile_.release();
}
0
[ "CWE-125" ]
exiv2
6e3855aed7ba8bb4731fc4087ca7f9078b2f3d97
252,981,996,217,003,280,000,000,000,000,000,000,000
4
Fix https://github.com/Exiv2/exiv2/issues/55
static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.cr3 = __sme_set(root);
	mark_dirty(svm->vmcb, VMCB_CR);
}
0
[ "CWE-401" ]
linux
d80b64ff297e40c2b6f7d7abc1b3eba70d22a068
287,850,328,786,932,600,000,000,000,000,000,000,000
7
KVM: SVM: Fix potential memory leak in svm_cpu_init() When kmalloc() for sd->sev_vmcbs fails, we forget to free the page held by sd->save_area. Also get rid of the var r, as '-ENOMEM' is actually the only possible outcome here. Reviewed-by: Liran Alon <[email protected]> Reviewed-by: Vitaly Kuznetsov <[email protected]> Signed-off-by: Miaohe Lin <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
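The leak-on-partial-init pattern and its fix, as a standalone C sketch (struct cpu_data only loosely mirrors the shape of the kernel struct; the sizes and error value are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct cpu_data {
        void *save_area;
        void *sev_vmcbs;
    };

    /* When the second allocation fails, release the first one before
     * returning -ENOMEM; otherwise save_area leaks on every retry. */
    static int cpu_init(struct cpu_data *sd)
    {
        sd->save_area = malloc(4096);
        if (!sd->save_area)
            return -12; /* -ENOMEM */

        sd->sev_vmcbs = malloc(128);
        if (!sd->sev_vmcbs) {
            free(sd->save_area); /* the fix: don't leak save_area */
            sd->save_area = NULL;
            return -12;
        }
        return 0;
    }

    int main(void)
    {
        struct cpu_data sd = { 0 };
        printf("%d\n", cpu_init(&sd));
        free(sd.sev_vmcbs);
        free(sd.save_area);
        return 0;
    }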
static void test_message_parser_garbage_suffix_mime_boundary(void)
{
	static const char input_msg[] =
		"Content-Type: multipart/mixed; boundary=\"a\"\n"
		"\n"
		"--ab\n"
		"Content-Type: multipart/mixed; boundary=\"a\"\n"
		"\n"
		"--ac\n"
		"Content-Type: text/plain\n"
		"\n"
		"body\n";
	struct istream *input;
	struct message_part *parts;
	struct message_size body_size, header_size;
	pool_t pool;

	test_begin("message parser garbage suffix mime boundary");
	pool = pool_alloconly_create("message parser", 10240);
	input = test_istream_create(input_msg);

	test_assert(message_parse_stream(pool, input, &set_empty, FALSE, &parts) < 0);

	i_stream_seek(input, 0);
	test_message_parser_get_sizes(input, &body_size, &header_size, FALSE);

	test_assert(parts->children_count == 2);
	test_assert(parts->flags == (MESSAGE_PART_FLAG_MULTIPART | MESSAGE_PART_FLAG_IS_MIME));
	test_assert(parts->header_size.lines == 2);
	test_assert(parts->header_size.physical_size == 45);
	test_assert(parts->header_size.virtual_size == 45+2);
	test_assert(parts->body_size.lines == 7);
	test_assert(parts->body_size.physical_size == 86);
	test_assert(parts->body_size.virtual_size == 86+7);
	test_message_parser_assert_sizes(parts, &body_size, &header_size);

	test_assert(parts->children->children_count == 1);
	test_assert(parts->children->flags == (MESSAGE_PART_FLAG_MULTIPART | MESSAGE_PART_FLAG_IS_MIME));
	test_assert(parts->children->physical_pos == 50);
	test_assert(parts->children->header_size.lines == 2);
	test_assert(parts->children->header_size.physical_size == 45);
	test_assert(parts->children->header_size.virtual_size == 45+2);
	test_assert(parts->children->body_size.lines == 4);
	test_assert(parts->children->body_size.physical_size == 36);
	test_assert(parts->children->body_size.virtual_size == 36+4);

	test_assert(parts->children->children->children_count == 0);
	test_assert(parts->children->children->flags == (MESSAGE_PART_FLAG_TEXT | MESSAGE_PART_FLAG_IS_MIME));
	test_assert(parts->children->children->physical_pos == 100);
	test_assert(parts->children->children->header_size.lines == 2);
	test_assert(parts->children->children->header_size.physical_size == 26);
	test_assert(parts->children->children->header_size.virtual_size == 26+2);
	test_assert(parts->children->children->body_size.lines == 1);
	test_assert(parts->children->children->body_size.physical_size == 5);
	test_assert(parts->children->children->body_size.virtual_size == 5+1);

	test_parsed_parts(input, parts);
	i_stream_unref(&input);
	pool_unref(&pool);
	test_end();
}
0
[ "CWE-20" ]
core
fb97a1cddbda4019e327fa736972a1c7433fedaa
65,649,076,372,113,810,000,000,000,000,000,000,000
60
lib-mail: message-parser - Fix assert-crash when enforcing MIME part limit The limit could have been exceeded with message/rfc822 parts.
static void usbredir_handle_data(USBDevice *udev, USBPacket *p)
{
    USBRedirDevice *dev = USB_REDIRECT(udev);
    uint8_t ep;

    ep = p->ep->nr;
    if (p->pid == USB_TOKEN_IN) {
        ep |= USB_DIR_IN;
    }

    switch (dev->endpoint[EP2I(ep)].type) {
    case USB_ENDPOINT_XFER_CONTROL:
        ERROR("handle_data called for control transfer on ep %02X\n", ep);
        p->status = USB_RET_NAK;
        break;
    case USB_ENDPOINT_XFER_BULK:
        if (p->state == USB_PACKET_SETUP && p->pid == USB_TOKEN_IN &&
                p->ep->pipeline) {
            p->status = USB_RET_ADD_TO_QUEUE;
            break;
        }
        usbredir_handle_bulk_data(dev, p, ep);
        break;
    case USB_ENDPOINT_XFER_ISOC:
        usbredir_handle_iso_data(dev, p, ep);
        break;
    case USB_ENDPOINT_XFER_INT:
        if (ep & USB_DIR_IN) {
            usbredir_handle_interrupt_in_data(dev, p, ep);
        } else {
            usbredir_handle_interrupt_out_data(dev, p, ep);
        }
        break;
    default:
        ERROR("handle_data ep %02X has unknown type %d\n", ep,
              dev->endpoint[EP2I(ep)].type);
        p->status = USB_RET_NAK;
    }
}
0
[ "CWE-770" ]
qemu
7ec54f9eb62b5d177e30eb8b1cad795a5f8d8986
143,894,506,298,463,720,000,000,000,000,000,000,000
39
usb/redir: avoid dynamic stack allocation (CVE-2021-3527) Use autofree heap allocation instead. Fixes: 4f4321c11ff ("usb: use iovecs in USBPacket") Reviewed-by: Philippe Mathieu-Daudé <[email protected]> Signed-off-by: Gerd Hoffmann <[email protected]> Tested-by: Philippe Mathieu-Daudé <[email protected]> Message-Id: <[email protected]>
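A sketch of the replacement pattern this commit applies, assuming GLib is available (the real patch touches QEMU's usbredir code; packet-processing details are omitted): a variable-length stack array becomes a heap buffer that is freed automatically when it goes out of scope.

    #include <glib.h>
    #include <string.h>

    static void handle_data_sketch(const guint8 *data, gsize size)
    {
        g_autofree guint8 *buf = g_malloc(size);  /* was: uint8_t buf[size]; */

        memcpy(buf, data, size);
        /* ... process buf; it is g_free()d automatically on scope exit ... */
    }

This removes the unbounded stack growth an attacker could trigger with large packets, at the cost of one heap allocation per call.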
static void snd_seq_queue_process_event(struct snd_seq_queue *q,
					struct snd_seq_event *ev,
					int atomic, int hop)
{
	switch (ev->type) {
	case SNDRV_SEQ_EVENT_START:
		snd_seq_prioq_leave(q->tickq, ev->source.client, 1);
		snd_seq_prioq_leave(q->timeq, ev->source.client, 1);
		if (! snd_seq_timer_start(q->timer))
			queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_CONTINUE:
		if (! snd_seq_timer_continue(q->timer))
			queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_STOP:
		snd_seq_timer_stop(q->timer);
		queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_TEMPO:
		snd_seq_timer_set_tempo(q->timer, ev->data.queue.param.value);
		queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_SETPOS_TICK:
		if (snd_seq_timer_set_position_tick(q->timer, ev->data.queue.param.time.tick) == 0) {
			queue_broadcast_event(q, ev, atomic, hop);
		}
		break;

	case SNDRV_SEQ_EVENT_SETPOS_TIME:
		if (snd_seq_timer_set_position_time(q->timer, ev->data.queue.param.time.time) == 0) {
			queue_broadcast_event(q, ev, atomic, hop);
		}
		break;
	case SNDRV_SEQ_EVENT_QUEUE_SKEW:
		if (snd_seq_timer_set_skew(q->timer,
					   ev->data.queue.param.skew.value,
					   ev->data.queue.param.skew.base) == 0) {
			queue_broadcast_event(q, ev, atomic, hop);
		}
		break;
	}
}
0
[ "CWE-362" ]
linux
3567eb6af614dac436c4b16a8d426f9faed639b3
330,193,743,422,225,170,000,000,000,000,000,000,000
47
ALSA: seq: Fix race at timer setup and close ALSA sequencer code has an open race between the timer setup ioctl and the close of the client. This was triggered by syzkaller fuzzer, and a use-after-free was caught there as a result. This patch papers over it by adding a proper queue->timer_mutex lock around the timer-related calls in the relevant code path. Reported-by: Dmitry Vyukov <[email protected]> Tested-by: Dmitry Vyukov <[email protected]> Cc: <[email protected]> Signed-off-by: Takashi Iwai <[email protected]>
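A self-contained sketch of the locking pattern the fix adds, using pthreads in place of the kernel mutex API (all names are illustrative): timer setup and client close serialize on the same per-queue mutex, so close can no longer free the timer out from under a concurrent setup.

    #include <pthread.h>
    #include <stddef.h>

    struct queue_sketch {
        pthread_mutex_t timer_mutex;
        void *timer;    /* torn down by close, used by setup */
    };

    static void queue_timer_setup_sketch(struct queue_sketch *q)
    {
        pthread_mutex_lock(&q->timer_mutex);
        /* ... start/configure q->timer; a concurrent close must wait ... */
        pthread_mutex_unlock(&q->timer_mutex);
    }

    static void queue_close_sketch(struct queue_sketch *q)
    {
        pthread_mutex_lock(&q->timer_mutex);
        q->timer = NULL;   /* tear down under the same lock */
        pthread_mutex_unlock(&q->timer_mutex);
    }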
TEST_F(QueryPlannerTest, NegationBelowElemMatchValue) {
    params.options = QueryPlannerParams::NO_TABLE_SCAN;
    // true means multikey
    addIndex(BSON("a" << 1), true);

    runQuery(fromjson("{a: {$elemMatch: {$ne: 2}}}"));

    assertNumSolutions(1U);
    assertSolutionExists(
        "{fetch: {filter: {a:{$elemMatch:{$ne:2}}}, node: "
        "{ixscan: {filter: null, pattern: {a: 1}, bounds: "
        "{a: [['MinKey',2,true,false], [2,'MaxKey',false,true]]}}}}}");
}
0
[ "CWE-834" ]
mongo
94d0e046baa64d1aa1a6af97e2d19bb466cc1ff5
127,000,388,698,735,600,000,000,000,000,000,000,000
13
SERVER-38164 $or pushdown optimization does not correctly handle $not within an $elemMatch
validOperatorName(const char *name)
{
	size_t		len = strlen(name);

	/* Can't be empty or too long */
	if (len == 0 || len >= NAMEDATALEN)
		return false;

	/* Can't contain any invalid characters */
	/* Test string here should match op_chars in scan.l */
	if (strspn(name, "~!@#^&|`?+-*/%<>=") != len)
		return false;

	/* Can't contain slash-star or dash-dash (comment starts) */
	if (strstr(name, "/*") || strstr(name, "--"))
		return false;

	/*
	 * For SQL standard compatibility, '+' and '-' cannot be the last char of
	 * a multi-char operator unless the operator contains chars that are not
	 * in SQL operators. The idea is to lex '=-' as two operators, but not to
	 * forbid operator names like '?-' that could not be sequences of standard
	 * SQL operators.
	 */
	if (len > 1 && (name[len - 1] == '+' || name[len - 1] == '-'))
	{
		int			ic;

		for (ic = len - 2; ic >= 0; ic--)
		{
			if (strchr("~!@#^&|`?%", name[ic]))
				break;
		}
		if (ic < 0)
			return false;		/* nope, not valid */
	}

	/* != isn't valid either, because parser will convert it to <> */
	if (strcmp(name, "!=") == 0)
		return false;

	return true;
}
0
[ "CWE-94" ]
postgres
b9b21acc766db54d8c337d508d0fe2f5bf2daab0
141,598,076,235,088,970,000,000,000,000,000,000,000
45
In extensions, don't replace objects not belonging to the extension. Previously, if an extension script did CREATE OR REPLACE and there was an existing object not belonging to the extension, it would overwrite the object and adopt it into the extension. This is problematic, first because the overwrite is probably unintentional, and second because we didn't change the object's ownership. Thus a hostile user could create an object in advance of an expected CREATE EXTENSION command, and would then have ownership rights on an extension object, which could be modified for trojan-horse-type attacks. Hence, forbid CREATE OR REPLACE of an existing object unless it already belongs to the extension. (Note that we've always forbidden replacing an object that belongs to some other extension; only the behavior for previously-free-standing objects changes here.) For the same reason, also fail CREATE IF NOT EXISTS when there is an existing object that doesn't belong to the extension. Our thanks to Sven Klemm for reporting this problem. Security: CVE-2022-2625
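The decision this commit introduces reduces to a small predicate; a sketch in plain C under stated assumptions (the real check lives in the backend's object-creation paths and reports the failure through ereport, not a boolean return):

    #include <stdbool.h>

    /* May CREATE OR REPLACE / CREATE IF NOT EXISTS proceed for this object? */
    static bool may_adopt_object_sketch(bool creating_extension,
                                        bool object_exists,
                                        bool belongs_to_this_extension)
    {
        if (!object_exists)
            return true;                    /* nothing to replace or adopt */
        if (creating_extension && !belongs_to_this_extension)
            return false;                   /* the trojan-horse case: refuse */
        return true;
    }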
static __poll_t cgroup_file_poll(struct kernfs_open_file *of, poll_table *pt)
{
	struct cftype *cft = of_cft(of);

	if (cft->poll)
		return cft->poll(of, pt);

	return kernfs_generic_poll(of, pt);
}
0
[ "CWE-416" ]
linux
a06247c6804f1a7c86a2e5398a4c1f1db1471848
128,532,240,066,268,950,000,000,000,000,000,000,000
9
psi: Fix uaf issue when psi trigger is destroyed while being polled With write operation on psi files replacing old trigger with a new one, the lifetime of its waitqueue is totally arbitrary. Overwriting an existing trigger causes its waitqueue to be freed and pending poll() will stumble on trigger->event_wait which was destroyed. Fix this by disallowing to redefine an existing psi trigger. If a write operation is used on a file descriptor with an already existing psi trigger, the operation will fail with EBUSY error. Also bypass a check for psi_disabled in the psi_trigger_destroy as the flag can be flipped after the trigger is created, leading to a memory leak. Fixes: 0e94682b73bf ("psi: introduce psi monitor") Reported-by: [email protected] Suggested-by: Linus Torvalds <[email protected]> Analyzed-by: Eric Biggers <[email protected]> Signed-off-by: Suren Baghdasaryan <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Reviewed-by: Eric Biggers <[email protected]> Acked-by: Johannes Weiner <[email protected]> Cc: [email protected] Link: https://lore.kernel.org/r/[email protected]
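A sketch of the new write-side behaviour with illustrative types: if a trigger already exists on the file, the write fails with -EBUSY instead of freeing a trigger that a concurrent poll() may still reference.

    #include <errno.h>
    #include <stddef.h>

    struct trigger_slot_sketch {
        void *trigger;   /* non-NULL once a trigger has been created */
    };

    static int trigger_write_sketch(struct trigger_slot_sketch *slot,
                                    void *new_trigger)
    {
        if (slot->trigger != NULL)
            return -EBUSY;   /* redefining an existing trigger is rejected */
        slot->trigger = new_trigger;
        return 0;
    }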
static void vdi_header_to_cpu(VdiHeader *header)
{
    le32_to_cpus(&header->signature);
    le32_to_cpus(&header->version);
    le32_to_cpus(&header->header_size);
    le32_to_cpus(&header->image_type);
    le32_to_cpus(&header->image_flags);
    le32_to_cpus(&header->offset_bmap);
    le32_to_cpus(&header->offset_data);
    le32_to_cpus(&header->cylinders);
    le32_to_cpus(&header->heads);
    le32_to_cpus(&header->sectors);
    le32_to_cpus(&header->sector_size);
    le64_to_cpus(&header->disk_size);
    le32_to_cpus(&header->block_size);
    le32_to_cpus(&header->block_extra);
    le32_to_cpus(&header->blocks_in_image);
    le32_to_cpus(&header->blocks_allocated);
    uuid_convert(header->uuid_image);
    uuid_convert(header->uuid_last_snap);
    uuid_convert(header->uuid_link);
    uuid_convert(header->uuid_parent);
}
0
[ "CWE-20" ]
qemu
63fa06dc978f3669dbfd9443b33cde9e2a7f4b41
67,010,100,254,212,010,000,000,000,000,000,000,000
23
vdi: add bounds checks for blocks_in_image and disk_size header fields (CVE-2014-0144) The maximum blocks_in_image is 0xffffffff / 4, which also limits the maximum disk_size for a VDI image to 1024TB. Note that this is the maximum size that QEMU will currently support with this driver, not necessarily the maximum size allowed by the image format. This also fixes an incorrect error message, a bug introduced by commit 5b7aa9b56d1bfc79916262f380c3fc7961becb50 (Reported by Stefan Weil) Signed-off-by: Jeff Cody <[email protected]> Signed-off-by: Kevin Wolf <[email protected]> Signed-off-by: Stefan Hajnoczi <[email protected]>
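A self-contained sketch of the bounds checks the message describes (the struct is a trimmed stand-in for the real header, and QEMU's actual error reporting differs):

    #include <stdint.h>
    #include <errno.h>

    #define VDI_BLOCKS_IN_IMAGE_MAX (0xffffffffu / 4)

    struct vdi_header_sketch {
        uint32_t blocks_in_image;
        uint32_t block_size;
        uint64_t disk_size;
    };

    static int vdi_check_header_sketch(const struct vdi_header_sketch *h)
    {
        if (h->blocks_in_image > VDI_BLOCKS_IN_IMAGE_MAX)
            return -ENOTSUP;   /* block map size would overflow 32 bits */
        if (h->disk_size > (uint64_t)VDI_BLOCKS_IN_IMAGE_MAX * h->block_size)
            return -ENOTSUP;   /* disk size inconsistent with block count */
        return 0;
    }

With 1 MiB blocks, the first limit works out to the 1024 TB maximum disk size mentioned in the message.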
TfLiteStatus Subgraph::PrepareOpsAndTensors() {
  if (!memory_planner_) {
    memory_planner_.reset(new ArenaPlanner(
        &context_, std::unique_ptr<GraphInfo>(new InterpreterInfo(this)),
        preserve_all_tensors_, kDefaultTensorAlignment));
    memory_planner_->PlanAllocations();
  }

  // Prepare original execution plan if any applied delegate wants it.
  // If any of the delegates is immutable, this won't be triggered
  // post-delegation (since we undo/redo delegation). For all other cases, other
  // delegates that do shape propagation themselves would still be able to.
  bool prepare_original_plan = false;
  if (!pre_delegation_execution_plan_.empty()) {
    for (int i = 0; i < delegates_applied_.size(); ++i) {
      if ((delegates_applied_[i]->flags &
           kTfLiteDelegateFlagsRequirePropagatedShapes)) {
        prepare_original_plan = true;
        break;
      }
    }
  }
  if (prepare_original_plan) {
    int last_original_exec_plan_index_prepared = 0;
    TF_LITE_ENSURE_STATUS(PrepareOpsStartingAt(
        next_execution_plan_index_to_prepare_, pre_delegation_execution_plan_,
        &last_original_exec_plan_index_prepared));
    next_original_execution_plan_index_to_prepare_ =
        last_original_exec_plan_index_prepared + 1;
  }

  int last_exec_plan_index_prepared = 0;
  TF_LITE_ENSURE_STATUS(
      PrepareOpsStartingAt(next_execution_plan_index_to_prepare_,
                           execution_plan_, &last_exec_plan_index_prepared));
  next_execution_plan_index_to_prepare_ = last_exec_plan_index_prepared + 1;

  // Execute arena allocations.
  TF_LITE_ENSURE_STATUS(memory_planner_->ExecuteAllocations(
      next_execution_plan_index_to_plan_allocation_,
      last_exec_plan_index_prepared));

  // Ensure custom allocations are large enough for applicable tensors.
  // This causes some extra validations for cases with dynamic tensors, but the
  // overhead should be minimal since the number of custom-allocated tensors
  // will typically be low.
  for (int i = 0; i < custom_allocations_.size(); ++i) {
    auto index_and_alloc = custom_allocations_[i];
    TfLiteTensor* tensor_at_index = tensor(index_and_alloc.first);
    const auto& alloc = index_and_alloc.second;
    TF_LITE_ENSURE_EQ(context(), tensor_at_index->allocation_type,
                      kTfLiteCustom);
    if (alloc.bytes < tensor_at_index->bytes) {
      ReportError("Custom allocation is too small for tensor idx: %d",
                  index_and_alloc.first);
      return kTfLiteError;
    }
  }

  next_execution_plan_index_to_plan_allocation_ =
      last_exec_plan_index_prepared + 1;

  return kTfLiteOk;
}
0
[ "CWE-476" ]
tensorflow
f8378920345f4f4604202d4ab15ef64b2aceaa16
275,672,955,916,329,220,000,000,000,000,000,000,000
64
Prevent a null pointer dereference in TFLite. PiperOrigin-RevId: 370800353 Change-Id: Ic9c9712ce5c6e384c954dcd640a5bd9ff05c9a05
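The fix amounts to a null check before use; a sketch with stand-in names for the TFLite internals (tensor_at_sketch plays the role of the tensor accessor, and the status enum is illustrative):

    #include <stddef.h>

    struct tensor_sketch;   /* opaque stand-in for TfLiteTensor */
    extern struct tensor_sketch *tensor_at_sketch(void *graph, int index);

    enum status_sketch { STATUS_OK = 0, STATUS_ERROR = 1 };

    static enum status_sketch prepare_tensor_sketch(void *graph, int index)
    {
        struct tensor_sketch *t = tensor_at_sketch(graph, index);

        if (t == NULL)
            return STATUS_ERROR;   /* previously dereferenced unconditionally */
        /* ... validate allocation type and sizes on t ... */
        return STATUS_OK;
    }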
void __init xen_early_init(void)
{
	of_scan_flat_dt(fdt_find_hyper_node, NULL);
	if (!hyper_node.found) {
		pr_debug("No Xen support\n");
		return;
	}

	if (hyper_node.version == NULL) {
		pr_debug("Xen version not found\n");
		return;
	}

	pr_info("Xen %s support found\n", hyper_node.version);

	xen_domain_type = XEN_HVM_DOMAIN;

	xen_setup_features();

	if (xen_feature(XENFEAT_dom0))
		xen_start_flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;

	if (!console_set_on_cmdline && !xen_initial_domain())
		add_preferred_console("hvc", 0, NULL);
}
0
[]
linux
fa1f57421e0b1c57843902c89728f823abc32f02
17,310,433,735,226,548,000,000,000,000,000,000,000
25
xen/virtio: Enable restricted memory access using Xen grant mappings In order to support virtio in Xen guests add a config option XEN_VIRTIO enabling the user to specify whether in all Xen guests virtio should be able to access memory via Xen grant mappings only on the host side. Also set PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS feature from the guest initialization code on Arm and x86 if CONFIG_XEN_VIRTIO is enabled. Signed-off-by: Juergen Gross <[email protected]> Signed-off-by: Oleksandr Tyshchenko <[email protected]> Reviewed-by: Stefano Stabellini <[email protected]> Reviewed-by: Boris Ostrovsky <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Juergen Gross <[email protected]>
ExifData::const_iterator serialNumber(const ExifData& ed)
{
    static const char* keys[] = {
        "Exif.Image.CameraSerialNumber",
        "Exif.Canon.SerialNumber",
        "Exif.Nikon3.SerialNumber",
        "Exif.Nikon3.SerialNO",
        "Exif.Fujifilm.SerialNumber",
        "Exif.Olympus.SerialNumber2",
        "Exif.Sigma.SerialNumber"
    };
    return findMetadatum(ed, keys, EXV_COUNTOF(keys));
}
0
[ "CWE-476" ]
exiv2
6e42c1b55e0fc4f360cc56010b0ffe19aa6062d9
138,663,209,248,239,090,000,000,000,000,000,000,000
13
Fix #561. Use proper counter for the idx variable
static void smack_inet_csk_clone(struct sock *sk,
				 const struct request_sock *req)
{
	struct socket_smack *ssp = sk->sk_security;
	char *smack;

	if (req->peer_secid != 0) {
		smack = smack_from_secid(req->peer_secid);
		strncpy(ssp->smk_packet, smack, SMK_MAXLEN);
	} else
		ssp->smk_packet[0] = '\0';
}
0
[]
linux-2.6
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
17,180,468,634,097,893,000,000,000,000,000,000,000
12
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]

Add a keyctl to install a process's session keyring onto its parent. This replaces the parent's session keyring. Because the COW credential code does not permit one process to change another process's credentials directly, the change is deferred until userspace next starts executing again. Normally this will be after a wait*() syscall.

To support this, three new security hooks have been provided: cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in the blank security creds and key_session_to_parent() - which asks the LSM if the process may replace its parent's session keyring.

The replacement may only happen if the process has the same ownership details as its parent, and the process has LINK permission on the session keyring, and the session keyring is owned by the process, and the LSM permits it.

Note that this requires alteration to each architecture's notify_resume path. This has been done for all arches barring blackfin, m68k* and xtensa, all of which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the replacement to be performed at the point the parent process resumes userspace execution.

This allows the userspace AFS pioctl emulation to fully emulate newpag() and the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to alter the parent process's PAG membership. However, since kAFS doesn't use PAGs per se, but rather dumps the keys into the session keyring, the session keyring of the parent must be replaced if, for example, VIOCSETTOK is passed the newpag flag.

This can be tested with the following program:

	#include <stdio.h>
	#include <stdlib.h>
	#include <keyutils.h>

	#define KEYCTL_SESSION_TO_PARENT	18

	#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)

	int main(int argc, char **argv)
	{
		key_serial_t keyring, key;
		long ret;

		keyring = keyctl_join_session_keyring(argv[1]);
		OSERROR(keyring, "keyctl_join_session_keyring");

		key = add_key("user", "a", "b", 1, keyring);
		OSERROR(key, "add_key");

		ret = keyctl(KEYCTL_SESSION_TO_PARENT);
		OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");

		return 0;
	}

Compiled and linked with -lkeyutils, you should see something like:

	[dhowells@andromeda ~]$ keyctl show
	Session Keyring
	       -3 --alswrv   4043  4043  keyring: _ses
	355907932 --alswrv   4043    -1   \_ keyring: _uid.4043
	[dhowells@andromeda ~]$ /tmp/newpag
	[dhowells@andromeda ~]$ keyctl show
	Session Keyring
	        -3 --alswrv  4043  4043  keyring: _ses
	1055658746 --alswrv  4043  4043   \_ user: a
	[dhowells@andromeda ~]$ /tmp/newpag hello
	[dhowells@andromeda ~]$ keyctl show
	Session Keyring
	       -3 --alswrv   4043  4043  keyring: hello
	340417692 --alswrv   4043  4043   \_ user: a

Where the test program creates a new session keyring, sticks a user key named 'a' into it and then installs it on its parent.

Signed-off-by: David Howells <[email protected]>
Signed-off-by: James Morris <[email protected]>
static int jpc_pi_nextcprl(register jpc_pi_t *pi)
{
	int rlvlno;
	jpc_pirlvl_t *pirlvl;
	jpc_pchg_t *pchg;
	int prchind;
	int prcvind;
	int *prclyrno;
	uint_fast32_t trx0;
	uint_fast32_t try0;
	uint_fast32_t r;
	uint_fast32_t rpx;
	uint_fast32_t rpy;

	pchg = pi->pchg;
	if (!pi->prgvolfirst) {
		goto skip;
	} else {
		pi->prgvolfirst = 0;
	}

	for (pi->compno = pchg->compnostart, pi->picomp =
	  &pi->picomps[pi->compno]; pi->compno < JAS_CAST(int,
	  pchg->compnoend) && pi->compno < pi->numcomps; ++pi->compno,
	  ++pi->picomp) {
		pirlvl = pi->picomp->pirlvls;
		pi->xstep = pi->picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) <<
		  (pirlvl->prcwidthexpn + pi->picomp->numrlvls - 1));
		pi->ystep = pi->picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) <<
		  (pirlvl->prcheightexpn + pi->picomp->numrlvls - 1));
		for (rlvlno = 1, pirlvl = &pi->picomp->pirlvls[1];
		  rlvlno < pi->picomp->numrlvls; ++rlvlno, ++pirlvl) {
			pi->xstep = JAS_MIN(pi->xstep, pi->picomp->hsamp *
			  (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn +
			  pi->picomp->numrlvls - rlvlno - 1)));
			pi->ystep = JAS_MIN(pi->ystep, pi->picomp->vsamp *
			  (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn +
			  pi->picomp->numrlvls - rlvlno - 1)));
		}
		for (pi->y = pi->ystart; pi->y < pi->yend;
		  pi->y += pi->ystep - (pi->y % pi->ystep)) {
			for (pi->x = pi->xstart; pi->x < pi->xend;
			  pi->x += pi->xstep - (pi->x % pi->xstep)) {
				for (pi->rlvlno = pchg->rlvlnostart,
				  pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno];
				  pi->rlvlno < pi->picomp->numrlvls &&
				  pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno,
				  ++pi->pirlvl) {
					if (pi->pirlvl->numprcs == 0) {
						continue;
					}
					r = pi->picomp->numrlvls - 1 - pi->rlvlno;
					trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r);
					try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r);
					rpx = r + pi->pirlvl->prcwidthexpn;
					rpy = r + pi->pirlvl->prcheightexpn;
					if (((pi->x == pi->xstart &&
					  ((trx0 << r) % (1 << rpx))) ||
					  !(pi->x % (pi->picomp->hsamp << rpx))) &&
					  ((pi->y == pi->ystart &&
					  ((try0 << r) % (1 << rpy))) ||
					  !(pi->y % (pi->picomp->vsamp << rpy)))) {
						prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x,
						  pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) -
						  JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn);
						prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y,
						  pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) -
						  JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn);
						pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind;
						assert(pi->prcno < pi->pirlvl->numprcs);
						for (pi->lyrno = 0; pi->lyrno < pi->numlyrs &&
						  pi->lyrno < JAS_CAST(int, pchg->lyrnoend);
						  ++pi->lyrno) {
							prclyrno = &pi->pirlvl->prclyrnos[pi->prcno];
							if (pi->lyrno >= *prclyrno) {
								++(*prclyrno);
								return 0;
							}
skip:
							;
						}
					}
				}
			}
		}
	}
	return 1;
}
1
[ "CWE-125" ]
jasper
aa0b0f79ade5eef8b0e7a214c03f5af54b36ba7d
162,154,952,296,218,500,000,000,000,000,000,000,000
86
Fixed numerous integer overflow problems in the code for packet iterators in the JPC decoder.
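A self-contained sketch of the overflow-safe arithmetic such fixes rely on: test each multiplication before performing it, instead of letting the 32-bit product wrap silently into a too-small step size.

    #include <stdint.h>
    #include <stdbool.h>

    /* Returns true (and leaves *result untouched) if a * b would overflow. */
    static bool mul_overflows_u32(uint32_t a, uint32_t b, uint32_t *result)
    {
        if (b != 0 && a > UINT32_MAX / b)
            return true;
        *result = a * b;
        return false;
    }

Applied to products like hsamp * (1 << (prcwidthexpn + numrlvls - 1)) in the function above, a detected overflow lets the iterator bail out with an error instead of iterating with a corrupted step.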