Dataset schema (each row below gives its fields in this order):

func        string    length 0 to 484k    (function source code)
target      int64     0 to 1
cwe         list      length 0 to 4       (CWE identifiers)
project     string    799 classes
commit_id   string    length 40           (commit hash)
hash        float64   1,215,700,430,453,689,100,000,000 to 340,281,914,521,452,260,000,000,000,000
size        int64     1 to 24k
message     string    length 0 to 13.3k   (commit message)
static int match_revfn(u8 af, const char *name, u8 revision, int *bestp) { const struct xt_match *m; int have_rev = 0; mutex_lock(&xt[af].mutex); list_for_each_entry(m, &xt[af].match, list) { if (strcmp(m->name, name) == 0) { if (m->revision > *bestp) *bestp = m->revision; if (m->revision == revision) have_rev = 1; } } mutex_unlock(&xt[af].mutex); if (af != NFPROTO_UNSPEC && !have_rev) return match_revfn(NFPROTO_UNSPEC, name, revision, bestp); return have_rev; }
0
[]
linux
175e476b8cdf2a4de7432583b49c871345e4f8a1
240,918,109,668,058,420,000,000,000,000,000,000,000
21
netfilter: x_tables: Use correct memory barriers. When a new table value was assigned, it was followed by a write memory barrier. This ensured that all writes before this point would complete before any writes after this point. However, to determine whether the rules are unused, the sequence counter is read. To ensure that all writes have been done before these reads, a full memory barrier is needed, not just a write memory barrier. The same argument applies when incrementing the counter, before the rules are read. Changing to using smp_mb() instead of smp_wmb() fixes the kernel panic reported in cc00bcaa5899 (which is still present), while still maintaining the same speed of replacing tables. The smp_mb() barriers potentially slow the packet path; however, testing has shown no measurable change in performance on a 4-core MIPS64 platform. Fixes: 7f5c6d4f665b ("netfilter: get rid of atomic ops in fast path") Signed-off-by: Mark Tomlinson <[email protected]> Signed-off-by: Pablo Neira Ayuso <[email protected]>
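The write-versus-full barrier distinction this message draws can be illustrated in userspace with C11 fences. The sketch below is a minimal analogue, not the kernel patch: table_swap, cur_table and seqcount are hypothetical names, and atomic_thread_fence(memory_order_seq_cst) stands in for smp_mb().

#include <stdatomic.h>

static _Atomic(int *) cur_table;   /* hypothetical published table pointer */
static atomic_uint    seqcount;    /* hypothetical reader sequence counter */

void table_swap(int *new_table)
{
    atomic_store_explicit(&cur_table, new_table, memory_order_relaxed);
    /* A write barrier (smp_wmb analogue) would only order the store above
     * against later stores. The load of seqcount below is a read, so a
     * full barrier is needed to keep it from moving before the store. */
    atomic_thread_fence(memory_order_seq_cst);
    unsigned seq = atomic_load_explicit(&seqcount, memory_order_relaxed);
    (void)seq;   /* the real code then waits for readers of the old table */
}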
ProcessStartupPacket(Port *port, bool SSLdone) { int32 len; void *buf; ProtocolVersion proto; MemoryContext oldcontext; if (pq_getbytes((char *) &len, 4) == EOF) { /* * EOF after SSLdone probably means the client didn't like our * response to NEGOTIATE_SSL_CODE. That's not an error condition, so * don't clutter the log with a complaint. */ if (!SSLdone) ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("incomplete startup packet"))); return STATUS_ERROR; } len = ntohl(len); len -= 4; if (len < (int32) sizeof(ProtocolVersion) || len > MAX_STARTUP_PACKET_LENGTH) { ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("invalid length of startup packet"))); return STATUS_ERROR; } /* * Allocate at least the size of an old-style startup packet, plus one * extra byte, and make sure all are zeroes. This ensures we will have * null termination of all strings, in both fixed- and variable-length * packet layouts. */ if (len <= (int32) sizeof(StartupPacket)) buf = palloc0(sizeof(StartupPacket) + 1); else buf = palloc0(len + 1); if (pq_getbytes(buf, len) == EOF) { ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("incomplete startup packet"))); return STATUS_ERROR; } /* * The first field is either a protocol version number or a special * request code. */ port->proto = proto = ntohl(*((ProtocolVersion *) buf)); if (proto == CANCEL_REQUEST_CODE) { processCancelRequest(port, buf); /* Not really an error, but we don't want to proceed further */ return STATUS_ERROR; } if (proto == NEGOTIATE_SSL_CODE && !SSLdone) { char SSLok; #ifdef USE_SSL /* No SSL when disabled or on Unix sockets */ if (!EnableSSL || IS_AF_UNIX(port->laddr.addr.ss_family)) SSLok = 'N'; else SSLok = 'S'; /* Support for SSL */ #else SSLok = 'N'; /* No support for SSL */ #endif retry1: if (send(port->sock, &SSLok, 1, 0) != 1) { if (errno == EINTR) goto retry1; /* if interrupted, just retry */ ereport(COMMERROR, (errcode_for_socket_access(), errmsg("failed to send SSL negotiation response: %m"))); return STATUS_ERROR; /* close the connection */ } #ifdef USE_SSL if (SSLok == 'S' && secure_open_server(port) == -1) return STATUS_ERROR; #endif /* regular startup packet, cancel, etc packet should follow... */ /* but not another SSL negotiation request */ return ProcessStartupPacket(port, true); } /* Could add additional special packet types here */ /* * Set FrontendProtocol now so that ereport() knows what format to send if * we fail during startup. */ FrontendProtocol = proto; /* Check we can handle the protocol the frontend is using. */ if (PG_PROTOCOL_MAJOR(proto) < PG_PROTOCOL_MAJOR(PG_PROTOCOL_EARLIEST) || PG_PROTOCOL_MAJOR(proto) > PG_PROTOCOL_MAJOR(PG_PROTOCOL_LATEST) || (PG_PROTOCOL_MAJOR(proto) == PG_PROTOCOL_MAJOR(PG_PROTOCOL_LATEST) && PG_PROTOCOL_MINOR(proto) > PG_PROTOCOL_MINOR(PG_PROTOCOL_LATEST))) ereport(FATAL, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unsupported frontend protocol %u.%u: server supports %u.0 to %u.%u", PG_PROTOCOL_MAJOR(proto), PG_PROTOCOL_MINOR(proto), PG_PROTOCOL_MAJOR(PG_PROTOCOL_EARLIEST), PG_PROTOCOL_MAJOR(PG_PROTOCOL_LATEST), PG_PROTOCOL_MINOR(PG_PROTOCOL_LATEST)))); /* * Now fetch parameters out of startup packet and save them into the Port * structure. All data structures attached to the Port struct must be * allocated in TopMemoryContext so that they will remain available in a * running backend (even after PostmasterContext is destroyed). We need * not worry about leaking this storage on failure, since we aren't in the * postmaster process anymore. 
*/ oldcontext = MemoryContextSwitchTo(TopMemoryContext); if (PG_PROTOCOL_MAJOR(proto) >= 3) { int32 offset = sizeof(ProtocolVersion); /* * Scan packet body for name/option pairs. We can assume any string * beginning within the packet body is null-terminated, thanks to * zeroing extra byte above. */ port->guc_options = NIL; while (offset < len) { char *nameptr = ((char *) buf) + offset; int32 valoffset; char *valptr; if (*nameptr == '\0') break; /* found packet terminator */ valoffset = offset + strlen(nameptr) + 1; if (valoffset >= len) break; /* missing value, will complain below */ valptr = ((char *) buf) + valoffset; if (strcmp(nameptr, "database") == 0) port->database_name = pstrdup(valptr); else if (strcmp(nameptr, "user") == 0) port->user_name = pstrdup(valptr); else if (strcmp(nameptr, "options") == 0) port->cmdline_options = pstrdup(valptr); else if (strcmp(nameptr, "replication") == 0) { /* * Due to backward compatibility concerns the replication * parameter is a hybrid beast which allows the value to be * either boolean or the string 'database'. The latter * connects to a specific database which is e.g. required for * logical decoding while. */ if (strcmp(valptr, "database") == 0) { am_walsender = true; am_db_walsender = true; } else if (!parse_bool(valptr, &am_walsender)) ereport(FATAL, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid value for parameter \"replication\""), errhint("Valid values are: false, 0, true, 1, database."))); } else { /* Assume it's a generic GUC option */ port->guc_options = lappend(port->guc_options, pstrdup(nameptr)); port->guc_options = lappend(port->guc_options, pstrdup(valptr)); } offset = valoffset + strlen(valptr) + 1; } /* * If we didn't find a packet terminator exactly at the end of the * given packet length, complain. */ if (offset != len - 1) ereport(FATAL, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("invalid startup packet layout: expected terminator as last byte"))); } else { /* * Get the parameters from the old-style, fixed-width-fields startup * packet as C strings. The packet destination was cleared first so a * short packet has zeros silently added. We have to be prepared to * truncate the pstrdup result for oversize fields, though. */ StartupPacket *packet = (StartupPacket *) buf; port->database_name = pstrdup(packet->database); if (strlen(port->database_name) > sizeof(packet->database)) port->database_name[sizeof(packet->database)] = '\0'; port->user_name = pstrdup(packet->user); if (strlen(port->user_name) > sizeof(packet->user)) port->user_name[sizeof(packet->user)] = '\0'; port->cmdline_options = pstrdup(packet->options); if (strlen(port->cmdline_options) > sizeof(packet->options)) port->cmdline_options[sizeof(packet->options)] = '\0'; port->guc_options = NIL; } /* Check a user name was given. */ if (port->user_name == NULL || port->user_name[0] == '\0') ereport(FATAL, (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION), errmsg("no PostgreSQL user name specified in startup packet"))); /* The database defaults to the user name. */ if (port->database_name == NULL || port->database_name[0] == '\0') port->database_name = pstrdup(port->user_name); if (Db_user_namespace) { /* * If user@, it is a global user, remove '@'. We only want to do this * if there is an '@' at the end and no earlier in the user string or * they may fake as a local user of another database attaching to this * database. 
*/ if (strchr(port->user_name, '@') == port->user_name + strlen(port->user_name) - 1) *strchr(port->user_name, '@') = '\0'; else { /* Append '@' and dbname */ port->user_name = psprintf("%s@%s", port->user_name, port->database_name); } } /* * Truncate given database and user names to length of a Postgres name. * This avoids lookup failures when overlength names are given. */ if (strlen(port->database_name) >= NAMEDATALEN) port->database_name[NAMEDATALEN - 1] = '\0'; if (strlen(port->user_name) >= NAMEDATALEN) port->user_name[NAMEDATALEN - 1] = '\0'; /* * Normal walsender backends, e.g. for streaming replication, are not * connected to a particular database. But walsenders used for logical * replication need to connect to a specific database. We allow streaming * replication commands to be issued even if connected to a database as it * can make sense to first make a basebackup and then stream changes * starting from that. */ if (am_walsender && !am_db_walsender) port->database_name[0] = '\0'; /* * Done putting stuff in TopMemoryContext. */ MemoryContextSwitchTo(oldcontext); /* * If we're going to reject the connection due to database state, say so * now instead of wasting cycles on an authentication exchange. (This also * allows a pg_ping utility to be written.) */ switch (port->canAcceptConnections) { case CAC_STARTUP: ereport(FATAL, (errcode(ERRCODE_CANNOT_CONNECT_NOW), errmsg("the database system is starting up"))); break; case CAC_SHUTDOWN: ereport(FATAL, (errcode(ERRCODE_CANNOT_CONNECT_NOW), errmsg("the database system is shutting down"))); break; case CAC_RECOVERY: ereport(FATAL, (errcode(ERRCODE_CANNOT_CONNECT_NOW), errmsg("the database system is in recovery mode"))); break; case CAC_TOOMANY: ereport(FATAL, (errcode(ERRCODE_TOO_MANY_CONNECTIONS), errmsg("sorry, too many clients already"))); break; case CAC_WAITBACKUP: /* OK for now, will check in InitPostgres */ break; case CAC_OK: break; } return STATUS_OK; }
1
[ "CWE-89" ]
postgres
2b3a8b20c2da9f39ffecae25ab7c66974fbc0d3b
127,571,964,776,307,150,000,000,000,000,000,000,000
312
Be more careful to not lose sync in the FE/BE protocol. If any error occurred while we were in the middle of reading a protocol message from the client, we could lose sync, and incorrectly try to interpret a part of another message as a new protocol message. That will usually lead to an "invalid frontend message" error that terminates the connection. However, this is a security issue because an attacker might be able to deliberately cause an error, inject a Query message in what's supposed to be just user data, and have the server execute it. We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other operations that could ereport(ERROR) in the middle of processing a message, but a query cancel interrupt or statement timeout could nevertheless cause it to happen. Also, the V2 fastpath and COPY handling were not so careful. It's very difficult to recover in the V2 COPY protocol, so we will just terminate the connection on error. In practice, that's what happened previously anyway, as we lost protocol sync. To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set whenever we're in the middle of reading a message. When it's set, we cannot safely ERROR out and continue running, because we might've read only part of a message. PqCommReadingMsg acts somewhat similarly to critical sections in that if an error occurs while it's set, the error handler will force the connection to be terminated, as if the error was FATAL. It's not implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted to PANIC in critical sections, because we want to be able to use PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes advantage of that to prevent an OOM error from terminating the connection. To prevent unnecessary connection terminations, add a holdoff mechanism similar to HOLD/RESUME_INTERRUPTS() that can be used to hold off query cancel interrupts, but still allow die interrupts. The rules on which interrupts are processed when are now a bit more complicated, so refactor ProcessInterrupts() and the calls to it in signal handlers so that the signal handlers always call it if ImmediateInterruptOK is set, and ProcessInterrupts() can decide to not do anything if the other conditions are not met. Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund. Backpatch to all supported versions. Security: CVE-2015-0244
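A minimal sketch of the guard this message describes, assuming a much-simplified connection loop: a flag brackets the span where a message is being consumed, and the error path treats any failure inside that span as fatal for the connection instead of recoverable. Apart from the variable name PqCommReadingMsg, which the message itself introduces, all names here are hypothetical; this is not PostgreSQL's implementation.

#include <stdbool.h>
#include <stdlib.h>

static bool PqCommReadingMsg = false;   /* true while mid-message */

/* Error path: a mid-message error must terminate the connection, because
 * part of a message may already be consumed and the next message boundary
 * can no longer be found. */
static void on_error(void)
{
    if (PqCommReadingMsg)
        exit(1);            /* promote to connection termination */
    /* otherwise: report the error and keep the session alive */
}

/* Hypothetical message reader: the flag brackets the whole read. */
static int read_message(char *buf, int maxlen, int (*recv_fn)(char *, int))
{
    PqCommReadingMsg = true;
    int n = recv_fn(buf, maxlen);   /* consume the complete message body */
    PqCommReadingMsg = false;       /* back at a message boundary */
    return n;
}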
void X509_STORE_CTX_set_depth(X509_STORE_CTX *ctx, int depth) { X509_VERIFY_PARAM_set_depth(ctx->param, depth); }
0
[]
openssl
d65b8b2162f33ac0d53dace588a0847ed827626c
44,340,504,061,951,380,000,000,000,000,000,000,000
4
Backport OCSP fixes.
static void php_imagefontsize(INTERNAL_FUNCTION_PARAMETERS, int arg) { zend_long SIZE; gdFontPtr font; if (zend_parse_parameters(ZEND_NUM_ARGS(), "l", &SIZE) == FAILURE) { return; } font = php_find_gd_font(SIZE); RETURN_LONG(arg ? font->h : font->w); }
0
[ "CWE-787" ]
php-src
28022c9b1fd937436ab67bb3d61f652c108baf96
265,065,435,668,371,420,000,000,000,000,000,000,000
12
Fix bug#72697 - select_colors write out-of-bounds (cherry picked from commit b6f13a5ef9d6280cf984826a5de012a32c396cd4) Conflicts: ext/gd/gd.c
compileFile (const char *fileName) { /*Compile a table file */ FileInfo nested; fileCount++; nested.fileName = fileName; nested.encoding = noEncoding; nested.status = 0; nested.lineNumber = 0; if ((nested.in = findTable (fileName))) { while (getALine (&nested)) compileRule (&nested); fclose (nested.in); } else { if (fileCount > 1) lou_logPrint ("Cannot open table '%s'", nested.fileName); errorCount++; return 0; } return 1; }
1
[]
liblouis
dc97ef791a4fae9da11592c79f9f79e010596e0c
338,572,681,038,668,370,000,000,000,000,000,000,000
24
Merge branch 'table_resolver'
static TEE_Result op_attr_secret_value_from_user(void *attr, const void *buffer, size_t size) { struct tee_cryp_obj_secret *key = attr; /* Data size has to fit in allocated buffer */ if (size > key->alloc_size) return TEE_ERROR_SECURITY; memcpy(key + 1, buffer, size); key->key_size = size; return TEE_SUCCESS; }
0
[ "CWE-119", "CWE-787" ]
optee_os
a637243270fc1faae16de059091795c32d86e65e
160,198,768,455,629,080,000,000,000,000,000,000,000
12
svc: check for allocation overflow in crypto calls Without checking for overflow there is a risk of allocating a buffer with a size smaller than anticipated, and as a consequence it might lead to a heap-based overflow with attacker-controlled data written outside the boundaries of the buffer. Fixes: OP-TEE-2018-0010: "Integer overflow in crypto system calls (x2)" Signed-off-by: Joakim Bech <[email protected]> Tested-by: Joakim Bech <[email protected]> (QEMU v7, v8) Reviewed-by: Jens Wiklander <[email protected]> Reported-by: Riscure <[email protected]> Reported-by: Alyssa Milburn <[email protected]> Acked-by: Etienne Carriere <[email protected]>
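The check this message describes amounts to guarding the multiplication that computes an allocation size. A generic sketch, assuming the GCC/Clang builtin __builtin_mul_overflow; this illustrates the pattern and is not the OP-TEE code:

#include <stddef.h>
#include <stdlib.h>

/* Returns NULL instead of allocating a wrapped-around (too small) buffer
 * that a later write of count * elem_size bytes would overflow. */
void *alloc_array_checked(size_t count, size_t elem_size)
{
    size_t bytes;

    if (__builtin_mul_overflow(count, elem_size, &bytes))
        return NULL;   /* product does not fit in size_t: refuse */
    return malloc(bytes);
}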
struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst, bool can_sleep) { struct dst_entry *dst = NULL; int err; err = ip6_dst_lookup_tail(sk, &dst, fl6); if (err) return ERR_PTR(err); if (final_dst) fl6->daddr = *final_dst; if (can_sleep) fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP; return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); }
0
[ "CWE-119", "CWE-401" ]
linux
2811ebac2521ceac84f2bdae402455baa6a7fb47
339,377,340,932,948,850,000,000,000,000,000,000,000
17
ipv6: udp packets following an UFO enqueued packet need also be handled by UFO In the following scenario the socket is corked: If the first UDP packet is larger than the mtu, we try to append it to the write queue via ip6_ufo_append_data. A following packet, which is smaller than the mtu, would be appended to the already queued up gso-skb via plain ip6_append_data. This causes random memory corruptions. In ip6_ufo_append_data we also have to be careful to not queue up the same skb multiple times. So set up the gso frame only when no first skb is available. This also fixes a shortcoming where we add the current packet's length to cork->length but return early because of a packet > mtu with dontfrag set (instead of subtracting it again). Found with trinity. Cc: YOSHIFUJI Hideaki <[email protected]> Signed-off-by: Hannes Frederic Sowa <[email protected]> Reported-by: Dmitry Vyukov <[email protected]> Signed-off-by: David S. Miller <[email protected]>
__global__ void SortedSegmentSumCustomKernel(const Index input_outer_dim_size, const Index inner_dim_size, const Index output_outer_dim_size, const Index* segment_ids, const T* input, T* output, const Index total_stripe_count) { for (int stripe_index : GpuGridRangeX(total_stripe_count)) { const Index segment_offset = stripe_index % inner_dim_size; const Index input_outer_dim_index_base = stripe_index / inner_dim_size * Index(OuterDimTileSize); T sum = T(0); Index first_segment_id = segment_ids[input_outer_dim_index_base]; Index last_output_segment_id = output_outer_dim_size; const Index actual_stripe_height = min(Index(OuterDimTileSize), input_outer_dim_size - input_outer_dim_index_base); for (Index j = 0; j < actual_stripe_height; j++) { Index current_output_segment_id = segment_ids[input_outer_dim_index_base + j]; // Decide whether to write result to global memory. // Result is only written to global memory if we move // to another segment. Otherwise we can keep accumulating // locally. if (current_output_segment_id > last_output_segment_id) { const Index output_index = last_output_segment_id * inner_dim_size + segment_offset; // decide whether to write result to global memory using atomic // operations if (last_output_segment_id == first_segment_id) { GpuAtomicAdd(output + output_index, sum); } else { *(output + output_index) = sum; } sum = T(0); } sum += ldg(input + (input_outer_dim_index_base + j) * inner_dim_size + segment_offset); last_output_segment_id = current_output_segment_id; } // For the last result in a strip, always write using atomic operations // due to possible race conditions with threads computing // the following strip. const Index output_index = last_output_segment_id * inner_dim_size + segment_offset; GpuAtomicAdd(output + output_index, sum); } }
0
[ "CWE-703", "CWE-681", "CWE-787" ]
tensorflow
db4f9717c41bccc3ce10099ab61996b246099892
149,257,465,446,489,920,000,000,000,000,000,000,000
49
Fix heap buffer overflow in UnsortedSegmentSum. When Index=int32, data_size and num_segments were truncated from int64 to int32. This truncation can produce negative numbers, which causes UnsortedSegmentFunctor to access out of bounds memory. Also:
- Switches some indexing calculations to int64 to avoid signed integer overflow when either the input or output tensors have more than 2**31 - 1 elements.
- Fixes a range check error in the GPU kernel. The segment ID was checked against an upper bound measured in elements, not segments.
PiperOrigin-RevId: 256451663
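The truncation hazard named in this message is easy to reproduce in isolation. A standalone sketch with hypothetical values; the out-of-range cast wraps around on common two's-complement targets:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t data_size = 3000000000LL;        /* more than 2**31 - 1 elements */
    int32_t truncated = (int32_t)data_size;  /* wraps to a negative value */

    printf("truncated = %d\n", (int)truncated);   /* prints -1294967296 */

    /* Safe pattern: range-check before narrowing instead of after. */
    if (data_size > INT32_MAX)
        fprintf(stderr, "size does not fit in int32, reject input\n");
    return 0;
}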
static void svm_set_vintr(struct vcpu_svm *svm) { struct vmcb_control_area *control; /* * The following fields are ignored when AVIC is enabled */ WARN_ON(kvm_apicv_activated(svm->vcpu.kvm)); svm_set_intercept(svm, INTERCEPT_VINTR); /* * This is just a dummy VINTR to actually cause a vmexit to happen. * Actual injection of virtual interrupts happens through EVENTINJ. */ control = &svm->vmcb->control; control->int_vector = 0x0; control->int_ctl &= ~V_INTR_PRIO_MASK; control->int_ctl |= V_IRQ_MASK | ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT); vmcb_mark_dirty(svm->vmcb, VMCB_INTR); }
0
[ "CWE-862" ]
kvm
0f923e07124df069ba68d8bb12324398f4b6b709
307,325,522,761,541,950,000,000,000,000,000,000,000
22
KVM: nSVM: avoid picking up unsupported bits from L2 in int_ctl (CVE-2021-3653)
* Invert the mask of bits that we pick from L2 in nested_vmcb02_prepare_control
* Invert and explicitly use VIRQ related bits bitmask in svm_clear_vintr
This fixes a security issue that allowed a malicious L1 to run L2 with AVIC enabled, which allowed the L2 to exploit the uninitialized and enabled AVIC to read/write the host physical memory at some offsets. Fixes: 3d6368ef580a ("KVM: SVM: Add VMRUN handler") Signed-off-by: Maxim Levitsky <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
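The "invert the mask" idea in the first bullet can be sketched as switching from a deny-list to an allow-list when merging a guest-supplied control word: copy only explicitly supported bits from L2 and take everything else from the host value. The mask and names below are illustrative, not the actual KVM definitions.

#include <stdint.h>

#define ALLOWED_FROM_L2 0x000000ffu   /* hypothetical allow-list of bits */

/* Bits outside the allow-list (e.g. AVIC enables) can never leak in from
 * the nested guest; they always come from the host-controlled value. */
static uint32_t merge_int_ctl(uint32_t l2_int_ctl, uint32_t host_int_ctl)
{
    return (l2_int_ctl & ALLOWED_FROM_L2) | (host_int_ctl & ~ALLOWED_FROM_L2);
}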
static av_always_inline void dnxhd_unquantize_c(DNXHDEncContext *ctx, int16_t *block, int n, int qscale, int last_index) { const uint8_t *weight_matrix; int level; int i; weight_matrix = (n&2) ? ctx->cid_table->chroma_weight : ctx->cid_table->luma_weight; for (i = 1; i <= last_index; i++) { int j = ctx->m.intra_scantable.permutated[i]; level = block[j]; if (level) { if (level < 0) { level = (1-2*level) * qscale * weight_matrix[i]; if (ctx->cid_table->bit_depth == 10) { if (weight_matrix[i] != 8) level += 8; level >>= 4; } else { if (weight_matrix[i] != 32) level += 32; level >>= 6; } level = -level; } else { level = (2*level+1) * qscale * weight_matrix[i]; if (ctx->cid_table->bit_depth == 10) { if (weight_matrix[i] != 8) level += 8; level >>= 4; } else { if (weight_matrix[i] != 32) level += 32; level >>= 6; } } block[j] = level; } } }
0
[ "CWE-703" ]
FFmpeg
f1caaa1c61310beba705957e6366f0392a0b005b
262,871,303,037,778,730,000,000,000,000,000,000,000
40
dnxhdenc: fix mb_rc size Fixes out of array access with RC_VARIANCE set to 0 Signed-off-by: Michael Niedermayer <[email protected]>
static void ext4_write_super(struct super_block *sb) { lock_super(sb); ext4_commit_super(sb, 1); unlock_super(sb); }
0
[ "CWE-703" ]
linux
744692dc059845b2a3022119871846e74d4f6e11
251,522,534,565,043,100,000,000,000,000,000,000,000
6
ext4: use ext4_get_block_write in buffer write Allocate uninitialized extent before ext4 buffer write and convert the extent to initialized after io completes. The purpose is to make sure an extent can only be marked initialized after it has been written with new data so we can safely drop the i_mutex lock in ext4 DIO read without exposing stale data. This helps to improve multi-thread DIO read performance on high-speed disks. Skip the nobh and data=journal mount cases to make things simple for now. Signed-off-by: Jiaying Zhang <[email protected]> Signed-off-by: "Theodore Ts'o" <[email protected]>
static inline void fpu_copy(struct task_struct *dst, struct task_struct *src) { if (use_eager_fpu()) { memset(&dst->thread.fpu.state->xsave, 0, xstate_size); __save_fpu(dst); } else { struct fpu *dfpu = &dst->thread.fpu; struct fpu *sfpu = &src->thread.fpu; unlazy_fpu(src); memcpy(dfpu->state, sfpu->state, xstate_size); } }
0
[ "CWE-284", "CWE-264" ]
linux
26bef1318adc1b3a530ecc807ef99346db2aa8b0
283,915,630,087,063,160,000,000,000,000,000,000,000
13
x86, fpu, amd: Clear exceptions in AMD FXSAVE workaround Before we do an EMMS in the AMD FXSAVE information leak workaround we need to clear any pending exceptions, otherwise we trap with a floating-point exception inside this code. Reported-by: halfdog <[email protected]> Tested-by: Borislav Petkov <[email protected]> Link: http://lkml.kernel.org/r/CA%2B55aFxQnY_PCG_n4=0w-VG=YLXL-yr7oMxyy0WU2gCBAf3ydg@mail.gmail.com Signed-off-by: H. Peter Anvin <[email protected]>
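A sketch of the step this message describes, assuming x86 GCC-style inline assembly: fnclex discards pending x87 exceptions so that the following emms (an instruction that reports pending x87 exceptions) cannot trap inside the workaround. This shows the shape of the idea, not the exact kernel hunk.

/* x86-only: clear pending x87 exceptions, then empty the MMX/x87 state. */
static inline void clear_exceptions_then_emms(void)
{
    __asm__ volatile("fnclex\n\t"
                     "emms");
}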
void add_reported_to(struct dump_dir *dd, const char *line) { if (!dd->locked) error_msg_and_die("dump_dir is not opened"); /* bug */ char *reported_to = dd_load_text_ext(dd, FILENAME_REPORTED_TO, DD_FAIL_QUIETLY_ENOENT | DD_LOAD_TEXT_RETURN_NULL_ON_FAILURE); if (reported_to) { unsigned len_line = strlen(line); char *p = reported_to; while (*p) { if (strncmp(p, line, len_line) == 0 && (p[len_line] == '\n' || p[len_line] == '\0')) goto ret; p = strchrnul(p, '\n'); if (!*p) break; p++; } if (p != reported_to && p[-1] != '\n') reported_to = append_to_malloced_string(reported_to, "\n"); reported_to = append_to_malloced_string(reported_to, line); reported_to = append_to_malloced_string(reported_to, "\n"); } else reported_to = xasprintf("%s\n", line); dd_save_text(dd, FILENAME_REPORTED_TO, reported_to); ret: free(reported_to); }
0
[ "CWE-264" ]
libreport
3bbf961b1884dd32654dd39b360dd78ef294b10a
44,443,432,841,809,080,000,000,000,000,000,000,000
30
never follow symlinks rhbz#887866 - use lchown instead of chown, and O_NOFOLLOW where we use open
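The two hardening moves named here, O_NOFOLLOW on open and lchown instead of chown, can be sketched together. The function and policy below are hypothetical, not libreport's API:

#include <fcntl.h>
#include <sys/types.h>
#include <unistd.h>

int create_dump_file(const char *path, uid_t uid, gid_t gid)
{
    /* O_EXCL | O_NOFOLLOW: fail rather than open through a planted
     * symlink (or any pre-existing file) at a predictable path. */
    int fd = open(path, O_WRONLY | O_CREAT | O_EXCL | O_NOFOLLOW, 0640);
    if (fd < 0)
        return -1;

    /* lchown() never dereferences a symlink, so ownership can't be
     * redirected onto an attacker-chosen target. */
    if (lchown(path, uid, gid) != 0) {
        close(fd);
        return -1;
    }
    return fd;
}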
DEFUN (clear_ip_bgp_all_vpnv4_soft_out, clear_ip_bgp_all_vpnv4_soft_out_cmd, "clear ip bgp * vpnv4 unicast soft out", CLEAR_STR IP_STR BGP_STR "Clear all peers\n" "Address family\n" "Address Family Modifier\n" "Soft reconfig\n" "Soft reconfig outbound update\n") { return bgp_clear_vty (vty, NULL, AFI_IP, SAFI_MPLS_VPN, clear_all, BGP_CLEAR_SOFT_OUT, NULL); }
0
[ "CWE-125" ]
frr
6d58272b4cf96f0daa846210dd2104877900f921
131,212,802,549,929,900,000,000,000,000,000,000,000
15
[bgpd] cleanup, compact and consolidate capability parsing code
2007-07-26 Paul Jakma <[email protected]>
* (general) Clean up and compact capability parsing slightly. Consolidate validation of length and logging of generic TLV, and memcpy of capability data, thus removing such from cap-specific code (not always present or correct).
* bgp_open.h: Add structures for the generic capability TLV header and for the data formats of the various specific capabilities we support. Hence remove the badly named, or else misdefined, struct capability.
* bgp_open.c: (bgp_capability_vty_out) Use struct capability_mp_data. Do the length checks *before* memcpy()'ing based on that length (stored capability - should have been validated anyway on input, but..). (bgp_afi_safi_valid_indices) new function to validate (afi,safi) which is about to be used as index into arrays, consolidates several instances of same, at least one of which appeared to be incomplete.. (bgp_capability_mp) Much condensed. (bgp_capability_orf_entry) New, process one ORF entry (bgp_capability_orf) Condensed. Fixed to process all ORF entries. (bgp_capability_restart) Condensed, and fixed to use a cap-specific type, rather than abusing capability_mp. (struct message capcode_str) added to aid generic logging. (size_t cap_minsizes[]) added to aid generic validation of capability length field. (bgp_capability_parse) Generic logging and validation of TLV consolidated here. Code compacted as much as possible.
* bgp_packet.c: (bgp_open_receive) Capability parsers now use streams, so no more need here to manually fudge the input stream getp. (bgp_capability_msg_parse) use struct capability_mp_data. Validate lengths /before/ memcpy. Use bgp_afi_safi_valid_indices. (bgp_capability_receive) Exported for use by test harness.
* bgp_vty.c: (bgp_show_summary) fix conversion warning (bgp_show_peer) ditto
* bgp_debug.h: Fix storage 'extern' after type 'const'.
* lib/log.c: (mes_lookup) warning about code not being in same-number array slot should be debug, not warning. E.g. BGP has several discontiguous number spaces, allocating from different parts of a space is not uncommon (e.g. IANA assigned versus vendor-assigned code points in some number space).
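The recurring rule in this changelog, validate the TLV length before any memcpy based on it, has a generic shape. A sketch with an illustrative header layout (not bgpd's structures):

#include <stddef.h>
#include <string.h>

struct cap_tlv { unsigned char code, length; };   /* header only */

static int parse_cap(const unsigned char *pkt, size_t pkt_len,
                     void *out, size_t out_size, size_t min_len)
{
    const struct cap_tlv *hdr = (const void *)pkt;

    if (pkt_len < sizeof(*hdr))
        return -1;                                /* truncated header */
    if (hdr->length < min_len ||                  /* too short for this cap */
        hdr->length > pkt_len - sizeof(*hdr) ||   /* runs past the packet */
        hdr->length > out_size)                   /* larger than destination */
        return -1;
    memcpy(out, pkt + sizeof(*hdr), hdr->length); /* only after the checks */
    return hdr->length;
}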
lyp_check_include_missing(struct lys_module *main_module) { int ret = 0; uint8_t i; /* in YANG 1.1, all the submodules must be in the main module, check it even for * 1.0 where it will be printed as warning and the include will be added into the main module */ for (i = 0; i < main_module->inc_size; i++) { if (lyp_check_include_missing_recursive(main_module, main_module->inc[i].submodule)) { ret = 1; } } return ret; }
0
[ "CWE-787" ]
libyang
f6d684ade99dd37b21babaa8a856f64faa1e2e0d
295,964,720,780,365,040,000,000,000,000,000,000,000
15
parser BUGFIX long identity name buffer overflow STRING_OVERFLOW (CWE-120)
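The fix category named here (CWE-120 string overflow) usually reduces to checking a name's length against the destination before copying. A generic sketch with a hypothetical buffer size, not the libyang code:

#include <string.h>

#define ID_BUF_LEN 256   /* hypothetical fixed-size identity buffer */

static int copy_identity_name(char dst[ID_BUF_LEN], const char *name)
{
    size_t n = strlen(name);

    if (n >= ID_BUF_LEN)
        return -1;            /* reject instead of overflowing dst */
    memcpy(dst, name, n + 1); /* length already proven to fit */
    return 0;
}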
static void print_help() { MEM_ROOT mem_root; init_alloc_root(&mem_root, 4096, 4096); pop_dynamic(&all_options); sys_var_add_options(&all_options, sys_var::PARSE_EARLY); add_plugin_options(&all_options, &mem_root); sort_dynamic(&all_options, (qsort_cmp) option_cmp); add_terminator(&all_options); my_print_help((my_option*) all_options.buffer); my_print_variables((my_option*) all_options.buffer); free_root(&mem_root, MYF(0)); }
0
[ "CWE-362" ]
server
347eeefbfc658c8531878218487d729f4e020805
32,932,762,186,193,293,000,000,000,000,000,000,000
16
don't use my_copystat in the server it was supposed to be used in command-line tools only. Different fix for 4e5473862e: Bug#24388746: PRIVILEGE ESCALATION AND RACE CONDITION USING CREATE TABLE
archive_mstring_copy_wcs_len(struct archive_mstring *aes, const wchar_t *wcs, size_t len) { if (wcs == NULL) { aes->aes_set = 0; return (0); } aes->aes_set = AES_SET_WCS; /* Only WCS form set. */ archive_string_empty(&(aes->aes_mbs)); archive_string_empty(&(aes->aes_utf8)); archive_wstrncpy(&(aes->aes_wcs), wcs, len); return (0); }
0
[ "CWE-125" ]
libarchive
22b1db9d46654afc6f0c28f90af8cdc84a199f41
255,618,059,265,825,070,000,000,000,000,000,000,000
13
Bugfix and optimize archive_wstring_append_from_mbs() The call to mbrtowc() or mbtowc() should read up to mbs_length bytes and not wcs_length. This avoids out-of-bounds reads. mbrtowc() and mbtowc() return (size_t)-1 with errno EILSEQ when they encounter an invalid multibyte character and (size_t)-2 when they encounter an incomplete multibyte character. As we return failure and all our callers error out, it makes no sense to continue parsing mbs. As we allocate `len` wchars at the beginning and each wchar has at least one byte, there will never be a need to grow the buffer, so that code can be left out. On the other hand, we are always allocating more memory than we need. As long as wcs_length == mbs_length == len we can omit wcs_length. We keep the old code commented in case we decide to save memory and use an autoexpanding wcs_length in the future. Fixes #1276
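The corrected loop this message describes, passing the remaining byte count to mbrtowc() and failing on its two error returns, can be sketched as follows. The function name and wrapper are illustrative, not libarchive's; wcs must have room for mbs_length + 1 wide characters.

#include <sys/types.h>
#include <wchar.h>

static ssize_t mbs_to_wcs(wchar_t *wcs, const char *mbs, size_t mbs_length)
{
    mbstate_t st = {0};
    size_t out = 0, r;

    while (mbs_length > 0) {
        /* pass the remaining *bytes*, not the remaining wide chars */
        r = mbrtowc(&wcs[out], mbs, mbs_length, &st);
        if (r == (size_t)-1 || r == (size_t)-2)
            return -1;   /* invalid or incomplete sequence: fail outright */
        if (r == 0)
            break;       /* converted the terminating NUL */
        mbs += r;
        mbs_length -= r;
        out++;
    }
    wcs[out] = L'\0';
    return (ssize_t)out;
}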
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, unsigned long end, long adjust_next) { if (!vma->anon_vma || vma->vm_ops) return; __vma_adjust_trans_huge(vma, start, end, adjust_next); }
0
[ "CWE-399" ]
linux
78f11a255749d09025f54d4e2df4fbcb031530e2
254,479,437,542,397,500,000,000,000,000,000,000,000
9
mm: thp: fix /dev/zero MAP_PRIVATE and vm_flags cleanups The huge_memory.c THP page fault was allowed to run if vm_ops was null (which would succeed for /dev/zero MAP_PRIVATE, as the f_op->mmap wouldn't setup a special vma->vm_ops and it would fallback to regular anonymous memory) but other THP logics weren't fully activated for vmas with vm_file not NULL (/dev/zero has a not NULL vma->vm_file). So this removes the vm_file checks so that /dev/zero also can safely use THP (the other albeit safer approach to fix this bug would have been to prevent the THP initial page fault to run if vm_file was set). After removing the vm_file checks, this also makes huge_memory.c stricter in khugepaged for the DEBUG_VM=y case. It doesn't replace the vm_file check with a is_pfn_mapping check (but it keeps checking for VM_PFNMAP under VM_BUG_ON) because for a is_cow_mapping() mapping VM_PFNMAP should only be allowed to exist before the first page fault, and in turn when vma->anon_vma is null (so preventing khugepaged registration). So I tend to think the previous comment saying if vm_file was set, VM_PFNMAP might have been set and we could still be registered in khugepaged (despite anon_vma was not NULL to be registered in khugepaged) was too paranoid. The is_linear_pfn_mapping check is also I think superfluous (as described by comment) but under DEBUG_VM it is safe to stay. Addresses https://bugzilla.kernel.org/show_bug.cgi?id=33682 Signed-off-by: Andrea Arcangeli <[email protected]> Reported-by: Caspar Zhang <[email protected]> Acked-by: Mel Gorman <[email protected]> Acked-by: Rik van Riel <[email protected]> Cc: <[email protected]> [2.6.38.x] Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static inline int normal_prio(struct task_struct *p) { int prio; if (task_has_rt_policy(p)) prio = MAX_RT_PRIO-1 - p->rt_priority; else prio = __normal_prio(p); return prio; }
0
[]
linux-2.6
8f1bc385cfbab474db6c27b5af1e439614f3025c
51,957,293,515,592,670,000,000,000,000,000,000,000
10
sched: fair: weight calculations In order to level the hierarchy, we need to calculate load based on the root view. That is, each task's load is in the same unit. Consider a root runqueue A whose children are a group B and a task 1, where B in turn contains tasks 2 and 3. To compute 1's load we do: weight(1) / rq_weight(A). To compute 2's load we do: (weight(2) / rq_weight(B)) * (weight(B) / rq_weight(A)). This yields load fractions in comparable units. The consequence is that it changes virtual time. We used to have: vtime_i = time_i / weight_i and vtime = \Sum vtime_i = time / rq_weight. But with the new way of load calculation we get that vtime equals time. Signed-off-by: Peter Zijlstra <[email protected]> Signed-off-by: Ingo Molnar <[email protected]>
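Restated as equations (mirroring the fractions in the message, with w for weight, W for rq_weight, and the tree of root A, group B, tasks 1-3):

\[
\mathrm{load}(1) = \frac{w_1}{W_A}, \qquad
\mathrm{load}(2) = \frac{w_2}{W_B} \cdot \frac{w_B}{W_A}
\]
\[
\text{old: } v_i = \frac{t_i}{w_i}, \quad
v = \sum_i v_i = \frac{t}{W}; \qquad
\text{new: } v = t
\]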
dp_packet_at(const struct dp_packet *b, size_t offset, size_t size) { return offset + size <= dp_packet_size(b) ? (char *) dp_packet_data(b) + offset : NULL; }
0
[ "CWE-400" ]
ovs
79349cbab0b2a755140eedb91833ad2760520a83
93,342,466,422,067,190,000,000,000,000,000,000,000
6
flow: Support extra padding length. Although not required, padding can be optionally added until the packet length is MTU bytes. A packet with extra padding currently fails sanity checks. Vulnerability: CVE-2020-35498 Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.") Reported-by: Joakim Hindersson <[email protected]> Acked-by: Ilya Maximets <[email protected]> Signed-off-by: Flavio Leitner <[email protected]> Signed-off-by: Ilya Maximets <[email protected]>
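The sanity-check relaxation this message describes comes down to comparing lengths with >= rather than ==, since frames may carry pad bytes beyond what the L3 header claims. A tiny illustrative check, not the OVS code:

#include <stddef.h>

/* bytes_available: payload bytes actually present in the buffer;
 * l3_total_length: length claimed by the IP header. Trailing pad
 * bytes make bytes_available larger, which must still be accepted. */
static int l3_length_ok(size_t bytes_available, size_t l3_total_length)
{
    return bytes_available >= l3_total_length;
}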
static inline av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state, int is_signed) { if (get_rac(c, state + 0)) return 0; else { int i, e; unsigned a; e = 0; while (get_rac(c, state + 1 + FFMIN(e, 9))) { // 1..10 e++; if (e > 31) return AVERROR_INVALIDDATA; } a = 1; for (i = e - 1; i >= 0; i--) a += a + get_rac(c, state + 22 + FFMIN(i, 9)); // 22..31 e = -(is_signed && get_rac(c, state + 11 + FFMIN(e, 10))); // 11..21 return (a ^ e) - e; } }
0
[ "CWE-125" ]
FFmpeg
d893253fcd93d11258e98857175e93be7d158708
178,678,052,786,556,400,000,000,000,000,000,000,000
23
avcodec/ffv1dec: Fix out of array read in slice counting Fixes: test-201710.mp4 Found-by: 连一汉 <[email protected]> and Zhibin Hu Signed-off-by: Michael Niedermayer <[email protected]> (cherry picked from commit c20f4fcb74da2d0432c7b54499bb98f48236b904) Signed-off-by: Michael Niedermayer <[email protected]>
fix_unset_addr_list(UnsetAddrList* uslist, regex_t* reg) { int i, offset; EnclosureNode* en; AbsAddrType addr; for (i = 0; i < uslist->num; i++) { if (! NODE_IS_ADDR_FIXED(uslist->us[i].target)) return ONIGERR_PARSER_BUG; en = ENCLOSURE_(uslist->us[i].target); addr = en->m.called_addr; offset = uslist->us[i].offset; BB_WRITE(reg, offset, &addr, SIZE_ABSADDR); } return 0; }
0
[ "CWE-476" ]
oniguruma
410f5916429e7d2920e1d4867388514f605413b8
11,619,073,687,990,597,000,000,000,000,000,000,000
18
fix #87: Read unknown address in onig_error_code_to_str()
static MagickBooleanType ReadOneLayer(const ImageInfo *image_info,Image* image, XCFDocInfo* inDocInfo,XCFLayerInfo *outLayer,const ssize_t layer, ExceptionInfo *exception) { MagickBooleanType status; MagickOffsetType offset; unsigned int foundPropEnd = 0; size_t hierarchy_offset, layer_mask_offset; /* clear the block! */ (void) ResetMagickMemory( outLayer, 0, sizeof( XCFLayerInfo ) ); /* read in the layer width, height, type and name */ outLayer->width = ReadBlobMSBLong(image); outLayer->height = ReadBlobMSBLong(image); outLayer->type = ReadBlobMSBLong(image); (void) ReadBlobStringWithLongSize(image, outLayer->name, sizeof(outLayer->name),exception); if (EOFBlob(image) != MagickFalse) ThrowBinaryException(CorruptImageError,"InsufficientImageDataInFile", image->filename); /* read the layer properties! */ foundPropEnd = 0; while ( (foundPropEnd == MagickFalse) && (EOFBlob(image) == MagickFalse) ) { PropType prop_type = (PropType) ReadBlobMSBLong(image); size_t prop_size = ReadBlobMSBLong(image); switch (prop_type) { case PROP_END: foundPropEnd = 1; break; case PROP_ACTIVE_LAYER: outLayer->active = 1; break; case PROP_FLOATING_SELECTION: outLayer->floating_offset = ReadBlobMSBLong(image); break; case PROP_OPACITY: outLayer->alpha = ReadBlobMSBLong(image); break; case PROP_VISIBLE: outLayer->visible = ReadBlobMSBLong(image); break; case PROP_LINKED: outLayer->linked = ReadBlobMSBLong(image); break; case PROP_PRESERVE_TRANSPARENCY: outLayer->preserve_trans = ReadBlobMSBLong(image); break; case PROP_APPLY_MASK: outLayer->apply_mask = ReadBlobMSBLong(image); break; case PROP_EDIT_MASK: outLayer->edit_mask = ReadBlobMSBLong(image); break; case PROP_SHOW_MASK: outLayer->show_mask = ReadBlobMSBLong(image); break; case PROP_OFFSETS: outLayer->offset_x = ReadBlobMSBSignedLong(image); outLayer->offset_y = ReadBlobMSBSignedLong(image); break; case PROP_MODE: outLayer->mode = ReadBlobMSBLong(image); break; case PROP_TATTOO: outLayer->preserve_trans = ReadBlobMSBLong(image); break; case PROP_PARASITES: { if (DiscardBlobBytes(image,prop_size) == MagickFalse) ThrowFileException(exception,CorruptImageError, "UnexpectedEndOfFile",image->filename); /* ssize_t base = info->cp; GimpParasite *p; while (info->cp - base < prop_size) { p = xcf_load_parasite(info); gimp_drawable_parasite_attach(GIMP_DRAWABLE(layer), p); gimp_parasite_free(p); } if (info->cp - base != prop_size) g_message ("Error detected while loading a layer's parasites"); */ } break; default: /* g_message ("unexpected/unknown layer property: %d (skipping)", prop_type); */ { int buf[16]; ssize_t amount; /* read over it... 
*/ while ((prop_size > 0) && (EOFBlob(image) == MagickFalse)) { amount = (ssize_t) MagickMin(16, prop_size); amount = ReadBlob(image, (size_t) amount, (unsigned char *) &buf); if (!amount) ThrowBinaryException(CorruptImageError,"CorruptImage", image->filename); prop_size -= (size_t) MagickMin(16, (size_t) amount); } } break; } } if (foundPropEnd == MagickFalse) return(MagickFalse); /* allocate the image for this layer */ if (image_info->number_scenes != 0) { ssize_t scene; scene=inDocInfo->number_layers-layer-1; if (scene > (ssize_t) (image_info->scene+image_info->number_scenes-1)) { outLayer->image=CloneImage(image,0,0,MagickTrue,exception); if (outLayer->image == (Image *) NULL) return(MagickFalse); InitXCFImage(outLayer,exception); return(MagickTrue); } } outLayer->image=CloneImage(image,outLayer->width, outLayer->height,MagickTrue, exception); if (outLayer->image == (Image *) NULL) return(MagickFalse); status=SetImageExtent(outLayer->image,outLayer->image->columns, outLayer->image->rows,exception); if (status == MagickFalse) { outLayer->image=DestroyImageList(outLayer->image); return(MagickFalse); } /* clear the image based on the layer opacity */ outLayer->image->background_color.alpha= ScaleCharToQuantum((unsigned char) outLayer->alpha); (void) SetImageBackgroundColor(outLayer->image,exception); InitXCFImage(outLayer,exception); /* set the compositing mode */ outLayer->image->compose = GIMPBlendModeToCompositeOperator( outLayer->mode ); if ( outLayer->visible == MagickFalse ) { /* BOGUS: should really be separate member var! */ outLayer->image->compose = NoCompositeOp; } /* read the hierarchy and layer mask offsets */ hierarchy_offset = ReadBlobMSBLong(image); layer_mask_offset = ReadBlobMSBLong(image); /* read in the hierarchy */ offset=SeekBlob(image, (MagickOffsetType) hierarchy_offset, SEEK_SET); if (offset != (MagickOffsetType) hierarchy_offset) (void) ThrowMagickException(exception,GetMagickModule(), CorruptImageError,"InvalidImageHeader","`%s'",image->filename); if (load_hierarchy (image, inDocInfo, outLayer, exception) == 0) return(MagickFalse); /* read in the layer mask */ if (layer_mask_offset != 0) { offset=SeekBlob(image, (MagickOffsetType) layer_mask_offset, SEEK_SET); #if 0 /* BOGUS: support layer masks! */ layer_mask = xcf_load_layer_mask (info, gimage); if (layer_mask == 0) goto error; /* set the offsets of the layer_mask */ GIMP_DRAWABLE (layer_mask)->offset_x = GIMP_DRAWABLE (layer)->offset_x; GIMP_DRAWABLE (layer_mask)->offset_y = GIMP_DRAWABLE (layer)->offset_y; gimp_layer_add_mask (layer, layer_mask, MagickFalse); layer->mask->apply_mask = apply_mask; layer->mask->edit_mask = edit_mask; layer->mask->show_mask = show_mask; #endif } /* attach the floating selection... */ #if 0 /* BOGUS: we may need to read this, even if we don't support it! */ if (add_floating_sel) { GimpLayer *floating_sel; floating_sel = info->floating_sel; floating_sel_attach (floating_sel, GIMP_DRAWABLE (layer)); } #endif return MagickTrue; }
0
[ "CWE-770" ]
ImageMagick
19dbe11c5060f66abb393d1945107c5f54894fa8
241,181,586,554,026,070,000,000,000,000,000,000,000
209
https://github.com/ImageMagick/ImageMagick/issues/679
double ruby_strtod(const char *s00, char **se) { #ifdef Avoid_Underflow int scale; #endif int bb2, bb5, bbe, bd2, bd5, bbbits, bs2, c, dsign, e, e1, esign, i, j, k, nd, nd0, nf, nz, nz0, sign; const char *s, *s0, *s1; double aadj, adj; double_u aadj1, rv, rv0; Long L; ULong y, z; Bigint *bb, *bb1, *bd, *bd0, *bs, *delta; #ifdef SET_INEXACT int inexact, oldinexact; #endif #ifdef Honor_FLT_ROUNDS int rounding; #endif #ifdef USE_LOCALE const char *s2; #endif errno = 0; sign = nz0 = nz = 0; dval(rv) = 0.; for (s = s00;;s++) switch (*s) { case '-': sign = 1; /* no break */ case '+': if (*++s) goto break2; /* no break */ case 0: goto ret0; case '\t': case '\n': case '\v': case '\f': case '\r': case ' ': continue; default: goto break2; } break2: if (*s == '0') { if (s[1] == 'x' || s[1] == 'X') { static const char hexdigit[] = "0123456789abcdef0123456789ABCDEF"; s0 = ++s; adj = 0; aadj = 1.0; nd0 = -4; if (!*++s || !(s1 = strchr(hexdigit, *s))) goto ret0; if (*s == '0') { while (*++s == '0'); s1 = strchr(hexdigit, *s); } if (s1 != NULL) { do { adj += aadj * ((s1 - hexdigit) & 15); nd0 += 4; aadj /= 16; } while (*++s && (s1 = strchr(hexdigit, *s))); } if (*s == '.') { dsign = 1; if (!*++s || !(s1 = strchr(hexdigit, *s))) goto ret0; if (nd0 < 0) { while (*s == '0') { s++; nd0 -= 4; } } for (; *s && (s1 = strchr(hexdigit, *s)); ++s) { adj += aadj * ((s1 - hexdigit) & 15); if ((aadj /= 16) == 0.0) { while (strchr(hexdigit, *++s)); break; } } } else { dsign = 0; } if (*s == 'P' || *s == 'p') { dsign = 0x2C - *++s; /* +: 2B, -: 2D */ if (abs(dsign) == 1) s++; else dsign = 1; nd = 0; c = *s; if (c < '0' || '9' < c) goto ret0; do { nd *= 10; nd += c; nd -= '0'; c = *++s; /* Float("0x0."+("0"*267)+"1fp2095") */ if (nd + dsign * nd0 > 2095) { while ('0' <= c && c <= '9') c = *++s; break; } } while ('0' <= c && c <= '9'); nd0 += nd * dsign; } else { if (dsign) goto ret0; } dval(rv) = ldexp(adj, nd0); goto ret; } nz0 = 1; while (*++s == '0') ; if (!*s) goto ret; } s0 = s; y = z = 0; for (nd = nf = 0; (c = *s) >= '0' && c <= '9'; nd++, s++) if (nd < 9) y = 10*y + c - '0'; else if (nd < 16) z = 10*z + c - '0'; nd0 = nd; #ifdef USE_LOCALE s1 = localeconv()->decimal_point; if (c == *s1) { c = '.'; if (*++s1) { s2 = s; for (;;) { if (*++s2 != *s1) { c = 0; break; } if (!*++s1) { s = s2; break; } } } } #endif if (c == '.') { if (!ISDIGIT(s[1])) goto dig_done; c = *++s; if (!nd) { for (; c == '0'; c = *++s) nz++; if (c > '0' && c <= '9') { s0 = s; nf += nz; nz = 0; goto have_dig; } goto dig_done; } for (; c >= '0' && c <= '9'; c = *++s) { have_dig: nz++; if (c -= '0') { nf += nz; for (i = 1; i < nz; i++) if (nd++ < 9) y *= 10; else if (nd <= DBL_DIG + 1) z *= 10; if (nd++ < 9) y = 10*y + c; else if (nd <= DBL_DIG + 1) z = 10*z + c; nz = 0; } } } dig_done: e = 0; if (c == 'e' || c == 'E') { if (!nd && !nz && !nz0) { goto ret0; } s00 = s; esign = 0; switch (c = *++s) { case '-': esign = 1; case '+': c = *++s; } if (c >= '0' && c <= '9') { while (c == '0') c = *++s; if (c > '0' && c <= '9') { L = c - '0'; s1 = s; while ((c = *++s) >= '0' && c <= '9') L = 10*L + c - '0'; if (s - s1 > 8 || L > 19999) /* Avoid confusion from exponents * so large that e might overflow. 
*/ e = 19999; /* safe for 16 bit ints */ else e = (int)L; if (esign) e = -e; } else e = 0; } else s = s00; } if (!nd) { if (!nz && !nz0) { #ifdef INFNAN_CHECK /* Check for Nan and Infinity */ switch (c) { case 'i': case 'I': if (match(&s,"nf")) { --s; if (!match(&s,"inity")) ++s; word0(rv) = 0x7ff00000; word1(rv) = 0; goto ret; } break; case 'n': case 'N': if (match(&s, "an")) { word0(rv) = NAN_WORD0; word1(rv) = NAN_WORD1; #ifndef No_Hex_NaN if (*s == '(') /*)*/ hexnan(&rv, &s); #endif goto ret; } } #endif /* INFNAN_CHECK */ ret0: s = s00; sign = 0; } goto ret; } e1 = e -= nf; /* Now we have nd0 digits, starting at s0, followed by a * decimal point, followed by nd-nd0 digits. The number we're * after is the integer represented by those digits times * 10**e */ if (!nd0) nd0 = nd; k = nd < DBL_DIG + 1 ? nd : DBL_DIG + 1; dval(rv) = y; if (k > 9) { #ifdef SET_INEXACT if (k > DBL_DIG) oldinexact = get_inexact(); #endif dval(rv) = tens[k - 9] * dval(rv) + z; } bd0 = bb = bd = bs = delta = 0; if (nd <= DBL_DIG #ifndef RND_PRODQUOT #ifndef Honor_FLT_ROUNDS && Flt_Rounds == 1 #endif #endif ) { if (!e) goto ret; if (e > 0) { if (e <= Ten_pmax) { #ifdef VAX goto vax_ovfl_check; #else #ifdef Honor_FLT_ROUNDS /* round correctly FLT_ROUNDS = 2 or 3 */ if (sign) { dval(rv) = -dval(rv); sign = 0; } #endif /* rv = */ rounded_product(dval(rv), tens[e]); goto ret; #endif } i = DBL_DIG - nd; if (e <= Ten_pmax + i) { /* A fancier test would sometimes let us do * this for larger i values. */ #ifdef Honor_FLT_ROUNDS /* round correctly FLT_ROUNDS = 2 or 3 */ if (sign) { dval(rv) = -dval(rv); sign = 0; } #endif e -= i; dval(rv) *= tens[i]; #ifdef VAX /* VAX exponent range is so narrow we must * worry about overflow here... */ vax_ovfl_check: word0(rv) -= P*Exp_msk1; /* rv = */ rounded_product(dval(rv), tens[e]); if ((word0(rv) & Exp_mask) > Exp_msk1*(DBL_MAX_EXP+Bias-1-P)) goto ovfl; word0(rv) += P*Exp_msk1; #else /* rv = */ rounded_product(dval(rv), tens[e]); #endif goto ret; } } #ifndef Inaccurate_Divide else if (e >= -Ten_pmax) { #ifdef Honor_FLT_ROUNDS /* round correctly FLT_ROUNDS = 2 or 3 */ if (sign) { dval(rv) = -dval(rv); sign = 0; } #endif /* rv = */ rounded_quotient(dval(rv), tens[-e]); goto ret; } #endif } e1 += nd - k; #ifdef IEEE_Arith #ifdef SET_INEXACT inexact = 1; if (k <= DBL_DIG) oldinexact = get_inexact(); #endif #ifdef Avoid_Underflow scale = 0; #endif #ifdef Honor_FLT_ROUNDS if ((rounding = Flt_Rounds) >= 2) { if (sign) rounding = rounding == 2 ? 0 : 2; else if (rounding != 2) rounding = 0; } #endif #endif /*IEEE_Arith*/ /* Get starting approximation = rv * 10**e1 */ if (e1 > 0) { if ((i = e1 & 15) != 0) dval(rv) *= tens[i]; if (e1 &= ~15) { if (e1 > DBL_MAX_10_EXP) { ovfl: #ifndef NO_ERRNO errno = ERANGE; #endif /* Can't trust HUGE_VAL */ #ifdef IEEE_Arith #ifdef Honor_FLT_ROUNDS switch (rounding) { case 0: /* toward 0 */ case 3: /* toward -infinity */ word0(rv) = Big0; word1(rv) = Big1; break; default: word0(rv) = Exp_mask; word1(rv) = 0; } #else /*Honor_FLT_ROUNDS*/ word0(rv) = Exp_mask; word1(rv) = 0; #endif /*Honor_FLT_ROUNDS*/ #ifdef SET_INEXACT /* set overflow bit */ dval(rv0) = 1e300; dval(rv0) *= dval(rv0); #endif #else /*IEEE_Arith*/ word0(rv) = Big0; word1(rv) = Big1; #endif /*IEEE_Arith*/ if (bd0) goto retfree; goto ret; } e1 >>= 4; for (j = 0; e1 > 1; j++, e1 >>= 1) if (e1 & 1) dval(rv) *= bigtens[j]; /* The last multiplication could overflow. 
*/ word0(rv) -= P*Exp_msk1; dval(rv) *= bigtens[j]; if ((z = word0(rv) & Exp_mask) > Exp_msk1*(DBL_MAX_EXP+Bias-P)) goto ovfl; if (z > Exp_msk1*(DBL_MAX_EXP+Bias-1-P)) { /* set to largest number */ /* (Can't trust DBL_MAX) */ word0(rv) = Big0; word1(rv) = Big1; } else word0(rv) += P*Exp_msk1; } } else if (e1 < 0) { e1 = -e1; if ((i = e1 & 15) != 0) dval(rv) /= tens[i]; if (e1 >>= 4) { if (e1 >= 1 << n_bigtens) goto undfl; #ifdef Avoid_Underflow if (e1 & Scale_Bit) scale = 2*P; for (j = 0; e1 > 0; j++, e1 >>= 1) if (e1 & 1) dval(rv) *= tinytens[j]; if (scale && (j = 2*P + 1 - ((word0(rv) & Exp_mask) >> Exp_shift)) > 0) { /* scaled rv is denormal; zap j low bits */ if (j >= 32) { word1(rv) = 0; if (j >= 53) word0(rv) = (P+2)*Exp_msk1; else word0(rv) &= 0xffffffff << (j-32); } else word1(rv) &= 0xffffffff << j; } #else for (j = 0; e1 > 1; j++, e1 >>= 1) if (e1 & 1) dval(rv) *= tinytens[j]; /* The last multiplication could underflow. */ dval(rv0) = dval(rv); dval(rv) *= tinytens[j]; if (!dval(rv)) { dval(rv) = 2.*dval(rv0); dval(rv) *= tinytens[j]; #endif if (!dval(rv)) { undfl: dval(rv) = 0.; #ifndef NO_ERRNO errno = ERANGE; #endif if (bd0) goto retfree; goto ret; } #ifndef Avoid_Underflow word0(rv) = Tiny0; word1(rv) = Tiny1; /* The refinement below will clean * this approximation up. */ } #endif } } /* Now the hard part -- adjusting rv to the correct value.*/ /* Put digits into bd: true value = bd * 10^e */ bd0 = s2b(s0, nd0, nd, y); for (;;) { bd = Balloc(bd0->k); Bcopy(bd, bd0); bb = d2b(dval(rv), &bbe, &bbbits); /* rv = bb * 2^bbe */ bs = i2b(1); if (e >= 0) { bb2 = bb5 = 0; bd2 = bd5 = e; } else { bb2 = bb5 = -e; bd2 = bd5 = 0; } if (bbe >= 0) bb2 += bbe; else bd2 -= bbe; bs2 = bb2; #ifdef Honor_FLT_ROUNDS if (rounding != 1) bs2++; #endif #ifdef Avoid_Underflow j = bbe - scale; i = j + bbbits - 1; /* logb(rv) */ if (i < Emin) /* denormal */ j += P - Emin; else j = P + 1 - bbbits; #else /*Avoid_Underflow*/ #ifdef Sudden_Underflow #ifdef IBM j = 1 + 4*P - 3 - bbbits + ((bbe + bbbits - 1) & 3); #else j = P + 1 - bbbits; #endif #else /*Sudden_Underflow*/ j = bbe; i = j + bbbits - 1; /* logb(rv) */ if (i < Emin) /* denormal */ j += P - Emin; else j = P + 1 - bbbits; #endif /*Sudden_Underflow*/ #endif /*Avoid_Underflow*/ bb2 += j; bd2 += j; #ifdef Avoid_Underflow bd2 += scale; #endif i = bb2 < bd2 ? 
bb2 : bd2; if (i > bs2) i = bs2; if (i > 0) { bb2 -= i; bd2 -= i; bs2 -= i; } if (bb5 > 0) { bs = pow5mult(bs, bb5); bb1 = mult(bs, bb); Bfree(bb); bb = bb1; } if (bb2 > 0) bb = lshift(bb, bb2); if (bd5 > 0) bd = pow5mult(bd, bd5); if (bd2 > 0) bd = lshift(bd, bd2); if (bs2 > 0) bs = lshift(bs, bs2); delta = diff(bb, bd); dsign = delta->sign; delta->sign = 0; i = cmp(delta, bs); #ifdef Honor_FLT_ROUNDS if (rounding != 1) { if (i < 0) { /* Error is less than an ulp */ if (!delta->x[0] && delta->wds <= 1) { /* exact */ #ifdef SET_INEXACT inexact = 0; #endif break; } if (rounding) { if (dsign) { adj = 1.; goto apply_adj; } } else if (!dsign) { adj = -1.; if (!word1(rv) && !(word0(rv) & Frac_mask)) { y = word0(rv) & Exp_mask; #ifdef Avoid_Underflow if (!scale || y > 2*P*Exp_msk1) #else if (y) #endif { delta = lshift(delta,Log2P); if (cmp(delta, bs) <= 0) adj = -0.5; } } apply_adj: #ifdef Avoid_Underflow if (scale && (y = word0(rv) & Exp_mask) <= 2*P*Exp_msk1) word0(adj) += (2*P+1)*Exp_msk1 - y; #else #ifdef Sudden_Underflow if ((word0(rv) & Exp_mask) <= P*Exp_msk1) { word0(rv) += P*Exp_msk1; dval(rv) += adj*ulp(dval(rv)); word0(rv) -= P*Exp_msk1; } else #endif /*Sudden_Underflow*/ #endif /*Avoid_Underflow*/ dval(rv) += adj*ulp(dval(rv)); } break; } adj = ratio(delta, bs); if (adj < 1.) adj = 1.; if (adj <= 0x7ffffffe) { /* adj = rounding ? ceil(adj) : floor(adj); */ y = adj; if (y != adj) { if (!((rounding>>1) ^ dsign)) y++; adj = y; } } #ifdef Avoid_Underflow if (scale && (y = word0(rv) & Exp_mask) <= 2*P*Exp_msk1) word0(adj) += (2*P+1)*Exp_msk1 - y; #else #ifdef Sudden_Underflow if ((word0(rv) & Exp_mask) <= P*Exp_msk1) { word0(rv) += P*Exp_msk1; adj *= ulp(dval(rv)); if (dsign) dval(rv) += adj; else dval(rv) -= adj; word0(rv) -= P*Exp_msk1; goto cont; } #endif /*Sudden_Underflow*/ #endif /*Avoid_Underflow*/ adj *= ulp(dval(rv)); if (dsign) dval(rv) += adj; else dval(rv) -= adj; goto cont; } #endif /*Honor_FLT_ROUNDS*/ if (i < 0) { /* Error is less than half an ulp -- check for * special case of mantissa a power of two. */ if (dsign || word1(rv) || word0(rv) & Bndry_mask #ifdef IEEE_Arith #ifdef Avoid_Underflow || (word0(rv) & Exp_mask) <= (2*P+1)*Exp_msk1 #else || (word0(rv) & Exp_mask) <= Exp_msk1 #endif #endif ) { #ifdef SET_INEXACT if (!delta->x[0] && delta->wds <= 1) inexact = 0; #endif break; } if (!delta->x[0] && delta->wds <= 1) { /* exact result */ #ifdef SET_INEXACT inexact = 0; #endif break; } delta = lshift(delta,Log2P); if (cmp(delta, bs) > 0) goto drop_down; break; } if (i == 0) { /* exactly half-way between */ if (dsign) { if ((word0(rv) & Bndry_mask1) == Bndry_mask1 && word1(rv) == ( #ifdef Avoid_Underflow (scale && (y = word0(rv) & Exp_mask) <= 2*P*Exp_msk1) ? (0xffffffff & (0xffffffff << (2*P+1-(y>>Exp_shift)))) : #endif 0xffffffff)) { /*boundary case -- increment exponent*/ word0(rv) = (word0(rv) & Exp_mask) + Exp_msk1 #ifdef IBM | Exp_msk1 >> 4 #endif ; word1(rv) = 0; #ifdef Avoid_Underflow dsign = 0; #endif break; } } else if (!(word0(rv) & Bndry_mask) && !word1(rv)) { drop_down: /* boundary case -- decrement exponent */ #ifdef Sudden_Underflow /*{{*/ L = word0(rv) & Exp_mask; #ifdef IBM if (L < Exp_msk1) #else #ifdef Avoid_Underflow if (L <= (scale ? 
(2*P+1)*Exp_msk1 : Exp_msk1)) #else if (L <= Exp_msk1) #endif /*Avoid_Underflow*/ #endif /*IBM*/ goto undfl; L -= Exp_msk1; #else /*Sudden_Underflow}{*/ #ifdef Avoid_Underflow if (scale) { L = word0(rv) & Exp_mask; if (L <= (2*P+1)*Exp_msk1) { if (L > (P+2)*Exp_msk1) /* round even ==> */ /* accept rv */ break; /* rv = smallest denormal */ goto undfl; } } #endif /*Avoid_Underflow*/ L = (word0(rv) & Exp_mask) - Exp_msk1; #endif /*Sudden_Underflow}}*/ word0(rv) = L | Bndry_mask1; word1(rv) = 0xffffffff; #ifdef IBM goto cont; #else break; #endif } #ifndef ROUND_BIASED if (!(word1(rv) & LSB)) break; #endif if (dsign) dval(rv) += ulp(dval(rv)); #ifndef ROUND_BIASED else { dval(rv) -= ulp(dval(rv)); #ifndef Sudden_Underflow if (!dval(rv)) goto undfl; #endif } #ifdef Avoid_Underflow dsign = 1 - dsign; #endif #endif break; } if ((aadj = ratio(delta, bs)) <= 2.) { if (dsign) aadj = dval(aadj1) = 1.; else if (word1(rv) || word0(rv) & Bndry_mask) { #ifndef Sudden_Underflow if (word1(rv) == Tiny1 && !word0(rv)) goto undfl; #endif aadj = 1.; dval(aadj1) = -1.; } else { /* special case -- power of FLT_RADIX to be */ /* rounded down... */ if (aadj < 2./FLT_RADIX) aadj = 1./FLT_RADIX; else aadj *= 0.5; dval(aadj1) = -aadj; } } else { aadj *= 0.5; dval(aadj1) = dsign ? aadj : -aadj; #ifdef Check_FLT_ROUNDS switch (Rounding) { case 2: /* towards +infinity */ dval(aadj1) -= 0.5; break; case 0: /* towards 0 */ case 3: /* towards -infinity */ dval(aadj1) += 0.5; } #else if (Flt_Rounds == 0) dval(aadj1) += 0.5; #endif /*Check_FLT_ROUNDS*/ } y = word0(rv) & Exp_mask; /* Check for overflow */ if (y == Exp_msk1*(DBL_MAX_EXP+Bias-1)) { dval(rv0) = dval(rv); word0(rv) -= P*Exp_msk1; adj = dval(aadj1) * ulp(dval(rv)); dval(rv) += adj; if ((word0(rv) & Exp_mask) >= Exp_msk1*(DBL_MAX_EXP+Bias-P)) { if (word0(rv0) == Big0 && word1(rv0) == Big1) goto ovfl; word0(rv) = Big0; word1(rv) = Big1; goto cont; } else word0(rv) += P*Exp_msk1; } else { #ifdef Avoid_Underflow if (scale && y <= 2*P*Exp_msk1) { if (aadj <= 0x7fffffff) { if ((z = (int)aadj) <= 0) z = 1; aadj = z; dval(aadj1) = dsign ? aadj : -aadj; } word0(aadj1) += (2*P+1)*Exp_msk1 - y; } adj = dval(aadj1) * ulp(dval(rv)); dval(rv) += adj; #else #ifdef Sudden_Underflow if ((word0(rv) & Exp_mask) <= P*Exp_msk1) { dval(rv0) = dval(rv); word0(rv) += P*Exp_msk1; adj = dval(aadj1) * ulp(dval(rv)); dval(rv) += adj; #ifdef IBM if ((word0(rv) & Exp_mask) < P*Exp_msk1) #else if ((word0(rv) & Exp_mask) <= P*Exp_msk1) #endif { if (word0(rv0) == Tiny0 && word1(rv0) == Tiny1) goto undfl; word0(rv) = Tiny0; word1(rv) = Tiny1; goto cont; } else word0(rv) -= P*Exp_msk1; } else { adj = dval(aadj1) * ulp(dval(rv)); dval(rv) += adj; } #else /*Sudden_Underflow*/ /* Compute adj so that the IEEE rounding rules will * correctly round rv + adj in some half-way cases. * If rv * ulp(rv) is denormalized (i.e., * y <= (P-1)*Exp_msk1), we must adjust aadj to avoid * trouble from bits lost to denormalization; * example: 1.2e-307 . */ if (y <= (P-1)*Exp_msk1 && aadj > 1.) { dval(aadj1) = (double)(int)(aadj + 0.5); if (!dsign) dval(aadj1) = -dval(aadj1); } adj = dval(aadj1) * ulp(dval(rv)); dval(rv) += adj; #endif /*Sudden_Underflow*/ #endif /*Avoid_Underflow*/ } z = word0(rv) & Exp_mask; #ifndef SET_INEXACT #ifdef Avoid_Underflow if (!scale) #endif if (y == z) { /* Can we stop now? */ L = (Long)aadj; aadj -= L; /* The tolerances below are conservative. 
*/ if (dsign || word1(rv) || word0(rv) & Bndry_mask) { if (aadj < .4999999 || aadj > .5000001) break; } else if (aadj < .4999999/FLT_RADIX) break; } #endif cont: Bfree(bb); Bfree(bd); Bfree(bs); Bfree(delta); } #ifdef SET_INEXACT if (inexact) { if (!oldinexact) { word0(rv0) = Exp_1 + (70 << Exp_shift); word1(rv0) = 0; dval(rv0) += 1.; } } else if (!oldinexact) clear_inexact(); #endif #ifdef Avoid_Underflow if (scale) { word0(rv0) = Exp_1 - 2*P*Exp_msk1; word1(rv0) = 0; dval(rv) *= dval(rv0); #ifndef NO_ERRNO /* try to avoid the bug of testing an 8087 register value */ if (word0(rv) == 0 && word1(rv) == 0) errno = ERANGE; #endif } #endif /* Avoid_Underflow */ #ifdef SET_INEXACT if (inexact && !(word0(rv) & Exp_mask)) { /* set underflow bit */ dval(rv0) = 1e-300; dval(rv0) *= dval(rv0); } #endif retfree: Bfree(bb); Bfree(bd); Bfree(bs); Bfree(bd0); Bfree(delta); ret: if (se) *se = (char *)s; return sign ? -dval(rv) : dval(rv);
1
[ "CWE-119" ]
ruby
5cb83d9dab13e14e6146f455ffd9fed4254d238f
336,205,701,680,627,040,000,000,000,000,000,000,000
955
util.c: ignore too long fraction part * util.c (ruby_strtod): ignore too long fraction part, which does not affect the result. git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@43775 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
find_status_by_pid (pid) pid_t pid; { int i; i = find_index_by_pid (pid); if (i == NO_PID) return (PROC_BAD); if (pid_list[i].flags & PROC_RUNNING) return (PROC_STILL_ALIVE); return (pid_list[i].status); }
0
[]
bash
955543877583837c85470f7fb8a97b7aa8d45e6c
122,479,295,042,844,280,000,000,000,000,000,000,000
12
bash-4.4-rc2 release
struct json_object* json_tokener_parse_ex(struct json_tokener *tok, const char *str, int len) { struct json_object *obj = NULL; char c = '\1'; #ifdef HAVE_SETLOCALE char *oldlocale=NULL, *tmplocale; tmplocale = setlocale(LC_NUMERIC, NULL); if (tmplocale) oldlocale = strdup(tmplocale); setlocale(LC_NUMERIC, "C"); #endif tok->char_offset = 0; tok->err = json_tokener_success; while (PEEK_CHAR(c, tok)) { redo_char: switch(state) { case json_tokener_state_eatws: /* Advance until we change state */ while (isspace((int)c)) { if ((!ADVANCE_CHAR(str, tok)) || (!PEEK_CHAR(c, tok))) goto out; } if(c == '/' && !(tok->flags & JSON_TOKENER_STRICT)) { printbuf_reset(tok->pb); printbuf_memappend_fast(tok->pb, &c, 1); state = json_tokener_state_comment_start; } else { state = saved_state; goto redo_char; } break; case json_tokener_state_start: switch(c) { case '{': state = json_tokener_state_eatws; saved_state = json_tokener_state_object_field_start; current = json_object_new_object(); break; case '[': state = json_tokener_state_eatws; saved_state = json_tokener_state_array; current = json_object_new_array(); break; case 'I': case 'i': state = json_tokener_state_inf; printbuf_reset(tok->pb); tok->st_pos = 0; goto redo_char; case 'N': case 'n': state = json_tokener_state_null; // or NaN printbuf_reset(tok->pb); tok->st_pos = 0; goto redo_char; case '\'': if (tok->flags & JSON_TOKENER_STRICT) { /* in STRICT mode only double-quote are allowed */ tok->err = json_tokener_error_parse_unexpected; goto out; } case '"': state = json_tokener_state_string; printbuf_reset(tok->pb); tok->quote_char = c; break; case 'T': case 't': case 'F': case 'f': state = json_tokener_state_boolean; printbuf_reset(tok->pb); tok->st_pos = 0; goto redo_char; #if defined(__GNUC__) case '0' ... '9': #else case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': #endif case '-': state = json_tokener_state_number; printbuf_reset(tok->pb); tok->is_double = 0; goto redo_char; default: tok->err = json_tokener_error_parse_unexpected; goto out; } break; case json_tokener_state_finish: if(tok->depth == 0) goto out; obj = json_object_get(current); json_tokener_reset_level(tok, tok->depth); tok->depth--; goto redo_char; case json_tokener_state_inf: /* aka starts with 'i' */ { int size; int size_inf; int is_negative = 0; printbuf_memappend_fast(tok->pb, &c, 1); size = json_min(tok->st_pos+1, json_null_str_len); size_inf = json_min(tok->st_pos+1, json_inf_str_len); char *infbuf = tok->pb->buf; if (*infbuf == '-') { infbuf++; is_negative = 1; } if ((!(tok->flags & JSON_TOKENER_STRICT) && strncasecmp(json_inf_str, infbuf, size_inf) == 0) || (strncmp(json_inf_str, infbuf, size_inf) == 0) ) { if (tok->st_pos == json_inf_str_len) { current = json_object_new_double(is_negative ? 
-INFINITY : INFINITY); saved_state = json_tokener_state_finish; state = json_tokener_state_eatws; goto redo_char; } } else { tok->err = json_tokener_error_parse_unexpected; goto out; } tok->st_pos++; } break; case json_tokener_state_null: /* aka starts with 'n' */ { int size; int size_nan; printbuf_memappend_fast(tok->pb, &c, 1); size = json_min(tok->st_pos+1, json_null_str_len); size_nan = json_min(tok->st_pos+1, json_nan_str_len); if((!(tok->flags & JSON_TOKENER_STRICT) && strncasecmp(json_null_str, tok->pb->buf, size) == 0) || (strncmp(json_null_str, tok->pb->buf, size) == 0) ) { if (tok->st_pos == json_null_str_len) { current = NULL; saved_state = json_tokener_state_finish; state = json_tokener_state_eatws; goto redo_char; } } else if ((!(tok->flags & JSON_TOKENER_STRICT) && strncasecmp(json_nan_str, tok->pb->buf, size_nan) == 0) || (strncmp(json_nan_str, tok->pb->buf, size_nan) == 0) ) { if (tok->st_pos == json_nan_str_len) { current = json_object_new_double(NAN); saved_state = json_tokener_state_finish; state = json_tokener_state_eatws; goto redo_char; } } else { tok->err = json_tokener_error_parse_null; goto out; } tok->st_pos++; } break; case json_tokener_state_comment_start: if(c == '*') { state = json_tokener_state_comment; } else if(c == '/') { state = json_tokener_state_comment_eol; } else { tok->err = json_tokener_error_parse_comment; goto out; } printbuf_memappend_fast(tok->pb, &c, 1); break; case json_tokener_state_comment: { /* Advance until we change state */ const char *case_start = str; while(c != '*') { if (!ADVANCE_CHAR(str, tok) || !PEEK_CHAR(c, tok)) { printbuf_memappend_fast(tok->pb, case_start, str-case_start); goto out; } } printbuf_memappend_fast(tok->pb, case_start, 1+str-case_start); state = json_tokener_state_comment_end; } break; case json_tokener_state_comment_eol: { /* Advance until we change state */ const char *case_start = str; while(c != '\n') { if (!ADVANCE_CHAR(str, tok) || !PEEK_CHAR(c, tok)) { printbuf_memappend_fast(tok->pb, case_start, str-case_start); goto out; } } printbuf_memappend_fast(tok->pb, case_start, str-case_start); MC_DEBUG("json_tokener_comment: %s\n", tok->pb->buf); state = json_tokener_state_eatws; } break; case json_tokener_state_comment_end: printbuf_memappend_fast(tok->pb, &c, 1); if(c == '/') { MC_DEBUG("json_tokener_comment: %s\n", tok->pb->buf); state = json_tokener_state_eatws; } else { state = json_tokener_state_comment; } break; case json_tokener_state_string: { /* Advance until we change state */ const char *case_start = str; while(1) { if(c == tok->quote_char) { printbuf_memappend_fast(tok->pb, case_start, str-case_start); current = json_object_new_string_len(tok->pb->buf, tok->pb->bpos); saved_state = json_tokener_state_finish; state = json_tokener_state_eatws; break; } else if(c == '\\') { printbuf_memappend_fast(tok->pb, case_start, str-case_start); saved_state = json_tokener_state_string; state = json_tokener_state_string_escape; break; } if (!ADVANCE_CHAR(str, tok) || !PEEK_CHAR(c, tok)) { printbuf_memappend_fast(tok->pb, case_start, str-case_start); goto out; } } } break; case json_tokener_state_string_escape: switch(c) { case '"': case '\\': case '/': printbuf_memappend_fast(tok->pb, &c, 1); state = saved_state; break; case 'b': case 'n': case 'r': case 't': case 'f': if(c == 'b') printbuf_memappend_fast(tok->pb, "\b", 1); else if(c == 'n') printbuf_memappend_fast(tok->pb, "\n", 1); else if(c == 'r') printbuf_memappend_fast(tok->pb, "\r", 1); else if(c == 't') printbuf_memappend_fast(tok->pb, "\t", 1); else if(c == 
'f') printbuf_memappend_fast(tok->pb, "\f", 1); state = saved_state; break; case 'u': tok->ucs_char = 0; tok->st_pos = 0; state = json_tokener_state_escape_unicode; break; default: tok->err = json_tokener_error_parse_string; goto out; } break; case json_tokener_state_escape_unicode: { unsigned int got_hi_surrogate = 0; /* Handle a 4-byte sequence, or two sequences if a surrogate pair */ while(1) { if(strchr(json_hex_chars, c)) { tok->ucs_char += ((unsigned int)hexdigit(c) << ((3-tok->st_pos++)*4)); if(tok->st_pos == 4) { unsigned char unescaped_utf[4]; if (got_hi_surrogate) { if (IS_LOW_SURROGATE(tok->ucs_char)) { /* Recalculate the ucs_char, then fall thru to process normally */ tok->ucs_char = DECODE_SURROGATE_PAIR(got_hi_surrogate, tok->ucs_char); } else { /* Hi surrogate was not followed by a low surrogate */ /* Replace the hi and process the rest normally */ printbuf_memappend_fast(tok->pb, (char*)utf8_replacement_char, 3); } got_hi_surrogate = 0; } if (tok->ucs_char < 0x80) { unescaped_utf[0] = tok->ucs_char; printbuf_memappend_fast(tok->pb, (char*)unescaped_utf, 1); } else if (tok->ucs_char < 0x800) { unescaped_utf[0] = 0xc0 | (tok->ucs_char >> 6); unescaped_utf[1] = 0x80 | (tok->ucs_char & 0x3f); printbuf_memappend_fast(tok->pb, (char*)unescaped_utf, 2); } else if (IS_HIGH_SURROGATE(tok->ucs_char)) { /* Got a high surrogate. Remember it and look for the * the beginning of another sequence, which should be the * low surrogate. */ got_hi_surrogate = tok->ucs_char; /* Not at end, and the next two chars should be "\u" */ if ((tok->char_offset+1 != len) && (tok->char_offset+2 != len) && (str[1] == '\\') && (str[2] == 'u')) { /* Advance through the 16 bit surrogate, and move on to the * next sequence. The next step is to process the following * characters. */ if( !ADVANCE_CHAR(str, tok) || !ADVANCE_CHAR(str, tok) ) { printbuf_memappend_fast(tok->pb, (char*)utf8_replacement_char, 3); } /* Advance to the first char of the next sequence and * continue processing with the next sequence. */ if (!ADVANCE_CHAR(str, tok) || !PEEK_CHAR(c, tok)) { printbuf_memappend_fast(tok->pb, (char*)utf8_replacement_char, 3); goto out; } tok->ucs_char = 0; tok->st_pos = 0; continue; /* other json_tokener_state_escape_unicode */ } else { /* Got a high surrogate without another sequence following * it. Put a replacement char in for the hi surrogate * and pretend we finished. 
*/ printbuf_memappend_fast(tok->pb, (char*)utf8_replacement_char, 3); } } else if (IS_LOW_SURROGATE(tok->ucs_char)) { /* Got a low surrogate not preceded by a high */ printbuf_memappend_fast(tok->pb, (char*)utf8_replacement_char, 3); } else if (tok->ucs_char < 0x10000) { unescaped_utf[0] = 0xe0 | (tok->ucs_char >> 12); unescaped_utf[1] = 0x80 | ((tok->ucs_char >> 6) & 0x3f); unescaped_utf[2] = 0x80 | (tok->ucs_char & 0x3f); printbuf_memappend_fast(tok->pb, (char*)unescaped_utf, 3); } else if (tok->ucs_char < 0x110000) { unescaped_utf[0] = 0xf0 | ((tok->ucs_char >> 18) & 0x07); unescaped_utf[1] = 0x80 | ((tok->ucs_char >> 12) & 0x3f); unescaped_utf[2] = 0x80 | ((tok->ucs_char >> 6) & 0x3f); unescaped_utf[3] = 0x80 | (tok->ucs_char & 0x3f); printbuf_memappend_fast(tok->pb, (char*)unescaped_utf, 4); } else { /* Don't know what we got--insert the replacement char */ printbuf_memappend_fast(tok->pb, (char*)utf8_replacement_char, 3); } state = saved_state; break; } } else { tok->err = json_tokener_error_parse_string; goto out; } if (!ADVANCE_CHAR(str, tok) || !PEEK_CHAR(c, tok)) { if (got_hi_surrogate) /* Clean up any pending chars */ printbuf_memappend_fast(tok->pb, (char*)utf8_replacement_char, 3); goto out; } } } break; case json_tokener_state_boolean: { int size1, size2; printbuf_memappend_fast(tok->pb, &c, 1); size1 = json_min(tok->st_pos+1, json_true_str_len); size2 = json_min(tok->st_pos+1, json_false_str_len); if((!(tok->flags & JSON_TOKENER_STRICT) && strncasecmp(json_true_str, tok->pb->buf, size1) == 0) || (strncmp(json_true_str, tok->pb->buf, size1) == 0) ) { if(tok->st_pos == json_true_str_len) { current = json_object_new_boolean(1); saved_state = json_tokener_state_finish; state = json_tokener_state_eatws; goto redo_char; } } else if((!(tok->flags & JSON_TOKENER_STRICT) && strncasecmp(json_false_str, tok->pb->buf, size2) == 0) || (strncmp(json_false_str, tok->pb->buf, size2) == 0)) { if(tok->st_pos == json_false_str_len) { current = json_object_new_boolean(0); saved_state = json_tokener_state_finish; state = json_tokener_state_eatws; goto redo_char; } } else { tok->err = json_tokener_error_parse_boolean; goto out; } tok->st_pos++; } break; case json_tokener_state_number: { /* Advance until we change state */ const char *case_start = str; int case_len=0; while(c && strchr(json_number_chars, c)) { ++case_len; if(c == '.' 
|| c == 'e' || c == 'E') tok->is_double = 1; if (!ADVANCE_CHAR(str, tok) || !PEEK_CHAR(c, tok)) { printbuf_memappend_fast(tok->pb, case_start, case_len); goto out; } } if (case_len>0) printbuf_memappend_fast(tok->pb, case_start, case_len); // Check for -Infinity if (tok->pb->buf[0] == '-' && case_len == 1 && (c == 'i' || c == 'I')) { state = json_tokener_state_inf; goto redo_char; } } { int64_t num64; double numd; if (!tok->is_double && json_parse_int64(tok->pb->buf, &num64) == 0) { if (num64 && tok->pb->buf[0]=='0' && (tok->flags & JSON_TOKENER_STRICT)) { /* in strict mode, number must not start with 0 */ tok->err = json_tokener_error_parse_number; goto out; } current = json_object_new_int64(num64); } else if(tok->is_double && json_parse_double(tok->pb->buf, &numd) == 0) { current = json_object_new_double_s(numd, tok->pb->buf); } else { tok->err = json_tokener_error_parse_number; goto out; } saved_state = json_tokener_state_finish; state = json_tokener_state_eatws; goto redo_char; } break; case json_tokener_state_array_after_sep: case json_tokener_state_array: if(c == ']') { if (state == json_tokener_state_array_after_sep && (tok->flags & JSON_TOKENER_STRICT)) { tok->err = json_tokener_error_parse_unexpected; goto out; } saved_state = json_tokener_state_finish; state = json_tokener_state_eatws; } else { if(tok->depth >= tok->max_depth-1) { tok->err = json_tokener_error_depth; goto out; } state = json_tokener_state_array_add; tok->depth++; json_tokener_reset_level(tok, tok->depth); goto redo_char; } break; case json_tokener_state_array_add: json_object_array_add(current, obj); saved_state = json_tokener_state_array_sep; state = json_tokener_state_eatws; goto redo_char; case json_tokener_state_array_sep: if(c == ']') { saved_state = json_tokener_state_finish; state = json_tokener_state_eatws; } else if(c == ',') { saved_state = json_tokener_state_array_after_sep; state = json_tokener_state_eatws; } else { tok->err = json_tokener_error_parse_array; goto out; } break; case json_tokener_state_object_field_start: case json_tokener_state_object_field_start_after_sep: if(c == '}') { if (state == json_tokener_state_object_field_start_after_sep && (tok->flags & JSON_TOKENER_STRICT)) { tok->err = json_tokener_error_parse_unexpected; goto out; } saved_state = json_tokener_state_finish; state = json_tokener_state_eatws; } else if (c == '"' || c == '\'') { tok->quote_char = c; printbuf_reset(tok->pb); state = json_tokener_state_object_field; } else { tok->err = json_tokener_error_parse_object_key_name; goto out; } break; case json_tokener_state_object_field: { /* Advance until we change state */ const char *case_start = str; while(1) { if(c == tok->quote_char) { printbuf_memappend_fast(tok->pb, case_start, str-case_start); obj_field_name = strdup(tok->pb->buf); saved_state = json_tokener_state_object_field_end; state = json_tokener_state_eatws; break; } else if(c == '\\') { printbuf_memappend_fast(tok->pb, case_start, str-case_start); saved_state = json_tokener_state_object_field; state = json_tokener_state_string_escape; break; } if (!ADVANCE_CHAR(str, tok) || !PEEK_CHAR(c, tok)) { printbuf_memappend_fast(tok->pb, case_start, str-case_start); goto out; } } } break; case json_tokener_state_object_field_end: if(c == ':') { saved_state = json_tokener_state_object_value; state = json_tokener_state_eatws; } else { tok->err = json_tokener_error_parse_object_key_sep; goto out; } break; case json_tokener_state_object_value: if(tok->depth >= tok->max_depth-1) { tok->err = json_tokener_error_depth; goto out; } 
state = json_tokener_state_object_value_add; tok->depth++; json_tokener_reset_level(tok, tok->depth); goto redo_char; case json_tokener_state_object_value_add: json_object_object_add(current, obj_field_name, obj); free(obj_field_name); obj_field_name = NULL; saved_state = json_tokener_state_object_sep; state = json_tokener_state_eatws; goto redo_char; case json_tokener_state_object_sep: if(c == '}') { saved_state = json_tokener_state_finish; state = json_tokener_state_eatws; } else if(c == ',') { saved_state = json_tokener_state_object_field_start_after_sep; state = json_tokener_state_eatws; } else { tok->err = json_tokener_error_parse_object_value_sep; goto out; } break; } if (!ADVANCE_CHAR(str, tok)) goto out; } /* while(POP_CHAR) */ out: if (c && (state == json_tokener_state_finish) && (tok->depth == 0) && (tok->flags & JSON_TOKENER_STRICT)) { /* unexpected char after JSON data */ tok->err = json_tokener_error_parse_unexpected; } if (!c) { /* We hit an eof char (0) */ if(state != json_tokener_state_finish && saved_state != json_tokener_state_finish) tok->err = json_tokener_error_parse_eof; } #ifdef HAVE_SETLOCALE setlocale(LC_NUMERIC, oldlocale); if (oldlocale) free(oldlocale); #endif if (tok->err == json_tokener_success) { json_object *ret = json_object_get(current); int ii; /* Partially reset, so we parse additional objects on subsequent calls. */ for(ii = tok->depth; ii >= 0; ii--) json_tokener_reset_level(tok, ii); return ret; } MC_DEBUG("json_tokener_parse_ex: error %s at offset %d\n", json_tokener_errors[tok->err], tok->char_offset); return NULL; }
1
[ "CWE-119", "CWE-310" ]
json-c
64e36901a0614bf64a19bc3396469c66dcd0b015
207,693,523,410,174,330,000,000,000,000,000,000,000
643
Patch to address the following issues: * CVE-2013-6371: hash collision denial of service * CVE-2013-6370: buffer overflow if size_t is larger than int
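The size_t-versus-int hazard named in CVE-2013-6370 is easy to show in isolation. A minimal, hypothetical guard (not json-c's actual code) rejects lengths before they can be truncated through an int conversion:

    #include <limits.h>
    #include <stddef.h>

    /* Hypothetical guard: a size_t silently truncated to int can wrap to a
     * small or negative value, turning a huge request into a short buffer. */
    static int length_fits_int(size_t len)
    {
        return len <= (size_t)INT_MAX;  /* reject before any int conversion */
    }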
static struct kvm_memslots *install_new_memslots(struct kvm *kvm, struct kvm_memslots *slots, struct kvm_memory_slot *new) { struct kvm_memslots *old_memslots = kvm->memslots; update_memslots(slots, new, kvm->memslots->generation); rcu_assign_pointer(kvm->memslots, slots); synchronize_srcu_expedited(&kvm->srcu); kvm_arch_memslots_updated(kvm); return old_memslots; }
0
[ "CWE-20" ]
linux
338c7dbadd2671189cec7faf64c84d01071b3f96
21,762,333,018,234,960,000,000,000,000,000,000,000
13
KVM: Improve create VCPU parameter (CVE-2013-4587) In multiple functions the vcpu_id is used as an offset into a bitfield. A malicious user could specify a vcpu_id greater than 255 in order to set or clear bits in kernel memory. This could be used to elevate privileges in the kernel. This patch verifies that the vcpu_id provided is less than 255. The API documentation already specifies that the vcpu_id must be less than max_vcpus, but this is currently not checked. Reported-by: Andrew Honig <[email protected]> Cc: [email protected] Signed-off-by: Andrew Honig <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
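The fix the message describes amounts to a range check before the id is ever used as a bit offset. A standalone sketch, with the 255 limit taken from the commit message rather than from the real KVM headers:

    /* 255 is the bound quoted in the message above; the real limit lives in
     * KVM's headers and may differ across kernel versions. */
    static int vcpu_id_valid(unsigned int vcpu_id)
    {
        return vcpu_id < 255;  /* larger ids could index outside the bitfield */
    }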
void kvm_set_pfn_accessed(kvm_pfn_t pfn) { if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) mark_page_accessed(pfn_to_page(pfn));
0
[ "CWE-459" ]
linux
683412ccf61294d727ead4a73d97397396e69a6b
30,046,446,367,683,970,000,000,000,000,000,000,000
5
KVM: SEV: add cache flush to solve SEV cache incoherency issues Flush the CPU caches when memory is reclaimed from an SEV guest (where reclaim also includes it being unmapped from KVM's memslots). Due to lack of coherency for SEV encrypted memory, failure to flush results in silent data corruption if userspace is malicious/broken and doesn't ensure SEV guest memory is properly pinned and unpinned. Cache coherency is not enforced across the VM boundary in SEV (AMD APM vol.2 Section 15.34.7). Confidential cachelines generated by confidential VM guests have to be explicitly flushed on the host side. If a memory page containing dirty confidential cachelines was released by a VM and reallocated to another user, the cachelines may corrupt the new user at a later time. KVM takes a shortcut by assuming all confidential memory remains pinned until the end of the VM lifetime. Therefore, KVM does not flush the cache at mmu_notifier invalidation events. Because of this incorrect assumption and the lack of cache flushing, malicious userspace can crash the host kernel by creating a malicious VM and continuously allocating/releasing unpinned confidential memory pages while the VM is running. Add cache flush operations to mmu_notifier operations to ensure that any physical memory leaving the guest VM gets flushed. In particular, hook mmu_notifier_invalidate_range_start and mmu_notifier_release events and flush the cache accordingly. The hooks run after releasing the mmu lock to avoid contention with other vCPUs. Cc: [email protected] Suggested-by: Sean Christopherson <[email protected]> Reported-by: Mingwei Zhang <[email protected]> Signed-off-by: Mingwei Zhang <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_rtoinfo rtoinfo; struct sctp_association *asoc; unsigned long rto_min, rto_max; struct sctp_sock *sp = sctp_sk(sk); if (optlen != sizeof (struct sctp_rtoinfo)) return -EINVAL; if (copy_from_user(&rtoinfo, optval, optlen)) return -EFAULT; asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); /* Set the values to the specific association */ if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) return -EINVAL; rto_max = rtoinfo.srto_max; rto_min = rtoinfo.srto_min; if (rto_max) rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max; else rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max; if (rto_min) rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min; else rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min; if (rto_min > rto_max) return -EINVAL; if (asoc) { if (rtoinfo.srto_initial != 0) asoc->rto_initial = msecs_to_jiffies(rtoinfo.srto_initial); asoc->rto_max = rto_max; asoc->rto_min = rto_min; } else { /* If there is no association or the association-id = 0 * set the values to the endpoint. */ if (rtoinfo.srto_initial != 0) sp->rtoinfo.srto_initial = rtoinfo.srto_initial; sp->rtoinfo.srto_max = rto_max; sp->rtoinfo.srto_min = rto_min; } return 0; }
0
[ "CWE-617", "CWE-362" ]
linux
2dcab598484185dea7ec22219c76dcdd59e3cb90
58,574,244,708,825,400,000,000,000,000,000,000,000
53
sctp: avoid BUG_ON on sctp_wait_for_sndbuf Alexander Popov reported that an application may trigger a BUG_ON in sctp_wait_for_sndbuf if the socket tx buffer is full, a thread is waiting on it to queue more data and meanwhile another thread peels off the association being used by the first thread. This patch replaces the BUG_ON call with a proper error handling. It will return -EPIPE to the original sendmsg call, similarly to what would have been done if the association wasn't found in the first place. Acked-by: Alexander Popov <[email protected]> Signed-off-by: Marcelo Ricardo Leitner <[email protected]> Reviewed-by: Xin Long <[email protected]> Signed-off-by: David S. Miller <[email protected]>
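The shape of the fix — propagate -EPIPE instead of asserting — fits in a few lines. The struct and field below are illustrative stand-ins, not sctp's real types:

    #include <errno.h>

    struct assoc_state { int alive; };

    /* Replace BUG_ON(!state->alive) with error propagation: a peeled-off
     * association becomes a recoverable sendmsg failure, not a crash. */
    static int wait_for_sndbuf(struct assoc_state *state)
    {
        if (!state->alive)
            return -EPIPE;
        return 0;
    }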
read_packet(int fd, gss_buffer_t buf, int timeout, int first) { int ret; static uint32_t len = 0; static char len_buf[4]; static int len_buf_pos = 0; static char * tmpbuf = 0; static int tmpbuf_pos = 0; if (first) { len_buf_pos = 0; return -2; } if (len_buf_pos < 4) { ret = timed_read(fd, &len_buf[len_buf_pos], 4 - len_buf_pos, timeout); if (ret == -1) { if (errno == EINTR || errno == EAGAIN) return -2; LOG(LOG_ERR, ("%s", strerror(errno))); goto bail; } if (ret == 0) { /* EOF */ /* Failure to read ANY length just means we're done */ if (len_buf_pos == 0) return 0; /* * Otherwise, we got EOF mid-length, and that's * a protocol error. */ LOG(LOG_INFO, ("EOF reading packet len")); goto bail; } len_buf_pos += ret; } /* Not done reading the length? */ if (len_buf_pos != 4) return -2; /* We have the complete length */ len = ntohl(*(uint32_t *)len_buf); /* * We make sure recvd length is reasonable, allowing for some * slop in enc overhead, beyond the actual maximum number of * bytes of decrypted payload. */ if (len > GSTD_MAXPACKETCONTENTS + 512) { LOG(LOG_ERR, ("ridiculous length, %ld", len)); goto bail; } if (!tmpbuf) { if ((tmpbuf = malloc(len)) == NULL) { LOG(LOG_CRIT, ("malloc failure, %ld bytes", len)); goto bail; } } ret = timed_read(fd, tmpbuf + tmpbuf_pos, len - tmpbuf_pos, timeout); if (ret == -1) { if (errno == EINTR || errno == EAGAIN) return -2; LOG(LOG_ERR, ("%s", strerror(errno))); goto bail; } if (ret == 0) { LOG(LOG_ERR, ("EOF while reading packet (len=%d)", len)); goto bail; } tmpbuf_pos += ret; if (tmpbuf_pos == len) { buf->length = len; buf->value = tmpbuf; len = len_buf_pos = tmpbuf_pos = 0; tmpbuf = NULL; LOG(LOG_DEBUG, ("read packet of length %d", buf->length)); return 1; } return -2; bail: free(tmpbuf); tmpbuf = NULL; return -1; }
0
[ "CWE-400", "CWE-703" ]
knc
f237f3e09ecbaf59c897f5046538a7b1a3fa40c1
304,139,018,340,292,150,000,000,000,000,000,000,000
102
knc: fix a couple of memory leaks. One of these can be remotely triggered during the authentication phase which leads to a remote DoS possibility. Pointed out by: Imre Rad <[email protected]>
user_path_parent(int dfd, const char __user *path, struct path *parent, struct qstr *last, int *type, unsigned int flags) { /* only LOOKUP_REVAL is allowed in extra flags */ return filename_parentat(dfd, getname(path), flags & LOOKUP_REVAL, parent, last, type); }
0
[ "CWE-284" ]
linux
9409e22acdfc9153f88d9b1ed2bd2a5b34d2d3ca
12,722,889,561,999,910,000,000,000,000,000,000,000
10
vfs: rename: check backing inode being equal If a file is renamed to a hardlink of itself POSIX specifies that rename(2) should do nothing and return success. This condition is checked in vfs_rename(). However it won't detect hard links on overlayfs where these are given separate inodes on the overlayfs layer. Overlayfs itself detects this condition and returns success without doing anything, but then vfs_rename() will proceed as if this was a successful rename (detach_mounts(), d_move()). The correct thing to do is to detect this condition before even calling into overlayfs. This patch does this by calling vfs_select_inode() to get the underlying inodes. Signed-off-by: Miklos Szeredi <[email protected]> Cc: <[email protected]> # v4.2+
xmlSchemaPValAttrNode(xmlSchemaParserCtxtPtr ctxt, xmlSchemaBasicItemPtr ownerItem, xmlAttrPtr attr, xmlSchemaTypePtr type, const xmlChar **value) { const xmlChar *val; if ((ctxt == NULL) || (type == NULL) || (attr == NULL)) return (-1); val = xmlSchemaGetNodeContent(ctxt, (xmlNodePtr) attr); if (value != NULL) *value = val; return (xmlSchemaPValAttrNodeValue(ctxt, ownerItem, attr, val, type)); }
0
[ "CWE-134" ]
libxml2
4472c3a5a5b516aaf59b89be602fbce52756c3e9
201,330,972,331,251,460,000,000,000,000,000,000,000
18
Fix some format string warnings with possible format string vulnerability For https://bugzilla.gnome.org/show_bug.cgi?id=761029 Decorate every method in libxml2 with the appropriate LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups following the reports.
TEST(Random, SecureFork) { unsigned char buffer = 0; // Init random buffer folly::Random::secureRandom(&buffer, 1); auto pid = fork(); EXPECT_NE(pid, -1); if (pid) { // parent int status = 0; folly::Random::secureRandom(&buffer, 1); auto pid2 = wait(&status); EXPECT_NE(WEXITSTATUS(status), buffer); EXPECT_EQ(pid, pid2); } else { // child folly::Random::secureRandom(&buffer, 1); exit(buffer); // Do not print gtest results } }
0
[ "CWE-119", "CWE-787" ]
folly
8e927ee48b114c8a2f90d0cbd5ac753795a6761f
243,044,740,183,069,100,000,000,000,000,000,000,000
20
Flush secureRandom buffer on fork Summary: On fork, flush the secureRandom buffer, so that we don't share entropy between the parent and child. Reviewed By: ricklavoie Differential Revision: D9196474 fbshipit-source-id: 12ff8488d814466186df61328a5f1d4000beb27f
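The same property can be obtained in plain C with pthread_atfork(): drop any buffered entropy in the child so parent and child can never return identical bytes. A sketch under that assumption, not folly's implementation:

    #include <pthread.h>
    #include <string.h>

    static unsigned char entropy_buf[256];
    static size_t entropy_avail;

    static void drop_entropy_in_child(void)
    {
        /* the child must refill from the kernel instead of replaying parent bytes */
        memset(entropy_buf, 0, sizeof entropy_buf);
        entropy_avail = 0;
    }

    static void install_fork_handler(void)
    {
        pthread_atfork(NULL, NULL, drop_entropy_in_child);
    }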
void FilterManager::encode1xxHeaders(ActiveStreamEncoderFilter* filter, ResponseHeaderMap& headers) { filter_manager_callbacks_.resetIdleTimer(); ASSERT(proxy_100_continue_); // The caller must guarantee that encode1xxHeaders() is invoked at most once. ASSERT(!state_.has_1xx_headers_ || filter != nullptr); // Make sure commonContinue continues encode1xxHeaders. state_.has_1xx_headers_ = true; // Similar to the block in encodeHeaders, run encode1xxHeaders on each // filter. This is simpler than that case because 100 continue implies no // end-stream, and because there are normal headers coming there's no need for // complex continuation logic. // 100-continue filter iteration should always start with the next filter if available. std::list<ActiveStreamEncoderFilterPtr>::iterator entry = commonEncodePrefix(filter, false, FilterIterationStartState::AlwaysStartFromNext); for (; entry != encoder_filters_.end(); entry++) { if ((*entry)->skipFilter()) { continue; } ASSERT(!(state_.filter_call_state_ & FilterCallState::Encode1xxHeaders)); state_.filter_call_state_ |= FilterCallState::Encode1xxHeaders; FilterHeadersStatus status = (*entry)->handle_->encode1xxHeaders(headers); state_.filter_call_state_ &= ~FilterCallState::Encode1xxHeaders; ENVOY_STREAM_LOG(trace, "encode 1xx continue headers called: filter={} status={}", *this, static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status)); if (!(*entry)->commonHandleAfter1xxHeadersCallback(status)) { return; } } filter_manager_callbacks_.encode1xxHeaders(headers); }
0
[ "CWE-416" ]
envoy
148de954ed3585d8b4298b424aa24916d0de6136
269,610,738,041,901,540,000,000,000,000,000,000,000
34
CVE-2021-43825 Response filter manager crash Signed-off-by: Yan Avlasov <[email protected]>
static inline void show_node(struct zone *zone) { if (IS_ENABLED(CONFIG_NUMA)) printk("Node %d ", zone_to_nid(zone)); }
0
[]
linux
400e22499dd92613821374c8c6c88c7225359980
303,399,235,014,706,700,000,000,000,000,000,000,000
5
mm: don't warn about allocations which stall for too long Commit 63f53dea0c98 ("mm: warn about allocations which stall for too long") was a great step for reducing possibility of silent hang up problem caused by memory allocation stalls. But this commit reverts it, for it is possible to trigger OOM lockup and/or soft lockups when many threads concurrently called warn_alloc() (in order to warn about memory allocation stalls) due to current implementation of printk(), and it is difficult to obtain useful information due to limitation of synchronous warning approach. Current printk() implementation flushes all pending logs using the context of a thread which called console_unlock(). printk() should be able to flush all pending logs eventually unless somebody continues appending to printk() buffer. Since warn_alloc() started appending to printk() buffer while waiting for oom_kill_process() to make forward progress when oom_kill_process() is processing pending logs, it became possible for warn_alloc() to force oom_kill_process() loop inside printk(). As a result, warn_alloc() significantly increased possibility of preventing oom_kill_process() from making forward progress. ---------- Pseudo code start ---------- Before warn_alloc() was introduced: retry: if (mutex_trylock(&oom_lock)) { while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here. mutex_unlock(&oom_lock) } goto retry; After warn_alloc() was introduced: retry: if (mutex_trylock(&oom_lock)) { while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here. mutex_unlock(&oom_lock) } else if (waited_for_10seconds()) { atomic_inc(&printk_pending_logs); } goto retry; ---------- Pseudo code end ---------- Although waited_for_10seconds() becomes true once per 10 seconds, unbounded number of threads can call waited_for_10seconds() at the same time. Also, since threads doing waited_for_10seconds() keep doing almost busy loop, the thread doing print_one_log() can use little CPU resource. Therefore, this situation can be simplified like ---------- Pseudo code start ---------- retry: if (mutex_trylock(&oom_lock)) { while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here. mutex_unlock(&oom_lock) } else { atomic_inc(&printk_pending_logs); } goto retry; ---------- Pseudo code end ---------- when printk() is called faster than print_one_log() can process a log. One of possible mitigation would be to introduce a new lock in order to make sure that no other series of printk() (either oom_kill_process() or warn_alloc()) can append to printk() buffer when one series of printk() (either oom_kill_process() or warn_alloc()) is already in progress. Such serialization will also help obtaining kernel messages in readable form. ---------- Pseudo code start ---------- retry: if (mutex_trylock(&oom_lock)) { mutex_lock(&oom_printk_lock); while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here. 
mutex_unlock(&oom_printk_lock); mutex_unlock(&oom_lock) } else { if (mutex_trylock(&oom_printk_lock)) { atomic_inc(&printk_pending_logs); mutex_unlock(&oom_printk_lock); } } goto retry; ---------- Pseudo code end ---------- But this commit does not go that direction, for we don't want to introduce a new lock dependency, and we unlikely be able to obtain useful information even if we serialized oom_kill_process() and warn_alloc(). Synchronous approach is prone to unexpected results (e.g. too late [1], too frequent [2], overlooked [3]). As far as I know, warn_alloc() never helped with providing information other than "something is going wrong". I want to consider asynchronous approach which can obtain information during stalls with possibly relevant threads (e.g. the owner of oom_lock and kswapd-like threads) and serve as a trigger for actions (e.g. turn on/off tracepoints, ask libvirt daemon to take a memory dump of stalling KVM guest for diagnostic purpose). This commit temporarily loses ability to report e.g. OOM lockup due to unable to invoke the OOM killer due to !__GFP_FS allocation request. But asynchronous approach will be able to detect such situation and emit warning. Thus, let's remove warn_alloc(). [1] https://bugzilla.kernel.org/show_bug.cgi?id=192981 [2] http://lkml.kernel.org/r/CAM_iQpWuPVGc2ky8M-9yukECtS+zKjiDasNymX7rMcBjBFyM_A@mail.gmail.com [3] commit db73ee0d46379922 ("mm, vmscan: do not loop on too_many_isolated for ever")) Link: http://lkml.kernel.org/r/1509017339-4802-1-git-send-email-penguin-kernel@I-love.SAKURA.ne.jp Signed-off-by: Tetsuo Handa <[email protected]> Reported-by: Cong Wang <[email protected]> Reported-by: yuwang.yuwang <[email protected]> Reported-by: Johannes Weiner <[email protected]> Acked-by: Michal Hocko <[email protected]> Acked-by: Johannes Weiner <[email protected]> Cc: Vlastimil Babka <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Dave Hansen <[email protected]> Cc: Sergey Senozhatsky <[email protected]> Cc: Petr Mladek <[email protected]> Cc: Steven Rostedt <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
void SimpleMessenger::queue_reap(Pipe *pipe) { ldout(cct,10) << "queue_reap " << pipe << dendl; lock.Lock(); pipe_reap_queue.push_back(pipe); reaper_cond.Signal(); lock.Unlock(); }
0
[ "CWE-287", "CWE-284" ]
ceph
5ead97120e07054d80623dada90a5cc764c28468
242,515,867,696,507,130,000,000,000,000,000,000,000
8
auth/cephx: add authorizer challenge Allow the accepting side of a connection to reject an initial authorizer with a random challenge. The connecting side then has to respond with an updated authorizer proving they are able to decrypt the service's challenge and that the new authorizer was produced for this specific connection instance. The accepting side requires this challenge and response unconditionally if the client side advertises they have the feature bit. Servers wishing to require this improved level of authentication simply have to require the appropriate feature. Signed-off-by: Sage Weil <[email protected]> (cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b) # Conflicts: # src/auth/Auth.h # src/auth/cephx/CephxProtocol.cc # src/auth/cephx/CephxProtocol.h # src/auth/none/AuthNoneProtocol.h # src/msg/Dispatcher.h # src/msg/async/AsyncConnection.cc - const_iterator - ::decode vs decode - AsyncConnection ctor arg noise - get_random_bytes(), not cct->random()
Init_dlhandle() { rb_cDLHandle = rb_define_class_under(rb_mDL, "Handle", rb_cObject); rb_define_alloc_func(rb_cDLHandle, rb_dlhandle_s_allocate); rb_define_method(rb_cDLHandle, "initialize", rb_dlhandle_initialize, -1); rb_define_method(rb_cDLHandle, "to_i", rb_dlhandle_to_i, 0); rb_define_method(rb_cDLHandle, "close", rb_dlhandle_close, 0); rb_define_method(rb_cDLHandle, "sym", rb_dlhandle_sym, 1); rb_define_method(rb_cDLHandle, "[]", rb_dlhandle_sym, 1); rb_define_method(rb_cDLHandle, "disable_close", rb_dlhandle_disable_close, 0); rb_define_method(rb_cDLHandle, "enable_close", rb_dlhandle_enable_close, 0); }
0
[ "CWE-20", "CWE-399" ]
ruby
4600cf725a86ce31266153647ae5aa1197b1215b
270,515,982,786,496,470,000,000,000,000,000,000,000
12
* ext/dl/dl.c (rb_dlhandle_initialize): prohibits DL::dlopen with a tainted library name. Patch by sheepman <sheepman AT sheepman.sakura.ne.jp>. * ext/dl/dl.c (rb_dlhandle_sym): ditto git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/branches/ruby_1_9_1@23405 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
static int deliver_to_subscribers(struct snd_seq_client *client, struct snd_seq_event *event, int atomic, int hop) { struct snd_seq_subscribers *subs; int err, result = 0, num_ev = 0; struct snd_seq_event event_saved; struct snd_seq_client_port *src_port; struct snd_seq_port_subs_info *grp; src_port = snd_seq_port_use_ptr(client, event->source.port); if (src_port == NULL) return -EINVAL; /* invalid source port */ /* save original event record */ event_saved = *event; grp = &src_port->c_src; /* lock list */ if (atomic) read_lock(&grp->list_lock); else down_read_nested(&grp->list_mutex, hop); list_for_each_entry(subs, &grp->list_head, src_list) { /* both ports ready? */ if (atomic_read(&subs->ref_count) != 2) continue; event->dest = subs->info.dest; if (subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP) /* convert time according to flag with subscription */ update_timestamp_of_queue(event, subs->info.queue, subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIME_REAL); err = snd_seq_deliver_single_event(client, event, 0, atomic, hop); if (err < 0) { /* save first error that occurs and continue */ if (!result) result = err; continue; } num_ev++; /* restore original event record */ *event = event_saved; } if (atomic) read_unlock(&grp->list_lock); else up_read(&grp->list_mutex); *event = event_saved; /* restore */ snd_seq_port_unlock(src_port); return (result < 0) ? result : num_ev; }
0
[ "CWE-362" ]
linux
b3defb791b26ea0683a93a4f49c77ec45ec96f10
127,324,164,545,149,040,000,000,000,000,000,000,000
51
ALSA: seq: Make ioctls race-free The ALSA sequencer ioctls have no protection against racy calls, so concurrent operations may interfere with each other. As reported recently, for example, concurrent calls setting the client pool combined with write calls may lead to either an unkillable deadlock or a UAF. As a slightly big-hammer solution, this patch introduces a mutex to make each ioctl exclusive. Although this may reduce performance for parallel ioctl calls, such parallelism is usually not demanded for sequencer usage, so the impact should be negligible. Reported-by: Luo Quan <[email protected]> Reviewed-by: Kees Cook <[email protected]> Reviewed-by: Greg Kroah-Hartman <[email protected]> Cc: <[email protected]> Signed-off-by: Takashi Iwai <[email protected]>
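The "big hammer" the message describes is one lock taken around every ioctl body. A userspace-flavoured sketch of the pattern (the kernel code uses its own mutex primitives, and the struct name here is illustrative):

    #include <pthread.h>

    struct seq_client {
        pthread_mutex_t ioctl_mutex;  /* serializes all ioctls on this client */
    };

    static int client_ioctl(struct seq_client *c, int (*op)(struct seq_client *))
    {
        pthread_mutex_lock(&c->ioctl_mutex);
        int err = op(c);              /* formerly racy bodies now run one at a time */
        pthread_mutex_unlock(&c->ioctl_mutex);
        return err;
    }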
bool Virtual_column_info::fix_expr(THD *thd) { DBUG_ENTER("fix_vcol_expr"); const enum enum_column_usage saved_column_usage= thd->column_usage; thd->column_usage= COLUMNS_WRITE; int error= expr->fix_fields(thd, &expr); thd->column_usage= saved_column_usage; if (unlikely(error)) { StringBuffer<MAX_FIELD_WIDTH> str; print(&str); my_error(ER_ERROR_EVALUATING_EXPRESSION, MYF(0), str.c_ptr_safe()); DBUG_RETURN(1); } DBUG_RETURN(0); }
0
[ "CWE-416" ]
server
c02ebf3510850ba78a106be9974c94c3b97d8585
22,340,190,165,552,257,000,000,000,000,000,000,000
21
MDEV-24176 Preparations 1. Moved the fix_vcol_exprs() call to open_table(): mysql_alter_table() doesn't do lock_tables(), so it cannot benefit from fix_vcol_exprs() being called there. Tests affected: main.default_session 2. Vanilla cleanups and comments.
int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p) { struct kmem_cache_cpu *c; int i; /* memcg and kmem_cache debug support */ s = slab_pre_alloc_hook(s, flags); if (unlikely(!s)) return false; /* * Drain objects in the per cpu slab, while disabling local * IRQs, which protects against PREEMPT and interrupts * handlers invoking normal fastpath. */ local_irq_disable(); c = this_cpu_ptr(s->cpu_slab); for (i = 0; i < size; i++) { void *object = c->freelist; if (unlikely(!object)) { /* * We may have removed an object from c->freelist using * the fastpath in the previous iteration; in that case, * c->tid has not been bumped yet. * Since ___slab_alloc() may reenable interrupts while * allocating memory, we should bump c->tid now. */ c->tid = next_tid(c->tid); /* * Invoking slow path likely have side-effect * of re-populating per CPU c->freelist */ p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_, c); if (unlikely(!p[i])) goto error; c = this_cpu_ptr(s->cpu_slab); maybe_wipe_obj_freeptr(s, p[i]); continue; /* goto for-loop */ } c->freelist = get_freepointer(s, object); p[i] = object; maybe_wipe_obj_freeptr(s, p[i]); } c->tid = next_tid(c->tid); local_irq_enable(); /* Clear memory outside IRQ disabled fastpath loop */ if (unlikely(slab_want_init_on_alloc(flags, s))) { int j; for (j = 0; j < i; j++) memset(p[j], 0, s->object_size); } /* memcg and kmem_cache debug support */ slab_post_alloc_hook(s, flags, size, p); return i; error: local_irq_enable(); slab_post_alloc_hook(s, flags, i, p); __kmem_cache_free_bulk(s, i, p); return 0; }
0
[]
linux
fd4d9c7d0c71866ec0c2825189ebd2ce35bd95b8
286,573,245,831,184,630,000,000,000,000,000,000,000
69
mm: slub: add missing TID bump in kmem_cache_alloc_bulk() When kmem_cache_alloc_bulk() attempts to allocate N objects from a percpu freelist of length M, and N > M > 0, it will first remove the M elements from the percpu freelist, then call ___slab_alloc() to allocate the next element and repopulate the percpu freelist. ___slab_alloc() can re-enable IRQs via allocate_slab(), so the TID must be bumped before ___slab_alloc() to properly commit the freelist head change. Fix it by unconditionally bumping c->tid when entering the slowpath. Cc: [email protected] Fixes: ebe909e0fdb3 ("slub: improve bulk alloc strategy") Signed-off-by: Jann Horn <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
_upnp_delete_redir(unsigned short eport, int proto) { int r; #if defined(__linux__) r = delete_redirect_and_filter_rules(eport, proto); #elif defined(USE_PF) r = delete_redirect_and_filter_rules(ext_if_name, eport, proto); #else r = delete_redirect_rule(ext_if_name, eport, proto); delete_filter_rule(ext_if_name, eport, proto); #endif #ifdef ENABLE_LEASEFILE lease_file_remove( eport, proto); #endif #ifdef ENABLE_EVENTS upnp_event_var_change_notify(EWanIPC); #endif return r; }
0
[ "CWE-476" ]
miniupnp
f321c2066b96d18afa5158dfa2d2873a2957ef38
68,215,192,947,058,525,000,000,000,000,000,000,000
20
upnp_redirect(): accept NULL desc argument
static void iov_fault_in_pages_read(struct iovec *iov, unsigned long len) { while (!iov->iov_len) iov++; while (len > 0) { unsigned long this_len; this_len = min_t(unsigned long, len, iov->iov_len); fault_in_pages_readable(iov->iov_base, this_len); len -= this_len; iov++; } }
1
[ "CWE-17" ]
linux
f0d1bec9d58d4c038d0ac958c9af82be6eb18045
185,972,045,487,277,260,000,000,000,000,000,000,000
14
new helper: copy_page_from_iter() parallel to copy_page_to_iter(). pipe_write() switched to it (and became ->write_iter()). Signed-off-by: Al Viro <[email protected]>
int URI_FUNC(ParseSingleUriExMm)(URI_TYPE(Uri) * uri, const URI_CHAR * first, const URI_CHAR * afterLast, const URI_CHAR ** errorPos, UriMemoryManager * memory) { URI_TYPE(ParserState) state; int res; /* Check params */ if ((uri == NULL) || (first == NULL) || (afterLast == NULL)) { return URI_ERROR_NULL; } URI_CHECK_MEMORY_MANAGER(memory); /* may return */ state.uri = uri; res = URI_FUNC(ParseUriExMm)(&state, first, afterLast, memory); if (res != URI_SUCCESS) { if (errorPos != NULL) { *errorPos = state.errorPos; } URI_FUNC(FreeUriMembersMm)(uri, memory); } return res; }
0
[ "CWE-125" ]
uriparser
cef25028de5ff872c2e1f0a6c562eb3ea9ecbce4
64,223,607,256,008,240,000,000,000,000,000,000,000
25
Fix uriParse*Ex* out-of-bounds read
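The out-of-bounds read class fixed here comes from dereferencing first without checking it against afterLast. The invariant, as a tiny sketch in the same first/afterLast style the uriparser API uses:

    /* Safe only because the range is checked before the dereference. */
    static int next_char_is(const char *first, const char *afterLast, char c)
    {
        return (first < afterLast) && (*first == c);
    }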
static ssize_t print_cpus_kernel_max(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%d\n", NR_CPUS - 1); }
1
[ "CWE-787" ]
linux
aa838896d87af561a33ecefea1caa4c15a68bc47
236,444,924,406,476,620,000,000,000,000,000,000,000
5
drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions Convert the various sprintf fmaily calls in sysfs device show functions to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety. Done with: $ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 . And cocci script: $ cat sysfs_emit_dev.cocci @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - sprintf(buf, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - snprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - scnprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; expression chr; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - strcpy(buf, chr); + sysfs_emit(buf, chr); ...> } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - sprintf(buf, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - snprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - scnprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... - len += scnprintf(buf + len, PAGE_SIZE - len, + len += sysfs_emit_at(buf, len, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; expression chr; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { ... - strcpy(buf, chr); - return strlen(buf); + return sysfs_emit(buf, chr); } Signed-off-by: Joe Perches <[email protected]> Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com Signed-off-by: Greg Kroah-Hartman <[email protected]>
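Applied to print_cpus_kernel_max() above, the conversion the cocci script performs is a one-line change; sysfs_emit() bounds the write to PAGE_SIZE where a bare sprintf() trusts the caller. This sketch only compiles inside the kernel tree:

    static ssize_t print_cpus_kernel_max(struct device *dev,
                                         struct device_attribute *attr, char *buf)
    {
        return sysfs_emit(buf, "%d\n", NR_CPUS - 1);  /* bounded, PAGE_SIZE-safe */
    }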
int kvm_arch_hardware_enable(void *garbage) { long status; long tmp_base; unsigned long pte; unsigned long saved_psr; int slot; pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL)); local_irq_save(saved_psr); slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); local_irq_restore(saved_psr); if (slot < 0) return -EINVAL; spin_lock(&vp_lock); status = ia64_pal_vp_init_env(kvm_vsa_base ? VP_INIT_ENV : VP_INIT_ENV_INITALIZE, __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base); if (status != 0) { spin_unlock(&vp_lock); printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n"); return -EINVAL; } if (!kvm_vsa_base) { kvm_vsa_base = tmp_base; printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base); } spin_unlock(&vp_lock); ia64_ptr_entry(0x3, slot); return 0; }
0
[ "CWE-399" ]
kvm
5b40572ed5f0344b9dbee486a17c589ce1abe1a3
39,250,474,077,948,810,000,000,000,000,000,000,000
34
KVM: Ensure all vcpus are consistent with in-kernel irqchip settings If some vcpus are created before KVM_CREATE_IRQCHIP, then irqchip_in_kernel() and vcpu->arch.apic will be inconsistent, leading to potential NULL pointer dereferences. Fix by: - ensuring that no vcpus are installed when KVM_CREATE_IRQCHIP is called - ensuring that a vcpu has an apic if it is installed after KVM_CREATE_IRQCHIP This is somewhat long winded because vcpu->arch.apic is created without kvm->lock held. Based on earlier patch by Michael Ellerman. Signed-off-by: Michael Ellerman <[email protected]> Signed-off-by: Avi Kivity <[email protected]>
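One half of the fix is an ordering check: refuse to create the irqchip once any vcpu exists. A sketch with a stand-in struct, not the real kvm bookkeeping:

    struct vm_state { int nr_vcpus; int has_irqchip; };

    static int create_irqchip(struct vm_state *vm)
    {
        if (vm->nr_vcpus > 0)
            return -1;  /* earlier vcpus would be left without an apic */
        vm->has_irqchip = 1;
        return 0;
    }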
ex_sleep(exarg_T *eap) { int n; long len; if (cursor_valid()) { n = W_WINROW(curwin) + curwin->w_wrow - msg_scrolled; if (n >= 0) windgoto((int)n, curwin->w_wincol + curwin->w_wcol); } len = eap->line2; switch (*eap->arg) { case 'm': break; case NUL: len *= 1000L; break; default: semsg(_(e_invarg2), eap->arg); return; } // Hide the cursor if invoked with ! do_sleep(len, eap->forceit); }
0
[ "CWE-122" ]
vim
35a319b77f897744eec1155b736e9372c9c5575f
239,387,884,951,875,350,000,000,000,000,000,000,000
23
patch 8.2.3489: ml_get error after search with range Problem: ml_get error after search with range. Solution: Limit the line number to the buffer line count.
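The solution line translates to a clamp before any ml_get() call; a minimal sketch, with names chosen to echo vim's conventions rather than taken from the patch itself:

    static long clamp_lnum(long lnum, long buf_line_count)
    {
        if (lnum > buf_line_count)
            lnum = buf_line_count;   /* never read past the last buffer line */
        if (lnum < 1)
            lnum = 1;
        return lnum;
    }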
tsize_t t2p_write_pdf_xobject_calcs(T2P* t2p, TIFF* output){ tsize_t written=0; char buffer[256]; int buflen=0; float X_W=0.0; float Y_W=0.0; float Z_W=0.0; float X_R=0.0; float Y_R=0.0; float Z_R=0.0; float X_G=0.0; float Y_G=0.0; float Z_G=0.0; float X_B=0.0; float Y_B=0.0; float Z_B=0.0; float x_w=0.0; float y_w=0.0; float z_w=0.0; float x_r=0.0; float y_r=0.0; float x_g=0.0; float y_g=0.0; float x_b=0.0; float y_b=0.0; float R=1.0; float G=1.0; float B=1.0; written += t2pWriteFile(output, (tdata_t) "[", 1); if(t2p->pdf_colorspace & T2P_CS_CALGRAY){ written += t2pWriteFile(output, (tdata_t) "/CalGray ", 9); X_W = t2p->tiff_whitechromaticities[0]; Y_W = t2p->tiff_whitechromaticities[1]; Z_W = 1.0F - (X_W + Y_W); X_W /= Y_W; Z_W /= Y_W; Y_W = 1.0F; } if(t2p->pdf_colorspace & T2P_CS_CALRGB){ written += t2pWriteFile(output, (tdata_t) "/CalRGB ", 8); x_w = t2p->tiff_whitechromaticities[0]; y_w = t2p->tiff_whitechromaticities[1]; x_r = t2p->tiff_primarychromaticities[0]; y_r = t2p->tiff_primarychromaticities[1]; x_g = t2p->tiff_primarychromaticities[2]; y_g = t2p->tiff_primarychromaticities[3]; x_b = t2p->tiff_primarychromaticities[4]; y_b = t2p->tiff_primarychromaticities[5]; z_w = y_w * ((x_g - x_b)*y_r - (x_r-x_b)*y_g + (x_r-x_g)*y_b); Y_R = (y_r/R) * ((x_g-x_b)*y_w - (x_w-x_b)*y_g + (x_w-x_g)*y_b) / z_w; X_R = Y_R * x_r / y_r; Z_R = Y_R * (((1-x_r)/y_r)-1); Y_G = ((0.0F-(y_g))/G) * ((x_r-x_b)*y_w - (x_w-x_b)*y_r + (x_w-x_r)*y_b) / z_w; X_G = Y_G * x_g / y_g; Z_G = Y_G * (((1-x_g)/y_g)-1); Y_B = (y_b/B) * ((x_r-x_g)*y_w - (x_w-x_g)*y_r + (x_w-x_r)*y_g) / z_w; X_B = Y_B * x_b / y_b; Z_B = Y_B * (((1-x_b)/y_b)-1); X_W = (X_R * R) + (X_G * G) + (X_B * B); Y_W = (Y_R * R) + (Y_G * G) + (Y_B * B); Z_W = (Z_R * R) + (Z_G * G) + (Z_B * B); X_W /= Y_W; Z_W /= Y_W; Y_W = 1.0; } written += t2pWriteFile(output, (tdata_t) "<< \n", 4); if(t2p->pdf_colorspace & T2P_CS_CALGRAY){ written += t2pWriteFile(output, (tdata_t) "/WhitePoint ", 12); buflen=snprintf(buffer, sizeof(buffer), "[%.4f %.4f %.4f] \n", X_W, Y_W, Z_W); check_snprintf_ret(t2p, buflen, buffer); written += t2pWriteFile(output, (tdata_t) buffer, buflen); written += t2pWriteFile(output, (tdata_t) "/Gamma 2.2 \n", 12); } if(t2p->pdf_colorspace & T2P_CS_CALRGB){ written += t2pWriteFile(output, (tdata_t) "/WhitePoint ", 12); buflen=snprintf(buffer, sizeof(buffer), "[%.4f %.4f %.4f] \n", X_W, Y_W, Z_W); check_snprintf_ret(t2p, buflen, buffer); written += t2pWriteFile(output, (tdata_t) buffer, buflen); written += t2pWriteFile(output, (tdata_t) "/Matrix ", 8); buflen=snprintf(buffer, sizeof(buffer), "[%.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f] \n", X_R, Y_R, Z_R, X_G, Y_G, Z_G, X_B, Y_B, Z_B); check_snprintf_ret(t2p, buflen, buffer); written += t2pWriteFile(output, (tdata_t) buffer, buflen); written += t2pWriteFile(output, (tdata_t) "/Gamma [2.2 2.2 2.2] \n", 22); } written += t2pWriteFile(output, (tdata_t) ">>] \n", 5); return(written); }
0
[ "CWE-119" ]
libtiff
b5d6803f0898e931cf772d3d0755704ab8488e63
64,218,964,493,948,340,000,000,000,000,000,000,000
94
* tools/tiff2pdf.c: fix write buffer overflow of 2 bytes on JPEG compressed images. Reported by Tyler Bohan of Cisco Talos as TALOS-CAN-0187 / CVE-2016-5652. Also prevents writing 2 extra uninitialized bytes to the file stream.
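The guard visible as check_snprintf_ret() in the function above follows a standard pattern: treat a negative or capacity-exceeding snprintf() result as an error instead of writing the buffer out. A generic sketch of that pattern:

    #include <stdio.h>

    /* Returns the byte count on success, -1 if formatting failed or would
     * have been truncated -- the case behind the 2-byte overflow. */
    static int safe_format(char *buf, size_t cap, double v)
    {
        int n = snprintf(buf, cap, "[%.4f]", v);
        return (n < 0 || (size_t)n >= cap) ? -1 : n;
    }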
bool vma_policy_mof(struct vm_area_struct *vma) { struct mempolicy *pol; if (vma->vm_ops && vma->vm_ops->get_policy) { bool ret = false; pol = vma->vm_ops->get_policy(vma, vma->vm_start); if (pol && (pol->flags & MPOL_F_MOF)) ret = true; mpol_cond_put(pol); return ret; } pol = vma->vm_policy; if (!pol) pol = get_task_policy(current); return pol->flags & MPOL_F_MOF; }
0
[ "CWE-388" ]
linux
cf01fb9985e8deb25ccf0ea54d916b8871ae0e62
86,035,000,236,446,190,000,000,000,000,000,000,000
21
mm/mempolicy.c: fix error handling in set_mempolicy and mbind. In the case that compat_get_bitmap fails we do not want to copy the bitmap to the user as it will contain uninitialized stack data and leak sensitive data. Signed-off-by: Chris Salls <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
test_gui_scrollbar(dict_T *args) { char_u *which; long value; int dragging; scrollbar_T *sb = NULL; if (dict_find(args, (char_u *)"which", -1) == NULL || dict_find(args, (char_u *)"value", -1) == NULL || dict_find(args, (char_u *)"dragging", -1) == NULL) return FALSE; which = dict_get_string(args, (char_u *)"which", FALSE); value = (long)dict_get_number(args, (char_u *)"value"); dragging = (int)dict_get_number(args, (char_u *)"dragging"); if (STRCMP(which, "left") == 0) sb = &curwin->w_scrollbars[SBAR_LEFT]; else if (STRCMP(which, "right") == 0) sb = &curwin->w_scrollbars[SBAR_RIGHT]; else if (STRCMP(which, "hor") == 0) sb = &gui.bottom_sbar; if (sb == NULL) { semsg(_(e_invalid_argument_str), which); return FALSE; } gui_drag_scrollbar(sb, value, dragging); # ifndef USE_ON_FLY_SCROLL // need to loop through normal_cmd() to handle the scroll events exec_normal(FALSE, TRUE, FALSE); # endif return TRUE; }
0
[ "CWE-121", "CWE-787" ]
vim
34f8117dec685ace52cd9e578e2729db278163fc
327,456,994,961,898,920,000,000,000,000,000,000,000
35
patch 8.2.4397: crash when using many composing characters in error message Problem: Crash when using many composing characters in error message. Solution: Use mb_cptr2char_adv() instead of mb_ptr2char_adv().
flatpak_dir_find_remote_related (FlatpakDir *self, FlatpakRemoteState *state, const char *ref, GCancellable *cancellable, GError **error) { const char *metadata = NULL; g_autoptr(GKeyFile) metakey = g_key_file_new (); g_auto(GStrv) parts = NULL; g_autoptr(GPtrArray) related = NULL; g_autofree char *url = NULL; parts = flatpak_decompose_ref (ref, error); if (parts == NULL) return NULL; if (!ostree_repo_remote_get_url (self->repo, state->remote_name, &url, error)) return FALSE; if (*url == 0) return g_steal_pointer (&related); /* Empty url, silently disables updates */ if (flatpak_remote_state_lookup_cache (state, ref, NULL, NULL, &metadata, NULL) && g_key_file_load_from_data (metakey, metadata, -1, 0, NULL)) related = flatpak_dir_find_remote_related_for_metadata (self, state, ref, metakey, cancellable, error); else related = g_ptr_array_new_with_free_func ((GDestroyNotify) flatpak_related_free); return g_steal_pointer (&related); }
0
[ "CWE-668" ]
flatpak
cd2142888fc4c199723a0dfca1f15ea8788a5483
90,454,045,066,936,700,000,000,000,000,000,000,000
36
Don't expose /proc when running apply_extra As shown by CVE-2019-5736, it is sometimes possible for the sandboxed app to access outside files using /proc/self/exe. This is not typically an issue for flatpak, as the sandbox runs as the user, who has no permission to e.g. modify host files. However, when installing apps using extra-data into the system repo we *do* actually run a sandbox as root. So, in this case we disable mounting /proc in the sandbox, which neuters attacks like this.
virDomainActualNetDefFree(virDomainActualNetDefPtr def)
{
    if (!def)
        return;

    switch (def->type) {
    case VIR_DOMAIN_NET_TYPE_BRIDGE:
    case VIR_DOMAIN_NET_TYPE_NETWORK:
        VIR_FREE(def->data.bridge.brname);
        break;
    case VIR_DOMAIN_NET_TYPE_DIRECT:
        VIR_FREE(def->data.direct.linkdev);
        break;
    case VIR_DOMAIN_NET_TYPE_HOSTDEV:
        virDomainHostdevDefClear(&def->data.hostdev.def);
        break;
    default:
        break;
    }

    VIR_FREE(def->virtPortProfile);
    virNetDevBandwidthFree(def->bandwidth);
    virNetDevVlanClear(&def->vlan);
    VIR_FREE(def);
}
0
[ "CWE-212" ]
libvirt
a5b064bf4b17a9884d7d361733737fb614ad8979
28,409,275,172,659,030,000,000,000,000,000,000,000
25
conf: Don't format http cookies unless VIR_DOMAIN_DEF_FORMAT_SECURE is used Starting with 3b076391befc3fe72deb0c244ac6c2b4c100b410 (v6.1.0-122-g3b076391be) we support http cookies. Since they may contain somewhat sensitive information we should not format them into the XML unless VIR_DOMAIN_DEF_FORMAT_SECURE is asserted. Reported-by: Han Han <[email protected]> Signed-off-by: Peter Krempa <[email protected]> Reviewed-by: Erik Skultety <[email protected]>
GF_Box *pasp_New()
{
    ISOM_DECL_BOX_ALLOC(GF_PixelAspectRatioBox, GF_ISOM_BOX_TYPE_PASP);
    return (GF_Box *)tmp;
}
0
[ "CWE-400", "CWE-401" ]
gpac
d2371b4b204f0a3c0af51ad4e9b491144dd1225c
233,377,451,858,873,780,000,000,000,000,000,000,000
5
prevent dref memleak on invalid input (#1183)
send_progress (GOutputStream *out,
               int op,
               int n_ops,
               int progress,
               int status,
               const GError *update_error)
{
  g_autoptr(GVariant) v = NULL;
  g_autofree gchar *error_name = NULL;

  if (update_error)
    error_name = get_progress_error (update_error);

  v = g_variant_ref_sink (g_variant_new ("(uuuuss)",
                                         op,
                                         n_ops,
                                         progress,
                                         status,
                                         error_name ? error_name : "",
                                         update_error ? update_error->message : ""));
  send_variant (v, out);
}
0
[ "CWE-94", "CWE-74" ]
flatpak
aeb6a7ab0abaac4a8f4ad98b3df476d9de6b8bd4
68,020,371,280,019,520,000,000,000,000,000,000,000
19
portal: Convert --env in extra-args into --env-fd This hides overridden variables from the command-line, which means processes running under other uids can't see them in /proc/*/cmdline, which might be important if they contain secrets. Signed-off-by: Simon McVittie <[email protected]> Part-of: https://github.com/flatpak/flatpak/security/advisories/GHSA-4ppf-fxf6-vxg2
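The technique this commit message describes, passing a secret environment variable through a file descriptor so it never appears in /proc/*/cmdline, can be sketched in plain C. The snippet below is an illustrative sketch only: env_to_fd() is an invented helper, not flatpak's actual code, though memfd_create() and dprintf() are real glibc/Linux APIs.

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>

/* Write "NAME=value" into an anonymous memfd; only the fd number needs to
 * appear on the command line (e.g. "--env-fd=N"), so other users reading
 * /proc/PID/cmdline never see the secret value itself. */
int env_to_fd(const char *name, const char *value)
{
    int fd = memfd_create("env", 0);
    if (fd < 0)
        return -1;
    dprintf(fd, "%s=%s", name, value);
    lseek(fd, 0, SEEK_SET);   /* child reads the fd from offset 0 */
    return fd;
}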
TEST_F(StreamErrorOnInvalidHttpMessageTest, ConnectionTerminatedIfCodecStreamErrorIsFalse) {
  sendInvalidRequestAndVerifyConnectionState(false);
}
0
[ "CWE-22" ]
envoy
5333b928d8bcffa26ab19bf018369a835f697585
299,710,033,342,044,160,000,000,000,000,000,000,000
3
Implement handling of escaped slash characters in URL path Fixes: CVE-2021-29492 Signed-off-by: Yan Avlasov <[email protected]>
bool Smb4KGlobal::coreIsInitialized()
{
  return p->coreInitialized;
}
0
[ "CWE-20" ]
smb4k
71554140bdaede27b95dbe4c9b5a028a83c83cce
27,153,003,588,920,790,000,000,000,000,000,000,000
4
Find the mount/umount commands in the helper Instead of trusting what we get passed in CVE-2017-8849
cms_context_fini(cms_context *cms)
{
    struct list_head *n, *pos;

    if (cms->cert) {
        CERT_DestroyCertificate(cms->cert);
        cms->cert = NULL;
    }

    switch (cms->pwdata.source) {
    case PW_SOURCE_INVALID:
    case PW_PROMPT:
    case PW_DEVICE:
    case PW_FROMFILEDB:
    case PW_FROMENV:
    case PW_FROMFILE:
    case PW_FROMFD:
    case PW_SOURCE_MAX:
        break;

    case PW_DATABASE:
        xfree(cms->pwdata.data);
        break;

    case PW_PLAINTEXT:
        memset(cms->pwdata.data, 0, strlen(cms->pwdata.data));
        xfree(cms->pwdata.data);
        break;
    }
    cms->pwdata.source = PW_SOURCE_INVALID;
    cms->pwdata.orig_source = PW_SOURCE_INVALID;

    if (cms->privkey) {
        free(cms->privkey);
        cms->privkey = NULL;
    }

    if (cms->db_out >= 0)
        fsync(cms->db_out);
    xclose(cms->db_out);
    if (cms->dbx_out >= 0)
        fsync(cms->dbx_out);
    xclose(cms->dbx_out);
    if (cms->dbt_out >= 0)
        fsync(cms->dbt_out);
    xclose(cms->dbt_out);

    list_for_each_safe(pos, n, &cms->pk12_ins) {
        pk12_file_t *file = list_entry(pos, pk12_file_t, list);

        xfree(file->path);
        if (file->fd >= 0) {
            /*
             * This may or may not be writable...
             */
            fsync(file->fd);
            errno = 0;
        }
        xclose(file->fd);
        xfree(file->pw);
    }

    xclose(cms->pk12_out.fd);
    xfree(cms->pk12_out.path);
    xfree(cms->pk12_out.pw);

    /* These were freed when the arena was destroyed */
    if (cms->tokenname)
        cms->tokenname = NULL;
    if (cms->certname)
        cms->certname = NULL;

    if (cms->newsig.data) {
        free_poison(cms->newsig.data, cms->newsig.len);
        free(cms->newsig.data);
        memset(&cms->newsig, '\0', sizeof (cms->newsig));
    }

    cms->selected_digest = -1;

    if (cms->ci_digest) {
        free_poison(cms->ci_digest->data, cms->ci_digest->len);
        /* XXX sure seems like we should be freeing it here, but
         * that's segfaulting, and we know it'll get cleaned up with
         * PORT_FreeArena a couple of lines down.
         */
        cms->ci_digest = NULL;
    }

    teardown_digests(cms);

    if (cms->raw_signed_attrs) {
        free_poison(cms->raw_signed_attrs->data, cms->raw_signed_attrs->len);
        /* XXX sure seems like we should be freeing it here, but
         * that's segfaulting, and we know it'll get cleaned up with
         * PORT_FreeArena a couple of lines down.
         */
        cms->raw_signed_attrs = NULL;
    }

    if (cms->raw_signature) {
        free_poison(cms->raw_signature->data, cms->raw_signature->len);
        /* XXX sure seems like we should be freeing it here, but
         * that's segfaulting, and we know it'll get cleaned up with
         * PORT_FreeArena a couple of lines down.
         */
        cms->raw_signature = NULL;
    }

    for (int i = 0; i < cms->num_signatures; i++) {
        free(cms->signatures[i]->data);
        free(cms->signatures[i]);
    }
    xfree(cms->signatures);
    cms->num_signatures = 0;

    if (cms->authbuf) {
        xfree(cms->authbuf);
        cms->authbuf_len = 0;
    }

    PORT_FreeArena(cms->arena, PR_TRUE);
    memset(cms, '\0', sizeof(*cms));
    xfree(cms);
}
0
[ "CWE-787" ]
pesign
b879dda52f8122de697d145977c285fb0a022d76
126,365,159,753,832,470,000,000,000,000,000,000,000
124
Handle NULL pwdata in cms_set_pw_data() When 12f16710ee44ef64ddb044a3523c3c4c4d90039a rewrote this function, it didn't handle the NULL pwdata invocation from daemon.c. This leads to a explicit NULL dereference and crash on all attempts to daemonize pesign. Signed-off-by: Robbie Harwood <[email protected]>
size_t tls12_get_sig_algs(SSL *s, unsigned char *p)
{
    TLS_SIGALGS *sptr = s->cert->conf_sigalgs;
    size_t slen;

    /* Use custom signature algorithms if any are set */
    if (sptr)
    {
        slen = s->cert->conf_sigalgslen;
        if (p)
        {
            size_t i;
            for (i = 0; i < slen; i++, sptr++)
            {
                *p++ = sptr->rhash;
                *p++ = sptr->rsign;
            }
        }
        return slen * 2;
    }

    slen = sizeof(tls12_sigalgs);
#ifdef OPENSSL_FIPS
    /* If FIPS mode don't include MD5 which is last */
    if (FIPS_mode())
        slen -= 2;
#endif
    if (p)
        memcpy(p, tls12_sigalgs, slen);
    return slen;
}
1
[]
openssl
c70a1fee71119a9005b1f304a3bf47694b4a53ac
41,015,729,261,650,327,000,000,000,000,000,000,000
32
Reorganise supported signature algorithm extension processing. Only store encoded versions of peer and configured signature algorithms. Determine shared signature algorithms and cache the result along with NID equivalents of each algorithm. (backport from HEAD)
rb_string_value_cstr(ptr)
    volatile VALUE *ptr;
{
    VALUE str = rb_string_value(ptr);
    char *s = RSTRING(str)->ptr;

    if (!s || RSTRING(str)->len != strlen(s)) {
        rb_raise(rb_eArgError, "string contains null byte");
    }
    return s;
}
0
[ "CWE-20" ]
ruby
e926ef5233cc9f1035d3d51068abe9df8b5429da
63,249,896,140,123,080,000,000,000,000,000,000,000
11
* random.c (rb_genrand_int32, rb_genrand_real), intern.h: Export. * string.c (rb_str_tmp_new), intern.h: New function. git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/branches/ruby_1_8@16014 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
void *Type_LUT8_Read(struct _cms_typehandler_struct* self, cmsIOHANDLER* io, cmsUInt32Number* nItems, cmsUInt32Number SizeOfTag)
{
    cmsUInt8Number InputChannels, OutputChannels, CLUTpoints;
    cmsUInt8Number* Temp = NULL;
    cmsPipeline* NewLUT = NULL;
    cmsStage *mpemat, *mpeclut;
    cmsUInt32Number nTabSize, i;
    cmsFloat64Number Matrix[3*3];

    *nItems = 0;

    if (!_cmsReadUInt8Number(io, &InputChannels)) goto Error;
    if (!_cmsReadUInt8Number(io, &OutputChannels)) goto Error;
    if (!_cmsReadUInt8Number(io, &CLUTpoints)) goto Error;

    if (CLUTpoints == 1) goto Error; // Impossible value, 0 for no CLUT and then 2 at least

    // Padding
    if (!_cmsReadUInt8Number(io, NULL)) goto Error;

    // Do some checking
    if (InputChannels > cmsMAXCHANNELS) goto Error;
    if (OutputChannels > cmsMAXCHANNELS) goto Error;

    // Allocates an empty Pipeline
    NewLUT = cmsPipelineAlloc(self ->ContextID, InputChannels, OutputChannels);
    if (NewLUT == NULL) goto Error;

    // Read the Matrix
    if (!_cmsRead15Fixed16Number(io, &Matrix[0])) goto Error;
    if (!_cmsRead15Fixed16Number(io, &Matrix[1])) goto Error;
    if (!_cmsRead15Fixed16Number(io, &Matrix[2])) goto Error;
    if (!_cmsRead15Fixed16Number(io, &Matrix[3])) goto Error;
    if (!_cmsRead15Fixed16Number(io, &Matrix[4])) goto Error;
    if (!_cmsRead15Fixed16Number(io, &Matrix[5])) goto Error;
    if (!_cmsRead15Fixed16Number(io, &Matrix[6])) goto Error;
    if (!_cmsRead15Fixed16Number(io, &Matrix[7])) goto Error;
    if (!_cmsRead15Fixed16Number(io, &Matrix[8])) goto Error;

    // Only operates if not identity...
    if ((InputChannels == 3) && !_cmsMAT3isIdentity((cmsMAT3*) Matrix)) {

        mpemat = cmsStageAllocMatrix(self ->ContextID, 3, 3, Matrix, NULL);
        if (mpemat == NULL) goto Error;
        cmsPipelineInsertStage(NewLUT, cmsAT_BEGIN, mpemat);
    }

    // Get input tables
    if (!Read8bitTables(self ->ContextID, io, NewLUT, InputChannels)) goto Error;

    // Get 3D CLUT. Check the overflow....
    nTabSize = uipow(OutputChannels, CLUTpoints, InputChannels);
    if (nTabSize == (size_t) -1) goto Error;
    if (nTabSize > 0) {

        cmsUInt16Number *PtrW, *T;
        cmsUInt32Number Tsize;

        Tsize = (cmsUInt32Number) nTabSize * sizeof(cmsUInt16Number);

        PtrW = T = (cmsUInt16Number*) _cmsCalloc(self ->ContextID, nTabSize, sizeof(cmsUInt16Number));
        if (T == NULL) goto Error;

        Temp = (cmsUInt8Number*) _cmsMalloc(self ->ContextID, nTabSize);
        if (Temp == NULL) goto Error;

        if (io ->Read(io, Temp, nTabSize, 1) != 1) goto Error;

        for (i = 0; i < nTabSize; i++) {

            *PtrW++ = FROM_8_TO_16(Temp[i]);
        }
        _cmsFree(self ->ContextID, Temp);
        Temp = NULL;

        mpeclut = cmsStageAllocCLut16bit(self ->ContextID, CLUTpoints, InputChannels, OutputChannels, T);
        if (mpeclut == NULL) goto Error;
        cmsPipelineInsertStage(NewLUT, cmsAT_END, mpeclut);
        _cmsFree(self ->ContextID, T);
    }

    // Get output tables
    if (!Read8bitTables(self ->ContextID, io, NewLUT, OutputChannels)) goto Error;

    *nItems = 1;
    return NewLUT;

Error:
    if (NewLUT != NULL) cmsPipelineFree(NewLUT);
    return NULL;

    cmsUNUSED_PARAMETER(SizeOfTag);
}
0
[]
Little-CMS
886e2f524268efe8a1c3aa838c28e446fda24486
279,119,072,145,374,070,000,000,000,000,000,000,000
97
Fixes from coverity check
static bool check_btf_id_ok(const struct bpf_func_proto *fn)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) {
        if (fn->arg_type[i] == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i])
            return false;

        if (fn->arg_type[i] != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i])
            return false;
    }

    return true;
}
0
[]
linux
9b00f1b78809309163dda2d044d9e94a3c0248a3
101,572,012,832,630,820,000,000,000,000,000,000,000
14
bpf: Fix truncation handling for mod32 dst reg wrt zero Recently noticed that when mod32 with a known src reg of 0 is performed, then the dst register is 32-bit truncated in verifier: 0: R1=ctx(id=0,off=0,imm=0) R10=fp0 0: (b7) r0 = 0 1: R0_w=inv0 R1=ctx(id=0,off=0,imm=0) R10=fp0 1: (b7) r1 = -1 2: R0_w=inv0 R1_w=inv-1 R10=fp0 2: (b4) w2 = -1 3: R0_w=inv0 R1_w=inv-1 R2_w=inv4294967295 R10=fp0 3: (9c) w1 %= w0 4: R0_w=inv0 R1_w=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2_w=inv4294967295 R10=fp0 4: (b7) r0 = 1 5: R0_w=inv1 R1_w=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2_w=inv4294967295 R10=fp0 5: (1d) if r1 == r2 goto pc+1 R0_w=inv1 R1_w=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2_w=inv4294967295 R10=fp0 6: R0_w=inv1 R1_w=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2_w=inv4294967295 R10=fp0 6: (b7) r0 = 2 7: R0_w=inv2 R1_w=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2_w=inv4294967295 R10=fp0 7: (95) exit 7: R0=inv1 R1=inv(id=0,umin_value=4294967295,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2=inv4294967295 R10=fp0 7: (95) exit However, as a runtime result, we get 2 instead of 1, meaning the dst register does not contain (u32)-1 in this case. The reason is fairly straight forward given the 0 test leaves the dst register as-is: # ./bpftool p d x i 23 0: (b7) r0 = 0 1: (b7) r1 = -1 2: (b4) w2 = -1 3: (16) if w0 == 0x0 goto pc+1 4: (9c) w1 %= w0 5: (b7) r0 = 1 6: (1d) if r1 == r2 goto pc+1 7: (b7) r0 = 2 8: (95) exit This was originally not an issue given the dst register was marked as completely unknown (aka 64 bit unknown). However, after 468f6eafa6c4 ("bpf: fix 32-bit ALU op verification") the verifier casts the register output to 32 bit, and hence it becomes 32 bit unknown. Note that for the case where the src register is unknown, the dst register is marked 64 bit unknown. After the fix, the register is truncated by the runtime and the test passes: # ./bpftool p d x i 23 0: (b7) r0 = 0 1: (b7) r1 = -1 2: (b4) w2 = -1 3: (16) if w0 == 0x0 goto pc+2 4: (9c) w1 %= w0 5: (05) goto pc+1 6: (bc) w1 = w1 7: (b7) r0 = 1 8: (1d) if r1 == r2 goto pc+1 9: (b7) r0 = 2 10: (95) exit Semantics also match with {R,W}x mod{64,32} 0 -> {R,W}x. Invalid div has always been {R,W}x div{64,32} 0 -> 0. Rewrites are as follows: mod32: mod64: (16) if w0 == 0x0 goto pc+2 (15) if r0 == 0x0 goto pc+1 (9c) w1 %= w0 (9f) r1 %= r0 (05) goto pc+1 (bc) w1 = w1 Fixes: 468f6eafa6c4 ("bpf: fix 32-bit ALU op verification") Signed-off-by: Daniel Borkmann <[email protected]> Reviewed-by: John Fastabend <[email protected]> Acked-by: Alexei Starovoitov <[email protected]>
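The truncation rule this commit message spells out ({R,W}x mod32 0 -> {W}x, i.e. the destination keeps its value but must still be 32-bit truncated) can be modeled outside the kernel. The snippet below is an illustrative userspace sketch only; mod32_emulated() is an invented name and not part of the verifier.

#include <stdint.h>

/* Illustrative model of BPF mod32 semantics after the fix: when the
 * divisor is zero the destination keeps its low 32 bits unchanged, but
 * the upper 32 bits are still cleared, matching the rewritten JIT
 * sequence "(16) if w0 == 0x0 goto ...; (bc) w1 = w1". */
static uint64_t mod32_emulated(uint64_t dst, uint32_t src)
{
    uint32_t lo = (uint32_t)dst;   /* mod32 operates on the low word */

    if (src != 0)
        lo = lo % src;             /* ordinary mod32 result */
    /* src == 0: lo stays as-is, per the BPF-defined semantics */

    return (uint64_t)lo;           /* truncation: upper bits zeroed */
}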
R_API void r_io_bank_init(RIO *io) {
    r_return_if_fail (io);
    r_io_bank_fini (io);
    io->banks = r_id_storage_new (0, UT32_MAX);
}
0
[ "CWE-416" ]
radare2
b5cb90b28ec71fda3504da04e3cc94a362807f5e
32,958,583,594,162,116,000,000,000,000,000,000,000
5
Prefer memleak over usaf in io.bank's rbtree bug ##crash * That's a workaround, proper fix will come later * Reproducer: bins/fuzzed/iobank-crash * Reported by Akyne Choi via huntr.dev
static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mq mq;
    size_t s;
    uint16_t queues;

    s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
    if (s != sizeof(mq)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

    queues = lduw_p(&mq.virtqueue_pairs);

    if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queues > n->max_queues ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queues = queues;
    /* stop the backend before changing the number of queues to avoid handling a
     * disabled queue */
    virtio_net_set_status(vdev, vdev->status);
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}
0
[ "CWE-119" ]
qemu
98f93ddd84800f207889491e0b5d851386b459cf
15,127,200,991,551,416,000,000,000,000,000,000,000
34
virtio-net: out-of-bounds buffer write on load CVE-2013-4149 QEMU 1.3.0 out-of-bounds buffer write in virtio_net_load()@hw/net/virtio-net.c > } else if (n->mac_table.in_use) { > uint8_t *buf = g_malloc0(n->mac_table.in_use); We are allocating buffer of size n->mac_table.in_use > qemu_get_buffer(f, buf, n->mac_table.in_use * ETH_ALEN); and read to the n->mac_table.in_use size buffer n->mac_table.in_use * ETH_ALEN bytes, corrupting memory. If adversary controls state then memory written there is controlled by adversary. Reviewed-by: Michael Roth <[email protected]> Signed-off-by: Michael S. Tsirkin <[email protected]> Signed-off-by: Juan Quintela <[email protected]>
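The fix pattern this commit message describes — a count read from an untrusted migration stream must be validated against the fixed buffer it indexes before any bulk read — can be sketched in a few lines of C. The snippet below is an illustrative sketch only: the struct layout and the read_stream() callback are invented stand-ins, not QEMU's actual code.

#include <stdint.h>
#include <stddef.h>

#define MAC_TABLE_ENTRIES 64
#define ETH_ALEN 6

struct mac_table {
    uint32_t in_use;
    uint8_t macs[MAC_TABLE_ENTRIES * ETH_ALEN];
};

/* Reject an adversary-controlled entry count before sizing the read;
 * otherwise in_use * ETH_ALEN bytes would be written into a buffer
 * that only holds MAC_TABLE_ENTRIES entries. */
int load_mac_table(struct mac_table *t, uint32_t in_use,
                   int (*read_stream)(void *buf, size_t len))
{
    if (in_use > MAC_TABLE_ENTRIES)
        return -1;                 /* reject, don't clamp silently */

    t->in_use = in_use;
    return read_stream(t->macs, (size_t)in_use * ETH_ALEN);
}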
static __cold void io_uring_del_tctx_node(unsigned long index)
{
    struct io_uring_task *tctx = current->io_uring;
    struct io_tctx_node *node;

    if (!tctx)
        return;
    node = xa_erase(&tctx->xa, index);
    if (!node)
        return;

    WARN_ON_ONCE(current != node->task);
    WARN_ON_ONCE(list_empty(&node->ctx_node));

    mutex_lock(&node->ctx->uring_lock);
    list_del(&node->ctx_node);
    mutex_unlock(&node->ctx->uring_lock);

    if (tctx->last == node->ctx)
        tctx->last = NULL;
    kfree(node);
}
0
[ "CWE-416" ]
linux
e677edbcabee849bfdd43f1602bccbecf736a646
83,790,641,641,650,895,000,000,000,000,000,000,000
22
io_uring: fix race between timeout flush and removal io_flush_timeouts() assumes the timeout isn't in progress of triggering or being removed/canceled, so it unconditionally removes it from the timeout list and attempts to cancel it. Leave it on the list and let the normal timeout cancelation take care of it. Cc: [email protected] # 5.5+ Signed-off-by: Jens Axboe <[email protected]>
win_new_width(win_T *wp, int width)
{
    wp->w_width = width;
    wp->w_lines_valid = 0;
    changed_line_abv_curs_win(wp);
    invalidate_botline_win(wp);
    if (wp == curwin)
    {
        update_topline();
        curs_columns(TRUE);     // validate w_wrow
    }
    redraw_win_later(wp, NOT_VALID);
    wp->w_redr_status = TRUE;
}
0
[ "CWE-476" ]
vim
0f6e28f686dbb59ab3b562408ab9b2234797b9b1
16,386,323,795,977,240,000,000,000,000,000,000,000
14
patch 8.2.4428: crash when switching tabpage while in the cmdline window Problem: Crash when switching tabpage while in the cmdline window. Solution: Disallow switching tabpage when in the cmdline window.
virDomainBlockJobSetSpeed(virDomainPtr dom, const char *disk,
                          unsigned long bandwidth, unsigned int flags)
{
    virConnectPtr conn;

    VIR_DOMAIN_DEBUG(dom, "disk=%s, bandwidth=%lu, flags=%x",
                     disk, bandwidth, flags);

    virResetLastError();

    virCheckDomainReturn(dom, -1);
    conn = dom->conn;

    virCheckReadOnlyGoto(conn->flags, error);
    virCheckNonNullArgGoto(disk, error);

    if (conn->driver->domainBlockJobSetSpeed) {
        int ret;
        ret = conn->driver->domainBlockJobSetSpeed(dom, disk, bandwidth, flags);
        if (ret < 0)
            goto error;
        return ret;
    }

    virReportUnsupportedError();

 error:
    virDispatchError(dom->conn);
    return -1;
}
0
[ "CWE-254" ]
libvirt
506e9d6c2d4baaf580d489fff0690c0ff2ff588f
140,579,469,351,613,120,000,000,000,000,000,000,000
30
virDomainGetTime: Deny on RO connections We have a policy that if API may end up talking to a guest agent it should require RW connection. We don't obey the rule in virDomainGetTime(). Signed-off-by: Michal Privoznik <[email protected]>
HttpTransact::handle_cache_operation_on_forward_server_response(State* s)
{
  DebugTxn("http_trans", "[handle_cache_operation_on_forward_server_response] (hcoofsr)");
  DebugTxn("http_seq", "[handle_cache_operation_on_forward_server_response]");

  HTTPHdr *base_response = NULL;
  HTTPStatus server_response_code = HTTP_STATUS_NONE;
  HTTPStatus client_response_code = HTTP_STATUS_NONE;
  const char *warn_text = NULL;
  bool cacheable = false;

  cacheable = is_response_cacheable(s, &s->hdr_info.client_request, &s->hdr_info.server_response);
  DebugTxn("http_trans", "[hcoofsr] response %s cacheable", cacheable ? "is" : "is not");

  // set the correct next action, cache action, response code, and base response

  server_response_code = s->hdr_info.server_response.status_get();
  switch (server_response_code) {
  case HTTP_STATUS_NOT_MODIFIED:       // 304
    SET_VIA_STRING(VIA_SERVER_RESULT, VIA_SERVER_NOT_MODIFIED);

    // determine the correct cache action, next state, and response
    // precondition: s->cache_info.action should be one of the following
    // CACHE_DO_DELETE, or CACHE_DO_UPDATE; otherwise, it's an error.
    if (s->api_server_response_ignore && s->cache_info.action == CACHE_DO_UPDATE) {
      s->api_server_response_ignore = false;
      ink_assert(s->cache_info.object_read);
      base_response = s->cache_info.object_read->response_get();
      s->cache_info.action = CACHE_DO_SERVE;
      DebugTxn("http_trans", "[hcoofsr] not merging, cache action changed to: %s",
               HttpDebugNames::get_cache_action_name(s->cache_info.action));
      s->next_action = SM_ACTION_SERVE_FROM_CACHE;
      client_response_code = base_response->status_get();
    } else if ((s->cache_info.action == CACHE_DO_DELETE) || ((s->cache_info.action == CACHE_DO_UPDATE) && !cacheable)) {
      if (is_request_conditional(&s->hdr_info.client_request)) {
        client_response_code =
          HttpTransactCache::match_response_to_request_conditionals(&s->hdr_info.client_request,
                                                                    s->cache_info.object_read->response_get());
      } else {
        client_response_code = HTTP_STATUS_OK;
      }

      if (client_response_code != HTTP_STATUS_OK) {
        // we can just forward the not modified response
        // from the server and delete the cached copy
        base_response = &s->hdr_info.server_response;
        client_response_code = base_response->status_get();
        s->cache_info.action = CACHE_DO_DELETE;
        s->next_action = SM_ACTION_INTERNAL_CACHE_DELETE;
      } else {
        // We got screwed. The client did not send a conditional request,
        // but we had a cached copy which we revalidated. The server has
        // now told us to delete the cached copy and sent back a 304.
        // We need to send the cached copy to the client, then delete it.
        if (s->method == HTTP_WKSIDX_HEAD) {
          s->cache_info.action = CACHE_DO_DELETE;
          s->next_action = SM_ACTION_SERVER_READ;
        } else {
          s->cache_info.action = CACHE_DO_SERVE_AND_DELETE;
          s->next_action = SM_ACTION_SERVE_FROM_CACHE;
        }
        base_response = s->cache_info.object_read->response_get();
        client_response_code = base_response->status_get();
      }

    } else if (s->cache_info.action == CACHE_DO_UPDATE && is_request_conditional(&s->hdr_info.server_request)) {
      // CACHE_DO_UPDATE and server response is cacheable
      if (is_request_conditional(&s->hdr_info.client_request)) {
        if (s->txn_conf->cache_when_to_revalidate != 4)
          client_response_code =
            HttpTransactCache::match_response_to_request_conditionals(&s->hdr_info.client_request,
                                                                      s->cache_info.object_read->response_get());
        else
          client_response_code = server_response_code;
      } else {
        client_response_code = HTTP_STATUS_OK;
      }

      if (client_response_code != HTTP_STATUS_OK) {
        // delete the cached copy unless configured to always verify IMS
        if (s->txn_conf->cache_when_to_revalidate != 4) {
          s->cache_info.action = CACHE_DO_UPDATE;
          s->next_action = SM_ACTION_INTERNAL_CACHE_UPDATE_HEADERS;
          /* base_response will be set after updating headers below */
        } else {
          s->cache_info.action = CACHE_DO_NO_ACTION;
          s->next_action = SM_ACTION_INTERNAL_CACHE_NOOP;
          base_response = &s->hdr_info.server_response;
        }
      } else {
        if (s->method == HTTP_WKSIDX_HEAD) {
          s->cache_info.action = CACHE_DO_UPDATE;
          s->next_action = SM_ACTION_SERVER_READ;
        } else {
          if (s->hdr_info.client_request.presence(MIME_PRESENCE_RANGE)) {
            s->state_machine->do_range_setup_if_necessary();
            // Note that even if the Range request is not satisfiable, we
            // update and serve this cache. This will give a 200 response to
            // a bad client, but allows us to avoid pegging the origin (e.g. abuse).
          }
          s->cache_info.action = CACHE_DO_SERVE_AND_UPDATE;
          s->next_action = SM_ACTION_SERVE_FROM_CACHE;
        }
        /* base_response will be set after updating headers below */
      }

    } else {
      // cache action != CACHE_DO_DELETE and != CACHE_DO_UPDATE
      // bogus response from server. deal by tunnelling to client.
      // server should not have sent back a 304 because our request
      // should not have been an conditional.
      DebugTxn("http_trans", "[hcoofsr] 304 for non-conditional request");
      s->cache_info.action = CACHE_DO_NO_ACTION;
      s->next_action = SM_ACTION_INTERNAL_CACHE_NOOP;
      client_response_code = s->hdr_info.server_response.status_get();
      base_response = &s->hdr_info.server_response;

      // since this is bad, insert warning header into client response
      // The only exception case is conditional client request,
      // cache miss, and client request being unlikely cacheable.
      // In this case, the server request is given the same
      // conditional headers as client request (see build_request()).
      // So an unexpected 304 might be received.
      // FIXME: check this case
      if (is_request_likely_cacheable(s, &s->hdr_info.client_request)) {
        warn_text = "Proxy received unexpected 304 response; "
                    "content may be stale";
      }
    }

    break;

  case HTTP_STATUS_HTTPVER_NOT_SUPPORTED:      // 505
    {
      bool keep_alive = (s->current.server->keep_alive == HTTP_KEEPALIVE);

      s->next_action = how_to_open_connection(s);

      /* Downgrade the request level and retry */
      if (!HttpTransactHeaders::downgrade_request(&keep_alive, &s->hdr_info.server_request)) {
        build_error_response(s, HTTP_STATUS_HTTPVER_NOT_SUPPORTED, "HTTP Version Not Supported",
                             "response#bad_version", NULL);
        s->next_action = SM_ACTION_SEND_ERROR_CACHE_NOOP;
        s->already_downgraded = true;
      } else {
        if (!keep_alive) {
          /* START Hack */
          (s->hdr_info.server_request).field_delete(MIME_FIELD_PROXY_CONNECTION, MIME_LEN_PROXY_CONNECTION);
          /* END Hack */
        }
        s->already_downgraded = true;
        s->next_action = how_to_open_connection(s);
      }
    }
    return;

  default:
    DebugTxn("http_trans", "[hcoofsr] response code: %d", server_response_code);
    SET_VIA_STRING(VIA_SERVER_RESULT, VIA_SERVER_SERVED);
    SET_VIA_STRING(VIA_PROXY_RESULT, VIA_PROXY_SERVED);

    /* if we receive a 500, 502, 503 or 504 while revalidating
       a document, treat the response as a 304 and in effect
       revalidate the document for negative_revalidating_lifetime.
       (negative revalidating)
     */
    if ((server_response_code == HTTP_STATUS_INTERNAL_SERVER_ERROR ||
         server_response_code == HTTP_STATUS_GATEWAY_TIMEOUT ||
         server_response_code == HTTP_STATUS_BAD_GATEWAY ||
         server_response_code == HTTP_STATUS_SERVICE_UNAVAILABLE) &&
        s->cache_info.action == CACHE_DO_UPDATE &&
        s->txn_conf->negative_revalidating_enabled &&
        is_stale_cache_response_returnable(s)) {
      DebugTxn("http_trans", "[hcoofsr] negative revalidating: revalidate stale object and serve from cache");

      s->cache_info.object_store.create();
      s->cache_info.object_store.request_set(&s->hdr_info.client_request);
      s->cache_info.object_store.response_set(s->cache_info.object_read->response_get());
      base_response = s->cache_info.object_store.response_get();
      time_t exp_time = s->txn_conf->negative_revalidating_lifetime + ink_cluster_time();
      base_response->set_expires(exp_time);

      SET_VIA_STRING(VIA_CACHE_FILL_ACTION, VIA_CACHE_UPDATED);
      HTTP_INCREMENT_TRANS_STAT(http_cache_updates_stat);

      // unset Cache-control: "need-revalidate-once" (if it's set)
      // This directive is used internally by T.S. to invalidate
      // documents so that an invalidated document needs to be
      // revalidated again.
      base_response->unset_cooked_cc_need_revalidate_once();

      if (is_request_conditional(&s->hdr_info.client_request) &&
          HttpTransactCache::match_response_to_request_conditionals(&s->hdr_info.client_request,
                                                                    s->cache_info.object_read->response_get()) ==
          HTTP_STATUS_NOT_MODIFIED) {
        s->next_action = SM_ACTION_INTERNAL_CACHE_UPDATE_HEADERS;
        client_response_code = HTTP_STATUS_NOT_MODIFIED;
      } else {
        if (s->method == HTTP_WKSIDX_HEAD) {
          s->cache_info.action = CACHE_DO_UPDATE;
          s->next_action = SM_ACTION_INTERNAL_CACHE_NOOP;
        } else {
          s->cache_info.action = CACHE_DO_SERVE_AND_UPDATE;
          s->next_action = SM_ACTION_SERVE_FROM_CACHE;
        }

        client_response_code = s->cache_info.object_read->response_get()->status_get();
      }

      ink_assert(base_response->valid());

      if (client_response_code == HTTP_STATUS_NOT_MODIFIED) {
        ink_assert(GET_VIA_STRING(VIA_CLIENT_REQUEST) != VIA_CLIENT_SIMPLE);
        SET_VIA_STRING(VIA_CLIENT_REQUEST, VIA_CLIENT_IMS);
        SET_VIA_STRING(VIA_PROXY_RESULT, VIA_PROXY_NOT_MODIFIED);
      } else {
        SET_VIA_STRING(VIA_PROXY_RESULT, VIA_PROXY_SERVED);
      }

      ink_assert(client_response_code != HTTP_STATUS_NONE);

      if (s->next_action == SM_ACTION_SERVE_FROM_CACHE && s->state_machine->do_transform_open()) {
        set_header_for_transform(s, base_response);
      } else {
        build_response(s, base_response, &s->hdr_info.client_response, s->client_info.http_version, client_response_code);
      }

      return;
    }

    s->next_action = SM_ACTION_SERVER_READ;
    client_response_code = server_response_code;
    base_response = &s->hdr_info.server_response;

    s->negative_caching = is_negative_caching_appropriate(s);

    // determine the correct cache action given the original cache action,
    // cacheability of server response, and request method
    // precondition: s->cache_info.action is one of the following
    // CACHE_DO_UPDATE, CACHE_DO_WRITE, or CACHE_DO_DELETE
    if (s->api_server_response_no_store) {
      s->cache_info.action = CACHE_DO_NO_ACTION;
    } else if (s->api_server_response_ignore &&
               server_response_code == HTTP_STATUS_OK &&
               s->hdr_info.server_request.method_get_wksidx() == HTTP_WKSIDX_HEAD) {
      s->api_server_response_ignore = false;
      ink_assert(s->cache_info.object_read);
      base_response = s->cache_info.object_read->response_get();
      s->cache_info.action = CACHE_DO_SERVE;
      DebugTxn("http_trans", "[hcoofsr] ignoring server response, "
               "cache action changed to: %s", HttpDebugNames::get_cache_action_name(s->cache_info.action));
      s->next_action = SM_ACTION_SERVE_FROM_CACHE;
      client_response_code = base_response->status_get();
    } else if (s->cache_info.action == CACHE_DO_UPDATE) {
      if (s->www_auth_content == CACHE_AUTH_FRESH) {
        s->cache_info.action = CACHE_DO_NO_ACTION;
      } else if (s->www_auth_content == CACHE_AUTH_STALE && server_response_code == HTTP_STATUS_UNAUTHORIZED) {
        s->cache_info.action = CACHE_DO_NO_ACTION;
      } else if (!cacheable) {
        s->cache_info.action = CACHE_DO_DELETE;
      } else if (s->method == HTTP_WKSIDX_HEAD) {
        s->cache_info.action = CACHE_DO_DELETE;
      } else {
        ink_assert(s->cache_info.object_read != 0);
        s->cache_info.action = CACHE_DO_REPLACE;
      }

    } else if (s->cache_info.action == CACHE_DO_WRITE) {
      if (!cacheable && !s->negative_caching) {
        s->cache_info.action = CACHE_DO_NO_ACTION;
      } else if (s->method == HTTP_WKSIDX_HEAD) {
        s->cache_info.action = CACHE_DO_NO_ACTION;
      } else {
        s->cache_info.action = CACHE_DO_WRITE;
      }

    } else if (s->cache_info.action == CACHE_DO_DELETE) {
      // do nothing

    } else {
      ink_assert(!("cache action inconsistent with current state"));
    }
    // postcondition: s->cache_info.action is one of the following
    // CACHE_DO_REPLACE, CACHE_DO_WRITE, CACHE_DO_DELETE, or
    // CACHE_DO_NO_ACTION

    // Check see if we ought to serve the client a 304 based on
    //   it's IMS date. We may gotten a 200 back from the origin
    //   server if our (the proxies's) cached copy was out of date
    //   but the client's wasn't. However, if the response is
    //   not cacheable we ought not issue a 304 to the client so
    //   make sure we are writing the document to the cache if
    //   before issuing a 304
    if (s->cache_info.action == CACHE_DO_WRITE ||
        s->cache_info.action == CACHE_DO_NO_ACTION || s->cache_info.action == CACHE_DO_REPLACE) {
      if (s->negative_caching) {
        HTTPHdr *resp;
        s->cache_info.object_store.create();
        s->cache_info.object_store.request_set(&s->hdr_info.client_request);
        s->cache_info.object_store.response_set(&s->hdr_info.server_response);
        resp = s->cache_info.object_store.response_get();
        if (!resp->presence(MIME_PRESENCE_EXPIRES)) {
          time_t exp_time = s->txn_conf->negative_caching_lifetime + ink_cluster_time();

          resp->set_expires(exp_time);
        }
      } else if (is_request_conditional(&s->hdr_info.client_request) && server_response_code == HTTP_STATUS_OK) {
        client_response_code =
          HttpTransactCache::match_response_to_request_conditionals(&s->hdr_info.client_request,
                                                                    &s->hdr_info.server_response);

        DebugTxn("http_trans", "[hcoofsr] conditional request, 200 "
                 "response, send back 304 if possible [crc=%d]", client_response_code);
        if ((client_response_code == HTTP_STATUS_NOT_MODIFIED) || (client_response_code == HTTP_STATUS_PRECONDITION_FAILED)) {
          switch (s->cache_info.action) {
          case CACHE_DO_WRITE:
          case CACHE_DO_REPLACE:
            s->next_action = SM_ACTION_INTERNAL_CACHE_WRITE;
            break;
          case CACHE_DO_DELETE:
            s->next_action = SM_ACTION_INTERNAL_CACHE_DELETE;
            break;
          default:
            s->next_action = SM_ACTION_INTERNAL_CACHE_NOOP;
            break;
          }
        } else {
          SET_VIA_STRING(VIA_PROXY_RESULT, VIA_PROXY_SERVER_REVALIDATED);
        }
      }
    } else if (s->negative_caching) {
      s->negative_caching = false;
    }

    break;
  }

  // update stat, set via string, etc
  switch (s->cache_info.action) {
  case CACHE_DO_SERVE_AND_DELETE:
    // fall through
  case CACHE_DO_DELETE:
    DebugTxn("http_trans", "[hcoofsr] delete cached copy");
    SET_VIA_STRING(VIA_CACHE_FILL_ACTION, VIA_CACHE_DELETED);
    HTTP_INCREMENT_TRANS_STAT(http_cache_deletes_stat);
    break;
  case CACHE_DO_WRITE:
    DebugTxn("http_trans", "[hcoofsr] cache write");
    SET_VIA_STRING(VIA_CACHE_FILL_ACTION, VIA_CACHE_WRITTEN);
    HTTP_INCREMENT_TRANS_STAT(http_cache_writes_stat);
    break;
  case CACHE_DO_SERVE_AND_UPDATE:
    // fall through
  case CACHE_DO_UPDATE:
    // fall through
  case CACHE_DO_REPLACE:
    DebugTxn("http_trans", "[hcoofsr] cache update/replace");
    SET_VIA_STRING(VIA_CACHE_FILL_ACTION, VIA_CACHE_UPDATED);
    HTTP_INCREMENT_TRANS_STAT(http_cache_updates_stat);
    break;
  default:
    break;
  }

  if ((client_response_code == HTTP_STATUS_NOT_MODIFIED) && (s->cache_info.action != CACHE_DO_NO_ACTION)) {
    /* ink_assert(GET_VIA_STRING(VIA_CLIENT_REQUEST) != VIA_CLIENT_SIMPLE); */
    DebugTxn("http_trans", "[hcoofsr] Client request was conditional");
    SET_VIA_STRING(VIA_CLIENT_REQUEST, VIA_CLIENT_IMS);
    SET_VIA_STRING(VIA_PROXY_RESULT, VIA_PROXY_NOT_MODIFIED);
  } else {
    SET_VIA_STRING(VIA_PROXY_RESULT, VIA_PROXY_SERVED);
  }

  ink_assert(client_response_code != HTTP_STATUS_NONE);

  // The correct cache action, next action, and response code are set.
  // Do the real work below.

  // first update the cached object
  if ((s->cache_info.action == CACHE_DO_UPDATE) || (s->cache_info.action == CACHE_DO_SERVE_AND_UPDATE)) {
    DebugTxn("http_trans", "[hcoofsr] merge and update cached copy");
    merge_and_update_headers_for_cache_update(s);
    base_response = s->cache_info.object_store.response_get();
    // unset Cache-control: "need-revalidate-once" (if it's set)
    // This directive is used internally by T.S. to invalidate documents
    // so that an invalidated document needs to be revalidated again.
    base_response->unset_cooked_cc_need_revalidate_once();
    // unset warning revalidation failed header if it set
    // (potentially added by negative revalidating)
    delete_warning_value(base_response, HTTP_WARNING_CODE_REVALIDATION_FAILED);
  }
  ink_assert(base_response->valid());

  if ((s->cache_info.action == CACHE_DO_WRITE) || (s->cache_info.action == CACHE_DO_REPLACE)) {
    set_headers_for_cache_write(s, &s->cache_info.object_store, &s->hdr_info.server_request, &s->hdr_info.server_response);
  }
  // 304, 412, and 416 responses are handled here
  if ((client_response_code == HTTP_STATUS_NOT_MODIFIED) || (client_response_code == HTTP_STATUS_PRECONDITION_FAILED)) {
    // Because we are decoupling User-Agent validation from
    //  Traffic Server validation just build a regular 304
    //  if the exception of adding prepending the VIA
    //  header to show the revalidation path
    build_response(s, base_response, &s->hdr_info.client_response, s->client_info.http_version, client_response_code);

    // Copy over the response via field (if any) preserving
    //  the order of the fields
    MIMEField *resp_via = s->hdr_info.server_response.field_find(MIME_FIELD_VIA, MIME_LEN_VIA);
    if (resp_via) {
      MIMEField *our_via;
      our_via = s->hdr_info.client_response.field_find(MIME_FIELD_VIA, MIME_LEN_VIA);
      if (our_via == NULL) {
        our_via = s->hdr_info.client_response.field_create(MIME_FIELD_VIA, MIME_LEN_VIA);
        s->hdr_info.client_response.field_attach(our_via);
      }
      // HDR FIX ME - Mulitple appends are VERY slow
      while (resp_via) {
        int clen;
        const char *cfield = resp_via->value_get(&clen);
        s->hdr_info.client_response.field_value_append(our_via, cfield, clen, true);
        resp_via = resp_via->m_next_dup;
      }
    }
    // a warning text is added only in the case of a NOT MODIFIED response
    if (warn_text) {
      HttpTransactHeaders::insert_warning_header(s->http_config_param, &s->hdr_info.client_response,
                                                 HTTP_WARNING_CODE_MISC_WARNING, warn_text, strlen(warn_text));
    }

    if (!s->cop_test_page)
      DUMP_HEADER("http_hdrs", &s->hdr_info.client_response, s->state_machine_id, "Proxy's Response (Client Conditionals)");
    return;
  }
  // all other responses (not 304, 412, 416) are handled here
  else {
    if (((s->next_action == SM_ACTION_SERVE_FROM_CACHE) ||
         (s->next_action == SM_ACTION_SERVER_READ)) && s->state_machine->do_transform_open()) {
      set_header_for_transform(s, base_response);
    } else {
      build_response(s, base_response, &s->hdr_info.client_response, s->client_info.http_version, client_response_code);
    }
  }

  return;
}
0
[ "CWE-119" ]
trafficserver
8b5f0345dade6b2822d9b52c8ad12e63011a5c12
149,568,304,298,000,850,000,000,000,000,000,000,000
443
Fix the internal buffer sizing. Thanks to Sudheer for helping isolate this bug
static void pn_sock_close(struct sock *sk, long timeout)
{
    sk_common_release(sk);
}
0
[ "CWE-20" ]
net
bceaa90240b6019ed73b49965eac7d167610be69
333,829,161,558,082,340,000,000,000,000,000,000,000
4
inet: prevent leakage of uninitialized memory to user in recv syscalls Only update *addr_len when we actually fill in sockaddr, otherwise we can return uninitialized memory from the stack to the caller in the recvfrom, recvmmsg and recvmsg syscalls. Drop the the (addr_len == NULL) checks because we only get called with a valid addr_len pointer either from sock_common_recvmsg or inet_recvmsg. If a blocking read waits on a socket which is concurrently shut down we now return zero and set msg_msgnamelen to 0. Reported-by: mpb <[email protected]> Suggested-by: Eric Dumazet <[email protected]> Signed-off-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: David S. Miller <[email protected]>
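The rule this commit message states — update *addr_len only when the sockaddr has actually been filled in — can be sketched in userspace C. The snippet below is an illustrative sketch only; recv_with_addr() and the fill_peer() callback are invented stand-ins for the protocol-specific kernel code.

#include <string.h>
#include <sys/socket.h>

/* Write the length out only after the address data is valid, so an
 * early-return path can never report a length for uninitialized bytes. */
static int recv_with_addr(struct sockaddr_storage *peer, socklen_t *addr_len,
                          int (*fill_peer)(struct sockaddr_storage *))
{
    struct sockaddr_storage tmp;

    memset(&tmp, 0, sizeof(tmp));
    if (fill_peer(&tmp) < 0) {
        *addr_len = 0;        /* nothing valid to report */
        return -1;
    }

    memcpy(peer, &tmp, sizeof(tmp));
    *addr_len = sizeof(tmp);  /* set the length only once the data is valid */
    return 0;
}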
free_parser_data(void)
{
    if (LIST_EXISTS(defs))
        free_list(&defs);

    if (LIST_EXISTS(multiline_stack))
        free_list(&multiline_stack);
}
0
[ "CWE-59", "CWE-61" ]
keepalived
04f2d32871bb3b11d7dc024039952f2fe2750306
300,025,461,993,212,520,000,000,000,000,000,000,000
8
When opening files for write, ensure they aren't symbolic links Issue #1048 identified that if, for example, a non-privileged user created a symbolic link from /etc/keepalived.data to /etc/passwd, writing to /etc/keepalived.data (which could be invoked via DBus) would cause /etc/passwd to be overwritten. This commit stops keepalived writing to pathnames where the ultimate component is a symbolic link, by setting O_NOFOLLOW whenever opening a file for writing. This might break some setups, where, for example, /etc/keepalived.data was a symbolic link to /home/fred/keepalived.data. If this was the case, instead create a symbolic link from /home/fred/keepalived.data to /tmp/keepalived.data, so that the file is still accessible via /home/fred/keepalived.data. There doesn't appear to be a way around this backward incompatibility, since even checking if the pathname is a symbolic link prior to opening for writing would create a race condition. Signed-off-by: Quentin Armitage <[email protected]>
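The mitigation this commit message describes can be shown in a few lines of portable C. This is an illustrative sketch only (open_state_file() and the path are invented); O_NOFOLLOW is the real POSIX flag, and on Linux the open fails with ELOOP when the final path component is a symlink, which removes the check-then-open race the message mentions.

#include <fcntl.h>
#include <stdio.h>
#include <errno.h>

int open_state_file(const char *path)
{
    /* O_NOFOLLOW makes the open itself refuse a symlinked final
     * component, so there is no window between a stat() check and
     * the open in which an attacker could swap in a symlink. */
    int fd = open(path, O_WRONLY | O_CREAT | O_NOFOLLOW, 0644);

    if (fd < 0 && errno == ELOOP)
        fprintf(stderr, "%s is a symbolic link, refusing to write\n", path);
    return fd;
}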
bool operator()(const QString &left, const QString &right) const
{
#ifdef QBT_USES_QT5
#if defined(Q_OS_WIN)
    // Without ICU library, QCollator uses the native API on Windows 7+. But that API
    // sorts older versions of μTorrent differently than the newer ones because the
    // 'μ' character is encoded differently and the native API can't cope with that.
    // So default to using our custom natural sorting algorithm instead.
    // See #5238 and #5240
    // Without ICU library, QCollator doesn't support `setNumericMode(true)` on OS older than Win7
    // if (QSysInfo::windowsVersion() < QSysInfo::WV_WINDOWS7)
    return lessThan(left, right);
#endif
    return (m_collator.compare(left, right) < 0);
#else
    return lessThan(left, right);
#endif
}
0
[ "CWE-20", "CWE-79" ]
qBittorrent
6ca3e4f094da0a0017cb2d483ec1db6176bb0b16
9,480,498,440,044,843,000,000,000,000,000,000,000
18
Add Utils::String::toHtmlEscaped
static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
                                   gpa_t addr, int len)
{
    /* is it in a batchable area ?
     * (addr,len) is fully included in
     * (zone->addr, zone->size)
     */
    if (len < 0)
        return 0;
    if (addr + len < addr)
        return 0;
    if (addr < dev->zone.addr)
        return 0;
    if (addr + len > dev->zone.addr + dev->zone.size)
        return 0;
    return 1;
}
0
[ "CWE-787" ]
kvm
b60fe990c6b07ef6d4df67bc0530c7c90a62623a
231,838,230,199,082,050,000,000,000,000,000,000,000
17
KVM: coalesced_mmio: add bounds checking The first/last indexes are typically shared with a user app. The app can change the 'last' index that the kernel uses to store the next result. This change sanity checks the index before using it for writing to a potentially arbitrary address. This fixes CVE-2019-14821. Cc: [email protected] Fixes: 5f94c1741bdc ("KVM: Add coalesced MMIO support (common part)") Signed-off-by: Matt Delco <[email protected]> Signed-off-by: Jim Mattson <[email protected]> Reported-by: [email protected] [Use READ_ONCE. - Paolo] Signed-off-by: Paolo Bonzini <[email protected]>
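The hardening this commit message describes — snapshot an index shared with an untrusted party exactly once, then bounds-check the local copy before using it — can be sketched in userspace C. The snippet below is an illustrative sketch only: the ring layout and ring_push() are invented, and the volatile read merely stands in for the kernel's READ_ONCE.

#include <stdint.h>

#define RING_SIZE 64

struct mmio_ring {
    volatile uint32_t last;        /* shared with an untrusted user app */
    uint64_t slots[RING_SIZE];
};

int ring_push(struct mmio_ring *ring, uint64_t val)
{
    uint32_t last = ring->last;    /* single snapshot of the shared index */

    if (last >= RING_SIZE)
        return -1;                 /* reject an out-of-range index: without
                                    * this, 'last' would select an arbitrary
                                    * write address */

    ring->slots[last] = val;
    ring->last = (last + 1) % RING_SIZE;
    return 0;
}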
GF_Err drep_Size(GF_Box *s)
{
    s->size += 8;
    return GF_OK;
}
0
[ "CWE-400", "CWE-401" ]
gpac
d2371b4b204f0a3c0af51ad4e9b491144dd1225c
297,648,047,849,360,200,000,000,000,000,000,000,000
5
prevent dref memleak on invalid input (#1183)
lambda_function_body(
        char_u      **arg,
        typval_T    *rettv,
        evalarg_T   *evalarg,
        garray_T    *newargs,
        garray_T    *argtypes,
        int         varargs,
        garray_T    *default_args,
        char_u      *ret_type)
{
    int         evaluate = (evalarg->eval_flags & EVAL_EVALUATE);
    garray_T    *gap = &evalarg->eval_ga;
    garray_T    *freegap = &evalarg->eval_freega;
    ufunc_T     *ufunc = NULL;
    exarg_T     eap;
    garray_T    newlines;
    char_u      *cmdline = NULL;
    int         ret = FAIL;
    char_u      *line_to_free = NULL;
    partial_T   *pt;
    char_u      *name;
    int         lnum_save = -1;
    linenr_T    sourcing_lnum_top = SOURCING_LNUM;

    if (!ends_excmd2(*arg, skipwhite(*arg + 1)))
    {
        semsg(_(e_trailing_arg), *arg + 1);
        return FAIL;
    }

    CLEAR_FIELD(eap);
    eap.cmdidx = CMD_block;
    eap.forceit = FALSE;
    eap.cmdlinep = &cmdline;
    eap.skip = !evaluate;
    if (evalarg->eval_cctx != NULL)
        fill_exarg_from_cctx(&eap, evalarg->eval_cctx);
    else
    {
        eap.getline = evalarg->eval_getline;
        eap.cookie = evalarg->eval_cookie;
    }

    ga_init2(&newlines, (int)sizeof(char_u *), 10);
    if (get_function_body(&eap, &newlines, NULL, &line_to_free) == FAIL)
    {
        vim_free(cmdline);
        goto erret;
    }

    // When inside a lambda must add the function lines to evalarg.eval_ga.
    evalarg->eval_break_count += newlines.ga_len;
    if (gap->ga_itemsize > 0)
    {
        int     idx;
        char_u  *last;
        size_t  plen;
        char_u  *pnl;

        for (idx = 0; idx < newlines.ga_len; ++idx)
        {
            char_u  *p = skipwhite(((char_u **)newlines.ga_data)[idx]);

            if (ga_grow(gap, 1) == FAIL || ga_grow(freegap, 1) == FAIL)
                goto erret;

            // Going to concatenate the lines after parsing. For an empty or
            // comment line use an empty string.
            // Insert NL characters at the start of each line, the string will
            // be split again later in .get_lambda_tv().
            if (*p == NUL || vim9_comment_start(p))
                p = (char_u *)"";
            plen = STRLEN(p);
            pnl = vim_strnsave((char_u *)"\n", plen + 1);
            if (pnl != NULL)
                mch_memmove(pnl + 1, p, plen + 1);
            ((char_u **)gap->ga_data)[gap->ga_len++] = pnl;
            ((char_u **)freegap->ga_data)[freegap->ga_len++] = pnl;
        }
        if (ga_grow(gap, 1) == FAIL || ga_grow(freegap, 1) == FAIL)
            goto erret;
        if (eap.nextcmd != NULL)
            // more is following after the "}", which was skipped
            last = cmdline;
        else
            // nothing is following the "}"
            last = (char_u *)"}";
        plen = STRLEN(last);
        pnl = vim_strnsave((char_u *)"\n", plen + 1);
        if (pnl != NULL)
            mch_memmove(pnl + 1, last, plen + 1);
        ((char_u **)gap->ga_data)[gap->ga_len++] = pnl;
        ((char_u **)freegap->ga_data)[freegap->ga_len++] = pnl;
    }

    if (eap.nextcmd != NULL)
    {
        garray_T *tfgap = &evalarg->eval_tofree_ga;

        // Something comes after the "}".
        *arg = eap.nextcmd;

        // "arg" points into cmdline, need to keep the line and free it later.
        if (ga_grow(tfgap, 1) == OK)
        {
            ((char_u **)(tfgap->ga_data))[tfgap->ga_len++] = cmdline;
            evalarg->eval_using_cmdline = TRUE;
            if (cmdline == line_to_free)
                line_to_free = NULL;
        }
    }
    else
        *arg = (char_u *)"";

    if (!evaluate)
    {
        ret = OK;
        goto erret;
    }

    name = get_lambda_name();
    ufunc = alloc_clear(offsetof(ufunc_T, uf_name) + STRLEN(name) + 1);
    if (ufunc == NULL)
        goto erret;
    set_ufunc_name(ufunc, name);
    if (hash_add(&func_hashtab, UF2HIKEY(ufunc)) == FAIL)
        goto erret;
    ufunc->uf_flags = FC_LAMBDA;
    ufunc->uf_refcount = 1;
    ufunc->uf_args = *newargs;
    newargs->ga_data = NULL;
    ufunc->uf_def_args = *default_args;
    default_args->ga_data = NULL;
    ufunc->uf_func_type = &t_func_any;

    // error messages are for the first function line
    lnum_save = SOURCING_LNUM;
    SOURCING_LNUM = sourcing_lnum_top;

    // parse argument types
    if (parse_argument_types(ufunc, argtypes, varargs) == FAIL)
    {
        SOURCING_LNUM = lnum_save;
        goto erret;
    }

    // parse the return type, if any
    if (parse_return_type(ufunc, ret_type) == FAIL)
        goto erret;

    pt = ALLOC_CLEAR_ONE(partial_T);
    if (pt == NULL)
        goto erret;
    pt->pt_func = ufunc;
    pt->pt_refcount = 1;

    ufunc->uf_lines = newlines;
    newlines.ga_data = NULL;
    if (sandbox)
        ufunc->uf_flags |= FC_SANDBOX;
    if (!ASCII_ISUPPER(*ufunc->uf_name))
        ufunc->uf_flags |= FC_VIM9;
    ufunc->uf_script_ctx = current_sctx;
    ufunc->uf_script_ctx_version = current_sctx.sc_version;
    ufunc->uf_script_ctx.sc_lnum += sourcing_lnum_top;
    set_function_type(ufunc);

    function_using_block_scopes(ufunc, evalarg->eval_cstack);

    rettv->vval.v_partial = pt;
    rettv->v_type = VAR_PARTIAL;
    ufunc = NULL;
    ret = OK;

erret:
    if (lnum_save >= 0)
        SOURCING_LNUM = lnum_save;
    vim_free(line_to_free);
    ga_clear_strings(&newlines);
    if (newargs != NULL)
        ga_clear_strings(newargs);
    ga_clear_strings(default_args);
    if (ufunc != NULL)
    {
        func_clear(ufunc, TRUE);
        func_free(ufunc, TRUE);
    }
    return ret;
}
0
[ "CWE-416" ]
vim
9c23f9bb5fe435b28245ba8ac65aa0ca6b902c04
155,524,476,331,384,000,000,000,000,000,000,000,000
189
patch 8.2.3902: Vim9: double free with nested :def function Problem: Vim9: double free with nested :def function. Solution: Pass "line_to_free" from compile_def_function() and make sure cmdlinep is valid.
static void test_bug1664()
{
  MYSQL_STMT *stmt;
  int        rc, int_data;
  const char *data;
  const char *str_data= "Simple string";
  MYSQL_BIND my_bind[2];
  const char *query= "INSERT INTO test_long_data(col2, col1) VALUES(?, ?)";

  myheader("test_bug1664");

  rc= mysql_query(mysql, "DROP TABLE IF EXISTS test_long_data");
  myquery(rc);

  rc= mysql_query(mysql, "CREATE TABLE test_long_data(col1 int, col2 long varchar)");
  myquery(rc);

  stmt= mysql_stmt_init(mysql);
  check_stmt(stmt);
  rc= mysql_stmt_prepare(stmt, query, strlen(query));
  check_execute(stmt, rc);

  verify_param_count(stmt, 2);

  memset(my_bind, 0, sizeof(my_bind));

  my_bind[0].buffer_type= MYSQL_TYPE_STRING;
  my_bind[0].buffer= (void *)str_data;
  my_bind[0].buffer_length= strlen(str_data);

  my_bind[1].buffer= (void *)&int_data;
  my_bind[1].buffer_type= MYSQL_TYPE_LONG;

  rc= mysql_stmt_bind_param(stmt, my_bind);
  check_execute(stmt, rc);

  int_data= 1;

  /*
    Let us supply empty long_data. This should work and should
    not break following execution.
  */
  data= "";
  rc= mysql_stmt_send_long_data(stmt, 0, data, strlen(data));
  check_execute(stmt, rc);

  rc= mysql_stmt_execute(stmt);
  check_execute(stmt, rc);

  verify_col_data("test_long_data", "col1", "1");
  verify_col_data("test_long_data", "col2", "");

  rc= mysql_query(mysql, "DELETE FROM test_long_data");
  myquery(rc);

  /* This should pass OK */
  data= (char *)"Data";
  rc= mysql_stmt_send_long_data(stmt, 0, data, strlen(data));
  check_execute(stmt, rc);

  rc= mysql_stmt_execute(stmt);
  check_execute(stmt, rc);

  verify_col_data("test_long_data", "col1", "1");
  verify_col_data("test_long_data", "col2", "Data");

  /* clean up */
  rc= mysql_query(mysql, "DELETE FROM test_long_data");
  myquery(rc);

  /*
    Now we are changing int parameter and don't do anything
    with first parameter. Second mysql_stmt_execute() should run
    OK treating this first parameter as string parameter.
  */
  int_data= 2;
  /* execute */
  rc= mysql_stmt_execute(stmt);
  check_execute(stmt, rc);

  verify_col_data("test_long_data", "col1", "2");
  verify_col_data("test_long_data", "col2", str_data);

  /* clean up */
  rc= mysql_query(mysql, "DELETE FROM test_long_data");
  myquery(rc);

  /*
    Now we are sending other long data. It should not be
    concatenated to previous.
  */
  data= (char *)"SomeOtherData";
  rc= mysql_stmt_send_long_data(stmt, 0, data, strlen(data));
  check_execute(stmt, rc);

  rc= mysql_stmt_execute(stmt);
  check_execute(stmt, rc);

  verify_col_data("test_long_data", "col1", "2");
  verify_col_data("test_long_data", "col2", "SomeOtherData");

  mysql_stmt_close(stmt);

  /* clean up */
  rc= mysql_query(mysql, "DELETE FROM test_long_data");
  myquery(rc);

  /* Now let us test how mysql_stmt_reset works. */
  stmt= mysql_stmt_init(mysql);
  check_stmt(stmt);
  rc= mysql_stmt_prepare(stmt, query, strlen(query));
  check_execute(stmt, rc);
  rc= mysql_stmt_bind_param(stmt, my_bind);
  check_execute(stmt, rc);
  data= (char *)"SomeData";
  rc= mysql_stmt_send_long_data(stmt, 0, data, strlen(data));
  check_execute(stmt, rc);
  rc= mysql_stmt_reset(stmt);
  check_execute(stmt, rc);
  rc= mysql_stmt_execute(stmt);
  check_execute(stmt, rc);
  verify_col_data("test_long_data", "col1", "2");
  verify_col_data("test_long_data", "col2", str_data);
  mysql_stmt_close(stmt);

  /* Final clean up */
  rc= mysql_query(mysql, "DROP TABLE test_long_data");
  myquery(rc);
}
0
[ "CWE-284", "CWE-295" ]
mysql-server
3bd5589e1a5a93f9c224badf983cd65c45215390
149,877,885,369,011,620,000,000,000,000,000,000,000
136
WL#6791 : Redefine client --ssl option to imply enforced encryption # Changed the meaning of the --ssl=1 option of all client binaries to mean force ssl, not try ssl and fail over to unencrypted # Added a new MYSQL_OPT_SSL_ENFORCE mysql_options() option to specify that an ssl connection is required. # Added a new macro SSL_SET_OPTIONS() to the client SSL handling headers that sets all the relevant SSL options at once. # Revamped all of the current native clients to use the new macro # Removed some Windows line endings. # Added proper handling of the new option into the ssl helper headers. # If SSL is mandatory assume that the media is secure enough for the sha256 plugin to do unencrypted password exchange even before establishing a connection. # Set the default ssl cipher to DHE-RSA-AES256-SHA if none is specified. # updated test cases that require a non-default cipher to spawn a mysql command line tool binary since mysqltest has no support for specifying ciphers. # updated the replication slave connection code to always enforce SSL if any of the SSL config options is present. # test cases added and updated. # added a mysql_get_option() API to return mysql_options() values. Used the new API inside the sha256 plugin. # Fixed compilation warnings because of unused variables. # Fixed test failures (mysql_ssl and bug13115401) # Fixed whitespace issues. # Fully implemented the mysql_get_option() function. # Added a test case for mysql_get_option() # fixed some trailing whitespace issues # fixed some uint/int warnings in mysql_client_test.c # removed shared memory option from non-windows get_options tests # moved MYSQL_OPT_LOCAL_INFILE to the uint options
unsigned long get_max_files(void)
{
    return files_stat.max_files;
}
0
[ "CWE-17" ]
linux
eee5cc2702929fd41cce28058dc6d6717f723f87
339,325,976,176,935,780,000,000,000,000,000,000,000
4
get rid of s_files and files_lock The only thing we need it for is alt-sysrq-r (emergency remount r/o) and these days we can do just as well without going through the list of files. Signed-off-by: Al Viro <[email protected]>
mysql_list_processes(MYSQL *mysql)
{
  MYSQL_DATA *fields;
  uint field_count;
  uchar *pos;
  LINT_INIT(fields);

  if (ma_simple_command(mysql, COM_PROCESS_INFO, 0, 0, 0, 0))
    return(NULL);
  free_old_query(mysql);
  pos= (uchar*) mysql->net.read_pos;
  field_count= (uint) net_field_length(&pos);
  if (!(fields= mysql->methods->db_read_rows(mysql, (MYSQL_FIELD*) 0, 7)))
    return(NULL);
  if (!(mysql->fields= unpack_fields(mysql, fields, &mysql->field_alloc, field_count, 0)))
    return(NULL);
  mysql->status= MYSQL_STATUS_GET_RESULT;
  mysql->field_count= field_count;
  return(mysql_store_result(mysql));
}
0
[ "CWE-20" ]
mariadb-connector-c
2759b87d72926b7c9b5426437a7c8dd15ff57945
98,516,357,705,314,600,000,000,000,000,000,000,000
21
sanity checks for client-supplied OK packet content reported by Matthias Kaiser, Apple Information Security
static MagickBooleanType IsPWP(const unsigned char *magick, const size_t length)
{
  if (length < 5)
    return(MagickFalse);
  if (LocaleNCompare((char *) magick, "SFW95", 5) == 0)
    return(MagickTrue);
  return(MagickFalse);
}
0
[ "CWE-20", "CWE-252" ]
ImageMagick
6b6bff054d569a77973f2140c0e86366e6168a6c
43,258,781,706,984,890,000,000,000,000,000,000,000
8
https://github.com/ImageMagick/ImageMagick/issues/1199
void regulator_unregister_supply_alias(struct device *dev, const char *id)
{
    struct regulator_supply_alias *map;

    map = regulator_find_supply_alias(dev, id);
    if (map) {
        list_del(&map->list);
        kfree(map);
    }
}
0
[ "CWE-416" ]
linux
60a2362f769cf549dc466134efe71c8bf9fbaaba
13,035,101,802,931,967,000,000,000,000,000,000,000
10
regulator: core: Fix regulator_ena_gpio_free not to access pin after freeing After freeing a pin from regulator_ena_gpio_free, the loop can still access the pin. So this patch ensures the pin is not accessed after it has been freed. Signed-off-by: Seung-Woo Kim <[email protected]> Signed-off-by: Mark Brown <[email protected]>
DEFUN(goURL, GOTO, "Open specified document in a new buffer")
{
    goURL0("Goto URL: ", FALSE);
}
0
[ "CWE-59", "CWE-241" ]
w3m
18dcbadf2771cdb0c18509b14e4e73505b242753
260,743,856,877,671,800,000,000,000,000,000,000,000
4
Make temporary directory safely when ~/.w3m is unwritable
static int dmg_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    int len;

    if (!filename) {
        return 0;
    }

    len = strlen(filename);
    if (len > 4 && !strcmp(filename + len - 4, ".dmg")) {
        return 2;
    }
    return 0;
}
0
[ "CWE-119" ]
qemu
f0dce23475b5af5da6b17b97c1765271307734b6
268,351,189,225,756,600,000,000,000,000,000,000,000
14
dmg: prevent chunk buffer overflow (CVE-2014-0145) Both compressed and uncompressed I/O is buffered. dmg_open() calculates the maximum buffer size needed from the metadata in the image file. There is currently a buffer overflow since ->lengths[] is accounted against the maximum compressed buffer size but actually uses the uncompressed buffer: switch (s->types[chunk]) { case 1: /* copy */ ret = bdrv_pread(bs->file, s->offsets[chunk], s->uncompressed_chunk, s->lengths[chunk]); We must account against the maximum uncompressed buffer size for type=1 chunks. This patch fixes the maximum buffer size calculation to take into account the chunk type. It is critical that we update the correct maximum since there are two buffers ->compressed_chunk and ->uncompressed_chunk. Signed-off-by: Stefan Hajnoczi <[email protected]> Signed-off-by: Kevin Wolf <[email protected]> Reviewed-by: Max Reitz <[email protected]> Signed-off-by: Stefan Hajnoczi <[email protected]>
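The sizing rule this commit message explains — account each chunk's length against the buffer that chunk type actually fills — can be sketched in plain C. The snippet below is an illustrative sketch only: the chunk struct and max_buffer_sizes() are invented simplifications, not QEMU's dmg code.

#include <stdint.h>

struct chunk {
    uint32_t type;      /* 1 = uncompressed copy, others = compressed */
    uint64_t length;    /* on-disk length of this chunk */
};

/* Track separate maxima for the compressed and uncompressed buffers;
 * charging a type=1 (copy) chunk against the compressed maximum is
 * exactly the accounting error that made the overflow possible. */
void max_buffer_sizes(const struct chunk *chunks, int n,
                      uint64_t *max_compressed, uint64_t *max_uncompressed)
{
    *max_compressed = *max_uncompressed = 0;
    for (int i = 0; i < n; i++) {
        uint64_t *target = (chunks[i].type == 1) ? max_uncompressed
                                                 : max_compressed;
        if (chunks[i].length > *target)
            *target = chunks[i].length;
    }
}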
HeaderString::HeaderString(HeaderString&& move_value) noexcept {
  type_ = move_value.type_;
  string_length_ = move_value.string_length_;
  switch (move_value.type_) {
  case Type::Reference: {
    buffer_.ref_ = move_value.buffer_.ref_;
    break;
  }
  case Type::Dynamic: {
    // When we move a dynamic header, we switch the moved header back to its default state (inline).
    buffer_.dynamic_ = move_value.buffer_.dynamic_;
    dynamic_capacity_ = move_value.dynamic_capacity_;
    move_value.type_ = Type::Inline;
    move_value.buffer_.dynamic_ = move_value.inline_buffer_;
    move_value.clear();
    break;
  }
  case Type::Inline: {
    buffer_.dynamic_ = inline_buffer_;
    memcpy(inline_buffer_, move_value.inline_buffer_, string_length_);
    move_value.string_length_ = 0;
    break;
  }
  }
  ASSERT(valid());
}
0
[ "CWE-400", "CWE-703" ]
envoy
afc39bea36fd436e54262f150c009e8d72db5014
246,425,230,875,018,200,000,000,000,000,000,000,000
26
Track byteSize of HeaderMap internally. Introduces a cached byte size updated internally in HeaderMap. The value is stored as an optional, and is cleared whenever a non-const pointer or reference to a HeaderEntry is accessed. The cached value can be set with refreshByteSize() which performs an iteration over the HeaderMap to sum the size of each key and value in the HeaderMap. Signed-off-by: Asra Ali <[email protected]>
static int fuzzer_make_current(UNUSED void *cookie, UNUSED int scanout_idx,
                               virgl_renderer_gl_context ctx)
{
   return virgl_egl_make_context_current(test_egl, ctx);
}
0
[ "CWE-787" ]
virglrenderer
95e581fd181b213c2ed7cdc63f2abc03eaaa77ec
196,723,659,493,805,700,000,000,000,000,000,000,000
5
vrend: Add test to resource OOB write and fix it v2: Also check that no depth != 1 has been send when none is due Closes: #250 Signed-off-by: Gert Wollny <[email protected]> Reviewed-by: Chia-I Wu <[email protected]>
static int ntop_snmpgetnext(lua_State* vm) {
  return(ntop_snmp_get_fctn(vm, SNMP_GETNEXT_REQUEST_TYPE));
}
0
[ "CWE-254" ]
ntopng
2e0620be3410f5e22c9aa47e261bc5a12be692c6
205,837,035,671,656,000,000,000,000,000,000,000,000
1
Added security fix to avoid escalating privileges to non-privileged users Many thanks to Dolev Farhi for reporting it
static int dom_size(int peers)
{
    int i = 0;

    while ((i * i) < peers)
        i++;
    return i < MAX_MON_DOMAIN ? i : MAX_MON_DOMAIN;
}
0
[ "CWE-787" ]
linux
9aa422ad326634b76309e8ff342c246800621216
279,944,407,203,088,930,000,000,000,000,000,000,000
8
tipc: improve size validations for received domain records The function tipc_mon_rcv() allows a node to receive and process domain_record structs from peer nodes to track their views of the network topology. This patch verifies that the number of members in a received domain record does not exceed the limit defined by MAX_MON_DOMAIN, something that may otherwise lead to a stack overflow. tipc_mon_rcv() is called from the function tipc_link_proto_rcv(), where we are reading a 32 bit message data length field into a uint16. To avert any risk of bit overflow, we add an extra sanity check for this in that function. We cannot see that happen with the current code, but future designers being unaware of this risk, may introduce it by allowing delivery of very large (> 64k) sk buffers from the bearer layer. This potential problem was identified by Eric Dumazet. This fixes CVE-2022-0435 Reported-by: Samuel Page <[email protected]> Reported-by: Eric Dumazet <[email protected]> Fixes: 35c55c9877f8 ("tipc: add neighbor monitoring framework") Signed-off-by: Jon Maloy <[email protected]> Reviewed-by: Xin Long <[email protected]> Reviewed-by: Samuel Page <[email protected]> Reviewed-by: Eric Dumazet <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
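The two validations this commit message describes — cap the member count at MAX_MON_DOMAIN and check that the advertised length actually covers that many members — can be sketched in plain C. The snippet below is an illustrative sketch only; the record layout and dom_rec_valid() are invented simplifications of the real TIPC structures.

#include <stdint.h>
#include <stddef.h>

#define MAX_MON_DOMAIN 64

struct dom_rec {
    uint16_t member_cnt;
    uint32_t members[MAX_MON_DOMAIN];
};

int dom_rec_valid(const struct dom_rec *rec, size_t rcv_len)
{
    if (rcv_len < offsetof(struct dom_rec, members))
        return 0;     /* too short to even hold the header/count */
    if (rec->member_cnt > MAX_MON_DOMAIN)
        return 0;     /* would overflow the fixed-size array (the stack
                       * overflow the patch guards against) */
    if (rcv_len < offsetof(struct dom_rec, members) +
                  (size_t)rec->member_cnt * sizeof(uint32_t))
        return 0;     /* length doesn't cover all claimed members */
    return 1;
}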
int rtas_suspend_cpu(struct rtas_suspend_me_data *data)
{
	return __rtas_suspend_cpu(data, 0);
}
0
[ "CWE-862" ]
linux
bd59380c5ba4147dcbaad3e582b55ccfd120b764
198,142,674,634,705,860,000,000,000,000,000,000,000
4
powerpc/rtas: Restrict RTAS requests from userspace

A number of userspace utilities depend on making calls to RTAS to retrieve
information and update various things. The existing API through which we
expose RTAS to userspace exposes more RTAS functionality than we actually
need, through the sys_rtas syscall, which allows root (or anyone with
CAP_SYS_ADMIN) to make any RTAS call they want with arbitrary arguments.

Many RTAS calls take the address of a buffer as an argument, and it's up to
the caller to specify the physical address of the buffer as an argument. We
allocate a buffer (the "RMO buffer") in the Real Memory Area that RTAS can
access, and then expose the physical address and size of this buffer in
/proc/powerpc/rtas/rmo_buffer. Userspace is expected to read this address,
poke at the buffer using /dev/mem, and pass an address in the RMO buffer to
the RTAS call.

However, there's nothing stopping the caller from specifying whatever
address they want in the RTAS call, and it's easy to construct a series of
RTAS calls that can overwrite arbitrary bytes (even without /dev/mem
access).

Additionally, there are some RTAS calls that do potentially dangerous
things and for which there are no legitimate userspace use cases.

In the past, this would not have been a particularly big deal as it was
assumed that root could modify all system state freely, but with Secure
Boot and lockdown we need to care about this.

We can't fundamentally change the ABI at this point, however we can address
this by implementing a filter that checks RTAS calls against a list of
permitted calls and forces the caller to use addresses within the RMO
buffer.

The list is based off the list of calls that are used by the librtas
userspace library, and has been tested with a number of existing userspace
RTAS utilities. For compatibility with any applications we are not aware of
that require other calls, the filter can be turned off at build time.

Cc: [email protected]
Reported-by: Daniel Axtens <[email protected]>
Signed-off-by: Andrew Donnellan <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
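The filter the message describes has two parts: an allow-list of call names and a range check forcing buffer arguments into the RMO buffer. An illustrative userspace-style C sketch of that shape — the table contents, globals, and function names here are invented, not the kernel's:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical allow-list entry: a permitted call and, if it takes a
 * buffer argument, which argument index holds the buffer address. */
struct rtas_filter {
    const char *name;
    int buf_arg; /* -1 if the call takes no buffer argument */
};

static const struct rtas_filter allowed[] = {
    { "get-time-of-day", -1 },
    { "ibm,get-vpd",      0 },
    /* ... entries based on what userspace tools actually need ... */
};

/* Assumed bounds of the RMO buffer exposed to userspace. */
static uint64_t rmo_buf_start, rmo_buf_end;

static bool in_rmo_buf(uint64_t addr)
{
    return addr >= rmo_buf_start && addr < rmo_buf_end;
}

/* Reject calls not on the list, and calls whose buffer argument points
 * outside the RMO buffer: default deny. */
static bool rtas_call_permitted(const char *name, const uint64_t *args,
                                size_t nargs)
{
    for (size_t i = 0; i < sizeof(allowed) / sizeof(allowed[0]); i++) {
        if (strcmp(name, allowed[i].name) != 0)
            continue;
        if (allowed[i].buf_arg < 0)
            return true;
        return (size_t)allowed[i].buf_arg < nargs &&
               in_rmo_buf(args[allowed[i].buf_arg]);
    }
    return false;
}

The default-deny posture is the key design choice: an unknown call name fails closed, which is why the message mentions a build-time escape hatch for unanticipated applications.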
static unsigned long id_function(void)
{
	return ((unsigned long) CURRENT_THREAD_ID);
}
0
[ "CWE-476" ]
libvncserver
33441d90a506d5f3ae9388f2752901227e430553
196,002,046,653,351,400,000,000,000,000,000,000,000
4
libvncclient/tls_openssl: do not deref a NULL pointer

Happens in anonTLS mode where cred is NULL.

re #347
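The fix reduces to a NULL guard before the credential is dereferenced, since anonTLS connections carry no credential object. A hedged sketch with placeholder types (not libvncclient's real structures):

#include <stdio.h>

/* Placeholder credential type; in anonTLS mode the caller passes NULL. */
struct vnc_credential { const char *x509_ca_file; };

static int load_credential(const struct vnc_credential *cred)
{
    if (cred == NULL) {
        /* anonTLS: nothing to load, and dereferencing would crash */
        return 0;
    }
    printf("loading CA file %s\n", cred->x509_ca_file);
    return 1;
}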
int inode_wait(void *word)
{
	schedule();
	return 0;
}
0
[ "CWE-284", "CWE-264" ]
linux
23adbe12ef7d3d4195e80800ab36b37bee28cd03
86,599,448,853,162,970,000,000,000,000,000,000,000
5
fs,userns: Change inode_capable to capable_wrt_inode_uidgid

The kernel has no concept of capabilities with respect to inodes; inodes
exist independently of namespaces. For example,
inode_capable(inode, CAP_LINUX_IMMUTABLE) would be nonsense.

This patch changes inode_capable to check for uid and gid mappings and
renames it to capable_wrt_inode_uidgid, which should make it more obvious
what it does.

Fixes CVE-2014-4014.

Cc: Theodore Ts'o <[email protected]>
Cc: Serge Hallyn <[email protected]>
Cc: "Eric W. Biederman" <[email protected]>
Cc: Dave Chinner <[email protected]>
Cc: [email protected]
Signed-off-by: Andy Lutomirski <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
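The renamed helper pairs the capability check with a check that the inode's uid and gid are mapped into the namespace being consulted. A userspace-flavoured sketch of the combined predicate; the kernel's kuid_has_mapping()/ns_capable() are only imitated by the toy stand-ins below:

#include <stdbool.h>
#include <sys/types.h>

struct user_ns { uid_t uid_floor, uid_ceil; gid_t gid_floor, gid_ceil; };
struct inode_ids { uid_t uid; gid_t gid; };

/* Toy stand-ins for the kernel's kuid_has_mapping()/kgid_has_mapping():
 * here a namespace simply maps a contiguous id range. */
static bool uid_mapped(const struct user_ns *ns, uid_t uid)
{ return uid >= ns->uid_floor && uid < ns->uid_ceil; }
static bool gid_mapped(const struct user_ns *ns, gid_t gid)
{ return gid >= ns->gid_floor && gid < ns->gid_ceil; }

/* Stand-in for ns_capable(): assume the capability query is answered
 * elsewhere; this sketch always grants it. */
static bool ns_capable_sim(const struct user_ns *ns, int cap)
{ (void)ns; (void)cap; return true; }

/* The point of the fix: a capability is honoured with respect to an
 * inode only if both of the inode's ids are mapped in the namespace. */
static bool capable_wrt_inode_uidgid(const struct user_ns *ns,
                                     const struct inode_ids *inode, int cap)
{
    return uid_mapped(ns, inode->uid) &&
           gid_mapped(ns, inode->gid) &&
           ns_capable_sim(ns, cap);
}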
static int __init nf_conntrack_proto_dccp_init(void)
{
	int err;

	err = nf_conntrack_l4proto_register(&dccp_proto4);
	if (err < 0)
		goto err1;

	err = nf_conntrack_l4proto_register(&dccp_proto6);
	if (err < 0)
		goto err2;
	return 0;

err2:
	nf_conntrack_l4proto_unregister(&dccp_proto4);
err1:
	return err;
}
0
[]
linux
2bc780499aa33311ec0f3e42624dfaa7be0ade5e
335,104,690,955,236,300,000,000,000,000,000,000,000
18
[NETFILTER]: nf_conntrack: add DCCP protocol support

Add DCCP conntrack helper. Thanks to Gerrit Renker <[email protected]>
for review and testing.

Signed-off-by: Patrick McHardy <[email protected]>
OFCondition
AR_10_ConfirmRelease(PRIVATE_NETWORKKEY ** /*network*/,
                     PRIVATE_ASSOCIATIONKEY ** association,
                     int nextState, void * /*params*/)
{
    (*association)->protocolState = nextState;
    return EC_Normal;
}
0
[ "CWE-415", "CWE-703", "CWE-401" ]
dcmtk
a9697dfeb672b0b9412c00c7d36d801e27ec85cb
15,436,647,356,912,308,000,000,000,000,000,000,000
6
Fixed possible NULL pointer dereference/double free.

Thanks to Jinsheng Ba <[email protected]> for the report and some patches.
//! Resize image to half-size along XY axes, using an optimized filter.
CImg<T>& resize_halfXY() {
  return get_resize_halfXY().move_to(*this);
}
0
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
115,320,496,418,690,290,000,000,000,000,000,000,000
3
Fix other issues in 'CImg<T>::load_bmp()'.
static VALUE
str_new4(klass, str)
    VALUE klass, str;
{
    VALUE str2 = str_alloc(klass);

    RSTRING(str2)->len = RSTRING(str)->len;
    RSTRING(str2)->ptr = RSTRING(str)->ptr;
    if (FL_TEST(str, ELTS_SHARED)) {
	FL_SET(str2, ELTS_SHARED);
	RSTRING(str2)->aux.shared = RSTRING(str)->aux.shared;
    }
    else {
	FL_SET(str, ELTS_SHARED);
	RSTRING(str)->aux.shared = str2;
    }

    return str2;
}
0
[ "CWE-20" ]
ruby
e926ef5233cc9f1035d3d51068abe9df8b5429da
117,853,995,757,430,230,000,000,000,000,000,000,000
18
* random.c (rb_genrand_int32, rb_genrand_real), intern.h: Export.

* string.c (rb_str_tmp_new), intern.h: New function.

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/branches/ruby_1_8@16014 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
static int _annotate_rewrite(struct mailbox *oldmailbox,
                             uint32_t olduid,
                             const char *olduserid,
                             struct mailbox *newmailbox,
                             uint32_t newuid,
                             const char *newuserid,
                             int copy)
{
    struct rename_rock rrock;

    rrock.oldmailbox = oldmailbox;
    rrock.newmailbox = newmailbox;
    rrock.olduserid = olduserid;
    rrock.newuserid = newuserid;
    rrock.olduid = olduid;
    rrock.newuid = newuid;
    rrock.copy = copy;

    return annotatemore_findall(oldmailbox->name, olduid, "*", /*modseq*/0,
                                &rename_cb, &rrock, /*flags*/0);
}
0
[ "CWE-732" ]
cyrus-imapd
621f9e41465b521399f691c241181300fab55995
163,996,081,750,211,150,000,000,000,000,000,000,000
21
annotate: don't allow everyone to write shared server entries
static GF_Node *lsr_read_use(GF_LASeRCodec *lsr, Bool is_same)
{
	GF_FieldInfo info;
	u32 flag;
	GF_Node *elt = gf_node_new(lsr->sg, TAG_SVG_use);

	if (is_same) {
		if (lsr->prev_use) {
			lsr_restore_base(lsr, (SVG_Element *)elt, lsr->prev_use, 0, 0);
		} else {
			GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[LASeR] sameuse coded in bitstream but no use defined !\n"));
		}
		lsr_read_id(lsr, elt);
		lsr_read_href(lsr, elt);
	} else {
		lsr_read_id(lsr, elt);
		lsr_read_rare_full(lsr, elt);
		lsr_read_fill(lsr, elt);
		lsr_read_stroke(lsr, elt);
		lsr_read_eRR(lsr, elt);
		GF_LSR_READ_INT(lsr, flag, 1, "hasOverflow");
		if (flag) {
			lsr->last_error = gf_node_get_attribute_by_tag(elt, TAG_SVG_ATT_overflow, 1, 0, &info);
			GF_LSR_READ_INT(lsr, *(SVG_Overflow*)info.far_ptr, 2, "overflow");
		}
		lsr_read_coordinate_ptr(lsr, elt, TAG_SVG_ATT_x, 1, "x");
		lsr_read_coordinate_ptr(lsr, elt, TAG_SVG_ATT_y, 1, "y");
		lsr_read_href(lsr, elt);
		lsr_read_any_attribute(lsr, elt, 1);
		lsr->prev_use = (SVG_Element*)elt;
	}
	lsr_read_group_content(lsr, elt, is_same);
	return elt;
}
0
[ "CWE-190" ]
gpac
faa75edde3dfeba1e2cf6ffa48e45a50f1042096
102,512,162,616,681,970,000,000,000,000,000,000,000
34
fixed #2213
static void
rsvp_clear_checksum(void *header)
{
    struct rsvp_common_header *rsvp_com_header =
        (struct rsvp_common_header *) header;

    rsvp_com_header->checksum[0] = 0;
    rsvp_com_header->checksum[1] = 0;
}
0
[ "CWE-125" ]
tcpdump
bea2686c296b79609060a104cc139810785b0739
5,723,624,345,988,628,000,000,000,000,000,000,000
7
(for 4.9.3) CVE-2018-14465/RSVP: Add a missing bounds check

In rsvp_obj_print().

This fixes a buffer over-read discovered by Bhargava Shastry.

Add a test using the capture file supplied by the reporter(s).
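rsvp_clear_checksum() above is incidental; the fix itself adds a bounds check in rsvp_obj_print() before object bytes are read. The general dissector pattern — verify the remaining capture length before every fixed-size read — looks roughly like this (a generic sketch, not tcpdump's actual macros):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Generic dissector-style bounds check: refuse to read a field unless
 * the remaining captured bytes can hold it. Caller guarantees p <= end. */
static int read_u16_checked(const uint8_t *p, const uint8_t *end,
                            uint16_t *out)
{
    if ((size_t)(end - p) < sizeof(*out))
        return -1;              /* truncated capture: stop, don't over-read */
    memcpy(out, p, sizeof(*out));
    return 0;
}

/* Usage inside an object printer: check before each read, and bail out
 * on truncation rather than walking past the buffer. */
static int print_obj(const uint8_t *p, const uint8_t *end)
{
    uint16_t len;
    if (read_u16_checked(p, end, &len) < 0)
        return -1;
    /* ... only now interpret 'len' bytes, after a further check that
     * len itself fits in (end - p) ... */
    return 0;
}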
static uint64_t vmxnet3_get_mac_high(MACAddr *addr)
{
    return VMXNET3_MAKE_BYTE(0, addr->a[4]) |
           VMXNET3_MAKE_BYTE(1, addr->a[5]);
}
0
[ "CWE-20" ]
qemu
a7278b36fcab9af469563bd7b9dadebe2ae25e48
273,799,855,050,103,630,000,000,000,000,000,000,000
5
net/vmxnet3: Refine l2 header validation

Validation of l2 header length assumed minimal packet size as
eth_header + 2 * vlan_header regardless of the actual protocol.

This caused a crash for valid non-IP packets shorter than 22 bytes, as
'tx_pkt->packet_type' hasn't been assigned for such packets, and
'vmxnet3_on_tx_done_update_stats()' expects it to be properly set.

Refine header length validation in 'vmxnet_tx_pkt_parse_headers'.
Check its return value during packet processing flow.

As a side effect, in case of IPv4 or IPv6 header validation failure,
corrupt packets will be dropped.

Signed-off-by: Dana Rubin <[email protected]>
Signed-off-by: Shmulik Ladkani <[email protected]>
Signed-off-by: Jason Wang <[email protected]>
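The refinement amounts to deriving the minimal L2 header length from the actual ethertype, counting VLAN tags only as they appear, instead of assuming eth + 2 VLAN for every packet. A simplified sketch of that computation (standard Ethernet constants; this is not QEMU's vmxnet_tx_pkt_parse_headers()):

#include <stddef.h>
#include <stdint.h>

#define ETH_HLEN        14
#define VLAN_HLEN        4
#define ETH_P_8021Q 0x8100
#define ETH_P_8021AD 0x88A8

/* Walk at most two VLAN tags and return the real l2 header length,
 * or -1 if the packet is malformed or too short. */
static int l2_header_len(const uint8_t *pkt, size_t len)
{
    size_t off = ETH_HLEN;
    int tags = 0;

    if (len < ETH_HLEN)
        return -1;
    for (;;) {
        /* ethertype of the current layer sits 2 bytes before 'off' */
        uint16_t ethertype = (uint16_t)((pkt[off - 2] << 8) | pkt[off - 1]);
        if (ethertype != ETH_P_8021Q && ethertype != ETH_P_8021AD)
            return (int)off;            /* no (more) VLAN tags */
        if (++tags > 2 || len < off + VLAN_HLEN)
            return -1;                  /* malformed or truncated */
        off += VLAN_HLEN;
    }
}

A 14-byte untagged ARP frame, for example, now validates at length 14 rather than being rejected (or worse, mis-handled) against a hard-coded 22-byte minimum.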
void *jas_realloc(void *ptr, size_t size)
{
	void *result;
	JAS_DBGLOG(101, ("jas_realloc called with %x,%zu\n", ptr, size));
	result = realloc(ptr, size);
	JAS_DBGLOG(100, ("jas_realloc(%p, %zu) -> %p\n", ptr, size, result));
	return result;
}
1
[ "CWE-190" ]
jasper
988f8365f7d8ad8073b6786e433d34c553ecf568
139,439,419,542,915,110,000,000,000,000,000,000,000
8
Fixed an integer overflow problem.
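The jas_realloc() above is the flagged pre-fix version: it forwards the size with no arithmetic checks. The overflow class being fixed is the count-times-size multiplication; a hedged sketch of a checked variant (the function name is illustrative):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

/* Checked reallocation: fail cleanly if nmemb * size would overflow
 * size_t instead of silently allocating a too-small buffer. */
static void *realloc2_checked(void *ptr, size_t nmemb, size_t size)
{
    if (size != 0 && nmemb > SIZE_MAX / size) {
        errno = ENOMEM;
        return NULL;            /* would overflow: refuse */
    }
    return realloc(ptr, nmemb * size);
}

The division-based guard is the portable idiom: it rejects exactly the products that would wrap, without needing 128-bit arithmetic or compiler builtins.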
PS_READ_FUNC(files)
{
	long n;
	struct stat sbuf;
	PS_FILES_DATA;

	/* If strict mode, check session id existence */
	if (PS(use_strict_mode) &&
		ps_files_key_exists(data, key TSRMLS_CC) == FAILURE) {
		/* key points to PS(id), but cannot change here. */
		if (key) {
			efree(PS(id));
			PS(id) = NULL;
		}
		PS(id) = PS(mod)->s_create_sid((void **)&data, NULL TSRMLS_CC);
		if (!PS(id)) {
			return FAILURE;
		}
		if (PS(use_cookies)) {
			PS(send_cookie) = 1;
		}
		php_session_reset_id(TSRMLS_C);
		PS(session_status) = php_session_active;
	}

	if (!PS(id)) {
		return FAILURE;
	}

	ps_files_open(data, PS(id) TSRMLS_CC);
	if (data->fd < 0) {
		return FAILURE;
	}

	if (fstat(data->fd, &sbuf)) {
		return FAILURE;
	}

	data->st_size = *vallen = sbuf.st_size;

	if (sbuf.st_size == 0) {
		*val = STR_EMPTY_ALLOC();
		return SUCCESS;
	}

	*val = emalloc(sbuf.st_size);

#if defined(HAVE_PREAD)
	n = pread(data->fd, *val, sbuf.st_size, 0);
#else
	lseek(data->fd, 0, SEEK_SET);
	n = read(data->fd, *val, sbuf.st_size);
#endif

	if (n != sbuf.st_size) {
		if (n == -1) {
			php_error_docref(NULL TSRMLS_CC, E_WARNING, "read failed: %s (%d)", strerror(errno), errno);
		} else {
			php_error_docref(NULL TSRMLS_CC, E_WARNING, "read returned less bytes than requested");
		}
		efree(*val);
		return FAILURE;
	}

	return SUCCESS;
}
0
[]
php-src
a793b709086eed655bc98f933d838b8679b28920
33,532,134,815,063,706,000,000,000,000,000,000,000
66
refix bug #69111, crash in 5.6 only
PIX *
pixOctreeQuantNumColors(PIX     *pixs,
                        l_int32  maxcolors,
                        l_int32  subsample)
{
l_int32    w, h, minside, bpp, wpls, wpld, i, j, actualcolors;
l_int32    rval, gval, bval, nbase, nextra, maxlevel, ncubes, val;
l_int32   *lut1, *lut2;
l_uint32   index;
l_uint32  *lines, *lined, *datas, *datad, *pspixel;
l_uint32  *rtab, *gtab, *btab;
OQCELL    *oqc;
OQCELL   **oqca;
L_HEAP    *lh;
PIX       *pixd;
PIXCMAP   *cmap;

    PROCNAME("pixOctreeQuantNumColors");

    if (!pixs)
        return (PIX *)ERROR_PTR("pixs not defined", procName, NULL);
    if (pixGetDepth(pixs) != 32)
        return (PIX *)ERROR_PTR("pixs not 32 bpp", procName, NULL);
    if (maxcolors < 8) {
        L_WARNING("max colors < 8; setting to 8\n", procName);
        maxcolors = 8;
    }
    if (maxcolors > 256) {
        L_WARNING("max colors > 256; setting to 256\n", procName);
        maxcolors = 256;
    }

    pixGetDimensions(pixs, &w, &h, NULL);
    datas = pixGetData(pixs);
    wpls = pixGetWpl(pixs);
    minside = L_MIN(w, h);
    if (subsample <= 0) {
        subsample = L_MAX(1, minside / 200);
    }

    if (maxcolors <= 16) {
        bpp = 4;
        pixd = pixCreate(w, h, bpp);
        maxlevel = 2;
        ncubes = 64;   /* 2^6 */
        nbase = 8;
        nextra = maxcolors - nbase;
    } else if (maxcolors <= 64) {
        bpp = 8;
        pixd = pixCreate(w, h, bpp);
        maxlevel = 2;
        ncubes = 64;   /* 2^6 */
        nbase = 8;
        nextra = maxcolors - nbase;
    } else {  /* maxcolors <= 256 */
        bpp = 8;
        pixd = pixCreate(w, h, bpp);
        maxlevel = 3;
        ncubes = 512;  /* 2^9 */
        nbase = 64;
        nextra = maxcolors - nbase;
    }
    pixCopyResolution(pixd, pixs);
    pixCopyInputFormat(pixd, pixs);

    /*----------------------------------------------------------*
     * If we're using the minimum number of colors, it is       *
     * much simpler.  We just use 'nbase' octcubes.             *
     * For this case, we don't eliminate any extra colors.      *
     *----------------------------------------------------------*/
    if (nextra == 0) {
        /* prepare the OctcubeQuantCell array */
        if ((oqca = (OQCELL **)LEPT_CALLOC(nbase, sizeof(OQCELL *))) == NULL) {
            pixDestroy(&pixd);
            return (PIX *)ERROR_PTR("oqca not made", procName, NULL);
        }
        for (i = 0; i < nbase; i++) {
            oqca[i] = (OQCELL *)LEPT_CALLOC(1, sizeof(OQCELL));
            oqca[i]->n = 0.0;
        }

        rtab = gtab = btab = NULL;
        makeRGBToIndexTables(maxlevel - 1, &rtab, &gtab, &btab);

        /* Go through the entire image, gathering statistics and
         * assigning pixels to their quantized value */
        datad = pixGetData(pixd);
        wpld = pixGetWpl(pixd);
        for (i = 0; i < h; i++) {
            lines = datas + i * wpls;
            lined = datad + i * wpld;
            for (j = 0; j < w; j++) {
                pspixel = lines + j;
                extractRGBValues(*pspixel, &rval, &gval, &bval);
                getOctcubeIndexFromRGB(rval, gval, bval,
                                       rtab, gtab, btab, &index);
                /* lept_stderr("rval = %d, gval = %d, bval = %d,"
                               " index = %d\n", rval, gval, bval, index); */
                if (bpp == 4)
                    SET_DATA_QBIT(lined, j, index);
                else  /* bpp == 8 */
                    SET_DATA_BYTE(lined, j, index);
                oqca[index]->n += 1.0;
                oqca[index]->rcum += rval;
                oqca[index]->gcum += gval;
                oqca[index]->bcum += bval;
            }
        }

        /* Compute average color values in each octcube, and
         * generate colormap */
        cmap = pixcmapCreate(bpp);
        pixSetColormap(pixd, cmap);
        for (i = 0; i < nbase; i++) {
            oqc = oqca[i];
            if (oqc->n != 0) {
                oqc->rval = (l_int32)(oqc->rcum / oqc->n);
                oqc->gval = (l_int32)(oqc->gcum / oqc->n);
                oqc->bval = (l_int32)(oqc->bcum / oqc->n);
            } else {
                getRGBFromOctcube(i, maxlevel - 1,
                                  &oqc->rval, &oqc->gval, &oqc->bval);
            }
            pixcmapAddColor(cmap, oqc->rval, oqc->gval, oqc->bval);
        }

        for (i = 0; i < nbase; i++)
            LEPT_FREE(oqca[i]);
        LEPT_FREE(oqca);
        LEPT_FREE(rtab);
        LEPT_FREE(gtab);
        LEPT_FREE(btab);
        return pixd;
    }

    /*------------------------------------------------------------*
     * General case: we will use colors in octcubes at maxlevel.  *
     * We also remove any colors that are not populated from      *
     * the colormap.                                              *
     *------------------------------------------------------------*/
    /* Prepare the OctcubeQuantCell array */
    if ((oqca = (OQCELL **)LEPT_CALLOC(ncubes, sizeof(OQCELL *))) == NULL) {
        pixDestroy(&pixd);
        return (PIX *)ERROR_PTR("oqca not made", procName, NULL);
    }
    for (i = 0; i < ncubes; i++) {
        oqca[i] = (OQCELL *)LEPT_CALLOC(1, sizeof(OQCELL));
        oqca[i]->n = 0.0;
    }

    /* Make the tables to map color to the octindex,
     * of which there are 'ncubes' at 'maxlevel' */
    rtab = gtab = btab = NULL;
    makeRGBToIndexTables(maxlevel, &rtab, &gtab, &btab);

    /* Estimate the color distribution; we want to find the
     * most popular nextra colors at 'maxlevel' */
    for (i = 0; i < h; i += subsample) {
        lines = datas + i * wpls;
        for (j = 0; j < w; j += subsample) {
            pspixel = lines + j;
            extractRGBValues(*pspixel, &rval, &gval, &bval);
            getOctcubeIndexFromRGB(rval, gval, bval, rtab, gtab, btab, &index);
            oqca[index]->n += 1.0;
            oqca[index]->octindex = index;
            oqca[index]->rcum += rval;
            oqca[index]->gcum += gval;
            oqca[index]->bcum += bval;
        }
    }

    /* Transfer the OQCELL from the array, and order in a heap */
    lh = lheapCreate(512, L_SORT_DECREASING);
    for (i = 0; i < ncubes; i++)
        lheapAdd(lh, oqca[i]);
    LEPT_FREE(oqca);  /* don't need this array */

    /* Prepare a new OctcubeQuantCell array, with maxcolors cells */
    oqca = (OQCELL **)LEPT_CALLOC(maxcolors, sizeof(OQCELL *));
    for (i = 0; i < nbase; i++) {  /* make nbase cells */
        oqca[i] = (OQCELL *)LEPT_CALLOC(1, sizeof(OQCELL));
        oqca[i]->n = 0.0;
    }

    /* Remove the nextra most populated ones, and put them in the array */
    for (i = 0; i < nextra; i++) {
        oqc = (OQCELL *)lheapRemove(lh);
        oqc->n = 0.0;  /* reinit */
        oqc->rcum = 0;
        oqc->gcum = 0;
        oqc->bcum = 0;
        oqca[nbase + i] = oqc;  /* store it in the array */
    }

    /* Destroy the heap and its remaining contents */
    lheapDestroy(&lh, TRUE);

    /* Generate a lookup table from octindex at maxlevel
     * to color table index */
    lut1 = (l_int32 *)LEPT_CALLOC(ncubes, sizeof(l_int32));
    for (i = 0; i < nextra; i++)
        lut1[oqca[nbase + i]->octindex] = nbase + i;
    for (index = 0; index < ncubes; index++) {
        if (lut1[index] == 0)  /* not one of the extras; need to assign */
            lut1[index] = index >> 3;  /* remove the least significant bits */
        /* lept_stderr("lut1[%d] = %d\n", index, lut1[index]); */
    }

    /* Go through the entire image, gathering statistics and
     * assigning pixels to their quantized value */
    datad = pixGetData(pixd);
    wpld = pixGetWpl(pixd);
    for (i = 0; i < h; i++) {
        lines = datas + i * wpls;
        lined = datad + i * wpld;
        for (j = 0; j < w; j++) {
            pspixel = lines + j;
            extractRGBValues(*pspixel, &rval, &gval, &bval);
            getOctcubeIndexFromRGB(rval, gval, bval, rtab, gtab, btab, &index);
            /* lept_stderr("rval = %d, gval = %d, bval = %d, index = %d\n",
                           rval, gval, bval, index); */
            val = lut1[index];
            switch (bpp) {
            case 4:
                SET_DATA_QBIT(lined, j, val);
                break;
            case 8:
                SET_DATA_BYTE(lined, j, val);
                break;
            default:
                LEPT_FREE(oqca);
                LEPT_FREE(lut1);
                return (PIX *)ERROR_PTR("bpp not 4 or 8!", procName, NULL);
                break;
            }
            oqca[val]->n += 1.0;
            oqca[val]->rcum += rval;
            oqca[val]->gcum += gval;
            oqca[val]->bcum += bval;
        }
    }

    /* Compute averages, set up a colormap, and make a second
     * lut that converts from the color values currently in
     * the image to a minimal set */
    lut2 = (l_int32 *)LEPT_CALLOC(ncubes, sizeof(l_int32));
    cmap = pixcmapCreate(bpp);
    pixSetColormap(pixd, cmap);
    for (i = 0, index = 0; i < maxcolors; i++) {
        oqc = oqca[i];
        lut2[i] = index;
        if (oqc->n == 0)  /* no occupancy; don't bump up index */
            continue;
        oqc->rval = (l_int32)(oqc->rcum / oqc->n);
        oqc->gval = (l_int32)(oqc->gcum / oqc->n);
        oqc->bval = (l_int32)(oqc->bcum / oqc->n);
        pixcmapAddColor(cmap, oqc->rval, oqc->gval, oqc->bval);
        index++;
    }
    /* pixcmapWriteStream(stderr, cmap); */
    actualcolors = pixcmapGetCount(cmap);
    /* lept_stderr("Number of different colors = %d\n", actualcolors); */

    /* Last time through the image; use the lookup table to
     * remap the pixel value to the minimal colormap */
    if (actualcolors < maxcolors) {
        for (i = 0; i < h; i++) {
            lined = datad + i * wpld;
            for (j = 0; j < w; j++) {
                switch (bpp) {
                case 4:
                    val = GET_DATA_QBIT(lined, j);
                    SET_DATA_QBIT(lined, j, lut2[val]);
                    break;
                case 8:
                    val = GET_DATA_BYTE(lined, j);
                    SET_DATA_BYTE(lined, j, lut2[val]);
                    break;
                }
            }
        }
    }

    if (oqca) {
        for (i = 0; i < maxcolors; i++)
            LEPT_FREE(oqca[i]);
    }
    LEPT_FREE(oqca);
    LEPT_FREE(lut1);
    LEPT_FREE(lut2);
    LEPT_FREE(rtab);
    LEPT_FREE(gtab);
    LEPT_FREE(btab);
    return pixd;
}
0
[ "CWE-125" ]
leptonica
5ee24b398bb67666f6d173763eaaedd9c36fb1e5
117,408,059,232,472,580,000,000,000,000,000,000,000
295
Fixed issue 22140 in oss-fuzz: Heap-buffer-overflow

* color quantized pix must be 8 bpp before extra colors are added.
static unsigned HuffmanTree_makeFromLengths2(HuffmanTree* tree)
{
	uivector blcount;
	uivector nextcode;
	unsigned bits, n, error = 0;

	uivector_init(&blcount);
	uivector_init(&nextcode);

	tree->tree1d = (unsigned*)calloc(tree->numcodes, sizeof(unsigned));
	if(!tree->tree1d) error = 83; /*alloc fail*/

	if(!uivector_resizev(&blcount, tree->maxbitlen + 1, 0)
	|| !uivector_resizev(&nextcode, tree->maxbitlen + 1, 0))
		error = 83; /*alloc fail*/

	if(!error)
	{
		/*step 1: count number of instances of each code length*/
		for(bits = 0; bits < tree->numcodes; bits++)
			blcount.data[tree->lengths[bits]]++;

		/*step 2: generate the nextcode values*/
		for(bits = 1; bits <= tree->maxbitlen; bits++)
		{
			nextcode.data[bits] = (nextcode.data[bits - 1] + blcount.data[bits - 1]) << 1;
		}

		/*step 3: generate all the codes*/
		for(n = 0; n < tree->numcodes; n++)
		{
			if(tree->lengths[n] != 0)
				tree->tree1d[n] = nextcode.data[tree->lengths[n]]++;
		}
	}

	uivector_cleanup(&blcount);
	uivector_cleanup(&nextcode);

	if(!error)
		return HuffmanTree_make2DTree(tree);
	else
		return error;
}
0
[ "CWE-401" ]
FreeRDP
9fee4ae076b1ec97b97efb79ece08d1dab4df29a
74,415,327,421,336,180,000,000,000,000,000,000,000
38
Fixed #5645: realloc return handling
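The bug class named in the message is the p = realloc(p, n) idiom: on failure, realloc() returns NULL but leaves the old block alive, so assigning the result directly both leaks the block and loses the only pointer to it. A minimal sketch of the safe pattern:

#include <stdlib.h>

/* Grow a buffer without losing the original pointer on failure. */
static int grow_buffer(unsigned **buf, size_t new_count)
{
    unsigned *tmp = realloc(*buf, new_count * sizeof(**buf));
    if (!tmp)
        return -1;   /* *buf is still valid and still owned by the caller */
    *buf = tmp;
    return 0;
}

Assigning through a temporary keeps the caller in a recoverable state: on failure it can free or continue to use the original buffer instead of dereferencing NULL.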
long keyctl_keyring_link(key_serial_t id, key_serial_t ringid)
{
	key_ref_t keyring_ref, key_ref;
	long ret;

	keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
	if (IS_ERR(keyring_ref)) {
		ret = PTR_ERR(keyring_ref);
		goto error;
	}

	key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_LINK);
	if (IS_ERR(key_ref)) {
		ret = PTR_ERR(key_ref);
		goto error2;
	}

	ret = key_link(key_ref_to_ptr(keyring_ref), key_ref_to_ptr(key_ref));

	key_ref_put(key_ref);
error2:
	key_ref_put(keyring_ref);
error:
	return ret;
}
0
[ "CWE-347" ]
linux
ee8f844e3c5a73b999edf733df1c529d6503ec2f
200,904,449,988,877,300,000,000,000,000,000,000,000
25
KEYS: Disallow keyrings beginning with '.' to be joined as session keyrings

This fixes CVE-2016-9604.

Keyrings whose names begin with a '.' are special internal keyrings and so
userspace isn't allowed to create keyrings by this name to prevent
shadowing. However, the patch that added the guard didn't fix
KEYCTL_JOIN_SESSION_KEYRING. Not only can that create dot-named keyrings,
it can also subscribe to them as a session keyring if they grant SEARCH
permission to the user.

This, for example, allows a root process to set .builtin_trusted_keys as
its session keyring, at which point it has full access because now the
possessor permissions are added. This permits root to add extra public
keys, thereby bypassing module verification.

This also affects kexec and IMA.

This can be tested by (as root):

	keyctl session .builtin_trusted_keys
	keyctl add user a a @s
	keyctl list @s

which on my test box gives me:

	2 keys in keyring:
	180010936: ---lswrv     0     0  asymmetric: Build time autogenerated kernel key: ae3d4a31b82daa8e1a75b49dc2bba949fd992a05
	801382539: --alswrv     0     0  user: a

Fix this by rejecting names beginning with a '.' in the keyctl.

Signed-off-by: David Howells <[email protected]>
Acked-by: Mimi Zohar <[email protected]>
cc: [email protected]
cc: [email protected]
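The fix itself is a small name check on the join path: reject any requested keyring name beginning with '.', since those denote reserved internal keyrings. A sketch of the validation (the function name is illustrative, not the kernel's):

#include <stddef.h>

/* Names beginning with '.' denote internal keyrings; userspace must not
 * be able to create or join them by name. Returns nonzero if acceptable. */
static int keyring_name_ok(const char *name)
{
    /* reject NULL, empty, and reserved dot-prefixed names */
    return name != NULL && name[0] != '\0' && name[0] != '.';
}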
apr_status_t h2_session_create(h2_session **psession, conn_rec *c, h2_ctx *ctx,
                               h2_workers *workers)
{
    return h2_session_create_int(psession, c, NULL, ctx, workers);
}
0
[]
mod_h2
5e75e5685dd043fe93a5a08a15edd087a43f6968
67,096,842,579,634,230,000,000,000,000,000,000,000
5
v1.11.0
--------------------------------------------------------------------------------
 * connection IO event handling reworked. Instead of reacting to incoming
   bytes, the state machine now acts on incoming frames that affect it.
   This reduces state transitions.
 * pytest suite now covers some basic tests on h2 selection, GET and POST
 * started to add pytest suite from existing bash tests