Column schema (name, type, observed range):

  func        stringlengths     0 .. 484k
  target      int64             0 .. 1
  cwe         sequencelengths   0 .. 4
  project     stringclasses     799 values
  commit_id   stringlengths     40 .. 40
  hash        float64           1,215,700,430,453,689,100,000,000B .. 340,281,914,521,452,260,000,000,000,000B
  size        int64             1 .. 24k
  message     stringlengths     0 .. 13.3k
void ist_enter(struct pt_regs *regs) { if (user_mode(regs)) { RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); } else { /* * We might have interrupted pretty much anything. In * fact, if we're a machine check, we can even interrupt * NMI processing. We don't want in_nmi() to return true, * but we need to notify RCU. */ rcu_nmi_enter(); } preempt_disable(); /* This code is a bit fragile. Test it. */ RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work"); }
0
[ "CWE-362", "CWE-284" ]
linux
d8ba61ba58c88d5207c1ba2f7d9a2280e7d03be9
280,436,702,869,252,750,000,000,000,000,000,000,000
19
x86/entry/64: Don't use IST entry for #BP stack There's nothing IST-worthy about #BP/int3. We don't allow kprobes in the small handful of places in the kernel that run at CPL0 with an invalid stack, and 32-bit kernels have used normal interrupt gates for #BP forever. Furthermore, we don't allow kprobes in places that have usergs while in kernel mode, so "paranoid" is also unnecessary. Signed-off-by: Andy Lutomirski <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> Signed-off-by: Thomas Gleixner <[email protected]> Cc: [email protected]
static int fp_prime_miller_rabin_ex(fp_int * a, fp_int * b, int *result, fp_int *n1, fp_int *y, fp_int *r) { int s, j; int err; /* default */ *result = FP_NO; /* ensure b > 1 */ if (fp_cmp_d(b, 1) != FP_GT) { return FP_OKAY; } /* get n1 = a - 1 */ fp_copy(a, n1); err = fp_sub_d(n1, 1, n1); if (err != FP_OKAY) { return err; } /* set 2**s * r = n1 */ fp_copy(n1, r); /* count the number of least significant bits * which are zero */ s = fp_cnt_lsb(r); /* now divide n - 1 by 2**s */ fp_div_2d (r, s, r, NULL); /* compute y = b**r mod a */ fp_zero(y); #if (defined(WOLFSSL_HAVE_SP_RSA) && !defined(WOLFSSL_RSA_PUBLIC_ONLY)) || \ defined(WOLFSSL_HAVE_SP_DH) #ifndef WOLFSSL_SP_NO_2048 if (fp_count_bits(a) == 1024) sp_ModExp_1024(b, r, a, y); else if (fp_count_bits(a) == 2048) sp_ModExp_2048(b, r, a, y); else #endif #ifndef WOLFSSL_SP_NO_3072 if (fp_count_bits(a) == 1536) sp_ModExp_1536(b, r, a, y); else if (fp_count_bits(a) == 3072) sp_ModExp_3072(b, r, a, y); else #endif #ifdef WOLFSSL_SP_4096 if (fp_count_bits(a) == 4096) sp_ModExp_4096(b, r, a, y); else #endif #endif fp_exptmod(b, r, a, y); /* if y != 1 and y != n1 do */ if (fp_cmp_d (y, 1) != FP_EQ && fp_cmp (y, n1) != FP_EQ) { j = 1; /* while j <= s-1 and y != n1 */ while ((j <= (s - 1)) && fp_cmp (y, n1) != FP_EQ) { fp_sqrmod (y, a, y); /* if y == 1 then composite */ if (fp_cmp_d (y, 1) == FP_EQ) { return FP_OKAY; } ++j; } /* if y != n1 then composite */ if (fp_cmp (y, n1) != FP_EQ) { return FP_OKAY; } } /* probably prime now */ *result = FP_YES; return FP_OKAY; }
0
[ "CWE-326", "CWE-203" ]
wolfssl
1de07da61f0c8e9926dcbd68119f73230dae283f
284,885,308,894,492,960,000,000,000,000,000,000,000
83
Constant time EC map to affine for private operations For fast math, use a constant time modular inverse when mapping to affine when operation involves a private key - key gen, calc shared secret, sign.
void jpc_qmfb_join_row(jpc_fix_t *a, int numcols, int parity) { int bufsize = JPC_CEILDIVPOW2(numcols, 1); jpc_fix_t joinbuf[QMFB_JOINBUFSIZE]; jpc_fix_t *buf = joinbuf; register jpc_fix_t *srcptr; register jpc_fix_t *dstptr; register int n; int hstartcol; /* Allocate memory for the join buffer from the heap. */ if (bufsize > QMFB_JOINBUFSIZE) { if (!(buf = jas_malloc(bufsize * sizeof(jpc_fix_t)))) { /* We have no choice but to commit suicide. */ abort(); } } hstartcol = (numcols + 1 - parity) >> 1; /* Save the samples from the lowpass channel. */ n = hstartcol; srcptr = &a[0]; dstptr = buf; while (n-- > 0) { *dstptr = *srcptr; ++srcptr; ++dstptr; } /* Copy the samples from the highpass channel into place. */ srcptr = &a[hstartcol]; dstptr = &a[1 - parity]; n = numcols - hstartcol; while (n-- > 0) { *dstptr = *srcptr; dstptr += 2; ++srcptr; } /* Copy the samples from the lowpass channel into place. */ srcptr = buf; dstptr = &a[parity]; n = hstartcol; while (n-- > 0) { *dstptr = *srcptr; dstptr += 2; ++srcptr; } /* If the join buffer was allocated on the heap, free this memory. */ if (buf != joinbuf) { jas_free(buf); } }
1
[ "CWE-189" ]
jasper
3c55b399c36ef46befcb21e4ebc4799367f89684
191,509,043,773,661,800,000,000,000,000,000,000,000
55
At many places in the code, jas_malloc or jas_recalloc was being invoked with the size argument being computed in a manner that would not allow integer overflow to be detected. Now, these places in the code have been modified to use special-purpose memory allocation functions (e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow. This should fix many security problems.
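
The jasper fix above replaces raw size computations such as bufsize * sizeof(jpc_fix_t) with overflow-checking allocators (jas_alloc2 and friends). A minimal sketch of such a two-argument checked allocator, assuming nothing about jasper's real implementation:

#include <stdlib.h>
#include <stdint.h>

/* Hypothetical overflow-checking allocator in the spirit of jas_alloc2:
 * fail instead of silently wrapping when nmemb * size overflows size_t. */
static void *alloc2_checked(size_t nmemb, size_t size)
{
    if (size != 0 && nmemb > SIZE_MAX / size)
        return NULL;                 /* multiplication would overflow */
    return malloc(nmemb * size);
}

With such a helper, jpc_qmfb_join_row() could request its join buffer as alloc2_checked(bufsize, sizeof(jpc_fix_t)) instead of computing the product itself.
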
winlink_cmp(struct winlink *wl1, struct winlink *wl2) { return (wl1->idx - wl2->idx); }
0
[]
src
b32e1d34e10a0da806823f57f02a4ae6e93d756e
20,636,624,474,766,762,000,000,000,000,000,000,000
4
evbuffer_new and bufferevent_new can both fail (when malloc fails) and return NULL. GitHub issue 1547.
static const char *get_http_string(const struct Curl_easy *data, const struct connectdata *conn) { #ifdef ENABLE_QUIC if((data->state.httpwant == CURL_HTTP_VERSION_3) || (conn->httpversion == 30)) return "3"; #endif #ifdef USE_NGHTTP2 if(conn->proto.httpc.h2) return "2"; #endif if(Curl_use_http_1_1plus(data, conn)) return "1.1"; return "1.0"; }
0
[]
curl
48d7064a49148f03942380967da739dcde1cdc24
330,416,390,969,574,950,000,000,000,000,000,000,000
19
cookie: apply limits - Send no more than 150 cookies per request - Cap the max length used for a cookie: header to 8K - Cap the max number of received Set-Cookie: headers to 50 Bug: https://curl.se/docs/CVE-2022-32205.html CVE-2022-32205 Reported-by: Harry Sintonen Closes #9048
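
The curl change above enforces hard caps on the number of cookies per request and on the Cookie: header size. A rough illustration of that kind of limiting, with assumed constants and hypothetical names rather than curl's real ones:

#include <stdio.h>
#include <string.h>

#define MAX_COOKIES_PER_REQUEST 150    /* assumed cap, taken from the message above */
#define MAX_COOKIE_HEADER_LEN  8192    /* assumed 8K cap on the Cookie: header */

/* Append cookies until either cap is reached; returns how many were included. */
static size_t append_cookies(char *hdr, size_t hdrsize,
                             const char **cookies, size_t ncookies)
{
    size_t used = 0, sent = 0;
    for (size_t i = 0; i < ncookies && sent < MAX_COOKIES_PER_REQUEST; i++) {
        size_t len = strlen(cookies[i]) + 2;          /* plus "; " separator */
        if (used + len >= hdrsize || used + len >= MAX_COOKIE_HEADER_LEN)
            break;
        used += (size_t)snprintf(hdr + used, hdrsize - used, "%s; ", cookies[i]);
        sent++;
    }
    return sent;
}
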
static int check_nonce(request_rec *r, digest_header_rec *resp, const digest_config_rec *conf) { apr_time_t dt; time_rec nonce_time; char tmp, hash[NONCE_HASH_LEN+1]; if (strlen(resp->nonce) != NONCE_LEN) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01775) "invalid nonce %s received - length is not %d", resp->nonce, NONCE_LEN); note_digest_auth_failure(r, conf, resp, 1); return HTTP_UNAUTHORIZED; } tmp = resp->nonce[NONCE_TIME_LEN]; resp->nonce[NONCE_TIME_LEN] = '\0'; apr_base64_decode_binary(nonce_time.arr, resp->nonce); gen_nonce_hash(hash, resp->nonce, resp->opaque, r->server, conf); resp->nonce[NONCE_TIME_LEN] = tmp; resp->nonce_time = nonce_time.time; if (strcmp(hash, resp->nonce+NONCE_TIME_LEN)) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01776) "invalid nonce %s received - hash is not %s", resp->nonce, hash); note_digest_auth_failure(r, conf, resp, 1); return HTTP_UNAUTHORIZED; } dt = r->request_time - nonce_time.time; if (conf->nonce_lifetime > 0 && dt < 0) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01777) "invalid nonce %s received - user attempted " "time travel", resp->nonce); note_digest_auth_failure(r, conf, resp, 1); return HTTP_UNAUTHORIZED; } if (conf->nonce_lifetime > 0) { if (dt > conf->nonce_lifetime) { ap_log_rerror(APLOG_MARK, APLOG_INFO, 0,r, APLOGNO(01778) "user %s: nonce expired (%.2f seconds old " "- max lifetime %.2f) - sending new nonce", r->user, (double)apr_time_sec(dt), (double)apr_time_sec(conf->nonce_lifetime)); note_digest_auth_failure(r, conf, resp, 1); return HTTP_UNAUTHORIZED; } } else if (conf->nonce_lifetime == 0 && resp->client) { if (memcmp(resp->client->last_nonce, resp->nonce, NONCE_LEN)) { ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01779) "user %s: one-time-nonce mismatch - sending " "new nonce", r->user); note_digest_auth_failure(r, conf, resp, 1); return HTTP_UNAUTHORIZED; } } /* else (lifetime < 0) => never expires */ return OK; }
1
[ "CWE-787" ]
httpd
3b6431eb9c9dba603385f70a2131ab4a01bf0d3b
123,208,080,869,545,700,000,000,000,000,000,000,000
63
Merge r1885659 from trunk: mod_auth_digest: Fast validation of the nonce's base64 to fail early if the format can't match anyway. Submitted by: ylavic Reviewed by: ylavic, covener, jailletc36 git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1885666 13f79535-47bb-0310-9956-ffa450edef68
bool WindowsServiceControl::stop() { if( checkService() == false ) { return false; } SERVICE_STATUS status; // Try to stop the service if( ControlService( m_serviceHandle, SERVICE_CONTROL_STOP, &status ) ) { while( QueryServiceStatus( m_serviceHandle, &status ) ) { if( status.dwCurrentState == SERVICE_STOP_PENDING ) { Sleep( 1000 ); } else { break; } } if( status.dwCurrentState != SERVICE_STOPPED ) { vWarning() << "service" << m_name << "could not be stopped."; return false; } } return true; }
0
[ "CWE-428", "CWE-295" ]
veyon
f231ec511b9a09f43f49b2c7bb7c60b8046276b1
22,770,722,503,596,650,000,000,000,000,000,000,000
33
WindowsServiceControl: quote service binary path Fix unquoted service path vulnerability. Closes #657.
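
The veyon fix quotes the service binary path so that a path containing spaces (e.g. under C:\Program Files) cannot be resolved to an attacker-controlled C:\Program.exe. A small sketch of that quoting step (hypothetical helper, not veyon's actual code):

#include <stdio.h>

/* Wrap an unquoted service binary path in quotes so a path with spaces cannot
 * be interpreted as "C:\Program.exe" plus arguments (unquoted service path). */
static int quote_service_path(const char *path, char *out, size_t outsize)
{
    int n;
    if (path[0] == '"')                          /* already quoted: keep as-is */
        n = snprintf(out, outsize, "%s", path);
    else
        n = snprintf(out, outsize, "\"%s\"", path);
    return (n >= 0 && (size_t)n < outsize) ? 0 : -1;   /* -1 on truncation */
}
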
httpGetPending(http_t *http) /* I - HTTP connection */ { return (http ? (size_t)http->wused : 0); }
0
[ "CWE-120" ]
cups
f24e6cf6a39300ad0c3726a41a4aab51ad54c109
261,404,690,716,058,400,000,000,000,000,000,000,000
4
Fix multiple security/disclosure issues: - CVE-2019-8696 and CVE-2019-8675: Fixed SNMP buffer overflows (rdar://51685251) - Fixed IPP buffer overflow (rdar://50035411) - Fixed memory disclosure issue in the scheduler (rdar://51373853) - Fixed DoS issues in the scheduler (rdar://51373929)
hfinfo_number_value_format_display(const header_field_info *hfinfo, int display, char buf[32], guint32 value) { char *ptr = &buf[31]; gboolean isint = IS_FT_INT(hfinfo->type); *ptr = '\0'; /* Properly format value */ switch (FIELD_DISPLAY(display)) { case BASE_DEC: return isint ? int_to_str_back(ptr, (gint32) value) : uint_to_str_back(ptr, value); case BASE_DEC_HEX: *(--ptr) = ')'; ptr = hex_to_str_back(ptr, hfinfo_hex_digits(hfinfo), value); *(--ptr) = '('; *(--ptr) = ' '; ptr = isint ? int_to_str_back(ptr, (gint32) value) : uint_to_str_back(ptr, value); return ptr; case BASE_OCT: return oct_to_str_back(ptr, value); case BASE_HEX: return hex_to_str_back(ptr, hfinfo_hex_digits(hfinfo), value); case BASE_HEX_DEC: *(--ptr) = ')'; ptr = isint ? int_to_str_back(ptr, (gint32) value) : uint_to_str_back(ptr, value); *(--ptr) = '('; *(--ptr) = ' '; ptr = hex_to_str_back(ptr, hfinfo_hex_digits(hfinfo), value); return ptr; case BASE_PT_UDP: case BASE_PT_TCP: case BASE_PT_DCCP: case BASE_PT_SCTP: port_with_resolution_to_str_buf(buf, 32, display_to_port_type((field_display_e)display), value); return buf; case BASE_OUI: { guint8 p_oui[3]; const gchar *manuf_name; p_oui[0] = value >> 16 & 0xFF; p_oui[1] = value >> 8 & 0xFF; p_oui[2] = value & 0xFF; /* Attempt an OUI lookup. */ manuf_name = uint_get_manuf_name_if_known(value); if (manuf_name == NULL) { /* Could not find an OUI. */ g_snprintf(buf, 32, "%02x:%02x:%02x", p_oui[0], p_oui[1], p_oui[2]); } else { /* Found an address string. */ g_snprintf(buf, 32, "%02x:%02x:%02x (%s)", p_oui[0], p_oui[1], p_oui[2], manuf_name); } return buf; } default: g_assert_not_reached(); } return ptr; }
0
[ "CWE-401" ]
wireshark
a9fc769d7bb4b491efb61c699d57c9f35269d871
288,172,021,671,377,270,000,000,000,000,000,000,000
67
epan: Fix a memory leak. Make sure _proto_tree_add_bits_ret_val allocates a bits array using the packet scope, otherwise we leak memory. Fixes #17032.
static zend_bool php_auto_globals_create_post(const char *name, uint name_len TSRMLS_DC) { zval *vars; if (PG(variables_order) && (strchr(PG(variables_order),'P') || strchr(PG(variables_order),'p')) && SG(request_info).request_method && !strcasecmp(SG(request_info).request_method, "POST")) { sapi_module.treat_data(PARSE_POST, NULL, NULL TSRMLS_CC); vars = PG(http_globals)[TRACK_VARS_POST]; } else { ALLOC_ZVAL(vars); array_init(vars); INIT_PZVAL(vars); if (PG(http_globals)[TRACK_VARS_POST]) { zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_POST]); } PG(http_globals)[TRACK_VARS_POST] = vars; } zend_hash_update(&EG(symbol_table), name, name_len + 1, &vars, sizeof(zval *), NULL); Z_ADDREF_P(vars); return 0; /* don't rearm */ }
0
[ "CWE-400", "CWE-703" ]
php-src
0f8cf3b8497dc45c010c44ed9e96518e11e19fc3
273,558,628,837,799,670,000,000,000,000,000,000,000
25
Fix bug #73807
static void tx_data_destroy(pjsip_tx_data *tdata) { PJ_LOG(5,(tdata->obj_name, "Destroying txdata %s", pjsip_tx_data_get_info(tdata))); pjsip_tpselector_dec_ref(&tdata->tp_sel); #if defined(PJ_DEBUG) && PJ_DEBUG!=0 pj_atomic_dec( tdata->mgr->tdata_counter ); #endif #if defined(PJSIP_HAS_TX_DATA_LIST) && PJSIP_HAS_TX_DATA_LIST!=0 /* Remove this tdata from transmit buffer list */ pj_lock_acquire(tdata->mgr->lock); pj_list_erase(tdata); pj_lock_release(tdata->mgr->lock); #endif pj_atomic_destroy( tdata->ref_cnt ); pj_lock_destroy( tdata->lock ); pjsip_endpt_release_pool( tdata->mgr->endpt, tdata->pool ); }
0
[ "CWE-297", "CWE-295" ]
pjproject
67e46c1ac45ad784db5b9080f5ed8b133c122872
178,284,024,680,165,430,000,000,000,000,000,000,000
20
Merge pull request from GHSA-8hcp-hm38-mfph * Check hostname during TLS transport selection * revision based on feedback * remove the code in create_request that has been moved
InSliceInfo::InSliceInfo (PixelType tifb, char * b, PixelType tifl, size_t xpst, size_t ypst, size_t spst, int xsm, int ysm, bool f, bool s, double fv) : typeInFrameBuffer (tifb), typeInFile (tifl), base(b), xPointerStride (xpst), yPointerStride (ypst), sampleStride (spst), xSampling (xsm), ySampling (ysm), fill (f), skip (s), fillValue (fv) { // empty }
0
[ "CWE-125" ]
openexr
e79d2296496a50826a15c667bf92bdc5a05518b4
247,876,844,386,801,970,000,000,000,000,000,000,000
24
fix memory leaks and invalid memory accesses Signed-off-by: Peter Hillman <[email protected]>
megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) { int i, j; struct pci_dev *pdev; pdev = instance->pdev; /* Try MSI-x */ for (i = 0; i < instance->msix_vectors; i++) { instance->irq_context[i].instance = instance; instance->irq_context[i].MSIxIndex = i; if (request_irq(pci_irq_vector(pdev, i), instance->instancet->service_isr, 0, "megasas", &instance->irq_context[i])) { dev_err(&instance->pdev->dev, "Failed to register IRQ for vector %d.\n", i); for (j = 0; j < i; j++) free_irq(pci_irq_vector(pdev, j), &instance->irq_context[j]); /* Retry irq register for IO_APIC*/ instance->msix_vectors = 0; if (is_probe) { pci_free_irq_vectors(instance->pdev); return megasas_setup_irqs_ioapic(instance); } else { return -1; } } } return 0; }
0
[ "CWE-476" ]
linux
bcf3b67d16a4c8ffae0aa79de5853435e683945c
49,219,103,012,821,600,000,000,000,000,000,000,000
31
scsi: megaraid_sas: return error when create DMA pool failed when create DMA pool for cmd frames failed, we should return -ENOMEM, instead of 0. In some case in: megasas_init_adapter_fusion() -->megasas_alloc_cmds() -->megasas_create_frame_pool create DMA pool failed, --> megasas_free_cmds() [1] -->megasas_alloc_cmds_fusion() failed, then goto fail_alloc_cmds. -->megasas_free_cmds() [2] we will call megasas_free_cmds twice, [1] will kfree cmd_list, [2] will use cmd_list.it will cause a problem: Unable to handle kernel NULL pointer dereference at virtual address 00000000 pgd = ffffffc000f70000 [00000000] *pgd=0000001fbf893003, *pud=0000001fbf893003, *pmd=0000001fbf894003, *pte=006000006d000707 Internal error: Oops: 96000005 [#1] SMP Modules linked in: CPU: 18 PID: 1 Comm: swapper/0 Not tainted task: ffffffdfb9290000 ti: ffffffdfb923c000 task.ti: ffffffdfb923c000 PC is at megasas_free_cmds+0x30/0x70 LR is at megasas_free_cmds+0x24/0x70 ... Call trace: [<ffffffc0005b779c>] megasas_free_cmds+0x30/0x70 [<ffffffc0005bca74>] megasas_init_adapter_fusion+0x2f4/0x4d8 [<ffffffc0005b926c>] megasas_init_fw+0x2dc/0x760 [<ffffffc0005b9ab0>] megasas_probe_one+0x3c0/0xcd8 [<ffffffc0004a5abc>] local_pci_probe+0x4c/0xb4 [<ffffffc0004a5c40>] pci_device_probe+0x11c/0x14c [<ffffffc00053a5e4>] driver_probe_device+0x1ec/0x430 [<ffffffc00053a92c>] __driver_attach+0xa8/0xb0 [<ffffffc000538178>] bus_for_each_dev+0x74/0xc8 [<ffffffc000539e88>] driver_attach+0x28/0x34 [<ffffffc000539a18>] bus_add_driver+0x16c/0x248 [<ffffffc00053b234>] driver_register+0x6c/0x138 [<ffffffc0004a5350>] __pci_register_driver+0x5c/0x6c [<ffffffc000ce3868>] megasas_init+0xc0/0x1a8 [<ffffffc000082a58>] do_one_initcall+0xe8/0x1ec [<ffffffc000ca7be8>] kernel_init_freeable+0x1c8/0x284 [<ffffffc0008d90b8>] kernel_init+0x1c/0xe4 Signed-off-by: Jason Yan <[email protected]> Acked-by: Sumit Saxena <[email protected]> Signed-off-by: Martin K. Petersen <[email protected]>
static __net_init int raw_init_net(struct net *net) { if (!proc_net_fops_create(net, "raw", S_IRUGO, &raw_seq_fops)) return -ENOMEM; return 0; }
0
[ "CWE-362" ]
linux-2.6
f6d8bd051c391c1c0458a30b2a7abcd939329259
59,080,099,556,571,480,000,000,000,000,000,000,000
7
inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options Problem is ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change inet->opt pointer and free old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We cant insert an rcu_head in struct ip_options since its included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <[email protected]> Cc: Herbert Xu <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static int check_cond_jmp_op(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx) { struct bpf_verifier_state *this_branch = env->cur_state; struct bpf_verifier_state *other_branch; struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; struct bpf_reg_state *dst_reg, *other_branch_regs; u8 opcode = BPF_OP(insn->code); int err; if (opcode > BPF_JSLE) { verbose(env, "invalid BPF_JMP opcode %x\n", opcode); return -EINVAL; } if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0) { verbose(env, "BPF_JMP uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d pointer comparison prohibited\n", insn->src_reg); return -EACCES; } } else { if (insn->src_reg != BPF_REG_0) { verbose(env, "BPF_JMP uses reserved fields\n"); return -EINVAL; } } /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; dst_reg = &regs[insn->dst_reg]; if (BPF_SRC(insn->code) == BPF_K) { int pred = is_branch_taken(dst_reg, insn->imm, opcode); if (pred == 1) { /* only follow the goto, ignore fall-through */ *insn_idx += insn->off; return 0; } else if (pred == 0) { /* only follow fall-through branch, since * that's where the program will go */ return 0; } } other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, false); if (!other_branch) return -EFAULT; other_branch_regs = other_branch->frame[other_branch->curframe]->regs; /* detect if we are comparing against a constant value so we can adjust * our min/max values for our dst register. * this is only legit if both are scalars (or pointers to the same * object, I suppose, but we don't support that right now), because * otherwise the different base pointers mean the offsets aren't * comparable. */ if (BPF_SRC(insn->code) == BPF_X) { if (dst_reg->type == SCALAR_VALUE && regs[insn->src_reg].type == SCALAR_VALUE) { if (tnum_is_const(regs[insn->src_reg].var_off)) reg_set_min_max(&other_branch_regs[insn->dst_reg], dst_reg, regs[insn->src_reg].var_off.value, opcode); else if (tnum_is_const(dst_reg->var_off)) reg_set_min_max_inv(&other_branch_regs[insn->src_reg], &regs[insn->src_reg], dst_reg->var_off.value, opcode); else if (opcode == BPF_JEQ || opcode == BPF_JNE) /* Comparing for equality, we can combine knowledge */ reg_combine_min_max(&other_branch_regs[insn->src_reg], &other_branch_regs[insn->dst_reg], &regs[insn->src_reg], &regs[insn->dst_reg], opcode); } } else if (dst_reg->type == SCALAR_VALUE) { reg_set_min_max(&other_branch_regs[insn->dst_reg], dst_reg, insn->imm, opcode); } /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */ if (BPF_SRC(insn->code) == BPF_K && insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && reg_type_may_be_null(dst_reg->type)) { /* Mark all identical registers in each branch as either * safe or unknown depending R == 0 or R != 0 conditional. */ mark_ptr_or_null_regs(this_branch, insn->dst_reg, opcode == BPF_JNE); mark_ptr_or_null_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ); } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg], this_branch, other_branch) && is_pointer_value(env, insn->dst_reg)) { verbose(env, "R%d pointer comparison prohibited\n", insn->dst_reg); return -EACCES; } if (env->log.level) print_verifier_state(env, this_branch->frame[this_branch->curframe]); return 0; }
0
[ "CWE-703", "CWE-189" ]
linux
979d63d50c0c0f7bc537bf821e056cc9fe5abd38
8,118,950,935,345,117,000,000,000,000,000,000,000
118
bpf: prevent out of bounds speculation on pointer arithmetic Jann reported that the original commit back in b2157399cc98 ("bpf: prevent out-of-bounds speculation") was not sufficient to stop CPU from speculating out of bounds memory access: While b2157399cc98 only focussed on masking array map access for unprivileged users for tail calls and data access such that the user provided index gets sanitized from BPF program and syscall side, there is still a more generic form affected from BPF programs that applies to most maps that hold user data in relation to dynamic map access when dealing with unknown scalars or "slow" known scalars as access offset, for example: - Load a map value pointer into R6 - Load an index into R7 - Do a slow computation (e.g. with a memory dependency) that loads a limit into R8 (e.g. load the limit from a map for high latency, then mask it to make the verifier happy) - Exit if R7 >= R8 (mispredicted branch) - Load R0 = R6[R7] - Load R0 = R6[R0] For unknown scalars there are two options in the BPF verifier where we could derive knowledge from in order to guarantee safe access to the memory: i) While </>/<=/>= variants won't allow to derive any lower or upper bounds from the unknown scalar where it would be safe to add it to the map value pointer, it is possible through ==/!= test however. ii) another option is to transform the unknown scalar into a known scalar, for example, through ALU ops combination such as R &= <imm> followed by R |= <imm> or any similar combination where the original information from the unknown scalar would be destroyed entirely leaving R with a constant. The initial slow load still precedes the latter ALU ops on that register, so the CPU executes speculatively from that point. Once we have the known scalar, any compare operation would work then. A third option only involving registers with known scalars could be crafted as described in [0] where a CPU port (e.g. Slow Int unit) would be filled with many dependent computations such that the subsequent condition depending on its outcome has to wait for evaluation on its execution port and thereby executing speculatively if the speculated code can be scheduled on a different execution port, or any other form of mistraining as described in [1], for example. Given this is not limited to only unknown scalars, not only map but also stack access is affected since both is accessible for unprivileged users and could potentially be used for out of bounds access under speculation. In order to prevent any of these cases, the verifier is now sanitizing pointer arithmetic on the offset such that any out of bounds speculation would be masked in a way where the pointer arithmetic result in the destination register will stay unchanged, meaning offset masked into zero similar as in array_index_nospec() case. With regards to implementation, there are three options that were considered: i) new insn for sanitation, ii) push/pop insn and sanitation as inlined BPF, iii) reuse of ax register and sanitation as inlined BPF. Option i) has the downside that we end up using from reserved bits in the opcode space, but also that we would require each JIT to emit masking as native arch opcodes meaning mitigation would have slow adoption till everyone implements it eventually which is counter-productive. Option ii) and iii) have both in common that a temporary register is needed in order to implement the sanitation as inlined BPF since we are not allowed to modify the source register. 
While a push / pop insn in ii) would be useful to have in any case, it requires once again that every JIT needs to implement it first. While possible, amount of changes needed would also be unsuitable for a -stable patch. Therefore, the path which has fewer changes, less BPF instructions for the mitigation and does not require anything to be changed in the JITs is option iii) which this work is pursuing. The ax register is already mapped to a register in all JITs (modulo arm32 where it's mapped to stack as various other BPF registers there) and used in constant blinding for JITs-only so far. It can be reused for verifier rewrites under certain constraints. The interpreter's tmp "register" has therefore been remapped into extending the register set with hidden ax register and reusing that for a number of instructions that needed the prior temporary variable internally (e.g. div, mod). This allows for zero increase in stack space usage in the interpreter, and enables (restricted) generic use in rewrites otherwise as long as such a patchlet does not make use of these instructions. The sanitation mask is dynamic and relative to the offset the map value or stack pointer currently holds. There are various cases that need to be taken under consideration for the masking, e.g. such operation could look as follows: ptr += val or val += ptr or ptr -= val. Thus, the value to be sanitized could reside either in source or in destination register, and the limit is different depending on whether the ALU op is addition or subtraction and depending on the current known and bounded offset. The limit is derived as follows: limit := max_value_size - (smin_value + off). For subtraction: limit := umax_value + off. This holds because we do not allow any pointer arithmetic that would temporarily go out of bounds or would have an unknown value with mixed signed bounds where it is unclear at verification time whether the actual runtime value would be either negative or positive. For example, we have a derived map pointer value with constant offset and bounded one, so limit based on smin_value works because the verifier requires that statically analyzed arithmetic on the pointer must be in bounds, and thus it checks if resulting smin_value + off and umax_value + off is still within map value bounds at time of arithmetic in addition to time of access. Similarly, for the case of stack access we derive the limit as follows: MAX_BPF_STACK + off for subtraction and -off for the case of addition where off := ptr_reg->off + ptr_reg->var_off.value. Subtraction is a special case for the masking which can be in form of ptr += -val, ptr -= -val, or ptr -= val. In the first two cases where we know that the value is negative, we need to temporarily negate the value in order to do the sanitation on a positive value where we later swap the ALU op, and restore original source register if the value was in source. The sanitation of pointer arithmetic alone is still not fully sufficient as is, since a scenario like the following could happen ... PTR += 0x1000 (e.g. K-based imm) PTR -= BIG_NUMBER_WITH_SLOW_COMPARISON PTR += 0x1000 PTR -= BIG_NUMBER_WITH_SLOW_COMPARISON [...] ... which under speculation could end up as ... PTR += 0x1000 PTR -= 0 [ truncated by mitigation ] PTR += 0x1000 PTR -= 0 [ truncated by mitigation ] [...] ... and therefore still access out of bounds. To prevent such case, the verifier is also analyzing safety for potential out of bounds access under speculative execution. 
Meaning, it is also simulating pointer access under truncation. We therefore "branch off" and push the current verification state after the ALU operation with known 0 to the verification stack for later analysis. Given the current path analysis succeeded it is likely that the one under speculation can be pruned. In any case, it is also subject to existing complexity limits and therefore anything beyond this point will be rejected. In terms of pruning, it needs to be ensured that the verification state from speculative execution simulation must never prune a non-speculative execution path, therefore, we mark verifier state accordingly at the time of push_stack(). If verifier detects out of bounds access under speculative execution from one of the possible paths that includes a truncation, it will reject such program. Given we mask every reg-based pointer arithmetic for unprivileged programs, we've been looking into how it could affect real-world programs in terms of size increase. As the majority of programs are targeted for privileged-only use case, we've unconditionally enabled masking (with its alu restrictions on top of it) for privileged programs for the sake of testing in order to check i) whether they get rejected in its current form, and ii) by how much the number of instructions and size will increase. We've tested this by using Katran, Cilium and test_l4lb from the kernel selftests. For Katran we've evaluated balancer_kern.o, Cilium bpf_lxc.o and an older test object bpf_lxc_opt_-DUNKNOWN.o and l4lb we've used test_l4lb.o as well as test_l4lb_noinline.o. We found that none of the programs got rejected by the verifier with this change, and that impact is rather minimal to none. balancer_kern.o had 13,904 bytes (1,738 insns) xlated and 7,797 bytes JITed before and after the change. Most complex program in bpf_lxc.o had 30,544 bytes (3,817 insns) xlated and 18,538 bytes JITed before and after and none of the other tail call programs in bpf_lxc.o had any changes either. For the older bpf_lxc_opt_-DUNKNOWN.o object we found a small increase from 20,616 bytes (2,576 insns) and 12,536 bytes JITed before to 20,664 bytes (2,582 insns) and 12,558 bytes JITed after the change. Other programs from that object file had similar small increase. Both test_l4lb.o had no change and remained at 6,544 bytes (817 insns) xlated and 3,401 bytes JITed and for test_l4lb_noinline.o constant at 5,080 bytes (634 insns) xlated and 3,313 bytes JITed. This can be explained in that LLVM typically optimizes stack based pointer arithmetic by using K-based operations and that use of dynamic map access is not overly frequent. However, in future we may decide to optimize the algorithm further under known guarantees from branch and value speculation. Latter seems also unclear in terms of prediction heuristics that today's CPUs apply as well as whether there could be collisions in e.g. the predictor's Value History/Pattern Table for triggering out of bounds access, thus masking is performed unconditionally at this point but could be subject to relaxation later on. We were generally also brainstorming various other approaches for mitigation, but the blocker was always lack of available registers at runtime and/or overhead for runtime tracking of limits belonging to a specific pointer. Thus, we found this to be minimally intrusive under given constraints. With that in place, a simple example with sanitized access on unprivileged load at post-verification time looks as follows: # bpftool prog dump xlated id 282 [...] 
28: (79) r1 = *(u64 *)(r7 +0) 29: (79) r2 = *(u64 *)(r7 +8) 30: (57) r1 &= 15 31: (79) r3 = *(u64 *)(r0 +4608) 32: (57) r3 &= 1 33: (47) r3 |= 1 34: (2d) if r2 > r3 goto pc+19 35: (b4) (u32) r11 = (u32) 20479 | 36: (1f) r11 -= r2 | Dynamic sanitation for pointer 37: (4f) r11 |= r2 | arithmetic with registers 38: (87) r11 = -r11 | containing bounded or known 39: (c7) r11 s>>= 63 | scalars in order to prevent 40: (5f) r11 &= r2 | out of bounds speculation. 41: (0f) r4 += r11 | 42: (71) r4 = *(u8 *)(r4 +0) 43: (6f) r4 <<= r1 [...] For the case where the scalar sits in the destination register as opposed to the source register, the following code is emitted for the above example: [...] 16: (b4) (u32) r11 = (u32) 20479 17: (1f) r11 -= r2 18: (4f) r11 |= r2 19: (87) r11 = -r11 20: (c7) r11 s>>= 63 21: (5f) r2 &= r11 22: (0f) r2 += r0 23: (61) r0 = *(u32 *)(r2 +0) [...] JIT blinding example with non-conflicting use of r10: [...] d5: je 0x0000000000000106 _ d7: mov 0x0(%rax),%edi | da: mov $0xf153246,%r10d | Index load from map value and e0: xor $0xf153259,%r10 | (const blinded) mask with 0x1f. e7: and %r10,%rdi |_ ea: mov $0x2f,%r10d | f0: sub %rdi,%r10 | Sanitized addition. Both use r10 f3: or %rdi,%r10 | but do not interfere with each f6: neg %r10 | other. (Neither do these instructions f9: sar $0x3f,%r10 | interfere with the use of ax as temp fd: and %r10,%rdi | in interpreter.) 100: add %rax,%rdi |_ 103: mov 0x0(%rdi),%eax [...] Tested that it fixes Jann's reproducer, and also checked that test_verifier and test_progs suite with interpreter, JIT and JIT with hardening enabled on x86-64 and arm64 runs successfully. [0] Speculose: Analyzing the Security Implications of Speculative Execution in CPUs, Giorgi Maisuradze and Christian Rossow, https://arxiv.org/pdf/1801.04084.pdf [1] A Systematic Evaluation of Transient Execution Attacks and Defenses, Claudio Canella, Jo Van Bulck, Michael Schwarz, Moritz Lipp, Benjamin von Berg, Philipp Ortner, Frank Piessens, Dmitry Evtyushkin, Daniel Gruss, https://arxiv.org/pdf/1811.05441.pdf Fixes: b2157399cc98 ("bpf: prevent out-of-bounds speculation") Reported-by: Jann Horn <[email protected]> Signed-off-by: Daniel Borkmann <[email protected]> Acked-by: Alexei Starovoitov <[email protected]> Signed-off-by: Alexei Starovoitov <[email protected]>
static inline int mb_find_next_zero_bit(void *addr, int max, int start) { int fix = 0, ret, tmpmax; addr = mb_correct_addr_and_bit(&fix, addr); tmpmax = max + fix; start += fix; ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix; if (ret > max) return max; return ret; }
0
[ "CWE-416" ]
linux
8844618d8aa7a9973e7b527d038a2a589665002c
85,134,962,239,069,800,000,000,000,000,000,000,000
12
ext4: only look at the bg_flags field if it is valid The bg_flags field in the block group descripts is only valid if the uninit_bg or metadata_csum feature is enabled. We were not consistently looking at this field; fix this. Also block group #0 must never have uninitialized allocation bitmaps, or need to be zeroed, since that's where the root inode, and other special inodes are set up. Check for these conditions and mark the file system as corrupted if they are detected. This addresses CVE-2018-10876. https://bugzilla.kernel.org/show_bug.cgi?id=199403 Signed-off-by: Theodore Ts'o <[email protected]> Cc: [email protected]
static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end) { trace_kvm_age_hva(start, end); /* * Even though we do not flush TLB, this will still adversely * affect performance on pre-Haswell Intel EPT, where there is * no EPT Access Bit to clear so that we have to tear down EPT * tables instead. If we find this unacceptable, we can always * add a parameter to kvm_age_hva so that it effectively doesn't * do anything on clear_young. * * Also note that currently we never issue secondary TLB flushes * from clear_young, leaving this job up to the regular system * cadence. If we find this inaccurate, we might come up with a * more sophisticated heuristic later. */ return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn); }
0
[ "CWE-119" ]
linux
f8be156be163a052a067306417cd0ff679068c97
217,348,278,772,285,900,000,000,000,000,000,000,000
22
KVM: do not allow mapping valid but non-reference-counted pages It's possible to create a region which maps valid but non-refcounted pages (e.g., tail pages of non-compound higher order allocations). These host pages can then be returned by gfn_to_page, gfn_to_pfn, etc., family of APIs, which take a reference to the page, which takes it from 0 to 1. When the reference is dropped, this will free the page incorrectly. Fix this by only taking a reference on valid pages if it was non-zero, which indicates it is participating in normal refcounting (and can be released with put_page). This addresses CVE-2021-22543. Signed-off-by: Nicholas Piggin <[email protected]> Tested-by: Paolo Bonzini <[email protected]> Cc: [email protected] Signed-off-by: Paolo Bonzini <[email protected]>
dump_innodb_buffer_pool(MYSQL *connection) { innodb_buffer_pool_dump = has_innodb_buffer_pool_dump(); innodb_buffer_pool_dump_pct = has_innodb_buffer_pool_dump_pct(); if (!innodb_buffer_pool_dump) { return; } innodb_buffer_pool_dump_start_time = (ssize_t)my_time(MY_WME); char *buf_innodb_buffer_pool_dump_pct; char change_bp_dump_pct_query[100]; /* Verify if we need to change innodb_buffer_pool_dump_pct */ if (opt_dump_innodb_buffer_pool_pct != 0 && innodb_buffer_pool_dump_pct) { mysql_variable variables[] = { {"innodb_buffer_pool_dump_pct", &buf_innodb_buffer_pool_dump_pct}, {NULL, NULL} }; read_mysql_variables(connection, "SHOW GLOBAL VARIABLES " "LIKE 'innodb_buffer_pool_dump_pct'", variables, true); original_innodb_buffer_pool_dump_pct = atoi(buf_innodb_buffer_pool_dump_pct); free_mysql_variables(variables); ut_snprintf(change_bp_dump_pct_query, sizeof(change_bp_dump_pct_query), "SET GLOBAL innodb_buffer_pool_dump_pct = %u", opt_dump_innodb_buffer_pool_pct); msg_ts("Executing %s \n", change_bp_dump_pct_query); xb_mysql_query(mysql_connection, change_bp_dump_pct_query, false); } msg_ts("Executing SET GLOBAL innodb_buffer_pool_dump_now=ON...\n"); xb_mysql_query(mysql_connection, "SET GLOBAL innodb_buffer_pool_dump_now=ON;", false); }
0
[ "CWE-200" ]
percona-xtrabackup
7742f875bb289a874246fb4653b7cd9f14b588fe
50,697,922,935,858,670,000,000,000,000,000,000,000
43
PXB-2722 password is written into xtrabackup_info https://jira.percona.com/browse/PXB-2722 Analysis: password passed with -p option is written into backup tool_command in xtrabackup_info Fix: mask password before writting into xtrabackup_info
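
The PXB-2722 fix masks the password before the tool's command line is recorded in xtrabackup_info. A tiny illustration of that masking (hypothetical helper, not Percona's actual code):

#include <stdio.h>
#include <string.h>

/* Replace the value of -p / --password=... with "***" before logging argv. */
static void log_masked_command(int argc, char **argv, FILE *out)
{
    for (int i = 0; i < argc; i++) {
        if (strncmp(argv[i], "--password=", 11) == 0)
            fprintf(out, "--password=*** ");
        else if (strncmp(argv[i], "-p", 2) == 0 && argv[i][2] != '\0')
            fprintf(out, "-p*** ");
        else
            fprintf(out, "%s ", argv[i]);
    }
    fputc('\n', out);
}
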
inline int Http2Stream::ReadStart() { CHECK(!this->IsDestroyed()); flags_ |= NGHTTP2_STREAM_FLAG_READ_START; flags_ &= ~NGHTTP2_STREAM_FLAG_READ_PAUSED; // Flush any queued data chunks immediately out to the JS layer FlushDataChunks(); DEBUG_HTTP2STREAM(this, "reading starting"); return 0; }
0
[]
node
ce22d6f9178507c7a41b04ac4097b9ea902049e3
333,010,833,016,374,520,000,000,000,000,000,000,000
10
http2: add altsvc support Add support for sending and receiving ALTSVC frames. PR-URL: https://github.com/nodejs/node/pull/17917 Reviewed-By: Anna Henningsen <[email protected]> Reviewed-By: Tiancheng "Timothy" Gu <[email protected]> Reviewed-By: Matteo Collina <[email protected]>
sys_auth_passwd(Authctxt *authctxt, const char *password) { struct passwd *pw = authctxt->pw; auth_session_t *as; static int expire_checked = 0; as = auth_usercheck(pw->pw_name, authctxt->style, "auth-ssh", (char *)password); if (as == NULL) return (0); if (auth_getstate(as) & AUTH_PWEXPIRED) { auth_close(as); disable_forwarding(); authctxt->force_pwchange = 1; return (1); } else { if (!expire_checked) { expire_checked = 1; warn_expiry(authctxt, as); } return (auth_close(as)); } }
0
[ "CWE-20" ]
openssh-portable
fcd135c9df440bcd2d5870405ad3311743d78d97
105,896,923,474,288,200,000,000,000,000,000,000,000
23
upstream commit Skip passwords longer than 1k in length so clients can't easily DoS sshd by sending very long passwords, causing it to spend CPU hashing them. feedback djm@, ok markus@. Brought to our attention by tomas.kuthan at oracle.com, shilei-c at 360.cn and coredump at autistici.org Upstream-ID: d0af7d4a2190b63ba1d38eec502bc4be0be9e333
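
The OpenSSH change rejects very long passwords up front so a client cannot make sshd spend CPU hashing them. A minimal sketch of that early guard, with an assumed 1 KB cap:

#include <string.h>

#define MAX_PASSWORD_LEN 1024   /* assumed cap; the message says "longer than 1k" */

/* Hypothetical wrapper: bail out before the expensive hash for oversized input. */
static int auth_password_guarded(const char *password,
                                 int (*expensive_verify)(const char *))
{
    if (password == NULL || strlen(password) > MAX_PASSWORD_LEN)
        return 0;               /* fail fast, no CPU spent hashing */
    return expensive_verify(password);
}
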
static void server_cache_boot_id(Server *s) { sd_id128_t id; int r; assert(s); r = sd_id128_get_boot(&id); if (r < 0) return; sd_id128_to_string(id, stpcpy(s->boot_id_field, "_BOOT_ID=")); }
0
[ "CWE-770" ]
systemd
084eeb865ca63887098e0945fb4e93c852b91b0f
169,009,384,047,271,980,000,000,000,000,000,000,000
12
journald: do not store the iovec entry for process commandline on stack This fixes a crash where we would read the commandline, whose length is under control of the sending program, and then crash when trying to create a stack allocation for it. CVE-2018-16864 https://bugzilla.redhat.com/show_bug.cgi?id=1653855 The message actually doesn't get written to disk, because journal_file_append_entry() returns -E2BIG.
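
CVE-2018-16864 came from building an iovec entry for the sender-controlled command line on the stack. The general rule the fix applies, never size a stack allocation from untrusted input, can be sketched like this (illustrative field name, not journald's actual code):

#include <stdlib.h>
#include <string.h>

/* Build "_CMDLINE=<cmdline>" without a stack allocation sized by the sender. */
static char *make_cmdline_field(const char *cmdline)
{
    const char prefix[] = "_CMDLINE=";
    size_t len = strlen(cmdline);
    char *field = malloc(sizeof(prefix) + len);    /* sizeof(prefix) includes the NUL */
    if (field == NULL)
        return NULL;
    memcpy(field, prefix, sizeof(prefix) - 1);
    memcpy(field + sizeof(prefix) - 1, cmdline, len + 1);
    return field;                                   /* caller frees */
}
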
lyd_keyless_list_hash_change(struct lyd_node *parent) { int r; while (parent && (parent->schema->flags & LYS_CONFIG_R)) { if (parent->schema->nodetype == LYS_LIST) { if (!((struct lys_node_list *)parent->schema)->keys_size) { if (parent->parent && parent->parent->ht) { /* remove the list from the parent */ r = lyht_remove(parent->parent->ht, &parent, parent->hash); assert(!r); (void)r; } /* recalculate the hash */ lyd_hash(parent); if (parent->parent && parent->parent->ht) { /* re-add the list again */ r = lyht_insert(parent->parent->ht, &parent, parent->hash, NULL); assert(!r); (void)r; } } else if (!lyd_list_has_keys(parent)) { /* a parent is a list without keys so it cannot be a part of any parent hash */ break; } } parent = parent->parent; } }
0
[ "CWE-119" ]
libyang
32fb4993bc8bb49e93e84016af3c10ea53964be5
236,790,437,411,259,360,000,000,000,000,000,000,000
30
schema tree BUGFIX do not check features while still resolving schema Fixes #723
char *f_name_buf(void) { static char names[5][MAXPATHLEN]; static unsigned int n; n = (n + 1) % (sizeof names / sizeof names[0]); return names[n]; }
0
[ "CWE-59" ]
rsync
962f8b90045ab331fc04c9e65f80f1a53e68243b
157,984,893,268,112,400,000,000,000,000,000,000,000
9
Complain if an inc-recursive path is not right for its dir. This ensures that a malicious sender can't use a just-sent symlink as a transfer path.
const char *fn_frm_ext(const char *name) { const char *res= strrchr(name, '.'); if (res && !strcmp(res, reg_ext)) return res; return 0; }
0
[ "CWE-416" ]
server
c02ebf3510850ba78a106be9974c94c3b97d8585
206,562,217,833,326,500,000,000,000,000,000,000,000
7
MDEV-24176 Preparations 1. moved fix_vcol_exprs() call to open_table() mysql_alter_table() doesn't do lock_tables() so it cannot win from fix_vcol_exprs() from there. Tests affected: main.default_session 2. Vanilla cleanups and comments.
static void *vnc_worker_thread(void *arg) { VncJobQueue *queue = arg; qemu_thread_get_self(&queue->thread); while (!vnc_worker_thread_loop(queue)) ; vnc_queue_clear(queue); return NULL; }
0
[ "CWE-125" ]
qemu
9f64916da20eea67121d544698676295bbb105a7
329,266,572,477,616,220,000,000,000,000,000,000,000
10
pixman/vnc: use pixman images in vnc. The vnc code uses *three* DisplaySurfaces: First is the surface of the actual QemuConsole, usually the guest screen, but could also be a text console (monitor/serial reachable via Ctrl-Alt-<nr> keys). This is left as-is. Second is the current server's view of the screen content. The vnc code uses this to figure which parts of the guest screen did _really_ change to reduce the amount of updates sent to the vnc clients. It is also used as data source when sending out the updates to the clients. This surface gets replaced by a pixman image. The format changes too, instead of using the guest screen format we'll use fixed 32bit rgb framebuffer and convert the pixels on the fly when comparing and updating the server framebuffer. Third surface carries the format expected by the vnc client. That isn't used to store image data. This surface is switched to PixelFormat and a boolean for bigendian byte order. Signed-off-by: Gerd Hoffmann <[email protected]>
____uc_fw_to_gt(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type) { if (type == INTEL_UC_FW_TYPE_GUC) return container_of(uc_fw, struct intel_gt, uc.guc.fw); GEM_BUG_ON(type != INTEL_UC_FW_TYPE_HUC); return container_of(uc_fw, struct intel_gt, uc.huc.fw); }
0
[ "CWE-20", "CWE-190" ]
linux
c784e5249e773689e38d2bc1749f08b986621a26
101,021,735,039,923,330,000,000,000,000,000,000,000
8
drm/i915/guc: Update to use firmware v49.0.1 The latest GuC firmware includes a number of interface changes that require driver updates to match. * Starting from Gen11, the ID to be provided to GuC needs to contain the engine class in bits [0..2] and the instance in bits [3..6]. NOTE: this patch breaks pointer dereferences in some existing GuC functions that use the guc_id to dereference arrays but these functions are not used for now as we have GuC submission disabled and we will update these functions in follow up patch which requires new IDs. * The new GuC requires the additional data structure (ADS) and associated 'private_data' pointer to be setup. This is basically a scratch area of memory that the GuC owns. The size is read from the CSS header. * There is now a physical to logical engine mapping table in the ADS which needs to be configured in order for the firmware to load. For now, the table is initialised with a 1 to 1 mapping. * GUC_CTL_CTXINFO has been removed from the initialization params. * reg_state_buffer is maintained internally by the GuC as part of the private data. * The ADS layout has changed significantly. This patch updates the shared structure and also adds better documentation of the layout. * While i915 does not use GuC doorbells, the firmware now requires that some initialisation is done. * The number of engine classes and instances supported in the ADS has been increased. Signed-off-by: John Harrison <[email protected]> Signed-off-by: Matthew Brost <[email protected]> Signed-off-by: Daniele Ceraolo Spurio <[email protected]> Signed-off-by: Oscar Mateo <[email protected]> Signed-off-by: Michel Thierry <[email protected]> Signed-off-by: Rodrigo Vivi <[email protected]> Signed-off-by: Michal Wajdeczko <[email protected]> Cc: Michal Winiarski <[email protected]> Cc: Tomasz Lis <[email protected]> Cc: Joonas Lahtinen <[email protected]> Reviewed-by: Daniele Ceraolo Spurio <[email protected]> Signed-off-by: Joonas Lahtinen <[email protected]> Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
ZEND_VM_HANDLER(88, ZEND_FETCH_OBJ_RW, VAR|UNUSED|THIS|CV, CONST|TMPVAR|CV, CACHE_SLOT) { USE_OPLINE zend_free_op free_op1, free_op2; zval *property, *container, *result; SAVE_OPLINE(); container = GET_OP1_OBJ_ZVAL_PTR_PTR_UNDEF(BP_VAR_RW); if (OP1_TYPE == IS_UNUSED && UNEXPECTED(Z_TYPE_P(container) == IS_UNDEF)) { ZEND_VM_DISPATCH_TO_HELPER(zend_this_not_in_object_context_helper); } property = GET_OP2_ZVAL_PTR(BP_VAR_R); result = EX_VAR(opline->result.var); zend_fetch_property_address(result, container, OP1_TYPE, property, OP2_TYPE, ((OP2_TYPE == IS_CONST) ? CACHE_ADDR(opline->extended_value) : NULL), BP_VAR_RW, 0, 1 OPLINE_CC EXECUTE_DATA_CC); FREE_OP2(); if (OP1_TYPE == IS_VAR) { FREE_VAR_PTR_AND_EXTRACT_RESULT_IF_NECESSARY(free_op1, result); } ZEND_VM_NEXT_OPCODE_CHECK_EXCEPTION(); }
0
[ "CWE-787" ]
php-src
f1ce8d5f5839cb2069ea37ff424fb96b8cd6932d
297,021,426,250,079,900,000,000,000,000,000,000,000
21
Fix #73122: Integer Overflow when concatenating strings We must avoid integer overflows in memory allocations, so we introduce an additional check in the VM, and bail out in the rare case of an overflow. Since the recent fix for bug #74960 still doesn't catch all possible overflows, we fix that right away.
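
Bug #73122 is an integer overflow when the two operand lengths are added to size the concatenation buffer. A generic sketch of the check that has to happen before the allocation (plain C, not the Zend VM's actual code):

#include <stdlib.h>
#include <string.h>
#include <stdint.h>

/* Concatenate two strings, failing cleanly if la + lb + 1 would overflow. */
static char *concat_checked(const char *a, const char *b)
{
    size_t la = strlen(a), lb = strlen(b);
    if (la >= SIZE_MAX - lb)
        return NULL;                    /* la + lb + 1 would wrap around */
    char *out = malloc(la + lb + 1);
    if (out == NULL)
        return NULL;
    memcpy(out, a, la);
    memcpy(out + la, b, lb + 1);        /* copies b's terminating NUL too */
    return out;
}
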
void seq_parameter_set::set_resolution(int w,int h) { pic_width_in_luma_samples = w; pic_height_in_luma_samples = h; }
0
[ "CWE-787" ]
libde265
8e89fe0e175d2870c39486fdd09250b230ec10b8
303,673,548,089,114,760,000,000,000,000,000,000,000
5
error on out-of-range cpb_cnt_minus1 (oss-fuzz issue 27590)
put_af3(byte *buf, u32 id) { put_u16(buf, id >> 16); buf[2] = id & 0xff; }
0
[ "CWE-787" ]
bird
8388f5a7e14108a1458fea35bfbb5a453e2c563c
241,556,014,214,332,520,000,000,000,000,000,000,000
5
BGP: Fix bugs in handling of shutdown messages There is an improper check for valid message size, which may lead to stack overflow and buffer leaks to log when a large message is received. Thanks to Daniel McCarney for bugreport and analysis.
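
The BIRD fix addresses an improper message-size check that let an oversized shutdown message overrun a stack buffer and leak memory into the log. A generic sketch of validating a length-prefixed message before copying it (hypothetical names and limits, not BIRD's parser):

#include <string.h>
#include <stdint.h>

#define SHUTDOWN_MSG_MAX 128   /* assumed cap on the shutdown message buffer */

/* Copy a length-prefixed message only after validating the length field. */
static int copy_shutdown_message(const uint8_t *data, size_t data_len,
                                 char out[SHUTDOWN_MSG_MAX + 1])
{
    if (data_len < 1)
        return -1;                       /* no length byte at all */
    size_t msg_len = data[0];
    if (msg_len > data_len - 1 || msg_len > SHUTDOWN_MSG_MAX)
        return -1;                       /* claimed length exceeds input or buffer */
    memcpy(out, data + 1, msg_len);
    out[msg_len] = '\0';
    return (int)msg_len;
}
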
MagickExport XrmDatabase XGetResourceDatabase(Display *display, const char *client_name) { char filename[MaxTextExtent]; int c; register const char *p; XrmDatabase resource_database, server_database; if (display == (Display *) NULL) return((XrmDatabase) NULL); assert(client_name != (char *) NULL); /* Initialize resource database. */ XrmInitialize(); (void) XGetDefault(display,(char *) client_name,"dummy"); resource_database=XrmGetDatabase(display); /* Combine application database. */ p=client_name+(strlen(client_name)-1); while ((p > client_name) && (*p != '/')) p--; if (*p == '/') client_name=p+1; c=(int) (*client_name); if ((c >= XK_a) && (c <= XK_z)) c-=(XK_a-XK_A); else if ((c >= XK_agrave) && (c <= XK_odiaeresis)) c-=(XK_agrave-XK_Agrave); else if ((c >= XK_oslash) && (c <= XK_thorn)) c-=(XK_oslash-XK_Ooblique); #if defined(X11_APPLICATION_PATH) (void) FormatLocaleString(filename,MaxTextExtent,"%s%c%s", X11_APPLICATION_PATH,c,client_name+1); (void) XrmCombineFileDatabase(filename,&resource_database,MagickFalse); #endif if (XResourceManagerString(display) != (char *) NULL) { /* Combine server database. */ server_database=XrmGetStringDatabase(XResourceManagerString(display)); XrmCombineDatabase(server_database,&resource_database,MagickFalse); } /* Merge user preferences database. */ #if defined(X11_PREFERENCES_PATH) (void) FormatLocaleString(filename,MaxTextExtent,"%s%src", X11_PREFERENCES_PATH,client_name); ExpandFilename(filename); (void) XrmCombineFileDatabase(filename,&resource_database,MagickFalse); #endif return(resource_database); }
0
[ "CWE-401" ]
ImageMagick6
13801f5d0bd7a6fdb119682d34946636afdb2629
153,612,909,383,597,740,000,000,000,000,000,000,000
66
https://github.com/ImageMagick/ImageMagick/issues/1531
addCharOrDots(FileInfo *nested, widechar c, int m, TranslationTableHeader **table) { /* See if a character or dot pattern is in the appropriate table. If not, * insert it. In either * case, return a pointer to it. */ TranslationTableOffset bucket; TranslationTableCharacter *character; TranslationTableCharacter *oldchar; TranslationTableOffset offset; unsigned long int makeHash; if ((character = compile_findCharOrDots(c, m, *table))) return character; if (!allocateSpaceInTable(nested, &offset, sizeof(*character), table)) return NULL; character = (TranslationTableCharacter *)&(*table)->ruleArea[offset]; memset(character, 0, sizeof(*character)); character->realchar = c; makeHash = (unsigned long int)c % HASHNUM; if (m == 0) bucket = (*table)->characters[makeHash]; else bucket = (*table)->dots[makeHash]; if (!bucket) { if (m == 0) (*table)->characters[makeHash] = offset; else (*table)->dots[makeHash] = offset; } else { oldchar = (TranslationTableCharacter *)&(*table)->ruleArea[bucket]; while (oldchar->next) oldchar = (TranslationTableCharacter *)&(*table)->ruleArea[oldchar->next]; oldchar->next = offset; } return character; }
0
[ "CWE-787" ]
liblouis
fb2bfce4ed49ac4656a8f7e5b5526e4838da1dde
219,921,243,098,921,600,000,000,000,000,000,000,000
32
Fix yet another buffer overflow in the braille table parser Reported by Henri Salo Fixes #592
int ecryptfs_encrypt_page(struct page *page) { struct inode *ecryptfs_inode; struct ecryptfs_crypt_stat *crypt_stat; char *enc_extent_virt; struct page *enc_extent_page = NULL; loff_t extent_offset; loff_t lower_offset; int rc = 0; ecryptfs_inode = page->mapping->host; crypt_stat = &(ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat); BUG_ON(!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)); enc_extent_page = alloc_page(GFP_USER); if (!enc_extent_page) { rc = -ENOMEM; ecryptfs_printk(KERN_ERR, "Error allocating memory for " "encrypted extent\n"); goto out; } for (extent_offset = 0; extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size); extent_offset++) { rc = crypt_extent(crypt_stat, enc_extent_page, page, extent_offset, ENCRYPT); if (rc) { printk(KERN_ERR "%s: Error encrypting extent; " "rc = [%d]\n", __func__, rc); goto out; } } lower_offset = lower_offset_for_page(crypt_stat, page); enc_extent_virt = kmap(enc_extent_page); rc = ecryptfs_write_lower(ecryptfs_inode, enc_extent_virt, lower_offset, PAGE_CACHE_SIZE); kunmap(enc_extent_page); if (rc < 0) { ecryptfs_printk(KERN_ERR, "Error attempting to write lower page; rc = [%d]\n", rc); goto out; } rc = 0; out: if (enc_extent_page) { __free_page(enc_extent_page); } return rc; }
0
[ "CWE-703", "CWE-189" ]
linux
942080643bce061c3dd9d5718d3b745dcb39a8bc
225,576,920,260,747,760,000,000,000,000,000,000,000
52
eCryptfs: Remove buggy and unnecessary write in file name decode routine Dmitry Chernenkov used KASAN to discover that eCryptfs writes past the end of the allocated buffer during encrypted filename decoding. This fix corrects the issue by getting rid of the unnecessary 0 write when the current bit offset is 2. Signed-off-by: Michael Halcrow <[email protected]> Reported-by: Dmitry Chernenkov <[email protected]> Suggested-by: Kees Cook <[email protected]> Cc: [email protected] # v2.6.29+: 51ca58d eCryptfs: Filename Encryption: Encoding and encryption functions Signed-off-by: Tyler Hicks <[email protected]>
pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr) { struct mm_struct *mm = task->mm; struct vm_area_struct *vma = NULL; unsigned long size; void *smpl_buf; /* * the fixed header + requested size and align to page boundary */ size = PAGE_ALIGN(rsize); DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size)); /* * check requested size to avoid Denial-of-service attacks * XXX: may have to refine this test * Check against address space limit. * * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur) * return -ENOMEM; */ if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur) return -ENOMEM; /* * We do the easy to undo allocations first. * * pfm_rvmalloc(), clears the buffer, so there is no leak */ smpl_buf = pfm_rvmalloc(size); if (smpl_buf == NULL) { DPRINT(("Can't allocate sampling buffer\n")); return -ENOMEM; } DPRINT(("smpl_buf @%p\n", smpl_buf)); /* allocate vma */ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); if (!vma) { DPRINT(("Cannot allocate vma\n")); goto error_kmem; } /* * partially initialize the vma for the sampling buffer */ vma->vm_mm = mm; vma->vm_file = filp; vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED; vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */ /* * Now we have everything we need and we can initialize * and connect all the data structures */ ctx->ctx_smpl_hdr = smpl_buf; ctx->ctx_smpl_size = size; /* aligned size */ /* * Let's do the difficult operations next. * * now we atomically find some area in the address space and * remap the buffer in it. */ down_write(&task->mm->mmap_sem); /* find some free area in address space, must have mmap sem held */ vma->vm_start = pfm_get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS, 0); if (vma->vm_start == 0UL) { DPRINT(("Cannot find unmapped area for size %ld\n", size)); up_write(&task->mm->mmap_sem); goto error; } vma->vm_end = vma->vm_start + size; vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start)); /* can only be applied to current task, need to have the mm semaphore held when called */ if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) { DPRINT(("Can't remap buffer\n")); up_write(&task->mm->mmap_sem); goto error; } get_file(filp); /* * now insert the vma in the vm list for the process, must be * done with mmap lock held */ insert_vm_struct(mm, vma); mm->total_vm += size >> PAGE_SHIFT; vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, vma_pages(vma)); up_write(&task->mm->mmap_sem); /* * keep track of user level virtual address */ ctx->ctx_smpl_vaddr = (void *)vma->vm_start; *(unsigned long *)user_vaddr = vma->vm_start; return 0; error: kmem_cache_free(vm_area_cachep, vma); error_kmem: pfm_rvfree(smpl_buf, size); return -ENOMEM; }
0
[]
linux-2.6
41d5e5d73ecef4ef56b7b4cde962929a712689b4
183,545,999,103,797,770,000,000,000,000,000,000,000
117
[IA64] permon use-after-free fix Perfmon associates vmalloc()ed memory with a file descriptor, and installs a vma mapping that memory. Unfortunately, the vm_file field is not filled in, so processes with mappings to that memory do not prevent the file from being closed and the memory freed. This results in use-after-free bugs and multiple freeing of pages, etc. I saw this bug on an Altix on SLES9. Haven't reproduced upstream but it looks like the same issue is there. Signed-off-by: Nick Piggin <[email protected]> Cc: Stephane Eranian <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Tony Luck <[email protected]>
virtual uint32 pack_length_in_rec() const { return pack_length(); }
0
[ "CWE-416", "CWE-703" ]
server
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
233,994,963,900,039,320,000,000,000,000,000,000,000
1
MDEV-24176 Server crashes after insert in the table with virtual column generated using date_format() and if() vcol_info->expr is allocated on expr_arena at parsing stage. Since expr item is allocated on expr_arena all its containee items must be allocated on expr_arena too. Otherwise fix_session_expr() will encounter prematurely freed item. When table is reopened from cache vcol_info contains stale expression. We refresh expression via TABLE::vcol_fix_exprs() but first we must prepare a proper context (Vcol_expr_context) which meets some requirements: 1. As noted above expr update must be done on expr_arena as there may be new items created. It was a bug in fix_session_expr_for_read() and was just not reproduced because of no second refix. Now refix is done for more cases so it does reproduce. Tests affected: vcol.binlog 2. Also name resolution context must be narrowed to the single table. Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes 3. sql_mode must be clean and not fail expr update. sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc must not affect vcol expression update. If the table was created successfully any further evaluation must not fail. Tests affected: main.func_like Reviewed by: Sergei Golubchik <[email protected]>
aubio_filterbank_set_coeffs (aubio_filterbank_t * f, const fmat_t * filter_coeffs) { fmat_copy(filter_coeffs, f->filters); return 0; }
0
[]
aubio
eda95c9c22b4f0b466ae94c4708765eaae6e709e
247,895,530,746,812,600,000,000,000,000,000,000,000
5
[filterbank] validate input parameters
static inline u64 paravirt_read_msr_safe(unsigned msr, int *err) { return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err); }
0
[ "CWE-276" ]
linux
cadfad870154e14f745ec845708bc17d166065f2
314,459,086,952,546,200,000,000,000,000,000,000,000
4
x86/ioperm: Fix io bitmap invalidation on Xen PV tss_invalidate_io_bitmap() wasn't wired up properly through the pvop machinery, so the TSS and Xen's io bitmap would get out of sync whenever disabling a valid io bitmap. Add a new pvop for tss_invalidate_io_bitmap() to fix it. This is XSA-329. Fixes: 22fe5b0439dd ("x86/ioperm: Move TSS bitmap update to exit to user work") Signed-off-by: Andy Lutomirski <[email protected]> Signed-off-by: Thomas Gleixner <[email protected]> Reviewed-by: Juergen Gross <[email protected]> Reviewed-by: Thomas Gleixner <[email protected]> Cc: [email protected] Link: https://lkml.kernel.org/r/d53075590e1f91c19f8af705059d3ff99424c020.1595030016.git.luto@kernel.org
static void vrend_destroy_sampler_view_object(void *obj_ptr) { struct vrend_sampler_view *samp = obj_ptr; vrend_sampler_view_reference(&samp, NULL); }
0
[ "CWE-787" ]
virglrenderer
cbc8d8b75be360236cada63784046688aeb6d921
142,022,682,487,040,290,000,000,000,000,000,000,000
6
vrend: check transfer bounds for negative values too and report error Closes #138 Signed-off-by: Gert Wollny <[email protected]> Reviewed-by: Emil Velikov <[email protected]>
static int edge_startup(struct usb_serial *serial) { struct edgeport_serial *edge_serial; int status; u16 product_id; /* create our private serial structure */ edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL); if (!edge_serial) return -ENOMEM; mutex_init(&edge_serial->es_lock); edge_serial->serial = serial; INIT_DELAYED_WORK(&edge_serial->heartbeat_work, edge_heartbeat_work); usb_set_serial_data(serial, edge_serial); status = download_fw(edge_serial); if (status < 0) { kfree(edge_serial); return status; } if (status > 0) return 1; /* bind but do not register any ports */ product_id = le16_to_cpu( edge_serial->serial->dev->descriptor.idProduct); /* Currently only the EP/416 models require heartbeat support */ if (edge_serial->fw_version > FW_HEARTBEAT_VERSION_CUTOFF) { if (product_id == ION_DEVICE_ID_TI_EDGEPORT_416 || product_id == ION_DEVICE_ID_TI_EDGEPORT_416B) { edge_serial->use_heartbeat = true; } } edge_heartbeat_schedule(edge_serial); return 0; }
0
[ "CWE-369" ]
linux
6aeb75e6adfaed16e58780309613a578fe1ee90b
240,845,060,623,057,500,000,000,000,000,000,000,000
40
USB: serial: io_ti: fix div-by-zero in set_termios Fix a division-by-zero in set_termios when debugging is enabled and a high-enough speed has been requested so that the divisor value becomes zero. Instead of just fixing the offending debug statement, cap the baud rate at the base as a zero divisor value also appears to crash the firmware. Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Cc: stable <[email protected]> # 2.6.12 Reviewed-by: Greg Kroah-Hartman <[email protected]> Signed-off-by: Johan Hovold <[email protected]>
static int get_consumed_bytes(MpegEncContext *s, int buf_size) { int pos = (get_bits_count(&s->gb) + 7) >> 3; if (s->divx_packed || s->avctx->hwaccel) { /* We would have to scan through the whole buf to handle the weird * reordering ... */ return buf_size; } else if (s->avctx->flags & AV_CODEC_FLAG_TRUNCATED) { pos -= s->parse_context.last_index; // padding is not really read so this might be -1 if (pos < 0) pos = 0; return pos; } else { // avoid infinite loops (maybe not needed...) if (pos == 0) pos = 1; // oops ;) if (pos + 10 > buf_size) pos = buf_size; return pos; } }
0
[ "CWE-20", "CWE-617" ]
FFmpeg
bd27a9364ca274ca97f1df6d984e88a0700fb235
310,381,862,545,869,000,000,000,000,000,000,000,000
25
avcodec/mpeg4videodec: Remove use of FF_PROFILE_MPEG4_SIMPLE_STUDIO as indicator of studio profile The profile field is changed by code inside and outside the decoder, so it's not a reliable indicator of the internal codec state. Maintaining its consistency with studio_profile is messy. It's easier to just avoid it and use only studio_profile. Fixes: assertion failure Fixes: ffmpeg_crash_9.avi Found-by: Thuan Pham, Marcel Böhme, Andrew Santosa and Alexandru Razvan Caciulescu with AFLSmart Signed-off-by: Michael Niedermayer <[email protected]>
R_API RList *r_bin_java_find_cp_const_by_val_utf8(RBinJavaObj *bin_obj, const ut8 *bytes, ut32 len) { RList *res = r_list_newf (free); ut32 *v = NULL; RListIter *iter; RBinJavaCPTypeObj *cp_obj; IFDBG eprintf ("In UTF-8 Looking for %s\n", bytes); r_list_foreach (bin_obj->cp_list, iter, cp_obj) { if (cp_obj->tag == R_BIN_JAVA_CP_UTF8) { IFDBG eprintf ("In UTF-8 Looking @ %s\n", cp_obj->info.cp_utf8.bytes); IFDBG eprintf ("UTF-8 len = %d and memcmp = %d\n", cp_obj->info.cp_utf8.length, memcmp (bytes, cp_obj->info.cp_utf8.bytes, len)); if (len == cp_obj->info.cp_utf8.length && !memcmp (bytes, cp_obj->info.cp_utf8.bytes, len)) { v = malloc (sizeof (ut32)); if (!v) { r_list_free (res); return NULL; } *v = cp_obj->metas->ord; IFDBG eprintf ("Found a match adding idx: %d\n", *v); r_list_append (res, v); } } } return res; }
0
[ "CWE-119", "CWE-788" ]
radare2
6c4428f018d385fc80a33ecddcb37becea685dd5
232,849,720,542,812,440,000,000,000,000,000,000,000
24
Improve boundary checks to fix oobread segfaults ##crash * Reported by Cen Zhang via huntr.dev * Reproducer: bins/fuzzed/javaoob-havoc.class
PHP_FUNCTION(xml_parser_get_option) { xml_parser *parser; zval *pind; zend_long opt; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rl", &pind, &opt) == FAILURE) { return; } if ((parser = (xml_parser *)zend_fetch_resource(Z_RES_P(pind), "XML Parser", le_xml_parser)) == NULL) { RETURN_FALSE; } switch (opt) { case PHP_XML_OPTION_CASE_FOLDING: RETURN_LONG(parser->case_folding); break; case PHP_XML_OPTION_TARGET_ENCODING: RETURN_STRING((char *)parser->target_encoding); break; default: php_error_docref(NULL, E_WARNING, "Unknown option"); RETURN_FALSE; break; } RETVAL_FALSE; /* never reached */ }
0
[ "CWE-190" ]
php-src
57b997ebf99e0eb9a073e0dafd2ab100bd4a112d
114,523,238,157,091,120,000,000,000,000,000,000,000
29
Fix bug #71637: Multiple Heap Overflow due to integer overflows
static int mwifiex_uap_custom_ie_prepare(u8 *tlv, void *cmd_buf, u16 *ie_size) { struct mwifiex_ie_list *ap_ie = cmd_buf; struct mwifiex_ie_types_header *tlv_ie = (void *)tlv; if (!ap_ie || !ap_ie->len) return -1; *ie_size += le16_to_cpu(ap_ie->len) + sizeof(struct mwifiex_ie_types_header); tlv_ie->type = cpu_to_le16(TLV_TYPE_MGMT_IE); tlv_ie->len = ap_ie->len; tlv += sizeof(struct mwifiex_ie_types_header); memcpy(tlv, ap_ie->ie_list, le16_to_cpu(ap_ie->len)); return 0; }
0
[ "CWE-120", "CWE-787" ]
linux
7caac62ed598a196d6ddf8d9c121e12e082cac3a
74,814,265,293,741,760,000,000,000,000,000,000,000
19
mwifiex: Fix three heap overflow at parsing element in cfg80211_ap_settings mwifiex_update_vs_ie(), mwifiex_set_uap_rates() and mwifiex_set_wmm_params() call memcpy() without checking the destination size. Since the source is given from user-space, this may trigger a heap buffer overflow. Fix them by putting the length check before performing memcpy(). This fix addresses CVE-2019-14814, CVE-2019-14815, CVE-2019-14816. Signed-off-by: Wen Huang <[email protected]> Acked-by: Ganapathi Bhat <[email protected]> Signed-off-by: Kalle Valo <[email protected]>
Status InferenceContext::ExpandOutputs(int new_output_size) { const int outputs_size = outputs_.size(); if (new_output_size < outputs_size) { return errors::InvalidArgument("Trying to reduce number of outputs of op."); } outputs_.resize(new_output_size, nullptr); output_handle_shapes_and_types_.resize(new_output_size); return Status::OK(); }
0
[ "CWE-190" ]
tensorflow
acd56b8bcb72b163c834ae4f18469047b001fadf
146,937,995,679,781,350,000,000,000,000,000,000,000
9
Fix security vulnerability with SpaceToBatchNDOp. PiperOrigin-RevId: 445527615
ccid_check_card_presence (ccid_driver_t handle) { (void)handle; /* Not yet implemented. */ return -1; }
0
[ "CWE-20" ]
gnupg
2183683bd633818dd031b090b5530951de76f392
139,917,314,579,354,860,000,000,000,000,000,000,000
5
Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>
slhc_free(struct slcompress *comp) { if ( comp == NULLSLCOMPR ) return; if ( comp->tstate != NULLSLSTATE ) kfree( comp->tstate ); if ( comp->rstate != NULLSLSTATE ) kfree( comp->rstate ); kfree( comp ); }
0
[]
linux
4ab42d78e37a294ac7bc56901d563c642e03c4ae
7,872,556,058,016,223,000,000,000,000,000,000,000
13
ppp, slip: Validate VJ compression slot parameters completely Currently slhc_init() treats out-of-range values of rslots and tslots as equivalent to 0, except that if tslots is too large it will dereference a null pointer (CVE-2015-7799). Add a range-check at the top of the function and make it return an ERR_PTR() on error instead of NULL. Change the callers accordingly. Compile-tested only. Reported-by: 郭永刚 <[email protected]> References: http://article.gmane.org/gmane.comp.security.oss.general/17908 Signed-off-by: Ben Hutchings <[email protected]> Signed-off-by: David S. Miller <[email protected]>
void Parser::error(std::string msg) { error(msg, pstate); }
0
[ "CWE-125" ]
libsass
eb15533b07773c30dc03c9d742865604f47120ef
10,647,124,940,705,952,000,000,000,000,000,000,000
4
Fix memory leak in `parse_ie_keyword_arg` `kwd_arg` would never get freed when there was a parse error in `parse_ie_keyword_arg`. Closes #2656
static void put_signed_pixels_clamped_c(const int16_t *block, uint8_t *av_restrict pixels, ptrdiff_t line_size) { int i, j; for (i = 0; i < 8; i++) { for (j = 0; j < 8; j++) { if (*block < -128) *pixels = 0; else if (*block > 127) *pixels = 255; else *pixels = (uint8_t) (*block + 128); block++; pixels++; } pixels += (line_size - 8); } }
0
[ "CWE-476" ]
FFmpeg
b3332a182f8ba33a34542e4a0370f38b914ccf7d
164,932,784,372,794,190,000,000,000,000,000,000,000
20
avcodec/idctdsp: Transmit studio_profile to init instead of using AVCodecContext profile These 2 fields are not always the same, it is simpler to always use the same field for detecting studio profile Fixes: null pointer dereference Fixes: ffmpeg_crash_3.avi Found-by: Thuan Pham <[email protected]>, Marcel Böhme, Andrew Santosa and Alexandru RazvanCaciulescu with AFLSmart Signed-off-by: Michael Niedermayer <[email protected]>
void ms_deliver_handle_accept(Connection *con) { for (list<Dispatcher*>::iterator p = dispatchers.begin(); p != dispatchers.end(); ++p) (*p)->ms_handle_accept(con); }
0
[ "CWE-287", "CWE-284" ]
ceph
5ead97120e07054d80623dada90a5cc764c28468
136,996,178,065,977,420,000,000,000,000,000,000,000
6
auth/cephx: add authorizer challenge Allow the accepting side of a connection to reject an initial authorizer with a random challenge. The connecting side then has to respond with an updated authorizer proving they are able to decrypt the service's challenge and that the new authorizer was produced for this specific connection instance. The accepting side requires this challenge and response unconditionally if the client side advertises they have the feature bit. Servers wishing to require this improved level of authentication simply have to require the appropriate feature. Signed-off-by: Sage Weil <[email protected]> (cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b) # Conflicts: # src/auth/Auth.h # src/auth/cephx/CephxProtocol.cc # src/auth/cephx/CephxProtocol.h # src/auth/none/AuthNoneProtocol.h # src/msg/Dispatcher.h # src/msg/async/AsyncConnection.cc - const_iterator - ::decode vs decode - AsyncConnection ctor arg noise - get_random_bytes(), not cct->random()
static void dump_usage_bucket_info(Formatter *formatter, const std::string& name, const cls_user_bucket_entry& entry) { formatter->open_object_section("Entry"); encode_json("Bucket", name, formatter); encode_json("Bytes", entry.size, formatter); encode_json("Bytes_Rounded", entry.size_rounded, formatter); formatter->close_section(); // entry }
0
[ "CWE-79" ]
ceph
8f90658c731499722d5f4393c8ad70b971d05f77
238,295,967,831,187,720,000,000,000,000,000,000,000
8
rgw: reject unauthenticated response-header actions Signed-off-by: Matt Benjamin <[email protected]> Reviewed-by: Casey Bodley <[email protected]> (cherry picked from commit d8dd5e513c0c62bbd7d3044d7e2eddcd897bd400)
xfs_bmap_del_extent_real( xfs_inode_t *ip, /* incore inode pointer */ xfs_trans_t *tp, /* current transaction pointer */ struct xfs_iext_cursor *icur, struct xfs_defer_ops *dfops, /* list of extents to be freed */ xfs_btree_cur_t *cur, /* if null, not a btree */ xfs_bmbt_irec_t *del, /* data to remove from extents */ int *logflagsp, /* inode logging flags */ int whichfork, /* data or attr fork */ int bflags) /* bmapi flags */ { xfs_fsblock_t del_endblock=0; /* first block past del */ xfs_fileoff_t del_endoff; /* first offset past del */ int do_fx; /* free extent at end of routine */ int error; /* error return value */ int flags = 0;/* inode logging flags */ struct xfs_bmbt_irec got; /* current extent entry */ xfs_fileoff_t got_endoff; /* first offset past got */ int i; /* temp state */ xfs_ifork_t *ifp; /* inode fork pointer */ xfs_mount_t *mp; /* mount structure */ xfs_filblks_t nblks; /* quota/sb block count */ xfs_bmbt_irec_t new; /* new record to be inserted */ /* REFERENCED */ uint qfield; /* quota field to update */ int state = xfs_bmap_fork_to_state(whichfork); struct xfs_bmbt_irec old; mp = ip->i_mount; XFS_STATS_INC(mp, xs_del_exlist); ifp = XFS_IFORK_PTR(ip, whichfork); ASSERT(del->br_blockcount > 0); xfs_iext_get_extent(ifp, icur, &got); ASSERT(got.br_startoff <= del->br_startoff); del_endoff = del->br_startoff + del->br_blockcount; got_endoff = got.br_startoff + got.br_blockcount; ASSERT(got_endoff >= del_endoff); ASSERT(!isnullstartblock(got.br_startblock)); qfield = 0; error = 0; /* * If it's the case where the directory code is running with no block * reservation, and the deleted block is in the middle of its extent, * and the resulting insert of an extent would cause transformation to * btree format, then reject it. The calling code will then swap blocks * around instead. We have to do this now, rather than waiting for the * conversion to btree format, since the transaction will be dirty then. */ if (tp->t_blk_res == 0 && XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS && XFS_IFORK_NEXTENTS(ip, whichfork) >= XFS_IFORK_MAXEXT(ip, whichfork) && del->br_startoff > got.br_startoff && del_endoff < got_endoff) return -ENOSPC; flags = XFS_ILOG_CORE; if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) { xfs_fsblock_t bno; xfs_filblks_t len; ASSERT(do_mod(del->br_blockcount, mp->m_sb.sb_rextsize) == 0); ASSERT(do_mod(del->br_startblock, mp->m_sb.sb_rextsize) == 0); bno = del->br_startblock; len = del->br_blockcount; do_div(bno, mp->m_sb.sb_rextsize); do_div(len, mp->m_sb.sb_rextsize); error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len); if (error) goto done; do_fx = 0; nblks = len * mp->m_sb.sb_rextsize; qfield = XFS_TRANS_DQ_RTBCOUNT; } else { do_fx = 1; nblks = del->br_blockcount; qfield = XFS_TRANS_DQ_BCOUNT; } del_endblock = del->br_startblock + del->br_blockcount; if (cur) { error = xfs_bmbt_lookup_eq(cur, &got, &i); if (error) goto done; XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); } if (got.br_startoff == del->br_startoff) state |= BMAP_LEFT_FILLING; if (got_endoff == del_endoff) state |= BMAP_RIGHT_FILLING; switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: /* * Matches the whole extent. Delete the entry. 
*/ xfs_iext_remove(ip, icur, state); xfs_iext_prev(ifp, icur); XFS_IFORK_NEXT_SET(ip, whichfork, XFS_IFORK_NEXTENTS(ip, whichfork) - 1); flags |= XFS_ILOG_CORE; if (!cur) { flags |= xfs_ilog_fext(whichfork); break; } if ((error = xfs_btree_delete(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); break; case BMAP_LEFT_FILLING: /* * Deleting the first part of the extent. */ got.br_startoff = del_endoff; got.br_startblock = del_endblock; got.br_blockcount -= del->br_blockcount; xfs_iext_update_extent(ip, state, icur, &got); if (!cur) { flags |= xfs_ilog_fext(whichfork); break; } error = xfs_bmbt_update(cur, &got); if (error) goto done; break; case BMAP_RIGHT_FILLING: /* * Deleting the last part of the extent. */ got.br_blockcount -= del->br_blockcount; xfs_iext_update_extent(ip, state, icur, &got); if (!cur) { flags |= xfs_ilog_fext(whichfork); break; } error = xfs_bmbt_update(cur, &got); if (error) goto done; break; case 0: /* * Deleting the middle of the extent. */ old = got; got.br_blockcount = del->br_startoff - got.br_startoff; xfs_iext_update_extent(ip, state, icur, &got); new.br_startoff = del_endoff; new.br_blockcount = got_endoff - del_endoff; new.br_state = got.br_state; new.br_startblock = del_endblock; flags |= XFS_ILOG_CORE; if (cur) { error = xfs_bmbt_update(cur, &got); if (error) goto done; error = xfs_btree_increment(cur, 0, &i); if (error) goto done; cur->bc_rec.b = new; error = xfs_btree_insert(cur, &i); if (error && error != -ENOSPC) goto done; /* * If get no-space back from btree insert, it tried a * split, and we have a zero block reservation. Fix up * our state and return the error. */ if (error == -ENOSPC) { /* * Reset the cursor, don't trust it after any * insert operation. */ error = xfs_bmbt_lookup_eq(cur, &got, &i); if (error) goto done; XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); /* * Update the btree record back * to the original value. */ error = xfs_bmbt_update(cur, &old); if (error) goto done; /* * Reset the extent record back * to the original value. */ xfs_iext_update_extent(ip, state, icur, &old); flags = 0; error = -ENOSPC; goto done; } XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); } else flags |= xfs_ilog_fext(whichfork); XFS_IFORK_NEXT_SET(ip, whichfork, XFS_IFORK_NEXTENTS(ip, whichfork) + 1); xfs_iext_next(ifp, icur); xfs_iext_insert(ip, icur, &new, state); break; } /* remove reverse mapping */ error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, del); if (error) goto done; /* * If we need to, add to list of extents to delete. */ if (do_fx && !(bflags & XFS_BMAPI_REMAP)) { if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) { error = xfs_refcount_decrease_extent(mp, dfops, del); if (error) goto done; } else xfs_bmap_add_free(mp, dfops, del->br_startblock, del->br_blockcount, NULL); } /* * Adjust inode # blocks in the file. */ if (nblks) ip->i_d.di_nblocks -= nblks; /* * Adjust quota data. */ if (qfield && !(bflags & XFS_BMAPI_REMAP)) xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks); done: *logflagsp = flags; return error; }
0
[]
linux
2c4306f719b083d17df2963bc761777576b8ad1b
44,866,498,056,733,080,000,000,000,000,000,000,000
240
xfs: set format back to extents if xfs_bmap_extents_to_btree If xfs_bmap_extents_to_btree fails in a mode where we call xfs_iroot_realloc(-1) to de-allocate the root, set the format back to extents. Otherwise we can assume we can dereference ifp->if_broot based on the XFS_DINODE_FMT_BTREE format, and crash. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=199423 Signed-off-by: Eric Sandeen <[email protected]> Reviewed-by: Christoph Hellwig <[email protected]> Reviewed-by: Darrick J. Wong <[email protected]> Signed-off-by: Darrick J. Wong <[email protected]>
gx_default_pop_transparency_state(gx_device *dev, gs_gstate *pgs) { return 0; }
0
[]
ghostpdl
c9b362ba908ca4b1d7c72663a33229588012d7d9
243,275,456,004,943,900,000,000,000,000,000,000,000
4
Bug 699670: disallow copying of the epo device The erasepage optimisation (epo) subclass device shouldn't be allowed to be copied because the subclass private data, child and parent pointers end up being shared between the original device and the copy. Add an epo_finish_copydevice which NULLs the three offending pointers, and then communicates to the caller that copying is not allowed. This also exposed a separate issue with the stype for subclasses devices. Devices are, I think, unique in having two stype objects associated with them: the usual one in the memory manager header, and the other stored in the device structere directly. In order for the stype to be correct, we have to use the stype for the incoming device, with the ssize of the original device (ssize should reflect the size of the memory allocation). We correctly did so with the stype in the device structure, but then used the prototype device's stype to patch the memory manager stype - meaning the ssize potentially no longer matched the allocated memory. This caused problems in the garbager where there is an implicit assumption that the size of a single object clump (c_alone == 1) is also the size (+ memory manager overheads) of the single object it contains. The solution is to use the same stype instance to patch the memory manager data as we do in the device structure (with the correct ssize).
mj_open(gx_device *pdev, int ptype) { /* Change the margins if necessary. */ int xdpi = pdev->x_pixels_per_inch; int ydpi = pdev->y_pixels_per_inch; static const float mj_margin[4] = { MJ700V2C_MARGINS_A4 }; static const float mj6000c_a2[4] = { MJ6000C_MARGINS_A2 }; static const float mj8000c_a2[4] = { MJ8000C_MARGINS_A2 }; const float *m; int paper_size; #if 0 /* Set up colour params if put_props has not already done so */ if (pdev->color_info.num_components == 0) set_bpp(pdev, pdev->color_info.depth); #endif paper_size = gdev_mjc_paper_size(pdev); if (paper_size == PAPER_SIZE_A2 ) { if (ptype == MJ6000C) m = mj6000c_a2; else if (ptype == MJ8000C) m = mj8000c_a2; else m = mj_margin; } else { m = mj_margin; } gx_device_set_margins(pdev, m, true); if (mj->colorcomp == 3) mj->density = mj->density * 720 / ydpi * 1.5; else mj->density = mj->density * 720 / ydpi; /* Print Resolution Check */ if (!((xdpi == 180 && ydpi == 180) || (xdpi == 360 && ydpi == 360) || (xdpi == 720 && ydpi == 720) || (xdpi == 360 && ydpi == 720) || (xdpi == 720 && ydpi == 360))) return_error(gs_error_rangecheck); return gdev_prn_open(pdev); }
0
[ "CWE-120" ]
ghostpdl
849e74e5ab450dd581942192da7101e0664fa5af
163,898,537,148,643,120,000,000,000,000,000,000,000
48
Bug 701799: avoid out-of-range array access in mj_color_correct(). Code is obscure, so this fix merely avoids out-of-range access in the simplest way possible, without understanding what the code is trying to do. Fixes: ./sanbin/gs -sOutputFile=tmp -sDEVICE=mj6000c ../bug-701799.pdf
Bool gf_fs_fire_event(GF_FilterSession *fs, GF_Filter *f, GF_FilterEvent *evt, Bool upstream) { Bool ret = GF_FALSE; if (!fs || !evt) return GF_FALSE; GF_FilterPid *on_pid = evt->base.on_pid; evt->base.on_pid = NULL; if (f) { if (evt->base.type==GF_FEVT_USER) { if (f->freg->process_event && f->event_target) { gf_mx_p(f->tasks_mx); f->freg->process_event(f, evt); gf_mx_v(f->tasks_mx); ret = GF_TRUE; } } if (!ret) { gf_mx_p(f->tasks_mx); if (f->num_output_pids && upstream) ret = GF_TRUE; else if (f->num_input_pids && !upstream) ret = GF_TRUE; gf_filter_send_event(f, evt, upstream); gf_mx_v(f->tasks_mx); } } else { u32 i, count; gf_fs_lock_filters(fs, GF_TRUE); count = gf_list_count(fs->filters); for (i=0; i<count; i++) { Bool canceled; f = gf_list_get(fs->filters, i); if (f->disabled || f->removed) continue; if (f->multi_sink_target) continue; if (!f->freg->process_event) continue; if (!f->event_target) continue; gf_mx_p(f->tasks_mx); canceled = f->freg->process_event(f, evt); gf_mx_v(f->tasks_mx); ret = GF_TRUE; if (canceled) break; } gf_fs_lock_filters(fs, GF_FALSE); } evt->base.on_pid = on_pid; return ret; }
0
[ "CWE-787" ]
gpac
da37ec8582266983d0ec4b7550ec907401ec441e
5,039,645,983,733,285,000,000,000,000,000,000,000
46
fixed crashes for very long path - cf #1908
static void php_curl_ssl_lock(int mode, int n, const char * file, int line) { if (mode & CRYPTO_LOCK) { tsrm_mutex_lock(php_curl_openssl_tsl[n]); } else { tsrm_mutex_unlock(php_curl_openssl_tsl[n]); } }
0
[]
php-src
124fb22a13fafa3648e4e15b4f207c7096d8155e
260,625,552,426,250,900,000,000,000,000,000,000,000
8
Fixed bug #68739 #68740 #68741
BOOL WINAPI TOwnConsole::HandlerRoutine(DWORD CtrlType) { if ((CtrlType == CTRL_C_EVENT) || (CtrlType == CTRL_BREAK_EVENT)) { { TGuard Guard(FSection.get()); // just to be real thread-safe if (FInstance != NULL) { FInstance->CancelInput(); } } return true; } else { return false; } }
0
[ "CWE-787" ]
winscp
faa96e8144e6925a380f94a97aa382c9427f688d
307,076,776,541,890,580,000,000,000,000,000,000,000
21
Bug 1943: Prevent loading session settings that can lead to remote code execution from handled URLs https://winscp.net/tracker/1943 (cherry picked from commit ec584f5189a856cd79509f754722a6898045c5e0) Source commit: 0f4be408b3f01132b00682da72d925d6c4ee649b
inline bool IsGenDelim(int c) { return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@'; }
0
[ "CWE-22" ]
webcc
55a45fd5039061d5cc62e9f1b9d1f7e97a15143f
15,760,238,267,350,020,000,000,000,000,000,000,000
4
fix static file serving security issue; fix url path encoding issue
static void parse_xattrmap_map(struct lo_data *lo, const char *rule, char sep) { const char *tmp; char *key; char *prefix; XattrMapEntry tmp_entry; if (*rule != sep) { fuse_log(FUSE_LOG_ERR, "%s: Expecting '%c' after 'map' keyword, found '%c'\n", __func__, sep, *rule); exit(1); } rule++; /* At start of 'key' field */ tmp = strchr(rule, sep); if (!tmp) { fuse_log(FUSE_LOG_ERR, "%s: Missing '%c' at end of key field in map rule\n", __func__, sep); exit(1); } key = g_strndup(rule, tmp - rule); rule = tmp + 1; /* At start of prefix field */ tmp = strchr(rule, sep); if (!tmp) { fuse_log(FUSE_LOG_ERR, "%s: Missing '%c' at end of prefix field in map rule\n", __func__, sep); exit(1); } prefix = g_strndup(rule, tmp - rule); rule = tmp + 1; /* * This should be the end of the string, we don't allow * any more commands after 'map'. */ if (*rule) { fuse_log(FUSE_LOG_ERR, "%s: Expecting end of command after map, found '%c'\n", __func__, *rule); exit(1); } /* 1st: Prefix matches/everything */ tmp_entry.flags = XATTR_MAP_FLAG_PREFIX | XATTR_MAP_FLAG_ALL; tmp_entry.key = g_strdup(key); tmp_entry.prepend = g_strdup(prefix); add_xattrmap_entry(lo, &tmp_entry); if (!*key) { /* Prefix all case */ /* 2nd: Hide any non-prefixed entries on the host */ tmp_entry.flags = XATTR_MAP_FLAG_BAD | XATTR_MAP_FLAG_ALL; tmp_entry.key = g_strdup(""); tmp_entry.prepend = g_strdup(""); add_xattrmap_entry(lo, &tmp_entry); } else { /* Prefix matching case */ /* 2nd: Hide non-prefixed but matching entries on the host */ tmp_entry.flags = XATTR_MAP_FLAG_BAD | XATTR_MAP_FLAG_SERVER; tmp_entry.key = g_strdup(""); /* Not used */ tmp_entry.prepend = g_strdup(key); add_xattrmap_entry(lo, &tmp_entry); /* 3rd: Stop the client accessing prefixed attributes directly */ tmp_entry.flags = XATTR_MAP_FLAG_BAD | XATTR_MAP_FLAG_CLIENT; tmp_entry.key = g_strdup(prefix); tmp_entry.prepend = g_strdup(""); /* Not used */ add_xattrmap_entry(lo, &tmp_entry); /* 4th: Everything else is OK */ tmp_entry.flags = XATTR_MAP_FLAG_OK | XATTR_MAP_FLAG_ALL; tmp_entry.key = g_strdup(""); tmp_entry.prepend = g_strdup(""); add_xattrmap_entry(lo, &tmp_entry); } g_free(key); g_free(prefix); }
0
[ "CWE-281" ]
qemu
e586edcb410543768ef009eaa22a2d9dd4a53846
102,105,984,769,050,190,000,000,000,000,000,000,000
91
virtiofs: drop remapped security.capability xattr as needed On Linux, the 'security.capability' xattr holds a set of capabilities that can change when an executable is run, giving a limited form of privilege escalation to those programs that the writer of the file deemed worthy. Any write causes the 'security.capability' xattr to be dropped, stopping anyone from gaining privilege by modifying a blessed file. Fuse relies on the daemon to do this dropping, and in turn the daemon relies on the host kernel to drop the xattr for it. However, with the addition of -o xattrmap, the xattr that the guest stores its capabilities in is now not the same as the one that the host kernel automatically clears. Where the mapping changes 'security.capability', explicitly clear the remapped name to preserve the same behaviour. This bug is assigned CVE-2021-20263. Signed-off-by: Dr. David Alan Gilbert <[email protected]> Reviewed-by: Vivek Goyal <[email protected]>
static void nfp_flower_stop(struct nfp_app *app) { nfp_tunnel_config_stop(app); }
0
[ "CWE-400", "CWE-401" ]
linux
8572cea1461a006bce1d06c0c4b0575869125fa4
25,675,607,691,324,760,000,000,000,000,000,000,000
4
nfp: flower: prevent memory leak in nfp_flower_spawn_phy_reprs In nfp_flower_spawn_phy_reprs, in the for loop over eth_tbl, if any of the intermediate allocations or initializations fail, memory is leaked. The required releases are added. Fixes: b94524529741 ("nfp: flower: add per repr private data for LAG offload") Signed-off-by: Navid Emamdoost <[email protected]> Acked-by: Jakub Kicinski <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static int neigh_stat_seq_show(struct seq_file *seq, void *v) { struct proc_dir_entry *pde = seq->private; struct neigh_table *tbl = pde->data; struct neigh_statistics *st = v; if (v == SEQ_START_TOKEN) { seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n"); return 0; } seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx " "%08lx %08lx %08lx %08lx\n", atomic_read(&tbl->entries), st->allocs, st->destroys, st->hash_grows, st->lookups, st->hits, st->res_failed, st->rcv_probes_mcast, st->rcv_probes_ucast, st->periodic_gc_runs, st->forced_gc_runs ); return 0; }
0
[ "CWE-200" ]
linux-2.6
9ef1d4c7c7aca1cd436612b6ca785b726ffb8ed8
314,743,130,912,990,980,000,000,000,000,000,000,000
33
[NETLINK]: Missing initializations in dumped data Mostly missing initialization of padding fields of 1 or 2 bytes length, two instances of uninitialized nlmsgerr->msg of 16 bytes length. Signed-off-by: Patrick McHardy <[email protected]> Signed-off-by: David S. Miller <[email protected]>
int ext4_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh, int create) { return _ext4_get_block(inode, iblock, bh, create ? EXT4_GET_BLOCKS_CREATE : 0); }
0
[ "CWE-362" ]
linux
ea3d7209ca01da209cda6f0dea8be9cc4b7a933b
234,639,078,993,372,020,000,000,000,000,000,000,000
6
ext4: fix races between page faults and hole punching Currently, page faults and hole punching are completely unsynchronized. This can result in page fault faulting in a page into a range that we are punching after truncate_pagecache_range() has been called and thus we can end up with a page mapped to disk blocks that will be shortly freed. Filesystem corruption will shortly follow. Note that the same race is avoided for truncate by checking page fault offset against i_size but there isn't similar mechanism available for punching holes. Fix the problem by creating new rw semaphore i_mmap_sem in inode and grab it for writing over truncate, hole punching, and other functions removing blocks from extent tree and for read over page faults. We cannot easily use i_data_sem for this since that ranks below transaction start and we need something ranking above it so that it can be held over the whole truncate / hole punching operation. Also remove various workarounds we had in the code to reduce race window when page fault could have created pages with stale mapping information. Signed-off-by: Jan Kara <[email protected]> Signed-off-by: Theodore Ts'o <[email protected]>
static OPJ_BOOL opj_tcd_dc_level_shift_encode(opj_tcd_t *p_tcd) { OPJ_UINT32 compno; opj_tcd_tilecomp_t * l_tile_comp = 00; opj_tccp_t * l_tccp = 00; opj_image_comp_t * l_img_comp = 00; opj_tcd_tile_t * l_tile; OPJ_SIZE_T l_nb_elem, i; OPJ_INT32 * l_current_ptr; l_tile = p_tcd->tcd_image->tiles; l_tile_comp = l_tile->comps; l_tccp = p_tcd->tcp->tccps; l_img_comp = p_tcd->image->comps; for (compno = 0; compno < l_tile->numcomps; compno++) { l_current_ptr = l_tile_comp->data; l_nb_elem = (OPJ_SIZE_T)(l_tile_comp->x1 - l_tile_comp->x0) * (OPJ_SIZE_T)(l_tile_comp->y1 - l_tile_comp->y0); if (l_tccp->qmfbid == 1) { for (i = 0; i < l_nb_elem; ++i) { *l_current_ptr -= l_tccp->m_dc_level_shift ; ++l_current_ptr; } } else { for (i = 0; i < l_nb_elem; ++i) { *l_current_ptr = (*l_current_ptr - l_tccp->m_dc_level_shift) * (1 << 11); ++l_current_ptr; } } ++l_img_comp; ++l_tccp; ++l_tile_comp; } return OPJ_TRUE; }
0
[ "CWE-787" ]
openjpeg
05f9b91e60debda0e83977e5e63b2e66486f7074
208,610,362,964,782,900,000,000,000,000,000,000,000
39
opj_tcd_init_tile(): avoid integer overflow That could lead to later assertion failures. Fixes #1231 / CVE-2020-8112
self_recursively_generated_p (ipcp_value<tree> *val) { class ipa_node_params *info = NULL; for (ipcp_value_source<tree> *src = val->sources; src; src = src->next) { cgraph_edge *cs = src->cs; if (!src->val || cs->caller != cs->callee->function_symbol () || src->val == val) return false; if (!info) info = IPA_NODE_REF (cs->caller); class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, src->index); ipcp_lattice<tree> *src_lat = src->offset == -1 ? &plats->itself : plats->aggs; ipcp_value<tree> *src_val; for (src_val = src_lat->values; src_val; src_val = src_val->next) if (src_val == val) break; if (!src_val) return false; } return true; }
0
[ "CWE-20" ]
gcc
a09ccc22459c565814f79f96586fe4ad083fe4eb
114,424,561,116,161,620,000,000,000,000,000,000,000
31
Avoid segfault when doing IPA-VRP but not IPA-CP (PR 93015) 2019-12-21 Martin Jambor <[email protected]> PR ipa/93015 * ipa-cp.c (ipcp_store_vr_results): Check that info exists testsuite/ * gcc.dg/lto/pr93015_0.c: New test. From-SVN: r279695
ConnectionHandlerTest() : handler_(new ConnectionHandlerImpl(dispatcher_, "test")), filter_chain_(Network::Test::createEmptyFilterChainWithRawBufferSockets()) {}
0
[ "CWE-835" ]
envoy
c8de199e2971f79cbcbc6b5eadc8c566b28705d1
245,504,494,149,359,900,000,000,000,000,000,000,000
3
listener: clean up accept filter before creating connection (#8922) Signed-off-by: Yuchen Dai <[email protected]>
static double ratio(Bigint *a, Bigint *b) { double_u da, db; int k, ka, kb; dval(da) = b2d(a, &ka); dval(db) = b2d(b, &kb); #ifdef Pack_32 k = ka - kb + 32*(a->wds - b->wds); #else k = ka - kb + 16*(a->wds - b->wds); #endif #ifdef IBM if (k > 0) { word0(da) += (k >> 2)*Exp_msk1; if (k &= 3) dval(da) *= 1 << k; } else { k = -k; word0(db) += (k >> 2)*Exp_msk1; if (k &= 3) dval(db) *= 1 << k; } #else if (k > 0) word0(da) += k*Exp_msk1; else { k = -k; word0(db) += k*Exp_msk1; } #endif return dval(da) / dval(db); }
0
[ "CWE-119" ]
ruby
5cb83d9dab13e14e6146f455ffd9fed4254d238f
44,808,003,732,301,540,000,000,000,000,000,000,000
34
util.c: ignore too long fraction part * util.c (ruby_strtod): ignore too long fraction part, which does not affect the result. git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@43775 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
TEST(WriterTest, WriteWithoutArgs) { MemoryWriter w; w.write("test"); EXPECT_EQ("test", std::string(w.data(), w.size())); }
0
[ "CWE-134", "CWE-119", "CWE-787" ]
fmt
8cf30aa2be256eba07bb1cefb998c52326e846e7
182,824,770,754,763,150,000,000,000,000,000,000,000
5
Fix segfault on complex pointer formatting (#642)
PHP_FUNCTION(sqlite_single_query) { zval *zdb, *ent; struct php_sqlite_db *db; struct php_sqlite_result *rres; char *sql; int sql_len; char *errtext = NULL; zend_bool decode_binary = 1; zend_bool srow = 1; zval *object = getThis(); if (object) { if (FAILURE == zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|bb", &sql, &sql_len, &srow, &decode_binary)) { return; } RES_FROM_OBJECT(db, object); } else { if (FAILURE == zend_parse_parameters_ex(ZEND_PARSE_PARAMS_QUIET, ZEND_NUM_ARGS() TSRMLS_CC, "sr|bb", &sql, &sql_len, &zdb, &srow, &decode_binary) && FAILURE == zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rs|bb", &zdb, &sql, &sql_len, &srow, &decode_binary)) { return; } DB_FROM_ZVAL(db, &zdb); } PHP_SQLITE_EMPTY_QUERY; /* avoid doing work if we can */ if (!return_value_used) { db->last_err_code = sqlite_exec(db->db, sql, NULL, NULL, &errtext); if (db->last_err_code != SQLITE_OK) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s", errtext); sqlite_freemem(errtext); } return; } rres = (struct php_sqlite_result *)ecalloc(1, sizeof(*rres)); sqlite_query(NULL, db, sql, sql_len, PHPSQLITE_NUM, 0, NULL, &rres, NULL TSRMLS_CC); if (db->last_err_code != SQLITE_OK) { if (rres) { efree(rres); } RETURN_FALSE; } if (!srow) { array_init(return_value); } while (rres->curr_row < rres->nrows) { MAKE_STD_ZVAL(ent); php_sqlite_fetch_single(rres, decode_binary, ent TSRMLS_CC); /* if set and we only have 1 row in the result set, return the result as a string. */ if (srow) { if (rres->curr_row == 1 && rres->curr_row >= rres->nrows) { *return_value = *ent; zval_copy_ctor(return_value); zval_dtor(ent); FREE_ZVAL(ent); break; } else { srow = 0; array_init(return_value); } } add_next_index_zval(return_value, ent); } real_result_dtor(rres TSRMLS_CC); }
0
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
162,861,580,010,790,210,000,000,000,000,000,000,000
74
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
recordDependencyOnCurrentExtension(const ObjectAddress *object, bool isReplace) { /* Only whole objects can be extension members */ Assert(object->objectSubId == 0); if (creating_extension) { ObjectAddress extension; /* Only need to check for existing membership if isReplace */ if (isReplace) { Oid oldext; /* * Side note: these catalog lookups are safe only because the * object is a pre-existing one. In the not-isReplace case, the * caller has most likely not yet done a CommandCounterIncrement * that would make the new object visible. */ oldext = getExtensionOfObject(object->classId, object->objectId); if (OidIsValid(oldext)) { /* If already a member of this extension, nothing to do */ if (oldext == CurrentExtensionObject) return; /* Already a member of some other extension, so reject */ ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("%s is already a member of extension \"%s\"", getObjectDescription(object), get_extension_name(oldext)))); } /* It's a free-standing object, so reject */ ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("%s is not a member of extension \"%s\"", getObjectDescription(object), get_extension_name(CurrentExtensionObject)), errdetail("An extension is not allowed to replace an object that it does not own."))); } /* OK, record it as a member of CurrentExtensionObject */ extension.classId = ExtensionRelationId; extension.objectId = CurrentExtensionObject; extension.objectSubId = 0; recordDependencyOn(object, &extension, DEPENDENCY_EXTENSION); } }
0
[ "CWE-94" ]
postgres
7e92f78abe80e4b30e648a40073abb59057e21f8
76,733,919,452,928,360,000,000,000,000,000,000,000
51
In extensions, don't replace objects not belonging to the extension. Previously, if an extension script did CREATE OR REPLACE and there was an existing object not belonging to the extension, it would overwrite the object and adopt it into the extension. This is problematic, first because the overwrite is probably unintentional, and second because we didn't change the object's ownership. Thus a hostile user could create an object in advance of an expected CREATE EXTENSION command, and would then have ownership rights on an extension object, which could be modified for trojan-horse-type attacks. Hence, forbid CREATE OR REPLACE of an existing object unless it already belongs to the extension. (Note that we've always forbidden replacing an object that belongs to some other extension; only the behavior for previously-free-standing objects changes here.) For the same reason, also fail CREATE IF NOT EXISTS when there is an existing object that doesn't belong to the extension. Our thanks to Sven Klemm for reporting this problem. Security: CVE-2022-2625
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma) { pte_t *src_pte, *dst_pte, entry; struct page *ptepage; unsigned long addr; int cow; struct hstate *h = hstate_vma(vma); unsigned long sz = huge_page_size(h); cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { src_pte = huge_pte_offset(src, addr); if (!src_pte) continue; dst_pte = huge_pte_alloc(dst, addr, sz); if (!dst_pte) goto nomem; /* If the pagetables are shared don't copy or take references */ if (dst_pte == src_pte) continue; spin_lock(&dst->page_table_lock); spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING); if (!huge_pte_none(huge_ptep_get(src_pte))) { if (cow) huge_ptep_set_wrprotect(src, addr, src_pte); entry = huge_ptep_get(src_pte); ptepage = pte_page(entry); get_page(ptepage); page_dup_rmap(ptepage); set_huge_pte_at(dst, addr, dst_pte, entry); } spin_unlock(&src->page_table_lock); spin_unlock(&dst->page_table_lock); } return 0; nomem: return -ENOMEM; }
0
[ "CWE-399" ]
linux
90481622d75715bfcb68501280a917dbfe516029
311,747,471,768,984,000,000,000,000,000,000,000,000
43
hugepages: fix use after free bug in "quota" handling hugetlbfs_{get,put}_quota() are badly named. They don't interact with the general quota handling code, and they don't much resemble its behaviour. Rather than being about maintaining limits on on-disk block usage by particular users, they are instead about maintaining limits on in-memory page usage (including anonymous MAP_PRIVATE copied-on-write pages) associated with a particular hugetlbfs filesystem instance. Worse, they work by having callbacks to the hugetlbfs filesystem code from the low-level page handling code, in particular from free_huge_page(). This is a layering violation of itself, but more importantly, if the kernel does a get_user_pages() on hugepages (which can happen from KVM amongst others), then the free_huge_page() can be delayed until after the associated inode has already been freed. If an unmount occurs at the wrong time, even the hugetlbfs superblock where the "quota" limits are stored may have been freed. Andrew Barry proposed a patch to fix this by having hugepages, instead of storing a pointer to their address_space and reaching the superblock from there, had the hugepages store pointers directly to the superblock, bumping the reference count as appropriate to avoid it being freed. Andrew Morton rejected that version, however, on the grounds that it made the existing layering violation worse. This is a reworked version of Andrew's patch, which removes the extra, and some of the existing, layering violation. It works by introducing the concept of a hugepage "subpool" at the lower hugepage mm layer - that is a finite logical pool of hugepages to allocate from. hugetlbfs now creates a subpool for each filesystem instance with a page limit set, and a pointer to the subpool gets added to each allocated hugepage, instead of the address_space pointer used now. The subpool has its own lifetime and is only freed once all pages in it _and_ all other references to it (i.e. superblocks) are gone. subpools are optional - a NULL subpool pointer is taken by the code to mean that no subpool limits are in effect. Previous discussion of this bug found in: "Fix refcounting in hugetlbfs quota handling.". See: https://lkml.org/lkml/2011/8/11/28 or http://marc.info/?l=linux-mm&m=126928970510627&w=1 v2: Fixed a bug spotted by Hillf Danton, and removed the extra parameter to alloc_huge_page() - since it already takes the vma, it is not necessary. Signed-off-by: Andrew Barry <[email protected]> Signed-off-by: David Gibson <[email protected]> Cc: Hugh Dickins <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Minchan Kim <[email protected]> Cc: Hillf Danton <[email protected]> Cc: Paul Mackerras <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id) { size_t num; const struct sys_reg_desc *table, *r; struct sys_reg_params params; /* We only do sys_reg for now. */ if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG) return NULL; if (!index_to_params(id, &params)) return NULL; table = get_target_table(vcpu->arch.target, true, &num); r = find_reg(&params, table, num); if (!r) r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); /* Not saved in the sys_reg array? */ if (r && !r->reg) r = NULL; return r; }
0
[ "CWE-20", "CWE-617" ]
linux
9e3f7a29694049edd728e2400ab57ad7553e5aa9
108,774,421,631,038,010,000,000,000,000,000,000,000
25
arm64: KVM: pmu: Fix AArch32 cycle counter access We're missing the handling code for the cycle counter accessed from a 32bit guest, leading to unexpected results. Cc: [email protected] # 4.6+ Signed-off-by: Wei Huang <[email protected]> Signed-off-by: Marc Zyngier <[email protected]>
static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages, enum io_mem_account acct) { if (ctx->limit_mem) __io_unaccount_mem(ctx->user, nr_pages); if (ctx->mm_account) { if (acct == ACCT_LOCKED) ctx->mm_account->locked_vm -= nr_pages; else if (acct == ACCT_PINNED) atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm); } }
0
[]
linux
0f2122045b946241a9e549c2a76cea54fa58a7ff
172,585,881,021,620,430,000,000,000,000,000,000,000
13
io_uring: don't rely on weak ->files references Grab actual references to the files_struct. To avoid circular references issues due to this, we add a per-task note that keeps track of what io_uring contexts a task has used. When the tasks execs or exits its assigned files, we cancel requests based on this tracking. With that, we can grab proper references to the files table, and no longer need to rely on stashing away ring_fd and ring_file to check if the ring_fd may have been closed. Cc: [email protected] # v5.5+ Reviewed-by: Pavel Begunkov <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data) { u32 reg, val; val = 0; if (!tg3_readphy(tp, MII_BMCR, &reg)) val = reg << 16; if (!tg3_readphy(tp, MII_BMSR, &reg)) val |= (reg & 0xffff); *data++ = val; val = 0; if (!tg3_readphy(tp, MII_ADVERTISE, &reg)) val = reg << 16; if (!tg3_readphy(tp, MII_LPA, &reg)) val |= (reg & 0xffff); *data++ = val; val = 0; if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) { if (!tg3_readphy(tp, MII_CTRL1000, &reg)) val = reg << 16; if (!tg3_readphy(tp, MII_STAT1000, &reg)) val |= (reg & 0xffff); } *data++ = val; if (!tg3_readphy(tp, MII_PHYADDR, &reg)) val = reg << 16; else val = 0; *data++ = val; }
0
[ "CWE-476", "CWE-119" ]
linux
715230a44310a8cf66fbfb5a46f9a62a9b2de424
322,013,749,523,344,750,000,000,000,000,000,000,000
33
tg3: fix length overflow in VPD firmware parsing Commit 184b89044fb6e2a74611dafa69b1dce0d98612c6 ("tg3: Use VPD fw version when present") introduced VPD parsing that contained a potential length overflow. Limit the hardware's reported firmware string length (max 255 bytes) to stay inside the driver's firmware string length (32 bytes). On overflow, truncate the formatted firmware string instead of potentially overwriting portions of the tg3 struct. http://cansecwest.com/slides/2013/PrivateCore%20CSW%202013.pdf Signed-off-by: Kees Cook <[email protected]> Reported-by: Oded Horovitz <[email protected]> Reported-by: Brad Spengler <[email protected]> Cc: [email protected] Cc: Matt Carlson <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static void io_cqring_fill_event(struct io_kiocb *req, long res) { __io_cqring_fill_event(req, res, 0); }
0
[]
linux
0f2122045b946241a9e549c2a76cea54fa58a7ff
261,158,685,300,427,300,000,000,000,000,000,000,000
4
io_uring: don't rely on weak ->files references Grab actual references to the files_struct. To avoid circular references issues due to this, we add a per-task note that keeps track of what io_uring contexts a task has used. When the tasks execs or exits its assigned files, we cancel requests based on this tracking. With that, we can grab proper references to the files table, and no longer need to rely on stashing away ring_fd and ring_file to check if the ring_fd may have been closed. Cc: [email protected] # v5.5+ Reviewed-by: Pavel Begunkov <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, struct xt_table_info *newinfo, unsigned int *size, const unsigned char *base, const unsigned char *limit) { struct xt_entry_match *ematch; struct xt_entry_target *t; struct xt_target *target; unsigned int entry_offset; unsigned int j; int ret, off; if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit || (unsigned char *)e + e->next_offset > limit) return -EINVAL; if (e->next_offset < sizeof(struct compat_ip6t_entry) + sizeof(struct compat_xt_entry_target)) return -EINVAL; if (!ip6_checkentry(&e->ipv6)) return -EINVAL; ret = xt_compat_check_entry_offsets(e, e->elems, e->target_offset, e->next_offset); if (ret) return ret; off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); entry_offset = (void *)e - (void *)base; j = 0; xt_ematch_foreach(ematch, e) { ret = compat_find_calc_match(ematch, &e->ipv6, &off); if (ret != 0) goto release_matches; ++j; } t = compat_ip6t_get_target(e); target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { ret = PTR_ERR(target); goto release_matches; } t->u.kernel.target = target; off += xt_compat_target_offset(target); *size += off; ret = xt_compat_add_offset(AF_INET6, entry_offset, off); if (ret) goto out; return 0; out: module_put(t->u.kernel.target->me); release_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; module_put(ematch->u.kernel.match->me); } return ret; }
0
[ "CWE-476" ]
linux
57ebd808a97d7c5b1e1afb937c2db22beba3c1f8
93,674,990,902,307,130,000,000,000,000,000,000,000
67
netfilter: add back stackpointer size checks The rationale for removing the check is only correct for rulesets generated by ip(6)tables. In iptables, a jump can only occur to a user-defined chain, i.e. because we size the stack based on number of user-defined chains we cannot exceed stack size. However, the underlying binary format has no such restriction, and the validation step only ensures that the jump target is a valid rule start point. IOW, its possible to build a rule blob that has no user-defined chains but does contain a jump. If this happens, no jump stack gets allocated and crash occurs because no jumpstack was allocated. Fixes: 7814b6ec6d0d6 ("netfilter: xtables: don't save/restore jumpstack offset") Reported-by: [email protected] Signed-off-by: Florian Westphal <[email protected]> Signed-off-by: Pablo Neira Ayuso <[email protected]>
tiff_rgb_print_page(gx_device_printer * pdev, gp_file * file) { gx_device_tiff *const tfdev = (gx_device_tiff *)pdev; int code; /* open the TIFF device */ if (gdev_prn_file_is_new(pdev)) { tfdev->tif = tiff_from_filep(pdev, pdev->dname, file, tfdev->BigEndian, tfdev->UseBigTIFF); if (!tfdev->tif) return_error(gs_error_invalidfileaccess); } code = gdev_tiff_begin_page(tfdev, file); if (code < 0) return code; TIFFSetField(tfdev->tif, TIFFTAG_BITSPERSAMPLE, pdev->color_info.depth / pdev->color_info.num_components); tiff_set_rgb_fields(tfdev); /* Write the page data. */ return tiff_print_page(pdev, tfdev->tif, 0); }
0
[ "CWE-787" ]
ghostpdl
714e8995cd582d418276915cbbec3c70711fb19e
102,517,325,254,448,030,000,000,000,000,000,000,000
23
Bug 701807: avoid buffer overflow in tiff12_print_page(). Fixes: ./sanbin/gs -r650 -sOutputFile=tmp -sDEVICE=tiff12nc ../bug-701807.pdf
static int track_header(VividasDemuxContext *viv, AVFormatContext *s, uint8_t *buf, int size) { int i, j, ret; int64_t off; int val_1; int num_video; AVIOContext pb0, *pb = &pb0; ffio_init_context(pb, buf, size, 0, NULL, NULL, NULL, NULL); ffio_read_varlen(pb); // track_header_len avio_r8(pb); // '1' val_1 = ffio_read_varlen(pb); for (i=0;i<val_1;i++) { int c = avio_r8(pb); if (avio_feof(pb)) return AVERROR_EOF; for (j=0;j<c;j++) { if (avio_feof(pb)) return AVERROR_EOF; avio_r8(pb); // val_3 avio_r8(pb); // val_4 } } avio_r8(pb); // num_streams off = avio_tell(pb); off += ffio_read_varlen(pb); // val_5 avio_r8(pb); // '2' num_video = avio_r8(pb); avio_seek(pb, off, SEEK_SET); if (num_video != 1) { av_log(s, AV_LOG_ERROR, "number of video tracks %d is not 1\n", num_video); return AVERROR_PATCHWELCOME; } for (i = 0; i < num_video; i++) { AVStream *st = avformat_new_stream(s, NULL); int num, den; if (!st) return AVERROR(ENOMEM); st->id = i; st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO; st->codecpar->codec_id = AV_CODEC_ID_VP6; off = avio_tell(pb); off += ffio_read_varlen(pb); avio_r8(pb); // '3' avio_r8(pb); // val_7 num = avio_rl32(pb); // frame_time den = avio_rl32(pb); // time_base avpriv_set_pts_info(st, 64, num, den); st->nb_frames = avio_rl32(pb); // n frames st->codecpar->width = avio_rl16(pb); // width st->codecpar->height = avio_rl16(pb); // height avio_r8(pb); // val_8 avio_rl32(pb); // val_9 avio_seek(pb, off, SEEK_SET); } off = avio_tell(pb); off += ffio_read_varlen(pb); // val_10 avio_r8(pb); // '4' viv->num_audio = avio_r8(pb); avio_seek(pb, off, SEEK_SET); if (viv->num_audio != 1) av_log(s, AV_LOG_WARNING, "number of audio tracks %d is not 1\n", viv->num_audio); for(i=0;i<viv->num_audio;i++) { int q; AVStream *st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); st->id = num_video + i; st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; st->codecpar->codec_id = AV_CODEC_ID_VORBIS; off = avio_tell(pb); off += ffio_read_varlen(pb); // length avio_r8(pb); // '5' avio_r8(pb); //codec_id avio_rl16(pb); //codec_subid st->codecpar->channels = avio_rl16(pb); // channels st->codecpar->sample_rate = avio_rl32(pb); // sample_rate avio_seek(pb, 10, SEEK_CUR); // data_1 q = avio_r8(pb); avio_seek(pb, q, SEEK_CUR); // data_2 avio_r8(pb); // zeropad if (avio_tell(pb) < off) { int num_data; int xd_size = 1; int data_len[256]; int offset = 1; uint8_t *p; ffio_read_varlen(pb); // val_13 avio_r8(pb); // '19' ffio_read_varlen(pb); // len_3 num_data = avio_r8(pb); for (j = 0; j < num_data; j++) { uint64_t len = ffio_read_varlen(pb); if (len > INT_MAX/2 - xd_size) { return AVERROR_INVALIDDATA; } data_len[j] = len; xd_size += len + 1 + len/255; } ret = ff_alloc_extradata(st->codecpar, xd_size); if (ret < 0) return ret; p = st->codecpar->extradata; p[0] = 2; for (j = 0; j < num_data - 1; j++) { unsigned delta = av_xiphlacing(&p[offset], data_len[j]); av_assert0(delta <= xd_size - offset); offset += delta; } for (j = 0; j < num_data; j++) { int ret = avio_read(pb, &p[offset], data_len[j]); if (ret < data_len[j]) { st->codecpar->extradata_size = 0; av_freep(&st->codecpar->extradata); break; } av_assert0(data_len[j] <= xd_size - offset); offset += data_len[j]; } if (offset < st->codecpar->extradata_size) st->codecpar->extradata_size = offset; } } return 0; }
0
[ "CWE-787" ]
FFmpeg
27a99e2c7d450fef15594671eef4465c8a166bd7
221,246,914,153,953,900,000,000,000,000,000,000,000
151
avformat/vividas: improve extradata packing checks in track_header() Fixes: out of array accesses Fixes: 26622/clusterfuzz-testcase-minimized-ffmpeg_dem_VIVIDAS_fuzzer-6581200338288640 Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg Signed-off-by: Michael Niedermayer <[email protected]>
static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
    hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}
0
[ "CWE-400" ]
linux-2.6
c377411f2494a931ff7facdbb3a6839b1266bcf6
54,235,644,556,295,930,000,000,000,000,000,000,000
4
net: sk_add_backlog() take rmem_alloc into account Current socket backlog limit is not enough to really stop DDOS attacks, because user thread spend many time to process a full backlog each round, and user might crazy spin on socket lock. We should add backlog size and receive_queue size (aka rmem_alloc) to pace writers, and let user run without being slow down too much. Introduce a sk_rcvqueues_full() helper, to avoid taking socket lock in stress situations. Under huge stress from a multiqueue/RPS enabled NIC, a single flow udp receiver can now process ~200.000 pps (instead of ~100 pps before the patch) on a 8 core machine. Signed-off-by: Eric Dumazet <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
{
    struct sched_domain *this_sd;
    u64 avg_cost, avg_idle;
    u64 time, cost;
    s64 delta;
    int this = smp_processor_id();
    int cpu, nr = INT_MAX, si_cpu = -1;

    this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
    if (!this_sd)
        return -1;

    /*
     * Due to large variance we need a large fuzz factor; hackbench in
     * particularly is sensitive here.
     */
    avg_idle = this_rq()->avg_idle / 512;
    avg_cost = this_sd->avg_scan_cost + 1;

    if (sched_feat(SIS_AVG_CPU) && avg_idle < avg_cost)
        return -1;

    if (sched_feat(SIS_PROP)) {
        u64 span_avg = sd->span_weight * avg_idle;
        if (span_avg > 4*avg_cost)
            nr = div_u64(span_avg, avg_cost);
        else
            nr = 4;
    }

    time = cpu_clock(this);

    for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
        if (!--nr)
            return si_cpu;
        if (!cpumask_test_cpu(cpu, p->cpus_ptr))
            continue;
        if (available_idle_cpu(cpu))
            break;
        if (si_cpu == -1 && sched_idle_cpu(cpu))
            si_cpu = cpu;
    }

    time = cpu_clock(this) - time;
    cost = this_sd->avg_scan_cost;
    delta = (s64)(time - cost) / 8;
    this_sd->avg_scan_cost += delta;

    return cpu;
}
0
[ "CWE-400", "CWE-703" ]
linux
de53fd7aedb100f03e5d2231cfce0e4993282425
68,602,551,787,628,185,000,000,000,000,000,000,000
51
sched/fair: Fix low cpu usage with high throttling by removing expiration of cpu-local slices It has been observed, that highly-threaded, non-cpu-bound applications running under cpu.cfs_quota_us constraints can hit a high percentage of periods throttled while simultaneously not consuming the allocated amount of quota. This use case is typical of user-interactive non-cpu bound applications, such as those running in kubernetes or mesos when run on multiple cpu cores. This has been root caused to cpu-local run queue being allocated per cpu bandwidth slices, and then not fully using that slice within the period. At which point the slice and quota expires. This expiration of unused slice results in applications not being able to utilize the quota for which they are allocated. The non-expiration of per-cpu slices was recently fixed by 'commit 512ac999d275 ("sched/fair: Fix bandwidth timer clock drift condition")'. Prior to that it appears that this had been broken since at least 'commit 51f2176d74ac ("sched/fair: Fix unlocked reads of some cfs_b->quota/period")' which was introduced in v3.16-rc1 in 2014. That added the following conditional which resulted in slices never being expired. if (cfs_rq->runtime_expires != cfs_b->runtime_expires) { /* extend local deadline, drift is bounded above by 2 ticks */ cfs_rq->runtime_expires += TICK_NSEC; Because this was broken for nearly 5 years, and has recently been fixed and is now being noticed by many users running kubernetes (https://github.com/kubernetes/kubernetes/issues/67577) it is my opinion that the mechanisms around expiring runtime should be removed altogether. This allows quota already allocated to per-cpu run-queues to live longer than the period boundary. This allows threads on runqueues that do not use much CPU to continue to use their remaining slice over a longer period of time than cpu.cfs_period_us. However, this helps prevent the above condition of hitting throttling while also not fully utilizing your cpu quota. This theoretically allows a machine to use slightly more than its allotted quota in some periods. This overflow would be bounded by the remaining quota left on each per-cpu runqueueu. This is typically no more than min_cfs_rq_runtime=1ms per cpu. For CPU bound tasks this will change nothing, as they should theoretically fully utilize all of their quota in each period. For user-interactive tasks as described above this provides a much better user/application experience as their cpu utilization will more closely match the amount they requested when they hit throttling. This means that cpu limits no longer strictly apply per period for non-cpu bound applications, but that they are still accurate over longer timeframes. This greatly improves performance of high-thread-count, non-cpu bound applications with low cfs_quota_us allocation on high-core-count machines. In the case of an artificial testcase (10ms/100ms of quota on 80 CPU machine), this commit resulted in almost 30x performance improvement, while still maintaining correct cpu quota restrictions. That testcase is available at https://github.com/indeedeng/fibtest. 
Fixes: 512ac999d275 ("sched/fair: Fix bandwidth timer clock drift condition") Signed-off-by: Dave Chiluk <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Reviewed-by: Phil Auld <[email protected]> Reviewed-by: Ben Segall <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: John Hammond <[email protected]> Cc: Jonathan Corbet <[email protected]> Cc: Kyle Anderson <[email protected]> Cc: Gabriel Munos <[email protected]> Cc: Peter Oskolkov <[email protected]> Cc: Cong Wang <[email protected]> Cc: Brendan Gregg <[email protected]> Link: https://lkml.kernel.org/r/[email protected]
static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, s32 rtt)
{
    struct illinois *ca = inet_csk_ca(sk);

    ca->acked = pkts_acked;

    /* dup ack, no rtt sample */
    if (rtt < 0)
        return;

    /* ignore bogus values, this prevents wraparound in alpha math */
    if (rtt > RTT_MAX)
        rtt = RTT_MAX;

    /* keep track of minimum RTT seen so far */
    if (ca->base_rtt > rtt)
        ca->base_rtt = rtt;

    /* and max */
    if (ca->max_rtt < rtt)
        ca->max_rtt = rtt;

    ++ca->cnt_rtt;
    ca->sum_rtt += rtt;
}
0
[ "CWE-703", "CWE-189" ]
linux
8f363b77ee4fbf7c3bbcf5ec2c5ca482d396d664
241,932,525,640,136,050,000,000,000,000,000,000,000
25
net: fix divide by zero in tcp algorithm illinois Reading TCP stats when using TCP Illinois congestion control algorithm can cause a divide by zero kernel oops. The division by zero occur in tcp_illinois_info() at: do_div(t, ca->cnt_rtt); where ca->cnt_rtt can become zero (when rtt_reset is called) Steps to Reproduce: 1. Register tcp_illinois: # sysctl -w net.ipv4.tcp_congestion_control=illinois 2. Monitor internal TCP information via command "ss -i" # watch -d ss -i 3. Establish new TCP conn to machine Either it fails at the initial conn, or else it needs to wait for a loss or a reset. This is only related to reading stats. The function avg_delay() also performs the same divide, but is guarded with a (ca->cnt_rtt > 0) at its calling point in update_params(). Thus, simply fix tcp_illinois_info(). Function tcp_illinois_info() / get_info() is called without socket lock. Thus, eliminate any race condition on ca->cnt_rtt by using a local stack variable. Simply reuse info.tcpv_rttcnt, as its already set to ca->cnt_rtt. Function avg_delay() is not affected by this race condition, as its called with the socket lock. Cc: Petr Matousek <[email protected]> Signed-off-by: Jesper Dangaard Brouer <[email protected]> Acked-by: Eric Dumazet <[email protected]> Acked-by: Stephen Hemminger <[email protected]> Signed-off-by: David S. Miller <[email protected]>
void ext4_clear_inode(struct inode *inode)
{
    invalidate_inode_buffers(inode);
    clear_inode(inode);
    dquot_drop(inode);
    ext4_discard_preallocations(inode);
    ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
    if (EXT4_I(inode)->jinode) {
        jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode), EXT4_I(inode)->jinode);
        jbd2_free_inode(EXT4_I(inode)->jinode);
        EXT4_I(inode)->jinode = NULL;
    }
#ifdef CONFIG_EXT4_FS_ENCRYPTION
    if (EXT4_I(inode)->i_crypt_info)
        ext4_free_encryption_info(inode, EXT4_I(inode)->i_crypt_info);
#endif
}
0
[ "CWE-362" ]
linux
ea3d7209ca01da209cda6f0dea8be9cc4b7a933b
177,718,823,230,829,600,000,000,000,000,000,000,000
18
ext4: fix races between page faults and hole punching Currently, page faults and hole punching are completely unsynchronized. This can result in page fault faulting in a page into a range that we are punching after truncate_pagecache_range() has been called and thus we can end up with a page mapped to disk blocks that will be shortly freed. Filesystem corruption will shortly follow. Note that the same race is avoided for truncate by checking page fault offset against i_size but there isn't similar mechanism available for punching holes. Fix the problem by creating new rw semaphore i_mmap_sem in inode and grab it for writing over truncate, hole punching, and other functions removing blocks from extent tree and for read over page faults. We cannot easily use i_data_sem for this since that ranks below transaction start and we need something ranking above it so that it can be held over the whole truncate / hole punching operation. Also remove various workarounds we had in the code to reduce race window when page fault could have created pages with stale mapping information. Signed-off-by: Jan Kara <[email protected]> Signed-off-by: Theodore Ts'o <[email protected]>
void bdrv_make_anon(BlockDriverState *bs)
{
    if (bs->device_name[0] != '\0') {
        QTAILQ_REMOVE(&bdrv_states, bs, device_list);
    }
    bs->device_name[0] = '\0';
    if (bs->node_name[0] != '\0') {
        QTAILQ_REMOVE(&graph_bdrv_states, bs, node_list);
    }
    bs->node_name[0] = '\0';
}
0
[ "CWE-190" ]
qemu
8f4754ede56e3f9ea3fd7207f4a7c4453e59285b
197,668,274,116,407,200,000,000,000,000,000,000,000
11
block: Limit request size (CVE-2014-0143) Limiting the size of a single request to INT_MAX not only fixes a direct integer overflow in bdrv_check_request() (which would only trigger bad behaviour with ridiculously huge images, as in close to 2^64 bytes), but can also prevent overflows in all block drivers. Signed-off-by: Kevin Wolf <[email protected]> Reviewed-by: Max Reitz <[email protected]> Signed-off-by: Stefan Hajnoczi <[email protected]>
int imagetorawl(opj_image_t * image, const char *outfile)
{
    return imagetoraw_common(image, outfile, OPJ_FALSE);
}
0
[ "CWE-787" ]
openjpeg
e5285319229a5d77bf316bb0d3a6cbd3cb8666d9
262,026,796,034,041,570,000,000,000,000,000,000,000
4
pgxtoimage(): fix write stack buffer overflow (#997)
Client::handledEarlyAdaptationAbort()
{
    if (entry->isEmpty()) {
        debugs(11,8, "adaptation failure with an empty entry: " << *entry);
        ErrorState *err = new ErrorState(ERR_ICAP_FAILURE, Http::scInternalServerError, request);
        err->detailError(ERR_DETAIL_ICAP_RESPMOD_EARLY);
        fwd->fail(err);
        fwd->dontRetry(true);
        abortAll("adaptation failure with an empty entry");
        return true; // handled
    }

    if (request) // update logged info directly
        request->detailError(ERR_ICAP_FAILURE, ERR_DETAIL_ICAP_RESPMOD_LATE);

    return false; // the caller must handle
}
0
[ "CWE-20" ]
squid
1e05a85bd28c22c9ca5d3ac9f5e86d6269ec0a8c
2,119,869,260,276,331,400,000,000,000,000,000,000
17
Handle more partial responses (#791)
spice_server_char_device_add_interface(SpiceServer *reds, SpiceBaseInstance *sin)
{
    SpiceCharDeviceInstance* char_device = SPICE_UPCAST(SpiceCharDeviceInstance, sin);
    red::shared_ptr<RedCharDevice> dev_state;

    spice_debug("CHAR_DEVICE %s", char_device->subtype);
    if (strcmp(char_device->subtype, SUBTYPE_VDAGENT) == 0) {
        if (reds->vdagent) {
            spice_warning("vdagent already attached");
            return -1;
        }
        dev_state = attach_to_red_agent(reds, char_device);
    }
#ifdef USE_SMARTCARD
    else if (strcmp(char_device->subtype, SUBTYPE_SMARTCARD) == 0) {
        dev_state = smartcard_device_connect(reds, char_device);
        if (!dev_state) {
            return -1;
        }
    }
#endif
    else if (strcmp(char_device->subtype, SUBTYPE_USBREDIR) == 0) {
        dev_state = spicevmc_device_connect(reds, char_device, SPICE_CHANNEL_USBREDIR);
    }
    else if (strcmp(char_device->subtype, SUBTYPE_PORT) == 0) {
        if (strcmp(char_device->portname, "org.spice-space.webdav.0") == 0) {
            dev_state = spicevmc_device_connect(reds, char_device, SPICE_CHANNEL_WEBDAV);
        } else if (strcmp(char_device->portname, "org.spice-space.stream.0") == 0) {
            dev_state = stream_device_connect(reds, char_device);
        } else {
            dev_state = spicevmc_device_connect(reds, char_device, SPICE_CHANNEL_PORT);
        }
    }

    if (dev_state) {
        /* When spicevmc_device_connect() is called to create a RedCharDevice,
         * it also assigns that as the internal state for char_device. This is
         * just a sanity check to ensure that assumption is correct */
        spice_assert(dev_state.get() == char_device->st);

        /* setting the char_device state to "started" for backward compatibily with
         * qemu releases that don't call spice api for start/stop (not implemented yet) */
        if (reds->vm_running) {
            dev_state->start();
        }
        reds_add_char_device(reds, dev_state);
    } else {
        spice_warning("failed to create device state for %s", char_device->subtype);
        return -1;
    }
    return 0;
}
0
[]
spice
ca5bbc5692e052159bce1a75f55dc60b36078749
63,885,979,933,862,210,000,000,000,000,000,000,000
53
With OpenSSL 1.1: Disable client-initiated renegotiation. Fixes issue #49 Fixes BZ#1904459 Signed-off-by: Julien Ropé <[email protected]> Reported-by: BlackKD Acked-by: Frediano Ziglio <[email protected]>
static void hidg_free(struct usb_function *f)
{
    struct f_hidg *hidg;
    struct f_hid_opts *opts;

    hidg = func_to_hidg(f);
    opts = container_of(f->fi, struct f_hid_opts, func_inst);
    kfree(hidg->report_desc);
    kfree(hidg);
    mutex_lock(&opts->lock);
    --opts->refcnt;
    mutex_unlock(&opts->lock);
}
0
[ "CWE-703", "CWE-667", "CWE-189" ]
linux
072684e8c58d17e853f8e8b9f6d9ce2e58d2b036
328,739,883,856,492,630,000,000,000,000,000,000,000
13
USB: gadget: f_hid: fix deadlock in f_hidg_write() In f_hidg_write() the write_spinlock is acquired before calling usb_ep_queue() which causes a deadlock when dummy_hcd is being used. This is because dummy_queue() callbacks into f_hidg_req_complete() which tries to acquire the same spinlock. This is (part of) the backtrace when the deadlock occurs: 0xffffffffc06b1410 in f_hidg_req_complete 0xffffffffc06a590a in usb_gadget_giveback_request 0xffffffffc06cfff2 in dummy_queue 0xffffffffc06a4b96 in usb_ep_queue 0xffffffffc06b1eb6 in f_hidg_write 0xffffffff8127730b in __vfs_write 0xffffffff812774d1 in vfs_write 0xffffffff81277725 in SYSC_write Fix this by releasing the write_spinlock before calling usb_ep_queue() Reviewed-by: James Bottomley <[email protected]> Tested-by: James Bottomley <[email protected]> Cc: [email protected] # 4.11+ Fixes: 749494b6bdbb ("usb: gadget: f_hid: fix: Move IN request allocation to set_alt()") Signed-off-by: Radoslav Gerganov <[email protected]> Signed-off-by: Felipe Balbi <[email protected]>
void init_hash_table(struct io_hash_table *table, unsigned size)
{
    unsigned int i;

    for (i = 0; i < size; i++) {
        spin_lock_init(&table->hbs[i].lock);
        INIT_HLIST_HEAD(&table->hbs[i].list);
    }
}
0
[ "CWE-193" ]
linux
47abea041f897d64dbd5777f0cf7745148f85d75
166,434,899,294,878,280,000,000,000,000,000,000,000
9
io_uring: fix off-by-one in sync cancelation file check The passed in index should be validated against the number of registered files we have, it needs to be smaller than the index value to avoid going one beyond the end. Fixes: 78a861b94959 ("io_uring: add sync cancelation API through io_uring_register()") Reported-by: Luo Likang <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
void adjust_managed_page_count(struct page *page, long count)
{
    spin_lock(&managed_page_count_lock);
    page_zone(page)->managed_pages += count;
    totalram_pages += count;
#ifdef CONFIG_HIGHMEM
    if (PageHighMem(page))
        totalhigh_pages += count;
#endif
    spin_unlock(&managed_page_count_lock);
}
0
[]
linux
400e22499dd92613821374c8c6c88c7225359980
266,235,013,973,019,300,000,000,000,000,000,000,000
11
mm: don't warn about allocations which stall for too long Commit 63f53dea0c98 ("mm: warn about allocations which stall for too long") was a great step for reducing possibility of silent hang up problem caused by memory allocation stalls. But this commit reverts it, for it is possible to trigger OOM lockup and/or soft lockups when many threads concurrently called warn_alloc() (in order to warn about memory allocation stalls) due to current implementation of printk(), and it is difficult to obtain useful information due to limitation of synchronous warning approach. Current printk() implementation flushes all pending logs using the context of a thread which called console_unlock(). printk() should be able to flush all pending logs eventually unless somebody continues appending to printk() buffer. Since warn_alloc() started appending to printk() buffer while waiting for oom_kill_process() to make forward progress when oom_kill_process() is processing pending logs, it became possible for warn_alloc() to force oom_kill_process() loop inside printk(). As a result, warn_alloc() significantly increased possibility of preventing oom_kill_process() from making forward progress. ---------- Pseudo code start ---------- Before warn_alloc() was introduced: retry: if (mutex_trylock(&oom_lock)) { while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here. mutex_unlock(&oom_lock) } goto retry; After warn_alloc() was introduced: retry: if (mutex_trylock(&oom_lock)) { while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here. mutex_unlock(&oom_lock) } else if (waited_for_10seconds()) { atomic_inc(&printk_pending_logs); } goto retry; ---------- Pseudo code end ---------- Although waited_for_10seconds() becomes true once per 10 seconds, unbounded number of threads can call waited_for_10seconds() at the same time. Also, since threads doing waited_for_10seconds() keep doing almost busy loop, the thread doing print_one_log() can use little CPU resource. Therefore, this situation can be simplified like ---------- Pseudo code start ---------- retry: if (mutex_trylock(&oom_lock)) { while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here. mutex_unlock(&oom_lock) } else { atomic_inc(&printk_pending_logs); } goto retry; ---------- Pseudo code end ---------- when printk() is called faster than print_one_log() can process a log. One of possible mitigation would be to introduce a new lock in order to make sure that no other series of printk() (either oom_kill_process() or warn_alloc()) can append to printk() buffer when one series of printk() (either oom_kill_process() or warn_alloc()) is already in progress. Such serialization will also help obtaining kernel messages in readable form. ---------- Pseudo code start ---------- retry: if (mutex_trylock(&oom_lock)) { mutex_lock(&oom_printk_lock); while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here. 
mutex_unlock(&oom_printk_lock); mutex_unlock(&oom_lock) } else { if (mutex_trylock(&oom_printk_lock)) { atomic_inc(&printk_pending_logs); mutex_unlock(&oom_printk_lock); } } goto retry; ---------- Pseudo code end ---------- But this commit does not go that direction, for we don't want to introduce a new lock dependency, and we unlikely be able to obtain useful information even if we serialized oom_kill_process() and warn_alloc(). Synchronous approach is prone to unexpected results (e.g. too late [1], too frequent [2], overlooked [3]). As far as I know, warn_alloc() never helped with providing information other than "something is going wrong". I want to consider asynchronous approach which can obtain information during stalls with possibly relevant threads (e.g. the owner of oom_lock and kswapd-like threads) and serve as a trigger for actions (e.g. turn on/off tracepoints, ask libvirt daemon to take a memory dump of stalling KVM guest for diagnostic purpose). This commit temporarily loses ability to report e.g. OOM lockup due to unable to invoke the OOM killer due to !__GFP_FS allocation request. But asynchronous approach will be able to detect such situation and emit warning. Thus, let's remove warn_alloc(). [1] https://bugzilla.kernel.org/show_bug.cgi?id=192981 [2] http://lkml.kernel.org/r/CAM_iQpWuPVGc2ky8M-9yukECtS+zKjiDasNymX7rMcBjBFyM_A@mail.gmail.com [3] commit db73ee0d46379922 ("mm, vmscan: do not loop on too_many_isolated for ever")) Link: http://lkml.kernel.org/r/1509017339-4802-1-git-send-email-penguin-kernel@I-love.SAKURA.ne.jp Signed-off-by: Tetsuo Handa <[email protected]> Reported-by: Cong Wang <[email protected]> Reported-by: yuwang.yuwang <[email protected]> Reported-by: Johannes Weiner <[email protected]> Acked-by: Michal Hocko <[email protected]> Acked-by: Johannes Weiner <[email protected]> Cc: Vlastimil Babka <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Dave Hansen <[email protected]> Cc: Sergey Senozhatsky <[email protected]> Cc: Petr Mladek <[email protected]> Cc: Steven Rostedt <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
Http::Stream::socketState()
{
    switch (clientStreamStatus(getTail(), http)) {

    case STREAM_NONE:
        /* check for range support ending */
        if (http->request->range) {
            /* check: reply was parsed and range iterator was initialized */
            assert(http->range_iter.valid);
            /* filter out data according to range specs */
            if (!canPackMoreRanges()) {
                debugs(33, 5, "Range request at end of returnable " << "range sequence on " << clientConnection);
                // we got everything we wanted from the store
                return STREAM_COMPLETE;
            }
        } else if (reply && reply->contentRange()) {
            /* reply has content-range, but Squid is not managing ranges */
            const int64_t &bytesSent = http->out.offset;
            const int64_t &bytesExpected = reply->contentRange()->spec.length;

            debugs(33, 7, "body bytes sent vs. expected: " << bytesSent << " ? " << bytesExpected << " (+" << reply->contentRange()->spec.offset << ")");

            // did we get at least what we expected, based on range specs?
            if (bytesSent == bytesExpected) // got everything
                return STREAM_COMPLETE;

            if (bytesSent > bytesExpected) // Error: Sent more than expected
                return STREAM_UNPLANNED_COMPLETE;
        }
        return STREAM_NONE;

    case STREAM_COMPLETE:
        return STREAM_COMPLETE;

    case STREAM_UNPLANNED_COMPLETE:
        return STREAM_UNPLANNED_COMPLETE;

    case STREAM_FAILED:
        return STREAM_FAILED;
    }

    fatal ("unreachable code\n");
    return STREAM_NONE;
}
1
[ "CWE-20" ]
squid
6c9c44d0e9cf7b72bb233360c5308aa063af3d69
55,925,527,948,065,220,000,000,000,000,000,000,000
50
Handle more partial responses (#791)
static int ext4_da_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata)
{
    int ret, retries = 0;
    struct page *page;
    pgoff_t index;
    struct inode *inode = mapping->host;
    handle_t *handle;

    index = pos >> PAGE_CACHE_SHIFT;

    if (ext4_nonda_switch(inode->i_sb)) {
        *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
        return ext4_write_begin(file, mapping, pos, len, flags, pagep, fsdata);
    }
    *fsdata = (void *)0;
    trace_ext4_da_write_begin(inode, pos, len, flags);

    if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
        ret = ext4_da_write_inline_data_begin(mapping, inode, pos, len, flags, pagep, fsdata);
        if (ret < 0)
            return ret;
        if (ret == 1)
            return 0;
    }

    /*
     * grab_cache_page_write_begin() can take a long time if the
     * system is thrashing due to memory pressure, or if the page
     * is being written back. So grab it first before we start
     * the transaction handle. This also allows us to allocate
     * the page (if needed) without using GFP_NOFS.
     */
retry_grab:
    page = grab_cache_page_write_begin(mapping, index, flags);
    if (!page)
        return -ENOMEM;
    unlock_page(page);

    /*
     * With delayed allocation, we don't log the i_disksize update
     * if there is delayed block allocation. But we still need
     * to journalling the i_disksize update if writes to the end
     * of file which has an already mapped buffer.
     */
retry_journal:
    handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, ext4_da_write_credits(inode, pos, len));
    if (IS_ERR(handle)) {
        page_cache_release(page);
        return PTR_ERR(handle);
    }

    lock_page(page);
    if (page->mapping != mapping) {
        /* The page got truncated from under us */
        unlock_page(page);
        page_cache_release(page);
        ext4_journal_stop(handle);
        goto retry_grab;
    }
    /* In case writeback began while the page was unlocked */
    wait_for_stable_page(page);

#ifdef CONFIG_EXT4_FS_ENCRYPTION
    ret = ext4_block_write_begin(page, pos, len, ext4_da_get_block_prep);
#else
    ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
#endif
    if (ret < 0) {
        unlock_page(page);
        ext4_journal_stop(handle);
        /*
         * block_write_begin may have instantiated a few blocks
         * outside i_size. Trim these off again. Don't need
         * i_size_read because we hold i_mutex.
         */
        if (pos + len > inode->i_size)
            ext4_truncate_failed_write(inode);

        if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
            goto retry_journal;

        page_cache_release(page);
        return ret;
    }

    *pagep = page;
    return ret;
}
0
[ "CWE-362" ]
linux
ea3d7209ca01da209cda6f0dea8be9cc4b7a933b
161,154,122,757,817,000,000,000,000,000,000,000,000
96
ext4: fix races between page faults and hole punching Currently, page faults and hole punching are completely unsynchronized. This can result in page fault faulting in a page into a range that we are punching after truncate_pagecache_range() has been called and thus we can end up with a page mapped to disk blocks that will be shortly freed. Filesystem corruption will shortly follow. Note that the same race is avoided for truncate by checking page fault offset against i_size but there isn't similar mechanism available for punching holes. Fix the problem by creating new rw semaphore i_mmap_sem in inode and grab it for writing over truncate, hole punching, and other functions removing blocks from extent tree and for read over page faults. We cannot easily use i_data_sem for this since that ranks below transaction start and we need something ranking above it so that it can be held over the whole truncate / hole punching operation. Also remove various workarounds we had in the code to reduce race window when page fault could have created pages with stale mapping information. Signed-off-by: Jan Kara <[email protected]> Signed-off-by: Theodore Ts'o <[email protected]>
void assoc_array_apply_edit(struct assoc_array_edit *edit)
{
    struct assoc_array_shortcut *shortcut;
    struct assoc_array_node *node;
    struct assoc_array_ptr *ptr;
    int i;

    pr_devel("-->%s()\n", __func__);

    smp_wmb();
    if (edit->leaf_p)
        *edit->leaf_p = edit->leaf;

    smp_wmb();
    for (i = 0; i < ARRAY_SIZE(edit->set_parent_slot); i++)
        if (edit->set_parent_slot[i].p)
            *edit->set_parent_slot[i].p = edit->set_parent_slot[i].to;

    smp_wmb();
    for (i = 0; i < ARRAY_SIZE(edit->set_backpointers); i++)
        if (edit->set_backpointers[i])
            *edit->set_backpointers[i] = edit->set_backpointers_to;

    smp_wmb();
    for (i = 0; i < ARRAY_SIZE(edit->set); i++)
        if (edit->set[i].ptr)
            *edit->set[i].ptr = edit->set[i].to;

    if (edit->array->root == NULL) {
        edit->array->nr_leaves_on_tree = 0;
    } else if (edit->adjust_count_on) {
        node = edit->adjust_count_on;
        for (;;) {
            node->nr_leaves_on_branch += edit->adjust_count_by;

            ptr = node->back_pointer;
            if (!ptr)
                break;
            if (assoc_array_ptr_is_shortcut(ptr)) {
                shortcut = assoc_array_ptr_to_shortcut(ptr);
                ptr = shortcut->back_pointer;
                if (!ptr)
                    break;
            }
            BUG_ON(!assoc_array_ptr_is_node(ptr));
            node = assoc_array_ptr_to_node(ptr);
        }

        edit->array->nr_leaves_on_tree += edit->adjust_count_by;
    }

    call_rcu(&edit->rcu, assoc_array_rcu_cleanup);
}
0
[ "CWE-399" ]
linux
95389b08d93d5c06ec63ab49bd732b0069b7c35e
229,547,922,404,453,170,000,000,000,000,000,000,000
53
KEYS: Fix termination condition in assoc array garbage collection This fixes CVE-2014-3631. It is possible for an associative array to end up with a shortcut node at the root of the tree if there are more than fan-out leaves in the tree, but they all crowd into the same slot in the lowest level (ie. they all have the same first nibble of their index keys). When assoc_array_gc() returns back up the tree after scanning some leaves, it can fall off of the root and crash because it assumes that the back pointer from a shortcut (after label ascend_old_tree) must point to a normal node - which isn't true of a shortcut node at the root. Should we find we're ascending rootwards over a shortcut, we should check to see if the backpointer is zero - and if it is, we have completed the scan. This particular bug cannot occur if the root node is not a shortcut - ie. if you have fewer than 17 keys in a keyring or if you have at least two keys that sit into separate slots (eg. a keyring and a non keyring). This can be reproduced by: ring=`keyctl newring bar @s` for ((i=1; i<=18; i++)); do last_key=`keyctl newring foo$i $ring`; done keyctl timeout $last_key 2 Doing this: echo 3 >/proc/sys/kernel/keys/gc_delay first will speed things up. If we do fall off of the top of the tree, we get the following oops: BUG: unable to handle kernel NULL pointer dereference at 0000000000000018 IP: [<ffffffff8136cea7>] assoc_array_gc+0x2f7/0x540 PGD dae15067 PUD cfc24067 PMD 0 Oops: 0000 [#1] SMP Modules linked in: xt_nat xt_mark nf_conntrack_netbios_ns nf_conntrack_broadcast ip6t_rpfilter ip6t_REJECT xt_conntrack ebtable_nat ebtable_broute bridge stp llc ebtable_filter ebtables ip6table_ni CPU: 0 PID: 26011 Comm: kworker/0:1 Not tainted 3.14.9-200.fc20.x86_64 #1 Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011 Workqueue: events key_garbage_collector task: ffff8800918bd580 ti: ffff8800aac14000 task.ti: ffff8800aac14000 RIP: 0010:[<ffffffff8136cea7>] [<ffffffff8136cea7>] assoc_array_gc+0x2f7/0x540 RSP: 0018:ffff8800aac15d40 EFLAGS: 00010206 RAX: 0000000000000000 RBX: 0000000000000000 RCX: ffff8800aaecacc0 RDX: ffff8800daecf440 RSI: 0000000000000001 RDI: ffff8800aadc2bc0 RBP: ffff8800aac15da8 R08: 0000000000000001 R09: 0000000000000003 R10: ffffffff8136ccc7 R11: 0000000000000000 R12: 0000000000000000 R13: 0000000000000000 R14: 0000000000000070 R15: 0000000000000001 FS: 0000000000000000(0000) GS:ffff88011fc00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: 0000000000000018 CR3: 00000000db10d000 CR4: 00000000000006f0 Stack: ffff8800aac15d50 0000000000000011 ffff8800aac15db8 ffffffff812e2a70 ffff880091a00600 0000000000000000 ffff8800aadc2bc3 00000000cd42c987 ffff88003702df20 ffff88003702dfa0 0000000053b65c09 ffff8800aac15fd8 Call Trace: [<ffffffff812e2a70>] ? keyring_detect_cycle_iterator+0x30/0x30 [<ffffffff812e3e75>] keyring_gc+0x75/0x80 [<ffffffff812e1424>] key_garbage_collector+0x154/0x3c0 [<ffffffff810a67b6>] process_one_work+0x176/0x430 [<ffffffff810a744b>] worker_thread+0x11b/0x3a0 [<ffffffff810a7330>] ? rescuer_thread+0x3b0/0x3b0 [<ffffffff810ae1a8>] kthread+0xd8/0xf0 [<ffffffff810ae0d0>] ? insert_kthread_work+0x40/0x40 [<ffffffff816ffb7c>] ret_from_fork+0x7c/0xb0 [<ffffffff810ae0d0>] ? 
insert_kthread_work+0x40/0x40 Code: 08 4c 8b 22 0f 84 bf 00 00 00 41 83 c7 01 49 83 e4 fc 41 83 ff 0f 4c 89 65 c0 0f 8f 5a fe ff ff 48 8b 45 c0 4d 63 cf 49 83 c1 02 <4e> 8b 34 c8 4d 85 f6 0f 84 be 00 00 00 41 f6 c6 01 0f 84 92 RIP [<ffffffff8136cea7>] assoc_array_gc+0x2f7/0x540 RSP <ffff8800aac15d40> CR2: 0000000000000018 ---[ end trace 1129028a088c0cbd ]--- Signed-off-by: David Howells <[email protected]> Acked-by: Don Zickus <[email protected]> Signed-off-by: James Morris <[email protected]>
static int tm_cvmx_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
{
    int ret;

    BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));

    if (!cpu_has_feature(CPU_FTR_TM))
        return -ENODEV;

    if (!MSR_TM_ACTIVE(target->thread.regs->msr))
        return -ENODATA;

    /* Flush the state */
    flush_tmregs_to_thread(target);
    flush_fp_to_thread(target);
    flush_altivec_to_thread(target);

    ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.ckvr_state, 0, 33 * sizeof(vector128));
    if (!ret) {
        /*
         * Copy out only the low-order word of vrsave.
         */
        union {
            elf_vrreg_t reg;
            u32 word;
        } vrsave;
        memset(&vrsave, 0, sizeof(vrsave));
        vrsave.word = target->thread.ckvrsave;
        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave, 33 * sizeof(vector128), -1);
    }

    return ret;
}
0
[ "CWE-119", "CWE-787" ]
linux
c1fa0768a8713b135848f78fd43ffc208d8ded70
263,249,057,508,344,630,000,000,000,000,000,000,000
39
powerpc/tm: Flush TM only if CPU has TM feature Commit cd63f3c ("powerpc/tm: Fix saving of TM SPRs in core dump") added code to access TM SPRs in flush_tmregs_to_thread(). However flush_tmregs_to_thread() does not check if TM feature is available on CPU before trying to access TM SPRs in order to copy live state to thread structures. flush_tmregs_to_thread() is indeed guarded by CONFIG_PPC_TRANSACTIONAL_MEM but it might be the case that kernel was compiled with CONFIG_PPC_TRANSACTIONAL_MEM enabled and ran on a CPU without TM feature available, thus rendering the execution of TM instructions that are treated by the CPU as illegal instructions. The fix is just to add proper checking in flush_tmregs_to_thread() if CPU has the TM feature before accessing any TM-specific resource, returning immediately if TM is no available on the CPU. Adding that checking in flush_tmregs_to_thread() instead of in places where it is called, like in vsr_get() and vsr_set(), is better because avoids the same problem cropping up elsewhere. Cc: [email protected] # v4.13+ Fixes: cd63f3c ("powerpc/tm: Fix saving of TM SPRs in core dump") Signed-off-by: Gustavo Romero <[email protected]> Reviewed-by: Cyril Bur <[email protected]> Signed-off-by: Michael Ellerman <[email protected]>
bool LEX::sp_body_finalize_routine(THD *thd)
{
  if (sphead->check_unresolved_goto())
    return true;
  sphead->set_stmt_end(thd);
  sphead->restore_thd_mem_root(thd);
  return false;
}
0
[ "CWE-703" ]
server
39feab3cd31b5414aa9b428eaba915c251ac34a2
103,666,606,417,724,370,000,000,000,000,000,000,000
8
MDEV-26412 Server crash in Item_field::fix_outer_field for INSERT SELECT IF an INSERT/REPLACE SELECT statement contained an ON expression in the top level select and this expression used a subquery with a column reference that could not be resolved then an attempt to resolve this reference as an outer reference caused a crash of the server. This happened because the outer context field in the Name_resolution_context structure was not set to NULL for such references. Rather it pointed to the first element in the select_stack. Note that starting from 10.4 we cannot use the SELECT_LEX::outer_select() method when parsing a SELECT construct. Approved by Oleksandr Byelkin <[email protected]>
virDomainObjFormat(virDomainObjPtr obj, virDomainXMLOptionPtr xmlopt, unsigned int flags)
{
    g_auto(virBuffer) buf = VIR_BUFFER_INITIALIZER;
    int state;
    int reason;
    size_t i;

    state = virDomainObjGetState(obj, &reason);
    virBufferAsprintf(&buf, "<domstatus state='%s' reason='%s' pid='%lld'>\n", virDomainStateTypeToString(state), virDomainStateReasonToString(state, reason), (long long)obj->pid);
    virBufferAdjustIndent(&buf, 2);

    for (i = 0; i < VIR_DOMAIN_TAINT_LAST; i++) {
        if (obj->taint & (1 << i))
            virBufferAsprintf(&buf, "<taint flag='%s'/>\n", virDomainTaintTypeToString(i));
    }

    if (xmlopt->privateData.format && xmlopt->privateData.format(&buf, obj) < 0)
        return NULL;

    if (virDomainDefFormatInternal(obj->def, xmlopt, &buf, flags) < 0)
        return NULL;

    virBufferAdjustIndent(&buf, -2);
    virBufferAddLit(&buf, "</domstatus>\n");

    return virBufferContentAndReset(&buf);
}
0
[ "CWE-212" ]
libvirt
a5b064bf4b17a9884d7d361733737fb614ad8979
26,386,908,924,085,925,000,000,000,000,000,000,000
34
conf: Don't format http cookies unless VIR_DOMAIN_DEF_FORMAT_SECURE is used Starting with 3b076391befc3fe72deb0c244ac6c2b4c100b410 (v6.1.0-122-g3b076391be) we support http cookies. Since they may contain somewhat sensitive information we should not format them into the XML unless VIR_DOMAIN_DEF_FORMAT_SECURE is asserted. Reported-by: Han Han <[email protected]> Signed-off-by: Peter Krempa <[email protected]> Reviewed-by: Erik Skultety <[email protected]>
static void CorrectPhaseLHS(const size_t width,const size_t height, double *fourier_pixels)
{
  register ssize_t
    x;

  ssize_t
    y;

  for (y=0L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L); x++)
      fourier_pixels[y*width+x]*=(-1.0);
}
0
[ "CWE-125" ]
ImageMagick
7c2c5ba5b8e3a0b2b82f56c71dfab74ed4006df7
286,529,086,509,497,570,000,000,000,000,000,000,000
13
https://github.com/ImageMagick/ImageMagick/issues/1588
ff_layout_add_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg, struct list_head *free_me)
{
    pnfs_generic_layout_insert_lseg(lo, lseg, ff_lseg_range_is_after, ff_lseg_merge, free_me);
}
0
[ "CWE-787" ]
linux
ed34695e15aba74f45247f1ee2cf7e09d449f925
268,084,828,869,688,650,000,000,000,000,000,000,000
9
pNFS/flexfiles: fix incorrect size check in decode_nfs_fh() We (adam zabrocki, alexander matrosov, alexander tereshkin, maksym bazalii) observed the check: if (fh->size > sizeof(struct nfs_fh)) should not use the size of the nfs_fh struct which includes an extra two bytes from the size field. struct nfs_fh { unsigned short size; unsigned char data[NFS_MAXFHSIZE]; } but should determine the size from data[NFS_MAXFHSIZE] so the memcpy will not write 2 bytes beyond destination. The proposed fix is to compare against the NFS_MAXFHSIZE directly, as is done elsewhere in fs code base. Fixes: d67ae825a59d ("pnfs/flexfiles: Add the FlexFile Layout Driver") Signed-off-by: Nikola Livic <[email protected]> Signed-off-by: Dan Carpenter <[email protected]> Signed-off-by: Trond Myklebust <[email protected]>
static void pptp_sock_destruct(struct sock *sk)
{
    if (!(sk->sk_state & PPPOX_DEAD)) {
        del_chan(pppox_sk(sk));
        pppox_unbind_sock(sk);
    }
    skb_queue_purge(&sk->sk_receive_queue);
}
0
[ "CWE-200" ]
net
09ccfd238e5a0e670d8178cf50180ea81ae09ae1
99,021,513,986,619,500,000,000,000,000,000,000,000
8
pptp: verify sockaddr_len in pptp_bind() and pptp_connect() Reported-by: Dmitry Vyukov <[email protected]> Signed-off-by: Cong Wang <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
    return local_read(&bpage->page->commit);
}
0
[ "CWE-190" ]
linux-stable
59643d1535eb220668692a5359de22545af579f6
43,356,467,470,173,040,000,000,000,000,000,000,000
4
ring-buffer: Prevent overflow of size in ring_buffer_resize() If the size passed to ring_buffer_resize() is greater than MAX_LONG - BUF_PAGE_SIZE then the DIV_ROUND_UP() will return zero. Here's the details: # echo 18014398509481980 > /sys/kernel/debug/tracing/buffer_size_kb tracing_entries_write() processes this and converts kb to bytes. 18014398509481980 << 10 = 18446744073709547520 and this is passed to ring_buffer_resize() as unsigned long size. size = DIV_ROUND_UP(size, BUF_PAGE_SIZE); Where DIV_ROUND_UP(a, b) is (a + b - 1)/b BUF_PAGE_SIZE is 4080 and here 18446744073709547520 + 4080 - 1 = 18446744073709551599 where 18446744073709551599 is still smaller than 2^64 2^64 - 18446744073709551599 = 17 But now 18446744073709551599 / 4080 = 4521260802379792 and size = size * 4080 = 18446744073709551360 This is checked to make sure its still greater than 2 * 4080, which it is. Then we convert to the number of buffer pages needed. nr_page = DIV_ROUND_UP(size, BUF_PAGE_SIZE) but this time size is 18446744073709551360 and 2^64 - (18446744073709551360 + 4080 - 1) = -3823 Thus it overflows and the resulting number is less than 4080, which makes 3823 / 4080 = 0 an nr_pages is set to this. As we already checked against the minimum that nr_pages may be, this causes the logic to fail as well, and we crash the kernel. There's no reason to have the two DIV_ROUND_UP() (that's just result of historical code changes), clean up the code and fix this bug. Cc: [email protected] # 3.5+ Fixes: 83f40318dab00 ("ring-buffer: Make removal of ring buffer pages atomic") Signed-off-by: Steven Rostedt <[email protected]>
ippAddStrings(
    ipp_t              *ipp,        /* I - IPP message */
    ipp_tag_t          group,       /* I - IPP group */
    ipp_tag_t          value_tag,   /* I - Type of attribute */
    const char         *name,       /* I - Name of attribute */
    int                num_values,  /* I - Number of values */
    const char         *language,   /* I - Language code (@code NULL@ for default) */
    const char * const *values)     /* I - Values */
{
  int             i;          /* Looping var */
  ipp_tag_t       temp_tag;   /* Temporary value tag (masked) */
  ipp_attribute_t *attr;      /* New attribute */
  _ipp_value_t    *value;     /* Current value */
  char            code[32];   /* Language/charset value buffer */


  DEBUG_printf(("ippAddStrings(ipp=%p, group=%02x(%s), value_tag=%02x(%s), name=\"%s\", num_values=%d, language=\"%s\", values=%p)", (void *)ipp, group, ippTagString(group), value_tag, ippTagString(value_tag), name, num_values, language, (void *)values));

 /*
  * Range check input...
  */

  temp_tag = (ipp_tag_t)((int)value_tag & IPP_TAG_CUPS_MASK);

#if 0
  if (!ipp || !name || group < IPP_TAG_ZERO || group == IPP_TAG_END || group >= IPP_TAG_UNSUPPORTED_VALUE || (temp_tag < IPP_TAG_TEXT && temp_tag != IPP_TAG_TEXTLANG && temp_tag != IPP_TAG_NAMELANG) || temp_tag > IPP_TAG_MIMETYPE || num_values < 1)
    return (NULL);

  if ((temp_tag == IPP_TAG_TEXTLANG || temp_tag == IPP_TAG_NAMELANG) != (language != NULL))
    return (NULL);
#else
  if (!ipp || !name || group < IPP_TAG_ZERO || group == IPP_TAG_END || group >= IPP_TAG_UNSUPPORTED_VALUE || num_values < 1)
    return (NULL);
#endif /* 0 */

 /*
  * See if we need to map charset, language, or locale values...
  */

  if (language && ((int)value_tag & IPP_TAG_CUPS_CONST) && strcmp(language, ipp_lang_code(language, code, sizeof(code))))
    value_tag = temp_tag;   /* Don't do a fast copy */
  else if (values && value_tag == (ipp_tag_t)(IPP_TAG_CHARSET | IPP_TAG_CUPS_CONST))
  {
    for (i = 0; i < num_values; i ++)
      if (strcmp(values[i], ipp_get_code(values[i], code, sizeof(code))))
      {
        value_tag = temp_tag;   /* Don't do a fast copy */
        break;
      }
  }
  else if (values && value_tag == (ipp_tag_t)(IPP_TAG_LANGUAGE | IPP_TAG_CUPS_CONST))
  {
    for (i = 0; i < num_values; i ++)
      if (strcmp(values[i], ipp_lang_code(values[i], code, sizeof(code))))
      {
        value_tag = temp_tag;   /* Don't do a fast copy */
        break;
      }
  }

 /*
  * Create the attribute...
  */

  if ((attr = ipp_add_attr(ipp, name, group, value_tag, num_values)) == NULL)
    return (NULL);

 /*
  * Initialize the attribute data...
  */

  for (i = num_values, value = attr->values; i > 0; i --, value ++)
  {
    if (language)
    {
      if (value == attr->values)
      {
        if ((int)value_tag & IPP_TAG_CUPS_CONST)
          value->string.language = (char *)language;
        else
          value->string.language = _cupsStrAlloc(ipp_lang_code(language, code, sizeof(code)));
      }
      else
        value->string.language = attr->values[0].string.language;
    }

    if (values)
    {
      if ((int)value_tag & IPP_TAG_CUPS_CONST)
        value->string.text = (char *)*values++;
      else if (value_tag == IPP_TAG_CHARSET)
        value->string.text = _cupsStrAlloc(ipp_get_code(*values++, code, sizeof(code)));
      else if (value_tag == IPP_TAG_LANGUAGE)
        value->string.text = _cupsStrAlloc(ipp_lang_code(*values++, code, sizeof(code)));
      else
        value->string.text = _cupsStrAlloc(*values++);
    }
  }

  return (attr);
}
0
[ "CWE-120" ]
cups
f24e6cf6a39300ad0c3726a41a4aab51ad54c109
37,345,673,252,027,665,000,000,000,000,000,000,000
112
Fix multiple security/disclosure issues: - CVE-2019-8696 and CVE-2019-8675: Fixed SNMP buffer overflows (rdar://51685251) - Fixed IPP buffer overflow (rdar://50035411) - Fixed memory disclosure issue in the scheduler (rdar://51373853) - Fixed DoS issues in the scheduler (rdar://51373929)
ins_typebuf(
    char_u  *str,
    int     noremap,
    int     offset,
    int     nottyped,
    int     silent)
{
    char_u  *s1, *s2;
    int     newlen;
    int     addlen;
    int     i;
    int     newoff;
    int     val;
    int     nrm;

    init_typebuf();
    if (++typebuf.tb_change_cnt == 0)
        typebuf.tb_change_cnt = 1;
    state_no_longer_safe("ins_typebuf()");

    addlen = (int)STRLEN(str);

    if (offset == 0 && addlen <= typebuf.tb_off)
    {
        /*
         * Easy case: there is room in front of typebuf.tb_buf[typebuf.tb_off]
         */
        typebuf.tb_off -= addlen;
        mch_memmove(typebuf.tb_buf + typebuf.tb_off, str, (size_t)addlen);
    }
    else if (typebuf.tb_len == 0 && typebuf.tb_buflen >= addlen + 3 * (MAXMAPLEN + 4))
    {
        /*
         * Buffer is empty and string fits in the existing buffer.
         * Leave some space before and after, if possible.
         */
        typebuf.tb_off = (typebuf.tb_buflen - addlen - 3 * (MAXMAPLEN + 4)) / 2;
        mch_memmove(typebuf.tb_buf + typebuf.tb_off, str, (size_t)addlen);
    }
    else
    {
        int extra;

        /*
         * Need to allocate a new buffer.
         * In typebuf.tb_buf there must always be room for 3 * (MAXMAPLEN + 4)
         * characters. We add some extra room to avoid having to allocate too
         * often.
         */
        newoff = MAXMAPLEN + 4;
        extra = addlen + newoff + 4 * (MAXMAPLEN + 4);
        if (typebuf.tb_len > 2147483647 - extra)
        {
            // string is getting too long for a 32 bit int
            emsg(_(e_command_too_complex));    // also calls flush_buffers
            setcursor();
            return FAIL;
        }
        newlen = typebuf.tb_len + extra;
        s1 = alloc(newlen);
        if (s1 == NULL)    // out of memory
            return FAIL;
        s2 = alloc(newlen);
        if (s2 == NULL)    // out of memory
        {
            vim_free(s1);
            return FAIL;
        }
        typebuf.tb_buflen = newlen;

        // copy the old chars, before the insertion point
        mch_memmove(s1 + newoff, typebuf.tb_buf + typebuf.tb_off, (size_t)offset);

        // copy the new chars
        mch_memmove(s1 + newoff + offset, str, (size_t)addlen);

        // copy the old chars, after the insertion point, including the NUL at
        // the end
        mch_memmove(s1 + newoff + offset + addlen, typebuf.tb_buf + typebuf.tb_off + offset, (size_t)(typebuf.tb_len - offset + 1));
        if (typebuf.tb_buf != typebuf_init)
            vim_free(typebuf.tb_buf);
        typebuf.tb_buf = s1;

        mch_memmove(s2 + newoff, typebuf.tb_noremap + typebuf.tb_off, (size_t)offset);
        mch_memmove(s2 + newoff + offset + addlen, typebuf.tb_noremap + typebuf.tb_off + offset, (size_t)(typebuf.tb_len - offset));
        if (typebuf.tb_noremap != noremapbuf_init)
            vim_free(typebuf.tb_noremap);
        typebuf.tb_noremap = s2;

        typebuf.tb_off = newoff;
    }
    typebuf.tb_len += addlen;

    // If noremap == REMAP_SCRIPT: do remap script-local mappings.
    if (noremap == REMAP_SCRIPT)
        val = RM_SCRIPT;
    else if (noremap == REMAP_SKIP)
        val = RM_ABBR;
    else
        val = RM_NONE;

    /*
     * Adjust typebuf.tb_noremap[] for the new characters:
     * If noremap == REMAP_NONE or REMAP_SCRIPT: new characters are
     * (sometimes) not remappable
     * If noremap == REMAP_YES: all the new characters are mappable
     * If noremap > 0: "noremap" characters are not remappable, the rest
     * mappable
     */
    if (noremap == REMAP_SKIP)
        nrm = 1;
    else if (noremap < 0)
        nrm = addlen;
    else
        nrm = noremap;
    for (i = 0; i < addlen; ++i)
        typebuf.tb_noremap[typebuf.tb_off + i + offset] = (--nrm >= 0) ? val : RM_YES;

    // tb_maplen and tb_silent only remember the length of mapped and/or
    // silent mappings at the start of the buffer, assuming that a mapped
    // sequence doesn't result in typed characters.
    if (nottyped || typebuf.tb_maplen > offset)
        typebuf.tb_maplen += addlen;
    if (silent || typebuf.tb_silent > offset)
    {
        typebuf.tb_silent += addlen;
        cmd_silent = TRUE;
    }
    if (typebuf.tb_no_abbr_cnt && offset == 0)    // and not used for abbrev.s
        typebuf.tb_no_abbr_cnt += addlen;

    return OK;
}
0
[ "CWE-125" ]
vim
a4bc2dd7cccf5a4a9f78b58b6f35a45d17164323
205,046,639,734,810,040,000,000,000,000,000,000,000
139
patch 8.2.4233: crash when recording and using Select mode Problem: Crash when recording and using Select mode. Solution: When deleting the last recorded character check there is something to delete.
static int aes_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, const unsigned char *in, size_t len)
{
    EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);
    if (!xctx->xts.key1 || !xctx->xts.key2)
        return 0;
    if (!out || !in || len < AES_BLOCK_SIZE)
        return 0;
    if (xctx->stream)
        (*xctx->stream) (in, out, len, xctx->xts.key1, xctx->xts.key2, EVP_CIPHER_CTX_iv_noconst(ctx));
    else if (CRYPTO_xts128_encrypt(&xctx->xts, EVP_CIPHER_CTX_iv_noconst(ctx), in, out, len, EVP_CIPHER_CTX_encrypting(ctx)))
        return 0;
    return 1;
}
0
[ "CWE-125" ]
openssl
2198b3a55de681e1f3c23edb0586afe13f438051
204,699,869,549,394,840,000,000,000,000,000,000,000
18
crypto/evp: harden AEAD ciphers. Originally a crash in 32-bit build was reported CHACHA20-POLY1305 cipher. The crash is triggered by truncated packet and is result of excessive hashing to the edge of accessible memory. Since hash operation is read-only it is not considered to be exploitable beyond a DoS condition. Other ciphers were hardened. Thanks to Robert Święcki for report. CVE-2017-3731 Reviewed-by: Rich Salz <[email protected]>
v8::TryCatch* catcher() {
    return thread_local_top_.catcher_;
}
0
[ "CWE-20", "CWE-119" ]
node
530af9cb8e700e7596b3ec812bad123c9fa06356
43,778,826,257,613,570,000,000,000,000,000,000,000
3
v8: Interrupts must not mask stack overflow. Backport of https://codereview.chromium.org/339883002