func (string, lengths 0 to 484k) | target (int64, values 0 to 1) | cwe (list, lengths 0 to 4) | project (string, 799 classes) | commit_id (string, length 40) | hash (float64, approx. 1.2e24 to 3.4e29) | size (int64, 1 to 24k) | message (string, lengths 0 to 13.3k) |
---|---|---|---|---|---|---|---|
static void snd_msndmidi_free(struct snd_rawmidi *rmidi)
{
struct snd_msndmidi *mpu = rmidi->private_data;
kfree(mpu);
}
| 0 |
[
"CWE-125",
"CWE-401"
] |
linux
|
20e2b791796bd68816fa115f12be5320de2b8021
| 286,100,333,107,821,470,000,000,000,000,000,000,000 | 5 |
ALSA: msnd: Optimize / harden DSP and MIDI loops
The ISA msnd drivers fetch the ring-buffer head, tail and size values
inside their loops. Such code is inefficient and fragile.
This patch optimizes the loops and adds a sanity check to avoid
endless looping.
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=196131
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=196133
Signed-off-by: Takashi Iwai <[email protected]>
|
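The hardening pattern described in the commit message above (read the ring-buffer indices once, validate them, and bound the number of loop iterations) can be illustrated with a small self-contained C sketch; the names and the fixed-size buffer below are illustrative only, not the actual msnd driver code.
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 64

/* Drain a ring buffer: fetch and validate the indices once, and bound the
 * walk so a corrupted tail value can never cause an endless loop. */
static int ring_drain(const uint8_t buf[RING_SIZE], unsigned int head,
                      unsigned int tail)
{
    unsigned int processed = 0;

    if (head >= RING_SIZE || tail >= RING_SIZE)
        return -1;                      /* indices from the device are bogus */

    while (head != tail && processed < RING_SIZE) {   /* hard upper bound */
        printf("byte %u: 0x%02x\n", processed, buf[head]);
        head = (head + 1) % RING_SIZE;
        processed++;
    }
    return (int)processed;
}

int main(void)
{
    uint8_t buf[RING_SIZE] = { 0xde, 0xad, 0xbe, 0xef };
    return ring_drain(buf, 0, 4) == 4 ? 0 : 1;
}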
void *Virtual_tmp_table::operator new(size_t size, THD *thd) throw()
{
return (Virtual_tmp_table *) alloc_root(thd->mem_root, size);
}
| 0 |
[] |
server
|
ff77a09bda884fe6bf3917eb29b9d3a2f53f919b
| 232,889,591,908,010,520,000,000,000,000,000,000,000 | 4 |
MDEV-22464 Server crash on UPDATE with nested subquery
ref_pointer_array[] was uninitialized because setup_fields() got an empty
fields list. mysql_multi_update() for some reason does that by
substituting the fields list with the empty total_list for the
mysql_select() call (this looks like a wrong merge, since total_list is not
used anywhere else and is always empty). One fix would be to return
the original fields list, but that fails the update_use_source.test
case:
--error ER_BAD_FIELD_ERROR
update v1 set t1c1=2 order by 1;
Actually, not failing the above seems to be OK.
The other fix is to keep resolve_in_select_list false (which keeps
the outer context from being resolved in
Item_ref::fix_fields()). This fix is more consistent with how SELECT
behaves:
--error ER_SUBQUERY_NO_1_ROW
select a from t1 where a= (select 2 from t1 having (a = 3));
So this patch implements the latter fix.
|
int tls1_ec_curve_id2nid(int curve_id)
{
/* ECC curves from draft-ietf-tls-ecc-12.txt (Oct. 17, 2005) */
if ((curve_id < 1) || ((unsigned int)curve_id >
sizeof(nid_list)/sizeof(nid_list[0])))
return 0;
return nid_list[curve_id-1].nid;
}
| 0 |
[] |
openssl
|
80bd7b41b30af6ee96f519e629463583318de3b0
| 7,826,350,341,254,500,000,000,000,000,000,000,000 | 8 |
Fix SRP ciphersuite DoS vulnerability.
If a client attempted to use an SRP ciphersuite and it had not been
set up correctly it would crash with a null pointer read. A malicious
server could exploit this in a DoS attack.
Thanks to Joonas Kuorilehto and Riku Hietamäki from Codenomicon
for reporting this issue.
CVE-2014-2970
Reviewed-by: Tim Hudson <[email protected]>
|
ews_delete_type_to_str (EwsDeleteType delete_type)
{
switch (delete_type) {
case EWS_HARD_DELETE:
return "HardDelete";
case EWS_SOFT_DELETE:
return "SoftDelete";
case EWS_MOVE_TO_DELETED_ITEMS:
return "MoveToDeletedItems";
}
return NULL;
}
| 0 |
[
"CWE-295"
] |
evolution-ews
|
915226eca9454b8b3e5adb6f2fff9698451778de
| 167,904,846,957,650,380,000,000,000,000,000,000,000 | 12 |
I#27 - SSL Certificates are not validated
This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too.
Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27
|
static CURLcode ossl_connect_step2(struct Curl_easy *data,
struct connectdata *conn, int sockindex)
{
int err;
struct ssl_connect_data *connssl = &conn->ssl[sockindex];
struct ssl_backend_data *backend = connssl->backend;
DEBUGASSERT(ssl_connect_2 == connssl->connecting_state
|| ssl_connect_2_reading == connssl->connecting_state
|| ssl_connect_2_writing == connssl->connecting_state);
ERR_clear_error();
err = SSL_connect(backend->handle);
#ifndef HAVE_KEYLOG_CALLBACK
if(Curl_tls_keylog_enabled()) {
/* If key logging is enabled, wait for the handshake to complete and then
* proceed with logging secrets (for TLS 1.2 or older).
*/
ossl_log_tls12_secret(backend->handle, &backend->keylog_done);
}
#endif
/* 1 is fine
0 is "not successful but was shut down controlled"
<0 is "handshake was not successful, because a fatal error occurred" */
if(1 != err) {
int detail = SSL_get_error(backend->handle, err);
if(SSL_ERROR_WANT_READ == detail) {
connssl->connecting_state = ssl_connect_2_reading;
return CURLE_OK;
}
if(SSL_ERROR_WANT_WRITE == detail) {
connssl->connecting_state = ssl_connect_2_writing;
return CURLE_OK;
}
#ifdef SSL_ERROR_WANT_ASYNC
if(SSL_ERROR_WANT_ASYNC == detail) {
connssl->connecting_state = ssl_connect_2;
return CURLE_OK;
}
#endif
else {
/* untreated error */
unsigned long errdetail;
char error_buffer[256]="";
CURLcode result;
long lerr;
int lib;
int reason;
/* the connection failed, we're not waiting for anything else. */
connssl->connecting_state = ssl_connect_2;
/* Get the earliest error code from the thread's error queue and removes
the entry. */
errdetail = ERR_get_error();
/* Extract which lib and reason */
lib = ERR_GET_LIB(errdetail);
reason = ERR_GET_REASON(errdetail);
if((lib == ERR_LIB_SSL) &&
((reason == SSL_R_CERTIFICATE_VERIFY_FAILED) ||
(reason == SSL_R_SSLV3_ALERT_CERTIFICATE_EXPIRED))) {
result = CURLE_PEER_FAILED_VERIFICATION;
lerr = SSL_get_verify_result(backend->handle);
if(lerr != X509_V_OK) {
SSL_SET_OPTION_LVALUE(certverifyresult) = lerr;
msnprintf(error_buffer, sizeof(error_buffer),
"SSL certificate problem: %s",
X509_verify_cert_error_string(lerr));
}
else
/* strcpy() is fine here as long as the string fits within
error_buffer */
strcpy(error_buffer, "SSL certificate verification failed");
}
else {
result = CURLE_SSL_CONNECT_ERROR;
ossl_strerror(errdetail, error_buffer, sizeof(error_buffer));
}
/* detail is already set to the SSL error above */
/* If we e.g. use SSLv2 request-method and the server doesn't like us
* (RST connection etc.), OpenSSL gives no explanation whatsoever and
* the SO_ERROR is also lost.
*/
if(CURLE_SSL_CONNECT_ERROR == result && errdetail == 0) {
const char * const hostname = SSL_HOST_NAME();
#ifndef CURL_DISABLE_PROXY
const long int port = SSL_IS_PROXY() ? conn->port : conn->remote_port;
#else
const long int port = conn->remote_port;
#endif
char extramsg[80]="";
int sockerr = SOCKERRNO;
if(sockerr && detail == SSL_ERROR_SYSCALL)
Curl_strerror(sockerr, extramsg, sizeof(extramsg));
failf(data, OSSL_PACKAGE " SSL_connect: %s in connection to %s:%ld ",
extramsg[0] ? extramsg : SSL_ERROR_to_str(detail),
hostname, port);
return result;
}
/* Could be a CERT problem */
failf(data, "%s", error_buffer);
return result;
}
}
else {
/* we have been connected fine, we're not waiting for anything else. */
connssl->connecting_state = ssl_connect_3;
/* Informational message */
infof(data, "SSL connection using %s / %s\n",
SSL_get_version(backend->handle),
SSL_get_cipher(backend->handle));
#ifdef HAS_ALPN
/* Sets data and len to negotiated protocol, len is 0 if no protocol was
* negotiated
*/
if(conn->bits.tls_enable_alpn) {
const unsigned char *neg_protocol;
unsigned int len;
SSL_get0_alpn_selected(backend->handle, &neg_protocol, &len);
if(len != 0) {
infof(data, "ALPN, server accepted to use %.*s\n", len, neg_protocol);
#ifdef USE_NGHTTP2
if(len == NGHTTP2_PROTO_VERSION_ID_LEN &&
!memcmp(NGHTTP2_PROTO_VERSION_ID, neg_protocol, len)) {
conn->negnpn = CURL_HTTP_VERSION_2;
}
else
#endif
if(len == ALPN_HTTP_1_1_LENGTH &&
!memcmp(ALPN_HTTP_1_1, neg_protocol, ALPN_HTTP_1_1_LENGTH)) {
conn->negnpn = CURL_HTTP_VERSION_1_1;
}
}
else
infof(data, "ALPN, server did not agree to a protocol\n");
Curl_multiuse_state(data, conn->negnpn == CURL_HTTP_VERSION_2 ?
BUNDLE_MULTIPLEX : BUNDLE_NO_MULTIUSE);
}
#endif
return CURLE_OK;
}
}
| 0 |
[
"CWE-290"
] |
curl
|
b09c8ee15771c614c4bf3ddac893cdb12187c844
| 323,317,023,059,594,600,000,000,000,000,000,000,000 | 156 |
vtls: add 'isproxy' argument to Curl_ssl_get/addsessionid()
To make sure we set and extract the correct session.
Reported-by: Mingtao Yang
Bug: https://curl.se/docs/CVE-2021-22890.html
CVE-2021-22890
|
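The commit message above is terse, so here is a hypothetical C sketch of why a TLS session cache needs to be keyed on proxy-vs-origin as well as host and port; none of the names below are libcurl's actual Curl_ssl_getsessionid()/Curl_ssl_addsessionid() signatures.
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical session-cache entry; libcurl's real cache is more involved. */
struct ssl_session_entry {
    char host[64];
    int  port;
    bool isproxy;      /* distinguishes proxy sessions from origin sessions */
    int  session_id;   /* stand-in for the real TLS session object */
};

static struct ssl_session_entry cache[8];
static int cache_used;

/* Only report a hit when host, port *and* proxy-ness all match, so a proxy
 * session is never reused for the origin server (or vice versa). */
static int session_get(const char *host, int port, bool isproxy, int *out)
{
    for (int i = 0; i < cache_used; i++) {
        if (cache[i].port == port && cache[i].isproxy == isproxy &&
            strcmp(cache[i].host, host) == 0) {
            *out = cache[i].session_id;
            return 1;
        }
    }
    return 0;
}

static void session_add(const char *host, int port, bool isproxy, int id)
{
    if (cache_used >= (int)(sizeof(cache) / sizeof(cache[0])))
        return;
    snprintf(cache[cache_used].host, sizeof(cache[cache_used].host), "%s", host);
    cache[cache_used].port = port;
    cache[cache_used].isproxy = isproxy;
    cache[cache_used].session_id = id;
    cache_used++;
}

int main(void)
{
    int id;
    session_add("example.com", 443, true, 1);           /* proxy session */
    printf("origin lookup hits: %d\n", session_get("example.com", 443, false, &id));
    printf("proxy lookup hits:  %d\n", session_get("example.com", 443, true, &id));
    return 0;
}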
static void dvb_frontend_get_frequency_limits(struct dvb_frontend *fe,
u32 *freq_min, u32 *freq_max)
{
*freq_min = max(fe->ops.info.frequency_min, fe->ops.tuner_ops.info.frequency_min);
if (fe->ops.info.frequency_max == 0)
*freq_max = fe->ops.tuner_ops.info.frequency_max;
else if (fe->ops.tuner_ops.info.frequency_max == 0)
*freq_max = fe->ops.info.frequency_max;
else
*freq_max = min(fe->ops.info.frequency_max, fe->ops.tuner_ops.info.frequency_max);
if (*freq_min == 0 || *freq_max == 0)
dev_warn(fe->dvb->device, "DVB: adapter %i frontend %u frequency limits undefined - fix the driver\n",
fe->dvb->num, fe->id);
}
| 0 |
[
"CWE-416"
] |
linux
|
b1cb7372fa822af6c06c8045963571d13ad6348b
| 297,477,499,185,027,850,000,000,000,000,000,000,000 | 16 |
dvb_frontend: don't use-after-free the frontend struct
dvb_frontend_invoke_release() may free the frontend struct.
So, the free logic can't update it anymore after calling it.
That's OK, as __dvb_frontend_free() is called only when the
krefs are zeroed, so nobody is using it anymore.
That should fix the following KASAN error:
The KASAN report looks like this (running on kernel 3e0cc09a3a2c40ec1ffb6b4e12da86e98feccb11 (4.14-rc5+)):
==================================================================
BUG: KASAN: use-after-free in __dvb_frontend_free+0x113/0x120
Write of size 8 at addr ffff880067d45a00 by task kworker/0:1/24
CPU: 0 PID: 24 Comm: kworker/0:1 Not tainted 4.14.0-rc5-43687-g06ab8a23e0e6 #545
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
Workqueue: usb_hub_wq hub_event
Call Trace:
__dump_stack lib/dump_stack.c:16
dump_stack+0x292/0x395 lib/dump_stack.c:52
print_address_description+0x78/0x280 mm/kasan/report.c:252
kasan_report_error mm/kasan/report.c:351
kasan_report+0x23d/0x350 mm/kasan/report.c:409
__asan_report_store8_noabort+0x1c/0x20 mm/kasan/report.c:435
__dvb_frontend_free+0x113/0x120 drivers/media/dvb-core/dvb_frontend.c:156
dvb_frontend_put+0x59/0x70 drivers/media/dvb-core/dvb_frontend.c:176
dvb_frontend_detach+0x120/0x150 drivers/media/dvb-core/dvb_frontend.c:2803
dvb_usb_adapter_frontend_exit+0xd6/0x160 drivers/media/usb/dvb-usb/dvb-usb-dvb.c:340
dvb_usb_adapter_exit drivers/media/usb/dvb-usb/dvb-usb-init.c:116
dvb_usb_exit+0x9b/0x200 drivers/media/usb/dvb-usb/dvb-usb-init.c:132
dvb_usb_device_exit+0xa5/0xf0 drivers/media/usb/dvb-usb/dvb-usb-init.c:295
usb_unbind_interface+0x21c/0xa90 drivers/usb/core/driver.c:423
__device_release_driver drivers/base/dd.c:861
device_release_driver_internal+0x4f1/0x5c0 drivers/base/dd.c:893
device_release_driver+0x1e/0x30 drivers/base/dd.c:918
bus_remove_device+0x2f4/0x4b0 drivers/base/bus.c:565
device_del+0x5c4/0xab0 drivers/base/core.c:1985
usb_disable_device+0x1e9/0x680 drivers/usb/core/message.c:1170
usb_disconnect+0x260/0x7a0 drivers/usb/core/hub.c:2124
hub_port_connect drivers/usb/core/hub.c:4754
hub_port_connect_change drivers/usb/core/hub.c:5009
port_event drivers/usb/core/hub.c:5115
hub_event+0x1318/0x3740 drivers/usb/core/hub.c:5195
process_one_work+0xc73/0x1d90 kernel/workqueue.c:2119
worker_thread+0x221/0x1850 kernel/workqueue.c:2253
kthread+0x363/0x440 kernel/kthread.c:231
ret_from_fork+0x2a/0x40 arch/x86/entry/entry_64.S:431
Allocated by task 24:
save_stack_trace+0x1b/0x20 arch/x86/kernel/stacktrace.c:59
save_stack+0x43/0xd0 mm/kasan/kasan.c:447
set_track mm/kasan/kasan.c:459
kasan_kmalloc+0xad/0xe0 mm/kasan/kasan.c:551
kmem_cache_alloc_trace+0x11e/0x2d0 mm/slub.c:2772
kmalloc ./include/linux/slab.h:493
kzalloc ./include/linux/slab.h:666
dtt200u_fe_attach+0x4c/0x110 drivers/media/usb/dvb-usb/dtt200u-fe.c:212
dtt200u_frontend_attach+0x35/0x80 drivers/media/usb/dvb-usb/dtt200u.c:136
dvb_usb_adapter_frontend_init+0x32b/0x660 drivers/media/usb/dvb-usb/dvb-usb-dvb.c:286
dvb_usb_adapter_init drivers/media/usb/dvb-usb/dvb-usb-init.c:86
dvb_usb_init drivers/media/usb/dvb-usb/dvb-usb-init.c:162
dvb_usb_device_init+0xf73/0x17f0 drivers/media/usb/dvb-usb/dvb-usb-init.c:277
dtt200u_usb_probe+0xa1/0xe0 drivers/media/usb/dvb-usb/dtt200u.c:155
usb_probe_interface+0x35d/0x8e0 drivers/usb/core/driver.c:361
really_probe drivers/base/dd.c:413
driver_probe_device+0x610/0xa00 drivers/base/dd.c:557
__device_attach_driver+0x230/0x290 drivers/base/dd.c:653
bus_for_each_drv+0x161/0x210 drivers/base/bus.c:463
__device_attach+0x26b/0x3c0 drivers/base/dd.c:710
device_initial_probe+0x1f/0x30 drivers/base/dd.c:757
bus_probe_device+0x1eb/0x290 drivers/base/bus.c:523
device_add+0xd0b/0x1660 drivers/base/core.c:1835
usb_set_configuration+0x104e/0x1870 drivers/usb/core/message.c:1932
generic_probe+0x73/0xe0 drivers/usb/core/generic.c:174
usb_probe_device+0xaf/0xe0 drivers/usb/core/driver.c:266
really_probe drivers/base/dd.c:413
driver_probe_device+0x610/0xa00 drivers/base/dd.c:557
__device_attach_driver+0x230/0x290 drivers/base/dd.c:653
bus_for_each_drv+0x161/0x210 drivers/base/bus.c:463
__device_attach+0x26b/0x3c0 drivers/base/dd.c:710
device_initial_probe+0x1f/0x30 drivers/base/dd.c:757
bus_probe_device+0x1eb/0x290 drivers/base/bus.c:523
device_add+0xd0b/0x1660 drivers/base/core.c:1835
usb_new_device+0x7b8/0x1020 drivers/usb/core/hub.c:2457
hub_port_connect drivers/usb/core/hub.c:4903
hub_port_connect_change drivers/usb/core/hub.c:5009
port_event drivers/usb/core/hub.c:5115
hub_event+0x194d/0x3740 drivers/usb/core/hub.c:5195
process_one_work+0xc73/0x1d90 kernel/workqueue.c:2119
worker_thread+0x221/0x1850 kernel/workqueue.c:2253
kthread+0x363/0x440 kernel/kthread.c:231
ret_from_fork+0x2a/0x40 arch/x86/entry/entry_64.S:431
Freed by task 24:
save_stack_trace+0x1b/0x20 arch/x86/kernel/stacktrace.c:59
save_stack+0x43/0xd0 mm/kasan/kasan.c:447
set_track mm/kasan/kasan.c:459
kasan_slab_free+0x72/0xc0 mm/kasan/kasan.c:524
slab_free_hook mm/slub.c:1390
slab_free_freelist_hook mm/slub.c:1412
slab_free mm/slub.c:2988
kfree+0xf6/0x2f0 mm/slub.c:3919
dtt200u_fe_release+0x3c/0x50 drivers/media/usb/dvb-usb/dtt200u-fe.c:202
dvb_frontend_invoke_release.part.13+0x1c/0x30 drivers/media/dvb-core/dvb_frontend.c:2790
dvb_frontend_invoke_release drivers/media/dvb-core/dvb_frontend.c:2789
__dvb_frontend_free+0xad/0x120 drivers/media/dvb-core/dvb_frontend.c:153
dvb_frontend_put+0x59/0x70 drivers/media/dvb-core/dvb_frontend.c:176
dvb_frontend_detach+0x120/0x150 drivers/media/dvb-core/dvb_frontend.c:2803
dvb_usb_adapter_frontend_exit+0xd6/0x160 drivers/media/usb/dvb-usb/dvb-usb-dvb.c:340
dvb_usb_adapter_exit drivers/media/usb/dvb-usb/dvb-usb-init.c:116
dvb_usb_exit+0x9b/0x200 drivers/media/usb/dvb-usb/dvb-usb-init.c:132
dvb_usb_device_exit+0xa5/0xf0 drivers/media/usb/dvb-usb/dvb-usb-init.c:295
usb_unbind_interface+0x21c/0xa90 drivers/usb/core/driver.c:423
__device_release_driver drivers/base/dd.c:861
device_release_driver_internal+0x4f1/0x5c0 drivers/base/dd.c:893
device_release_driver+0x1e/0x30 drivers/base/dd.c:918
bus_remove_device+0x2f4/0x4b0 drivers/base/bus.c:565
device_del+0x5c4/0xab0 drivers/base/core.c:1985
usb_disable_device+0x1e9/0x680 drivers/usb/core/message.c:1170
usb_disconnect+0x260/0x7a0 drivers/usb/core/hub.c:2124
hub_port_connect drivers/usb/core/hub.c:4754
hub_port_connect_change drivers/usb/core/hub.c:5009
port_event drivers/usb/core/hub.c:5115
hub_event+0x1318/0x3740 drivers/usb/core/hub.c:5195
process_one_work+0xc73/0x1d90 kernel/workqueue.c:2119
worker_thread+0x221/0x1850 kernel/workqueue.c:2253
kthread+0x363/0x440 kernel/kthread.c:231
ret_from_fork+0x2a/0x40 arch/x86/entry/entry_64.S:431
The buggy address belongs to the object at ffff880067d45500
which belongs to the cache kmalloc-2048 of size 2048
The buggy address is located 1280 bytes inside of
2048-byte region [ffff880067d45500, ffff880067d45d00)
The buggy address belongs to the page:
page:ffffea00019f5000 count:1 mapcount:0 mapping: (null)
index:0x0 compound_mapcount: 0
flags: 0x100000000008100(slab|head)
raw: 0100000000008100 0000000000000000 0000000000000000 00000001000f000f
raw: dead000000000100 dead000000000200 ffff88006c002d80 0000000000000000
page dumped because: kasan: bad access detected
Memory state around the buggy address:
ffff880067d45900: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff880067d45980: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff880067d45a00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
^
ffff880067d45a80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff880067d45b00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
==================================================================
Fixes: ead666000a5f ("media: dvb_frontend: only use kref after initialized")
Reported-by: Andrey Konovalov <[email protected]>
Suggested-by: Matthias Schwarzott <[email protected]>
Tested-by: Andrey Konovalov <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]>
|
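Beneath the long KASAN report, the rule stated in the commit message is simple: once the release callback may have freed the object, nothing may dereference it again. A generic C sketch of that ordering follows, with invented names unrelated to the dvb_frontend code.
#include <stdio.h>
#include <stdlib.h>

struct widget {
    void (*release)(struct widget *w);   /* may free w */
    int   state;
};

static void widget_release(struct widget *w)
{
    free(w);                             /* after this, w is gone */
}

static void widget_put(struct widget *w)
{
    /* Read everything still needed *before* invoking release ... */
    void (*release)(struct widget *) = w->release;

    if (release)
        release(w);
    /* ... and never dereference w afterwards: it may already be freed. */
}

int main(void)
{
    struct widget *w = malloc(sizeof(*w));
    if (!w)
        return 1;
    w->release = widget_release;
    w->state = 0;
    widget_put(w);
    puts("released without touching the freed object");
    return 0;
}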
int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
struct vm_area_struct *vma)
{
pgd_t *src_pgd, *dst_pgd;
unsigned long next;
unsigned long addr = vma->vm_start;
unsigned long end = vma->vm_end;
/*
* Don't copy ptes where a page fault will fill them correctly.
* Fork becomes much lighter when there are big shared or private
* readonly mappings. The tradeoff is that copy_page_range is more
* efficient than faulting.
*/
if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
if (!vma->anon_vma)
return 0;
}
if (is_vm_hugetlb_page(vma))
return copy_hugetlb_page_range(dst_mm, src_mm, vma);
dst_pgd = pgd_offset(dst_mm, addr);
src_pgd = pgd_offset(src_mm, addr);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(src_pgd))
continue;
if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
vma, addr, next))
return -ENOMEM;
} while (dst_pgd++, src_pgd++, addr = next, addr != end);
return 0;
}
| 0 |
[
"CWE-20"
] |
linux-2.6
|
89f5b7da2a6bad2e84670422ab8192382a5aeb9f
| 108,881,401,337,051,750,000,000,000,000,000,000,000 | 34 |
Reinstate ZERO_PAGE optimization in 'get_user_pages()' and fix XIP
KAMEZAWA Hiroyuki and Oleg Nesterov point out that since the commit
557ed1fa2620dc119adb86b34c614e152a629a80 ("remove ZERO_PAGE") removed
the ZERO_PAGE from the VM mappings, any users of get_user_pages() will
generally now populate the VM with real empty pages needlessly.
We used to get the ZERO_PAGE when we did the "handle_mm_fault()", but
since fault handling no longer uses ZERO_PAGE for new anonymous pages,
we now need to handle that special case in follow_page() instead.
In particular, the removal of ZERO_PAGE effectively removed the core
file writing optimization where we would skip writing pages that had not
been populated at all, and increased memory pressure a lot by allocating
all those useless newly zeroed pages.
This reinstates the optimization by making the unmapped PTE case the
same as for a non-existent page table, which already did this correctly.
While at it, this also fixes the XIP case for follow_page(), where the
caller could not differentiate between the case of a page that simply
could not be used (because it had no "struct page" associated with it)
and a page that just wasn't mapped.
We do that by simply returning an error pointer for pages that could not
be turned into a "struct page *". The error is arbitrarily picked to be
EFAULT, since that was what get_user_pages() already used for the
equivalent IO-mapped page case.
[ Also removed an impossible test for pte_offset_map_lock() failing:
that's not how that function works ]
Acked-by: Oleg Nesterov <[email protected]>
Acked-by: Nick Piggin <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Roland McGrath <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
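The commit message distinguishes a page that simply is not mapped (NULL) from a page that exists but cannot be used (an error pointer). Below is a minimal user-space sketch of that convention, with stand-ins for the kernel's ERR_PTR()/IS_ERR() macros; it is an illustration of the idea, not the follow_page() code.
#include <stdint.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel's ERR_PTR()/IS_ERR() helpers. */
#define EXAMPLE_EFAULT 14
static void *err_ptr(long err)      { return (void *)(intptr_t)-err; }
static int   is_err(const void *p)  { return (uintptr_t)p >= (uintptr_t)-4095; }

struct page { int id; };
static struct page the_page = { 42 };

/* 0 = not mapped (NULL), 1 = mapped normally (struct page), 2 = mapped but
 * with no usable struct page (e.g. an XIP mapping) -> error pointer. */
static struct page *follow_page_sketch(int mapping_kind)
{
    switch (mapping_kind) {
    case 1:  return &the_page;
    case 2:  return err_ptr(EXAMPLE_EFAULT);
    default: return NULL;
    }
}

int main(void)
{
    for (int kind = 0; kind <= 2; kind++) {
        struct page *p = follow_page_sketch(kind);
        if (!p)
            printf("kind %d: not mapped\n", kind);
        else if (is_err(p))
            printf("kind %d: unusable page (error pointer)\n", kind);
        else
            printf("kind %d: page %d\n", kind, p->id);
    }
    return 0;
}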
GF_Err fiel_box_write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_FieldInfoBox *ptr = (GF_FieldInfoBox *)s;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u8(bs, ptr->field_count);
gf_bs_write_u8(bs, ptr->field_order);
return GF_OK;
}
| 0 |
[
"CWE-476"
] |
gpac
|
6170024568f4dda310e98ef7508477b425c58d09
| 278,509,594,242,048,800,000,000,000,000,000,000,000 | 10 |
fixed potential crash - cf #1263
|
char *d_path(const struct path *path, char *buf, int buflen)
{
char *res = buf + buflen;
struct path root;
int error;
/*
* We have various synthetic filesystems that never get mounted. On
* these filesystems dentries are never used for lookup purposes, and
* thus don't need to be hashed. They also don't need a name until a
* user wants to identify the object in /proc/pid/fd/. The little hack
* below allows us to generate a name for these objects on demand:
*
* Some pseudo inodes are mountable. When they are mounted
* path->dentry == path->mnt->mnt_root. In that case don't call d_dname
* and instead have d_path return the mounted path.
*/
if (path->dentry->d_op && path->dentry->d_op->d_dname &&
(!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
rcu_read_lock();
get_fs_root_rcu(current->fs, &root);
error = path_with_deleted(path, &root, &res, &buflen);
rcu_read_unlock();
if (error < 0)
res = ERR_PTR(error);
return res;
}
| 0 |
[
"CWE-362",
"CWE-399"
] |
linux
|
49d31c2f389acfe83417083e1208422b4091cd9e
| 97,646,878,779,109,780,000,000,000,000,000,000,000 | 30 |
dentry name snapshots
take_dentry_name_snapshot() takes a safe snapshot of dentry name;
if the name is a short one, it gets copied into caller-supplied
structure, otherwise an extra reference to external name is grabbed
(those are never modified). In either case the pointer to stable
string is stored into the same structure.
dentry must be held by the caller of take_dentry_name_snapshot(),
but may be freely dropped afterwards - the snapshot will stay
until destroyed by release_dentry_name_snapshot().
Intended use:
struct name_snapshot s;
take_dentry_name_snapshot(&s, dentry);
...
access s.name
...
release_dentry_name_snapshot(&s);
Replaces fsnotify_oldname_...(), gets used in fsnotify to obtain the name
to pass down with event.
Signed-off-by: Al Viro <[email protected]>
|
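A rough user-space sketch of the snapshot idea described above - copy a short name into the caller-supplied structure, otherwise take an extra reference on the shared external name - using illustrative types, not the actual dcache implementation.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative refcounted external name; the real dcache structures differ. */
struct ext_name {
    int  refcount;
    char str[256];
};

struct name_snapshot {
    const char      *name;           /* stable pointer, valid until release   */
    char             inline_buf[32];
    struct ext_name *ext;            /* non-NULL when a shared name is pinned */
};

static void take_name_snapshot(struct name_snapshot *s, struct ext_name *n)
{
    if (strlen(n->str) < sizeof(s->inline_buf)) {
        strcpy(s->inline_buf, n->str);   /* short name: copy it inline */
        s->name = s->inline_buf;
        s->ext = NULL;
    } else {
        n->refcount++;                   /* long name: pin the shared string */
        s->name = n->str;
        s->ext = n;
    }
}

static void release_name_snapshot(struct name_snapshot *s)
{
    if (s->ext && --s->ext->refcount == 0)
        free(s->ext);
}

int main(void)
{
    struct ext_name *n = calloc(1, sizeof(*n));
    struct name_snapshot s;

    if (!n)
        return 1;
    n->refcount = 1;
    strcpy(n->str, "short.txt");
    take_name_snapshot(&s, n);
    printf("snapshot sees: %s\n", s.name);
    release_name_snapshot(&s);
    free(n);
    return 0;
}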
CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
const char *tree, struct cifsTconInfo *tcon,
const struct nls_table *nls_codepage)
{
struct smb_hdr *smb_buffer;
struct smb_hdr *smb_buffer_response;
TCONX_REQ *pSMB;
TCONX_RSP *pSMBr;
unsigned char *bcc_ptr;
int rc = 0;
int length;
__u16 bytes_left, count;
if (ses == NULL)
return -EIO;
smb_buffer = cifs_buf_get();
if (smb_buffer == NULL)
return -ENOMEM;
smb_buffer_response = smb_buffer;
header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX,
NULL /*no tid */ , 4 /*wct */ );
smb_buffer->Mid = GetNextMid(ses->server);
smb_buffer->Uid = ses->Suid;
pSMB = (TCONX_REQ *) smb_buffer;
pSMBr = (TCONX_RSP *) smb_buffer_response;
pSMB->AndXCommand = 0xFF;
pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
bcc_ptr = &pSMB->Password[0];
if ((ses->server->secMode) & SECMODE_USER) {
pSMB->PasswordLength = cpu_to_le16(1); /* minimum */
*bcc_ptr = 0; /* password is null byte */
bcc_ptr++; /* skip password */
/* already aligned so no need to do it below */
} else {
pSMB->PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
/* BB FIXME add code to fail this if NTLMv2 or Kerberos
specified as required (when that support is added to
the vfs in the future) as only NTLM or the much
weaker LANMAN (which we do not send by default) is accepted
by Samba (not sure whether other servers allow
NTLMv2 password here) */
#ifdef CONFIG_CIFS_WEAK_PW_HASH
if ((global_secflags & CIFSSEC_MAY_LANMAN) &&
(ses->server->secType == LANMAN))
calc_lanman_hash(tcon->password, ses->server->cryptkey,
ses->server->secMode &
SECMODE_PW_ENCRYPT ? true : false,
bcc_ptr);
else
#endif /* CIFS_WEAK_PW_HASH */
rc = SMBNTencrypt(tcon->password, ses->server->cryptkey,
bcc_ptr);
bcc_ptr += CIFS_AUTH_RESP_SIZE;
if (ses->capabilities & CAP_UNICODE) {
/* must align unicode strings */
*bcc_ptr = 0; /* null byte password */
bcc_ptr++;
}
}
if (ses->server->secMode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
if (ses->capabilities & CAP_STATUS32) {
smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS;
}
if (ses->capabilities & CAP_DFS) {
smb_buffer->Flags2 |= SMBFLG2_DFS;
}
if (ses->capabilities & CAP_UNICODE) {
smb_buffer->Flags2 |= SMBFLG2_UNICODE;
length =
cifs_strtoUCS((__le16 *) bcc_ptr, tree,
6 /* max utf8 char length in bytes */ *
(/* server len*/ + 256 /* share len */), nls_codepage);
bcc_ptr += 2 * length; /* convert num 16 bit words to bytes */
bcc_ptr += 2; /* skip trailing null */
} else { /* ASCII */
strcpy(bcc_ptr, tree);
bcc_ptr += strlen(tree) + 1;
}
strcpy(bcc_ptr, "?????");
bcc_ptr += strlen("?????");
bcc_ptr += 1;
count = bcc_ptr - &pSMB->Password[0];
pSMB->hdr.smb_buf_length += count;
pSMB->ByteCount = cpu_to_le16(count);
rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length,
0);
/* above now done in SendReceive */
if ((rc == 0) && (tcon != NULL)) {
bool is_unicode;
tcon->tidStatus = CifsGood;
tcon->need_reconnect = false;
tcon->tid = smb_buffer_response->Tid;
bcc_ptr = pByteArea(smb_buffer_response);
bytes_left = get_bcc(smb_buffer_response);
length = strnlen(bcc_ptr, bytes_left - 2);
if (smb_buffer->Flags2 & SMBFLG2_UNICODE)
is_unicode = true;
else
is_unicode = false;
/* skip service field (NB: this field is always ASCII) */
if (length == 3) {
if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') &&
(bcc_ptr[2] == 'C')) {
cFYI(1, "IPC connection");
tcon->ipc = 1;
}
} else if (length == 2) {
if ((bcc_ptr[0] == 'A') && (bcc_ptr[1] == ':')) {
/* the most common case */
cFYI(1, "disk share connection");
}
}
bcc_ptr += length + 1;
bytes_left -= (length + 1);
strncpy(tcon->treeName, tree, MAX_TREE_SIZE);
/* mostly informational -- no need to fail on error here */
kfree(tcon->nativeFileSystem);
tcon->nativeFileSystem = cifs_strndup_from_ucs(bcc_ptr,
bytes_left, is_unicode,
nls_codepage);
cFYI(1, "nativeFileSystem=%s", tcon->nativeFileSystem);
if ((smb_buffer_response->WordCount == 3) ||
(smb_buffer_response->WordCount == 7))
/* field is in same location */
tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport);
else
tcon->Flags = 0;
cFYI(1, "Tcon flags: 0x%x ", tcon->Flags);
} else if ((rc == 0) && tcon == NULL) {
/* all we need to save for IPC$ connection */
ses->ipc_tid = smb_buffer_response->Tid;
}
cifs_buf_release(smb_buffer);
return rc;
}
| 0 |
[
"CWE-20"
] |
linux
|
70945643722ffeac779d2529a348f99567fa5c33
| 216,336,343,629,074,700,000,000,000,000,000,000,000 | 154 |
cifs: always do is_path_accessible check in cifs_mount
Currently, we skip doing the is_path_accessible check in cifs_mount if
there is no prefixpath. I have a report of at least one server however
that allows a TREE_CONNECT to a share that has a DFS referral at its
root. The reporter in this case was using a UNC that had no prefixpath,
so the is_path_accessible check was not triggered and the box later hit
a BUG() because we were chasing a DFS referral on the root dentry for
the mount.
This patch fixes this by removing the check for a zero-length
prefixpath. That should make the is_path_accessible check be done in
this situation and should allow the client to chase the DFS referral at
mount time instead.
Cc: [email protected]
Reported-and-Tested-by: Yogesh Sharma <[email protected]>
Signed-off-by: Jeff Layton <[email protected]>
Signed-off-by: Steve French <[email protected]>
|
void MYSQL_BIN_LOG::set_max_size(ulong max_size_arg)
{
/*
We need to take locks, otherwise this may happen:
new_file() is called, calls open(old_max_size), then before open() starts,
set_max_size() sets max_size to max_size_arg, then open() starts and
uses the old_max_size argument, so max_size_arg has been overwritten and
it's like if the SET command was never run.
*/
DBUG_ENTER("MYSQL_BIN_LOG::set_max_size");
mysql_mutex_lock(&LOCK_log);
if (is_open())
max_size= max_size_arg;
mysql_mutex_unlock(&LOCK_log);
DBUG_VOID_RETURN;
}
| 0 |
[
"CWE-264"
] |
mysql-server
|
48bd8b16fe382be302c6f0b45931be5aa6f29a0e
| 309,723,113,461,544,500,000,000,000,000,000,000,000 | 16 |
Bug#24388753: PRIVILEGE ESCALATION USING MYSQLD_SAFE
[This is the 5.5/5.6 version of the bugfix].
The problem was that it was possible to write log files ending
in .ini/.cnf that later could be parsed as an options file.
This made it possible for users to specify startup options
without the permissions to do so.
This patch fixes the problem by disallowing general query log
and slow query log to be written to files ending in .ini and .cnf.
|
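A minimal sketch of the kind of suffix check the fix describes (rejecting log file names that end in .ini or .cnf); the helpers below are illustrative and not the actual MySQL code.
#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Case-insensitive "does name end with suffix?" check. */
static int ends_with_nocase(const char *name, const char *suffix)
{
    size_t nlen = strlen(name), slen = strlen(suffix);
    if (nlen < slen)
        return 0;
    for (size_t i = 0; i < slen; i++)
        if (tolower((unsigned char)name[nlen - slen + i]) !=
            tolower((unsigned char)suffix[i]))
            return 0;
    return 1;
}

/* Reject log file names that the option-file parser would later pick up. */
static int log_name_forbidden(const char *name)
{
    return ends_with_nocase(name, ".ini") || ends_with_nocase(name, ".cnf");
}

int main(void)
{
    const char *names[] = { "slow.log", "general.CNF", "query.ini" };
    for (int i = 0; i < 3; i++)
        printf("%-12s -> %s\n", names[i],
               log_name_forbidden(names[i]) ? "rejected" : "allowed");
    return 0;
}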
GF_Err rvcc_box_write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_RVCConfigurationBox *ptr = (GF_RVCConfigurationBox*) s;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u16(bs, ptr->predefined_rvc_config);
if (!ptr->predefined_rvc_config) {
gf_bs_write_u16(bs, ptr->rvc_meta_idx);
}
return GF_OK;
}
| 0 |
[
"CWE-787"
] |
gpac
|
77510778516803b7f7402d7423c6d6bef50254c3
| 191,123,464,362,609,550,000,000,000,000,000,000,000 | 14 |
fixed #2255
|
static int mptsas_post_load(void *opaque, int version_id)
{
MPTSASState *s = opaque;
if (s->doorbell_idx > s->doorbell_cnt ||
s->doorbell_cnt > ARRAY_SIZE(s->doorbell_msg) ||
s->doorbell_reply_idx > s->doorbell_reply_size ||
s->doorbell_reply_size > ARRAY_SIZE(s->doorbell_reply) ||
MPTSAS_FIFO_INVALID(s, request_post) ||
MPTSAS_FIFO_INVALID(s, reply_post) ||
MPTSAS_FIFO_INVALID(s, reply_free) ||
s->diagnostic_idx > 4) {
return -EINVAL;
}
return 0;
}
| 0 |
[
"CWE-416"
] |
qemu
|
3791642c8d60029adf9b00bcb4e34d7d8a1aea4d
| 118,018,165,437,364,870,000,000,000,000,000,000,000 | 17 |
mptsas: Remove unused MPTSASState 'pending' field (CVE-2021-3392)
While processing SCSI i/o requests in mptsas_process_scsi_io_request(),
the Megaraid emulator appends new MPTSASRequest object 'req' to
the 's->pending' queue. In case of an error, this same object gets
dequeued in mptsas_free_request() only if SCSIRequest object
'req->sreq' is initialised. This may lead to a use-after-free issue.
Since s->pending is actually not used, simply remove it from
MPTSASState.
Cc: [email protected]
Signed-off-by: Michael Tokarev <[email protected]>
Reviewed-by: Philippe Mathieu-Daudé <[email protected]>
Signed-off-by: Philippe Mathieu-Daudé <[email protected]>
Reported-by: Cheolwoo Myung <[email protected]>
Message-id: [email protected]
Message-Id: <[email protected]>
Suggested-by: Paolo Bonzini <[email protected]>
Reported-by: Cheolwoo Myung <[email protected]>
BugLink: https://bugs.launchpad.net/qemu/+bug/1914236 (CVE-2021-3392)
Fixes: e351b826112 ("hw: Add support for LSI SAS1068 (mptsas) device")
[PMD: Reworded description, added more tags]
Signed-off-by: Philippe Mathieu-Daudé <[email protected]>
Reviewed-by: Peter Maydell <[email protected]>
Signed-off-by: Peter Maydell <[email protected]>
|
static int intAddEntry(Header h, rpmtd td)
{
indexEntry entry;
rpm_data_t data;
int length = 0;
/* Count must always be >= 1 for headerAddEntry. */
if (td->count <= 0)
return 0;
if (hdrchkType(td->type))
return 0;
if (hdrchkData(td->count))
return 0;
data = grabData(td->type, td->data, td->count, &length);
if (data == NULL)
return 0;
/* Allocate more index space if necessary */
if (h->indexUsed == h->indexAlloced) {
h->indexAlloced += INDEX_MALLOC_SIZE;
h->index = xrealloc(h->index, h->indexAlloced * sizeof(*h->index));
}
/* Fill in the index */
entry = h->index + h->indexUsed;
entry->info.tag = td->tag;
entry->info.type = td->type;
entry->info.count = td->count;
entry->info.offset = 0;
entry->data = data;
entry->length = length;
if (h->indexUsed > 0 && td->tag < h->index[h->indexUsed-1].info.tag)
h->sorted = 0;
h->indexUsed++;
return 1;
}
| 0 |
[
"CWE-125"
] |
rpm
|
8f4b3c3cab8922a2022b9e47c71f1ecf906077ef
| 266,924,498,674,138,480,000,000,000,000,000,000,000 | 40 |
hdrblobInit() needs bounds checks too
Users can pass untrusted data to hdrblobInit() and it must be robust
against this.
|
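The principle in the commit message - validate every offset and length in an untrusted header blob before using it - can be sketched generically; the blob layout below is invented for illustration and is not rpm's actual header format.
#include <stdint.h>
#include <stdio.h>

/* Invented blob layout: fixed-size index entries pointing into a data area. */
struct blob_index {
    uint32_t offset;   /* offset into the data area */
    uint32_t length;   /* length of the entry's data */
};

/* Reject any entry whose offset/length leaves the data area; check the sum
 * in a form that cannot itself overflow. */
static int blob_check(const struct blob_index *idx, uint32_t count,
                      uint32_t data_size)
{
    for (uint32_t i = 0; i < count; i++) {
        if (idx[i].offset > data_size ||
            idx[i].length > data_size - idx[i].offset)
            return -1;
    }
    return 0;
}

int main(void)
{
    struct blob_index idx[2] = { { 0, 16 }, { 4096, 64 } };
    printf("first entry only: %d\n", blob_check(idx, 1, 128));
    printf("both entries:     %d\n", blob_check(idx, 2, 128));
    return 0;
}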
lvs_notify_fifo(vector_t *strvec)
{
notify_fifo(strvec, "lvs_", &global_data->lvs_notify_fifo);
}
| 0 |
[
"CWE-200"
] |
keepalived
|
c6247a9ef2c7b33244ab1d3aa5d629ec49f0a067
| 162,062,989,982,617,800,000,000,000,000,000,000,000 | 4 |
Add command line and configuration option to set umask
Issue #1048 identified that files created by keepalived are created
with mode 0666. This commit changes the default to 0644, and also
allows the umask to be specified in the configuration or as a command
line option.
Signed-off-by: Quentin Armitage <[email protected]>
|
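The umask behaviour the commit message relies on can be demonstrated with a few lines of standard C: with a umask of 0022 in effect, a file created with mode 0666 ends up 0644 on disk. The path and umask value below are examples only, not keepalived's configuration.
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
    umask(0022);                          /* example value; 0 would give 0666 */

    /* fopen() creates with mode 0666, so the file ends up 0644 on disk. */
    FILE *f = fopen("/tmp/umask-example.data", "w");
    if (!f)
        return 1;
    fputs("example\n", f);
    fclose(f);
    return 0;
}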
GF_Err moov_Read(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
e = gf_isom_box_array_read(s, bs, moov_AddBox);
if (e) {
return e;
}
else {
if (!((GF_MovieBox *)s)->mvhd) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing MovieHeaderBox\n"));
return GF_ISOM_INVALID_FILE;
}
}
return e;
}
| 0 |
[
"CWE-400",
"CWE-401"
] |
gpac
|
d2371b4b204f0a3c0af51ad4e9b491144dd1225c
| 273,419,362,590,518,230,000,000,000,000,000,000,000 | 15 |
prevent dref memleak on invalid input (#1183)
|
static Expr *sqlite3WindowOffsetExpr(Parse *pParse, Expr *pExpr){
if( 0==sqlite3ExprIsConstant(pExpr) ){
if( IN_RENAME_OBJECT ) sqlite3RenameExprUnmap(pParse, pExpr);
sqlite3ExprDelete(pParse->db, pExpr);
pExpr = sqlite3ExprAlloc(pParse->db, TK_NULL, 0, 0);
}
return pExpr;
}
| 0 |
[
"CWE-476"
] |
sqlite
|
75e95e1fcd52d3ec8282edb75ac8cd0814095d54
| 297,618,267,927,400,100,000,000,000,000,000,000,000 | 8 |
When processing constant integer values in ORDER BY clauses of window
definitions (see check-in [7e4809eadfe99ebf]) be sure to fully disable
the constant value to avoid an invalid pointer dereference if the expression
is ever duplicated. This fixes a crash report from Yongheng and Rui.
FossilOrigin-Name: 1ca0bd982ab1183bbafce0d260e4dceda5eb766ed2e7793374a88d1ae0bdd2ca
|
CImg<T>& pseudoinvert(const bool use_LU=false) {
return get_pseudoinvert(use_LU).move_to(*this);
}
| 0 |
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
| 255,688,013,751,447,660,000,000,000,000,000,000,000 | 3 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in the file do not exceed the file size.
|
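The check described above - dimensions encoded in a file must not imply more pixel data than the file can contain - can be sketched as follows; the 3 bytes-per-pixel figure is an arbitrary example and this is not CImg's actual loader code.
#include <stdint.h>
#include <stdio.h>

/* Reject images whose encoded dimensions imply more pixel data than the
 * file can possibly contain. Dividing instead of multiplying keeps the
 * check itself from overflowing. */
static int dims_plausible(uint64_t width, uint64_t height,
                          uint64_t bytes_per_pixel, uint64_t file_size)
{
    if (width == 0 || height == 0 || bytes_per_pixel == 0)
        return 0;
    if (height > file_size / bytes_per_pixel / width)
        return 0;
    return 1;
}

int main(void)
{
    printf("%d\n", dims_plausible(640, 480, 3, 2 * 1024 * 1024));   /* 1 */
    printf("%d\n", dims_plausible(1u << 20, 1u << 20, 3, 4096));    /* 0 */
    return 0;
}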
static int r_cmd_java_print_import_definitions ( RBinJavaObj *obj ) {
RList * the_list = r_bin_java_get_import_definitions (obj);
char * str = NULL;
RListIter *iter;
r_list_foreach (the_list, iter, str) {
r_cons_printf("import %s;\n", str);
}
r_list_free (the_list);
return true;
}
| 0 |
[
"CWE-703",
"CWE-193"
] |
radare2
|
ced0223c7a1b3b5344af315715cd28fe7c0d9ebc
| 76,072,876,502,320,370,000,000,000,000,000,000,000 | 10 |
Fix unmatched array length in core_java.c (issue #16304) (#16313)
|
static int set_next_request(void)
{
current_req = list_first_entry_or_null(&floppy_reqs, struct request,
queuelist);
if (current_req) {
floppy_errors = 0;
list_del_init(¤t_req->queuelist);
return 1;
}
return 0;
}
| 0 |
[
"CWE-416"
] |
linux
|
f71f01394f742fc4558b3f9f4c7ef4c4cf3b07c8
| 211,983,467,718,790,700,000,000,000,000,000,000,000 | 11 |
floppy: use a statically allocated error counter
Interrupt handler bad_flp_intr() may cause a UAF on the recently freed
request just to increment the error count. There's no point keeping
that one in the request anyway, and since the interrupt handler uses a
static pointer to the error which cannot be kept in sync with the
pending request, better make it use a static error counter that's reset
for each new request. This reset now happens when entering
redo_fd_request() for a new request via set_next_request().
One initial concern about a single error counter was that errors on one
floppy drive could be reported on another one, but this problem is not
real given that the driver uses a single drive at a time, as
PC-compatible controllers also have this limitation by using shared
signals. As such the error count is always for the "current" drive.
Reported-by: Minh Yuan <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Tested-by: Denis Efremov <[email protected]>
Signed-off-by: Willy Tarreau <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static void flush_all_backlogs(void)
{
unsigned int cpu;
get_online_cpus();
for_each_online_cpu(cpu)
queue_work_on(cpu, system_highpri_wq,
per_cpu_ptr(&flush_works, cpu));
for_each_online_cpu(cpu)
flush_work(per_cpu_ptr(&flush_works, cpu));
put_online_cpus();
| 0 |
[
"CWE-476"
] |
linux
|
0ad646c81b2182f7fa67ec0c8c825e0ee165696d
| 20,889,396,293,837,446,000,000,000,000,000,000,000 | 15 |
tun: call dev_get_valid_name() before register_netdevice()
register_netdevice() could fail early when we have an invalid
dev name, in which case ->ndo_uninit() is not called. For tun
device, this is a problem because a timer etc. are already
initialized and it expects ->ndo_uninit() to clean them up.
We could move these initializations into a ->ndo_init() so
that register_netdevice() knows better, however this is still
complicated due to the logic in tun_detach().
Therefore, I choose to just call dev_get_valid_name() before
register_netdevice(), which is quicker and much easier to audit.
And for this specific case, it is already enough.
Fixes: 96442e42429e ("tuntap: choose the txq based on rxq")
Reported-by: Dmitry Alexeev <[email protected]>
Cc: Jason Wang <[email protected]>
Cc: "Michael S. Tsirkin" <[email protected]>
Signed-off-by: Cong Wang <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
u32 gf_m4v_parser_get_obj_type(GF_M4VParser *m4v)
{
if (m4v) return m4v->current_object_type;
return 0;
}
| 0 |
[
"CWE-190",
"CWE-787"
] |
gpac
|
51cdb67ff7c5f1242ac58c5aa603ceaf1793b788
| 77,464,264,708,097,700,000,000,000,000,000,000,000 | 5 |
add safety in avc/hevc/vvc sps/pps/vps ID check - cf #1720 #1721 #1722
|
static int dns_stream_update_io(DnsStream *s) {
int f = 0;
assert(s);
if (s->write_packet && s->n_written < sizeof(s->write_size) + s->write_packet->size)
f |= EPOLLOUT;
else if (!ordered_set_isempty(s->write_queue)) {
dns_packet_unref(s->write_packet);
s->write_packet = ordered_set_steal_first(s->write_queue);
s->write_size = htobe16(s->write_packet->size);
s->n_written = 0;
f |= EPOLLOUT;
}
if (!s->read_packet || s->n_read < sizeof(s->read_size) + s->read_packet->size)
f |= EPOLLIN;
#if ENABLE_DNS_OVER_TLS
/* For handshake and clean closing purposes, TLS can override requested events */
if (s->dnstls_events)
f = s->dnstls_events;
#endif
return sd_event_source_set_io_events(s->io_event_source, f);
}
| 0 |
[
"CWE-416",
"CWE-703"
] |
systemd
|
d973d94dec349fb676fdd844f6fe2ada3538f27c
| 66,641,291,604,355,480,000,000,000,000,000,000,000 | 25 |
resolved: pin stream while calling callbacks for it
These callbacks might unref the stream, but we still have to access it,
so let's ref it explicitly.
Maybe fixes: #10725
|
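The ref-pinning pattern from the commit message (take a reference before invoking a callback that may drop the caller's reference, release it afterwards) in a generic, self-contained C sketch; the types and names are illustrative, not systemd's DnsStream code.
#include <stdio.h>
#include <stdlib.h>

struct stream {
    int refcount;
    void (*on_packet)(struct stream *s);
};

static void stream_ref(struct stream *s)   { s->refcount++; }
static void stream_unref(struct stream *s)
{
    if (--s->refcount == 0) {
        puts("stream freed");
        free(s);
    }
}

/* A callback is allowed to drop its owner's reference... */
static void drop_in_callback(struct stream *s) { stream_unref(s); }

static void process(struct stream *s)
{
    /* ...so pin the stream for the duration of the call, and drop the pin
     * only after we are done touching it. */
    stream_ref(s);
    s->on_packet(s);
    stream_unref(s);        /* safe: our pin kept s alive until here */
}

int main(void)
{
    struct stream *s = calloc(1, sizeof(*s));
    if (!s)
        return 1;
    s->refcount = 1;
    s->on_packet = drop_in_callback;
    process(s);             /* the stream is freed at the final unref */
    return 0;
}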
ns_client_aclmsg(const char *msg, const dns_name_t *name, dns_rdatatype_t type,
dns_rdataclass_t rdclass, char *buf, size_t len) {
char namebuf[DNS_NAME_FORMATSIZE];
char typebuf[DNS_RDATATYPE_FORMATSIZE];
char classbuf[DNS_RDATACLASS_FORMATSIZE];
dns_name_format(name, namebuf, sizeof(namebuf));
dns_rdatatype_format(type, typebuf, sizeof(typebuf));
dns_rdataclass_format(rdclass, classbuf, sizeof(classbuf));
(void)snprintf(buf, len, "%s '%s/%s/%s'", msg, namebuf, typebuf,
classbuf);
}
| 0 |
[
"CWE-617"
] |
bind9
|
15996f0cb15631b95a801e3e88928494a69ad6ee
| 157,506,244,992,937,580,000,000,000,000,000,000,000 | 12 |
ns_client_error() could assert if rcode was overridden to NOERROR
The client->rcode_override was originally created to force the server
to send SERVFAIL in some cases when it would normally have sent FORMERR.
More recently, it was used in a3ba95116ed04594ea59a8124bf781b30367a7a2
commit (part of GL #2790) to force the sending of a TC=1 NOERROR
response, triggering a retry via TCP, when a UDP packet could not be
sent due to ISC_R_MAXSIZE.
This ran afoul of a pre-existing INSIST in ns_client_error() when
RRL was in use. The INSIST was based on the assumption that
ns_client_error() could never result in a non-error rcode. As
that assumption is no longer valid, the INSIST has been removed.
|
static int _proc_do_string(void* data, int maxlen, int write,
void __user *buffer,
size_t *lenp, loff_t *ppos)
{
size_t len;
char __user *p;
char c;
if (!data || !maxlen || !*lenp) {
*lenp = 0;
return 0;
}
if (write) {
len = 0;
p = buffer;
while (len < *lenp) {
if (get_user(c, p++))
return -EFAULT;
if (c == 0 || c == '\n')
break;
len++;
}
if (len >= maxlen)
len = maxlen-1;
if(copy_from_user(data, buffer, len))
return -EFAULT;
((char *) data)[len] = 0;
*ppos += *lenp;
} else {
len = strlen(data);
if (len > maxlen)
len = maxlen;
if (*ppos > len) {
*lenp = 0;
return 0;
}
data += *ppos;
len -= *ppos;
if (len > *lenp)
len = *lenp;
if (len)
if(copy_to_user(buffer, data, len))
return -EFAULT;
if (len < *lenp) {
if(put_user('\n', ((char __user *) buffer) + len))
return -EFAULT;
len++;
}
*lenp = len;
*ppos += len;
}
return 0;
}
| 0 |
[
"CWE-284",
"CWE-264"
] |
linux
|
bfdc0b497faa82a0ba2f9dddcf109231dd519fcc
| 277,779,043,525,983,670,000,000,000,000,000,000,000 | 57 |
sysctl: restrict write access to dmesg_restrict
When dmesg_restrict is set to 1 CAP_SYS_ADMIN is needed to read the kernel
ring buffer. But a root user without CAP_SYS_ADMIN is able to reset
dmesg_restrict to 0.
This is an issue when e.g. LXC (Linux Containers) are used and the complete
user space is running without CAP_SYS_ADMIN. An unprivileged and jailed
root user can bypass the dmesg_restrict protection.
With this patch writing to dmesg_restrict is only allowed when root has
CAP_SYS_ADMIN.
Signed-off-by: Richard Weinberger <[email protected]>
Acked-by: Dan Rosenberg <[email protected]>
Acked-by: Serge E. Hallyn <[email protected]>
Cc: Eric Paris <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: James Morris <[email protected]>
Cc: Eugene Teo <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
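A user-space sketch of the access rule the patch enforces: reads are always allowed, but writes to the knob require an additional privilege check. The boolean below stands in for the kernel's capable(CAP_SYS_ADMIN) test; it is an illustration, not the sysctl handler itself.
#include <stdio.h>

static int dmesg_restrict = 1;

/* Stand-in for the kernel's capable(CAP_SYS_ADMIN) check. */
static int caller_is_admin;

static int restrict_handler(int write, int *value)
{
    if (write) {
        if (!caller_is_admin)
            return -1;               /* -EPERM in the kernel */
        dmesg_restrict = *value;
        return 0;
    }
    *value = dmesg_restrict;         /* reads need no extra privilege */
    return 0;
}

int main(void)
{
    int v = 0;

    caller_is_admin = 0;
    printf("unprivileged write: %d\n", restrict_handler(1, &v));
    caller_is_admin = 1;
    printf("privileged write:   %d\n", restrict_handler(1, &v));
    restrict_handler(0, &v);
    printf("value is now:       %d\n", v);
    return 0;
}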
void * gdImageGifPtr (gdImagePtr im, int *size)
{
void *rv;
gdIOCtx *out = gdNewDynamicCtx (2048, NULL);
if (!_gdImageGifCtx(im, out)) {
rv = gdDPExtractData(out, size);
} else {
rv = NULL;
}
out->gd_free (out);
return rv;
}
| 0 |
[
"CWE-415"
] |
php-src
|
089f7c0bc28d399b0420aa6ef058e4c1c120b2ae
| 271,402,365,854,142,630,000,000,000,000,000,000,000 | 12 |
Sync with upstream
Even though libgd/libgd#492 is not a relevant bug fix for PHP, since
the binding doesn't use the `gdImage*Ptr()` functions at all, we're
porting the fix to stay in sync here.
|
bool ZstdDecompressorImpl::isError(size_t result) {
switch (ZSTD_getErrorCode(result)) {
case ZSTD_error_no_error:
return false;
case ZSTD_error_memory_allocation:
stats_.zstd_memory_error_.inc();
break;
case ZSTD_error_dictionary_corrupted:
case ZSTD_error_dictionary_wrong:
stats_.zstd_dictionary_error_.inc();
break;
case ZSTD_error_checksum_wrong:
stats_.zstd_checksum_wrong_error_.inc();
break;
default:
stats_.zstd_generic_error_.inc();
break;
}
return true;
}
| 0 |
[] |
envoy
|
cb4ef0b09200c720dfdb07e097092dd105450343
| 112,410,785,308,378,700,000,000,000,000,000,000,000 | 20 |
decompressors: stop decompressing upon excessive compression ratio (#733)
Signed-off-by: Dmitry Rozhkov <[email protected]>
Co-authored-by: Ryan Hamilton <[email protected]>
Signed-off-by: Matt Klein <[email protected]>
Signed-off-by: Pradeep Rao <[email protected]>
|
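The mitigation named in the commit message - stop decompressing once the output grows suspiciously large relative to the input - reduces to a simple ratio check; the 100x limit below is an arbitrary example, not Envoy's configured default.
#include <stdint.h>
#include <stdio.h>

/* Abort streaming decompression once the output grows suspiciously large
 * relative to the consumed input. */
static int ratio_ok(uint64_t consumed_in, uint64_t produced_out,
                    uint64_t max_ratio)
{
    if (consumed_in == 0)
        return produced_out == 0;
    return produced_out / consumed_in <= max_ratio;
}

int main(void)
{
    /* 1 KiB of input that expanded to 64 KiB: fine at a 100x limit. */
    printf("%d\n", ratio_ok(1024, 64 * 1024, 100));
    /* The same input expanding to 1 GiB looks like a decompression bomb. */
    printf("%d\n", ratio_ok(1024, 1ULL << 30, 100));
    return 0;
}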
static int hrtimer_reprogram(struct hrtimer *timer,
struct hrtimer_clock_base *base)
{
ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
ktime_t expires = ktime_sub(timer->expires, base->offset);
int res;
/*
* When the callback is running, we do not reprogram the clock event
* device. The timer callback is either running on a different CPU or
* the callback is executed in the hrtimer_interupt context. The
* reprogramming is handled either by the softirq, which called the
* callback or at the end of the hrtimer_interrupt.
*/
if (hrtimer_callback_running(timer))
return 0;
if (expires.tv64 >= expires_next->tv64)
return 0;
/*
* Clockevents returns -ETIME, when the event was in the past.
*/
res = tick_program_event(expires, 0);
if (!IS_ERR_VALUE(res))
*expires_next = expires;
return res;
}
| 0 |
[
"CWE-189"
] |
linux-2.6
|
13788ccc41ceea5893f9c747c59bc0b28f2416c2
| 250,805,017,584,215,570,000,000,000,000,000,000,000 | 28 |
[PATCH] hrtimer: prevent overrun DoS in hrtimer_forward()
hrtimer_forward() does not check for the possible overflow of
timer->expires. This can happen on 64 bit machines with large interval
values and results currently in an endless loop in the softirq because the
expiry value becomes negative and therefor the timer is expired all the
time.
Check for this condition and set the expiry value to the max. expiry time
in the future. The fix should be applied to stable kernel series as well.
Signed-off-by: Thomas Gleixner <[email protected]>
Acked-by: Ingo Molnar <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
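The overflow described in the commit message comes from unchecked addition to the expiry time. Below is a sketch of the clamped addition (assuming a non-negative interval), written independently of the hrtimer code.
#include <stdint.h>
#include <stdio.h>

/* Advance an expiry time by a (non-negative) interval, clamping to the
 * maximum representable value instead of wrapping negative, which is the
 * failure mode the patch above describes. */
static int64_t forward_expiry(int64_t expires, int64_t interval)
{
    if (expires > INT64_MAX - interval)
        return INT64_MAX;           /* clamp: far future instead of overflow */
    return expires + interval;
}

int main(void)
{
    printf("%lld\n", (long long)forward_expiry(1000, 500));
    printf("%lld\n", (long long)forward_expiry(INT64_MAX - 10, 500));
    return 0;
}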
static int ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb)
{
skb->dev = skb_dst(skb)->dev;
return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
}
| 0 |
[
"CWE-17"
] |
linux-stable
|
9d289715eb5c252ae15bd547cb252ca547a3c4f2
| 15,045,242,029,885,373,000,000,000,000,000,000,000 | 5 |
ipv6: stop sending PTB packets for MTU < 1280
Reduce the attack vector and stop generating the IPv6 Fragment Header for
paths with an MTU smaller than the minimum required IPv6 MTU
size (1280 bytes) - so-called atomic fragments.
See IETF I-D "Deprecating the Generation of IPv6 Atomic Fragments" [1]
for more information and how this "feature" can be misused.
[1] https://tools.ietf.org/html/draft-ietf-6man-deprecate-atomfrag-generation-00
Signed-off-by: Fernando Gont <[email protected]>
Signed-off-by: Hagen Paul Pfeifer <[email protected]>
Acked-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static int traverseLclosure (global_State *g, LClosure *cl) {
int i;
markobjectN(g, cl->p); /* mark its prototype */
for (i = 0; i < cl->nupvalues; i++) { /* visit its upvalues */
UpVal *uv = cl->upvals[i];
markobjectN(g, uv); /* mark upvalue */
}
return 1 + cl->nupvalues;
}
| 0 |
[
"CWE-125"
] |
lua
|
127e7a6c8942b362aa3c6627f44d660a4fb75312
| 27,680,919,319,176,727,000,000,000,000,000,000,000 | 9 |
Fixed bug of old finalized objects in the GC
When an object aged OLD1 is finalized, it is moved from the list
'finobj' to the *beginning* of the list 'allgc'. So, this part of the
list (and not only the survival list) must be visited by 'markold'.
|
pixHtmlViewer(const char *dirin,
const char *dirout,
const char *rootname,
l_int32 thumbwidth,
l_int32 viewwidth)
{
char *fname, *fullname, *outname;
char *mainname, *linkname, *linknameshort;
char *viewfile, *thumbfile;
char *shtml, *slink;
char charbuf[512];
char htmlstring[] = "<html>";
char framestring[] = "</frameset></html>";
l_int32 i, nfiles, index, w, d, nimages, ret;
l_float32 factor;
PIX *pix, *pixthumb, *pixview;
SARRAY *safiles, *sathumbs, *saviews, *sahtml, *salink;
PROCNAME("pixHtmlViewer");
if (!dirin)
return ERROR_INT("dirin not defined", procName, 1);
if (!dirout)
return ERROR_INT("dirout not defined", procName, 1);
if (!rootname)
return ERROR_INT("rootname not defined", procName, 1);
if (thumbwidth == 0)
thumbwidth = DEFAULT_THUMB_WIDTH;
if (thumbwidth < MIN_THUMB_WIDTH) {
L_WARNING("thumbwidth too small; using min value\n", procName);
thumbwidth = MIN_THUMB_WIDTH;
}
if (viewwidth == 0)
viewwidth = DEFAULT_VIEW_WIDTH;
if (viewwidth < MIN_VIEW_WIDTH) {
L_WARNING("viewwidth too small; using min value\n", procName);
viewwidth = MIN_VIEW_WIDTH;
}
/* Make the output directory if it doesn't already exist */
#ifndef _WIN32
snprintf(charbuf, sizeof(charbuf), "mkdir -p %s", dirout);
ret = system(charbuf);
#else
ret = CreateDirectory(dirout, NULL) ? 0 : 1;
#endif /* !_WIN32 */
if (ret) {
L_ERROR("output directory %s not made\n", procName, dirout);
return 1;
}
/* Capture the filenames in the input directory */
if ((safiles = getFilenamesInDirectory(dirin)) == NULL)
return ERROR_INT("safiles not made", procName, 1);
/* Generate output text file names */
sprintf(charbuf, "%s/%s.html", dirout, rootname);
mainname = stringNew(charbuf);
sprintf(charbuf, "%s/%s-links.html", dirout, rootname);
linkname = stringNew(charbuf);
linknameshort = stringJoin(rootname, "-links.html");
/* Generate the thumbs and views */
sathumbs = sarrayCreate(0);
saviews = sarrayCreate(0);
nfiles = sarrayGetCount(safiles);
index = 0;
for (i = 0; i < nfiles; i++) {
fname = sarrayGetString(safiles, i, L_NOCOPY);
fullname = genPathname(dirin, fname);
fprintf(stderr, "name: %s\n", fullname);
if ((pix = pixRead(fullname)) == NULL) {
fprintf(stderr, "file %s not a readable image\n", fullname);
lept_free(fullname);
continue;
}
lept_free(fullname);
/* Make and store the thumbnail images */
pixGetDimensions(pix, &w, NULL, &d);
factor = (l_float32)thumbwidth / (l_float32)w;
pixthumb = pixScale(pix, factor, factor);
sprintf(charbuf, "%s_thumb_%03d", rootname, index);
sarrayAddString(sathumbs, charbuf, L_COPY);
outname = genPathname(dirout, charbuf);
WriteFormattedPix(outname, pixthumb);
lept_free(outname);
pixDestroy(&pixthumb);
/* Make and store the view images */
factor = (l_float32)viewwidth / (l_float32)w;
if (factor >= 1.0)
pixview = pixClone(pix); /* no upscaling */
else
pixview = pixScale(pix, factor, factor);
snprintf(charbuf, sizeof(charbuf), "%s_view_%03d", rootname, index);
sarrayAddString(saviews, charbuf, L_COPY);
outname = genPathname(dirout, charbuf);
WriteFormattedPix(outname, pixview);
lept_free(outname);
pixDestroy(&pixview);
pixDestroy(&pix);
index++;
}
/* Generate the main html file */
sahtml = sarrayCreate(0);
sarrayAddString(sahtml, htmlstring, L_COPY);
sprintf(charbuf, "<frameset cols=\"%d, *\">", thumbwidth + 30);
sarrayAddString(sahtml, charbuf, L_COPY);
sprintf(charbuf, "<frame name=\"thumbs\" src=\"%s\">", linknameshort);
sarrayAddString(sahtml, charbuf, L_COPY);
sprintf(charbuf, "<frame name=\"views\" src=\"%s\">",
sarrayGetString(saviews, 0, L_NOCOPY));
sarrayAddString(sahtml, charbuf, L_COPY);
sarrayAddString(sahtml, framestring, L_COPY);
shtml = sarrayToString(sahtml, 1);
l_binaryWrite(mainname, "w", shtml, strlen(shtml));
fprintf(stderr, "******************************************\n"
"Writing html file: %s\n"
"******************************************\n", mainname);
lept_free(shtml);
lept_free(mainname);
/* Generate the link html file */
nimages = sarrayGetCount(saviews);
fprintf(stderr, "num. images = %d\n", nimages);
salink = sarrayCreate(0);
for (i = 0; i < nimages; i++) {
viewfile = sarrayGetString(saviews, i, L_NOCOPY);
thumbfile = sarrayGetString(sathumbs, i, L_NOCOPY);
sprintf(charbuf, "<a href=\"%s\" TARGET=views><img src=\"%s\"></a>",
viewfile, thumbfile);
sarrayAddString(salink, charbuf, L_COPY);
}
slink = sarrayToString(salink, 1);
l_binaryWrite(linkname, "w", slink, strlen(slink));
lept_free(slink);
lept_free(linkname);
lept_free(linknameshort);
sarrayDestroy(&safiles);
sarrayDestroy(&sathumbs);
sarrayDestroy(&saviews);
sarrayDestroy(&sahtml);
sarrayDestroy(&salink);
return 0;
}
| 1 |
[
"CWE-119",
"CWE-787"
] |
leptonica
|
c1079bb8e77cdd426759e466729917ca37a3ed9f
| 172,924,717,160,575,000,000,000,000,000,000,000,000 | 148 |
prog/htmlviewer: Catch unbound memory access (CID 1386222)
rootname can have any size, so limit the number of copied bytes.
Signed-off-by: Stefan Weil <[email protected]>
|
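The fix referenced above bounds the copies made from rootname. Below is a small stand-alone illustration of replacing unbounded sprintf() with length-checked snprintf(); the buffer size and names are examples, not the leptonica patch itself.
#include <stdio.h>

int main(void)
{
    char charbuf[32];
    const char *rootname = "a-very-very-very-long-root-name-from-the-user";
    int index = 3;

    /* Length-limited formatting: a long rootname is truncated, never
     * written past the end of charbuf. */
    int n = snprintf(charbuf, sizeof(charbuf), "%s_thumb_%03d", rootname, index);
    if (n < 0 || (size_t)n >= sizeof(charbuf))
        fprintf(stderr, "warning: output truncated or formatting failed\n");
    printf("%s\n", charbuf);
    return 0;
}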
longlong Field::convert_decimal2longlong(const my_decimal *val,
bool unsigned_flag, int *err)
{
longlong i;
if (unsigned_flag)
{
if (val->sign())
{
set_warning(ER_WARN_DATA_OUT_OF_RANGE, 1);
i= 0;
*err= 1;
}
else if (warn_if_overflow(my_decimal2int((E_DEC_ERROR &
~E_DEC_OVERFLOW &
~E_DEC_TRUNCATED),
val, TRUE, &i)))
{
i= ~(longlong) 0;
*err= 1;
}
}
else if (warn_if_overflow(my_decimal2int((E_DEC_ERROR &
~E_DEC_OVERFLOW &
~E_DEC_TRUNCATED),
val, FALSE, &i)))
{
i= (val->sign() ? LONGLONG_MIN : LONGLONG_MAX);
*err= 1;
}
return i;
}
| 0 |
[
"CWE-416",
"CWE-703"
] |
server
|
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
| 291,302,689,765,342,400,000,000,000,000,000,000,000 | 31 |
MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at the parsing stage. Since
the expr item is allocated on expr_arena, all the items it contains must be
allocated on expr_arena too; otherwise fix_session_expr() will
encounter a prematurely freed item.
When the table is reopened from the cache, vcol_info contains a stale
expression. We refresh the expression via TABLE::vcol_fix_exprs(), but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]>
|
g_file_delete (GFile *file,
GCancellable *cancellable,
GError **error)
{
GFileIface *iface;
g_return_val_if_fail (G_IS_FILE (file), FALSE);
if (g_cancellable_set_error_if_cancelled (cancellable, error))
return FALSE;
iface = G_FILE_GET_IFACE (file);
if (iface->delete_file == NULL)
{
g_set_error_literal (error, G_IO_ERROR,
G_IO_ERROR_NOT_SUPPORTED,
_("Operation not supported"));
return FALSE;
}
return (* iface->delete_file) (file, cancellable, error);
}
| 0 |
[
"CWE-362"
] |
glib
|
d8f8f4d637ce43f8699ba94c9b7648beda0ca174
| 122,540,212,679,580,900,000,000,000,000,000,000,000 | 23 |
gfile: Limit access to files when copying
file_copy_fallback creates new files with default permissions and
set the correct permissions after the operation is finished. This
might cause that the files can be accessible by more users during
the operation than expected. Use G_FILE_CREATE_PRIVATE for the new
files to limit access to those files.
|
UTI_IsZeroTimespec(struct timespec *ts)
{
return !ts->tv_sec && !ts->tv_nsec;
}
| 0 |
[
"CWE-59"
] |
chrony
|
7a4c396bba8f92a3ee8018620983529152050c74
| 110,038,427,926,711,000,000,000,000,000,000,000,000 | 4 |
util: add functions for common file operations
Add a function to open a file for reading, writing, or appending.
In uppercase modes errors are handled as fatal, i.e. the caller doesn't
need to check for NULL. To avoid string manipulations in the callers,
the function accepts an optional directory and suffix. New files are
created with specified permissions, which will be needed for saving
keys. The O_EXCL flag is used in the writing mode to make sure a new
file is created (on filesystems that support it).
Also, add a function to rename a temporary file by changing its suffix,
and a function to remove a file.
All functions log all errors, at least as debug messages.
|
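A rough sketch of the kind of helper the commit message describes - build a path from an optional directory, name and suffix, and create the file exclusively with explicit permissions - using invented names, not chrony's actual utility functions.
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

/* Open "<dir>/<name><suffix>" for writing, creating it exclusively with the
 * given permissions; O_EXCL guarantees a genuinely new file. */
static FILE *open_new_file(const char *dir, const char *name,
                           const char *suffix, mode_t perm)
{
    char path[512];
    int fd;

    snprintf(path, sizeof(path), "%s/%s%s", dir ? dir : ".", name,
             suffix ? suffix : "");
    fd = open(path, O_WRONLY | O_CREAT | O_EXCL, perm);
    if (fd < 0) {
        perror(path);
        return NULL;
    }
    return fdopen(fd, "w");
}

int main(void)
{
    FILE *f = open_new_file("/tmp", "example-key", ".tmp", 0600);
    if (!f)
        return 1;
    fputs("example contents\n", f);
    fclose(f);
    return 0;
}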
static void usbnet_terminate_urbs(struct usbnet *dev)
{
DECLARE_WAITQUEUE(wait, current);
int temp;
/* ensure there are no more active urbs */
add_wait_queue(&dev->wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
temp = unlink_urbs(dev, &dev->txq) +
unlink_urbs(dev, &dev->rxq);
/* maybe wait for deletions to finish. */
wait_skb_queue_empty(&dev->rxq);
wait_skb_queue_empty(&dev->txq);
wait_skb_queue_empty(&dev->done);
netif_dbg(dev, ifdown, dev->net,
"waited for %d urb completions\n", temp);
set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->wait, &wait);
}
| 0 |
[
"CWE-703"
] |
linux
|
1666984c8625b3db19a9abc298931d35ab7bc64b
| 168,326,809,565,292,420,000,000,000,000,000,000,000 | 20 |
usbnet: cleanup after bind() in probe()
In case bind() works, but a later error forces bailing
out of probe(), work items and a timer may already be scheduled.
They must be killed. This fixes an error case related to
the double free reported in
http://www.spinics.net/lists/netdev/msg367669.html
and needs to go on top of Linus' fix to cdc-ncm.
Signed-off-by: Oliver Neukum <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
sh_backslash_quote_for_double_quotes (string)
char *string;
{
unsigned char c;
char *result, *r, *s;
result = (char *)xmalloc (2 * strlen (string) + 1);
for (r = result, s = string; s && (c = *s); s++)
{
if (sh_syntaxtab[c] & CBSDQUOTE)
*r++ = '\\';
/* I should probably add flags for these to sh_syntaxtab[] */
else if (c == CTLESC || c == CTLNUL)
*r++ = CTLESC; /* could be '\\'? */
*r++ = c;
}
*r = '\0';
return (result);
}
| 0 |
[] |
bash
|
863d31ae775d56b785dc5b0105b6d251515d81d5
| 332,264,750,211,320,700,000,000,000,000,000,000,000 | 22 |
commit bash-20120224 snapshot
|
CImg<T>& _load_pfm(std::FILE *const file, const char *const filename) {
if (!file && !filename)
throw CImgArgumentException(_cimg_instance
"load_pfm(): Specified filename is (null).",
cimg_instance);
std::FILE *const nfile = file?file:cimg::fopen(filename,"rb");
char pfm_type;
CImg<charT> item(16384,1,1,1,0);
int W = 0, H = 0, err = 0;
double scale = 0;
while ((err=std::fscanf(nfile,"%16383[^\n]",item.data()))!=EOF && (*item=='#' || !err)) std::fgetc(nfile);
if (cimg_sscanf(item," P%c",&pfm_type)!=1) {
if (!file) cimg::fclose(nfile);
throw CImgIOException(_cimg_instance
"load_pfm(): PFM header not found in file '%s'.",
cimg_instance,
filename?filename:"(FILE*)");
}
while ((err=std::fscanf(nfile," %16383[^\n]",item.data()))!=EOF && (*item=='#' || !err)) std::fgetc(nfile);
if ((err=cimg_sscanf(item," %d %d",&W,&H))<2) {
if (!file) cimg::fclose(nfile);
throw CImgIOException(_cimg_instance
"load_pfm(): WIDTH and HEIGHT fields are undefined in file '%s'.",
cimg_instance,
filename?filename:"(FILE*)");
} else if (W<=0 || H<=0) {
if (!file) cimg::fclose(nfile);
throw CImgIOException(_cimg_instance
"load_pfm(): WIDTH (%d) and HEIGHT (%d) fields are invalid in file '%s'.",
cimg_instance,W,H,
filename?filename:"(FILE*)");
}
if (err==2) {
while ((err=std::fscanf(nfile," %16383[^\n]",item.data()))!=EOF && (*item=='#' || !err)) std::fgetc(nfile);
if (cimg_sscanf(item,"%lf",&scale)!=1)
cimg::warn(_cimg_instance
"load_pfm(): SCALE field is undefined in file '%s'.",
cimg_instance,
filename?filename:"(FILE*)");
}
std::fgetc(nfile);
const bool is_color = (pfm_type=='F'), is_inverted = (scale>0)!=cimg::endianness();
if (is_color) {
assign(W,H,1,3,(T)0);
CImg<floatT> buf(3*W);
T *ptr_r = data(0,0,0,0), *ptr_g = data(0,0,0,1), *ptr_b = data(0,0,0,2);
cimg_forY(*this,y) {
cimg::fread(buf._data,3*W,nfile);
if (is_inverted) cimg::invert_endianness(buf._data,3*W);
const float *ptrs = buf._data;
cimg_forX(*this,x) {
*(ptr_r++) = (T)*(ptrs++);
*(ptr_g++) = (T)*(ptrs++);
*(ptr_b++) = (T)*(ptrs++);
}
}
} else {
assign(W,H,1,1,(T)0);
CImg<floatT> buf(W);
T *ptrd = data(0,0,0,0);
cimg_forY(*this,y) {
cimg::fread(buf._data,W,nfile);
if (is_inverted) cimg::invert_endianness(buf._data,W);
const float *ptrs = buf._data;
cimg_forX(*this,x) *(ptrd++) = (T)*(ptrs++);
}
}
if (!file) cimg::fclose(nfile);
      return mirror('y'); // Most of the .pfm files are flipped along the y-axis
    }
| 0 |
[
"CWE-119",
"CWE-787"
] |
CImg
|
ac8003393569aba51048c9d67e1491559877b1d1
| 58,865,043,654,745,160,000,000,000,000,000,000,000 | 71 |
.
|
static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, swp_entry_t entry, struct page *page)
{
struct mem_cgroup *memcg;
spinlock_t *ptl;
pte_t *pte;
int ret = 1;
if (mem_cgroup_try_charge_swapin(vma->vm_mm, page,
GFP_KERNEL, &memcg)) {
ret = -ENOMEM;
goto out_nolock;
}
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
if (ret > 0)
mem_cgroup_cancel_charge_swapin(memcg);
ret = 0;
goto out;
}
dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
get_page(page);
set_pte_at(vma->vm_mm, addr, pte,
pte_mkold(mk_pte(page, vma->vm_page_prot)));
page_add_anon_rmap(page, vma, addr);
mem_cgroup_commit_charge_swapin(page, memcg);
swap_free(entry);
/*
* Move the page to the active list so it is not
* immediately swapped out again after swapon.
*/
activate_page(page);
out:
pte_unmap_unlock(pte, ptl);
out_nolock:
return ret;
}
| 0 |
[
"CWE-264"
] |
linux-2.6
|
1a5a9906d4e8d1976b701f889d8f35d54b928f25
| 232,527,427,433,414,600,000,000,000,000,000,000,000 | 40 |
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem hold in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables to go away from under them, during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(¤t->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[[email protected]: checkpatch fixes]
Reported-by: Ulrich Obergfell <[email protected]>
Signed-off-by: Andrea Arcangeli <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Dave Jones <[email protected]>
Acked-by: Larry Woodman <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: <[email protected]> [2.6.38+]
Cc: Mark Salter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
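To make the shape of the fix concrete, here is a hedged, userspace-style sketch of the pattern the message describes; the names and the placeholder validity test are illustrative assumptions, while the real change operates on pmd_t values behind pmd_none_or_clear_bad().

/* Hedged sketch of the fix pattern: read the shared entry once into a
 * local, keep the compiler from re-reading it, and base every later
 * decision on that snapshot. */
#define barrier() __asm__ __volatile__("" ::: "memory")

static int entry_is_bad(unsigned long val)
{
    return (val & 1UL) == 0;          /* placeholder validity test */
}

static int entry_none_or_bad(unsigned long *entry)
{
    unsigned long val = *entry;       /* one snapshot of the shared value */
    barrier();                        /* compiler must not reload *entry */
    if (val == 0)
        return 1;                     /* "none": nothing mapped here */
    return entry_is_bad(val);         /* judge the snapshot, not *entry */
}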
|
xmlDOMWrapReconcileNamespaces(xmlDOMWrapCtxtPtr ctxt ATTRIBUTE_UNUSED,
xmlNodePtr elem,
int options)
{
int depth = -1, adoptns = 0, parnsdone = 0;
xmlNsPtr ns, prevns;
xmlDocPtr doc;
xmlNodePtr cur, curElem = NULL;
xmlNsMapPtr nsMap = NULL;
xmlNsMapItemPtr /* topmi = NULL, */ mi;
/* @ancestorsOnly should be set by an option flag. */
int ancestorsOnly = 0;
int optRemoveRedundantNS =
((xmlDOMReconcileNSOptions) options & XML_DOM_RECONNS_REMOVEREDUND) ? 1 : 0;
xmlNsPtr *listRedund = NULL;
int sizeRedund = 0, nbRedund = 0, ret, i, j;
if ((elem == NULL) || (elem->doc == NULL) ||
(elem->type != XML_ELEMENT_NODE))
return (-1);
doc = elem->doc;
cur = elem;
do {
switch (cur->type) {
case XML_ELEMENT_NODE:
adoptns = 1;
curElem = cur;
depth++;
/*
* Namespace declarations.
*/
if (cur->nsDef != NULL) {
prevns = NULL;
ns = cur->nsDef;
while (ns != NULL) {
if (! parnsdone) {
if ((elem->parent) &&
((xmlNodePtr) elem->parent->doc != elem->parent)) {
/*
* Gather ancestor in-scope ns-decls.
*/
if (xmlDOMWrapNSNormGatherInScopeNs(&nsMap,
elem->parent) == -1)
goto internal_error;
}
parnsdone = 1;
}
/*
* Lookup the ns ancestor-axis for equal ns-decls in scope.
*/
if (optRemoveRedundantNS && XML_NSMAP_NOTEMPTY(nsMap)) {
XML_NSMAP_FOREACH(nsMap, mi) {
if ((mi->depth >= XML_TREE_NSMAP_PARENT) &&
(mi->shadowDepth == -1) &&
((ns->prefix == mi->newNs->prefix) ||
xmlStrEqual(ns->prefix, mi->newNs->prefix)) &&
((ns->href == mi->newNs->href) ||
xmlStrEqual(ns->href, mi->newNs->href)))
{
/*
* A redundant ns-decl was found.
* Add it to the list of redundant ns-decls.
*/
if (xmlDOMWrapNSNormAddNsMapItem2(&listRedund,
&sizeRedund, &nbRedund, ns, mi->newNs) == -1)
goto internal_error;
/*
* Remove the ns-decl from the element-node.
*/
if (prevns)
prevns->next = ns->next;
else
cur->nsDef = ns->next;
goto next_ns_decl;
}
}
}
/*
* Skip ns-references handling if the referenced
* ns-decl is declared on the same element.
*/
if ((cur->ns != NULL) && adoptns && (cur->ns == ns))
adoptns = 0;
/*
* Does it shadow any ns-decl?
*/
if (XML_NSMAP_NOTEMPTY(nsMap)) {
XML_NSMAP_FOREACH(nsMap, mi) {
if ((mi->depth >= XML_TREE_NSMAP_PARENT) &&
(mi->shadowDepth == -1) &&
((ns->prefix == mi->newNs->prefix) ||
xmlStrEqual(ns->prefix, mi->newNs->prefix))) {
mi->shadowDepth = depth;
}
}
}
/*
* Push mapping.
*/
if (xmlDOMWrapNsMapAddItem(&nsMap, -1, ns, ns,
depth) == NULL)
goto internal_error;
prevns = ns;
next_ns_decl:
ns = ns->next;
}
}
if (! adoptns)
goto ns_end;
/* Falls through. */
case XML_ATTRIBUTE_NODE:
/* No ns, no fun. */
if (cur->ns == NULL)
goto ns_end;
if (! parnsdone) {
if ((elem->parent) &&
((xmlNodePtr) elem->parent->doc != elem->parent)) {
if (xmlDOMWrapNSNormGatherInScopeNs(&nsMap,
elem->parent) == -1)
goto internal_error;
}
parnsdone = 1;
}
/*
* Adjust the reference if this was a redundant ns-decl.
*/
if (listRedund) {
for (i = 0, j = 0; i < nbRedund; i++, j += 2) {
if (cur->ns == listRedund[j]) {
cur->ns = listRedund[++j];
break;
}
}
}
/*
* Adopt ns-references.
*/
if (XML_NSMAP_NOTEMPTY(nsMap)) {
/*
* Search for a mapping.
*/
XML_NSMAP_FOREACH(nsMap, mi) {
if ((mi->shadowDepth == -1) &&
(cur->ns == mi->oldNs)) {
cur->ns = mi->newNs;
goto ns_end;
}
}
}
/*
* Acquire a normalized ns-decl and add it to the map.
*/
if (xmlDOMWrapNSNormAcquireNormalizedNs(doc, curElem,
cur->ns, &ns,
&nsMap, depth,
ancestorsOnly,
(cur->type == XML_ATTRIBUTE_NODE) ? 1 : 0) == -1)
goto internal_error;
cur->ns = ns;
ns_end:
if ((cur->type == XML_ELEMENT_NODE) &&
(cur->properties != NULL)) {
/*
* Process attributes.
*/
cur = (xmlNodePtr) cur->properties;
continue;
}
break;
default:
goto next_sibling;
}
into_content:
if ((cur->type == XML_ELEMENT_NODE) &&
(cur->children != NULL)) {
/*
* Process content of element-nodes only.
*/
cur = cur->children;
continue;
}
next_sibling:
if (cur == elem)
break;
if (cur->type == XML_ELEMENT_NODE) {
if (XML_NSMAP_NOTEMPTY(nsMap)) {
/*
* Pop mappings.
*/
while ((nsMap->last != NULL) &&
(nsMap->last->depth >= depth))
{
XML_NSMAP_POP(nsMap, mi)
}
/*
* Unshadow.
*/
XML_NSMAP_FOREACH(nsMap, mi) {
if (mi->shadowDepth >= depth)
mi->shadowDepth = -1;
}
}
depth--;
}
if (cur->next != NULL)
cur = cur->next;
else {
if (cur->type == XML_ATTRIBUTE_NODE) {
cur = cur->parent;
goto into_content;
}
cur = cur->parent;
goto next_sibling;
}
} while (cur != NULL);
ret = 0;
goto exit;
internal_error:
ret = -1;
exit:
if (listRedund) {
for (i = 0, j = 0; i < nbRedund; i++, j += 2) {
xmlFreeNs(listRedund[j]);
}
xmlFree(listRedund);
}
if (nsMap != NULL)
xmlDOMWrapNsMapFree(nsMap);
return (ret);
}
| 0 |
[
"CWE-190"
] |
libxml2
|
6c283d83eccd940bcde15634ac8c7f100e3caefd
| 302,318,181,430,127,870,000,000,000,000,000,000,000 | 239 |
[CVE-2022-29824] Fix integer overflows in xmlBuf and xmlBuffer
In several places, the code handling string buffers didn't check for
integer overflow or used wrong types for buffer sizes. This could
result in out-of-bounds writes or other memory errors when working on
large, multi-gigabyte buffers.
Thanks to Felix Wilhelm for the report.
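A minimal sketch of the kind of check the message calls for, in plain C; the helper name and growth policy are assumptions for illustration, not the actual xmlBuf code.

#include <stdint.h>
#include <stdlib.h>

/* Illustrative only: grow a buffer while refusing any request where the
 * size arithmetic could overflow size_t. */
static char *grow_buffer(char *buf, size_t used, size_t *cap, size_t extra)
{
    if (extra > SIZE_MAX - used)      /* used + extra would overflow */
        return NULL;
    if (used + extra <= *cap)
        return buf;                   /* already large enough */
    size_t newcap = used + extra;
    if (newcap <= SIZE_MAX / 2)
        newcap *= 2;                  /* amortize growth, never overflow */
    char *p = realloc(buf, newcap);
    if (p == NULL)
        return NULL;
    *cap = newcap;
    return p;
}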
|
static void vrend_draw_bind_vertex_binding(struct vrend_context *ctx,
struct vrend_vertex_element_array *va)
{
int i;
glBindVertexArray(va->id);
if (ctx->sub->vbo_dirty) {
struct vrend_vertex_buffer *vbo = &ctx->sub->vbo[0];
if (has_feature(feat_bind_vertex_buffers)) {
GLsizei count = MAX2(ctx->sub->num_vbos, ctx->sub->old_num_vbos);
GLuint buffers[PIPE_MAX_ATTRIBS];
GLintptr offsets[PIPE_MAX_ATTRIBS];
GLsizei strides[PIPE_MAX_ATTRIBS];
for (i = 0; i < ctx->sub->num_vbos; i++) {
struct vrend_resource *res = (struct vrend_resource *)vbo[i].base.buffer;
if (res) {
buffers[i] = res->id;
offsets[i] = vbo[i].base.buffer_offset;
strides[i] = vbo[i].base.stride;
} else {
buffers[i] = 0;
offsets[i] = 0;
strides[i] = 0;
}
}
for (i = ctx->sub->num_vbos; i < ctx->sub->old_num_vbos; i++) {
buffers[i] = 0;
offsets[i] = 0;
strides[i] = 0;
}
glBindVertexBuffers(0, count, buffers, offsets, strides);
} else {
for (i = 0; i < ctx->sub->num_vbos; i++) {
struct vrend_resource *res = (struct vrend_resource *)vbo[i].base.buffer;
if (res)
glBindVertexBuffer(i, res->id, vbo[i].base.buffer_offset, vbo[i].base.stride);
else
glBindVertexBuffer(i, 0, 0, 0);
}
for (i = ctx->sub->num_vbos; i < ctx->sub->old_num_vbos; i++)
glBindVertexBuffer(i, 0, 0, 0);
}
ctx->sub->vbo_dirty = false;
}
}
| 0 |
[
"CWE-787"
] |
virglrenderer
|
95e581fd181b213c2ed7cdc63f2abc03eaaa77ec
| 153,208,416,201,202,610,000,000,000,000,000,000,000 | 52 |
vrend: Add test to resource OOB write and fix it
v2: Also check that no depth != 1 has been sent when none is due
Closes: #250
Signed-off-by: Gert Wollny <[email protected]>
Reviewed-by: Chia-I Wu <[email protected]>
|
**/
Tfloat cubic_atXY(const float fx, const float fy, const int z=0, const int c=0) const {
if (is_empty())
throw CImgInstanceException(_cimg_instance
"cubic_atXY(): Empty instance.",
cimg_instance);
return _cubic_atXY(fx,fy,z,c);
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 324,497,672,730,864,240,000,000,000,000,000,000,000 | 7 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
hash_aclitem(PG_FUNCTION_ARGS)
{
AclItem *a = PG_GETARG_ACLITEM_P(0);
/* not very bright, but avoids any issue of padding in struct */
PG_RETURN_UINT32((uint32) (a->ai_privs + a->ai_grantee + a->ai_grantor));
}
| 0 |
[
"CWE-264"
] |
postgres
|
fea164a72a7bfd50d77ba5fb418d357f8f2bb7d0
| 241,758,650,361,570,000,000,000,000,000,000,000,000 | 7 |
Shore up ADMIN OPTION restrictions.
Granting a role without ADMIN OPTION is supposed to prevent the grantee
from adding or removing members from the granted role. Issuing SET ROLE
before the GRANT bypassed that, because the role itself had an implicit
right to add or remove members. Plug that hole by recognizing that
implicit right only when the session user matches the current role.
Additionally, do not recognize it during a security-restricted operation
or during execution of a SECURITY DEFINER function. The restriction on
SECURITY DEFINER is not security-critical. However, it seems best for a
user testing his own SECURITY DEFINER function to see the same behavior
others will see. Back-patch to 8.4 (all supported versions).
The SQL standards do not conflate roles and users as PostgreSQL does;
only SQL roles have members, and only SQL users initiate sessions. An
application using PostgreSQL users and roles as SQL users and roles will
never attempt to grant membership in the role that is the session user,
so the implicit right to add or remove members will never arise.
The security impact was mostly that a role member could revoke access
from others, contrary to the wishes of his own grantor. Unapproved role
member additions are less notable, because the member can still largely
achieve that by creating a view or a SECURITY DEFINER function.
Reviewed by Andres Freund and Tom Lane. Reported, independently, by
Jonas Sundman and Noah Misch.
Security: CVE-2014-0060
|
batchNumMsgs(batch_t *pBatch) {
return pBatch->nElem;
}
| 0 |
[
"CWE-772",
"CWE-401"
] |
rsyslog
|
1ef709cc97d54f74d3fdeb83788cc4b01f4c6a2a
| 326,588,098,387,061,760,000,000,000,000,000,000,000 | 3 |
bugfix: fixed a memory leak and potential abort condition
this could happen if multiple rulesets were used and some output batches
contained messages belonging to more than one ruleset.
fixes: http://bugzilla.adiscon.com/show_bug.cgi?id=226
fixes: http://bugzilla.adiscon.com/show_bug.cgi?id=218
|
static int apparmor_path_unlink(struct path *dir, struct dentry *dentry)
{
return common_perm_rm(OP_UNLINK, dir, dentry, AA_MAY_DELETE);
}
| 0 |
[
"CWE-20"
] |
linux
|
a5b2c5b2ad5853591a6cac6134cd0f599a720865
| 200,626,256,524,813,460,000,000,000,000,000,000,000 | 4 |
AppArmor: fix oops in apparmor_setprocattr
When invalid parameters are passed to apparmor_setprocattr a NULL deref
oops occurs when it tries to record an audit message. This is because
it is passing NULL for the profile parameter for aa_audit. But aa_audit
now requires that the profile passed is not NULL.
Fix this by passing the current profile on the task that is trying to
setprocattr.
Signed-off-by: Kees Cook <[email protected]>
Signed-off-by: John Johansen <[email protected]>
Cc: [email protected]
Signed-off-by: James Morris <[email protected]>
|
int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx,
u16 listen_interval,
u16 listen_beacons)
{
struct sk_buff *skb;
struct wmi_listen_int_cmd *cmd;
int ret;
skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_listen_int_cmd *) skb->data;
cmd->listen_intvl = cpu_to_le16(listen_interval);
cmd->num_beacons = cpu_to_le16(listen_beacons);
ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_LISTEN_INT_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
| 0 |
[
"CWE-125"
] |
linux
|
5d6751eaff672ea77642e74e92e6c0ac7f9709ab
| 38,307,614,647,541,520,000,000,000,000,000,000,000 | 20 |
ath6kl: add some bounds checking
The "ev->traffic_class" and "reply->ac" variables come from the network
and they're used as an offset into the wmi->stream_exist_for_ac[] array.
Those variables are u8 so they can be 0-255 but the stream_exist_for_ac[]
array only has WMM_NUM_AC (4) elements. We need to add a couple bounds
checks to prevent array overflows.
I also modified one existing check from "if (traffic_class > 3) {" to
"if (traffic_class >= WMM_NUM_AC) {" just to make them all consistent.
Fixes: bdcd81707973 (" Add ath6kl cleaned up driver")
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Kalle Valo <[email protected]>
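The fix pattern is simple enough to show in isolation; this hedged sketch uses an illustrative structure rather than the real ath6kl wmi state, and only demonstrates rejecting a network-supplied index before it is used.

#include <stdbool.h>
#include <stdint.h>

#define WMM_NUM_AC 4

/* Illustrative structure: a value arriving from the network (0-255) is
 * validated against the actual array length before being used as an
 * index. */
struct wmi_state {
    bool stream_exist_for_ac[WMM_NUM_AC];
};

static int set_stream(struct wmi_state *wmi, uint8_t traffic_class, bool on)
{
    if (traffic_class >= WMM_NUM_AC)
        return -1;                    /* reject out-of-range input */
    wmi->stream_exist_for_ac[traffic_class] = on;
    return 0;
}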
|
Binary::it_segments Binary::segments() {
return segments_;
}
| 0 |
[
"CWE-703"
] |
LIEF
|
7acf0bc4224081d4f425fcc8b2e361b95291d878
| 67,955,647,626,549,650,000,000,000,000,000,000,000 | 3 |
Resolve #764
|
ccbaGetCcb(CCBORDA *ccba,
l_int32 index)
{
CCBORD *ccb;
PROCNAME("ccbaGetCcb");
if (!ccba)
return (CCBORD *)ERROR_PTR("ccba not defined", procName, NULL);
if (index < 0 || index >= ccba->n)
return (CCBORD *)ERROR_PTR("index out of bounds", procName, NULL);
ccb = ccba->ccb[index];
ccb->refcount++;
return ccb;
}
| 0 |
[
"CWE-125"
] |
leptonica
|
8d6e1755518cfb98536d6c3daf0601f226d16842
| 272,155,099,043,994,300,000,000,000,000,000,000,000 | 16 |
Issue 23433 in oss-fuzz: Heap-buffer-overflow in findNextBorderPixel()
* Check pix boundary when looking for the next pixel.
|
xmlBufSetAllocationScheme(xmlBufPtr buf,
xmlBufferAllocationScheme scheme) {
if ((buf == NULL) || (buf->error != 0)) {
#ifdef DEBUG_BUFFER
xmlGenericError(xmlGenericErrorContext,
"xmlBufSetAllocationScheme: buf == NULL or in error\n");
#endif
return(-1);
}
if ((buf->alloc == XML_BUFFER_ALLOC_IMMUTABLE) ||
(buf->alloc == XML_BUFFER_ALLOC_IO))
return(-1);
if ((scheme == XML_BUFFER_ALLOC_DOUBLEIT) ||
(scheme == XML_BUFFER_ALLOC_EXACT) ||
(scheme == XML_BUFFER_ALLOC_HYBRID) ||
(scheme == XML_BUFFER_ALLOC_IMMUTABLE)) {
buf->alloc = scheme;
if (buf->buffer)
buf->buffer->alloc = scheme;
return(0);
}
/*
* Switching a buffer ALLOC_IO has the side effect of initializing
* the contentIO field with the current content
*/
if (scheme == XML_BUFFER_ALLOC_IO) {
buf->alloc = XML_BUFFER_ALLOC_IO;
buf->contentIO = buf->content;
}
return(-1);
}
| 1 |
[
"CWE-399"
] |
libxml2
|
213f1fe0d76d30eaed6e5853057defc43e6df2c9
| 155,484,866,204,748,730,000,000,000,000,000,000,000 | 31 |
CVE-2015-1819 Enforce the reader to run in constant memory
One of the operation on the reader could resolve entities
leading to the classic expansion issue. Make sure the
buffer used for xmlreader operation is bounded.
Introduce a new allocation type for the buffers for this effect.
|
bool pn_ssl_present(void)
{
return true;
}
| 0 |
[] |
qpid-proton
|
4aea0fd8502f5e9af7f22fd60645eeec07bce0b2
| 25,096,560,322,797,276,000,000,000,000,000,000,000 | 4 |
PROTON-2014: [c] Ensure SSL mutual authentication
(cherry picked from commit 97c7733f07712665f3d08091c82c393e4c3adbf7)
|
void LIRGenerator::do_If(If* x) {
assert(x->number_of_sux() == 2, "inconsistency");
ValueTag tag = x->x()->type()->tag();
bool is_safepoint = x->is_safepoint();
If::Condition cond = x->cond();
LIRItem xitem(x->x(), this);
LIRItem yitem(x->y(), this);
LIRItem* xin = &xitem;
LIRItem* yin = &yitem;
if (tag == longTag) {
// for longs, only conditions "eql", "neq", "lss", "geq" are valid;
// mirror for other conditions
if (cond == If::gtr || cond == If::leq) {
cond = Instruction::mirror(cond);
xin = &yitem;
yin = &xitem;
}
xin->set_destroys_register();
}
xin->load_item();
if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
// inline long zero
yin->dont_load_item();
} else if (tag == longTag || tag == floatTag || tag == doubleTag) {
// longs cannot handle constants at right side
yin->load_item();
} else {
yin->dont_load_item();
}
LIR_Opr left = xin->result();
LIR_Opr right = yin->result();
set_no_result(x);
// add safepoint before generating condition code so it can be recomputed
if (x->is_safepoint()) {
// increment backedge counter if needed
increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
__ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
}
__ cmp(lir_cond(cond), left, right);
// Generate branch profiling. Profiling code doesn't kill flags.
profile_branch(x, cond);
move_to_phi(x->state());
if (x->x()->type()->is_float_kind()) {
__ branch(lir_cond(cond), x->tsux(), x->usux());
} else {
__ branch(lir_cond(cond), x->tsux());
}
assert(x->default_sux() == x->fsux(), "wrong destination above");
__ jump(x->default_sux());
}
| 0 |
[] |
jdk17u
|
268c0159253b3de5d72eb826ef2329b27bb33fea
| 65,771,426,829,607,730,000,000,000,000,000,000,000 | 58 |
8272014: Better array indexing
Reviewed-by: thartmann
Backport-of: 937c31d896d05aa24543b74e98a2ea9f05b5d86f
|
template<typename t>
CImg<T>& distance_eikonal(const T& value, const CImg<t>& metric) {
return get_distance_eikonal(value,metric).move_to(*this);
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 182,826,629,844,262,440,000,000,000,000,000,000,000 | 3 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
static void fdctrl_handle_version(FDCtrl *fdctrl, int direction)
{
/* Controller's version */
fdctrl->fifo[0] = fdctrl->version;
fdctrl_set_fifo(fdctrl, 1);
}
| 0 |
[
"CWE-119"
] |
qemu
|
e907746266721f305d67bc0718795fedee2e824c
| 83,608,697,097,367,380,000,000,000,000,000,000,000 | 6 |
fdc: force the fifo access to be in bounds of the allocated buffer
During processing of certain commands such as FD_CMD_READ_ID and
FD_CMD_DRIVE_SPECIFICATION_COMMAND the fifo memory access could
get out of bounds leading to memory corruption with values coming
from the guest.
Fix this by making sure that the index is always bounded by the
allocated memory.
This is CVE-2015-3456.
Signed-off-by: Petr Matousek <[email protected]>
Reviewed-by: John Snow <[email protected]>
Signed-off-by: John Snow <[email protected]>
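As a hedged illustration of "force the fifo access to be in bounds", the sketch below clamps the index with a modulo before every access; the structure, size, and names are assumptions, not the actual QEMU fdc code (which bounds against FD_SECTOR_LEN).

#include <stdint.h>

#define FIFO_SIZE 512   /* illustrative buffer size */

/* Every FIFO access goes through an index reduced modulo the buffer
 * size, so guest-controlled positions can never reach outside the
 * allocation. */
struct fifo_dev {
    uint8_t fifo[FIFO_SIZE];
};

static uint8_t fifo_read(struct fifo_dev *d, uint32_t pos)
{
    return d->fifo[pos % FIFO_SIZE];
}

static void fifo_write(struct fifo_dev *d, uint32_t pos, uint8_t val)
{
    d->fifo[pos % FIFO_SIZE] = val;
}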
|
invoke_NPN_PostURL(PluginInstance *plugin, const char *url, const char *target, uint32_t len, const char *buf, NPBool file)
{
npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection),
NPERR_GENERIC_ERROR);
int error = rpc_method_invoke(g_rpc_connection,
RPC_METHOD_NPN_POST_URL,
RPC_TYPE_NPW_PLUGIN_INSTANCE, plugin,
RPC_TYPE_STRING, url,
RPC_TYPE_STRING, target,
RPC_TYPE_ARRAY, RPC_TYPE_CHAR, len, buf,
RPC_TYPE_BOOLEAN, file,
RPC_TYPE_INVALID);
if (error != RPC_ERROR_NO_ERROR) {
npw_perror("NPN_PostURL() invoke", error);
return NPERR_GENERIC_ERROR;
}
int32_t ret;
error = rpc_method_wait_for_reply(g_rpc_connection, RPC_TYPE_INT32, &ret, RPC_TYPE_INVALID);
if (error != RPC_ERROR_NO_ERROR) {
npw_perror("NPN_PostURL() wait for reply", error);
return NPERR_GENERIC_ERROR;
}
return ret;
}
| 0 |
[
"CWE-264"
] |
nspluginwrapper
|
7e4ab8e1189846041f955e6c83f72bc1624e7a98
| 74,025,850,482,154,060,000,000,000,000,000,000,000 | 29 |
Support all the new variables added
|
static void handle_page_specs(QPDF& pdf, Options& o)
{
// Parse all page specifications and translate them into lists of
// actual pages.
// Handle "." as a shortcut for the input file
for (std::vector<PageSpec>::iterator iter = o.page_specs.begin();
iter != o.page_specs.end(); ++iter)
{
PageSpec& page_spec = *iter;
if (page_spec.filename == ".")
{
page_spec.filename = o.infilename;
}
}
if (! o.keep_files_open_set)
{
// Count the number of distinct files to determine whether we
// should keep files open or not. Rather than trying to code
// some portable heuristic based on OS limits, just hard-code
// this at a given number and allow users to override.
std::set<std::string> filenames;
for (std::vector<PageSpec>::iterator iter = o.page_specs.begin();
iter != o.page_specs.end(); ++iter)
{
PageSpec& page_spec = *iter;
filenames.insert(page_spec.filename);
}
if (filenames.size() > o.keep_files_open_threshold)
{
QTC::TC("qpdf", "qpdf disable keep files open");
if (o.verbose)
{
std::cout << whoami << ": selecting --keep-open-files=n"
<< std::endl;
}
o.keep_files_open = false;
}
else
{
if (o.verbose)
{
std::cout << whoami << ": selecting --keep-open-files=y"
<< std::endl;
}
o.keep_files_open = true;
QTC::TC("qpdf", "qpdf don't disable keep files open");
}
}
// Create a QPDF object for each file that we may take pages from.
std::vector<PointerHolder<QPDF> > page_heap;
std::map<std::string, QPDF*> page_spec_qpdfs;
std::map<std::string, ClosedFileInputSource*> page_spec_cfis;
page_spec_qpdfs[o.infilename] = &pdf;
std::vector<QPDFPageData> parsed_specs;
std::map<unsigned long long, std::set<QPDFObjGen> > copied_pages;
for (std::vector<PageSpec>::iterator iter = o.page_specs.begin();
iter != o.page_specs.end(); ++iter)
{
PageSpec& page_spec = *iter;
if (page_spec_qpdfs.count(page_spec.filename) == 0)
{
// Open the PDF file and store the QPDF object. Throw a
// PointerHolder to the qpdf into a heap so that it
// survives through copying to the output but gets cleaned up
// automatically at the end. Do not canonicalize the file
// name. Using two different paths to refer to the same
            // file is a documented workaround for duplicating a page.
            // If you are using this as an example of how to do this with
            // the API, you can just create two different QPDF objects
            // to the same underlying file with the same path to
            // achieve the same effect.
char const* password = page_spec.password;
if (o.encryption_file && (password == 0) &&
(page_spec.filename == o.encryption_file))
{
QTC::TC("qpdf", "qpdf pages encryption password");
password = o.encryption_file_password;
}
if (o.verbose)
{
std::cout << whoami << ": processing "
<< page_spec.filename << std::endl;
}
PointerHolder<InputSource> is;
ClosedFileInputSource* cis = 0;
if (! o.keep_files_open)
{
QTC::TC("qpdf", "qpdf keep files open n");
cis = new ClosedFileInputSource(page_spec.filename.c_str());
is = cis;
cis->stayOpen(true);
}
else
{
QTC::TC("qpdf", "qpdf keep files open y");
FileInputSource* fis = new FileInputSource();
is = fis;
fis->setFilename(page_spec.filename.c_str());
}
PointerHolder<QPDF> qpdf_ph = process_input_source(is, password, o);
page_heap.push_back(qpdf_ph);
page_spec_qpdfs[page_spec.filename] = qpdf_ph.getPointer();
if (cis)
{
cis->stayOpen(false);
page_spec_cfis[page_spec.filename] = cis;
}
}
// Read original pages from the PDF, and parse the page range
// associated with this occurrence of the file.
parsed_specs.push_back(
QPDFPageData(page_spec.filename,
page_spec_qpdfs[page_spec.filename],
page_spec.range));
}
if (! o.preserve_unreferenced_page_resources)
{
for (std::map<std::string, QPDF*>::iterator iter =
page_spec_qpdfs.begin();
iter != page_spec_qpdfs.end(); ++iter)
{
std::string const& filename = (*iter).first;
ClosedFileInputSource* cis = 0;
if (page_spec_cfis.count(filename))
{
cis = page_spec_cfis[filename];
cis->stayOpen(true);
}
QPDFPageDocumentHelper dh(*((*iter).second));
dh.removeUnreferencedResources();
if (cis)
{
cis->stayOpen(false);
}
}
}
// Clear all pages out of the primary QPDF's pages tree but leave
// the objects in place in the file so they can be re-added
// without changing their object numbers. This enables other
// things in the original file, such as outlines, to continue to
// work.
if (o.verbose)
{
std::cout << whoami
<< ": removing unreferenced pages from primary input"
<< std::endl;
}
QPDFPageDocumentHelper dh(pdf);
std::vector<QPDFPageObjectHelper> orig_pages = dh.getAllPages();
for (std::vector<QPDFPageObjectHelper>::iterator iter =
orig_pages.begin();
iter != orig_pages.end(); ++iter)
{
dh.removePage(*iter);
}
if (o.collate && (parsed_specs.size() > 1))
{
// Collate the pages by selecting one page from each spec in
// order. When a spec runs out of pages, stop selecting from
// it.
std::vector<QPDFPageData> new_parsed_specs;
size_t nspecs = parsed_specs.size();
size_t cur_page = 0;
bool got_pages = true;
while (got_pages)
{
got_pages = false;
for (size_t i = 0; i < nspecs; ++i)
{
QPDFPageData& page_data = parsed_specs.at(i);
if (cur_page < page_data.selected_pages.size())
{
got_pages = true;
new_parsed_specs.push_back(
QPDFPageData(
page_data,
page_data.selected_pages.at(cur_page)));
}
}
++cur_page;
}
parsed_specs = new_parsed_specs;
}
// Add all the pages from all the files in the order specified.
// Keep track of any pages from the original file that we are
// selecting.
std::set<int> selected_from_orig;
std::vector<QPDFObjectHandle> new_labels;
bool any_page_labels = false;
int out_pageno = 0;
for (std::vector<QPDFPageData>::iterator iter =
parsed_specs.begin();
iter != parsed_specs.end(); ++iter)
{
QPDFPageData& page_data = *iter;
ClosedFileInputSource* cis = 0;
if (page_spec_cfis.count(page_data.filename))
{
cis = page_spec_cfis[page_data.filename];
cis->stayOpen(true);
}
QPDFPageLabelDocumentHelper pldh(*page_data.qpdf);
if (pldh.hasPageLabels())
{
any_page_labels = true;
}
if (o.verbose)
{
std::cout << whoami << ": adding pages from "
<< page_data.filename << std::endl;
}
for (std::vector<int>::iterator pageno_iter =
page_data.selected_pages.begin();
pageno_iter != page_data.selected_pages.end();
++pageno_iter, ++out_pageno)
{
// Pages are specified from 1 but numbered from 0 in the
// vector
int pageno = *pageno_iter - 1;
pldh.getLabelsForPageRange(pageno, pageno, out_pageno,
new_labels);
QPDFPageObjectHelper to_copy = page_data.orig_pages.at(pageno);
QPDFObjGen to_copy_og = to_copy.getObjectHandle().getObjGen();
unsigned long long from_uuid = page_data.qpdf->getUniqueId();
if (copied_pages[from_uuid].count(to_copy_og))
{
QTC::TC("qpdf", "qpdf copy same page more than once",
(page_data.qpdf == &pdf) ? 0 : 1);
to_copy = to_copy.shallowCopyPage();
}
else
{
copied_pages[from_uuid].insert(to_copy_og);
}
dh.addPage(to_copy, false);
if (page_data.qpdf == &pdf)
{
// This is a page from the original file. Keep track
// of the fact that we are using it.
selected_from_orig.insert(pageno);
}
}
if (cis)
{
cis->stayOpen(false);
}
}
if (any_page_labels)
{
QPDFObjectHandle page_labels =
QPDFObjectHandle::newDictionary();
page_labels.replaceKey(
"/Nums", QPDFObjectHandle::newArray(new_labels));
pdf.getRoot().replaceKey("/PageLabels", page_labels);
}
// Delete page objects for unused page in primary. This prevents
// those objects from being preserved by being referred to from
// other places, such as the outlines dictionary.
for (size_t pageno = 0; pageno < orig_pages.size(); ++pageno)
{
if (selected_from_orig.count(pageno) == 0)
{
pdf.replaceObject(
orig_pages.at(pageno).getObjectHandle().getObjGen(),
QPDFObjectHandle::newNull());
}
}
}
| 1 |
[
"CWE-787"
] |
qpdf
|
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
| 309,945,331,195,298,500,000,000,000,000,000,000,000 | 277 |
Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition.
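To illustrate the kind of conversion discipline the message describes, here is a small hedged sketch in plain C; QPDF's own range-checking helpers differ in detail and raise exceptions rather than aborting.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative helper: narrow a size_t to int only after checking the
 * range, and fail loudly instead of silently truncating. */
static int size_to_int_checked(size_t v)
{
    if (v > (size_t)INT_MAX) {
        fprintf(stderr, "value %zu does not fit in int\n", v);
        abort();
    }
    return (int)v;
}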
|
RZ_API RZ_OWN char *rz_bin_dex_resolve_field_by_idx(RZ_NONNULL RzBinDex *dex, ut32 field_idx) {
rz_return_val_if_fail(dex, NULL);
if (field_idx >= rz_pvector_len(dex->field_ids)) {
return NULL;
}
DexFieldId *field_id = (DexFieldId *)rz_pvector_at(dex->field_ids, field_idx);
char *class_name = dex_resolve_type_id(dex, field_id->class_idx);
if (!class_name) {
return NULL;
}
char *name = dex_resolve_string_id(dex, field_id->name_idx);
if (!name) {
free(class_name);
return NULL;
}
char *type = dex_resolve_type_id(dex, field_id->type_idx);
if (!type) {
free(class_name);
free(name);
return NULL;
}
char *method = rz_str_newf("%s->%s %s", class_name, name, type);
free(type);
free(class_name);
free(name);
return method;
}
| 0 |
[
"CWE-787"
] |
rizin
|
1524f85211445e41506f98180f8f69f7bf115406
| 250,954,714,905,686,840,000,000,000,000,000,000,000 | 33 |
fix #2969 - oob write (1 byte) in dex.c
|
void WebContents::Unselect() {
web_contents()->CollapseSelection();
}
| 0 |
[] |
electron
|
e9fa834757f41c0b9fe44a4dffe3d7d437f52d34
| 143,567,570,793,988,180,000,000,000,000,000,000,000 | 3 |
fix: ensure ElectronBrowser mojo service is only bound to appropriate render frames (#33344)
* fix: ensure ElectronBrowser mojo service is only bound to authorized render frames
Notes: no-notes
* refactor: extract electron API IPC to its own mojo interface
* fix: just check main frame not primary main frame
Co-authored-by: Samuel Attard <[email protected]>
Co-authored-by: Samuel Attard <[email protected]>
|
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
int ipi_pcpu = vcpu->cpu;
int cpu = get_cpu();
if (waitqueue_active(&vcpu->wq)) {
wake_up_interruptible(&vcpu->wq);
++vcpu->stat.halt_wakeup;
}
/*
* We may be called synchronously with irqs disabled in guest mode,
* So need not to call smp_call_function_single() in that case.
*/
if (vcpu->guest_mode && vcpu->cpu != cpu)
smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
put_cpu();
}
| 0 |
[
"CWE-476"
] |
linux-2.6
|
59839dfff5eabca01cc4e20b45797a60a80af8cb
| 129,155,663,276,575,470,000,000,000,000,000,000,000 | 17 |
KVM: x86: check for cr3 validity in ioctl_set_sregs
Matt T. Yourst notes that kvm_arch_vcpu_ioctl_set_sregs lacks validity
checking for the new cr3 value:
"Userspace callers of KVM_SET_SREGS can pass a bogus value of cr3 to
the kernel. This will trigger a NULL pointer access in gfn_to_rmap()
when userspace next tries to call KVM_RUN on the affected VCPU and kvm
attempts to activate the new non-existent page table root.
This happens since kvm only validates that cr3 points to a valid guest
physical memory page when code *inside* the guest sets cr3. However, kvm
currently trusts the userspace caller (e.g. QEMU) on the host machine to
always supply a valid page table root, rather than properly validating
it along with the rest of the reloaded guest state."
http://sourceforge.net/tracker/?func=detail&atid=893831&aid=2687641&group_id=180599
Check for a valid cr3 address in kvm_arch_vcpu_ioctl_set_sregs, triple
fault in case of failure.
Cc: [email protected]
Signed-off-by: Marcelo Tosatti <[email protected]>
Signed-off-by: Avi Kivity <[email protected]>
|
bool mg_log_prefix(int level, const char *file, int line, const char *fname) {
// static unsigned long seq;
int max = LL_INFO;
struct mg_str k, v, s = mg_str(s_spec);
const char *p = strrchr(file, '/');
if (s_fn == NULL) return false;
if (p == NULL) p = strrchr(file, '\\');
p = p == NULL ? file : p + 1;
while (mg_commalist(&s, &k, &v)) {
if (v.len == 0) max = atoi(k.ptr);
if (v.len > 0 && strncmp(p, k.ptr, k.len) == 0) max = atoi(v.ptr);
}
if (level <= max) {
char timebuf[21], buf[50] = "";
time_t t = time(NULL);
struct tm tmp, *tm = gmtime_r(&t, &tmp);
int n;
(void) tmp;
strftime(timebuf, sizeof(timebuf), "%Y-%m-%d %H:%M:%S", tm);
n = snprintf(buf, sizeof(buf), "%s %d %s:%d:%s", timebuf, level, p, line,
fname);
if (n < 0 || n > (int) sizeof(buf) - 2) n = sizeof(buf) - 2;
while (n < (int) sizeof(buf) - 1) buf[n++] = ' ';
s_fn(buf, sizeof(buf) - 1, s_fn_param);
return true;
} else {
return false;
}
}
| 0 |
[
"CWE-552"
] |
mongoose
|
c65c8fdaaa257e0487ab0aaae9e8f6b439335945
| 225,108,906,122,510,140,000,000,000,000,000,000,000 | 33 |
Protect against the directory traversal in mg_upload()
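The one-line message doesn't show the check itself, so the following is only a hedged sketch of a typical traversal guard; the function name and exact rules are assumptions, not the actual mg_upload() code.

#include <string.h>
#include <stddef.h>

/* Reject client-supplied file names that are empty, contain a path
 * separator, or contain a ".." component. */
static int filename_is_safe(const char *name)
{
    if (name == NULL || name[0] == '\0')
        return 0;
    if (strchr(name, '/') != NULL || strchr(name, '\\') != NULL)
        return 0;
    if (strstr(name, "..") != NULL)
        return 0;
    return 1;
}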
|
static char *userauth_list(LIBSSH2_SESSION *session, const char *username,
unsigned int username_len)
{
static const unsigned char reply_codes[3] =
{ SSH_MSG_USERAUTH_SUCCESS, SSH_MSG_USERAUTH_FAILURE, 0 };
/* packet_type(1) + username_len(4) + service_len(4) +
service(14)"ssh-connection" + method_len(4) = 27 */
unsigned long methods_len;
unsigned char *s;
int rc;
if(session->userauth_list_state == libssh2_NB_state_idle) {
/* Zero the whole thing out */
memset(&session->userauth_list_packet_requirev_state, 0,
sizeof(session->userauth_list_packet_requirev_state));
session->userauth_list_data_len = username_len + 27;
s = session->userauth_list_data =
LIBSSH2_ALLOC(session, session->userauth_list_data_len);
if(!session->userauth_list_data) {
_libssh2_error(session, LIBSSH2_ERROR_ALLOC,
"Unable to allocate memory for userauth_list");
return NULL;
}
*(s++) = SSH_MSG_USERAUTH_REQUEST;
_libssh2_store_str(&s, username, username_len);
_libssh2_store_str(&s, "ssh-connection", 14);
_libssh2_store_u32(&s, 4); /* send "none" separately */
session->userauth_list_state = libssh2_NB_state_created;
}
if(session->userauth_list_state == libssh2_NB_state_created) {
rc = _libssh2_transport_send(session, session->userauth_list_data,
session->userauth_list_data_len,
(unsigned char *)"none", 4);
if(rc == LIBSSH2_ERROR_EAGAIN) {
_libssh2_error(session, LIBSSH2_ERROR_EAGAIN,
"Would block requesting userauth list");
return NULL;
}
/* now free the packet that was sent */
LIBSSH2_FREE(session, session->userauth_list_data);
session->userauth_list_data = NULL;
if(rc) {
_libssh2_error(session, LIBSSH2_ERROR_SOCKET_SEND,
"Unable to send userauth-none request");
session->userauth_list_state = libssh2_NB_state_idle;
return NULL;
}
session->userauth_list_state = libssh2_NB_state_sent;
}
if(session->userauth_list_state == libssh2_NB_state_sent) {
rc = _libssh2_packet_requirev(session, reply_codes,
&session->userauth_list_data,
&session->userauth_list_data_len, 0,
NULL, 0,
&session->userauth_list_packet_requirev_state);
if(rc == LIBSSH2_ERROR_EAGAIN) {
_libssh2_error(session, LIBSSH2_ERROR_EAGAIN,
"Would block requesting userauth list");
return NULL;
}
else if(rc || (session->userauth_list_data_len < 1)) {
_libssh2_error(session, rc, "Failed getting response");
session->userauth_list_state = libssh2_NB_state_idle;
return NULL;
}
if(session->userauth_list_data[0] == SSH_MSG_USERAUTH_SUCCESS) {
/* Wow, who'dve thought... */
_libssh2_error(session, LIBSSH2_ERROR_NONE, "No error");
LIBSSH2_FREE(session, session->userauth_list_data);
session->userauth_list_data = NULL;
session->state |= LIBSSH2_STATE_AUTHENTICATED;
session->userauth_list_state = libssh2_NB_state_idle;
return NULL;
}
if(session->userauth_list_data_len < 5) {
LIBSSH2_FREE(session, session->userauth_list_data);
session->userauth_list_data = NULL;
_libssh2_error(session, LIBSSH2_ERROR_PROTO,
"Unexpected packet size");
return NULL;
}
methods_len = _libssh2_ntohu32(session->userauth_list_data + 1);
if(methods_len >= session->userauth_list_data_len - 5) {
_libssh2_error(session, LIBSSH2_ERROR_OUT_OF_BOUNDARY,
"Unexpected userauth list size");
return NULL;
}
/* Do note that the memory areas overlap! */
memmove(session->userauth_list_data, session->userauth_list_data + 5,
methods_len);
session->userauth_list_data[methods_len] = '\0';
_libssh2_debug(session, LIBSSH2_TRACE_AUTH,
"Permitted auth methods: %s",
session->userauth_list_data);
}
session->userauth_list_state = libssh2_NB_state_idle;
return (char *) session->userauth_list_data;
}
| 0 |
[
"CWE-787"
] |
libssh2
|
dc109a7f518757741590bb993c0c8412928ccec2
| 164,775,050,950,921,160,000,000,000,000,000,000,000 | 111 |
Security fixes (#315)
* Bounds checks
Fixes for CVEs
https://www.libssh2.org/CVE-2019-3863.html
https://www.libssh2.org/CVE-2019-3856.html
* Packet length bounds check
CVE
https://www.libssh2.org/CVE-2019-3855.html
* Response length check
CVE
https://www.libssh2.org/CVE-2019-3859.html
* Bounds check
CVE
https://www.libssh2.org/CVE-2019-3857.html
* Bounds checking
CVE
https://www.libssh2.org/CVE-2019-3859.html
and additional data validation
* Check bounds before reading into buffers
* Bounds checking
CVE
https://www.libssh2.org/CVE-2019-3859.html
* declare SIZE_MAX and UINT_MAX if needed
|
std::shared_ptr<Grammar> perform_core(const char *s, size_t n,
const Rules &rules, std::string &start,
Log log) {
Data data;
any dt = &data;
auto r = g["Grammar"].parse(s, n, dt);
if (!r.ret) {
if (log) {
if (r.message_pos) {
auto line = line_info(s, r.message_pos);
log(line.first, line.second, r.message);
} else {
auto line = line_info(s, r.error_pos);
log(line.first, line.second, "syntax error");
}
}
return nullptr;
}
auto &grammar = *data.grammar;
// User provided rules
for (const auto &x : rules) {
auto name = x.first;
bool ignore = false;
if (!name.empty() && name[0] == '~') {
ignore = true;
name.erase(0, 1);
}
if (!name.empty()) {
auto &rule = grammar[name];
rule <= x.second;
rule.name = name;
rule.ignoreSemanticValue = ignore;
}
}
// Check duplicated definitions
bool ret = data.duplicates.empty();
for (const auto &x : data.duplicates) {
if (log) {
const auto &name = x.first;
auto ptr = x.second;
auto line = line_info(s, ptr);
log(line.first, line.second, "'" + name + "' is already defined.");
}
}
// Check missing definitions
for (auto &x : grammar) {
auto &rule = x.second;
ReferenceChecker vis(*data.grammar, rule.params);
rule.accept(vis);
for (const auto &y : vis.error_s) {
const auto &name = y.first;
const auto ptr = y.second;
if (log) {
auto line = line_info(s, ptr);
log(line.first, line.second, vis.error_message[name]);
}
ret = false;
}
}
if (!ret) { return nullptr; }
// Link references
for (auto &x : grammar) {
auto &rule = x.second;
LinkReferences vis(*data.grammar, rule.params);
rule.accept(vis);
}
// Check left recursion
ret = true;
for (auto &x : grammar) {
const auto &name = x.first;
auto &rule = x.second;
DetectLeftRecursion vis(name);
rule.accept(vis);
if (vis.error_s) {
if (log) {
auto line = line_info(s, vis.error_s);
log(line.first, line.second, "'" + name + "' is left recursive.");
}
ret = false;
}
}
if (!ret) { return nullptr; }
// Set root definition
auto &start_rule = (*data.grammar)[data.start];
// Check infinite loop
{
DetectInfiniteLoop vis(data.start_pos, data.start);
start_rule.accept(vis);
if (vis.has_error) {
if (log) {
auto line = line_info(s, vis.error_s);
log(line.first, line.second,
"infinite loop is detected in '" + vis.error_name + "'.");
}
return nullptr;
}
}
// Automatic whitespace skipping
if (grammar.count(WHITESPACE_DEFINITION_NAME)) {
for (auto &x : grammar) {
auto &rule = x.second;
auto ope = rule.get_core_operator();
if (IsLiteralToken::check(*ope)) { rule <= tok(ope); }
}
start_rule.whitespaceOpe =
wsp((*data.grammar)[WHITESPACE_DEFINITION_NAME].get_core_operator());
}
// Word expression
if (grammar.count(WORD_DEFINITION_NAME)) {
start_rule.wordOpe =
(*data.grammar)[WORD_DEFINITION_NAME].get_core_operator();
}
// Apply instructions
for (const auto &item : data.instructions) {
const auto &name = item.first;
const auto &instruction = item.second;
auto &rule = grammar[name];
if (instruction.type == "precedence") {
const auto &info =
any_cast<PrecedenceClimbing::BinOpeInfo>(instruction.data);
if (!apply_precedence_instruction(rule, info, s, log)) {
return nullptr;
}
}
}
// Set root definition
start = data.start;
return data.grammar;
}
| 1 |
[
"CWE-476"
] |
cpp-peglib
|
0061f393de54cf0326621c079dc2988336d1ebb3
| 316,834,522,271,309,500,000,000,000,000,000,000,000 | 152 |
Fix #121
|
int get_partition_id_key_nosub(partition_info *part_info,
uint32 *part_id,
longlong *func_value)
{
*part_id= get_part_id_key(part_info->part_field_array,
part_info->num_parts, func_value);
return 0;
}
| 0 |
[] |
mysql-server
|
be901b60ae59c93848c829d1b0b2cb523ab8692e
| 193,698,876,431,005,270,000,000,000,000,000,000,000 | 8 |
Bug#26390632: CREATE TABLE CAN CAUSE MYSQL TO EXIT.
Analysis
========
CREATE TABLE of an InnoDB table with a partition name
which exceeds the path limit can cause the server
to exit.
During the preparation of the partition name,
there was no check to identify whether the complete
path name for partition exceeds the max supported
path length, causing the server to exit during
subsequent processing.
Fix
===
During the preparation of partition name, check and report
an error if the partition path name exceeds the maximum path
name limit.
This is a 5.5 patch.
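A hedged sketch of the reported fix idea follows; the limit constant and the "#P#" separator are illustrative assumptions (MySQL's actual limit is FN_REFLEN and the check lives in the partitioning code).

#include <stdio.h>
#include <stddef.h>

#define MAX_PART_PATH 512   /* illustrative limit */

/* Build the partition file name with a bounded formatter and report an
 * error when the result would exceed the supported path length, instead
 * of letting later code crash. */
static int build_partition_path(char *out, size_t outsz,
                                const char *table_path, const char *part)
{
    int n = snprintf(out, outsz, "%s#P#%s", table_path, part);
    if (n < 0 || (size_t)n >= outsz || n >= MAX_PART_PATH)
        return -1;                    /* too long: report, don't exit */
    return 0;
}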
|
const Config *get_config() const { return config_; }
| 0 |
[] |
nghttp2
|
95efb3e19d174354ca50c65d5d7227d92bcd60e1
| 269,347,338,868,138,640,000,000,000,000,000,000,000 | 1 |
Don't read too greedily
|
static int wc_ecc_alloc_async(ecc_key* key)
{
int err = wc_ecc_alloc_mpint(key, &key->r);
if (err == 0)
err = wc_ecc_alloc_mpint(key, &key->s);
return err;
}
| 0 |
[
"CWE-326",
"CWE-203"
] |
wolfssl
|
1de07da61f0c8e9926dcbd68119f73230dae283f
| 251,014,938,931,414,800,000,000,000,000,000,000,000 | 7 |
Constant time EC map to affine for private operations
For fast math, use a constant-time modular inverse when mapping to
affine whenever the operation involves a private key: key generation,
shared-secret calculation, and signing.
|
static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
{
int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL);
if (id < 0)
return 0;
return (id | 0x800U) << 20;
}
| 0 |
[
"CWE-416"
] |
linux
|
3db09e762dc79584a69c10d74a6b98f89a9979f8
| 316,708,866,236,625,720,000,000,000,000,000,000,000 | 7 |
net/sched: cls_u32: fix netns refcount changes in u32_change()
We are now able to detect extra put_net() at the moment
they happen, instead of much later in correct code paths.
u32_init_knode() / tcf_exts_init() populates the ->exts.net
pointer, but as mentioned in tcf_exts_init(),
the refcount on netns has not been elevated yet.
The refcount is taken only once tcf_exts_get_net()
is called.
So the two u32_destroy_key() calls from u32_change()
are attempting to release an invalid reference on the netns.
syzbot report:
refcount_t: decrement hit 0; leaking memory.
WARNING: CPU: 0 PID: 21708 at lib/refcount.c:31 refcount_warn_saturate+0xbf/0x1e0 lib/refcount.c:31
Modules linked in:
CPU: 0 PID: 21708 Comm: syz-executor.5 Not tainted 5.18.0-rc2-next-20220412-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
RIP: 0010:refcount_warn_saturate+0xbf/0x1e0 lib/refcount.c:31
Code: 1d 14 b6 b2 09 31 ff 89 de e8 6d e9 89 fd 84 db 75 e0 e8 84 e5 89 fd 48 c7 c7 40 aa 26 8a c6 05 f4 b5 b2 09 01 e8 e5 81 2e 05 <0f> 0b eb c4 e8 68 e5 89 fd 0f b6 1d e3 b5 b2 09 31 ff 89 de e8 38
RSP: 0018:ffffc900051af1b0 EFLAGS: 00010286
RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000
RDX: 0000000000040000 RSI: ffffffff8160a0c8 RDI: fffff52000a35e28
RBP: 0000000000000004 R08: 0000000000000000 R09: 0000000000000000
R10: ffffffff81604a9e R11: 0000000000000000 R12: 1ffff92000a35e3b
R13: 00000000ffffffef R14: ffff8880211a0194 R15: ffff8880577d0a00
FS: 00007f25d183e700(0000) GS:ffff8880b9c00000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007f19c859c028 CR3: 0000000051009000 CR4: 00000000003506f0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Call Trace:
<TASK>
__refcount_dec include/linux/refcount.h:344 [inline]
refcount_dec include/linux/refcount.h:359 [inline]
ref_tracker_free+0x535/0x6b0 lib/ref_tracker.c:118
netns_tracker_free include/net/net_namespace.h:327 [inline]
put_net_track include/net/net_namespace.h:341 [inline]
tcf_exts_put_net include/net/pkt_cls.h:255 [inline]
u32_destroy_key.isra.0+0xa7/0x2b0 net/sched/cls_u32.c:394
u32_change+0xe01/0x3140 net/sched/cls_u32.c:909
tc_new_tfilter+0x98d/0x2200 net/sched/cls_api.c:2148
rtnetlink_rcv_msg+0x80d/0xb80 net/core/rtnetlink.c:6016
netlink_rcv_skb+0x153/0x420 net/netlink/af_netlink.c:2495
netlink_unicast_kernel net/netlink/af_netlink.c:1319 [inline]
netlink_unicast+0x543/0x7f0 net/netlink/af_netlink.c:1345
netlink_sendmsg+0x904/0xe00 net/netlink/af_netlink.c:1921
sock_sendmsg_nosec net/socket.c:705 [inline]
sock_sendmsg+0xcf/0x120 net/socket.c:725
____sys_sendmsg+0x6e2/0x800 net/socket.c:2413
___sys_sendmsg+0xf3/0x170 net/socket.c:2467
__sys_sendmsg+0xe5/0x1b0 net/socket.c:2496
do_syscall_x64 arch/x86/entry/common.c:50 [inline]
do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x7f25d0689049
Code: ff ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b8 ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007f25d183e168 EFLAGS: 00000246 ORIG_RAX: 000000000000002e
RAX: ffffffffffffffda RBX: 00007f25d079c030 RCX: 00007f25d0689049
RDX: 0000000000000000 RSI: 0000000020000340 RDI: 0000000000000005
RBP: 00007f25d06e308d R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
R13: 00007ffd0b752e3f R14: 00007f25d183e300 R15: 0000000000022000
</TASK>
Fixes: 35c55fc156d8 ("cls_u32: use tcf_exts_get_net() before call_rcu()")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: syzbot <[email protected]>
Cc: Cong Wang <[email protected]>
Cc: Jiri Pirko <[email protected]>
Acked-by: Jamal Hadi Salim <[email protected]>
Signed-off-by: Jakub Kicinski <[email protected]>
|
mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg, void *priv_ctx)
{
struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
u32 crdump_size = dev->priv.health.crdump_size;
u32 *cr_data;
u32 data_size;
u32 offset;
int err;
if (!mlx5_core_is_pf(dev))
return -EPERM;
cr_data = kvmalloc(crdump_size, GFP_KERNEL);
if (!cr_data)
return -ENOMEM;
err = mlx5_crdump_collect(dev, cr_data);
if (err)
goto free_data;
if (priv_ctx) {
struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
err = mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx);
if (err)
goto free_data;
}
err = devlink_fmsg_arr_pair_nest_start(fmsg, "crdump_data");
if (err)
goto free_data;
for (offset = 0; offset < crdump_size; offset += data_size) {
if (crdump_size - offset < MLX5_CR_DUMP_CHUNK_SIZE)
data_size = crdump_size - offset;
else
data_size = MLX5_CR_DUMP_CHUNK_SIZE;
err = devlink_fmsg_binary_put(fmsg, (char *)cr_data + offset,
data_size);
if (err)
goto free_data;
}
err = devlink_fmsg_arr_pair_nest_end(fmsg);
free_data:
kvfree(cr_data);
return err;
}
| 0 |
[
"CWE-400",
"CWE-401"
] |
linux
|
c7ed6d0183d5ea9bc31bcaeeba4070bd62546471
| 64,655,936,825,596,470,000,000,000,000,000,000,000 | 47 |
net/mlx5: fix memory leak in mlx5_fw_fatal_reporter_dump
In mlx5_fw_fatal_reporter_dump if mlx5_crdump_collect fails the
allocated memory for cr_data must be released otherwise there will be
memory leak. To fix this, this commit changes the return instruction
into goto error handling.
Fixes: 9b1f29823605 ("net/mlx5: Add support for FW fatal reporter dump")
Signed-off-by: Navid Emamdoost <[email protected]>
Signed-off-by: Saeed Mahameed <[email protected]>
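The pattern is the standard goto-cleanup idiom; this hedged sketch uses an illustrative stub in place of mlx5_crdump_collect() and plain malloc/free instead of kvmalloc/kvfree.

#include <stdlib.h>

/* Stub standing in for mlx5_crdump_collect(); always failing here just
 * exercises the error path. */
static int collect(char *buf, size_t len) { (void)buf; (void)len; return -1; }

/* Once cr_data is allocated, every error path jumps to a label that
 * frees it, so no early return can leak the buffer. */
static int dump(size_t crdump_size)
{
    int err;
    char *cr_data = malloc(crdump_size);

    if (cr_data == NULL)
        return -1;

    err = collect(cr_data, crdump_size);
    if (err)
        goto free_data;               /* was a bare "return err" upstream */

    /* ... format and emit cr_data here ... */

free_data:
    free(cr_data);
    return err;
}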
|
datetime_s_xmlschema(int argc, VALUE *argv, VALUE klass)
{
VALUE str, sg;
rb_scan_args(argc, argv, "02", &str, &sg);
switch (argc) {
case 0:
str = rb_str_new2("-4712-01-01T00:00:00+00:00");
case 1:
sg = INT2FIX(DEFAULT_SG);
}
{
VALUE hash = date_s__xmlschema(klass, str);
return dt_new_by_frags(klass, hash, sg);
}
}
| 1 |
[] |
date
|
3959accef8da5c128f8a8e2fd54e932a4fb253b0
| 61,041,937,090,538,860,000,000,000,000,000,000,000 | 18 |
Add length limit option for methods that parses date strings
`Date.parse` now raises an ArgumentError when a given date string is
longer than 128. You can configure the limit by giving `limit` keyword
arguments like `Date.parse(str, limit: 1000)`. If you pass `limit: nil`,
the limit is disabled.
Not only `Date.parse` but also the following methods are changed.
* Date._parse
* Date.parse
* DateTime.parse
* Date._iso8601
* Date.iso8601
* DateTime.iso8601
* Date._rfc3339
* Date.rfc3339
* DateTime.rfc3339
* Date._xmlschema
* Date.xmlschema
* DateTime.xmlschema
* Date._rfc2822
* Date.rfc2822
* DateTime.rfc2822
* Date._rfc822
* Date.rfc822
* DateTime.rfc822
* Date._jisx0301
* Date.jisx0301
* DateTime.jisx0301
|
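The change described above bounds the length of a date string before handing it to the parser. A small C sketch of the same guard follows; the names (parse_date_guarded, DEFAULT_LIMIT) are hypothetical and only the length check is illustrated, not Ruby's actual parser.

#include <stdio.h>
#include <string.h>
#include <errno.h>

#define DEFAULT_LIMIT 128   /* mirrors the default limit mentioned in the message */

/* Hypothetical expensive parser; the guard keeps pathological inputs away from it. */
static int parse_date_expensive(const char *s)
{
    return s[0] ? 0 : -1;
}

/* limit == 0 disables the check (analogous to passing limit: nil). */
static int parse_date_guarded(const char *s, size_t limit)
{
    if (limit && strlen(s) > limit) {
        errno = EINVAL;
        return -1;          /* reject over-long input up front */
    }
    return parse_date_expensive(s);
}

int main(void)
{
    char big[1024];
    memset(big, '9', sizeof(big) - 1);
    big[sizeof(big) - 1] = '\0';

    printf("short input:                 %d\n", parse_date_guarded("2021-01-01", DEFAULT_LIMIT));
    printf("long input rejected:         %d\n", parse_date_guarded(big, DEFAULT_LIMIT));
    printf("long input, limit disabled:  %d\n", parse_date_guarded(big, 0));
    return 0;
}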
cmsBool GrowNamedColorList(cmsNAMEDCOLORLIST* v)
{
cmsUInt32Number size;
_cmsNAMEDCOLOR * NewPtr;
if (v == NULL) return FALSE;
if (v ->Allocated == 0)
size = 64; // Initial guess
else
size = v ->Allocated * 2;
// Keep a maximum color lists can grow, 100K entries seems reasonable
if (size > 1024*100) return FALSE;
NewPtr = (_cmsNAMEDCOLOR*) _cmsRealloc(v ->ContextID, v ->List, size * sizeof(_cmsNAMEDCOLOR));
if (NewPtr == NULL)
return FALSE;
v ->List = NewPtr;
v ->Allocated = size;
return TRUE;
}
| 0 |
[] |
Little-CMS
|
886e2f524268efe8a1c3aa838c28e446fda24486
| 71,286,296,317,429,610,000,000,000,000,000,000,000 | 23 |
Fixes from coverity check
|
void virtio_gpu_get_display_info(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
{
struct virtio_gpu_resp_display_info display_info;
trace_virtio_gpu_cmd_get_display_info();
memset(&display_info, 0, sizeof(display_info));
display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
virtio_gpu_fill_display_info(g, &display_info);
virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
sizeof(display_info));
}
| 0 |
[] |
qemu
|
acfc4846508a02cc4c83aa27799fd74ac280bdb2
| 298,090,043,977,263,950,000,000,000,000,000,000,000 | 12 |
virtio-gpu: use VIRTIO_GPU_MAX_SCANOUTS
The value is defined in virtio_gpu.h already (changing from 4 to 16).
Signed-off-by: Marc-André Lureau <[email protected]>
Message-id: [email protected]
Signed-off-by: Gerd Hoffmann <[email protected]>
|
static uint64_t megasas_frame_get_context(MegasasState *s,
unsigned long frame)
{
PCIDevice *pci = &s->parent_obj;
return ldq_le_pci_dma(pci, frame + offsetof(struct mfi_frame_header, context));
}
| 0 |
[
"CWE-401"
] |
qemu
|
765a707000e838c30b18d712fe6cb3dd8e0435f3
| 142,673,983,360,967,150,000,000,000,000,000,000,000 | 6 |
megasas: fix guest-triggered memory leak
If the guest sets the sglist size to a value >=2GB, megasas_handle_dcmd
will return MFI_STAT_MEMORY_NOT_AVAILABLE without freeing the memory.
Avoid this by returning only the status from map_dcmd, and loading
cmd->iov_size in the caller.
Reported-by: Li Qiang <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
void test_nghttp2_http_record_request_method(void) {
nghttp2_session *session;
nghttp2_session_callbacks callbacks;
const nghttp2_nv conn_reqnv[] = {MAKE_NV(":method", "CONNECT"),
MAKE_NV(":authority", "localhost")};
const nghttp2_nv conn_resnv[] = {MAKE_NV(":status", "200"),
MAKE_NV("content-length", "9999")};
nghttp2_stream *stream;
ssize_t rv;
nghttp2_bufs bufs;
nghttp2_hd_deflater deflater;
nghttp2_mem *mem;
nghttp2_outbound_item *item;
mem = nghttp2_mem_default();
frame_pack_bufs_init(&bufs);
memset(&callbacks, 0, sizeof(nghttp2_session_callbacks));
callbacks.send_callback = null_send_callback;
nghttp2_session_client_new(&session, &callbacks, NULL);
nghttp2_hd_deflate_init(&deflater, mem);
CU_ASSERT(1 == nghttp2_submit_request(session, NULL, conn_reqnv,
ARRLEN(conn_reqnv), NULL, NULL));
CU_ASSERT(0 == nghttp2_session_send(session));
stream = nghttp2_session_get_stream(session, 1);
CU_ASSERT(NGHTTP2_HTTP_FLAG_METH_CONNECT == stream->http_flags);
rv = pack_headers(&bufs, &deflater, 1, NGHTTP2_FLAG_END_HEADERS, conn_resnv,
ARRLEN(conn_resnv), mem);
CU_ASSERT(0 == rv);
rv = nghttp2_session_mem_recv(session, bufs.head->buf.pos,
nghttp2_buf_len(&bufs.head->buf));
CU_ASSERT((ssize_t)nghttp2_buf_len(&bufs.head->buf) == rv);
CU_ASSERT((NGHTTP2_HTTP_FLAG_METH_CONNECT & stream->http_flags) > 0);
CU_ASSERT(-1 == stream->content_length);
/* content-length is ignored in 200 response to a CONNECT request */
item = nghttp2_session_get_next_ob_item(session);
CU_ASSERT(NULL == item);
nghttp2_hd_deflate_free(&deflater);
nghttp2_session_del(session);
nghttp2_bufs_free(&bufs);
}
| 0 |
[] |
nghttp2
|
0a6ce87c22c69438ecbffe52a2859c3a32f1620f
| 83,135,654,032,529,070,000,000,000,000,000,000,000 | 54 |
Add nghttp2_option_set_max_outbound_ack
|
static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len,
int flags)
{
struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
int ret;
if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
return -EINVAL;
ret = macvtap_do_read(q, iocb, m->msg_iov, total_len,
flags & MSG_DONTWAIT);
if (ret > total_len) {
m->msg_flags |= MSG_TRUNC;
ret = flags & MSG_TRUNC ? ret : total_len;
}
return ret;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
linux
|
b92946e2919134ebe2a4083e4302236295ea2a73
| 198,614,888,363,450,870,000,000,000,000,000,000,000 | 16 |
macvtap: zerocopy: validate vectors before building skb
There're several reasons that the vectors need to be validated:
- Return error when caller provides vectors whose num is greater than UIO_MAXIOV.
- Linearize part of skb when userspace provides vectors grater than MAX_SKB_FRAGS.
- Return error when userspace provides vectors whose total length may exceed
- MAX_SKB_FRAGS * PAGE_SIZE.
Signed-off-by: Jason Wang <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
|
psutil_proc_io_counters(PyObject *self, PyObject *args) {
long pid;
int rc;
perfstat_process_t procinfo;
perfstat_id_t id;
if (! PyArg_ParseTuple(args, "l", &pid))
return NULL;
snprintf(id.name, sizeof(id.name), "%ld", pid);
rc = perfstat_process(&id, &procinfo, sizeof(perfstat_process_t), 1);
if (rc <= 0) {
PyErr_SetFromErrno(PyExc_OSError);
return NULL;
}
return Py_BuildValue("(KKKK)",
procinfo.inOps, // XXX always 0
procinfo.outOps,
procinfo.inBytes, // XXX always 0
procinfo.outBytes);
}
| 0 |
[
"CWE-415"
] |
psutil
|
7d512c8e4442a896d56505be3e78f1156f443465
| 47,162,452,915,568,770,000,000,000,000,000,000,000 | 21 |
Use Py_CLEAR instead of Py_DECREF to also set the variable to NULL (#1616)
These files contain loops that convert system data into python objects
and during the process they create objects and dereference their
refcounts after they have been added to the resulting list.
However, in case of errors during the creation of those python objects,
the refcount to previously allocated objects is dropped again with
Py_XDECREF, which should be a no-op in case the paramater is NULL. Even
so, in most of these loops the variables pointing to the objects are
never set to NULL, even after Py_DECREF is called at the end of the loop
iteration. This means, after the first iteration, if an error occurs
those python objects will get their refcount dropped two times,
resulting in a possible double-free.
|
static ssize_t uart_mode_store(struct device *dev,
struct device_attribute *attr, const char *valbuf, size_t count)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
unsigned int v = simple_strtoul(valbuf, NULL, 0);
dev_dbg(dev, "%s: setting uart_mode = %d\n", __func__, v);
if (v < 256)
edge_port->bUartMode = v;
else
dev_err(dev, "%s - uart_mode %d is invalid\n", __func__, v);
return count;
}
| 0 |
[
"CWE-191"
] |
linux
|
654b404f2a222f918af9b0cd18ad469d0c941a8e
| 59,406,251,017,607,620,000,000,000,000,000,000,000 | 16 |
USB: serial: io_ti: fix information leak in completion handler
Add missing sanity check to the bulk-in completion handler to avoid an
integer underflow that can be triggered by a malicious device.
This avoids leaking 128 kB of memory content from after the URB transfer
buffer to user space.
Fixes: 8c209e6782ca ("USB: make actual_length in struct urb field u32")
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Cc: stable <[email protected]> # 2.6.30
Signed-off-by: Johan Hovold <[email protected]>
|
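The fix described above adds a sanity check so that a short transfer cannot underflow an unsigned length and leak unrelated memory. Here is a self-contained sketch of that kind of check; HEADER_LEN and handle_bulk_in are hypothetical names, not the io_ti driver code.

#include <stdio.h>
#include <string.h>

#define HEADER_LEN 2   /* hypothetical protocol header size */

/* Returns the number of payload bytes copied, or -1 on a short packet.
 * Without the length check, (actual_length - HEADER_LEN) underflows when
 * the device sends fewer than HEADER_LEN bytes, and the copy length
 * becomes enormous. */
static int handle_bulk_in(const unsigned char *data, size_t actual_length,
                          unsigned char *out, size_t out_len)
{
    size_t payload;

    if (actual_length < HEADER_LEN)
        return -1;                       /* the missing sanity check */

    payload = actual_length - HEADER_LEN;
    if (payload > out_len)
        payload = out_len;

    memcpy(out, data + HEADER_LEN, payload);
    return (int)payload;
}

int main(void)
{
    unsigned char pkt[] = { 0x01, 0x02, 'h', 'i' };
    unsigned char out[16];

    printf("normal packet:          %d\n", handle_bulk_in(pkt, sizeof(pkt), out, sizeof(out)));
    printf("short packet rejected:  %d\n", handle_bulk_in(pkt, 1, out, sizeof(out)));
    return 0;
}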
bool createUpgradeFilterChain(absl::string_view, const Http::FilterChainFactory::UpgradeMap*,
Http::FilterChainFactoryCallbacks&) override {
return false;
}
| 0 |
[
"CWE-400"
] |
envoy
|
dfddb529e914d794ac552e906b13d71233609bf7
| 259,881,163,148,932,620,000,000,000,000,000,000,000 | 4 |
listener: Add configurable accepted connection limits (#153)
Add support for per-listener limits on accepted connections.
Signed-off-by: Tony Allen <[email protected]>
|
TEST_F(HttpConnectionManagerImplTest, TestSrdsRouteNotFound) {
setup(false, "", true, true);
setupFilterChain(1, 0); // Recreate the chain for second stream.
EXPECT_CALL(*static_cast<const Router::MockScopedConfig*>(
scopedRouteConfigProvider()->config<Router::ScopedConfig>().get()),
getRouteConfig(_))
.Times(2)
.WillRepeatedly(Return(nullptr));
EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void {
StreamDecoder* decoder = &conn_manager_->newStream(response_encoder_);
HeaderMapPtr headers{
new TestHeaderMapImpl{{":authority", "host"}, {":method", "GET"}, {":path", "/foo"}}};
decoder->decodeHeaders(std::move(headers), true);
data.drain(4);
}));
EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true))
.WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus {
EXPECT_EQ(nullptr, decoder_filters_[0]->callbacks_->route());
return FilterHeadersStatus::StopIteration;
}));
EXPECT_CALL(*decoder_filters_[0], decodeComplete()); // end_stream=true.
Buffer::OwnedImpl fake_input("1234");
conn_manager_->onData(fake_input, false);
}
| 0 |
[
"CWE-400",
"CWE-703"
] |
envoy
|
afc39bea36fd436e54262f150c009e8d72db5014
| 303,297,009,489,210,540,000,000,000,000,000,000,000 | 27 |
Track byteSize of HeaderMap internally.
Introduces a cached byte size updated internally in HeaderMap. The value
is stored as an optional, and is cleared whenever a non-const pointer or
reference to a HeaderEntry is accessed. The cached value can be set with
refreshByteSize() which performs an iteration over the HeaderMap to sum
the size of each key and value in the HeaderMap.
Signed-off-by: Asra Ali <[email protected]>
|
void Downstream::set_chunked_response(bool f) { chunked_response_ = f; }
| 0 |
[] |
nghttp2
|
319d5ab1c6d916b6b8a0d85b2ae3f01b3ad04f2c
| 29,425,947,078,046,157,000,000,000,000,000,000,000 | 1 |
nghttpx: Fix request stall
Fix request stall if backend connection is reused and buffer is full.
|
void ide_flush_cache(IDEState *s)
{
if (s->bs == NULL) {
ide_flush_cb(s, 0);
return;
}
s->status |= BUSY_STAT;
bdrv_acct_start(s->bs, &s->acct, 0, BDRV_ACCT_FLUSH);
bdrv_aio_flush(s->bs, ide_flush_cb, s);
}
| 0 |
[
"CWE-189"
] |
qemu
|
940973ae0b45c9b6817bab8e4cf4df99a9ef83d7
| 94,793,628,274,221,980,000,000,000,000,000,000,000 | 11 |
ide: Correct improper smart self test counter reset in ide core.
The SMART self test counter was incorrectly being reset to zero,
not 1. This had the effect that on every 21st SMART EXECUTE OFFLINE:
* We would write off the beginning of a dynamically allocated buffer
* We forgot the SMART history
Fix this.
Signed-off-by: Benoit Canet <[email protected]>
Message-id: [email protected]
Reviewed-by: Markus Armbruster <[email protected]>
Cc: [email protected]
Acked-by: Kevin Wolf <[email protected]>
[PMM: tweaked commit message as per suggestions from Markus]
Signed-off-by: Peter Maydell <[email protected]>
|
getftypest(stat_T *st)
{
char *t;
if (S_ISREG(st->st_mode))
t = "file";
else if (S_ISDIR(st->st_mode))
t = "dir";
else if (S_ISLNK(st->st_mode))
t = "link";
else if (S_ISBLK(st->st_mode))
t = "bdev";
else if (S_ISCHR(st->st_mode))
t = "cdev";
else if (S_ISFIFO(st->st_mode))
t = "fifo";
else if (S_ISSOCK(st->st_mode))
t = "socket";
else
t = "other";
return (char_u*)t;
}
| 0 |
[
"CWE-823",
"CWE-703"
] |
vim
|
5921aeb5741fc6e84c870d68c7c35b93ad0c9f87
| 168,330,982,698,545,540,000,000,000,000,000,000,000 | 22 |
patch 8.2.4418: crash when using special multi-byte character
Problem: Crash when using special multi-byte character.
Solution: Don't use isalpha() for an arbitrary character.
|
COMPAT_SYSCALL_DEFINE5(waitid,
int, which, compat_pid_t, pid,
struct compat_siginfo __user *, infop, int, options,
struct compat_rusage __user *, uru)
{
struct rusage ru;
struct waitid_info info = {.status = 0};
long err = kernel_waitid(which, pid, &info, options, uru ? &ru : NULL);
int signo = 0;
if (err > 0) {
signo = SIGCHLD;
err = 0;
if (uru) {
/* kernel_waitid() overwrites everything in ru */
if (COMPAT_USE_64BIT_TIME)
err = copy_to_user(uru, &ru, sizeof(ru));
else
err = put_compat_rusage(&ru, uru);
if (err)
return -EFAULT;
}
}
if (!infop)
return err;
user_access_begin();
unsafe_put_user(signo, &infop->si_signo, Efault);
unsafe_put_user(0, &infop->si_errno, Efault);
unsafe_put_user(info.cause, &infop->si_code, Efault);
unsafe_put_user(info.pid, &infop->si_pid, Efault);
unsafe_put_user(info.uid, &infop->si_uid, Efault);
unsafe_put_user(info.status, &infop->si_status, Efault);
user_access_end();
return err;
Efault:
user_access_end();
return -EFAULT;
}
| 1 |
[
"CWE-20"
] |
linux
|
96ca579a1ecc943b75beba58bebb0356f6cc4b51
| 214,296,876,056,560,050,000,000,000,000,000,000,000 | 39 |
waitid(): Add missing access_ok() checks
Adds missing access_ok() checks.
CVE-2017-5123
Reported-by: Chris Salls <[email protected]>
Signed-off-by: Kees Cook <[email protected]>
Acked-by: Al Viro <[email protected]>
Fixes: 4c48abe91be0 ("waitid(): switch copyout of siginfo to unsafe_put_user()")
Cc: [email protected] # 4.13
Signed-off-by: Linus Torvalds <[email protected]>
|
int can_nice(const struct task_struct *p, const int nice)
{
/* convert nice value [19,-20] to rlimit style value [1,40] */
int nice_rlim = 20 - nice;
return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
capable(CAP_SYS_NICE));
}
| 0 |
[
"CWE-200"
] |
linux
|
4efbc454ba68def5ef285b26ebfcfdb605b52755
| 338,456,630,711,551,860,000,000,000,000,000,000,000 | 8 |
sched: Fix information leak in sys_sched_getattr()
We're copying the on-stack structure to userspace, but forgot to give
the right number of bytes to copy. This allows the calling process to
obtain up to PAGE_SIZE bytes from the stack (and possibly adjacent
kernel memory).
This fix copies only as much as we actually have on the stack
(attr->size defaults to the size of the struct) and leaves the rest of
the userspace-provided buffer untouched.
Found using kmemcheck + trinity.
Fixes: d50dde5a10f30 ("sched: Add new scheduler syscalls to support an extended scheduling parameters ABI")
Cc: Dario Faggioli <[email protected]>
Cc: Juri Lelli <[email protected]>
Cc: Ingo Molnar <[email protected]>
Signed-off-by: Vegard Nossum <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Thomas Gleixner <[email protected]>
|
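The leak described above came from copying a fixed, larger number of bytes to the caller instead of only the bytes that were actually initialized. A userspace sketch of the safer pattern — copy min(have, want) and leave the rest of the caller's buffer untouched; the struct and function names are hypothetical.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Hypothetical attribute struct; 'size' says how many bytes are valid. */
struct sched_attr_like {
    uint32_t size;
    uint32_t policy;
    uint64_t flags;
};

/* Copy only as much as we actually have (attr->size), never the whole
 * destination buffer, so no stack or adjacent memory leaks out. */
static void copy_attr_out(char *ubuf, size_t usize, const struct sched_attr_like *attr)
{
    size_t n = attr->size < usize ? attr->size : usize;
    memcpy(ubuf, attr, n);
}

int main(void)
{
    struct sched_attr_like a = { .size = sizeof(a), .policy = 0, .flags = 0 };
    char buf[4096];                       /* caller offered a big buffer */
    memset(buf, 'X', sizeof(buf));        /* sentinel: bytes past attr must stay 'X' */

    copy_attr_out(buf, sizeof(buf), &a);
    printf("byte just after the copied region: %c (unchanged sentinel)\n", buf[sizeof(a)]);
    return 0;
}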
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
bool resched_timer = false;
int signr;
/* We only dequeue private signals from ourselves, we don't let
* signalfd steal them
*/
signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
if (!signr) {
signr = __dequeue_signal(&tsk->signal->shared_pending,
mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
/*
* itimer signal ?
*
* itimers are process shared and we restart periodic
* itimers in the signal delivery path to prevent DoS
* attacks in the high resolution timer case. This is
* compliant with the old way of self-restarting
* itimers, as the SIGALRM is a legacy signal and only
* queued once. Changing the restart behaviour to
* restart the timer in the signal dequeue path is
* reducing the timer noise on heavy loaded !highres
* systems too.
*/
if (unlikely(signr == SIGALRM)) {
struct hrtimer *tmr = &tsk->signal->real_timer;
if (!hrtimer_is_queued(tmr) &&
tsk->signal->it_real_incr != 0) {
hrtimer_forward(tmr, tmr->base->get_time(),
tsk->signal->it_real_incr);
hrtimer_restart(tmr);
}
}
#endif
}
recalc_sigpending();
if (!signr)
return 0;
if (unlikely(sig_kernel_stop(signr))) {
/*
* Set a marker that we have dequeued a stop signal. Our
* caller might release the siglock and then the pending
* stop signal it is about to process is no longer in the
* pending bitmasks, but must still be cleared by a SIGCONT
* (and overruled by a SIGKILL). So those cases clear this
* shared flag after we've set it. Note that this flag may
* remain set after the signal we return is ignored or
* handled. That doesn't matter because its only purpose
* is to alert stop-signal processing code when another
* processor has come along and cleared the flag.
*/
current->jobctl |= JOBCTL_STOP_DEQUEUED;
}
#ifdef CONFIG_POSIX_TIMERS
if (resched_timer) {
/*
* Release the siglock to ensure proper locking order
* of timer locks outside of siglocks. Note, we leave
* irqs disabled here, since the posix-timers code is
* about to disable them again anyway.
*/
spin_unlock(&tsk->sighand->siglock);
posixtimer_rearm(info);
spin_lock(&tsk->sighand->siglock);
/* Don't expose the si_sys_private value to userspace */
info->si_sys_private = 0;
}
#endif
return signr;
}
| 0 |
[
"CWE-190"
] |
linux
|
d1e7fd6462ca9fc76650fbe6ca800e35b24267da
| 314,283,153,019,600,640,000,000,000,000,000,000,000 | 76 |
signal: Extend exec_id to 64bits
Replace the 32bit exec_id with a 64bit exec_id to make it impossible
to wrap the exec_id counter. With care an attacker can cause exec_id
wrap and send arbitrary signals to a newly exec'd parent. This
bypasses the signal sending checks if the parent changes their
credentials during exec.
The severity of this problem can been seen that in my limited testing
of a 32bit exec_id it can take as little as 19s to exec 65536 times.
Which means that it can take as little as 14 days to wrap a 32bit
exec_id. Adam Zabrocki has succeeded wrapping the self_exe_id in 7
days. Even my slower timing is in the uptime of a typical server.
Which means self_exec_id is simply a speed bump today, and if exec
gets noticably faster self_exec_id won't even be a speed bump.
Extending self_exec_id to 64bits introduces a problem on 32bit
architectures where reading self_exec_id is no longer atomic and can
take two read instructions. Which means that is is possible to hit
a window where the read value of exec_id does not match the written
value. So with very lucky timing after this change this still
remains expoiltable.
I have updated the update of exec_id on exec to use WRITE_ONCE
and the read of exec_id in do_notify_parent to use READ_ONCE
to make it clear that there is no locking between these two
locations.
Link: https://lore.kernel.org/kernel-hardening/[email protected]
Fixes: 2.3.23pre2
Cc: [email protected]
Signed-off-by: "Eric W. Biederman" <[email protected]>
|
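The commit above widens a counter to 64 bits and notes that on 32-bit architectures a plain 64-bit read can be torn, so the accesses are annotated. A minimal sketch using C11 atomics to obtain the same single-copy guarantee in portable code; this illustrates the idea only and is not the kernel's READ_ONCE/WRITE_ONCE implementation.

#include <stdio.h>
#include <stdint.h>
#include <stdatomic.h>

/* A 64-bit generation counter that may be read and written concurrently.
 * Relaxed atomic accesses are performed as a single unit, so a 32-bit
 * reader can never observe a half-updated ("torn") value. */
static _Atomic uint64_t exec_id = 1;

static void bump_exec_id(void)
{
    atomic_fetch_add_explicit(&exec_id, 1, memory_order_relaxed);
}

static uint64_t read_exec_id(void)
{
    return atomic_load_explicit(&exec_id, memory_order_relaxed);
}

int main(void)
{
    bump_exec_id();
    printf("exec_id = %llu\n", (unsigned long long)read_exec_id());
    return 0;
}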
static ssize_t event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uio_device *idev = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
}
| 0 |
[
"CWE-119",
"CWE-189",
"CWE-703"
] |
linux
|
7314e613d5ff9f0934f7a0f74ed7973b903315d1
| 155,827,085,922,745,510,000,000,000,000,000,000,000 | 6 |
Fix a few incorrectly checked [io_]remap_pfn_range() calls
Nico Golde reports a few straggling uses of [io_]remap_pfn_range() that
really should use the vm_iomap_memory() helper. This trivially converts
two of them to the helper, and comments about why the third one really
needs to continue to use remap_pfn_range(), and adds the missing size
check.
Reported-by: Nico Golde <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>
|
static void SFDDumpDHintList( FILE *sfd,const char *key, DStemInfo *d ) {
HintInstance *hi;
if ( d==NULL )
return;
fprintf(sfd, "%s", key );
for ( ; d!=NULL; d=d->next ) {
fprintf(sfd, "%g %g %g %g %g %g",
(double) d->left.x, (double) d->left.y,
(double) d->right.x, (double) d->right.y,
(double) d->unit.x, (double) d->unit.y );
if ( d->where!=NULL ) {
putc('<',sfd);
for ( hi=d->where; hi!=NULL; hi=hi->next )
fprintf(sfd, "%g %g%c", (double) hi->begin, (double) hi->end, hi->next?' ':'>');
}
putc(d->next?' ':'\n',sfd);
}
}
| 0 |
[
"CWE-416"
] |
fontforge
|
048a91e2682c1a8936ae34dbc7bd70291ec05410
| 66,059,815,398,243,780,000,000,000,000,000,000,000 | 19 |
Fix for #4084 Use-after-free (heap) in the SFD_GetFontMetaData() function
Fix for #4086 NULL pointer dereference in the SFDGetSpiros() function
Fix for #4088 NULL pointer dereference in the SFD_AssignLookups() function
Add empty sf->fontname string if it isn't set, fixing #4089 #4090 and many
other potential issues (many downstream calls to strlen() on the value).
|
nm_utils_arp_type_get_hwaddr_relevant_part(int arp_type, const guint8 **hwaddr, gsize *hwaddr_len)
{
g_return_val_if_fail(hwaddr && hwaddr_len
&& nm_utils_arp_type_validate_hwaddr(arp_type, *hwaddr, *hwaddr_len),
FALSE);
/* for infiniband, we only consider the last 8 bytes. */
if (arp_type == ARPHRD_INFINIBAND) {
*hwaddr += (INFINIBAND_ALEN - 8);
*hwaddr_len = 8;
}
return TRUE;
}
| 0 |
[
"CWE-20"
] |
NetworkManager
|
420784e342da4883f6debdfe10cde68507b10d27
| 183,931,546,966,462,920,000,000,000,000,000,000,000 | 14 |
core: fix crash in nm_wildcard_match_check()
It's not entirely clear how to treat %NULL.
Clearly "match.interface-name=eth0" should not
match with an interface %NULL. But what about
"match.interface-name=!eth0"? It's now implemented
that negative matches still succeed against %NULL.
What about "match.interface-name=*"? That probably
should also match with %NULL. So we treat %NULL really
like "".
Against commit 11cd443448bc ('iwd: Don't call IWD methods when device
unmanaged'), we got this backtrace:
#0 0x00007f1c164069f1 in __strnlen_avx2 () at ../sysdeps/x86_64/multiarch/strlen-avx2.S:62
#1 0x00007f1c1637ac9e in __fnmatch (pattern=<optimized out>, string=<optimized out>, string@entry=0x0, flags=flags@entry=0) at fnmatch.c:379
p = 0x0
res = <optimized out>
orig_pattern = <optimized out>
n = <optimized out>
wpattern = 0x7fff8d860730 L"pci-0000:03:00.0"
ps = {__count = 0, __value = {__wch = 0, __wchb = "\000\000\000"}}
wpattern_malloc = 0x0
wstring_malloc = 0x0
wstring = <optimized out>
alloca_used = 80
__PRETTY_FUNCTION__ = "__fnmatch"
#2 0x0000564484a978bf in nm_wildcard_match_check (str=0x0, patterns=<optimized out>, num_patterns=<optimized out>) at src/core/nm-core-utils.c:1959
is_inverted = 0
is_mandatory = 0
match = <optimized out>
p = 0x564486c43fa0 "pci-0000:03:00.0"
has_optional = 0
has_any_optional = 0
i = <optimized out>
#3 0x0000564484bf4797 in check_connection_compatible (self=<optimized out>, connection=<optimized out>, error=0x0) at src/core/devices/nm-device.c:7499
patterns = <optimized out>
device_driver = 0x564486c76bd0 "veth"
num_patterns = 1
priv = 0x564486cbe0b0
__func__ = "check_connection_compatible"
device_iface = <optimized out>
local = 0x564486c99a60
conn_iface = 0x0
klass = <optimized out>
s_match = 0x564486c63df0 [NMSettingMatch]
#4 0x0000564484c38491 in check_connection_compatible (device=0x564486cbe590 [NMDeviceVeth], connection=0x564486c6b160, error=0x0) at src/core/devices/nm-device-ethernet.c:348
self = 0x564486cbe590 [NMDeviceVeth]
s_wired = <optimized out>
Fixes: 3ced486f4162 ('libnm/match: extend syntax for match patterns with '|', '&', '!' and '\\'')
https://bugzilla.redhat.com/show_bug.cgi?id=1942741
|
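The crash above came from handing a NULL string to fnmatch(). Below is a small sketch of the defensive normalization the message describes — treating NULL like the empty string before matching; match_one() is a hypothetical wrapper, not the NetworkManager function.

#include <stdio.h>
#include <fnmatch.h>

/* Treat a missing value like "" so patterns such as "*" can still match it
 * and negative patterns can still be evaluated, instead of crashing. */
static int match_one(const char *pattern, const char *value)
{
    if (value == NULL)
        value = "";
    return fnmatch(pattern, value, 0) == 0;
}

int main(void)
{
    printf("pattern \"eth0\" vs NULL:  %d\n", match_one("eth0", NULL));   /* 0: no match */
    printf("pattern \"*\"    vs NULL:  %d\n", match_one("*", NULL));      /* 1: matches "" */
    printf("pattern \"eth*\" vs eth0:  %d\n", match_one("eth*", "eth0")); /* 1: normal match */
    return 0;
}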
static PyObject *do_curl_setstate(CurlObject *self, PyObject *args)
{
PyErr_SetString(PyExc_TypeError, "Curl objects do not support deserialization");
return NULL;
}
| 0 |
[] |
pycurl
|
2a743674dcf152beaaf6adaddb1ef51b18d1fffe
| 95,893,826,424,207,770,000,000,000,000,000,000,000 | 5 |
Fix use after free in Curl object's HTTPPOST setopt with unicode FORM_BUFFERPTR.
Fixes use after free in the Curl object's HTTPPOST setopt when a unicode value
is passed as a value with a FORM_BUFFERPTR. The str object created from
the passed in unicode object would have its buffer used but the unicode object
would be stored instead of the str object.
|
static enum_func_status
php_mysqlnd_cached_sha2_result_read(MYSQLND_CONN_DATA * conn, void * _packet)
{
MYSQLND_PACKET_CACHED_SHA2_RESULT * packet= (MYSQLND_PACKET_CACHED_SHA2_RESULT *) _packet;
MYSQLND_ERROR_INFO * error_info = conn->error_info;
MYSQLND_PFC * pfc = conn->protocol_frame_codec;
MYSQLND_VIO * vio = conn->vio;
MYSQLND_STATS * stats = conn->stats;
MYSQLND_CONNECTION_STATE * connection_state = &conn->state;
zend_uchar buf[SHA256_PK_REQUEST_RESP_BUFFER_SIZE];
zend_uchar *p = buf;
const zend_uchar * const begin = buf;
DBG_ENTER("php_mysqlnd_cached_sha2_result_read");
if (FAIL == mysqlnd_read_packet_header_and_body(&(packet->header), pfc, vio, stats, error_info, connection_state, buf, sizeof(buf), "PROT_CACHED_SHA2_RESULT_PACKET", PROT_CACHED_SHA2_RESULT_PACKET)) {
DBG_RETURN(FAIL);
}
BAIL_IF_NO_MORE_DATA;
packet->response_code = uint1korr(p);
p++;
BAIL_IF_NO_MORE_DATA;
if (ERROR_MARKER == packet->response_code) {
php_mysqlnd_read_error_from_line(p, packet->header.size - 1,
packet->error, sizeof(packet->error),
&packet->error_no, packet->sqlstate
);
DBG_RETURN(PASS);
}
if (0xFE == packet->response_code) {
/* Authentication Switch Response */
if (packet->header.size > (size_t) (p - buf)) {
packet->new_auth_protocol = mnd_pestrdup((char *)p, FALSE);
packet->new_auth_protocol_len = strlen(packet->new_auth_protocol);
p+= packet->new_auth_protocol_len + 1; /* +1 for the \0 */
packet->new_auth_protocol_data_len = packet->header.size - (size_t) (p - buf);
if (packet->new_auth_protocol_data_len) {
packet->new_auth_protocol_data = mnd_emalloc(packet->new_auth_protocol_data_len);
memcpy(packet->new_auth_protocol_data, p, packet->new_auth_protocol_data_len);
}
DBG_INF_FMT("The server requested switching auth plugin to : %s", packet->new_auth_protocol);
DBG_INF_FMT("Server salt : [%d][%.*s]", packet->new_auth_protocol_data_len, packet->new_auth_protocol_data_len, packet->new_auth_protocol_data);
}
DBG_RETURN(PASS);
}
if (0x1 != packet->response_code) {
DBG_ERR_FMT("Unexpected response code %d", packet->response_code);
}
/* This is not really the response code, but we reuse the field. */
packet->response_code = uint1korr(p);
p++;
BAIL_IF_NO_MORE_DATA;
packet->result = uint1korr(p);
BAIL_IF_NO_MORE_DATA;
DBG_RETURN(PASS);
premature_end:
DBG_ERR_FMT("OK packet %d bytes shorter than expected", p - begin - packet->header.size);
php_error_docref(NULL, E_WARNING, "SHA256_PK_REQUEST_RESPONSE packet "MYSQLND_SZ_T_SPEC" bytes shorter than expected",
p - begin - packet->header.size);
DBG_RETURN(FAIL);
| 0 |
[
"CWE-120"
] |
php-src
|
58006537fc5f133ae8549efe5118cde418b3ace9
| 16,470,164,598,147,790,000,000,000,000,000,000,000 | 67 |
Fix bug #81719: mysqlnd/pdo password buffer overflow
|
Pl_AES_PDF::flush(bool strip_padding)
{
assert(this->offset == this->buf_size);
if (first)
{
first = false;
bool return_after_init = false;
if (this->cbc_mode)
{
if (encrypt)
{
// Set cbc_block to the initialization vector, and if
// not zero, write it to the output stream.
initializeVector();
if (! (this->use_zero_iv || this->use_specified_iv))
{
getNext()->write(this->cbc_block, this->buf_size);
}
}
else if (this->use_zero_iv || this->use_specified_iv)
{
// Initialize vector with zeroes; zero vector was not
// written to the beginning of the input file.
initializeVector();
}
else
{
// Take the first block of input as the initialization
// vector. There's nothing to write at this time.
memcpy(this->cbc_block, this->inbuf, this->buf_size);
this->offset = 0;
return_after_init = true;
}
}
this->crypto->rijndael_init(
encrypt, this->key.get(), key_bytes,
this->cbc_mode, this->cbc_block);
if (return_after_init)
{
return;
}
}
if (this->encrypt)
{
this->crypto->rijndael_process(this->inbuf, this->outbuf);
}
else
{
this->crypto->rijndael_process(this->inbuf, this->outbuf);
}
unsigned int bytes = this->buf_size;
if (strip_padding)
{
unsigned char last = this->outbuf[this->buf_size - 1];
if (last <= this->buf_size)
{
bool strip = true;
for (unsigned int i = 1; i <= last; ++i)
{
if (this->outbuf[this->buf_size - i] != last)
{
strip = false;
break;
}
}
if (strip)
{
bytes -= last;
}
}
}
getNext()->write(this->outbuf, bytes);
this->offset = 0;
}
| 1 |
[
"CWE-787"
] |
qpdf
|
dc92574c10f3e2516ec6445b88c5d584f40df4e5
| 210,723,206,961,795,060,000,000,000,000,000,000,000 | 76 |
Fix some pipelines to be safe if downstream write fails (fuzz issue 28262)
|
GF_Box *fiin_New()
{
ISOM_DECL_BOX_ALLOC(FDItemInformationBox, GF_ISOM_BOX_TYPE_FIIN);
return (GF_Box *)tmp;
}
| 0 |
[
"CWE-125"
] |
gpac
|
bceb03fd2be95097a7b409ea59914f332fb6bc86
| 114,717,419,588,075,570,000,000,000,000,000,000,000 | 5 |
fixed 2 possible heap overflows (inc. #1088)
|
static int racls_add_cb(const mbentry_t *mbentry, void *rock)
{
struct txn **txn = (struct txn **)rock;
return mboxlist_update_racl(mbentry->name, NULL, mbentry, txn);
}
| 0 |
[
"CWE-20"
] |
cyrus-imapd
|
6bd33275368edfa71ae117de895488584678ac79
| 120,714,388,520,717,340,000,000,000,000,000,000,000 | 5 |
mboxlist: fix uninitialised memory use where pattern is "Other Users"
|
void mark_select_range_as_dependent(THD *thd,
SELECT_LEX *last_select,
SELECT_LEX *current_sel,
Field *found_field, Item *found_item,
Item_ident *resolved_item)
{
/*
Go from current SELECT to SELECT where field was resolved (it
have to be reachable from current SELECT, because it was already
done once when we resolved this field and cached result of
resolving)
*/
SELECT_LEX *previous_select= current_sel;
for (; previous_select->outer_select() != last_select;
previous_select= previous_select->outer_select())
{
Item_subselect *prev_subselect_item=
previous_select->master_unit()->item;
prev_subselect_item->used_tables_cache|= OUTER_REF_TABLE_BIT;
prev_subselect_item->const_item_cache= 0;
}
{
Item_subselect *prev_subselect_item=
previous_select->master_unit()->item;
Item_ident *dependent= resolved_item;
if (found_field == view_ref_found)
{
Item::Type type= found_item->type();
prev_subselect_item->used_tables_cache|=
found_item->used_tables();
dependent= ((type == Item::REF_ITEM || type == Item::FIELD_ITEM) ?
(Item_ident*) found_item :
0);
}
else
prev_subselect_item->used_tables_cache|=
found_field->table->map;
prev_subselect_item->const_item_cache= 0;
mark_as_dependent(thd, last_select, current_sel, resolved_item,
dependent);
}
}
| 0 |
[] |
server
|
b000e169562697aa072600695d4f0c0412f94f4f
| 195,087,597,793,564,160,000,000,000,000,000,000,000 | 42 |
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST, COL), NAME_CONST('NAME', NULL))
based on:
commit f7316aa0c9a
Author: Ajo Robert <[email protected]>
Date: Thu Aug 24 17:03:21 2017 +0530
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST,
COL), NAME_CONST('NAME', NULL))
Backport of Bug#19143243 fix.
NAME_CONST item can return NULL_ITEM type in case of incorrect arguments.
NULL_ITEM has special processing in Item_func_in function.
In Item_func_in::fix_length_and_dec an array of possible comparators is
created. Since NAME_CONST function has NULL_ITEM type, corresponding
array element is empty. Then NAME_CONST is wrapped to ITEM_CACHE.
ITEM_CACHE can not return proper type(NULL_ITEM) in Item_func_in::val_int(),
so the NULL_ITEM is attempted compared with an empty comparator.
The fix is to disable the caching of Item_name_const item.
|
decode_str(const char *input, int single, struct tok_state *tok)
{
PyObject* utf8 = NULL;
const char *str;
const char *s;
const char *newl[2] = {NULL, NULL};
int lineno = 0;
tok->input = str = translate_newlines(input, single, tok);
if (str == NULL)
return NULL;
tok->enc = NULL;
tok->str = str;
if (!check_bom(buf_getc, buf_ungetc, buf_setreadl, tok))
return error_ret(tok);
str = tok->str; /* string after BOM if any */
assert(str);
if (tok->enc != NULL) {
utf8 = translate_into_utf8(str, tok->enc);
if (utf8 == NULL)
return error_ret(tok);
str = PyBytes_AsString(utf8);
}
for (s = str;; s++) {
if (*s == '\0') break;
else if (*s == '\n') {
assert(lineno < 2);
newl[lineno] = s;
lineno++;
if (lineno == 2) break;
}
}
tok->enc = NULL;
/* need to check line 1 and 2 separately since check_coding_spec
assumes a single line as input */
if (newl[0]) {
if (!check_coding_spec(str, newl[0] - str, tok, buf_setreadl))
return error_ret(tok);
if (tok->enc == NULL && !tok->read_coding_spec && newl[1]) {
if (!check_coding_spec(newl[0]+1, newl[1] - newl[0],
tok, buf_setreadl))
return error_ret(tok);
}
}
if (tok->enc != NULL) {
assert(utf8 == NULL);
utf8 = translate_into_utf8(str, tok->enc);
if (utf8 == NULL)
return error_ret(tok);
str = PyBytes_AS_STRING(utf8);
}
assert(tok->decoding_buffer == NULL);
tok->decoding_buffer = utf8; /* CAUTION */
return str;
}
| 0 |
[
"CWE-125"
] |
cpython
|
dcfcd146f8e6fc5c2fc16a4c192a0c5f5ca8c53c
| 118,645,302,573,418,970,000,000,000,000,000,000,000 | 54 |
bpo-35766: Merge typed_ast back into CPython (GH-11645)
|
gif_get_next_step (GifContext *context)
{
unsigned char c;
while (TRUE) {
if (!gif_read (context, &c, 1)) {
return -1;
}
if (c == ';') {
/* GIF terminator */
/* hmm. Not 100% sure what to do about this. Should
* i try to return a blank image instead? */
context->state = GIF_DONE;
return 0;
}
if (c == '!') {
/* Check the extension */
gif_set_get_extension (context);
return 0;
}
/* look for frame */
if (c != ',') {
/* Not a valid start character */
continue;
}
/* load the frame */
gif_set_get_frame_info (context);
return 0;
}
}
| 0 |
[] |
gdk-pixbuf
|
f8569bb13e2aa1584dde61ca545144750f7a7c98
| 187,122,611,851,680,030,000,000,000,000,000,000,000 | 31 |
GIF: Don't return a partially initialized pixbuf structure
It was found that gdk-pixbuf GIF image loader gdk_pixbuf__gif_image_load()
routine did not properly handle certain return values from their subroutines.
A remote attacker could provide a specially-crafted GIF image, which once
opened in an application, linked against gdk-pixbuf would lead to gdk-pixbuf
to return partially initialized pixbuf structure, possibly having huge
width and height, leading to that particular application termination due
excessive memory use.
The CVE identifier of CVE-2011-2485 has been assigned to this issue.
|
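The advisory text above concerns a loader that returned a partially initialized structure when one of its subroutines failed. A generic C sketch of the safer contract — either return a fully initialized object or NULL; the image/loader names are hypothetical and unrelated to gdk-pixbuf internals.

#include <stdio.h>
#include <stdlib.h>

struct image {
    int width;
    int height;
    unsigned char *pixels;
};

/* Hypothetical header parser: fails on absurd dimensions. */
static int parse_header(const unsigned char *buf, int *w, int *h)
{
    *w = buf[0];
    *h = buf[1];
    return (*w > 0 && *h > 0) ? 0 : -1;
}

/* Either every field is valid or the caller gets NULL -- never a struct with
 * huge or unset dimensions that downstream code might trust. */
static struct image *load_image(const unsigned char *buf)
{
    struct image *img = calloc(1, sizeof(*img));
    if (!img)
        return NULL;

    if (parse_header(buf, &img->width, &img->height) != 0)
        goto fail;

    img->pixels = calloc((size_t)img->width * img->height, 1);
    if (!img->pixels)
        goto fail;

    return img;

fail:
    free(img);
    return NULL;
}

int main(void)
{
    unsigned char good[] = { 4, 4 }, bad[] = { 0, 200 };
    struct image *a = load_image(good), *b = load_image(bad);

    printf("good: %p, bad: %p\n", (void *)a, (void *)b);
    if (a) { free(a->pixels); free(a); }
    return 0;
}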
void Magick::Image::alphaChannel(AlphaChannelOption alphaOption_)
{
modifyImage();
GetPPException;
SetImageAlphaChannel(image(),alphaOption_,exceptionInfo);
ThrowImageException;
}
| 0 |
[
"CWE-416"
] |
ImageMagick
|
8c35502217c1879cb8257c617007282eee3fe1cc
| 197,011,381,193,232,350,000,000,000,000,000,000,000 | 7 |
Added missing return to avoid use after free.
|
tracing_thresh_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct trace_array *tr = filp->private_data;
int ret;
mutex_lock(&trace_types_lock);
ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
if (ret < 0)
goto out;
if (tr->current_trace->update_thresh) {
ret = tr->current_trace->update_thresh(tr);
if (ret < 0)
goto out;
}
ret = cnt;
out:
mutex_unlock(&trace_types_lock);
return ret;
}
| 0 |
[
"CWE-415"
] |
linux
|
4397f04575c44e1440ec2e49b6302785c95fd2f8
| 277,913,101,575,410,140,000,000,000,000,000,000,000 | 23 |
tracing: Fix possible double free on failure of allocating trace buffer
Jing Xia and Chunyan Zhang reported that on failing to allocate part of the
tracing buffer, memory is freed, but the pointers that point to them are not
initialized back to NULL, and later paths may try to free the freed memory
again. Jing and Chunyan fixed one of the locations that does this, but
missed a spot.
Link: http://lkml.kernel.org/r/[email protected]
Cc: [email protected]
Fixes: 737223fbca3b1 ("tracing: Consolidate buffer allocation code")
Reported-by: Jing Xia <[email protected]>
Reported-by: Chunyan Zhang <[email protected]>
Signed-off-by: Steven Rostedt (VMware) <[email protected]>
|
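The fix above is about resetting pointers to NULL after freeing them so that a later cleanup pass cannot free them again. A compact sketch of that idiom with a hypothetical two-buffer structure; it is not the tracing code itself.

#include <stdio.h>
#include <stdlib.h>

struct trace_buffers {
    void *buffer;
    void *max_buffer;
};

/* Safe to call more than once: each free is followed by NULL-ing the pointer,
 * and free(NULL) is a no-op, so repeated cleanup cannot double-free. */
static void free_trace_buffers(struct trace_buffers *tb)
{
    free(tb->buffer);
    tb->buffer = NULL;
    free(tb->max_buffer);
    tb->max_buffer = NULL;
}

static int alloc_trace_buffers(struct trace_buffers *tb, size_t size)
{
    tb->buffer = malloc(size);
    tb->max_buffer = malloc(size);
    if (!tb->buffer || !tb->max_buffer) {
        free_trace_buffers(tb);   /* partial-failure path reuses the cleanup */
        return -1;
    }
    return 0;
}

int main(void)
{
    struct trace_buffers tb = { 0 };

    if (alloc_trace_buffers(&tb, 1 << 12) == 0)
        puts("allocated");
    free_trace_buffers(&tb);      /* second cleanup is harmless */
    free_trace_buffers(&tb);
    return 0;
}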
static int cx24116_cmd_execute(struct dvb_frontend *fe, struct cx24116_cmd *cmd)
{
struct cx24116_state *state = fe->demodulator_priv;
int i, ret;
dprintk("%s()\n", __func__);
/* Load the firmware if required */
ret = cx24116_firmware_ondemand(fe);
if (ret != 0) {
printk(KERN_ERR "%s(): Unable initialise the firmware\n",
__func__);
return ret;
}
/* Write the command */
for (i = 0; i < cmd->len ; i++) {
dprintk("%s: 0x%02x == 0x%02x\n", __func__, i, cmd->args[i]);
cx24116_writereg(state, i, cmd->args[i]);
}
/* Start execution and wait for cmd to terminate */
cx24116_writereg(state, CX24116_REG_EXECUTE, 0x01);
while (cx24116_readreg(state, CX24116_REG_EXECUTE)) {
msleep(10);
if (i++ > 64) {
/* Avoid looping forever if the firmware does
not respond */
printk(KERN_WARNING "%s() Firmware not responding\n",
__func__);
return -EREMOTEIO;
}
}
return 0;
}
| 0 |
[
"CWE-476",
"CWE-119",
"CWE-125"
] |
linux
|
1fa2337a315a2448c5434f41e00d56b01a22283c
| 33,304,631,638,715,900,000,000,000,000,000,000,000 | 35 |
[media] cx24116: fix a buffer overflow when checking userspace params
The maximum size for a DiSEqC command is 6, according to the
userspace API. However, the code allows to write up much more values:
drivers/media/dvb-frontends/cx24116.c:983 cx24116_send_diseqc_msg() error: buffer overflow 'd->msg' 6 <= 23
Cc: [email protected]
Signed-off-by: Mauro Carvalho Chehab <[email protected]>
|
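The overflow above happened because a caller-controlled message length was copied into a 6-byte buffer without a check. A sketch of the bound check follows, with hypothetical names that only mirror the shape of a DiSEqC command struct.

#include <stdio.h>
#include <string.h>

#define DISEQC_MSG_MAX 6   /* maximum DiSEqC message length per the userspace API */

struct diseqc_cmd {
    unsigned char msg[DISEQC_MSG_MAX];
    unsigned char msg_len;
};

/* Reject anything longer than the destination buffer before copying. */
static int send_diseqc(struct diseqc_cmd *out,
                       const unsigned char *msg, size_t msg_len)
{
    if (msg_len > DISEQC_MSG_MAX)
        return -1;                 /* the missing validation */
    memcpy(out->msg, msg, msg_len);
    out->msg_len = (unsigned char)msg_len;
    return 0;
}

int main(void)
{
    struct diseqc_cmd cmd;
    unsigned char ok[4] = { 0xe0, 0x10, 0x38, 0xf0 };
    unsigned char evil[23] = { 0 };

    printf("valid message:    %d\n", send_diseqc(&cmd, ok, sizeof(ok)));
    printf("oversize rejected: %d\n", send_diseqc(&cmd, evil, sizeof(evil)));
    return 0;
}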
e_mail_part_update_validity (EMailPart *part,
CamelCipherValidity *validity,
EMailPartValidityFlags validity_type)
{
EMailPartValidityPair *pair;
EMailPartValidityFlags mask;
g_return_if_fail (E_IS_MAIL_PART (part));
g_return_if_fail (validity != NULL);
mask = E_MAIL_PART_VALIDITY_PGP | E_MAIL_PART_VALIDITY_SMIME;
pair = mail_part_find_validity_pair (part, validity_type & mask);
if (pair != NULL) {
pair->validity_type |= validity_type;
camel_cipher_validity_envelope (pair->validity, validity);
} else {
pair = g_new0 (EMailPartValidityPair, 1);
pair->validity_type = validity_type;
pair->validity = camel_cipher_validity_clone (validity);
g_queue_push_tail (&part->validities, pair);
}
}
| 1 |
[
"CWE-347"
] |
evolution
|
f66cd3e1db301d264563b4222a3574e2e58e2b85
| 36,298,089,107,314,850,000,000,000,000,000,000,000 | 24 |
eds-I#3 - [GPG] Mails that are not encrypted look encrypted
Related to https://gitlab.gnome.org/GNOME/evolution-data-server/issues/3
|
static void sysbus_fdc_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->props = sysbus_fdc_properties;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
| 0 |
[
"CWE-119"
] |
qemu
|
e907746266721f305d67bc0718795fedee2e824c
| 208,568,555,707,644,900,000,000,000,000,000,000,000 | 7 |
fdc: force the fifo access to be in bounds of the allocated buffer
During processing of certain commands such as FD_CMD_READ_ID and
FD_CMD_DRIVE_SPECIFICATION_COMMAND the fifo memory access could
get out of bounds leading to memory corruption with values coming
from the guest.
Fix this by making sure that the index is always bounded by the
allocated memory.
This is CVE-2015-3456.
Signed-off-by: Petr Matousek <[email protected]>
Reviewed-by: John Snow <[email protected]>
Signed-off-by: John Snow <[email protected]>
|
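The fix above forces every FIFO access to stay inside the allocated buffer. Below is a minimal sketch of that clamping idea using a modulo wrap; the buffer size and accessor names are hypothetical, not QEMU's.

#include <stdio.h>

#define FIFO_SIZE 512

static unsigned char fifo[FIFO_SIZE];

/* Any index derived from guest-controlled state is reduced into
 * [0, FIFO_SIZE) before use, so it can never read or write past the buffer. */
static unsigned char fifo_read(unsigned int pos)
{
    return fifo[pos % FIFO_SIZE];
}

static void fifo_write(unsigned int pos, unsigned char val)
{
    fifo[pos % FIFO_SIZE] = val;
}

int main(void)
{
    fifo_write(5, 0x42);
    fifo_write(100000, 0x99);   /* would be far out of bounds without the clamp */
    printf("%#x %#x\n", fifo_read(5), fifo_read(100000));
    return 0;
}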
static int nf_tables_commit_chain_prepare(struct net *net, struct nft_chain *chain)
{
const struct nft_expr *expr, *last;
struct nft_regs_track track = {};
unsigned int size, data_size;
void *data, *data_boundary;
struct nft_rule_dp *prule;
struct nft_rule *rule;
/* already handled or inactive chain? */
if (chain->blob_next || !nft_is_active_next(net, chain))
return 0;
rule = list_entry(&chain->rules, struct nft_rule, list);
data_size = 0;
list_for_each_entry_continue(rule, &chain->rules, list) {
if (nft_is_active_next(net, rule)) {
data_size += sizeof(*prule) + rule->dlen;
if (data_size > INT_MAX)
return -ENOMEM;
}
}
data_size += offsetof(struct nft_rule_dp, data); /* last rule */
chain->blob_next = nf_tables_chain_alloc_rules(data_size);
if (!chain->blob_next)
return -ENOMEM;
data = (void *)chain->blob_next->data;
data_boundary = data + data_size;
size = 0;
list_for_each_entry_continue(rule, &chain->rules, list) {
if (!nft_is_active_next(net, rule))
continue;
prule = (struct nft_rule_dp *)data;
data += offsetof(struct nft_rule_dp, data);
if (WARN_ON_ONCE(data > data_boundary))
return -ENOMEM;
size = 0;
track.last = nft_expr_last(rule);
nft_rule_for_each_expr(expr, last, rule) {
track.cur = expr;
if (nft_expr_reduce(&track, expr)) {
expr = track.cur;
continue;
}
if (WARN_ON_ONCE(data + expr->ops->size > data_boundary))
return -ENOMEM;
memcpy(data + size, expr, expr->ops->size);
size += expr->ops->size;
}
if (WARN_ON_ONCE(size >= 1 << 12))
return -ENOMEM;
prule->handle = rule->handle;
prule->dlen = size;
prule->is_last = 0;
data += size;
size = 0;
chain->blob_next->size += (unsigned long)(data - (void *)prule);
}
prule = (struct nft_rule_dp *)data;
data += offsetof(struct nft_rule_dp, data);
if (WARN_ON_ONCE(data > data_boundary))
return -ENOMEM;
nft_last_rule(chain->blob_next, prule);
return 0;
}
| 0 |
[
"CWE-787"
] |
linux
|
6e1acfa387b9ff82cfc7db8cc3b6959221a95851
| 252,471,165,109,778,500,000,000,000,000,000,000,000 | 79 |
netfilter: nf_tables: validate registers coming from userspace.
Bail out in case userspace uses unsupported registers.
Fixes: 49499c3e6e18 ("netfilter: nf_tables: switch registers to 32 bit addressing")
Signed-off-by: Pablo Neira Ayuso <[email protected]>
|
Status MakeShapeFromFormat(TensorFormat format, DimensionOrConstant N,
const std::vector<DimensionOrConstant>& spatial,
DimensionOrConstant C, ShapeHandle* out,
shape_inference::InferenceContext* context) {
const int num_dims = GetTensorDimsFromSpatialDims(spatial.size(), format);
std::vector<DimensionHandle> dims_actual(num_dims);
dims_actual[GetTensorBatchDimIndex(num_dims, format)] = context->MakeDim(N);
int outer_c_index = GetTensorFeatureDimIndex(num_dims, format);
dims_actual[outer_c_index] = context->MakeDim(C);
if (format == FORMAT_NCHW_VECT_C) {
dims_actual[GetTensorInnerFeatureDimIndex(num_dims, format)] =
context->MakeDim(4);
} else if (format == FORMAT_NHWC_VECT_W) {
dims_actual[GetTensorInnerWidthDimIndex(num_dims, format)] =
context->MakeDim(4);
}
for (int spatial_dim = 0, end = spatial.size(); spatial_dim < end;
spatial_dim++) {
dims_actual[GetTensorSpatialDimIndex(num_dims, format, spatial_dim)] =
context->MakeDim(spatial[spatial_dim]);
}
*out = context->MakeShape(dims_actual);
return Status::OK();
}
| 0 |
[
"CWE-369"
] |
tensorflow
|
8a793b5d7f59e37ac7f3cd0954a750a2fe76bad4
| 164,923,715,571,851,730,000,000,000,000,000,000,000 | 24 |
Prevent division by 0 in common shape functions.
PiperOrigin-RevId: 387712197
Change-Id: Id25c7460e35b68aeeeac23b9a88e455b443ee149
|
static int ZEND_FASTCALL ZEND_CLONE_SPEC_CV_HANDLER(ZEND_OPCODE_HANDLER_ARGS)
{
zend_op *opline = EX(opline);
zval *obj = _get_zval_ptr_cv(&opline->op1, EX(Ts), BP_VAR_R TSRMLS_CC);
zend_class_entry *ce;
zend_function *clone;
zend_object_clone_obj_t clone_call;
if (IS_CV == IS_CONST ||
(IS_CV == IS_VAR && !obj) ||
Z_TYPE_P(obj) != IS_OBJECT) {
zend_error_noreturn(E_ERROR, "__clone method called on non-object");
}
ce = Z_OBJCE_P(obj);
clone = ce ? ce->clone : NULL;
clone_call = Z_OBJ_HT_P(obj)->clone_obj;
if (!clone_call) {
if (ce) {
zend_error_noreturn(E_ERROR, "Trying to clone an uncloneable object of class %s", ce->name);
} else {
zend_error_noreturn(E_ERROR, "Trying to clone an uncloneable object");
}
}
if (ce && clone) {
if (clone->op_array.fn_flags & ZEND_ACC_PRIVATE) {
/* Ensure that if we're calling a private function, we're allowed to do so.
*/
if (ce != EG(scope)) {
zend_error_noreturn(E_ERROR, "Call to private %s::__clone() from context '%s'", ce->name, EG(scope) ? EG(scope)->name : "");
}
} else if ((clone->common.fn_flags & ZEND_ACC_PROTECTED)) {
/* Ensure that if we're calling a protected function, we're allowed to do so.
*/
if (!zend_check_protected(clone->common.scope, EG(scope))) {
zend_error_noreturn(E_ERROR, "Call to protected %s::__clone() from context '%s'", ce->name, EG(scope) ? EG(scope)->name : "");
}
}
}
EX_T(opline->result.u.var).var.ptr_ptr = &EX_T(opline->result.u.var).var.ptr;
if (!EG(exception)) {
ALLOC_ZVAL(EX_T(opline->result.u.var).var.ptr);
Z_OBJVAL_P(EX_T(opline->result.u.var).var.ptr) = clone_call(obj TSRMLS_CC);
Z_TYPE_P(EX_T(opline->result.u.var).var.ptr) = IS_OBJECT;
Z_SET_REFCOUNT_P(EX_T(opline->result.u.var).var.ptr, 1);
Z_SET_ISREF_P(EX_T(opline->result.u.var).var.ptr);
if (!RETURN_VALUE_USED(opline) || EG(exception)) {
zval_ptr_dtor(&EX_T(opline->result.u.var).var.ptr);
}
}
ZEND_VM_NEXT_OPCODE();
}
| 0 |
[] |
php-src
|
ce96fd6b0761d98353761bf78d5bfb55291179fd
| 79,552,618,559,082,680,000,000,000,000,000,000,000 | 56 |
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
|
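The note above is about rejecting paths that contain an embedded NUL byte, which would otherwise silently truncate the path at the C level. A standalone sketch of the check; the function name is hypothetical and unrelated to PHP's actual macro.

#include <stdio.h>
#include <string.h>

/* A path handed over with an explicit length is only valid if strlen() agrees:
 * any embedded '\0' makes the C string shorter than the declared length. */
static int check_null_path(const char *path, size_t path_len)
{
    return strlen(path) == path_len;   /* 0 means an embedded NUL was found */
}

int main(void)
{
    const char good[] = "/tmp/safe.txt";
    const char evil[] = "/tmp/evil.php\0.txt";   /* NUL hidden inside the literal */

    printf("good path accepted: %d\n", check_null_path(good, sizeof(good) - 1));
    printf("evil path accepted: %d\n", check_null_path(evil, sizeof(evil) - 1));
    return 0;
}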