func (string, 0-484k chars) | target (int64, 0-1) | cwe (list, 0-4 entries) | project (string, 799 classes) | commit_id (string, 40 chars) | hash (float64) | size (int64, 1-24k) | message (string, 0-13.3k chars)
---|---|---|---|---|---|---|---|
njs_string_slice_args(njs_vm_t *vm, njs_slice_prop_t *slice, njs_value_t *args,
    njs_uint_t nargs)
{
    int64_t      start, end, length;
    njs_int_t    ret;
    njs_value_t  *value;

    length = slice->string_length;

    value = njs_arg(args, nargs, 1);

    if (njs_slow_path(!njs_is_number(value))) {
        ret = njs_value_to_integer(vm, value, &start);
        if (njs_slow_path(ret != NJS_OK)) {
            return ret;
        }

    } else {
        start = njs_number_to_integer(njs_number(value));
    }

    if (start < 0) {
        start += length;

        if (start < 0) {
            start = 0;
        }
    }

    if (start >= length) {
        start = 0;
        length = 0;

    } else {
        value = njs_arg(args, nargs, 2);

        if (njs_slow_path(!njs_is_number(value))) {
            if (njs_is_defined(value)) {
                ret = njs_value_to_integer(vm, value, &end);
                if (njs_slow_path(ret != NJS_OK)) {
                    return ret;
                }

            } else {
                end = length;
            }

        } else {
            end = njs_number_to_integer(njs_number(value));
        }

        if (end < 0) {
            end += length;
        }

        if (length >= end) {
            length = end - start;

            if (length < 0) {
                start = 0;
                length = 0;
            }

        } else {
            length -= start;
        }
    }

    slice->start = start;
    slice->length = length;

    return NJS_OK;
}
| 0 |
[] |
njs
|
36f04a3178fcb6da8513cc3dbf35215c2a581b3f
| 117,597,368,835,140,000,000,000,000,000,000,000,000 | 73 |
Fixed String.prototype.replace() with byte strings.
This closes #522 issue on Github.
|
static void cryptinfo_node_end(void *sax_cbck, const char *node_name, const char *name_space)
{
        GF_CryptInfo *info = (GF_CryptInfo *)sax_cbck;
        if (!strcmp(node_name, "OMATextHeader")) {
                info->in_text_header = 0;
                return;
        }
}
| 0 |
[
"CWE-787"
] |
gpac
|
ea1eca00fd92fa17f0e25ac25652622924a9a6a0
| 24,810,234,710,489,434,000,000,000,000,000,000,000 | 8 |
fixed #2138
|
set_info_text_and_icon (GSWindow   *window,
                        const char *icon_stock_id,
                        const char *primary_text,
                        const char *secondary_text)
{
        GtkWidget *content_area;
        GtkWidget *hbox_content;
        GtkWidget *image;
        GtkWidget *vbox;
        gchar     *primary_markup;
        gchar     *secondary_markup;
        GtkWidget *primary_label;
        GtkWidget *secondary_label;

        hbox_content = gtk_hbox_new (FALSE, 8);
        gtk_widget_show (hbox_content);

        image = gtk_image_new_from_stock (icon_stock_id, GTK_ICON_SIZE_DIALOG);
        gtk_widget_show (image);
        gtk_box_pack_start (GTK_BOX (hbox_content), image, FALSE, FALSE, 0);
        gtk_misc_set_alignment (GTK_MISC (image), 0.5, 0);

        vbox = gtk_vbox_new (FALSE, 6);
        gtk_widget_show (vbox);
        gtk_box_pack_start (GTK_BOX (hbox_content), vbox, FALSE, FALSE, 0);

        primary_markup = g_strdup_printf ("<b>%s</b>", primary_text);
        primary_label = gtk_label_new (primary_markup);
        g_free (primary_markup);
        gtk_widget_show (primary_label);
        gtk_box_pack_start (GTK_BOX (vbox), primary_label, TRUE, TRUE, 0);
        gtk_label_set_use_markup (GTK_LABEL (primary_label), TRUE);
        gtk_label_set_line_wrap (GTK_LABEL (primary_label), TRUE);
        gtk_misc_set_alignment (GTK_MISC (primary_label), 0, 0.5);

        if (secondary_text != NULL) {
                secondary_markup = g_strdup_printf ("<small>%s</small>",
                                                    secondary_text);
                secondary_label = gtk_label_new (secondary_markup);
                g_free (secondary_markup);
                gtk_widget_show (secondary_label);
                gtk_box_pack_start (GTK_BOX (vbox), secondary_label, TRUE, TRUE, 0);
                gtk_label_set_use_markup (GTK_LABEL (secondary_label), TRUE);
                gtk_label_set_line_wrap (GTK_LABEL (secondary_label), TRUE);
                gtk_misc_set_alignment (GTK_MISC (secondary_label), 0, 0.5);
        }

        /* remove old content */
        content_area = gtk_info_bar_get_content_area (GTK_INFO_BAR (window->priv->info_bar));
        if (window->priv->info_content != NULL) {
                gtk_container_remove (GTK_CONTAINER (content_area), window->priv->info_content);
        }

        gtk_box_pack_start (GTK_BOX (content_area),
                            hbox_content,
                            TRUE, FALSE, 0);
        window->priv->info_content = hbox_content;
}
| 0 |
[] |
gnome-screensaver
|
a5f66339be6719c2b8fc478a1d5fc6545297d950
| 95,445,895,977,845,380,000,000,000,000,000,000,000 | 57 |
Ensure keyboard grab and unlock dialog exist after monitor removal
gnome-screensaver currently doesn't deal with monitors getting
removed properly. If the unlock dialog is on the removed monitor
then the unlock dialog and its associated keyboard grab are not
moved to an existing monitor when the monitor removal is processed.
This means that users can gain access to the locked system by placing
the mouse pointer on an external monitor and then disconnect the
external monitor.
CVE-2010-0414
https://bugzilla.gnome.org/show_bug.cgi?id=609337
|
static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd,
                                 struct compat_ifreq __user *u_ifreq32)
{
        struct ifreq __user *u_ifreq64;
        char tmp_buf[IFNAMSIZ];
        void __user *data64;
        u32 data32;

        if (copy_from_user(&tmp_buf[0], &(u_ifreq32->ifr_ifrn.ifrn_name[0]),
                           IFNAMSIZ))
                return -EFAULT;
        if (get_user(data32, &u_ifreq32->ifr_ifru.ifru_data))
                return -EFAULT;
        data64 = compat_ptr(data32);

        u_ifreq64 = compat_alloc_user_space(sizeof(*u_ifreq64));

        if (copy_to_user(&u_ifreq64->ifr_ifrn.ifrn_name[0], &tmp_buf[0],
                         IFNAMSIZ))
                return -EFAULT;
        if (put_user(data64, &u_ifreq64->ifr_ifru.ifru_data))
                return -EFAULT;

        return dev_ioctl(net, cmd, u_ifreq64);
}
| 0 |
[
"CWE-264"
] |
net
|
4de930efc23b92ddf88ce91c405ee645fe6e27ea
| 148,831,751,879,429,000,000,000,000,000,000,000,000 | 25 |
net: validate the range we feed to iov_iter_init() in sys_sendto/sys_recvfrom
Cc: [email protected] # v3.19
Signed-off-by: Al Viro <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
void netlink_set_nonroot(int protocol, unsigned int flags)
{
        if ((unsigned int)protocol < MAX_LINKS)
                nl_table[protocol].nl_nonroot = flags;
}
| 0 |
[] |
linux-2.6
|
16e5726269611b71c930054ffe9b858c1cea88eb
| 290,865,963,659,963,140,000,000,000,000,000,000,000 | 5 |
af_unix: dont send SCM_CREDENTIALS by default
Since commit 7361c36c5224 (af_unix: Allow credentials to work across
user and pid namespaces) af_unix performance dropped a lot.
This is because we now take a reference on pid and cred in each write(),
and release them in read(), usually done from another process,
eventually from another cpu. This triggers false sharing.
# Events: 154K cycles
#
# Overhead  Command    Shared Object      Symbol
# ........  .......    .................  .........................
#
    10.40%  hackbench  [kernel.kallsyms]  [k] put_pid
     8.60%  hackbench  [kernel.kallsyms]  [k] unix_stream_recvmsg
     7.87%  hackbench  [kernel.kallsyms]  [k] unix_stream_sendmsg
     6.11%  hackbench  [kernel.kallsyms]  [k] do_raw_spin_lock
     4.95%  hackbench  [kernel.kallsyms]  [k] unix_scm_to_skb
     4.87%  hackbench  [kernel.kallsyms]  [k] pid_nr_ns
     4.34%  hackbench  [kernel.kallsyms]  [k] cred_to_ucred
     2.39%  hackbench  [kernel.kallsyms]  [k] unix_destruct_scm
     2.24%  hackbench  [kernel.kallsyms]  [k] sub_preempt_count
     1.75%  hackbench  [kernel.kallsyms]  [k] fget_light
     1.51%  hackbench  [kernel.kallsyms]  [k] __mutex_lock_interruptible_slowpath
     1.42%  hackbench  [kernel.kallsyms]  [k] sock_alloc_send_pskb
This patch includes SCM_CREDENTIALS information in an af_unix message/skb
only if requested by the sender (see man 7 unix for details on how to
include ancillary data using the sendmsg() system call).
Note: This might break buggy applications that expected SCM_CREDENTIALS
from an unaware write() system call, with the receiver not using the
SO_PASSCRED socket option.
If SOCK_PASSCRED is set on source or destination socket, we still
include credentials for mere write() syscalls.
Performance boost in hackbench : more than 50% gain on a 16 thread
machine (2 quad-core cpus, 2 threads per core)
hackbench 20 thread 2000
4.228 sec instead of 9.102 sec
Signed-off-by: Eric Dumazet <[email protected]>
Acked-by: Tim Chen <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
{
        struct edgeport_port *edge_port = usb_get_serial_port_data(port);
        struct edgeport_serial *edge_serial;
        struct usb_device *dev;
        struct urb *urb;
        int port_number;
        int status;
        u16 open_settings;
        u8 transaction_timeout;

        if (edge_port == NULL)
                return -ENODEV;

        port_number = port->port_number;

        dev = port->serial->dev;

        /* turn off loopback */
        status = ti_do_config(edge_port, UMPC_SET_CLR_LOOPBACK, 0);
        if (status) {
                dev_err(&port->dev,
                        "%s - cannot send clear loopback command, %d\n",
                        __func__, status);
                return status;
        }

        /* set up the port settings */
        if (tty)
                edge_set_termios(tty, port, &tty->termios);

        /* open up the port */

        /* milliseconds to timeout for DMA transfer */
        transaction_timeout = 2;

        edge_port->ump_read_timeout =
                max(20, ((transaction_timeout * 3) / 2));

        /* milliseconds to timeout for DMA transfer */
        open_settings = (u8)(UMP_DMA_MODE_CONTINOUS |
                             UMP_PIPE_TRANS_TIMEOUT_ENA |
                             (transaction_timeout << 2));

        dev_dbg(&port->dev, "%s - Sending UMPC_OPEN_PORT\n", __func__);

        /* Tell TI to open and start the port */
        status = send_cmd(dev, UMPC_OPEN_PORT,
                (u8)(UMPM_UART1_PORT + port_number), open_settings, NULL, 0);
        if (status) {
                dev_err(&port->dev, "%s - cannot send open command, %d\n",
                        __func__, status);
                return status;
        }

        /* Start the DMA? */
        status = send_cmd(dev, UMPC_START_PORT,
                (u8)(UMPM_UART1_PORT + port_number), 0, NULL, 0);
        if (status) {
                dev_err(&port->dev, "%s - cannot send start DMA command, %d\n",
                        __func__, status);
                return status;
        }

        /* Clear TX and RX buffers in UMP */
        status = purge_port(port, UMP_PORT_DIR_OUT | UMP_PORT_DIR_IN);
        if (status) {
                dev_err(&port->dev,
                        "%s - cannot send clear buffers command, %d\n",
                        __func__, status);
                return status;
        }

        /* Read Initial MSR */
        status = ti_vread_sync(dev, UMPC_READ_MSR, 0,
                               (__u16)(UMPM_UART1_PORT + port_number),
                               &edge_port->shadow_msr, 1);
        if (status) {
                dev_err(&port->dev, "%s - cannot send read MSR command, %d\n",
                        __func__, status);
                return status;
        }

        dev_dbg(&port->dev, "ShadowMSR 0x%X\n", edge_port->shadow_msr);

        /* Set Initial MCR */
        edge_port->shadow_mcr = MCR_RTS | MCR_DTR;
        dev_dbg(&port->dev, "ShadowMCR 0x%X\n", edge_port->shadow_mcr);

        edge_serial = edge_port->edge_serial;
        if (mutex_lock_interruptible(&edge_serial->es_lock))
                return -ERESTARTSYS;
        if (edge_serial->num_ports_open == 0) {
                /* we are the first port to open, post the interrupt urb */
                urb = edge_serial->serial->port[0]->interrupt_in_urb;
                if (!urb) {
                        dev_err(&port->dev,
                                "%s - no interrupt urb present, exiting\n",
                                __func__);
                        status = -EINVAL;
                        goto release_es_lock;
                }
                urb->context = edge_serial;
                status = usb_submit_urb(urb, GFP_KERNEL);
                if (status) {
                        dev_err(&port->dev,
                                "%s - usb_submit_urb failed with value %d\n",
                                __func__, status);
                        goto release_es_lock;
                }
        }

        /*
         * reset the data toggle on the bulk endpoints to work around bug in
         * host controllers where things get out of sync some times
         */
        usb_clear_halt(dev, port->write_urb->pipe);
        usb_clear_halt(dev, port->read_urb->pipe);

        /* start up our bulk read urb */
        urb = port->read_urb;
        if (!urb) {
                dev_err(&port->dev, "%s - no read urb present, exiting\n",
                        __func__);
                status = -EINVAL;
                goto unlink_int_urb;
        }
        edge_port->ep_read_urb_state = EDGE_READ_URB_RUNNING;
        urb->context = edge_port;
        status = usb_submit_urb(urb, GFP_KERNEL);
        if (status) {
                dev_err(&port->dev,
                        "%s - read bulk usb_submit_urb failed with value %d\n",
                        __func__, status);
                goto unlink_int_urb;
        }

        ++edge_serial->num_ports_open;

        goto release_es_lock;

unlink_int_urb:
        if (edge_port->edge_serial->num_ports_open == 0)
                usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
release_es_lock:
        mutex_unlock(&edge_serial->es_lock);
        return status;
}
| 0 |
[
"CWE-191"
] |
linux
|
654b404f2a222f918af9b0cd18ad469d0c941a8e
| 72,662,994,258,185,945,000,000,000,000,000,000,000 | 148 |
USB: serial: io_ti: fix information leak in completion handler
Add missing sanity check to the bulk-in completion handler to avoid an
integer underflow that can be triggered by a malicious device.
This avoids leaking 128 kB of memory content from after the URB transfer
buffer to user space.
Fixes: 8c209e6782ca ("USB: make actual_length in struct urb field u32")
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Cc: stable <[email protected]> # 2.6.30
Signed-off-by: Johan Hovold <[email protected]>
|
zvoid *getRISCOSexfield(ef_buf, ef_len)
    ZCONST uch *ef_buf; /* buffer containing extra field */
    unsigned ef_len;    /* total length of extra field */
{
    unsigned eb_id;
    unsigned eb_len;

/*---------------------------------------------------------------------------
    This function scans the extra field for an Acorn SPARK filetype ef-block.
    If a valid block is found, the function returns a pointer to the start
    of the SPARK_EF block in the extra field buffer.  Otherwise, a NULL
    pointer is returned.
  ---------------------------------------------------------------------------*/

    if (ef_len == 0 || ef_buf == NULL)
        return NULL;

    Trace((stderr,"\ngetRISCOSexfield: scanning extra field of length %u\n",
      ef_len));

    while (ef_len >= EB_HEADSIZE) {
        eb_id = makeword(EB_ID + ef_buf);
        eb_len = makeword(EB_LEN + ef_buf);

        if (eb_len > (ef_len - EB_HEADSIZE)) {
            /* discovered some extra field inconsistency! */
            Trace((stderr,
              "getRISCOSexfield: block length %u > rest ef_size %u\n", eb_len,
              ef_len - EB_HEADSIZE));
            break;
        }

        if (eb_id == EF_SPARK && (eb_len == 24 || eb_len == 20)) {
            if (makelong(EB_HEADSIZE + ef_buf) == SPARKID_2) {
                /* Return a pointer to the valid SPARK filetype ef block */
                return (zvoid *)ef_buf;
            }
        }

        /* Skip this extra field block */
        ef_buf += (eb_len + EB_HEADSIZE);
        ef_len -= (eb_len + EB_HEADSIZE);
    }

    return NULL;
}
| 0 |
[
"CWE-400"
] |
unzip
|
47b3ceae397d21bf822bc2ac73052a4b1daf8e1c
| 321,923,005,031,760,840,000,000,000,000,000,000,000 | 46 |
Detect and reject a zip bomb using overlapped entries.
This detects an invalid zip file that has at least one entry that
overlaps with another entry or with the central directory to the
end of the file. A Fifield zip bomb uses overlapped local entries
to vastly increase the potential inflation ratio. Such an invalid
zip file is rejected.
See https://www.bamsoftware.com/hacks/zipbomb/ for David Fifield's
analysis, construction, and examples of such zip bombs.
The detection maintains a list of covered spans of the zip files
so far, where the central directory to the end of the file and any
bytes preceding the first entry at zip file offset zero are
considered covered initially. Then as each entry is decompressed
or tested, it is considered covered. When a new entry is about to
be processed, its initial offset is checked to see if it is
contained by a covered span. If so, the zip file is rejected as
invalid.
This commit depends on a preceding commit: "Fix bug in
undefer_input() that misplaced the input state."
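As a rough illustration of the covered-span bookkeeping described above (a
minimal sketch with hypothetical names and a fixed-size table, not unzip's
actual data structure):

    /* Sketch: track covered byte ranges [beg, end) of the zip file. */
    struct span { unsigned long beg, end; };
    static struct span covered[1024];   /* hypothetical fixed-size table */
    static int ncovered;

    /* Return 1 if offset falls inside an already-covered span. */
    static int span_covered(unsigned long offset)
    {
        int i;
        for (i = 0; i < ncovered; i++)
            if (offset >= covered[i].beg && offset < covered[i].end)
                return 1;
        return 0;
    }

    /* After an entry is decompressed or tested, mark its bytes covered. */
    static void span_add(unsigned long beg, unsigned long end)
    {
        if (ncovered < 1024)
            covered[ncovered++] = (struct span){ beg, end };
    }

Before processing a new local entry, the extractor would call span_covered()
on the entry's starting offset and reject the archive as invalid if it
returns 1.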
|
xmlXzfileRead (void * context, char * buffer, int len) {
    int ret;

    ret = __libxml2_xzread((xzFile) context, &buffer[0], len);
    if (ret < 0) xmlIOErr(0, "xzread()");
    return(ret);
}
| 0 |
[
"CWE-134"
] |
libxml2
|
4472c3a5a5b516aaf59b89be602fbce52756c3e9
| 260,298,594,734,509,200,000,000,000,000,000,000,000 | 7 |
Fix some format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
Decorate every method in libxml2 with the appropriate
LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups
following the reports.
|
static int imap_mbox_check_stats(struct Mailbox *m, int flags)
{
  return imap_mailbox_status(m, true);
}
| 0 |
[
"CWE-94",
"CWE-74"
] |
neomutt
|
fb013ec666759cb8a9e294347c7b4c1f597639cc
| 212,771,978,112,706,740,000,000,000,000,000,000,000 | 4 |
tls: clear data after a starttls acknowledgement
After a starttls acknowledgement message, clear the buffers of any
incoming data / commands. This will ensure that all future data is
handled securely.
Co-authored-by: Pietro Cerutti <[email protected]>
|
xmlTextReaderConstValue(xmlTextReaderPtr reader) {
    xmlNodePtr node;
    if (reader == NULL)
        return(NULL);
    if (reader->node == NULL)
        return(NULL);
    if (reader->curnode != NULL)
        node = reader->curnode;
    else
        node = reader->node;

    switch (node->type) {
        case XML_NAMESPACE_DECL:
            return(((xmlNsPtr) node)->href);
        case XML_ATTRIBUTE_NODE:{
            xmlAttrPtr attr = (xmlAttrPtr) node;
            const xmlChar *ret;

            if ((attr->children != NULL) &&
                (attr->children->type == XML_TEXT_NODE) &&
                (attr->children->next == NULL))
                return(attr->children->content);
            else {
                if (reader->buffer == NULL) {
                    reader->buffer = xmlBufCreateSize(100);
                    if (reader->buffer == NULL) {
                        xmlGenericError(xmlGenericErrorContext,
                                        "xmlTextReaderSetup : malloc failed\n");
                        return (NULL);
                    }
                    xmlBufSetAllocationScheme(reader->buffer,
                                              XML_BUFFER_ALLOC_BOUNDED);
                } else
                    xmlBufEmpty(reader->buffer);
                xmlBufGetNodeContent(reader->buffer, node);
                ret = xmlBufContent(reader->buffer);
                if (ret == NULL) {
                    /* error on the buffer best to reallocate */
                    xmlBufFree(reader->buffer);
                    reader->buffer = xmlBufCreateSize(100);
                    xmlBufSetAllocationScheme(reader->buffer,
                                              XML_BUFFER_ALLOC_BOUNDED);
                    ret = BAD_CAST "";
                }
                return(ret);
            }
            break;
        }
        case XML_TEXT_NODE:
        case XML_CDATA_SECTION_NODE:
        case XML_PI_NODE:
        case XML_COMMENT_NODE:
            return(node->content);
        default:
            break;
    }
    return(NULL);
}
| 0 |
[
"CWE-399"
] |
libxml2
|
213f1fe0d76d30eaed6e5853057defc43e6df2c9
| 84,697,203,120,517,840,000,000,000,000,000,000,000 | 58 |
CVE-2015-1819 Enforce the reader to run in constant memory
One of the operations on the reader could resolve entities,
leading to the classic expansion issue. Make sure the
buffer used for xmlreader operations is bounded.
Introduce a new allocation type for the buffers for this effect.
|
static inline int is_cgroup_event(struct perf_event *event)
{
        return event->cgrp != NULL;
}
| 0 |
[
"CWE-703",
"CWE-189"
] |
linux
|
8176cced706b5e5d15887584150764894e94e02f
| 303,395,512,478,264,930,000,000,000,000,000,000,000 | 4 |
perf: Treat attr.config as u64 in perf_swevent_init()
Trinity discovered that we fail to check all 64 bits of
attr.config passed by user space, resulting in out-of-bounds
access of the perf_swevent_enabled array in
sw_perf_event_destroy().
Introduced in commit b0a873ebb ("perf: Register PMU
implementations").
Signed-off-by: Tommi Rantala <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: [email protected]
Cc: Paul Mackerras <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
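In essence the bug is a 64-bit value truncated to int before the bounds
check; a minimal sketch of the fix as described (simplified, not the exact
kernel diff):

    /* Sketch: keep attr.config in its full 64-bit width. */
    u64 event_id = event->attr.config;      /* was: int event_id = ... */

    if (event_id >= PERF_COUNT_SW_MAX)      /* check now sees all 64 bits */
            return -ENOENT;

    /* only now is event_id safe as an index into perf_swevent_enabled[] */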
|
static void srpt_refresh_port_work(struct work_struct *work)
{
        struct srpt_port *sport = container_of(work, struct srpt_port, work);

        srpt_refresh_port(sport);
}
| 0 |
[
"CWE-200",
"CWE-476"
] |
linux
|
51093254bf879bc9ce96590400a87897c7498463
| 8,074,310,308,962,773,000,000,000,000,000,000,000 | 6 |
IB/srpt: Simplify srpt_handle_tsk_mgmt()
Let the target core check task existence instead of the SRP target
driver. Additionally, let the target core check the validity of the
task management request instead of the ib_srpt driver.
This patch fixes the following kernel crash:
BUG: unable to handle kernel NULL pointer dereference at 0000000000000001
IP: [<ffffffffa0565f37>] srpt_handle_new_iu+0x6d7/0x790 [ib_srpt]
Oops: 0002 [#1] SMP
Call Trace:
[<ffffffffa05660ce>] srpt_process_completion+0xde/0x570 [ib_srpt]
[<ffffffffa056669f>] srpt_compl_thread+0x13f/0x160 [ib_srpt]
[<ffffffff8109726f>] kthread+0xcf/0xe0
[<ffffffff81613cfc>] ret_from_fork+0x7c/0xb0
Signed-off-by: Bart Van Assche <[email protected]>
Fixes: 3e4f574857ee ("ib_srpt: Convert TMR path to target_submit_tmr")
Tested-by: Alex Estrin <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Cc: Nicholas Bellinger <[email protected]>
Cc: Sagi Grimberg <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Doug Ledford <[email protected]>
|
void x86_spec_ctrl_setup_ap(void)
{
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

        if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
                x86_amd_ssb_disable();
}
| 0 |
[] |
linux
|
a2059825986a1c8143fd6698774fa9d83733bb11
| 300,628,470,526,006,940,000,000,000,000,000,000,000 | 8 |
x86/speculation: Enable Spectre v1 swapgs mitigations
The previous commit added macro calls in the entry code which mitigate the
Spectre v1 swapgs issue if the X86_FEATURE_FENCE_SWAPGS_* features are
enabled. Enable those features where applicable.
The mitigations may be disabled with "nospectre_v1" or "mitigations=off".
There are different features which can affect the risk of attack:
- When FSGSBASE is enabled, unprivileged users are able to place any
value in GS, using the wrgsbase instruction. This means they can
write a GS value which points to any value in kernel space, which can
be useful with the following gadget in an interrupt/exception/NMI
handler:
	if (coming from user space)
		swapgs
	mov %gs:<percpu_offset>, %reg1
	// dependent load or store based on the value of %reg1
	// for example: mov %(reg1), %reg2
If an interrupt is coming from user space, and the entry code
speculatively skips the swapgs (due to user branch mistraining), it
may speculatively execute the GS-based load and a subsequent dependent
load or store, exposing the kernel data to an L1 side channel leak.
Note that, on Intel, a similar attack exists in the above gadget when
coming from kernel space, if the swapgs gets speculatively executed to
switch back to the user GS. On AMD, this variant isn't possible
because swapgs is serializing with respect to future GS-based
accesses.
NOTE: The FSGSBASE patch set hasn't been merged yet, so the above case
doesn't exist quite yet.
- When FSGSBASE is disabled, the issue is mitigated somewhat because
unprivileged users must use prctl(ARCH_SET_GS) to set GS, which
restricts GS values to user space addresses only. That means the
gadget would need an additional step, since the target kernel address
needs to be read from user space first. Something like:
	if (coming from user space)
		swapgs
	mov %gs:<percpu_offset>, %reg1
	mov (%reg1), %reg2
	// dependent load or store based on the value of %reg2
	// for example: mov %(reg2), %reg3
It's difficult to audit for this gadget in all the handlers, so while
there are no known instances of it, it's entirely possible that it
exists somewhere (or could be introduced in the future). Without
tooling to analyze all such code paths, consider it vulnerable.
Effects of SMAP on the !FSGSBASE case:
- If SMAP is enabled, and the CPU reports RDCL_NO (i.e., not
susceptible to Meltdown), the kernel is prevented from speculatively
reading user space memory, even L1 cached values. This effectively
disables the !FSGSBASE attack vector.
- If SMAP is enabled, but the CPU *is* susceptible to Meltdown, SMAP
still prevents the kernel from speculatively reading user space
memory. But it does *not* prevent the kernel from reading the
user value from L1, if it has already been cached. This is probably
only a small hurdle for an attacker to overcome.
Thanks to Dave Hansen for contributing the speculative_smap() function.
Thanks to Andrew Cooper for providing the inside scoop on whether swapgs
is serializing on AMD.
[ tglx: Fixed the USER fence decision and polished the comment as suggested
by Dave Hansen ]
Signed-off-by: Josh Poimboeuf <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Dave Hansen <[email protected]>
|
bool ConnectionManagerImpl::ActiveStreamFilterBase::commonHandleAfter100ContinueHeadersCallback(
    FilterHeadersStatus status) {
  ASSERT(parent_.state_.has_continue_headers_);
  ASSERT(!continue_headers_continued_);
  ASSERT(canIterate());

  if (status == FilterHeadersStatus::StopIteration) {
    iteration_state_ = IterationState::StopSingleIteration;
    return false;
  } else {
    ASSERT(status == FilterHeadersStatus::Continue);
    continue_headers_continued_ = true;
    return true;
  }
}
| 0 |
[
"CWE-400"
] |
envoy
|
0e49a495826ea9e29134c1bd54fdeb31a034f40c
| 216,882,560,073,212,250,000,000,000,000,000,000,000 | 15 |
http/2: add stats and stream flush timeout (#139)
This commit adds a new stream flush timeout to guard against a
remote server that does not open window once an entire stream has
been buffered for flushing. Additional stats have also been added
to better understand the codecs view of active streams as well as
amount of data buffered.
Signed-off-by: Matt Klein <[email protected]>
|
static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
                         u32 msr, u64 data, bool host)
{
        struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
        int ret;

        if (!synic->active && !host)
                return 1;

        trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);

        ret = 0;
        switch (msr) {
        case HV_X64_MSR_SCONTROL:
                synic->control = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_SVERSION:
                if (!host) {
                        ret = 1;
                        break;
                }
                synic->version = data;
                break;
        case HV_X64_MSR_SIEFP:
                if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
                    !synic->dont_zero_synic_pages)
                        if (kvm_clear_guest(vcpu->kvm,
                                            data & PAGE_MASK, PAGE_SIZE)) {
                                ret = 1;
                                break;
                        }
                synic->evt_page = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_SIMP:
                if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
                    !synic->dont_zero_synic_pages)
                        if (kvm_clear_guest(vcpu->kvm,
                                            data & PAGE_MASK, PAGE_SIZE)) {
                                ret = 1;
                                break;
                        }
                synic->msg_page = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_EOM: {
                int i;

                for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
                        kvm_hv_notify_acked_sint(vcpu, i);
                break;
        }
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
                break;
        default:
                ret = 1;
                break;
        }
        return ret;
}
| 1 |
[
"CWE-476"
] |
linux
|
b1e34d325397a33d97d845e312d7cf2a8b646b44
| 311,923,606,119,974,350,000,000,000,000,000,000,000 | 65 |
KVM: x86: Forbid VMM to set SYNIC/STIMER MSRs when SynIC wasn't activated
Setting non-zero values to SYNIC/STIMER MSRs activates certain features,
this should not happen when KVM_CAP_HYPERV_SYNIC{,2} was not activated.
Note, it would've been better to forbid writing anything to SYNIC/STIMER
MSRs, including zeroes, however, at least QEMU tries clearing
HV_X64_MSR_STIMER0_CONFIG without SynIC. HV_X64_MSR_EOM MSR is somewhat
'special' as writing zero there triggers an action, this also should not
happen when SynIC wasn't activated.
Signed-off-by: Vitaly Kuznetsov <[email protected]>
Message-Id: <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]>
|
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
        int ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        switch (cmd) {
        case IPT_SO_SET_REPLACE:
                ret = do_replace(user, len);
                break;

        case IPT_SO_SET_ADD_COUNTERS:
                ret = do_add_counters(user, len, 0);
                break;

        default:
                duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
                ret = -EINVAL;
        }

        return ret;
}
| 0 |
[
"CWE-787"
] |
linux
|
9fa492cdc160cd27ce1046cb36f47d3b2b1efa21
| 179,318,190,706,914,180,000,000,000,000,000,000,000 | 23 |
[NETFILTER]: x_tables: simplify compat API
Split the xt_compat_match/xt_compat_target into smaller type-safe functions
performing just one operation. Handle all alignment and size-related
conversions centrally in these function instead of requiring each module to
implement a full-blown conversion function. Replace ->compat callback by
->compat_from_user and ->compat_to_user callbacks, responsible for
converting just a single private structure.
Signed-off-by: Patrick McHardy <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
long __sched io_schedule_timeout(long timeout)
{
        struct rq *rq = &__raw_get_cpu_var(runqueues);
        long ret;

        delayacct_blkio_start();
        atomic_inc(&rq->nr_iowait);
        ret = schedule_timeout(timeout);
        atomic_dec(&rq->nr_iowait);
        delayacct_blkio_end();
        return ret;
}
| 0 |
[] |
linux-2.6
|
8f1bc385cfbab474db6c27b5af1e439614f3025c
| 294,748,483,450,013,500,000,000,000,000,000,000,000 | 12 |
sched: fair: weight calculations
In order to level the hierarchy, we need to calculate load based on the
root view. That is, each task's load is in the same unit.
             A
            / \
           B   1
          / \
         2   3

To compute 1's load we do:

	   weight(1)
	--------------
	 rq_weight(A)

To compute 2's load we do:

	  weight(2)      weight(B)
	------------- * -------------
	 rq_weight(B)    rq_weight(A)

This yields load fractions in comparable units.
The consequence is that it changes virtual time. We used to have:

	            time_{i}
	vtime_{i} = ------------
	            weight_{i}

	vtime = \Sum vtime_{i} = time / rq_weight.
But with the new way of load calculation we get that vtime equals time.
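For instance, with hypothetical weights weight(2) = 512, rq_weight(B) = 1024,
weight(B) = 1024 and rq_weight(A) = 2048:

	  512     1024
	------ * ------ = 0.5 * 0.5 = 0.25
	 1024     2048

so task 2 contributes a quarter of the root runqueue's load.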
Signed-off-by: Peter Zijlstra <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
|
void LineBitmapRequester::ResetToStartOfImage(void)
{
  for(UBYTE i = 0;i < m_ucCount;i++) {
    m_pppImage[i]      = &m_ppTop[i];
    m_pulReadyLines[i] = 0;
  }
}
| 0 |
[
"CWE-476"
] |
libjpeg
|
51c3241b6da39df30f016b63f43f31c4011222c7
| 205,978,086,072,951,880,000,000,000,000,000,000,000 | 7 |
Fixed a NULL-pointer access in the line-based reconstruction process
in case no valid scan was found and no data is present.
|
verify_client_eku(krb5_context context,
                  pkinit_kdc_context plgctx,
                  pkinit_kdc_req_context reqctx,
                  int *eku_accepted)
{
    krb5_error_code retval;

    *eku_accepted = 0;

    if (plgctx->opts->require_eku == 0) {
        pkiDebug("%s: configuration requests no EKU checking\n", __FUNCTION__);
        *eku_accepted = 1;
        retval = 0;
        goto out;
    }

    retval = crypto_check_cert_eku(context, plgctx->cryptoctx,
                                   reqctx->cryptoctx, plgctx->idctx,
                                   0, /* kdc cert */
                                   plgctx->opts->accept_secondary_eku,
                                   eku_accepted);
    if (retval) {
        pkiDebug("%s: Error from crypto_check_cert_eku %d (%s)\n",
                 __FUNCTION__, retval, error_message(retval));
        goto out;
    }

out:
    pkiDebug("%s: returning retval %d, eku_accepted %d\n",
             __FUNCTION__, retval, *eku_accepted);
    return retval;
}
| 0 |
[
"CWE-476"
] |
krb5
|
db64ca25d661a47b996b4e2645998b5d7f0eb52c
| 204,514,422,944,463,560,000,000,000,000,000,000,000 | 32 |
PKINIT (draft9) null ptr deref [CVE-2012-1016]
Don't check for an agility KDF identifier in the non-draft9 reply
structure when we're building a draft9 reply, because it'll be NULL.
The KDC plugin for PKINIT can dereference a null pointer when handling
a draft9 request, leading to a crash of the KDC process. An attacker
would need to have a valid PKINIT certificate, or an unauthenticated
attacker could execute the attack if anonymous PKINIT is enabled.
CVSSv2 vector: AV:N/AC:M/Au:N/C:N/I:N/A:P/E:P/RL:O/RC:C
[[email protected]: reformat comment and edit log message]
(back ported from commit cd5ff932c9d1439c961b0cf9ccff979356686aff)
ticket: 7527 (new)
version_fixed: 1.10.4
status: resolved
|
void CalculateActivationRange(TfLiteFusedActivation activation,
                              T* activation_min, T* activation_max) {
  if (activation == kTfLiteActRelu) {
    *activation_min = 0;
    *activation_max = std::numeric_limits<T>::max();
  } else if (activation == kTfLiteActRelu6) {
    *activation_min = 0;
    *activation_max = 6;
  } else if (activation == kTfLiteActReluN1To1) {
    *activation_min = -1;
    *activation_max = 1;
  } else {
    *activation_min = std::numeric_limits<T>::lowest();
    *activation_max = std::numeric_limits<T>::max();
  }
}
| 0 |
[
"CWE-125",
"CWE-787"
] |
tensorflow
|
46d5b0852528ddfd614ded79bccc75589f801bd9
| 229,009,524,347,904,960,000,000,000,000,000,000,000 | 16 |
[tflite] Test for `kTfLiteOptionalTensor` in `GetInput`.
`GetInput`, `GetVariableInput` and `GetOutput` all fail to check for the case where `node->inputs->data[index]` is the special `kTfLiteOptionalTensor` value (-1) which then causes `context->tensors[node->inputs->data[index]]` to read from invalid memory location.
This fix makes `GetInput` and related return `nullptr` in those cases, asking the caller to check for `nullptr`. This is better than having `GetOptionalInputTensor` and `GetOptionalOutputTensor` (does not exist but could be added) as using the patched `GetInput` in error would be caught by a sanitizer test in the default optimized build (due to the `-fsanitize=null` option).
PiperOrigin-RevId: 332512190
Change-Id: Iabca54da2f2de02b6ece3c38b54f76d4277d689e
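A sketch of the patched accessor as described above (simplified; names
follow the TFLite C API, but this is not the verbatim fix):

    /* Sketch: return NULL for optional (-1) tensor indices instead of
     * indexing context->tensors with a negative value. */
    const TfLiteTensor* GetInput(const TfLiteContext* context,
                                 const TfLiteNode* node, int index) {
      const int tensor_index = node->inputs->data[index];
      if (tensor_index == kTfLiteOptionalTensor)  /* == -1 */
        return NULL;
      return &context->tensors[tensor_index];
    }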
|
GtkTreeModel *bluetooth_client_get_adapter_model (BluetoothClient *client)
{
        return bluetooth_client_get_filter_model (client, adapter_filter,
                                                  NULL, NULL);
}
| 0 |
[] |
gnome-bluetooth
|
6b5086d42ea64d46277f3c93b43984f331d12f89
| 335,863,528,249,620,030,000,000,000,000,000,000,000 | 5 |
lib: Fix Discoverable being reset when turned off
Work-around race in bluetoothd which would reset the discoverable
flag if a timeout change was requested before discoverable finished
being set to off:
See https://bugzilla.redhat.com/show_bug.cgi?id=1602985
|
static bool _launch_job_test(uint32_t job_id)
{
        bool found = false;
        int j;

        slurm_mutex_lock(&job_state_mutex);
        for (j = 0; j < JOB_STATE_CNT; j++) {
                if (job_id == active_job_id[j]) {
                        found = true;
                        break;
                }
        }
        slurm_mutex_unlock(&job_state_mutex);
        return found;
}
| 0 |
[
"CWE-20"
] |
slurm
|
df545955e4f119974c278bff0c47155257d5afc7
| 325,872,382,632,877,830,000,000,000,000,000,000,000 | 15 |
Validate gid and user_name values provided to slurmd up front.
Do not defer until later, and do not potentially miss out on proper
validation of the user_name field which can lead to improper authentication
handling.
CVE-2018-10995.
|
int URI_FUNC(ComposeQueryCharsRequiredEx)(const URI_TYPE(QueryList) * queryList,
        int * charsRequired, UriBool spaceToPlus, UriBool normalizeBreaks) {
    if ((queryList == NULL) || (charsRequired == NULL)) {
        return URI_ERROR_NULL;
    }

    return URI_FUNC(ComposeQueryEngine)(NULL, queryList, 0, NULL,
            charsRequired, spaceToPlus, normalizeBreaks);
}
| 0 |
[
"CWE-787"
] |
uriparser
|
864f5d4c127def386dd5cc926ad96934b297f04e
| 168,304,320,478,761,200,000,000,000,000,000,000,000 | 9 |
UriQuery.c: Fix out-of-bounds-write in ComposeQuery and ...Ex
Reported by Google Autofuzz team
|
static void mbed_debug(void *context, int level, const char *f_name,
                       int line_nb, const char *line)
{
  struct Curl_easy *data = NULL;

  if(!context)
    return;

  data = (struct Curl_easy *)context;
  infof(data, "%s", line);
  (void) level;
}
| 0 |
[
"CWE-290"
] |
curl
|
b09c8ee15771c614c4bf3ddac893cdb12187c844
| 116,099,363,785,973,090,000,000,000,000,000,000,000 | 13 |
vtls: add 'isproxy' argument to Curl_ssl_get/addsessionid()
To make sure we set and extract the correct session.
Reported-by: Mingtao Yang
Bug: https://curl.se/docs/CVE-2021-22890.html
CVE-2021-22890
|
static int nfs_readlink_reply(uchar *pkt, unsigned len)
{
        struct rpc_t rpc_pkt;
        int rlen;
        int nfsv3_data_offset = 0;

        debug("%s\n", __func__);

        memcpy((unsigned char *)&rpc_pkt, pkt, len);

        if (ntohl(rpc_pkt.u.reply.id) > rpc_id)
                return -NFS_RPC_ERR;
        else if (ntohl(rpc_pkt.u.reply.id) < rpc_id)
                return -NFS_RPC_DROP;

        if (rpc_pkt.u.reply.rstatus ||
            rpc_pkt.u.reply.verifier ||
            rpc_pkt.u.reply.astatus ||
            rpc_pkt.u.reply.data[0])
                return -1;

        if (!(supported_nfs_versions & NFSV2_FLAG)) { /* NFSV3_FLAG */
                nfsv3_data_offset =
                        nfs3_get_attributes_offset(rpc_pkt.u.reply.data);
        }

        /* new path length */
        rlen = ntohl(rpc_pkt.u.reply.data[1 + nfsv3_data_offset]);

        if (((uchar *)&(rpc_pkt.u.reply.data[0]) - (uchar *)(&rpc_pkt) + rlen) > len)
                return -NFS_RPC_DROP;

        if (*((char *)&(rpc_pkt.u.reply.data[2 + nfsv3_data_offset])) != '/') {
                int pathlen;

                strcat(nfs_path, "/");
                pathlen = strlen(nfs_path);
                memcpy(nfs_path + pathlen,
                       (uchar *)&(rpc_pkt.u.reply.data[2 + nfsv3_data_offset]),
                       rlen);
                nfs_path[pathlen + rlen] = 0;
        } else {
                memcpy(nfs_path,
                       (uchar *)&(rpc_pkt.u.reply.data[2 + nfsv3_data_offset]),
                       rlen);
                nfs_path[rlen] = 0;
        }
        return 0;
}
| 0 |
[
"CWE-120",
"CWE-703"
] |
u-boot
|
5d14ee4e53a81055d34ba280cb8fd90330f22a96
| 94,420,508,376,432,090,000,000,000,000,000,000,000 | 49 |
CVE-2019-14196: nfs: fix unbounded memcpy with a failed length check at nfs_lookup_reply
This patch adds a check to rpc_pkt.u.reply.data at nfs_lookup_reply.
Signed-off-by: Cheng Liu <[email protected]>
Reported-by: Fermín Serna <[email protected]>
Acked-by: Joe Hershberger <[email protected]>
|
set_reuseaddr(int fd)
{
        int on = 1;

        if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)) == -1) {
                error("setsockopt SO_REUSEADDR fd %d: %s", fd, strerror(errno));
                return -1;
        }
        return 0;
}
| 0 |
[] |
openssh-portable
|
f3cbe43e28fe71427d41cfe3a17125b972710455
| 167,682,029,388,946,530,000,000,000,000,000,000,000 | 10 |
upstream: need initgroups() before setresgid(); reported by anton@,
ok deraadt@
OpenBSD-Commit-ID: 6aa003ee658b316960d94078f2a16edbc25087ce
|
static void wq_watchdog_timer_fn(unsigned long data)
{
        unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
        bool lockup_detected = false;
        struct worker_pool *pool;
        int pi;

        if (!thresh)
                return;

        rcu_read_lock();

        for_each_pool(pool, pi) {
                unsigned long pool_ts, touched, ts;

                if (list_empty(&pool->worklist))
                        continue;

                /* get the latest of pool and touched timestamps */
                pool_ts = READ_ONCE(pool->watchdog_ts);
                touched = READ_ONCE(wq_watchdog_touched);

                if (time_after(pool_ts, touched))
                        ts = pool_ts;
                else
                        ts = touched;

                if (pool->cpu >= 0) {
                        unsigned long cpu_touched =
                                READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
                                                  pool->cpu));
                        if (time_after(cpu_touched, ts))
                                ts = cpu_touched;
                }

                /* did we stall? */
                if (time_after(jiffies, ts + thresh)) {
                        lockup_detected = true;
                        pr_emerg("BUG: workqueue lockup - pool");
                        pr_cont_pool_info(pool);
                        pr_cont(" stuck for %us!\n",
                                jiffies_to_msecs(jiffies - pool_ts) / 1000);
                }
        }

        rcu_read_unlock();

        if (lockup_detected)
                show_workqueue_state();

        wq_watchdog_reset_touched();
        mod_timer(&wq_watchdog_timer, jiffies + thresh);
}
| 0 |
[
"CWE-200"
] |
tip
|
dfb4357da6ddbdf57d583ba64361c9d792b0e0b1
| 202,013,963,942,709,270,000,000,000,000,000,000,000 | 53 |
time: Remove CONFIG_TIMER_STATS
Currently CONFIG_TIMER_STATS exposes process information across namespaces:
kernel/time/timer_list.c print_timer():

	SEQ_printf(m, ", %s/%d", tmp, timer->start_pid);

/proc/timer_list:

	#11: <0000000000000000>, hrtimer_wakeup, S:01, do_nanosleep, cron/2570
Given that the tracer can give the same information, this patch entirely
removes CONFIG_TIMER_STATS.
Suggested-by: Thomas Gleixner <[email protected]>
Signed-off-by: Kees Cook <[email protected]>
Acked-by: John Stultz <[email protected]>
Cc: Nicolas Pitre <[email protected]>
Cc: [email protected]
Cc: Lai Jiangshan <[email protected]>
Cc: Shuah Khan <[email protected]>
Cc: Xing Gao <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Jessica Frazelle <[email protected]>
Cc: [email protected]
Cc: Nicolas Iooss <[email protected]>
Cc: "Paul E. McKenney" <[email protected]>
Cc: Petr Mladek <[email protected]>
Cc: Richard Cochran <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Michal Marek <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: "Eric W. Biederman" <[email protected]>
Cc: Olof Johansson <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: [email protected]
Cc: Arjan van de Ven <[email protected]>
Link: http://lkml.kernel.org/r/20170208192659.GA32582@beast
Signed-off-by: Thomas Gleixner <[email protected]>
|
Logger *Logger::instance()
{
    return m_instance;
}
| 0 |
[
"CWE-20",
"CWE-79"
] |
qBittorrent
|
6ca3e4f094da0a0017cb2d483ec1db6176bb0b16
| 123,169,893,947,697,500,000,000,000,000,000,000,000 | 4 |
Add Utils::String::toHtmlEscaped
|
static int MqttClient_WaitType(MqttClient *client, void *packet_obj,
    byte wait_type, word16 wait_packet_id, int timeout_ms)
{
    int rc;
    word16 packet_id;
    MqttPacketType packet_type;
#ifdef WOLFMQTT_MULTITHREAD
    MqttPendResp *pendResp;
    int readLocked;
#endif
    MqttMsgStat* mms_stat;
    int waitMatchFound;

    if (client == NULL || packet_obj == NULL) {
        return MQTT_CODE_ERROR_BAD_ARG;
    }

    /* all packet type structures must have MqttMsgStat at top */
    mms_stat = (MqttMsgStat*)packet_obj;

wait_again:

    /* initialize variables */
    packet_id = 0;
    packet_type = MQTT_PACKET_TYPE_RESERVED;
#ifdef WOLFMQTT_MULTITHREAD
    pendResp = NULL;
    readLocked = 0;
#endif
    waitMatchFound = 0;

#ifdef WOLFMQTT_DEBUG_CLIENT
    PRINTF("MqttClient_WaitType: Type %s (%d), ID %d",
        MqttPacket_TypeDesc((MqttPacketType)wait_type),
        wait_type, wait_packet_id);
#endif

    switch ((int)*mms_stat)
    {
        case MQTT_MSG_BEGIN:
        {
        #ifdef WOLFMQTT_MULTITHREAD
            /* Lock recv socket mutex */
            rc = wm_SemLock(&client->lockRecv);
            if (rc != 0) {
                PRINTF("MqttClient_WaitType: recv lock error!");
                return rc;
            }
            readLocked = 1;
        #endif

            /* reset the packet state */
            client->packet.stat = MQTT_PK_BEGIN;
        }
        FALL_THROUGH;

    #ifdef WOLFMQTT_V5
        case MQTT_MSG_AUTH:
    #endif
        case MQTT_MSG_WAIT:
        {
        #ifdef WOLFMQTT_MULTITHREAD
            /* Check to see if packet type and id have already completed */
            pendResp = NULL;
            rc = wm_SemLock(&client->lockClient);
            if (rc == 0) {
                if (MqttClient_RespList_Find(client, (MqttPacketType)wait_type,
                        wait_packet_id, &pendResp)) {
                    if (pendResp->packetDone) {
                        /* pending response is already done, so return */
                        rc = pendResp->packet_ret;
                    #ifdef WOLFMQTT_DEBUG_CLIENT
                        PRINTF("PendResp already Done %p: Rc %d", pendResp, rc);
                    #endif
                        MqttClient_RespList_Remove(client, pendResp);
                        wm_SemUnlock(&client->lockClient);
                        wm_SemUnlock(&client->lockRecv);
                        return rc;
                    }
                }
                wm_SemUnlock(&client->lockClient);
            }
            else {
                break; /* error */
            }
        #endif /* WOLFMQTT_MULTITHREAD */

            *mms_stat = MQTT_MSG_WAIT;

            /* Wait for packet */
            rc = MqttPacket_Read(client, client->rx_buf, client->rx_buf_len,
                    timeout_ms);
            /* handle failure */
            if (rc <= 0) {
                break;
            }

            /* capture length read */
            client->packet.buf_len = rc;

            /* Decode Packet - get type and id */
            rc = MqttClient_DecodePacket(client, client->rx_buf,
                client->packet.buf_len, NULL, &packet_type, NULL, &packet_id);
            if (rc < 0) {
                break;
            }

        #ifdef WOLFMQTT_DEBUG_CLIENT
            PRINTF("Read Packet: Len %d, Type %d, ID %d",
                client->packet.buf_len, packet_type, packet_id);
        #endif

            *mms_stat = MQTT_MSG_READ;
        }
        FALL_THROUGH;

        case MQTT_MSG_READ:
        case MQTT_MSG_READ_PAYLOAD:
        {
            MqttPacketType use_packet_type;
            void* use_packet_obj;

        #ifdef WOLFMQTT_MULTITHREAD
            readLocked = 1; /* if in this state read is locked */
        #endif

            /* read payload state only happens for publish messages */
            if (*mms_stat == MQTT_MSG_READ_PAYLOAD) {
                packet_type = MQTT_PACKET_TYPE_PUBLISH;
            }

            /* Determine if we received data for this request */
            if ((wait_type == MQTT_PACKET_TYPE_ANY ||
                 wait_type == packet_type ||
                 (MqttIsPubRespPacket(packet_type) &&
                  MqttIsPubRespPacket(wait_type))) &&
                (wait_packet_id == 0 || wait_packet_id == packet_id))
            {
                use_packet_obj = packet_obj;
                waitMatchFound = 1;
            }
            else {
                /* use generic packet object */
                use_packet_obj = &client->msg;
            }
            use_packet_type = packet_type;

        #ifdef WOLFMQTT_MULTITHREAD
            /* Check to see if we have a pending response for this packet */
            pendResp = NULL;
            rc = wm_SemLock(&client->lockClient);
            if (rc == 0) {
                if (MqttClient_RespList_Find(client, packet_type, packet_id,
                        &pendResp)) {
                    /* we found packet match this incoming read packet */
                    pendResp->packetProcessing = 1;
                    use_packet_obj = pendResp->packet_obj;
                    use_packet_type = pendResp->packet_type;
                    /* req from another thread... not a match */
                    waitMatchFound = 0;
                }
                wm_SemUnlock(&client->lockClient);
            }
            else {
                break; /* error */
            }
        #endif /* WOLFMQTT_MULTITHREAD */

            /* Perform packet handling for publish callback and QoS */
            rc = MqttClient_HandlePacket(client, use_packet_type,
                use_packet_obj, timeout_ms);

        #ifdef WOLFMQTT_NONBLOCK
            if (rc == MQTT_CODE_CONTINUE) {
                /* we have received some data, so keep the recv
                   mutex lock active and return */
                return rc;
            }
        #endif

            /* handle success case */
            if (rc >= 0) {
                rc = MQTT_CODE_SUCCESS;
            }

        #ifdef WOLFMQTT_MULTITHREAD
            if (pendResp) {
                /* Mark pending response entry done */
                if (wm_SemLock(&client->lockClient) == 0) {
                    pendResp->packetDone = 1;
                    pendResp->packet_ret = rc;
                #ifdef WOLFMQTT_DEBUG_CLIENT
                    PRINTF("PendResp Done %p", pendResp);
                #endif
                    pendResp = NULL;
                    wm_SemUnlock(&client->lockClient);
                }
            }
        #endif /* WOLFMQTT_MULTITHREAD */
            break;
        }

        case MQTT_MSG_WRITE:
        case MQTT_MSG_WRITE_PAYLOAD:
        default:
        {
        #ifdef WOLFMQTT_DEBUG_CLIENT
            PRINTF("MqttClient_WaitType: Invalid state %d!", *mms_stat);
        #endif
            rc = MQTT_CODE_ERROR_STAT;
            break;
        }
    } /* switch (*mms_stat) */

#ifdef WOLFMQTT_NONBLOCK
    if (rc != MQTT_CODE_CONTINUE)
#endif
    {
        /* reset state */
        *mms_stat = MQTT_MSG_BEGIN;
    }

#ifdef WOLFMQTT_MULTITHREAD
    if (readLocked) {
        wm_SemUnlock(&client->lockRecv);
    }
#endif
    if (rc < 0) {
    #ifdef WOLFMQTT_DEBUG_CLIENT
        PRINTF("MqttClient_WaitType: Failure: %s (%d)",
            MqttClient_ReturnCodeToString(rc), rc);
    #endif
        return rc;
    }

    if (!waitMatchFound) {
        /* if we get here, then we are still waiting for a packet */
        goto wait_again;
    }

    return rc;
}
| 0 |
[
"CWE-787"
] |
wolfMQTT
|
84d4b53122e0fa0280c7872350b89d5777dabbb2
| 202,517,059,666,085,040,000,000,000,000,000,000,000 | 242 |
Fix wolfmqtt-fuzzer: Null-dereference WRITE in MqttProps_Free
|
static void __exit pppoe_exit(void)
{
        unregister_netdevice_notifier(&pppoe_notifier);
        dev_remove_pack(&pppoed_ptype);
        dev_remove_pack(&pppoes_ptype);
        unregister_pppox_proto(PX_PROTO_OE);
        proto_unregister(&pppoe_sk_proto);
        unregister_pernet_device(&pppoe_net_ops);
}
| 0 |
[
"CWE-20",
"CWE-269"
] |
linux
|
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
| 96,039,082,789,355,570,000,000,000,000,000,000,000 | 9 |
net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
	if (!uaddr || msg_sys->msg_namelen == 0)
		msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
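Under the new contract, a handler pattern would look roughly like this
(hypothetical protocol, not an in-tree handler):

	/* Sketch: msg_namelen arrives as 0; only set it (and fill msg_name)
	 * when an address is actually returned. */
	static int foo_recvmsg(struct socket *sock, struct msghdr *msg,
	                       size_t len, int flags)
	{
	        struct sockaddr_in *sin = msg->msg_name;

	        /* ... receive the data ... */

	        if (sin) {      /* NULL when the caller passed no address */
	                sin->sin_family = AF_INET;
	                /* ... fill in the peer address ... */
	                msg->msg_namelen = sizeof(*sin);
	                /* must stay <= sizeof(struct sockaddr_storage) */
	        }
	        return 0;       /* a real handler returns the byte count */
	}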
|
int sock_common_getsockopt(struct socket *sock, int level, int optname,
                           char __user *optval, int __user *optlen)
{
        struct sock *sk = sock->sk;

        return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
| 0 |
[
"CWE-264"
] |
linux-2.6
|
df0bca049d01c0ee94afb7cd5dfd959541e6c8da
| 261,607,069,552,369,000,000,000,000,000,000,000,000 | 7 |
net: 4 bytes kernel memory disclosure in SO_BSDCOMPAT gsopt try #2
In function sock_getsockopt() located in net/core/sock.c, optval v.val
is not correctly initialized and directly returned in userland in case
we have SO_BSDCOMPAT option set.
This dummy code should trigger the bug:
int main(void)
{
	unsigned char buf[4] = { 0, 0, 0, 0 };
	int len;
	int sock;

	sock = socket(33, 2, 2);
	getsockopt(sock, 1, SO_BSDCOMPAT, &buf, &len);
	printf("%x%x%x%x\n", buf[0], buf[1], buf[2], buf[3]);
	close(sock);
}
Here is a patch that fix this bug by initalizing v.val just after its
declaration.
Signed-off-by: Clément Lecigne <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static int ZEND_FASTCALL ZEND_IS_NOT_EQUAL_SPEC_CV_CV_HANDLER(ZEND_OPCODE_HANDLER_ARGS)
{
	zend_op *opline = EX(opline);
	zval *result = &EX_T(opline->result.u.var).tmp_var;

	compare_function(result,
		_get_zval_ptr_cv(&opline->op1, EX(Ts), BP_VAR_R TSRMLS_CC),
		_get_zval_ptr_cv(&opline->op2, EX(Ts), BP_VAR_R TSRMLS_CC) TSRMLS_CC);
	ZVAL_BOOL(result, (Z_LVAL_P(result) != 0));
	ZEND_VM_NEXT_OPCODE();
}
| 0 |
[] |
php-src
|
ce96fd6b0761d98353761bf78d5bfb55291179fd
| 219,748,451,309,919,070,000,000,000,000,000,000,000 | 14 |
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191; trunk will have the patch later (adding a macro and/or changing (some) APIs). Patch by Rasmus
|
init_ctx_nego(OM_uint32 *minor_status, spnego_gss_ctx_id_t sc,
              OM_uint32 acc_negState, gss_OID supportedMech,
              gss_buffer_t *responseToken, gss_buffer_t *mechListMIC,
              OM_uint32 *negState, send_token_flag *tokflag)
{
        OM_uint32 ret;

        *negState = REJECT;
        *tokflag = ERROR_TOKEN_SEND;
        ret = GSS_S_DEFECTIVE_TOKEN;

        /*
         * Both supportedMech and negState must be present in first
         * acceptor token.
         */
        if (supportedMech == GSS_C_NO_OID) {
                *minor_status = ERR_SPNEGO_NO_MECH_FROM_ACCEPTOR;
                map_errcode(minor_status);
                return GSS_S_DEFECTIVE_TOKEN;
        }
        if (acc_negState == ACCEPT_DEFECTIVE_TOKEN) {
                *minor_status = ERR_SPNEGO_NEGOTIATION_FAILED;
                map_errcode(minor_status);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        /*
         * If the mechanism we sent is not the mechanism returned from
         * the server, we need to handle the server's counter
         * proposal.  There is a bug in SAMBA servers that always send
         * the old Kerberos mech OID, even though we sent the new one.
         * So we will treat all the Kerberos mech OIDS as the same.
         */
        if (!(is_kerb_mech(supportedMech) &&
              is_kerb_mech(sc->internal_mech)) &&
            !g_OID_equal(supportedMech, sc->internal_mech)) {
                ret = init_ctx_reselect(minor_status, sc,
                                        acc_negState, supportedMech,
                                        responseToken, mechListMIC,
                                        negState, tokflag);
        } else if (*responseToken == GSS_C_NO_BUFFER) {
                if (sc->mech_complete) {
                        /*
                         * Mech completed on first call to its
                         * init_sec_context().  Acceptor sends no mech
                         * token.
                         */
                        *negState = ACCEPT_COMPLETE;
                        *tokflag = NO_TOKEN_SEND;
                        ret = GSS_S_COMPLETE;
                } else {
                        /*
                         * Reject missing mech token when optimistic
                         * mech selected.
                         */
                        *minor_status = ERR_SPNEGO_NO_TOKEN_FROM_ACCEPTOR;
                        map_errcode(minor_status);
                        ret = GSS_S_DEFECTIVE_TOKEN;
                }
        } else if ((*responseToken)->length == 0 && sc->mech_complete) {
                /* Handle old IIS servers returning empty token instead of
                 * null tokens in the non-mutual auth case. */
                *negState = ACCEPT_COMPLETE;
                *tokflag = NO_TOKEN_SEND;
                ret = GSS_S_COMPLETE;
        } else if (sc->mech_complete) {
                /* Reject spurious mech token. */
                ret = GSS_S_DEFECTIVE_TOKEN;
        } else {
                *negState = ACCEPT_INCOMPLETE;
                *tokflag = CONT_TOKEN_SEND;
                ret = GSS_S_CONTINUE_NEEDED;
        }
        sc->nego_done = 1;
        return ret;
}
| 0 |
[
"CWE-415"
] |
krb5
|
f18ddf5d82de0ab7591a36e465bc24225776940f
| 207,918,548,403,150,550,000,000,000,000,000,000,000 | 76 |
Fix double-free in SPNEGO [CVE-2014-4343]
In commit cd7d6b08 ("Verify acceptor's mech in SPNEGO initiator") the
pointer sc->internal_mech became an alias into sc->mech_set->elements,
which should be considered constant for the duration of the SPNEGO
context. So don't free it.
CVE-2014-4343:
In MIT krb5 releases 1.10 and newer, an unauthenticated remote
attacker with the ability to spoof packets appearing to be from a
GSSAPI acceptor can cause a double-free condition in GSSAPI initiators
(clients) which are using the SPNEGO mechanism, by returning a
different underlying mechanism than was proposed by the initiator. At
this stage of the negotiation, the acceptor is unauthenticated, and
the acceptor's response could be spoofed by an attacker with the
ability to inject traffic to the initiator.
Historically, some double-free vulnerabilities can be translated into
remote code execution, though the necessary exploits must be tailored
to the individual application and are usually quite
complicated. Double-frees can also be exploited to cause an
application crash, for a denial of service. However, most GSSAPI
client applications are not vulnerable, as the SPNEGO mechanism is not
used by default (when GSS_C_NO_OID is passed as the mech_type argument
to gss_init_sec_context()). The most common use of SPNEGO is for
HTTP-Negotiate, used in web browsers and other web clients. Most such
clients are believed to not offer HTTP-Negotiate by default, instead
requiring a whitelist of sites for which it may be used to be
configured. If the whitelist is configured to only allow
HTTP-Negotiate over TLS connections ("https://"), a successful
attacker must also spoof the web server's SSL certificate, due to the
way the WWW-Authenticate header is sent in a 401 (Unauthorized)
response message. Unfortunately, many instructions for enabling
HTTP-Negotiate in common web browsers do not include a TLS
requirement.
CVSSv2 Vector: AV:N/AC:H/Au:N/C:C/I:C/A:C/E:POC/RL:OF/RC:C
[[email protected]: CVE summary and CVSSv2 vector]
ticket: 7969 (new)
target_version: 1.12.2
tags: pullup
|
GF_Box *sdp_New()
{
        ISOM_DECL_BOX_ALLOC(GF_SDPBox, GF_ISOM_BOX_TYPE_SDP);
        return (GF_Box *)tmp;
}
| 0 |
[
"CWE-400",
"CWE-401"
] |
gpac
|
d2371b4b204f0a3c0af51ad4e9b491144dd1225c
| 119,776,141,425,311,930,000,000,000,000,000,000,000 | 5 |
prevent dref memleak on invalid input (#1183)
|
char *cgit_fileurl(const char *reponame, const char *pagename,
                   const char *filename, const char *query)
{
        struct strbuf sb = STRBUF_INIT;
        char *delim;

        if (ctx.cfg.virtual_root) {
                strbuf_addf(&sb, "%s%s/%s/%s", ctx.cfg.virtual_root, reponame,
                            pagename, (filename ? filename:""));
                delim = "?";
        } else {
                strbuf_addf(&sb, "?url=%s/%s/%s", reponame, pagename,
                            (filename ? filename : ""));
                delim = "&";
        }
        if (query)
                strbuf_addf(&sb, "%s%s", delim, query);
        return strbuf_detach(&sb, NULL);
}
| 0 |
[] |
cgit
|
513b3863d999f91b47d7e9f26710390db55f9463
| 326,102,344,526,672,620,000,000,000,000,000,000,000 | 19 |
ui-shared: prevent malicious filename from injecting headers
|
static void mtrr_lookup_init(struct mtrr_iter *iter,
                             struct kvm_mtrr *mtrr_state, u64 start, u64 end)
{
        iter->mtrr_state = mtrr_state;
        iter->start = start;
        iter->end = end;
        iter->mtrr_disabled = false;
        iter->partial_map = false;
        iter->fixed = false;
        iter->range = NULL;

        mtrr_lookup_start(iter);
}
| 0 |
[
"CWE-284"
] |
linux
|
9842df62004f366b9fed2423e24df10542ee0dc5
| 130,212,362,937,476,350,000,000,000,000,000,000,000 | 13 |
KVM: MTRR: remove MSR 0x2f8
MSR 0x2f8 accessed the 124th Variable Range MTRR ever since MTRR support
was introduced by 9ba075a664df ("KVM: MTRR support").
0x2f8 became harmful when 910a6aae4e2e ("KVM: MTRR: exactly define the
size of variable MTRRs") shrinked the array of VR MTRRs from 256 to 8,
which made access to index 124 out of bounds. The surrounding code only
WARNs in this situation, thus the guest gained a limited read/write
access to struct kvm_arch_vcpu.
0x2f8 is not a valid VR MTRR MSR, because KVM has/advertises only 16 VR
MTRR MSRs, 0x200-0x20f. Every VR MTRR is set up using two MSRs, 0x2f8
was treated as a PHYSBASE and 0x2f9 would be its PHYSMASK, but 0x2f9 was
not implemented in KVM, therefore 0x2f8 could never do anything useful
and getting rid of it is safe.
This fixes CVE-2016-3713.
Fixes: 910a6aae4e2e ("KVM: MTRR: exactly define the size of variable MTRRs")
Cc: [email protected]
Reported-by: David Matlack <[email protected]>
Signed-off-by: Andy Honig <[email protected]>
Signed-off-by: Radim Krčmář <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
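The index arithmetic being protected looks roughly like this (hypothetical
helper, not the exact KVM code):

	/* Sketch: map a variable-range MTRR MSR to an array index, rejecting
	 * anything outside the 16 documented MSRs 0x200..0x20f. */
	static int var_mtrr_index(u32 msr)
	{
	        if (msr < 0x200 || msr > 0x20f)
	                return -1;        /* not a VR MTRR: 0x2f8 lands here */
	        return (msr - 0x200) / 2; /* PHYSBASE/PHYSMASK pairs */
	}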
|
static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
        handle_t *handle;
        struct inode *inode;
        int err, retries = 0;

        if (EXT4_DIR_LINK_MAX(dir))
                return -EMLINK;

        dquot_initialize(dir);

retry:
        handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
                                    EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
                                    EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        if (IS_DIRSYNC(dir))
                ext4_handle_sync(handle);

        inode = ext4_new_inode(handle, dir, S_IFDIR | mode,
                               &dentry->d_name, 0, NULL);
        err = PTR_ERR(inode);
        if (IS_ERR(inode))
                goto out_stop;

        inode->i_op = &ext4_dir_inode_operations;
        inode->i_fop = &ext4_dir_operations;
        err = ext4_init_new_dir(handle, dir, inode);
        if (err)
                goto out_clear_inode;
        err = ext4_mark_inode_dirty(handle, inode);
        if (!err)
                err = ext4_add_entry(handle, dentry, inode);
        if (err) {
out_clear_inode:
                clear_nlink(inode);
                unlock_new_inode(inode);
                ext4_mark_inode_dirty(handle, inode);
                iput(inode);
                goto out_stop;
        }
        ext4_inc_count(handle, dir);
        ext4_update_dx_flag(dir);
        err = ext4_mark_inode_dirty(handle, dir);
        if (err)
                goto out_clear_inode;
        unlock_new_inode(inode);
        d_instantiate(dentry, inode);
out_stop:
        ext4_journal_stop(handle);
        if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
                goto retry;
        return err;
}
| 0 |
[
"CWE-399"
] |
linux
|
0e9a9a1ad619e7e987815d20262d36a2f95717ca
| 284,865,177,720,548,360,000,000,000,000,000,000,000 | 56 |
ext4: avoid hang when mounting non-journal filesystems with orphan list
When trying to mount a file system which does not contain a journal,
but which does have an orphan list containing an inode which needs to
be truncated, the mount call will hang forever in
ext4_orphan_cleanup() because ext4_orphan_del() will return
immediately without removing the inode from the orphan list, leading
to an uninterruptible loop in kernel code which will busy out one of
the CPUs on the system.
This can be trivially reproduced by trying to mount the file system
found in tests/f_orphan_extents_inode/image.gz from the e2fsprogs
source tree. If a malicious user were to put this on a USB stick, and
mount it on a Linux desktop which has automatic mounts enabled, this
could be considered a potential denial of service attack. (Not a big
deal in practice, but professional paranoids worry about such things,
and have even been known to allocate CVE numbers for such problems.)
Signed-off-by: "Theodore Ts'o" <[email protected]>
Reviewed-by: Zheng Liu <[email protected]>
Cc: [email protected]
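A sketch of the failure mode (simplified shape of the cleanup loop; the remedy is that ext4_orphan_del() must always unlink the inode from the in-memory orphan list, journal or not):
while (!list_empty(&sbi->s_orphan)) {
	inode = orphan_list_entry(sbi->s_orphan.next);
	ext4_truncate(inode);
	/* if this silently does nothing on a no-journal fs, the
	 * list never shrinks and the loop spins forever */
	ext4_orphan_del(NULL, inode);
	iput(inode);
}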
|
const Type_handler *real_type_handler() const
{ return (*ref)->real_type_handler(); }
| 0 |
[
"CWE-617"
] |
server
|
807945f2eb5fa22e6f233cc17b85a2e141efe2c8
| 339,014,198,957,615,300,000,000,000,000,000,000,000 | 2 |
MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item.
|
select_str_opcode(int mb_len, int str_len, int ignore_case)
{
int op;
if (ignore_case) {
switch (str_len) {
case 1: op = OP_EXACT1_IC; break;
default: op = OP_EXACTN_IC; break;
}
}
else {
switch (mb_len) {
case 1:
switch (str_len) {
case 1: op = OP_EXACT1; break;
case 2: op = OP_EXACT2; break;
case 3: op = OP_EXACT3; break;
case 4: op = OP_EXACT4; break;
case 5: op = OP_EXACT5; break;
default: op = OP_EXACTN; break;
}
break;
case 2:
switch (str_len) {
case 1: op = OP_EXACTMB2N1; break;
case 2: op = OP_EXACTMB2N2; break;
case 3: op = OP_EXACTMB2N3; break;
default: op = OP_EXACTMB2N; break;
}
break;
case 3:
op = OP_EXACTMB3N;
break;
default:
op = OP_EXACTMBN;
break;
}
}
return op;
}
| 0 |
[
"CWE-125"
] |
php-src
|
c6e34d91b88638966662caac62c4d0e90538e317
| 58,867,347,496,371,480,000,000,000,000,000,000,000 | 43 |
Fix bug #77371 (heap buffer overflow in mb regex functions - compile_string_node)
|
void _gnutls_fips_mode_reset_zombie(void)
{
if (_fips_mode == 2) {
_fips_mode = 0;
}
}
| 0 |
[
"CWE-20"
] |
gnutls
|
b0a3048e56611a2deee4976aeba3b8c0740655a6
| 146,163,980,856,910,670,000,000,000,000,000,000,000 | 6 |
env: use secure_getenv when reading environment variables
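A minimal sketch of the pattern (HAVE_SECURE_GETENV is an assumed build-time flag, not necessarily the one gnutls uses): secure_getenv(3) returns NULL in setuid/setgid (AT_SECURE) processes, which is the point of the change.
#define _GNU_SOURCE
#include <stdlib.h>

static const char *safe_getenv(const char *name)
{
#ifdef HAVE_SECURE_GETENV
	return secure_getenv(name);	/* ignores the environment when AT_SECURE */
#else
	return getenv(name);		/* weaker fallback for older libcs */
#endif
}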
|
int Field_decimal::store(double nr)
{
ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
if (unsigned_flag && nr < 0)
{
overflow(1);
return 1;
}
if (!std::isfinite(nr)) // Handle infinity as special case
{
overflow(nr < 0.0);
return 1;
}
uint i;
size_t length;
uchar fyllchar,*to;
char buff[DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE];
fyllchar = zerofill ? (char) '0' : (char) ' ';
length= my_fcvt(nr, dec, buff, NULL);
if (length > field_length)
{
overflow(nr < 0.0);
return 1;
}
else
{
to=ptr;
for (i=field_length-length ; i-- > 0 ;)
*to++ = fyllchar;
memcpy(to,buff,length);
return 0;
}
}
| 0 |
[
"CWE-120"
] |
server
|
eca207c46293bc72dd8d0d5622153fab4d3fccf1
| 104,763,863,920,780,700,000,000,000,000,000,000,000 | 37 |
MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.
Precision should be kept below DECIMAL_MAX_SCALE for computations.
It can be bigger in Item_decimal. I'd fix this too, but it changes the
existing behaviour, so it is problematic to fix.
|
ASN1_INTEGER *TS_ACCURACY_get_millis(TS_ACCURACY *a)
{
return a->millis;
}
| 0 |
[] |
openssl
|
c7235be6e36c4bef84594aa3b2f0561db84b63d8
| 259,782,601,147,787,200,000,000,000,000,000,000,000 | 4 |
RFC 3161 compliant time stamp request creation, response generation
and response verification.
Submitted by: Zoltan Glozik <[email protected]>
Reviewed by: Ulf Moeller
|
static inline void sk_wake_async(struct sock *sk, int how, int band)
{
if (sock_flag(sk, SOCK_FASYNC))
sock_wake_async(sk->sk_socket, how, band);
}
| 0 |
[
"CWE-400"
] |
linux-2.6
|
c377411f2494a931ff7facdbb3a6839b1266bcf6
| 222,602,639,916,200,050,000,000,000,000,000,000,000 | 5 |
net: sk_add_backlog() take rmem_alloc into account
Current socket backlog limit is not enough to really stop DDOS attacks,
because the user thread spends a long time processing a full backlog each
round, and the user might spin wildly on the socket lock.
We should add backlog size and receive_queue size (aka rmem_alloc) to
pace writers, and let the user thread run without being slowed down too much.
Introduce a sk_rcvqueues_full() helper, to avoid taking the socket lock in
stress situations.
Under huge stress from a multiqueue/RPS enabled NIC, a single flow udp
receiver can now process ~200.000 pps (instead of ~100 pps before the
patch) on an 8-core machine.
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
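A sketch of the helper (simplified from the actual change): charge the backlog plus rmem_alloc against sk_rcvbuf, so an overloaded socket can drop early without taking the socket lock.
static inline bool sk_rcvqueues_full(const struct sock *sk,
				     const struct sk_buff *skb)
{
	unsigned int qsize = sk->sk_backlog.len +
			     atomic_read(&sk->sk_rmem_alloc);

	return qsize > (unsigned int)sk->sk_rcvbuf;
}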
|
virDomainChrTargetTypeToString(int deviceType,
int targetType)
{
const char *type = NULL;
switch (deviceType) {
case VIR_DOMAIN_CHR_DEVICE_TYPE_CHANNEL:
type = virDomainChrChannelTargetTypeToString(targetType);
break;
case VIR_DOMAIN_CHR_DEVICE_TYPE_CONSOLE:
type = virDomainChrConsoleTargetTypeToString(targetType);
break;
case VIR_DOMAIN_CHR_DEVICE_TYPE_SERIAL:
type = virDomainChrSerialTargetTypeToString(targetType);
break;
default:
break;
}
return type;
}
| 0 |
[
"CWE-212"
] |
libvirt
|
a5b064bf4b17a9884d7d361733737fb614ad8979
| 136,051,833,812,834,770,000,000,000,000,000,000,000 | 21 |
conf: Don't format http cookies unless VIR_DOMAIN_DEF_FORMAT_SECURE is used
Starting with 3b076391befc3fe72deb0c244ac6c2b4c100b410
(v6.1.0-122-g3b076391be) we support http cookies. Since they may contain
somewhat sensitive information we should not format them into the XML
unless VIR_DOMAIN_DEF_FORMAT_SECURE is asserted.
Reported-by: Han Han <[email protected]>
Signed-off-by: Peter Krempa <[email protected]>
Reviewed-by: Erik Skultety <[email protected]>
|
static int read_tree_cb(
const char *root, const git_tree_entry *tentry, void *payload)
{
read_tree_data *data = payload;
git_index_entry *entry = NULL, *old_entry;
git_buf path = GIT_BUF_INIT;
size_t pos;
if (git_tree_entry__is_tree(tentry))
return 0;
if (git_buf_joinpath(&path, root, tentry->filename) < 0)
return -1;
if (index_entry_create(&entry, INDEX_OWNER(data->index), path.ptr, false) < 0)
return -1;
entry->mode = tentry->attr;
git_oid_cpy(&entry->id, git_tree_entry_id(tentry));
/* look for corresponding old entry and copy data to new entry */
if (data->old_entries != NULL &&
!index_find_in_entries(
&pos, data->old_entries, data->entry_cmp, path.ptr, 0, 0) &&
(old_entry = git_vector_get(data->old_entries, pos)) != NULL &&
entry->mode == old_entry->mode &&
git_oid_equal(&entry->id, &old_entry->id))
{
index_entry_cpy(entry, old_entry);
entry->flags_extended = 0;
}
index_entry_adjust_namemask(entry, path.size);
git_buf_free(&path);
if (git_vector_insert(data->new_entries, entry) < 0) {
index_entry_free(entry);
return -1;
}
return 0;
}
| 0 |
[
"CWE-415",
"CWE-190"
] |
libgit2
|
3db1af1f370295ad5355b8f64b865a2a357bcac0
| 118,803,613,033,000,030,000,000,000,000,000,000,000 | 42 |
index: error out on unreasonable prefix-compressed path lengths
When computing the complete path length from the encoded
prefix-compressed path, we end up just allocating the complete path
without ever checking what the encoded path length actually is. This can
easily lead to a denial of service by just encoding an unreasonably long
path name inside of the index. Git already enforces a maximum path
length of 4096 bytes. As we also have that enforcement ready in some
places, just make sure that the resulting path is smaller than
GIT_PATH_MAX.
Reported-by: Krishna Ram Prakash R <[email protected]>
Reported-by: Vivek Parikh <[email protected]>
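A hedged sketch of the added bound (entry_len and the error call are illustrative names): validate the reconstructed length before allocating the path.
/* reject entries whose full path would exceed the 4096-byte limit
 * that git itself enforces */
if (entry_len > GIT_PATH_MAX)
	return index_error_invalid("unreasonable path length");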
|
strhash(string)
register const char *string;
{
register int c;
#ifdef HASH_ELFHASH
register unsigned int h = 0, g;
while ((c = *string++) != '\0') {
h = ( h << 4 ) + c;
if ( g = h & 0xF0000000 )
h ^= g >> 24;
h &= ~g;
}
return h;
#elif HASH_PERL
register int val = 0;
while ((c = *string++) != '\0') {
val += c;
val += (val << 10);
val ^= (val >> 6);
}
val += (val << 3);
val ^= (val >> 11);
return val + (val << 15);
#else
register int val = 0;
while ((c = *string++) != '\0') {
val = val*997 + c;
}
return val + (val>>5);
#endif
}
| 0 |
[
"CWE-125"
] |
oniguruma
|
65a9b1aa03c9bc2dc01b074295b9603232cb3b78
| 38,463,517,530,511,160,000,000,000,000,000,000,000 | 37 |
onig-5.9.2
|
static int sieve_notify(void *ac,
void *interp_context __attribute__((unused)),
void *script_context,
void *mc __attribute__((unused)),
const char **errmsg __attribute__((unused)))
{
const char *notifier = config_getstring(IMAPOPT_SIEVENOTIFIER);
if (notifier) {
sieve_notify_context_t *nc = (sieve_notify_context_t *) ac;
script_data_t *sd = (script_data_t *) script_context;
int nopt = 0;
prometheus_increment(CYRUS_LMTP_SIEVE_NOTIFY_TOTAL);
snmp_increment(SIEVE_NOTIFY, 1);
/* count options */
while (nc->options[nopt]) nopt++;
/* "default" is a magic value that implies the default */
notify(!strcmp("default",nc->method) ? notifier : nc->method,
"SIEVE", nc->priority, mbname_userid(sd->mbname), NULL,
nopt, nc->options, nc->message, nc->fname);
}
return SIEVE_OK;
}
| 0 |
[
"CWE-269"
] |
cyrus-imapd
|
673ebd96e2efbb8895d08648983377262f35b3f7
| 257,304,593,184,878,140,000,000,000,000,000,000,000 | 27 |
lmtp_sieve: don't create mailbox with admin for sieve autocreate
|
void noop_io_until_death(void)
{
char buf[1024];
if (!iobuf.in.buf || !iobuf.out.buf || iobuf.in_fd < 0 || iobuf.out_fd < 0 || kluge_around_eof)
return;
kluge_around_eof = 2;
/* Setting an I/O timeout ensures that if something inexplicably weird
* happens, we won't hang around forever. */
if (!io_timeout)
set_io_timeout(60);
while (1)
read_buf(iobuf.in_fd, buf, sizeof buf);
}
| 0 |
[
"CWE-59"
] |
rsync
|
962f8b90045ab331fc04c9e65f80f1a53e68243b
| 211,249,941,792,597,550,000,000,000,000,000,000,000 | 16 |
Complain if an inc-recursive path is not right for its dir.
This ensures that a malicious sender can't use a just-sent
symlink as a transfer path.
|
qtdemux_parse_saio (GstQTDemux * qtdemux, QtDemuxStream * stream,
GstByteReader * br, guint32 * info_type, guint32 * info_type_parameter,
guint64 * offset)
{
guint8 version = 0;
guint32 flags = 0;
guint32 aux_info_type = 0;
guint32 aux_info_type_parameter = 0;
guint32 entry_count;
guint32 off_32;
guint64 off_64;
const guint8 *aux_info_type_data = NULL;
g_return_val_if_fail (qtdemux != NULL, FALSE);
g_return_val_if_fail (stream != NULL, FALSE);
g_return_val_if_fail (br != NULL, FALSE);
g_return_val_if_fail (offset != NULL, FALSE);
if (!gst_byte_reader_get_uint8 (br, &version))
return FALSE;
if (!gst_byte_reader_get_uint24_be (br, &flags))
return FALSE;
if (flags & 0x1) {
if (!gst_byte_reader_get_data (br, 4, &aux_info_type_data))
return FALSE;
aux_info_type = QT_FOURCC (aux_info_type_data);
if (!gst_byte_reader_get_uint32_be (br, &aux_info_type_parameter))
return FALSE;
} else if (stream->protected) {
aux_info_type = stream->protection_scheme_type;
} else {
aux_info_type = stream->fourcc;
}
if (info_type)
*info_type = aux_info_type;
if (info_type_parameter)
*info_type_parameter = aux_info_type_parameter;
GST_DEBUG_OBJECT (qtdemux, "aux_info_type: '%" GST_FOURCC_FORMAT "', "
"aux_info_type_parameter: %#06x",
GST_FOURCC_ARGS (aux_info_type), aux_info_type_parameter);
if (!gst_byte_reader_get_uint32_be (br, &entry_count))
return FALSE;
if (entry_count != 1) {
GST_ERROR_OBJECT (qtdemux, "multiple offsets are not supported");
return FALSE;
}
if (version == 0) {
if (!gst_byte_reader_get_uint32_be (br, &off_32))
return FALSE;
*offset = (guint64) off_32;
} else {
if (!gst_byte_reader_get_uint64_be (br, &off_64))
return FALSE;
*offset = off_64;
}
GST_DEBUG_OBJECT (qtdemux, "offset: %" G_GUINT64_FORMAT, *offset);
return TRUE;
}
| 0 |
[
"CWE-125"
] |
gst-plugins-good
|
d0949baf3dadea6021d54abef6802fed5a06af75
| 292,018,954,782,135,260,000,000,000,000,000,000,000 | 68 |
qtdemux: Fix out of bounds read in tag parsing code
We can't simply assume that the length of the tag value given
inside the stream is correct; we should also check it against the
amount of data actually available.
https://bugzilla.gnome.org/show_bug.cgi?id=775451
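A minimal sketch of the idea using GstByteReader's public API (variable names are illustrative): clamp the in-stream tag length against the bytes actually left before using it.
if (tag_len > gst_byte_reader_get_remaining (br))
  return FALSE;                 /* truncated or bogus tag, bail out */
if (!gst_byte_reader_get_data (br, tag_len, &tag_data))
  return FALSE;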
|
static int smaps_rollup_open(struct inode *inode, struct file *file)
{
int ret;
struct proc_maps_private *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL_ACCOUNT);
if (!priv)
return -ENOMEM;
ret = single_open(file, show_smaps_rollup, priv);
if (ret)
goto out_free;
priv->inode = inode;
priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
if (IS_ERR(priv->mm)) {
ret = PTR_ERR(priv->mm);
single_release(inode, file);
goto out_free;
}
return 0;
out_free:
kfree(priv);
return ret;
}
| 0 |
[
"CWE-362",
"CWE-703",
"CWE-667"
] |
linux
|
04f5866e41fb70690e28397487d8bd8eea7d712a
| 155,973,139,533,807,580,000,000,000,000,000,000,000 | 28 |
coredump: fix race condition between mmget_not_zero()/get_task_mm() and core dumping
The core dumping code has always run without holding the mmap_sem for
writing, despite that is the only way to ensure that the entire vma
layout will not change from under it. Only using some signal
serialization on the processes belonging to the mm is not nearly enough.
This was pointed out earlier. For example in Hugh's post from Jul 2017:
https://lkml.kernel.org/r/[email protected]
"Not strictly relevant here, but a related note: I was very surprised
to discover, only quite recently, how handle_mm_fault() may be called
without down_read(mmap_sem) - when core dumping. That seems a
misguided optimization to me, which would also be nice to correct"
In particular because the growsdown and growsup can move the
vm_start/vm_end the various loops the core dump does around the vma will
not be consistent if page faults can happen concurrently.
Pretty much all users calling mmget_not_zero()/get_task_mm() and then
taking the mmap_sem had the potential to introduce unexpected side
effects in the core dumping code.
Adding mmap_sem for writing around the ->core_dump invocation is a
viable long term fix, but it requires removing all copy user and page
faults and to replace them with get_dump_page() for all binary formats
which is not suitable as a short term fix.
For the time being this solution manually covers the places that can
confuse the core dump either by altering the vma layout or the vma flags
while it runs. Once ->core_dump runs under mmap_sem for writing the
function mmget_still_valid() can be dropped.
Allowing mmap_sem protected sections to run in parallel with the
coredump provides some minor parallelism advantage to the swapoff code
(which seems to be safe enough by never mangling any vma field and can
keep doing swapins in parallel to the core dumping) and to some other
corner case.
In order to facilitate the backporting I added "Fixes: 86039bd3b4e6"
however the side effect of this same race condition in /proc/pid/mem
should be reproducible since before 2.6.12-rc2 so I couldn't add any
other "Fixes:" because there's no hash beyond the git genesis commit.
Because find_extend_vma() is the only location outside of the process
context that could modify the "mm" structures under mmap_sem for
reading, by adding the mmget_still_valid() check to it, all other cases
that take the mmap_sem for reading don't need the new check after
mmget_not_zero()/get_task_mm(). The expand_stack() in page fault
context also doesn't need the new check, because all tasks under core
dumping are frozen.
Link: http://lkml.kernel.org/r/[email protected]
Fixes: 86039bd3b4e6 ("userfaultfd: add new syscall to provide memory externalization")
Signed-off-by: Andrea Arcangeli <[email protected]>
Reported-by: Jann Horn <[email protected]>
Suggested-by: Oleg Nesterov <[email protected]>
Acked-by: Peter Xu <[email protected]>
Reviewed-by: Mike Rapoport <[email protected]>
Reviewed-by: Oleg Nesterov <[email protected]>
Reviewed-by: Jann Horn <[email protected]>
Acked-by: Jason Gunthorpe <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
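The helper this change introduces is tiny; a sketch of its shape (mm->core_state is non-NULL only while a core dump is in flight):
static inline bool mmget_still_valid(struct mm_struct *mm)
{
	return likely(!mm->core_state);
}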
|
bfd_no_swap_handler(__attribute__((unused)) vector_t *strvec)
{
global_data->bfd_no_swap = true;
}
| 0 |
[
"CWE-200"
] |
keepalived
|
c6247a9ef2c7b33244ab1d3aa5d629ec49f0a067
| 177,792,856,188,860,970,000,000,000,000,000,000,000 | 4 |
Add command line and configuration option to set umask
Issue #1048 identified that files created by keepalived are created
with mode 0666. This commit changes the default to 0644, and also
allows the umask to be specified in the configuration or as a command
line option.
Signed-off-by: Quentin Armitage <[email protected]>
|
static int vdbeCompareMemString(
const Mem *pMem1,
const Mem *pMem2,
const CollSeq *pColl,
u8 *prcErr /* If an OOM occurs, set to SQLITE_NOMEM */
){
if( pMem1->enc==pColl->enc ){
/* The strings are already in the correct encoding. Call the
** comparison function directly */
return pColl->xCmp(pColl->pUser,pMem1->n,pMem1->z,pMem2->n,pMem2->z);
}else{
int rc;
const void *v1, *v2;
Mem c1;
Mem c2;
sqlite3VdbeMemInit(&c1, pMem1->db, MEM_Null);
sqlite3VdbeMemInit(&c2, pMem1->db, MEM_Null);
sqlite3VdbeMemShallowCopy(&c1, pMem1, MEM_Ephem);
sqlite3VdbeMemShallowCopy(&c2, pMem2, MEM_Ephem);
v1 = sqlite3ValueText((sqlite3_value*)&c1, pColl->enc);
v2 = sqlite3ValueText((sqlite3_value*)&c2, pColl->enc);
if( (v1==0 || v2==0) ){
if( prcErr ) *prcErr = SQLITE_NOMEM_BKPT;
rc = 0;
}else{
rc = pColl->xCmp(pColl->pUser, c1.n, v1, c2.n, v2);
}
sqlite3VdbeMemRelease(&c1);
sqlite3VdbeMemRelease(&c2);
return rc;
}
}
| 0 |
[
"CWE-755"
] |
sqlite
|
8654186b0236d556aa85528c2573ee0b6ab71be3
| 192,038,675,338,400,030,000,000,000,000,000,000,000 | 32 |
When an error occurs while rewriting the parser tree for window functions
in the sqlite3WindowRewrite() routine, make sure that pParse->nErr is set,
and make sure that this shuts down any subsequent code generation that might
depend on the transformations that were implemented. This fixes a problem
discovered by the Yongheng and Rui fuzzer.
FossilOrigin-Name: e2bddcd4c55ba3cbe0130332679ff4b048630d0ced9a8899982edb5a3569ba7f
|
static int __init llc2_init(void)
{
int rc = proto_register(&llc_proto, 0);
if (rc != 0)
goto out;
llc_build_offset_table();
llc_station_init();
llc_ui_sap_last_autoport = LLC_SAP_DYN_START;
rc = llc_proc_init();
if (rc != 0) {
printk(llc_proc_err_msg);
goto out_unregister_llc_proto;
}
rc = llc_sysctl_init();
if (rc) {
printk(llc_sysctl_err_msg);
goto out_proc;
}
rc = sock_register(&llc_ui_family_ops);
if (rc) {
printk(llc_sock_err_msg);
goto out_sysctl;
}
llc_add_pack(LLC_DEST_SAP, llc_sap_handler);
llc_add_pack(LLC_DEST_CONN, llc_conn_handler);
out:
return rc;
out_sysctl:
llc_sysctl_exit();
out_proc:
llc_proc_exit();
out_unregister_llc_proto:
proto_unregister(&llc_proto);
goto out;
}
| 0 |
[
"CWE-200"
] |
linux-2.6
|
28e9fc592cb8c7a43e4d3147b38be6032a0e81bc
| 93,231,372,502,281,040,000,000,000,000,000,000,000 | 37 |
NET: llc, zero sockaddr_llc struct
The sllc_arphrd member of sockaddr_llc might not be set. Zero sllc
before copying it to the upper layer's structure.
Signed-off-by: Jiri Slaby <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
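A sketch of the fix pattern: zero the whole sockaddr before filling it, so padding bytes and members such as sllc_arphrd can never leak kernel stack to userspace.
struct sockaddr_llc sllc;

memset(&sllc, 0, sizeof(sllc));
sllc.sllc_family = AF_LLC;
/* ... fill only the meaningful members, then copy to the caller ... */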
|
const TABLE_SHARE *Item::field_table_or_null()
{
if (real_item()->type() != Item::FIELD_ITEM)
return NULL;
return ((Item_field *) this)->field->table->s;
}
| 0 |
[
"CWE-416"
] |
server
|
c02ebf3510850ba78a106be9974c94c3b97d8585
| 117,110,019,998,859,700,000,000,000,000,000,000,000 | 7 |
MDEV-24176 Preparations
1. moved fix_vcol_exprs() call to open_table()
mysql_alter_table() doesn't do lock_tables() so it cannot win from
fix_vcol_exprs() from there. Tests affected: main.default_session
2. Vanilla cleanups and comments.
|
Fty make_adaptor(F fn, R (F::*)(const SemanticValues &sv, any &dt)) {
return TypeAdaptor_csv_dt<R>(fn);
}
| 0 |
[
"CWE-125"
] |
cpp-peglib
|
b3b29ce8f3acf3a32733d930105a17d7b0ba347e
| 225,573,309,517,226,400,000,000,000,000,000,000,000 | 3 |
Fix #122
|
static void dvierr(DviContext *dvi, const char *format, ...)
{
va_list ap;
va_start(ap, format);
fprintf(stderr, "%s[%d]: Error: ",
dvi->filename, dvi->currpage);
vfprintf(stderr, format, ap);
va_end(ap);
}
| 0 |
[
"CWE-20"
] |
evince
|
d4139205b010ed06310d14284e63114e88ec6de2
| 16,626,861,787,423,154,000,000,000,000,000,000,000 | 10 |
backends: Fix several security issues in the dvi-backend.
See CVE-2010-2640, CVE-2010-2641, CVE-2010-2642 and CVE-2010-2643.
|
dns64_init(struct module_env* env, int id)
{
struct dns64_env* dns64_env =
(struct dns64_env*)calloc(1, sizeof(struct dns64_env));
if (!dns64_env) {
log_err("malloc failure");
return 0;
}
env->modinfo[id] = (void*)dns64_env;
name_tree_init(&dns64_env->ignore_aaaa);
if (!dns64_apply_cfg(dns64_env, env->cfg)) {
log_err("dns64: could not apply configuration settings.");
return 0;
}
return 1;
}
| 0 |
[
"CWE-613",
"CWE-703"
] |
unbound
|
f6753a0f1018133df552347a199e0362fc1dac68
| 204,189,137,078,984,200,000,000,000,000,000,000,000 | 16 |
- Fix the novel ghost domain issues CVE-2022-30698 and CVE-2022-30699.
|
BOOL nla_set_sspi_module(rdpNla* nla, const char* sspiModule)
{
if (!nla)
return FALSE;
if (nla->SspiModule)
{
free(nla->SspiModule);
nla->SspiModule = NULL;
}
if (!sspiModule)
return TRUE;
nla->SspiModule = _strdup(sspiModule);
if (!nla->SspiModule)
return FALSE;
return TRUE;
}
| 0 |
[] |
FreeRDP
|
479e891545473f01c187daffdfa05fc752b54b72
| 125,312,105,394,718,610,000,000,000,000,000,000,000 | 20 |
check return values for SetCredentialsAttributes, throw warnings for unsupported attributes
|
root_scan_phase(mrb_state *mrb, mrb_gc *gc)
{
size_t i, e;
if (!is_minor_gc(gc)) {
gc->gray_list = NULL;
gc->atomic_gray_list = NULL;
}
mrb_gc_mark_gv(mrb);
/* mark arena */
for (i=0,e=gc->arena_idx; i<e; i++) {
mrb_gc_mark(mrb, gc->arena[i]);
}
/* mark class hierarchy */
mrb_gc_mark(mrb, (struct RBasic*)mrb->object_class);
/* mark built-in classes */
mrb_gc_mark(mrb, (struct RBasic*)mrb->class_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->module_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->proc_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->string_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->array_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->hash_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->float_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->fixnum_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->true_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->false_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->nil_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->symbol_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->kernel_module);
mrb_gc_mark(mrb, (struct RBasic*)mrb->eException_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->eStandardError_class);
/* mark top_self */
mrb_gc_mark(mrb, (struct RBasic*)mrb->top_self);
/* mark exception */
mrb_gc_mark(mrb, (struct RBasic*)mrb->exc);
/* mark backtrace */
mrb_gc_mark(mrb, (struct RBasic*)mrb->backtrace.exc);
e = (size_t)mrb->backtrace.n;
for (i=0; i<e; i++) {
mrb_gc_mark(mrb, (struct RBasic*)mrb->backtrace.entries[i].klass);
}
/* mark pre-allocated exception */
mrb_gc_mark(mrb, (struct RBasic*)mrb->nomem_err);
mrb_gc_mark(mrb, (struct RBasic*)mrb->stack_err);
#ifdef MRB_GC_FIXED_ARENA
mrb_gc_mark(mrb, (struct RBasic*)mrb->arena_err);
#endif
mark_context(mrb, mrb->root_c);
if (mrb->root_c->fib) {
mrb_gc_mark(mrb, (struct RBasic*)mrb->root_c->fib);
}
if (mrb->root_c != mrb->c) {
mark_context(mrb, mrb->c);
}
}
| 0 |
[
"CWE-416"
] |
mruby
|
5c114c91d4ff31859fcd84cf8bf349b737b90d99
| 261,035,320,928,393,560,000,000,000,000,000,000,000 | 61 |
Clear unused stack region that may refer to freed objects; fix #3596
|
static void ram_block_add(RAMBlock *new_block, Error **errp, bool shared)
{
RAMBlock *block;
RAMBlock *last_block = NULL;
ram_addr_t old_ram_size, new_ram_size;
Error *err = NULL;
old_ram_size = last_ram_page();
qemu_mutex_lock_ramlist();
new_block->offset = find_ram_offset(new_block->max_length);
if (!new_block->host) {
if (xen_enabled()) {
xen_ram_alloc(new_block->offset, new_block->max_length,
new_block->mr, &err);
if (err) {
error_propagate(errp, err);
qemu_mutex_unlock_ramlist();
return;
}
} else {
new_block->host = phys_mem_alloc(new_block->max_length,
&new_block->mr->align, shared);
if (!new_block->host) {
error_setg_errno(errp, errno,
"cannot set up guest memory '%s'",
memory_region_name(new_block->mr));
qemu_mutex_unlock_ramlist();
return;
}
memory_try_enable_merging(new_block->host, new_block->max_length);
}
}
new_ram_size = MAX(old_ram_size,
(new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
if (new_ram_size > old_ram_size) {
dirty_memory_extend(old_ram_size, new_ram_size);
}
/* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
* QLIST (which has an RCU-friendly variant) does not have insertion at
* tail, so save the last element in last_block.
*/
RAMBLOCK_FOREACH(block) {
last_block = block;
if (block->max_length < new_block->max_length) {
break;
}
}
if (block) {
QLIST_INSERT_BEFORE_RCU(block, new_block, next);
} else if (last_block) {
QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
} else { /* list is empty */
QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
}
ram_list.mru_block = NULL;
/* Write list before version */
smp_wmb();
ram_list.version++;
qemu_mutex_unlock_ramlist();
cpu_physical_memory_set_dirty_range(new_block->offset,
new_block->used_length,
DIRTY_CLIENTS_ALL);
if (new_block->host) {
qemu_ram_setup_dump(new_block->host, new_block->max_length);
qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
/*
* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU
* Configure it unless the machine is a qtest server, in which case
* KVM is not used and it may be forked (eg for fuzzing purposes).
*/
if (!qtest_enabled()) {
qemu_madvise(new_block->host, new_block->max_length,
QEMU_MADV_DONTFORK);
}
ram_block_notify_add(new_block->host, new_block->max_length);
}
}
| 0 |
[
"CWE-787"
] |
qemu
|
4bfb024bc76973d40a359476dc0291f46e435442
| 235,302,681,449,355,070,000,000,000,000,000,000,000 | 83 |
memory: clamp cached translation in case it points to an MMIO region
In using the address_space_translate_internal API, address_space_cache_init
forgot one piece of advice that can be found in the code for
address_space_translate_internal:
/* MMIO registers can be expected to perform full-width accesses based only
* on their address, without considering adjacent registers that could
* decode to completely different MemoryRegions. When such registers
* exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
* regions overlap wildly. For this reason we cannot clamp the accesses
* here.
*
* If the length is small (as is the case for address_space_ldl/stl),
* everything works fine. If the incoming length is large, however,
* the caller really has to do the clamping through memory_access_size.
*/
address_space_cache_init is exactly one such case where "the incoming length
is large", therefore we need to clamp the resulting length---not to
memory_access_size though, since we are not doing an access yet, but to
the size of the resulting section. This ensures that subsequent accesses
to the cached MemoryRegionSection will be in range.
With this patch, the enclosed testcase notices that the used ring does
not fit into the MSI-X table and prints a "qemu-system-x86_64: Cannot map used"
error.
Signed-off-by: Paolo Bonzini <[email protected]>
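A hedged sketch of the clamp (illustrative names; in QEMU proper the section size is an Int128): for anything that is not RAM, the cached length must not extend past the MemoryRegionSection returned by translation.
/* l = requested length, xlat = offset into the translated section */
if (!memory_region_is_ram(section->mr))
	l = MIN(l, section_size - xlat);	/* clamp MMIO to its section */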
|
static void io_req_task_work_add(struct io_kiocb *req)
{
struct io_uring_task *tctx = req->task->io_uring;
__io_req_task_work_add(req, tctx, &tctx->task_list);
}
| 0 |
[
"CWE-416"
] |
linux
|
9cae36a094e7e9d6e5fe8b6dcd4642138b3eb0c7
| 296,360,229,851,040,940,000,000,000,000,000,000,000 | 6 |
io_uring: reinstate the inflight tracking
After some debugging, it was realized that we really do still need the
old inflight tracking for any file type that has io_uring_fops assigned.
If we don't, then trivial circular references will mean that we never get
the ctx cleaned up and hence it'll leak.
Just bring back the inflight tracking, which then also means we can
eliminate the conditional dropping of the file when task_work is queued.
Fixes: d5361233e9ab ("io_uring: drop the old style inflight file tracking")
Signed-off-by: Jens Axboe <[email protected]>
|
MagickExport MagickBooleanType CompressImageColormap(Image *image)
{
QuantizeInfo
quantize_info;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (IsPaletteImage(image,&image->exception) == MagickFalse)
return(MagickFalse);
GetQuantizeInfo(&quantize_info);
quantize_info.number_colors=image->colors;
quantize_info.tree_depth=MaxTreeDepth;
return(QuantizeImage(&quantize_info,image));
}
| 0 |
[
"CWE-125"
] |
ImageMagick6
|
e2a21735e3a3f3930bd431585ec36334c4c2eb77
| 151,073,723,954,680,860,000,000,000,000,000,000,000 | 16 |
https://github.com/ImageMagick/ImageMagick/issues/1540
|
static inline void process_get_command(conn *c, token_t *tokens, size_t ntokens, bool return_cas) {
char *key;
size_t nkey;
int i = 0;
item *it;
token_t *key_token = &tokens[KEY_TOKEN];
char *suffix;
assert(c != NULL);
do {
while(key_token->length != 0) {
key = key_token->value;
nkey = key_token->length;
if(nkey > KEY_MAX_LENGTH) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
it = item_get(key, nkey);
if (settings.detail_enabled) {
stats_prefix_record_get(key, nkey, NULL != it);
}
if (it) {
if (i >= c->isize) {
item **new_list = realloc(c->ilist, sizeof(item *) * c->isize * 2);
if (new_list) {
c->isize *= 2;
c->ilist = new_list;
} else {
STATS_LOCK();
stats.malloc_fails++;
STATS_UNLOCK();
item_remove(it);
break;
}
}
/*
* Construct the response. Each hit adds three elements to the
* outgoing data list:
* "VALUE "
* key
* " " + flags + " " + data length + "\r\n" + data (with \r\n)
*/
if (return_cas)
{
MEMCACHED_COMMAND_GET(c->sfd, ITEM_key(it), it->nkey,
it->nbytes, ITEM_get_cas(it));
/* Goofy mid-flight realloc. */
if (i >= c->suffixsize) {
char **new_suffix_list = realloc(c->suffixlist,
sizeof(char *) * c->suffixsize * 2);
if (new_suffix_list) {
c->suffixsize *= 2;
c->suffixlist = new_suffix_list;
} else {
STATS_LOCK();
stats.malloc_fails++;
STATS_UNLOCK();
item_remove(it);
break;
}
}
suffix = cache_alloc(c->thread->suffix_cache);
if (suffix == NULL) {
STATS_LOCK();
stats.malloc_fails++;
STATS_UNLOCK();
out_string(c, "SERVER_ERROR out of memory making CAS suffix");
item_remove(it);
return;
}
*(c->suffixlist + i) = suffix;
int suffix_len = snprintf(suffix, SUFFIX_SIZE,
" %llu\r\n",
(unsigned long long)ITEM_get_cas(it));
if (add_iov(c, "VALUE ", 6) != 0 ||
add_iov(c, ITEM_key(it), it->nkey) != 0 ||
add_iov(c, ITEM_suffix(it), it->nsuffix - 2) != 0 ||
add_iov(c, suffix, suffix_len) != 0 ||
add_iov(c, ITEM_data(it), it->nbytes) != 0)
{
item_remove(it);
break;
}
}
else
{
MEMCACHED_COMMAND_GET(c->sfd, ITEM_key(it), it->nkey,
it->nbytes, ITEM_get_cas(it));
if (add_iov(c, "VALUE ", 6) != 0 ||
add_iov(c, ITEM_key(it), it->nkey) != 0 ||
add_iov(c, ITEM_suffix(it), it->nsuffix + it->nbytes) != 0)
{
item_remove(it);
break;
}
}
if (settings.verbose > 1)
fprintf(stderr, ">%d sending key %s\n", c->sfd, ITEM_key(it));
/* item_get() has incremented it->refcount for us */
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.slab_stats[it->slabs_clsid].get_hits++;
c->thread->stats.get_cmds++;
pthread_mutex_unlock(&c->thread->stats.mutex);
item_update(it);
*(c->ilist + i) = it;
i++;
} else {
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.get_misses++;
c->thread->stats.get_cmds++;
pthread_mutex_unlock(&c->thread->stats.mutex);
MEMCACHED_COMMAND_GET(c->sfd, key, nkey, -1, 0);
}
key_token++;
}
/*
* If the command string hasn't been fully processed, get the next set
* of tokens.
*/
if(key_token->value != NULL) {
ntokens = tokenize_command(key_token->value, tokens, MAX_TOKENS);
key_token = tokens;
}
} while(key_token->value != NULL);
c->icurr = c->ilist;
c->ileft = i;
if (return_cas) {
c->suffixcurr = c->suffixlist;
c->suffixleft = i;
}
if (settings.verbose > 1)
fprintf(stderr, ">%d END\n", c->sfd);
/*
If the loop was terminated because of out-of-memory, it is not
reliable to add END\r\n to the buffer, because it might not end
in \r\n. So we send SERVER_ERROR instead.
*/
if (key_token->value != NULL || add_iov(c, "END\r\n", 5) != 0
|| (IS_UDP(c->transport) && build_udp_headers(c) != 0)) {
out_string(c, "SERVER_ERROR out of memory writing get response");
}
else {
conn_set_state(c, conn_mwrite);
c->msgcurr = 0;
}
return;
}
| 1 |
[
"CWE-119"
] |
memcached
|
fbe823d9a61b5149cd6e3b5e17bd28dd3b8dd760
| 319,897,502,446,313,200,000,000,000,000,000,000,000 | 164 |
fix potential unbounded key prints
item key isn't necessarily null terminated. user submitted a patch for one,
this clears two more.
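A sketch of the bounded form for the verbose print in the function above (the key is length-delimited by it->nkey, not NUL-terminated, so "%s" can read past the item):
if (settings.verbose > 1)
    fprintf(stderr, ">%d sending key %.*s\n",
            c->sfd, (int) it->nkey, ITEM_key(it));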
|
static bool evtchn_fifo_is_pending(evtchn_port_t port)
{
event_word_t *word = event_word_from_port(port);
return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
}
| 0 |
[
"CWE-400",
"CWE-703"
] |
linux
|
e99502f76271d6bc4e374fe368c50c67a1fd3070
| 251,383,143,457,693,530,000,000,000,000,000,000,000 | 5 |
xen/events: defer eoi in case of excessive number of events
In case rogue guests are sending events at high frequency it might
happen that xen_evtchn_do_upcall() won't stop processing events in
dom0. As this is done in irq handling a crash might be the result.
In order to avoid that, delay further inter-domain events after some
time in xen_evtchn_do_upcall() by forcing eoi processing into a
worker on the same cpu, thus inhibiting new events coming in.
The time after which eoi processing is to be delayed is configurable
via a new module parameter "event_loop_timeout" which specifies the
maximum event loop time in jiffies (default: 2, the value was chosen
after some tests showing that a value of 2 was the lowest with an
only slight drop of dom0 network throughput while multiple guests
performed an event storm).
How long eoi processing will be delayed can be specified via another
parameter "event_eoi_delay" (again in jiffies, default 10, again the
value was chosen after testing with different delay values).
This is part of XSA-332.
Cc: [email protected]
Reported-by: Julien Grall <[email protected]>
Signed-off-by: Juergen Gross <[email protected]>
Reviewed-by: Stefano Stabellini <[email protected]>
Reviewed-by: Wei Liu <[email protected]>
|
static void nfs4_close_done(struct rpc_task *task, void *data)
{
struct nfs4_closedata *calldata = data;
struct nfs4_state *state = calldata->state;
struct nfs_server *server = NFS_SERVER(calldata->inode);
if (RPC_ASSASSINATED(task))
return;
/* hmm. we are done with the inode, and in the process of freeing
* the state_owner. we keep this around to process errors
*/
switch (task->tk_status) {
case 0:
nfs_set_open_stateid(state, &calldata->res.stateid, 0);
renew_lease(server, calldata->timestamp);
break;
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_EXPIRED:
if (calldata->arg.fmode == 0)
break;
default:
if (nfs4_async_handle_error(task, server, state) == -EAGAIN) {
rpc_restart_call(task);
return;
}
}
nfs_refresh_inode(calldata->inode, calldata->res.fattr);
}
| 0 |
[
"CWE-703"
] |
linux
|
dc0b027dfadfcb8a5504f7d8052754bf8d501ab9
| 170,070,142,442,935,830,000,000,000,000,000,000,000 | 30 |
NFSv4: Convert the open and close ops to use fmode
Signed-off-by: Trond Myklebust <[email protected]>
|
uint8_t* SecureElementGetDevEui( void )
{
return SeNvmCtx.DevEui;
}
| 0 |
[
"CWE-120",
"CWE-787"
] |
LoRaMac-node
|
e3063a91daa7ad8a687223efa63079f0c24568e4
| 331,650,410,234,890,860,000,000,000,000,000,000,000 | 4 |
Added received buffer size checks.
|
static void hidpp_ff_work_handler(struct work_struct *w)
{
struct hidpp_ff_work_data *wd = container_of(w, struct hidpp_ff_work_data, work);
struct hidpp_ff_private_data *data = wd->data;
struct hidpp_report response;
u8 slot;
int ret;
/* add slot number if needed */
switch (wd->effect_id) {
case HIDPP_FF_EFFECTID_AUTOCENTER:
wd->params[0] = data->slot_autocenter;
break;
case HIDPP_FF_EFFECTID_NONE:
/* leave slot as zero */
break;
default:
/* find current slot for effect */
wd->params[0] = hidpp_ff_find_effect(data, wd->effect_id);
break;
}
/* send command and wait for reply */
ret = hidpp_send_fap_command_sync(data->hidpp, data->feature_index,
wd->command, wd->params, wd->size, &response);
if (ret) {
hid_err(data->hidpp->hid_dev, "Failed to send command to device!\n");
goto out;
}
/* parse return data */
switch (wd->command) {
case HIDPP_FF_DOWNLOAD_EFFECT:
slot = response.fap.params[0];
if (slot > 0 && slot <= data->num_effects) {
if (wd->effect_id >= 0)
/* regular effect uploaded */
data->effect_ids[slot-1] = wd->effect_id;
else if (wd->effect_id >= HIDPP_FF_EFFECTID_AUTOCENTER)
/* autocenter spring uploaded */
data->slot_autocenter = slot;
}
break;
case HIDPP_FF_DESTROY_EFFECT:
if (wd->effect_id >= 0)
/* regular effect destroyed */
data->effect_ids[wd->params[0]-1] = -1;
else if (wd->effect_id >= HIDPP_FF_EFFECTID_AUTOCENTER)
/* autocenter spring destroyed */
data->slot_autocenter = 0;
break;
case HIDPP_FF_SET_GLOBAL_GAINS:
data->gain = (wd->params[0] << 8) + wd->params[1];
break;
case HIDPP_FF_SET_APERTURE:
data->range = (wd->params[0] << 8) + wd->params[1];
break;
default:
/* no action needed */
break;
}
out:
atomic_dec(&data->workqueue_size);
kfree(wd);
}
| 0 |
[
"CWE-787"
] |
linux
|
d9d4b1e46d9543a82c23f6df03f4ad697dab361b
| 1,287,468,520,997,176,000,000,000,000,000,000,000 | 67 |
HID: Fix assumption that devices have inputs
The syzbot fuzzer found a slab-out-of-bounds write bug in the hid-gaff
driver. The problem is caused by the driver's assumption that the
device must have an input report. While this will be true for all
normal HID input devices, a suitably malicious device can violate the
assumption.
The same assumption is present in over a dozen other HID drivers.
This patch fixes them by checking that the list of hid_inputs for the
hid_device is nonempty before allowing it to be used.
Reported-and-tested-by: [email protected]
Signed-off-by: Alan Stern <[email protected]>
CC: <[email protected]>
Signed-off-by: Benjamin Tissoires <[email protected]>
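A sketch of the guard added across those drivers (hid and hid_input as in the HID core API):
struct hid_input *hidinput;

if (list_empty(&hid->inputs)) {
	hid_err(hid, "no inputs found\n");
	return -ENODEV;
}
hidinput = list_entry(hid->inputs.next, struct hid_input, list);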
|
rfbSendBell(rfbScreenInfoPtr rfbScreen)
{
rfbClientIteratorPtr i;
rfbClientPtr cl;
rfbBellMsg b;
i = rfbGetClientIterator(rfbScreen);
while((cl=rfbClientIteratorNext(i))) {
b.type = rfbBell;
if (rfbWriteExact(cl, (char *)&b, sz_rfbBellMsg) < 0) {
rfbLogPerror("rfbSendBell: write");
rfbCloseClient(cl);
}
}
rfbStatRecordMessageSent(cl, rfbBell, sz_rfbBellMsg, sz_rfbBellMsg);
rfbReleaseClientIterator(i);
}
| 1 |
[] |
libvncserver
|
804335f9d296440bb708ca844f5d89b58b50b0c6
| 38,273,528,982,950,493,000,000,000,000,000,000,000 | 17 |
Thread safety for zrle, zlib, tight.
Proposed tight security type fix for debian bug 517422.
|
ex_find(exarg_T *eap)
{
#ifdef FEAT_SEARCHPATH
char_u *fname;
int count;
fname = find_file_in_path(eap->arg, (int)STRLEN(eap->arg), FNAME_MESS,
TRUE, curbuf->b_ffname);
if (eap->addr_count > 0)
{
/* Repeat finding the file "count" times. This matters when it
* appears several times in the path. */
count = eap->line2;
while (fname != NULL && --count > 0)
{
vim_free(fname);
fname = find_file_in_path(NULL, 0, FNAME_MESS,
FALSE, curbuf->b_ffname);
}
}
if (fname != NULL)
{
eap->arg = fname;
#endif
do_exedit(eap, NULL);
#ifdef FEAT_SEARCHPATH
vim_free(fname);
}
#endif
}
| 0 |
[
"CWE-78"
] |
vim
|
8c62a08faf89663e5633dc5036cd8695c80f1075
| 272,460,157,785,762,800,000,000,000,000,000,000,000 | 31 |
patch 8.1.0881: can execute shell commands in rvim through interfaces
Problem: Can execute shell commands in rvim through interfaces.
Solution: Disable using interfaces in restricted mode. Allow for writing
file with writefile(), histadd() and a few others.
|
static int interface_req_cursor_notification(QXLInstance *sin)
{
PCIQXLDevice *qxl = container_of(sin, PCIQXLDevice, ssd.qxl);
int wait = 1;
switch (qxl->mode) {
case QXL_MODE_COMPAT:
case QXL_MODE_NATIVE:
case QXL_MODE_UNDEFINED:
SPICE_RING_CONS_WAIT(&qxl->ram->cursor_ring, wait);
qxl_ring_set_dirty(qxl);
break;
default:
/* nothing */
break;
}
return wait;
}
| 0 |
[] |
qemu-kvm
|
5ff4e36c804157bd84af43c139f8cd3a59722db9
| 67,869,496,754,839,290,000,000,000,000,000,000,000 | 18 |
qxl: async io support using new spice api
Some of the QXL port i/o commands are waiting for the spice server to
complete certain actions. Add async versions for these commands, so we
don't block the vcpu while the spice server processses the command.
Instead the qxl device will raise an IRQ when done.
The async command processing relies on an added QXLInterface::async_complete
and added QXLWorker::*_async additions, in spice server qxl >= 3.1
Signed-off-by: Gerd Hoffmann <[email protected]>
Signed-off-by: Alon Levy <[email protected]>
|
static void cil_reset_default(struct cil_default *def)
{
cil_list_destroy(&def->class_datums, CIL_FALSE);
}
| 0 |
[
"CWE-416"
] |
selinux
|
f34d3d30c8325e4847a6b696fe7a3936a8a361f3
| 131,991,038,728,557,750,000,000,000,000,000,000,000 | 4 |
libsepol/cil: Destroy classperms list when resetting classpermission
Nicolas Iooss reports:
A few months ago, OSS-Fuzz found a crash in the CIL compiler, which
got reported as
https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=28648 (the title
is misleading, or is caused by another issue that conflicts with the
one I report in this message). Here is a minimized CIL policy which
reproduces the issue:
(class CLASS (PERM))
(classorder (CLASS))
(sid SID)
(sidorder (SID))
(user USER)
(role ROLE)
(type TYPE)
(category CAT)
(categoryorder (CAT))
(sensitivity SENS)
(sensitivityorder (SENS))
(sensitivitycategory SENS (CAT))
(allow TYPE self (CLASS (PERM)))
(roletype ROLE TYPE)
(userrole USER ROLE)
(userlevel USER (SENS))
(userrange USER ((SENS)(SENS (CAT))))
(sidcontext SID (USER ROLE TYPE ((SENS)(SENS))))
(classpermission CLAPERM)
(optional OPT
(roletype nonexistingrole nonexistingtype)
(classpermissionset CLAPERM (CLASS (PERM)))
)
The CIL policy fuzzer (which mimics secilc built with clang Address
Sanitizer) reports:
==36541==ERROR: AddressSanitizer: heap-use-after-free on address
0x603000004f98 at pc 0x56445134c842 bp 0x7ffe2a256590 sp
0x7ffe2a256588
READ of size 8 at 0x603000004f98 thread T0
#0 0x56445134c841 in __cil_verify_classperms
/selinux/libsepol/src/../cil/src/cil_verify.c:1620:8
#1 0x56445134a43e in __cil_verify_classpermission
/selinux/libsepol/src/../cil/src/cil_verify.c:1650:9
#2 0x56445134a43e in __cil_pre_verify_helper
/selinux/libsepol/src/../cil/src/cil_verify.c:1715:8
#3 0x5644513225ac in cil_tree_walk_core
/selinux/libsepol/src/../cil/src/cil_tree.c:272:9
#4 0x564451322ab1 in cil_tree_walk
/selinux/libsepol/src/../cil/src/cil_tree.c:316:7
#5 0x5644513226af in cil_tree_walk_core
/selinux/libsepol/src/../cil/src/cil_tree.c:284:9
#6 0x564451322ab1 in cil_tree_walk
/selinux/libsepol/src/../cil/src/cil_tree.c:316:7
#7 0x5644512b88fd in cil_pre_verify
/selinux/libsepol/src/../cil/src/cil_post.c:2510:7
#8 0x5644512b88fd in cil_post_process
/selinux/libsepol/src/../cil/src/cil_post.c:2524:7
#9 0x5644511856ff in cil_compile
/selinux/libsepol/src/../cil/src/cil.c:564:7
The classperms list of a classpermission rule is created and filled
in when classpermissionset rules are processed, so it doesn't own any
part of the list and shouldn't retain any of it when it is reset.
Destroy the classperms list (without destroying the data in it) when
resetting a classpermission rule.
Reported-by: Nicolas Iooss <[email protected]>
Signed-off-by: James Carter <[email protected]>
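The remedy is a single call; a sketch mirroring the reset helpers above (CIL_FALSE tells cil_list_destroy to free only the list nodes, leaving the shared classperms data intact):
static void cil_reset_classpermission(struct cil_classpermission *cp)
{
	if (cp == NULL)
		return;

	cil_list_destroy(&cp->classperms, CIL_FALSE);	/* keep the data */
}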
|
virDomainVsockDefNew(virDomainXMLOptionPtr xmlopt)
{
virDomainVsockDefPtr ret = NULL;
virDomainVsockDefPtr vsock;
if (VIR_ALLOC(vsock) < 0)
return NULL;
if (xmlopt &&
xmlopt->privateData.vsockNew &&
!(vsock->privateData = xmlopt->privateData.vsockNew()))
goto cleanup;
ret = g_steal_pointer(&vsock);
cleanup:
virDomainVsockDefFree(vsock);
return ret;
}
| 0 |
[
"CWE-212"
] |
libvirt
|
a5b064bf4b17a9884d7d361733737fb614ad8979
| 254,525,770,819,822,500,000,000,000,000,000,000,000 | 18 |
conf: Don't format http cookies unless VIR_DOMAIN_DEF_FORMAT_SECURE is used
Starting with 3b076391befc3fe72deb0c244ac6c2b4c100b410
(v6.1.0-122-g3b076391be) we support http cookies. Since they may contain
somewhat sensitive information we should not format them into the XML
unless VIR_DOMAIN_DEF_FORMAT_SECURE is asserted.
Reported-by: Han Han <[email protected]>
Signed-off-by: Peter Krempa <[email protected]>
Reviewed-by: Erik Skultety <[email protected]>
|
check_tclkit_std_channels(void)
{
Tcl_Channel chan;
/*
* We need to verify if we have the standard channels and create them if
* not. Otherwise internals channels may get used as standard channels
* (like for encodings) and panic.
*/
chan = Tcl_GetStdChannel(TCL_STDIN);
if (chan == NULL) {
chan = Tcl_OpenFileChannel(NULL, DEV_NULL, "r", 0);
if (chan != NULL) {
Tcl_SetChannelOption(NULL, chan, "-encoding", "utf-8");
}
Tcl_SetStdChannel(chan, TCL_STDIN);
}
chan = Tcl_GetStdChannel(TCL_STDOUT);
if (chan == NULL) {
chan = Tcl_OpenFileChannel(NULL, DEV_NULL, "w", 0);
if (chan != NULL) {
Tcl_SetChannelOption(NULL, chan, "-encoding", "utf-8");
}
Tcl_SetStdChannel(chan, TCL_STDOUT);
}
chan = Tcl_GetStdChannel(TCL_STDERR);
if (chan == NULL) {
chan = Tcl_OpenFileChannel(NULL, DEV_NULL, "w", 0);
if (chan != NULL) {
Tcl_SetChannelOption(NULL, chan, "-encoding", "utf-8");
}
Tcl_SetStdChannel(chan, TCL_STDERR);
}
}
| 0 |
[] |
tk
|
ebd0fc80d62eeb7b8556522256f8d035e013eb65
| 131,951,784,524,259,860,000,000,000,000,000,000,000 | 34 |
tcltklib.c: check argument
* ext/tk/tcltklib.c (ip_cancel_eval_core): check argument type and
length.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@51468 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
|
newSWFShape()
{
SWFShape shape = (SWFShape)malloc(sizeof(struct SWFShape_s));
/* If malloc failed, return NULL to signify this */
if (NULL == shape)
return NULL;
SWFCharacterInit((SWFCharacter)shape);
BLOCK(shape)->writeBlock = writeSWFShapeBlockToMethod;
BLOCK(shape)->complete = completeSWFShapeBlock;
BLOCK(shape)->dtor = (destroySWFBlockMethod) destroySWFShape;
BLOCK(shape)->type = SWF_DEFINESHAPE3;
CHARACTERID(shape) = ++SWF_gNumCharacters;
shape->out = newSWFOutput();
CHARACTER(shape)->bounds = newSWFRect(0,0,0,0);
shape->edgeBounds = newSWFRect(0,0,0,0);
shape->records = NULL;
shape->lines = NULL;
shape->fills = NULL;
shape->nRecords = 0;
shape->xpos = 0;
shape->ypos = 0;
shape->nLines = 0;
shape->nFills = 0;
shape->lineWidth = 0;
shape->isMorph = FALSE;
shape->isEnded = FALSE;
shape->flags = 0;
shape->useVersion = SWF_SHAPE3;
SWFOutput_writeUInt8(shape->out, 0); /* space for nFillBits, nLineBits */
#if TRACK_ALLOCS
shape->gcnode = ming_gc_add_node(shape, (dtorfunctype) destroySWFShape);
#endif
return shape;
}
| 0 |
[
"CWE-20",
"CWE-476"
] |
libming
|
6e76e8c71cb51c8ba0aa9737a636b9ac3029887f
| 235,294,114,396,404,640,000,000,000,000,000,000,000 | 44 |
SWFShape_setLeftFillStyle: prevent fill overflow
|
static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
struct hugetlbfs_inode_info *p;
if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
return NULL;
p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
if (unlikely(!p)) {
hugetlbfs_inc_free_inodes(sbinfo);
return NULL;
}
return &p->vfs_inode;
}
| 0 |
[] |
linux
|
1bfad99ab42569807d0ca1698449cae5e8c0334a
| 262,576,128,451,471,100,000,000,000,000,000,000,000 | 14 |
hugetlbfs: hugetlb_vmtruncate_list() needs to take a range to delete
fallocate hole punch will want to unmap a specific range of pages.
Modify the existing hugetlb_vmtruncate_list() routine to take a
start/end range. If end is 0, this indicates all pages after start
should be unmapped. This is the same as the existing truncate
functionality. Modify existing callers to add 0 as end of range.
Since the routine will be used in hole punch as well as truncate
operations, it is more appropriately renamed to hugetlb_vmdelete_list().
Signed-off-by: Mike Kravetz <[email protected]>
Reviewed-by: Naoya Horiguchi <[email protected]>
Acked-by: Hillf Danton <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Davidlohr Bueso <[email protected]>
Cc: Aneesh Kumar <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Michal Hocko <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static void extract_command(int argc, char **argv, int index) {
EUID_ASSERT();
if (index >= argc)
return;
// doubledash followed by positional parameters
if (strcmp(argv[index], "--") == 0) {
arg_doubledash = 1;
index++;
if (index >= argc)
return;
}
// first argv needs to be a valid command
if (arg_doubledash == 0 && *argv[index] == '-') {
fprintf(stderr, "Error: invalid option %s after --join\n", argv[index]);
exit(1);
}
// build command
build_cmdline(&cfg.command_line, &cfg.window_title, argc, argv, index, true);
}
| 0 |
[
"CWE-269",
"CWE-94"
] |
firejail
|
27cde3d7d1e4e16d4190932347c7151dc2a84c50
| 81,927,651,206,865,000,000,000,000,000,000,000,000 | 22 |
fixing CVE-2022-31214
|
static int proc_show_options(struct seq_file *seq, struct dentry *root)
{
return 0;
}
| 1 |
[] |
linux
|
0499680a42141d86417a8fbaa8c8db806bea1201
| 83,280,379,089,329,685,000,000,000,000,000,000,000 | 4 |
procfs: add hidepid= and gid= mount options
Add support for mount options to restrict access to /proc/PID/
directories. The default backward-compatible "relaxed" behaviour is left
untouched.
The first mount option is called "hidepid" and its value defines how much
info about processes we want to be available for non-owners:
hidepid=0 (default) means the old behavior - anybody may read all
world-readable /proc/PID/* files.
hidepid=1 means users may not access any /proc/<pid>/ directories except
their own. Sensitive files like cmdline, sched*, status are now protected
against other users. As permission checking is done in proc_pid_permission()
and files' permissions are left untouched, programs expecting specific
files' modes are not confused.
hidepid=2 means hidepid=1 plus all /proc/PID/ will be invisible to other
users. It doesn't mean that it hides whether a process exists (it can be
learned by other means, e.g. by kill -0 $PID), but it hides the process'
euid and egid. It complicates the intruder's task of gathering info about
running processes, whether some daemon runs with elevated privileges,
whether another user runs some sensitive program, whether other users run
any program at all, etc.
gid=XXX defines a group that will be able to gather all processes' info
(as in hidepid=0 mode). This group should be used instead of putting
nonroot user in sudoers file or something. However, untrusted users (like
daemons, etc.) which are not supposed to monitor the tasks in the whole
system should not be added to the group.
hidepid=1 or higher is designed to restrict access to procfs files, which
might reveal some sensitive private information like precise keystrokes
timings:
http://www.openwall.com/lists/oss-security/2011/11/05/3
hidepid=1/2 doesn't break monitoring userspace tools. ps, top, pgrep, and
conky gracefully handle EPERM/ENOENT and behave as if the current user is
the only user running processes. pstree shows the process subtree which
contains "pstree" process.
Note: the patch doesn't deal with setuid/setgid issues of keeping
preopened descriptors of procfs files (like
https://lkml.org/lkml/2011/2/7/368). We rely on that the leaked
information like the scheduling counters of setuid apps doesn't threaten
anybody's privacy - only the user started the setuid program may read the
counters.
Signed-off-by: Vasiliy Kulikov <[email protected]>
Cc: Alexey Dobriyan <[email protected]>
Cc: Al Viro <[email protected]>
Cc: Randy Dunlap <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Greg KH <[email protected]>
Cc: Theodore Tso <[email protected]>
Cc: Alan Cox <[email protected]>
Cc: James Morris <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: Hugh Dickins <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
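A sketch of the visibility test the options imply (simplified): a task's /proc entries stay readable if hidepid is below the level a file requires, the caller is in the configured gid, or ptrace-style access would be allowed anyway.
static bool has_pid_permissions(struct pid_namespace *pid,
				struct task_struct *task, int hide_pid_min)
{
	if (pid->hide_pid < hide_pid_min)
		return true;
	if (in_group_p(pid->pid_gid))
		return true;
	return ptrace_may_access(task, PTRACE_MODE_READ);
}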
|
bool LEX::sp_continue_loop(THD *thd, sp_label *lab, Item *when)
{
if (!when)
return sp_continue_loop(thd, lab);
DBUG_ASSERT(sphead == thd->lex->sphead);
DBUG_ASSERT(spcont == thd->lex->spcont);
sp_instr_jump_if_not *i= new (thd->mem_root)
sp_instr_jump_if_not(sphead->instructions(),
spcont,
when, thd->lex);
if (unlikely(i == NULL) ||
unlikely(sphead->add_instr(i)) ||
unlikely(sp_continue_loop(thd, lab)))
return true;
i->backpatch(sphead->instructions(), spcont);
return false;
}
| 0 |
[
"CWE-703"
] |
server
|
39feab3cd31b5414aa9b428eaba915c251ac34a2
| 290,792,788,572,531,900,000,000,000,000,000,000,000 | 18 |
MDEV-26412 Server crash in Item_field::fix_outer_field for INSERT SELECT
IF an INSERT/REPLACE SELECT statement contained an ON expression in the top
level select and this expression used a subquery with a column reference
that could not be resolved then an attempt to resolve this reference as
an outer reference caused a crash of the server. This happened because the
outer context field in the Name_resolution_context structure was not set
to NULL for such references. Rather it pointed to the first element in
the select_stack.
Note that starting from 10.4 we cannot use the SELECT_LEX::outer_select()
method when parsing a SELECT construct.
Approved by Oleksandr Byelkin <[email protected]>
|
static int nci_dep_link_down(struct nfc_dev *nfc_dev)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
int rc;
if (nfc_dev->rf_mode == NFC_RF_INITIATOR) {
nci_deactivate_target(nfc_dev, NULL, NCI_DEACTIVATE_TYPE_IDLE_MODE);
} else {
if (atomic_read(&ndev->state) == NCI_LISTEN_ACTIVE ||
atomic_read(&ndev->state) == NCI_DISCOVERY) {
nci_request(ndev, nci_rf_deactivate_req, (void *)0,
msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
}
rc = nfc_tm_deactivated(nfc_dev);
if (rc)
pr_err("error when signaling tm deactivation\n");
}
return 0;
}
| 0 |
[] |
linux
|
48b71a9e66c2eab60564b1b1c85f4928ed04e406
| 305,448,471,819,356,400,000,000,000,000,000,000,000 | 21 |
NFC: add NCI_UNREG flag to eliminate the race
There are two sites that call queue_work() after
destroy_workqueue() and lead to a possible UAF.
The first site is nci_send_cmd(), which can happen after
nci_close_device, as below
nfcmrvl_nci_unregister_dev | nfc_genl_dev_up
nci_close_device |
flush_workqueue |
del_timer_sync |
nci_unregister_device | nfc_get_device
destroy_workqueue | nfc_dev_up
nfc_unregister_device | nci_dev_up
device_del | nci_open_device
| __nci_request
| nci_send_cmd
| queue_work !!!
Another site is nci_cmd_timer, awaked by the nci_cmd_work from the
nci_send_cmd.
... | ...
nci_unregister_device | queue_work
destroy_workqueue |
nfc_unregister_device | ...
device_del | nci_cmd_work
| mod_timer
| ...
| nci_cmd_timer
| queue_work !!!
For the above two UAFs, the root cause is that nfc_dev_up can race
with the nci_unregister_device routine. Therefore, this patch
introduces the NCI_UNREG flag to easily eliminate the possible race. In
addition, the mutex_lock in nci_close_device can act as a barrier.
Signed-off-by: Lin Ma <[email protected]>
Fixes: 6a2968aaf50c ("NFC: basic NCI protocol implementation")
Reviewed-by: Jakub Kicinski <[email protected]>
Reviewed-by: Krzysztof Kozlowski <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
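A sketch of how the flag and lock close the race inside nci_request() (simplified; req_lock doubles as the barrier against concurrent unregistration):
mutex_lock(&ndev->req_lock);
if (test_bit(NCI_UP, &ndev->flags) &&
    !test_bit(NCI_UNREG, &ndev->flags))
	rc = __nci_request(ndev, req, opt, timeout);
else
	rc = -ENETDOWN;
mutex_unlock(&ndev->req_lock);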
|
int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain,
evtchn_port_t remote_port)
{
return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
&xen_lateeoi_chip);
}
| 0 |
[
"CWE-400",
"CWE-703"
] |
linux
|
e99502f76271d6bc4e374fe368c50c67a1fd3070
| 891,099,105,265,000,200,000,000,000,000,000,000 | 6 |
xen/events: defer eoi in case of excessive number of events
In case rogue guests are sending events at high frequency it might
happen that xen_evtchn_do_upcall() won't stop processing events in
dom0. As this is done in irq handling a crash might be the result.
In order to avoid that, delay further inter-domain events after some
time in xen_evtchn_do_upcall() by forcing eoi processing into a
worker on the same cpu, thus inhibiting new events coming in.
The time after which eoi processing is to be delayed is configurable
via a new module parameter "event_loop_timeout" which specifies the
maximum event loop time in jiffies (default: 2, the value was chosen
after some tests showing that a value of 2 was the lowest with only
a slight drop of dom0 network throughput while multiple guests
performed an event storm).
How long eoi processing will be delayed can be specified via another
parameter "event_eoi_delay" (again in jiffies, default 10, again the
value was chosen after testing with different delay values).
This is part of XSA-332.
Cc: [email protected]
Reported-by: Julien Grall <[email protected]>
Signed-off-by: Juergen Gross <[email protected]>
Reviewed-by: Stefano Stabellini <[email protected]>
Reviewed-by: Wei Liu <[email protected]>
|
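The throttling idea in the message above — stop the event loop after a budget and defer the rest — can be sketched outside the kernel. The callbacks next_event and defer_rest below are illustrative assumptions, not the Xen API:

#include <stdbool.h>

/* Illustrative budget standing in for the "event_loop_timeout" module
 * parameter (the real limit is expressed in jiffies). */
#define EVENT_LOOP_BUDGET 256

/* Handle pending events, but bail out early if a rogue source keeps
 * the loop busy; the remainder (and their EOI) go to a worker, which
 * throttles how fast new events can arrive. */
static void event_loop(bool (*next_event)(void), void (*defer_rest)(void))
{
    int handled = 0;
    while (next_event()) {
        if (++handled >= EVENT_LOOP_BUDGET) {
            defer_rest();            /* stand-in for the eoi worker */
            return;
        }
    }
}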
int newsgroups_cb(const char *mailbox,
const char *entry __attribute__((unused)),
const char *userid,
struct annotation_data *attrib, void *rock)
{
struct wildmat *wild = (struct wildmat *) rock;
/* skip personal mailboxes */
if ((!strncasecmp(mailbox, "INBOX", 5) &&
(!mailbox[5] || mailbox[5] == '.')) ||
!strncmp(mailbox, "user.", 5))
return 0;
/* see if the mailbox matches one of our wildmats */
while (wild->pat && wildmat(mailbox, wild->pat) != 1) wild++;
/* if we don't have a match, or its a negative match, skip it */
if (!wild->pat || wild->not) return 0;
/* we only care about shared /comment */
if (userid[0]) return 0;
prot_printf(nntp_out, "%s\t%s\r\n", mailbox+strlen(newsprefix),
attrib->value);
return 0;
}
| 0 |
[
"CWE-287"
] |
cyrus-imapd
|
77903669e04c9788460561dd0560b9c916519594
| 252,398,753,314,241,920,000,000,000,000,000,000,000 | 27 |
Secunia SA46093 - make sure nntp authentication completes
Discovered by Stefan Cornelius, Secunia Research
The vulnerability is caused due to the access restriction for certain
commands only checking whether or not variable "nntp_userid" is non-NULL,
without performing additional checks to verify that a complete, successful
authentication actually took place. The variable "nntp_userid" can be set to
point to a string holding the username (changing it to a non-NULL, thus
allowing attackers to bypass the checks) by sending an "AUTHINFO USER"
command. The variable is not reset to NULL until e.g. a wrong "AUTHINFO
PASS" command is received. This can be exploited to bypass the
authentication mechanism and allows access to e.g. the "NEWNEWS" or the
"LIST NEWSGROUPS" commands by sending an "AUTHINFO USER" command without a
following "AUTHINFO PASS" command.
|
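The authentication bypass above reduces to a classic pattern: inferring "logged in" from a non-NULL username that is set before the password check. A minimal sketch of the corrected gate, with hypothetical names rather than the cyrus-imapd code:

#include <stdbool.h>

struct session {
    char *userid;        /* set by AUTHINFO USER, before any password check */
    bool authenticated;  /* set only after AUTHINFO PASS succeeds */
};

/* Gate privileged commands on completed authentication, not on the
 * username pointer: a bare AUTHINFO USER must not unlock anything. */
static bool may_run_privileged(const struct session *s)
{
    return s->authenticated;         /* NOT: s->userid != NULL */
}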
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
{
return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
}
| 0 |
[] |
kvm
|
0769c5de24621141c953fbe1f943582d37cb4244
| 319,398,949,790,229,970,000,000,000,000,000,000,000 | 5 |
KVM: x86: extend "struct x86_emulate_ops" with "get_cpuid"
In order to be able to proceed checks on CPU-specific properties
within the emulator, function "get_cpuid" is introduced.
With "get_cpuid" it is possible to virtually call the guests
"cpuid"-opcode without changing the VM's context.
[mtosatti: cleanup/beautify code]
Signed-off-by: Stephan Baerwolf <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]>
|
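The commit above adds a CPUID callback to the emulator's ops table so CPU-specific checks can run without touching VM state. A trimmed-down sketch of the idea; the struct and the MOVBE consumer are illustrative assumptions, not the KVM definitions:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical, minimal ops table: the point is that a callback lets
 * the emulator query the guest's CPUID without changing its context. */
struct emulate_ops {
    bool (*get_cpuid)(uint32_t *eax, uint32_t *ebx,
                      uint32_t *ecx, uint32_t *edx);
};

/* Example consumer: ask the guest's CPUID whether MOVBE is supported
 * (CPUID.01H:ECX bit 22) before emulating the instruction. */
static bool guest_supports_movbe(const struct emulate_ops *ops)
{
    uint32_t eax = 1, ebx = 0, ecx = 0, edx = 0;
    if (!ops->get_cpuid || !ops->get_cpuid(&eax, &ebx, &ecx, &edx))
        return false;
    return (ecx >> 22) & 1u;
}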
static void enter_smm(struct kvm_vcpu *vcpu)
{
struct kvm_segment cs, ds;
struct desc_ptr dt;
unsigned long cr0;
char buf[512];
memset(buf, 0, 512);
#ifdef CONFIG_X86_64
if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
enter_smm_save_state_64(vcpu, buf);
else
#endif
enter_smm_save_state_32(vcpu, buf);
/*
* Give enter_smm() a chance to make ISA-specific changes to the vCPU
* state (e.g. leave guest mode) after we've saved the state into the
* SMM state-save area.
*/
static_call(kvm_x86_enter_smm)(vcpu, buf);
kvm_smm_changed(vcpu, true);
kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
if (static_call(kvm_x86_get_nmi_mask)(vcpu))
vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
else
static_call(kvm_x86_set_nmi_mask)(vcpu, true);
kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
kvm_rip_write(vcpu, 0x8000);
cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
static_call(kvm_x86_set_cr0)(vcpu, cr0);
vcpu->arch.cr0 = cr0;
static_call(kvm_x86_set_cr4)(vcpu, 0);
/* Undocumented: IDT limit is set to zero on entry to SMM. */
dt.address = dt.size = 0;
static_call(kvm_x86_set_idt)(vcpu, &dt);
kvm_set_dr(vcpu, 7, DR7_FIXED_1);
cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
cs.base = vcpu->arch.smbase;
ds.selector = 0;
ds.base = 0;
cs.limit = ds.limit = 0xffffffff;
cs.type = ds.type = 0x3;
cs.dpl = ds.dpl = 0;
cs.db = ds.db = 0;
cs.s = ds.s = 1;
cs.l = ds.l = 0;
cs.g = ds.g = 1;
cs.avl = ds.avl = 0;
cs.present = ds.present = 1;
cs.unusable = ds.unusable = 0;
cs.padding = ds.padding = 0;
kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
kvm_set_segment(vcpu, &ds, VCPU_SREG_DS);
kvm_set_segment(vcpu, &ds, VCPU_SREG_ES);
kvm_set_segment(vcpu, &ds, VCPU_SREG_FS);
kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
#ifdef CONFIG_X86_64
if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
static_call(kvm_x86_set_efer)(vcpu, 0);
#endif
kvm_update_cpuid_runtime(vcpu);
kvm_mmu_reset_context(vcpu);
}
| 0 |
[
"CWE-476"
] |
linux
|
55749769fe608fa3f4a075e42e89d237c8e37637
| 322,755,791,046,001,130,000,000,000,000,000,000,000 | 78 |
KVM: x86: Fix wall clock writes in Xen shared_info not to mark page dirty
When dirty ring logging is enabled, any dirty logging without an active
vCPU context will cause a kernel oops. But we've already declared that
the shared_info page doesn't get dirty tracking anyway, since it would
be kind of insane to mark it dirty every time we deliver an event channel
interrupt. Userspace is supposed to just assume it's always dirty any
time a vCPU can run or event channels are routed.
So stop using the generic kvm_write_wall_clock() and just write directly
through the gfn_to_pfn_cache that we already have set up.
We can make kvm_write_wall_clock() static in x86.c again now, but let's
not remove the 'sec_hi_ofs' argument even though it's not used yet. At
some point we *will* want to use that for KVM guests too.
Fixes: 629b5348841a ("KVM: x86/xen: update wallclock region")
Reported-by: butt3rflyh4ck <[email protected]>
Signed-off-by: David Woodhouse <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
information_schema_numeric_attributes() const
{
return Information_schema_numeric_attributes();
}
| 0 |
[
"CWE-416",
"CWE-703"
] |
server
|
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
| 106,416,058,484,996,080,000,000,000,000,000,000,000 | 4 |
MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at the parsing stage. Since
the expr item is allocated on expr_arena, all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter a prematurely freed item.
When a table is reopened from the cache, vcol_info contains a stale
expression. We refresh the expression via TABLE::vcol_fix_exprs(), but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]>
|
gostdsa_vko (const struct ecc_scalar *priv,
const struct ecc_point *pub,
size_t ukm_length, const uint8_t *ukm,
uint8_t *out)
{
const struct ecc_curve *ecc = priv->ecc;
unsigned bsize = (ecc_bit_size (ecc) + 7) / 8;
mp_size_t size = ecc->p.size;
mp_size_t itch = 4*size + ecc->mul_itch;
mp_limb_t *scratch;
if (itch < 5*size + ecc->h_to_a_itch)
itch = 5*size + ecc->h_to_a_itch;
assert (pub->ecc == ecc);
assert (priv->ecc == ecc);
assert (ukm_length <= bsize);
scratch = gmp_alloc_limbs (itch);
#define UKM scratch
#define TEMP (scratch + 3*size)
#define XYZ scratch
#define TEMP_Y (scratch + 4*size)
mpn_set_base256_le (UKM, size, ukm, ukm_length);
/* If ukm is 0, set it to 1, otherwise the result will always be equal to 0,
* no matter what private and public keys are. See RFC 4357 referencing GOST
* R 34.10-2001 (RFC 5832) Section 6.1 step 2. */
if (mpn_zero_p (UKM, size))
UKM[0] = 1;
ecc_mod_mul (&ecc->q, TEMP, priv->p, UKM, TEMP); /* TEMP = UKM * priv */
ecc->mul (ecc, XYZ, TEMP, pub->p, scratch + 4*size); /* XYZ = UKM * priv * pub */
ecc->h_to_a (ecc, 0, TEMP, XYZ, scratch + 5*size); /* TEMP = XYZ */
mpn_get_base256_le (out, bsize, TEMP, size);
mpn_get_base256_le (out+bsize, bsize, TEMP_Y, size);
gmp_free_limbs (scratch, itch);
}
| 1 |
[
"CWE-787"
] |
nettle
|
63f222c60b03470c0005aa9bc4296fbf585f68b9
| 61,914,879,089,805,790,000,000,000,000,000,000,000 | 40 |
Fix canonical reduction in gostdsa_vko.
* gostdsa-vko.c (gostdsa_vko): Use ecc_mod_mul_canonical to
compute the scalar used for ecc multiplication.
(cherry picked from commit b30e0ca6d2b41579a5b6a010fc54065d790e8d55)
|
int window_width() const {
return (int)_window_width;
}
| 0 |
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
| 336,990,473,205,955,700,000,000,000,000,000,000,000 | 3 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
|
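The fix above validates header-declared dimensions against the actual file size before allocating. A hedged sketch of such a check with overflow-safe arithmetic; the names are illustrative, not the CImg API:

#include <stdbool.h>
#include <stdint.h>

/* Reject a bitmap whose header claims more pixel data than the file
 * actually contains; dividing instead of multiplying avoids overflow. */
static bool bmp_dims_plausible(uint32_t width, uint32_t height,
                               uint32_t bytes_per_pixel, uint64_t file_size)
{
    if (width == 0 || height == 0 || bytes_per_pixel == 0)
        return false;
    /* width * height * bytes_per_pixel <= file_size, without overflow */
    if ((uint64_t)width > file_size / bytes_per_pixel / height)
        return false;
    return true;
}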
static void addClassName(const char *prefix,
const char *actual,
size_t length)
{
size_t offset = strlen(prefix);
size_t have = (unsigned) (Style_className_end - Style_className);
size_t need = (offset + length + 1);
if ((have + need) >= Style_className_len) {
Style_className_len += 1024 + 2 * (have + need);
if (Style_className == 0) {
Style_className = typeMallocn(char, Style_className_len);
} else {
Style_className = typeRealloc(char, Style_className, Style_className_len);
}
if (Style_className == NULL)
outofmem(__FILE__, "addClassName");
Style_className_end = Style_className + have;
}
if (offset)
strcpy(Style_className_end, prefix);
if (length)
memcpy(Style_className_end + offset, actual, length);
Style_className_end[offset + length] = '\0';
strtolower(Style_className_end);
Style_className_end += (offset + length);
}
| 0 |
[
"CWE-416"
] |
lynx-snapshots
|
280a61b300a1614f6037efc0902ff7ecf17146e9
| 10,303,834,464,579,054,000,000,000,000,000,000,000 | 28 |
snapshot of project "lynx", label v2-8-9dev_15b
|
static JSON_INLINE void list_init(list_t *list)
{
list->next = list;
list->prev = list;
}
| 0 |
[
"CWE-310"
] |
jansson
|
8f80c2d83808150724d31793e6ade92749b1faa4
| 39,473,201,207,489,440,000,000,000,000,000,000,000 | 5 |
CVE-2013-6401: Change hash function, randomize hashes
Thanks to Florian Weimer and Eric Sesterhenn for reporting, reviewing
and testing.
|
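The mitigation above makes hash collisions unpredictable by mixing a per-process random seed into the hash state. A minimal sketch using a seeded FNV-1a stand-in; jansson's actual function and seeding differ:

#include <stdint.h>
#include <time.h>

/* Per-process seed; real implementations draw from the OS entropy
 * source rather than this weak mix. */
static uint32_t hash_seed;

static void hash_seed_init(void)
{
    hash_seed = (uint32_t)time(NULL) ^ (uint32_t)(uintptr_t)&hash_seed;
}

/* Seeded FNV-1a: folding the seed into the initial state makes
 * colliding key sets unpredictable to an attacker. */
static uint32_t str_hash(const char *key)
{
    uint32_t h = 2166136261u ^ hash_seed;    /* FNV offset basis + seed */
    while (*key) {
        h ^= (unsigned char)*key++;
        h *= 16777619u;                      /* FNV prime */
    }
    return h;
}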
static void tzset (void) { }
| 0 |
[] |
gnulib
|
94e01571507835ff59dd8ce2a0b56a4b566965a4
| 292,894,390,022,897,400,000,000,000,000,000,000,000 | 1 |
time_rz: fix heap buffer overflow vulnerability
This issue has been assigned CVE-2017-7476 and was
detected with American Fuzzy Lop 2.41b run on the
coreutils date(1) program with ASAN enabled.
ERROR: AddressSanitizer: heap-buffer-overflow on address 0x...
WRITE of size 8 at 0x60d00000cff8 thread T0
#1 0x443020 in extend_abbrs lib/time_rz.c:88
#2 0x443356 in save_abbr lib/time_rz.c:155
#3 0x44393f in localtime_rz lib/time_rz.c:290
#4 0x41e4fe in parse_datetime2 lib/parse-datetime.y:1798
A minimized reproducer is the following 120 byte TZ value,
which goes beyond the value of ABBR_SIZE_MIN (119) on x86_64.
Extend the aa...b portion to overwrite more of the heap.
date -d $(printf 'TZ="aaa%020daaaaaab%089d"')
localtime_rz and mktime_z were affected since commit 4bc76593.
parse_datetime was affected since commit 4e6e16b3f.
* lib/time_rz.c (save_abbr): Rearrange the calculation determining
whether there is enough buffer space available. The rearrangement
ensures we're only dealing with positive numbers, thus avoiding
the problematic promotion of signed to unsigned causing an invalid
comparison when zone_copy is more than ABBR_SIZE_MIN bytes beyond
the start of the buffer.
* tests/test-parse-datetime.c (main): Add a test case written by
Paul Eggert, which overwrites enough of the heap so that
standard glibc will fail with "free(): invalid pointer"
without the patch applied.
Reported and analyzed at https://bugzilla.redhat.com/1444774
|
static const struct usb_cdc_union_desc *
ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
{
const void *buf = intf->altsetting->extra;
size_t buflen = intf->altsetting->extralen;
struct usb_cdc_union_desc *union_desc;
if (!buf) {
dev_err(&intf->dev, "Missing descriptor data\n");
return NULL;
}
if (!buflen) {
dev_err(&intf->dev, "Zero length descriptor\n");
return NULL;
}
while (buflen >= sizeof(*union_desc)) {
union_desc = (struct usb_cdc_union_desc *)buf;
if (union_desc->bLength > buflen) {
dev_err(&intf->dev, "Too large descriptor\n");
return NULL;
}
if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE &&
union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) {
dev_dbg(&intf->dev, "Found union header\n");
if (union_desc->bLength >= sizeof(*union_desc))
return union_desc;
dev_err(&intf->dev,
	"Union descriptor too short (%d vs %zd)\n",
	union_desc->bLength, sizeof(*union_desc));
return NULL;
}
buflen -= union_desc->bLength;
buf += union_desc->bLength;
}
dev_err(&intf->dev, "Missing CDC union descriptor\n");
return NULL;
}
| 0 |
[
"CWE-125"
] |
linux
|
ea04efee7635c9120d015dcdeeeb6988130cb67a
| 204,228,016,526,138,200,000,000,000,000,000,000,000 | 44 |
Input: ims-psu - check if CDC union descriptor is sane
Before trying to use the CDC union descriptor, try to validate whether it
is sane by checking that intf->altsetting->extra is big enough and that
descriptor bLength is not too big and not too small.
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: Dmitry Torokhov <[email protected]>
|
static inline LineContribType *_gdContributionsCalc(unsigned int line_size, unsigned int src_size, double scale_d, const interpolation_method pFilter)
{
double width_d;
double scale_f_d = 1.0;
const double filter_width_d = DEFAULT_BOX_RADIUS;
int windows_size;
unsigned int u;
LineContribType *res;
if (scale_d < 1.0) {
width_d = filter_width_d / scale_d;
scale_f_d = scale_d;
} else {
width_d= filter_width_d;
}
windows_size = 2 * (int)ceil(width_d) + 1;
res = _gdContributionsAlloc(line_size, windows_size);
for (u = 0; u < line_size; u++) {
const double dCenter = (double)u / scale_d;
/* get the significant edge points affecting the pixel */
register int iLeft = MAX(0, (int)floor (dCenter - width_d));
int iRight = MIN((int)ceil(dCenter + width_d), (int)src_size - 1);
double dTotalWeight = 0.0;
int iSrc;
/* Cut edge points to fit in filter window in case of spill-off */
if (iRight - iLeft + 1 > windows_size) {
if (iLeft < ((int)src_size - 1 / 2)) {
iLeft++;
} else {
iRight--;
}
}
res->ContribRow[u].Left = iLeft;
res->ContribRow[u].Right = iRight;
for (iSrc = iLeft; iSrc <= iRight; iSrc++) {
dTotalWeight += (res->ContribRow[u].Weights[iSrc-iLeft] = scale_f_d * (*pFilter)(scale_f_d * (dCenter - (double)iSrc)));
}
if (dTotalWeight < 0.0) {
_gdContributionsFree(res);
return NULL;
}
if (dTotalWeight > 0.0) {
for (iSrc = iLeft; iSrc <= iRight; iSrc++) {
res->ContribRow[u].Weights[iSrc-iLeft] /= dTotalWeight;
}
}
}
return res;
}
| 0 |
[
"CWE-125"
] |
php-src
|
7a1aac3343af85b4af4df5f8844946eaa27394ab
| 245,768,795,403,828,800,000,000,000,000,000,000,000 | 56 |
Fixed bug #72227: imagescale out-of-bounds read
Ported from https://github.com/libgd/libgd/commit/4f65a3e4eedaffa1efcf9ee1eb08f0b504fbc31a
|
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
int t;
int s_t;
struct net_device *dev;
struct Qdisc *q;
struct tcf_proto *tp, **chain;
struct tcmsg *tcm = (struct tcmsg*)NLMSG_DATA(cb->nlh);
unsigned long cl = 0;
struct Qdisc_class_ops *cops;
struct tcf_dump_args arg;
if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
return skb->len;
if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL)
return skb->len;
read_lock_bh(&qdisc_tree_lock);
if (!tcm->tcm_parent)
q = dev->qdisc_sleeping;
else
q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
if (!q)
goto out;
if ((cops = q->ops->cl_ops) == NULL)
goto errout;
if (TC_H_MIN(tcm->tcm_parent)) {
cl = cops->get(q, tcm->tcm_parent);
if (cl == 0)
goto errout;
}
chain = cops->tcf_chain(q, cl);
if (chain == NULL)
goto errout;
s_t = cb->args[0];
for (tp=*chain, t=0; tp; tp = tp->next, t++) {
if (t < s_t) continue;
if (TC_H_MAJ(tcm->tcm_info) &&
TC_H_MAJ(tcm->tcm_info) != tp->prio)
continue;
if (TC_H_MIN(tcm->tcm_info) &&
TC_H_MIN(tcm->tcm_info) != tp->protocol)
continue;
if (t > s_t)
memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
if (cb->args[1] == 0) {
if (tcf_fill_node(skb, tp, 0, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER) <= 0) {
break;
}
cb->args[1] = 1;
}
if (tp->ops->walk == NULL)
continue;
arg.w.fn = tcf_node_dump;
arg.skb = skb;
arg.cb = cb;
arg.w.stop = 0;
arg.w.skip = cb->args[1]-1;
arg.w.count = 0;
tp->ops->walk(tp, &arg.w);
cb->args[1] = arg.w.count+1;
if (arg.w.stop)
break;
}
cb->args[0] = t;
errout:
if (cl)
cops->put(q, cl);
out:
read_unlock_bh(&qdisc_tree_lock);
dev_put(dev);
return skb->len;
}
| 0 |
[
"CWE-200"
] |
linux-2.6
|
9ef1d4c7c7aca1cd436612b6ca785b726ffb8ed8
| 126,225,422,520,051,660,000,000,000,000,000,000,000 | 78 |
[NETLINK]: Missing initializations in dumped data
Mostly missing initialization of padding fields of 1 or 2 bytes length,
two instances of uninitialized nlmsgerr->msg of 16 bytes length.
Signed-off-by: Patrick McHardy <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
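The leak above comes from struct padding that is copied to userspace uninitialized. A minimal sketch of the fix's idea — zero the whole struct before filling the named fields; the types are illustrative, not the netlink structures:

#include <string.h>
#include <stdint.h>

/* A struct with implicit padding: assigning only the named fields
 * leaves the pad bytes holding stale (potentially sensitive) memory. */
struct wire_msg {
    uint8_t  family;     /* 3 bytes of padding follow on most ABIs */
    uint32_t ifindex;
};

static void fill_msg(struct wire_msg *m, uint8_t family, uint32_t ifindex)
{
    memset(m, 0, sizeof(*m));   /* zero the padding before copying out */
    m->family = family;
    m->ifindex = ifindex;
}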
static int handshake_server(gnutls_session_t session)
{
int ret = 0;
switch (STATE) {
case STATE0:
case STATE1:
ret =
_gnutls_recv_handshake(session,
GNUTLS_HANDSHAKE_CLIENT_HELLO,
0, NULL);
if (ret == GNUTLS_E_INT_RET_0) {
/* this is triggered by post_client_hello, and instructs the
* handshake to proceed but be put on hold */
ret = GNUTLS_E_INTERRUPTED;
STATE = STATE2; /* hello already parsed -> move on */
} else {
STATE = STATE1;
}
IMED_RET("recv hello", ret, 1);
case STATE2:
ret = _gnutls_ext_sr_verify(session);
STATE = STATE2;
IMED_RET("recv hello", ret, 0);
case STATE3:
ret = send_hello(session, AGAIN(STATE3));
STATE = STATE3;
IMED_RET("send hello", ret, 1);
case STATE4:
if (session->security_parameters.do_send_supplemental) {
ret =
_gnutls_send_supplemental(session,
AGAIN(STATE4));
STATE = STATE4;
IMED_RET("send supplemental data", ret, 0);
}
/* SEND CERTIFICATE + KEYEXCHANGE + CERTIFICATE_REQUEST */
case STATE5:
/* NOTE: these should not be send if we are resuming */
if (session->internals.resumed == RESUME_FALSE)
ret =
_gnutls_send_server_certificate(session,
AGAIN(STATE5));
STATE = STATE5;
IMED_RET("send server certificate", ret, 0);
case STATE6:
#ifdef ENABLE_OCSP
if (session->internals.resumed == RESUME_FALSE)
ret =
_gnutls_send_server_certificate_status(session,
AGAIN
(STATE6));
STATE = STATE6;
IMED_RET("send server certificate status", ret, 0);
#endif
case STATE7:
/* send server key exchange (A) */
if (session->internals.resumed == RESUME_FALSE)
ret =
_gnutls_send_server_kx_message(session,
AGAIN(STATE7));
STATE = STATE7;
IMED_RET("send server kx", ret, 0);
case STATE8:
/* Send certificate request - if requested to */
if (session->internals.resumed == RESUME_FALSE)
ret =
_gnutls_send_server_crt_request(session,
AGAIN(STATE8));
STATE = STATE8;
IMED_RET("send server cert request", ret, 0);
case STATE9:
/* send the server hello done */
if (session->internals.resumed == RESUME_FALSE) /* if we are not resuming */
ret =
_gnutls_send_empty_handshake(session,
GNUTLS_HANDSHAKE_SERVER_HELLO_DONE,
AGAIN(STATE9));
STATE = STATE9;
IMED_RET("send server hello done", ret, 1);
case STATE10:
if (session->security_parameters.do_recv_supplemental) {
ret = _gnutls_recv_supplemental(session);
STATE = STATE10;
IMED_RET("recv client supplemental", ret, 1);
}
/* RECV CERTIFICATE + KEYEXCHANGE + CERTIFICATE_VERIFY */
case STATE11:
/* receive the client certificate message */
if (session->internals.resumed == RESUME_FALSE) /* if we are not resuming */
ret = _gnutls_recv_client_certificate(session);
STATE = STATE11;
IMED_RET("recv client certificate", ret, 1);
case STATE12:
ret = run_verify_callback(session, GNUTLS_SERVER);
STATE = STATE12;
if (ret < 0)
return gnutls_assert_val(ret);
case STATE13:
/* receive the client key exchange message */
if (session->internals.resumed == RESUME_FALSE) /* if we are not resuming */
ret = _gnutls_recv_client_kx_message(session);
STATE = STATE13;
IMED_RET("recv client kx", ret, 1);
case STATE14:
/* receive the client certificate verify message */
if (session->internals.resumed == RESUME_FALSE) /* if we are not resuming */
ret =
_gnutls_recv_client_certificate_verify_message
(session);
STATE = STATE14;
IMED_RET("recv client certificate verify", ret, 1);
case STATE15:
STATE = STATE15;
if (session->internals.resumed == RESUME_FALSE) { /* if we are not resuming */
ret = recv_handshake_final(session, TRUE);
IMED_RET("recv handshake final", ret, 1);
} else {
ret = send_handshake_final(session, TRUE);
IMED_RET("send handshake final 2", ret, 1);
}
case STATE16:
#ifdef ENABLE_SESSION_TICKETS
ret =
_gnutls_send_new_session_ticket(session,
AGAIN(STATE16));
STATE = STATE16;
IMED_RET("send handshake new session ticket", ret, 0);
#endif
case STATE17:
STATE = STATE17;
if (session->internals.resumed == RESUME_FALSE) { /* if we are not resuming */
ret = send_handshake_final(session, FALSE);
IMED_RET("send handshake final", ret, 1);
if (session->security_parameters.entity ==
GNUTLS_SERVER
&& session->internals.ticket_sent == 0) {
/* if no ticket, save session data */
_gnutls_server_register_current_session
(session);
}
} else {
ret = recv_handshake_final(session, FALSE);
IMED_RET("recv handshake final 2", ret, 1);
}
STATE = STATE0;
default:
break;
}
return 0;
}
| 0 |
[
"CWE-310"
] |
gnutls
|
db9a7d810f9ee4c9cc49731f5fd9bdeae68d7eaa
| 289,755,447,293,708,500,000,000,000,000,000,000,000 | 170 |
handshake: check for TLS_FALLBACK_SCSV
If TLS_FALLBACK_SCSV was sent by the client during the handshake, and
the advertised protocol version is lower than GNUTLS_TLS_VERSION_MAX,
send the "Inappropriate fallback" fatal alert and abort the handshake.
This mechanism was defined in RFC7507.
|
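The RFC 7507 check described above fits in a few lines. A hedged sketch with illustrative version codes, not the gnutls API:

#include <stdbool.h>

/* Illustrative TLS version codes and the RFC 7507 alert number. */
enum { TLS1_0 = 0x0301, TLS1_1 = 0x0302, TLS1_2 = 0x0303 };
#define ALERT_INAPPROPRIATE_FALLBACK 86

/* Returns the alert to send, or 0 if the handshake may proceed:
 * the SCSV plus a downgraded version signals a fallback attack. */
static int check_fallback_scsv(bool scsv_present,
                               int client_version, int server_max)
{
    if (scsv_present && client_version < server_max)
        return ALERT_INAPPROPRIATE_FALLBACK;
    return 0;
}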
op2arg_type(int opcode)
{
int i;
for (i = 0; OnigOpInfo[i].opcode >= 0; i++) {
if (opcode == OnigOpInfo[i].opcode)
return OnigOpInfo[i].arg_type;
}
return ARG_SPECIAL;
}
| 0 |
[
"CWE-125"
] |
php-src
|
c6e34d91b88638966662caac62c4d0e90538e317
| 38,401,588,750,560,924,000,000,000,000,000,000,000 | 10 |
Fix bug #77371 (heap buffer overflow in mb regex functions - compile_string_node)
|
static void sv_usage(void)
{
BIO_printf(bio_err,"usage: s_server [args ...]\n");
BIO_printf(bio_err,"\n");
BIO_printf(bio_err," -accept arg - port to accept on (default is %d)\n",PORT);
BIO_printf(bio_err," -context arg - set session ID context\n");
BIO_printf(bio_err," -verify arg - turn on peer certificate verification\n");
BIO_printf(bio_err," -Verify arg - turn on peer certificate verification, must have a cert.\n");
BIO_printf(bio_err," -cert arg - certificate file to use\n");
BIO_printf(bio_err," (default is %s)\n",TEST_CERT);
BIO_printf(bio_err," -crl_check - check the peer certificate has not been revoked by its CA.\n" \
" The CRL(s) are appended to the certificate file\n");
BIO_printf(bio_err," -crl_check_all - check the peer certificate has not been revoked by its CA\n" \
           " or any other CRL in the CA chain. CRL(s) are appended to\n" \
           " the certificate file.\n");
BIO_printf(bio_err," -certform arg - certificate format (PEM or DER) PEM default\n");
BIO_printf(bio_err," -key arg - Private Key file to use, in cert file if\n");
BIO_printf(bio_err," not specified (default is %s)\n",TEST_CERT);
BIO_printf(bio_err," -keyform arg - key format (PEM, DER or ENGINE) PEM default\n");
BIO_printf(bio_err," -pass arg - private key file pass phrase source\n");
BIO_printf(bio_err," -dcert arg - second certificate file to use (usually for DSA)\n");
BIO_printf(bio_err," -dcertform x - second certificate format (PEM or DER) PEM default\n");
BIO_printf(bio_err," -dkey arg - second private key file to use (usually for DSA)\n");
BIO_printf(bio_err," -dkeyform arg - second key format (PEM, DER or ENGINE) PEM default\n");
BIO_printf(bio_err," -dpass arg - second private key file pass phrase source\n");
BIO_printf(bio_err," -dhparam arg - DH parameter file to use, in cert file if not specified\n");
BIO_printf(bio_err," or a default set of parameters is used\n");
#ifndef OPENSSL_NO_ECDH
BIO_printf(bio_err," -named_curve arg - Elliptic curve name to use for ephemeral ECDH keys.\n" \
" Use \"openssl ecparam -list_curves\" for all names\n" \
" (default is nistp256).\n");
#endif
#ifdef FIONBIO
BIO_printf(bio_err," -nbio - Run with non-blocking IO\n");
#endif
BIO_printf(bio_err," -nbio_test - test with the non-blocking test bio\n");
BIO_printf(bio_err," -crlf - convert LF from terminal into CRLF\n");
BIO_printf(bio_err," -debug - Print more output\n");
BIO_printf(bio_err," -msg - Show protocol messages\n");
BIO_printf(bio_err," -state - Print the SSL states\n");
BIO_printf(bio_err," -CApath arg - PEM format directory of CA's\n");
BIO_printf(bio_err," -CAfile arg - PEM format file of CA's\n");
BIO_printf(bio_err," -nocert - Don't use any certificates (Anon-DH)\n");
BIO_printf(bio_err," -cipher arg - play with 'openssl ciphers' to see what goes here\n");
BIO_printf(bio_err," -serverpref - Use server's cipher preferences\n");
BIO_printf(bio_err," -quiet - No server output\n");
BIO_printf(bio_err," -no_tmp_rsa - Do not generate a tmp RSA key\n");
#ifndef OPENSSL_NO_PSK
BIO_printf(bio_err," -psk_hint arg - PSK identity hint to use\n");
BIO_printf(bio_err," -psk arg - PSK in hex (without 0x)\n");
# ifndef OPENSSL_NO_JPAKE
BIO_printf(bio_err," -jpake arg - JPAKE secret to use\n");
# endif
#endif
BIO_printf(bio_err," -ssl2 - Just talk SSLv2\n");
BIO_printf(bio_err," -ssl3 - Just talk SSLv3\n");
BIO_printf(bio_err," -tls1_1 - Just talk TLSv1_1\n");
BIO_printf(bio_err," -tls1 - Just talk TLSv1\n");
BIO_printf(bio_err," -dtls1 - Just talk DTLSv1\n");
BIO_printf(bio_err," -timeout - Enable timeouts\n");
BIO_printf(bio_err," -mtu - Set link layer MTU\n");
BIO_printf(bio_err," -chain - Read a certificate chain\n");
BIO_printf(bio_err," -no_ssl2 - Just disable SSLv2\n");
BIO_printf(bio_err," -no_ssl3 - Just disable SSLv3\n");
BIO_printf(bio_err," -no_tls1 - Just disable TLSv1\n");
BIO_printf(bio_err," -no_tls1_1 - Just disable TLSv1.1\n");
#ifndef OPENSSL_NO_DH
BIO_printf(bio_err," -no_dhe - Disable ephemeral DH\n");
#endif
#ifndef OPENSSL_NO_ECDH
BIO_printf(bio_err," -no_ecdhe - Disable ephemeral ECDH\n");
#endif
BIO_printf(bio_err," -bugs - Turn on SSL bug compatibility\n");
BIO_printf(bio_err," -www - Respond to a 'GET /' with a status page\n");
BIO_printf(bio_err," -WWW - Respond to a 'GET /<path> HTTP/1.0' with file ./<path>\n");
BIO_printf(bio_err," -HTTP - Respond to a 'GET /<path> HTTP/1.0' with file ./<path>\n");
BIO_printf(bio_err," with the assumption it contains a complete HTTP response.\n");
#ifndef OPENSSL_NO_ENGINE
BIO_printf(bio_err," -engine id - Initialise and use the specified engine\n");
#endif
BIO_printf(bio_err," -id_prefix arg - Generate SSL/TLS session IDs prefixed by 'arg'\n");
BIO_printf(bio_err," -rand file%cfile%c...\n", LIST_SEPARATOR_CHAR, LIST_SEPARATOR_CHAR);
#ifndef OPENSSL_NO_TLSEXT
BIO_printf(bio_err," -servername host - servername for HostName TLS extension\n");
BIO_printf(bio_err," -servername_fatal - on mismatch send fatal alert (default warning alert)\n");
BIO_printf(bio_err," -cert2 arg - certificate file to use for servername\n");
BIO_printf(bio_err," (default is %s)\n",TEST_CERT2);
BIO_printf(bio_err," -key2 arg - Private Key file to use for servername, in cert file if\n");
BIO_printf(bio_err," not specified (default is %s)\n",TEST_CERT2);
BIO_printf(bio_err," -tlsextdebug - hex dump of all TLS extensions received\n");
BIO_printf(bio_err," -no_ticket - disable use of RFC4507bis session tickets\n");
BIO_printf(bio_err," -legacy_renegotiation - enable use of legacy renegotiation (dangerous)\n");
#endif
}
| 1 |
[] |
openssl
|
ee2ffc279417f15fef3b1073c7dc81a908991516
| 289,233,632,029,114,880,000,000,000,000,000,000,000 | 94 |
Add Next Protocol Negotiation.
|
static int __get_any_page(struct page *p, unsigned long pfn, int flags)
{
int ret;
if (flags & MF_COUNT_INCREASED)
return 1;
/*
* When the target page is a free hugepage, just remove it
* from free hugepage list.
*/
if (!get_hwpoison_page(p)) {
if (PageHuge(p)) {
pr_info("%s: %#lx free huge page\n", __func__, pfn);
ret = 0;
} else if (is_free_buddy_page(p)) {
pr_info("%s: %#lx free buddy page\n", __func__, pfn);
ret = 0;
} else {
pr_info("%s: %#lx: unknown zero refcount page type %lx\n",
__func__, pfn, p->flags);
ret = -EIO;
}
} else {
/* Not a free page */
ret = 1;
}
return ret;
}
| 0 |
[] |
linux
|
46612b751c4941c5c0472ddf04027e877ae5990f
| 121,233,944,429,839,980,000,000,000,000,000,000,000 | 29 |
mm: hwpoison: fix thp split handing in soft_offline_in_use_page()
When soft_offline_in_use_page() runs on a thp tail page after pmd is
split, we trigger the following VM_BUG_ON_PAGE():
Memory failure: 0x3755ff: non anonymous thp
__get_any_page: 0x3755ff: unknown zero refcount page type 2fffff80000000
Soft offlining pfn 0x34d805 at process virtual address 0x20fff000
page:ffffea000d360140 count:0 mapcount:0 mapping:0000000000000000 index:0x1
flags: 0x2fffff80000000()
raw: 002fffff80000000 ffffea000d360108 ffffea000d360188 0000000000000000
raw: 0000000000000001 0000000000000000 00000000ffffffff 0000000000000000
page dumped because: VM_BUG_ON_PAGE(page_ref_count(page) == 0)
------------[ cut here ]------------
kernel BUG at ./include/linux/mm.h:519!
soft_offline_in_use_page() passed refcount and page lock from tail page
to head page, which is not needed because we can pass any subpage to
split_huge_page().
Naoya had fixed a similar issue in c3901e722b29 ("mm: hwpoison: fix thp
split handling in memory_failure()"). But he missed fixing soft
offline.
Link: http://lkml.kernel.org/r/[email protected]
Fixes: 61f5d698cc97 ("mm: re-enable THP")
Signed-off-by: zhongjiang <[email protected]>
Acked-by: Naoya Horiguchi <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: <[email protected]> [4.5+]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static void put_event(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
if (!atomic_long_dec_and_test(&event->refcount))
return;
if (!is_kernel_event(event))
perf_remove_from_owner(event);
WARN_ON_ONCE(ctx->parent_ctx);
/*
* There are two ways this annotation is useful:
*
* 1) there is a lock recursion from perf_event_exit_task
* see the comment there.
*
* 2) there is a lock-inversion with mmap_sem through
* perf_event_read_group(), which takes faults while
* holding ctx->mutex, however this is called after
* the last filedesc died, so there is no possibility
* to trigger the AB-BA case.
*/
mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
perf_remove_from_context(event, true);
mutex_unlock(&ctx->mutex);
_free_event(event);
}
| 0 |
[
"CWE-284",
"CWE-264"
] |
linux
|
f63a8daa5812afef4f06c962351687e1ff9ccb2b
| 300,892,231,014,953,830,000,000,000,000,000,000,000 | 29 |
perf: Fix event->ctx locking
There have been a few reported issues wrt. the lack of locking around
changing event->ctx. This patch tries to address those.
It avoids the whole rwsem thing; and while it appears to work, please
give it some thought in review.
What I did fail at is sensible runtime checks on the use of
event->ctx; the RCU use makes it very hard.
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Linus Torvalds <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
|
int Curl_protocol_getsock(struct connectdata *conn,
curl_socket_t *socks,
int numsocks)
{
if(conn->handler->proto_getsock)
return conn->handler->proto_getsock(conn, socks, numsocks);
/* Backup getsock logic. Since there is a live socket in use, we must wait
for it or it will be removed from watching when the multi_socket API is
used. */
socks[0] = conn->sock[FIRSTSOCKET];
return GETSOCK_READSOCK(0) | GETSOCK_WRITESOCK(0);
}
| 0 |
[
"CWE-416"
] |
curl
|
81d135d67155c5295b1033679c606165d4e28f3f
| 48,628,353,533,765,880,000,000,000,000,000,000,000 | 12 |
Curl_close: clear data->multi_easy on free to avoid use-after-free
Regression from b46cfbc068 (7.59.0)
CVE-2018-16840
Reported-by: Brian Carpenter (Geeknik Labs)
Bug: https://curl.haxx.se/docs/CVE-2018-16840.html
|
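The fix above clears the pointer at free time so later code sees NULL instead of a dangling pointer. A minimal sketch with illustrative types, not curl's structures:

#include <stdlib.h>

struct easy { int dummy; };
struct data { struct easy *multi_easy; };

/* Mirror of the fix's idea: clearing the pointer on free turns a
 * would-be use-after-free into a checkable NULL. */
static void close_data(struct data *d)
{
    free(d->multi_easy);
    d->multi_easy = NULL;   /* later code tests the pointer before use */
}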
R_API bool r_io_bank_map_add_top(RIO *io, const ut32 bankid, const ut32 mapid) {
RIOBank *bank = r_io_bank_get (io, bankid);
RIOMap *map = r_io_map_get (io, mapid);
r_return_val_if_fail (io && bank && map, false);
RIOMapRef *mapref = _mapref_from_map (map);
if (!mapref) {
return false;
}
RIOSubMap *sm = r_io_submap_new (io, mapref);
if (!sm) {
free (mapref);
return false;
}
RRBNode *entry = _find_entry_submap_node (bank, sm);
if (!entry) {
// no intersection with any submap, so just insert
if (!r_crbtree_insert (bank->submaps, sm, _find_sm_by_from_vaddr_cb, NULL)) {
free (sm);
free (mapref);
return false;
}
r_list_append (bank->maprefs, mapref);
return true;
}
bank->last_used = NULL;
RIOSubMap *bd = (RIOSubMap *)entry->data;
if (r_io_submap_to (bd) == r_io_submap_to (sm) &&
r_io_submap_from (bd) >= r_io_submap_from (sm)) {
// _find_entry_submap_node guarantees, that there is no submap
// prior to bd in the range of sm, so instead of deleting and inserting
// we can just memcpy
memcpy (bd, sm, sizeof (RIOSubMap));
free (sm);
r_list_append (bank->maprefs, mapref);
return true;
}
if (r_io_submap_from (bd) < r_io_submap_from (sm) &&
r_io_submap_to (sm) < r_io_submap_to (bd)) {
// split bd into 2 maps => bd and bdsm
RIOSubMap *bdsm = R_NEWCOPY (RIOSubMap, bd);
if (!bdsm) {
free (sm);
free (mapref);
return false;
}
r_io_submap_set_from (bdsm, r_io_submap_to (sm) + 1);
r_io_submap_set_to (bd, r_io_submap_from (sm) - 1);
// TODO: insert and check return value, before adjusting sm size
if (!r_crbtree_insert (bank->submaps, sm, _find_sm_by_from_vaddr_cb, NULL)) {
free (sm);
free (bdsm);
free (mapref);
return false;
}
if (!r_crbtree_insert (bank->submaps, bdsm, _find_sm_by_from_vaddr_cb, NULL)) {
r_crbtree_delete (bank->submaps, sm, _find_sm_by_from_vaddr_cb, NULL);
free (sm);
free (bdsm);
free (mapref);
return false;
}
r_list_append (bank->maprefs, mapref);
return true;
}
// guaranteed intersection
if (r_io_submap_from (bd) < r_io_submap_from (sm)) {
r_io_submap_set_to (bd, r_io_submap_from (sm) - 1);
entry = r_rbnode_next (entry);
}
ut64 smto = r_io_submap_to (sm);
while (entry && r_io_submap_to (((RIOSubMap *)entry->data)) <= smto) {
//delete all submaps that are completely included in sm
RRBNode *next = r_rbnode_next (entry);
// this can be optimized, there is no need to do a search here
// XXX this is a workaround to avoid a UAF in Reproducer: iobank-crash
void *smfree = bank->submaps->free;
bank->submaps->free = NULL;
bool a = r_crbtree_delete (bank->submaps, entry->data, _find_sm_by_from_vaddr_cb, NULL);
bank->submaps->free = smfree;
if (!a) {
entry = NULL;
break;
}
entry = next;
}
if (entry && r_io_submap_from (((RIOSubMap *)entry->data)) <= r_io_submap_to (sm)) {
bd = (RIOSubMap *)entry->data;
r_io_submap_set_from (bd, r_io_submap_to (sm) + 1);
}
if (!r_crbtree_insert (bank->submaps, sm, _find_sm_by_from_vaddr_cb, NULL)) {
free (sm);
free (mapref);
return false;
}
r_list_append (bank->maprefs, mapref);
return true;
}
| 1 |
[
"CWE-416"
] |
radare2
|
3345147916b9bb3da225248d571cdbac690c0c4d
| 63,723,229,772,803,150,000,000,000,000,000,000,000 | 98 |
Properly fix the UAF in r_io_bank_map_add_top ##crash
* Associated with the CVE-2022-0559
* Reported by alkyne Choi via huntr.dev
|
ZEND_VM_HANDLER(85, ZEND_FETCH_OBJ_W, VAR|UNUSED|THIS|CV, CONST|TMPVAR|CV, FETCH_REF|DIM_OBJ_WRITE|CACHE_SLOT)
{
USE_OPLINE
zend_free_op free_op1, free_op2;
zval *property, *container, *result;
SAVE_OPLINE();
container = GET_OP1_OBJ_ZVAL_PTR_PTR_UNDEF(BP_VAR_W);
if (OP1_TYPE == IS_UNUSED && UNEXPECTED(Z_TYPE_P(container) == IS_UNDEF)) {
ZEND_VM_DISPATCH_TO_HELPER(zend_this_not_in_object_context_helper);
}
property = GET_OP2_ZVAL_PTR(BP_VAR_R);
result = EX_VAR(opline->result.var);
zend_fetch_property_address(
result, container, OP1_TYPE, property, OP2_TYPE,
((OP2_TYPE == IS_CONST) ? CACHE_ADDR(opline->extended_value & ~ZEND_FETCH_OBJ_FLAGS) : NULL),
BP_VAR_W, opline->extended_value & ZEND_FETCH_OBJ_FLAGS, 1 OPLINE_CC EXECUTE_DATA_CC);
FREE_OP2();
if (OP1_TYPE == IS_VAR) {
FREE_VAR_PTR_AND_EXTRACT_RESULT_IF_NECESSARY(free_op1, result);
}
ZEND_VM_NEXT_OPCODE_CHECK_EXCEPTION();
}
| 0 |
[
"CWE-787"
] |
php-src
|
f1ce8d5f5839cb2069ea37ff424fb96b8cd6932d
| 69,898,140,970,851,325,000,000,000,000,000,000,000 | 25 |
Fix #73122: Integer Overflow when concatenating strings
We must avoid integer overflows in memory allocations, so we introduce
an additional check in the VM, and bail out in the rare case of an
overflow. Since the recent fix for bug #74960 still doesn't catch all
possible overflows, we fix that right away.
|
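The overflow check described above guards an allocation size computed from two string lengths. A minimal sketch of such a check; the function is illustrative, not the Zend VM code:

#include <stdint.h>
#include <stdlib.h>

/* Concatenation-style allocation: reject len1 + len2 (+1 for the NUL)
 * when it would wrap, instead of letting a tiny buffer be returned. */
static void *alloc_concat(size_t len1, size_t len2)
{
    if (len1 > SIZE_MAX - len2 || len1 + len2 > SIZE_MAX - 1)
        return NULL;                 /* would overflow size_t */
    return malloc(len1 + len2 + 1);
}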