| func (string, 0-484k chars) | target (int64, 0 or 1) | cwe (list, 0-4 entries) | project (string, 799 classes) | commit_id (string, 40 chars) | hash (float64) | size (int64, 1-24k) | message (string, 0-13.3k chars) |
---|---|---|---|---|---|---|---|
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
int noblock)
{
DEFINE_WAIT(wait);
struct vhost_msg_node *node;
ssize_t ret = 0;
unsigned size = sizeof(struct vhost_msg);
if (iov_iter_count(to) < size)
return 0;
while (1) {
if (!noblock)
prepare_to_wait(&dev->wait, &wait,
TASK_INTERRUPTIBLE);
node = vhost_dequeue_msg(dev, &dev->read_list);
if (node)
break;
if (noblock) {
ret = -EAGAIN;
break;
}
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
if (!dev->iotlb) {
ret = -EBADFD;
break;
}
schedule();
}
if (!noblock)
finish_wait(&dev->wait, &wait);
if (node) {
struct vhost_iotlb_msg *msg;
void *start = &node->msg;
switch (node->msg.type) {
case VHOST_IOTLB_MSG:
size = sizeof(node->msg);
msg = &node->msg.iotlb;
break;
case VHOST_IOTLB_MSG_V2:
size = sizeof(node->msg_v2);
msg = &node->msg_v2.iotlb;
break;
default:
BUG();
break;
}
ret = copy_to_iter(start, size, to);
if (ret != size || msg->type != VHOST_IOTLB_MISS) {
kfree(node);
return ret;
}
vhost_enqueue_msg(dev, &dev->pending_list, node);
}
return ret;
}
| 0 |
[
"CWE-120"
] |
linux
|
060423bfdee3f8bc6e2c1bac97de24d5415e2bc4
| 245,755,155,277,177,360,000,000,000,000,000,000,000 | 66 |
vhost: make sure log_num < in_num
The code assumes log_num < in_num everywhere, and that is true as long as
in_num is incremented by descriptor iov count, and log_num by 1. However
this breaks if there's a zero sized descriptor.
As a result, if a malicious guest creates a vring desc with desc.len = 0,
it may cause the host kernel to crash by overflowing the log array. This
bug can be triggered during the VM migration.
There's no need to log when desc.len = 0, so just don't increment log_num
in this case.
Fixes: 3a4d5c94e959 ("vhost_net: a kernel-level virtio server")
Cc: [email protected]
Reviewed-by: Lidong Chen <[email protected]>
Signed-off-by: ruippan <[email protected]>
Signed-off-by: yongduan <[email protected]>
Acked-by: Michael S. Tsirkin <[email protected]>
Reviewed-by: Tyler Hicks <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
|
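A minimal sketch (not the actual vhost patch) of the invariant this commit message describes: a zero-length descriptor still counts toward in_num, so the logging path must skip it or log_num can overrun the log array. The types and helper name below are hypothetical.

#include <stdint.h>

struct log_entry { uint64_t addr; uint64_t len; };

/* Hypothetical helper: record a log entry only for descriptors that
 * actually cover bytes, so log_num never grows past in_num when a
 * malicious guest supplies desc.len == 0. */
static void log_descriptor(struct log_entry *log, unsigned *log_num,
                           uint64_t addr, uint64_t len)
{
    if (len == 0)           /* nothing to log; don't bump log_num */
        return;
    log[*log_num].addr = addr;
    log[*log_num].len = len;
    ++*log_num;
}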
tty_cmd_linefeed(struct tty *tty, const struct tty_ctx *ctx)
{
struct window_pane *wp = ctx->wp;
if (ctx->ocy != ctx->orlower)
return;
if (ctx->bigger ||
(!tty_pane_full_width(tty, ctx) && !tty_use_margin(tty)) ||
tty_fake_bce(tty, wp, 8) ||
!tty_term_has(tty->term, TTYC_CSR) ||
wp->sx == 1 ||
wp->sy == 1) {
tty_redraw_region(tty, ctx);
return;
}
tty_default_attributes(tty, wp, ctx->bg);
tty_region_pane(tty, ctx, ctx->orupper, ctx->orlower);
tty_margin_pane(tty, ctx);
/*
* If we want to wrap a pane while using margins, the cursor needs to
* be exactly on the right of the region. If the cursor is entirely off
* the edge - move it back to the right. Some terminals are funny about
* this and insert extra spaces, so only use the right if margins are
* enabled.
*/
if (ctx->xoff + ctx->ocx > tty->rright) {
if (!tty_use_margin(tty))
tty_cursor(tty, 0, ctx->yoff + ctx->ocy);
else
tty_cursor(tty, tty->rright, ctx->yoff + ctx->ocy);
} else
tty_cursor_pane(tty, ctx, ctx->ocx, ctx->ocy);
tty_putc(tty, '\n');
}
| 0 |
[] |
src
|
b32e1d34e10a0da806823f57f02a4ae6e93d756e
| 78,350,892,613,986,610,000,000,000,000,000,000,000 | 39 |
evbuffer_new and bufferevent_new can both fail (when malloc fails) and
return NULL. GitHub issue 1547.
|
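A minimal sketch of the pattern this commit message asks for, using libevent's evbuffer_new(), which returns NULL when malloc fails; the wrapper function is invented here for illustration.

#include <event2/buffer.h>
#include <stdio.h>

/* Check the constructor's result instead of dereferencing it blindly. */
int make_buffer(struct evbuffer **out)
{
    struct evbuffer *buf = evbuffer_new();
    if (buf == NULL) {      /* allocation failed inside libevent */
        fprintf(stderr, "evbuffer_new failed\n");
        return -1;
    }
    *out = buf;
    return 0;
}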
_PUBLIC_ bool trim_string(char *s, const char *front, const char *back)
{
bool ret = false;
size_t front_len;
size_t back_len;
size_t len;
/* Ignore null or empty strings. */
if (!s || (s[0] == '\0'))
return false;
front_len = front? strlen(front) : 0;
back_len = back? strlen(back) : 0;
len = strlen(s);
if (front_len) {
while (len && strncmp(s, front, front_len)==0) {
/* Must use memmove here as src & dest can
* easily overlap. Found by valgrind. JRA. */
memmove(s, s+front_len, (len-front_len)+1);
len -= front_len;
ret=true;
}
}
if (back_len) {
while ((len >= back_len) && strncmp(s+len-back_len,back,back_len)==0) {
s[len-back_len]='\0';
len -= back_len;
ret=true;
}
}
return ret;
}
| 0 |
[] |
samba
|
8eae8d28bce2c3f6a323d3dc48ed10c2e6bb1ba5
| 283,372,084,500,840,100,000,000,000,000,000,000,000 | 35 |
CVE-2013-4476: lib-util: add file_check_permissions()
Bug: https://bugzilla.samba.org/show_bug.cgi?id=10234
Signed-off-by: Björn Baumbach <[email protected]>
Reviewed-by: Stefan Metzmacher <[email protected]>
|
NTSTATUS TCCompleteIrp (PIRP irp, NTSTATUS status, ULONG_PTR information)
{
irp->IoStatus.Status = status;
irp->IoStatus.Information = information;
IoCompleteRequest (irp, IO_NO_INCREMENT);
return status;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
VeraCrypt
|
f30f9339c9a0b9bbcc6f5ad38804af39db1f479e
| 185,077,641,225,555,820,000,000,000,000,000,000,000 | 7 |
Windows: fix low severity vulnerability in driver that allowed reading 3 bytes of kernel stack memory (with a rare possibility of 25 additional bytes). Reported by Tim Harrison.
|
static bool check_ftb_syntax(sys_var *self, THD *thd, set_var *var)
{
return ft_boolean_check_syntax_string((uchar*)
(var->save_result.string_value.str));
}
| 0 |
[
"CWE-264"
] |
mysql-server
|
48bd8b16fe382be302c6f0b45931be5aa6f29a0e
| 157,109,509,814,520,940,000,000,000,000,000,000,000 | 5 |
Bug#24388753: PRIVILEGE ESCALATION USING MYSQLD_SAFE
[This is the 5.5/5.6 version of the bugfix].
The problem was that it was possible to write log files ending
in .ini/.cnf that later could be parsed as an options file.
This made it possible for users to specify startup options
without the permissions to do so.
This patch fixes the problem by disallowing general query log
and slow query log to be written to files ending in .ini and .cnf.
|
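A hedged sketch of the filename check this commit message describes (both helper names are invented here): refuse general/slow query log targets ending in .ini or .cnf so a log file can never be parsed back as an options file at startup.

#include <string.h>

static int ends_with(const char *name, const char *suffix)
{
    size_t nlen = strlen(name), slen = strlen(suffix);
    return nlen >= slen && strcmp(name + nlen - slen, suffix) == 0;
}

/* Reject log file names that mysqld would later treat as option files. */
static int is_forbidden_log_name(const char *name)
{
    return ends_with(name, ".ini") || ends_with(name, ".cnf");
}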
v8::Local<v8::Value> FindFrameByRoutingId(v8::Isolate* isolate,
int routing_id) {
content::RenderFrame* render_frame =
content::RenderFrame::FromRoutingID(routing_id);
if (render_frame)
return WebFrameRenderer::Create(isolate, render_frame).ToV8();
else
return v8::Null(isolate);
}
| 0 |
[] |
electron
|
e9fa834757f41c0b9fe44a4dffe3d7d437f52d34
| 176,127,702,477,164,840,000,000,000,000,000,000,000 | 9 |
fix: ensure ElectronBrowser mojo service is only bound to appropriate render frames (#33344)
* fix: ensure ElectronBrowser mojo service is only bound to authorized render frames
Notes: no-notes
* refactor: extract electron API IPC to its own mojo interface
* fix: just check main frame not primary main frame
Co-authored-by: Samuel Attard <[email protected]>
Co-authored-by: Samuel Attard <[email protected]>
|
int main(int argc, char **argv)
{
unsigned int i;
git_extract_argv0_path(argv[0]);
git_setup_gettext();
if (argc == 2 && !strcmp(argv[1], "-h"))
usage(fast_import_usage);
setup_git_directory();
reset_pack_idx_option(&pack_idx_opts);
git_pack_config();
if (!pack_compression_seen && core_compression_seen)
pack_compression_level = core_compression_level;
alloc_objects(object_entry_alloc);
strbuf_init(&command_buf, 0);
atom_table = xcalloc(atom_table_sz, sizeof(struct atom_str*));
branch_table = xcalloc(branch_table_sz, sizeof(struct branch*));
avail_tree_table = xcalloc(avail_tree_table_sz, sizeof(struct avail_tree_content*));
marks = pool_calloc(1, sizeof(struct mark_set));
global_argc = argc;
global_argv = argv;
rc_free = pool_alloc(cmd_save * sizeof(*rc_free));
for (i = 0; i < (cmd_save - 1); i++)
rc_free[i].next = &rc_free[i + 1];
rc_free[cmd_save - 1].next = NULL;
prepare_packed_git();
start_packfile();
set_die_routine(die_nicely);
set_checkpoint_signal();
while (read_next_command() != EOF) {
const char *v;
if (!strcmp("blob", command_buf.buf))
parse_new_blob();
else if (skip_prefix(command_buf.buf, "ls ", &v))
parse_ls(v, NULL);
else if (skip_prefix(command_buf.buf, "commit ", &v))
parse_new_commit(v);
else if (skip_prefix(command_buf.buf, "tag ", &v))
parse_new_tag(v);
else if (skip_prefix(command_buf.buf, "reset ", &v))
parse_reset_branch(v);
else if (!strcmp("checkpoint", command_buf.buf))
parse_checkpoint();
else if (!strcmp("done", command_buf.buf))
break;
else if (starts_with(command_buf.buf, "progress "))
parse_progress();
else if (skip_prefix(command_buf.buf, "feature ", &v))
parse_feature(v);
else if (skip_prefix(command_buf.buf, "option git ", &v))
parse_option(v);
else if (starts_with(command_buf.buf, "option "))
/* ignore non-git options*/;
else
die("Unsupported command: %s", command_buf.buf);
if (checkpoint_requested)
checkpoint();
}
/* argv hasn't been parsed yet, do so */
if (!seen_data_command)
parse_argv();
if (require_explicit_termination && feof(stdin))
die("stream ends early");
end_packfile();
dump_branches();
dump_tags();
unkeep_all_packs();
dump_marks();
if (pack_edges)
fclose(pack_edges);
if (show_stats) {
uintmax_t total_count = 0, duplicate_count = 0;
for (i = 0; i < ARRAY_SIZE(object_count_by_type); i++)
total_count += object_count_by_type[i];
for (i = 0; i < ARRAY_SIZE(duplicate_count_by_type); i++)
duplicate_count += duplicate_count_by_type[i];
fprintf(stderr, "%s statistics:\n", argv[0]);
fprintf(stderr, "---------------------------------------------------------------------\n");
fprintf(stderr, "Alloc'd objects: %10" PRIuMAX "\n", alloc_count);
fprintf(stderr, "Total objects: %10" PRIuMAX " (%10" PRIuMAX " duplicates )\n", total_count, duplicate_count);
fprintf(stderr, " blobs : %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas of %10" PRIuMAX" attempts)\n", object_count_by_type[OBJ_BLOB], duplicate_count_by_type[OBJ_BLOB], delta_count_by_type[OBJ_BLOB], delta_count_attempts_by_type[OBJ_BLOB]);
fprintf(stderr, " trees : %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas of %10" PRIuMAX" attempts)\n", object_count_by_type[OBJ_TREE], duplicate_count_by_type[OBJ_TREE], delta_count_by_type[OBJ_TREE], delta_count_attempts_by_type[OBJ_TREE]);
fprintf(stderr, " commits: %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas of %10" PRIuMAX" attempts)\n", object_count_by_type[OBJ_COMMIT], duplicate_count_by_type[OBJ_COMMIT], delta_count_by_type[OBJ_COMMIT], delta_count_attempts_by_type[OBJ_COMMIT]);
fprintf(stderr, " tags : %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas of %10" PRIuMAX" attempts)\n", object_count_by_type[OBJ_TAG], duplicate_count_by_type[OBJ_TAG], delta_count_by_type[OBJ_TAG], delta_count_attempts_by_type[OBJ_TAG]);
fprintf(stderr, "Total branches: %10lu (%10lu loads )\n", branch_count, branch_load_count);
fprintf(stderr, " marks: %10" PRIuMAX " (%10" PRIuMAX " unique )\n", (((uintmax_t)1) << marks->shift) * 1024, marks_set_count);
fprintf(stderr, " atoms: %10u\n", atom_cnt);
fprintf(stderr, "Memory total: %10" PRIuMAX " KiB\n", (total_allocd + alloc_count*sizeof(struct object_entry))/1024);
fprintf(stderr, " pools: %10lu KiB\n", (unsigned long)(total_allocd/1024));
fprintf(stderr, " objects: %10" PRIuMAX " KiB\n", (alloc_count*sizeof(struct object_entry))/1024);
fprintf(stderr, "---------------------------------------------------------------------\n");
pack_report();
fprintf(stderr, "---------------------------------------------------------------------\n");
fprintf(stderr, "\n");
}
return failure ? 1 : 0;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
git
|
34fa79a6cde56d6d428ab0d3160cb094ebad3305
| 84,942,446,975,962,900,000,000,000,000,000,000,000 | 113 |
prefer memcpy to strcpy
When we already know the length of a string (e.g., because
we just malloc'd to fit it), it's nicer to use memcpy than
strcpy, as it makes it more obvious that we are not going to
overflow the buffer (because the size we pass matches the
size in the allocation).
This also eliminates calls to strcpy, which make auditing
the code base harder.
Signed-off-by: Jeff King <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]>
|
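The point of this commit message in miniature; a sketch under its stated rationale, not git's code:

#include <stdlib.h>
#include <string.h>

/* The buffer is sized from strlen(), so copy with memcpy: the size
 * passed visibly matches the allocation, which strcpy cannot show. */
char *xstrdup_sketch(const char *s)
{
    size_t len = strlen(s) + 1;     /* include the terminating NUL */
    char *out = malloc(len);
    if (!out)
        return NULL;
    memcpy(out, s, len);
    return out;
}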
static u32 rxclk_rx_s_max_pulse_width(struct cx23885_dev *dev, u32 ns,
u16 *divider)
{
u64 pulse_clocks;
if (ns > IR_MAX_DURATION)
ns = IR_MAX_DURATION;
pulse_clocks = ns_to_pulse_clocks(ns);
*divider = pulse_clocks_to_clock_divider(pulse_clocks);
cx23888_ir_write4(dev, CX23888_IR_RXCLK_REG, *divider);
return (u32) pulse_width_count_to_ns(FIFO_RXTX, *divider);
}
| 0 |
[
"CWE-400",
"CWE-401"
] |
linux
|
a7b2df76b42bdd026e3106cf2ba97db41345a177
| 162,463,036,201,029,660,000,000,000,000,000,000,000 | 12 |
media: rc: prevent memory leak in cx23888_ir_probe
In cx23888_ir_probe if kfifo_alloc fails the allocated memory for state
should be released.
Signed-off-by: Navid Emamdoost <[email protected]>
Signed-off-by: Sean Young <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]>
|
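A hedged sketch of the leak-fix pattern from this commit message (all names hypothetical): when a later allocation in a probe path fails, free what was already allocated before returning the error.

#include <stdlib.h>

struct ir_state { int *fifo; };

static int probe_sketch(struct ir_state **out)
{
    struct ir_state *state = calloc(1, sizeof(*state));
    if (!state)
        return -1;
    state->fifo = malloc(256 * sizeof(*state->fifo));
    if (!state->fifo) {
        free(state);        /* the fix: release 'state' on this path */
        return -1;
    }
    *out = state;
    return 0;
}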
bool add_key(Key::Keytype key_type, const LEX_CSTRING *key_name,
ha_key_alg algorithm, DDL_options_st ddl)
{
if (check_add_key(ddl) ||
!(last_key= new Key(key_type, key_name, algorithm, false, ddl)))
return true;
alter_info.key_list.push_back(last_key);
return false;
}
| 0 |
[
"CWE-703"
] |
server
|
39feab3cd31b5414aa9b428eaba915c251ac34a2
| 274,632,849,005,610,020,000,000,000,000,000,000,000 | 9 |
MDEV-26412 Server crash in Item_field::fix_outer_field for INSERT SELECT
IF an INSERT/REPLACE SELECT statement contained an ON expression in the top
level select and this expression used a subquery with a column reference
that could not be resolved then an attempt to resolve this reference as
an outer reference caused a crash of the server. This happened because the
outer context field in the Name_resolution_context structure was not set
to NULL for such references. Rather it pointed to the first element in
the select_stack.
Note that starting from 10.4 we cannot use the SELECT_LEX::outer_select()
method when parsing a SELECT construct.
Approved by Oleksandr Byelkin <[email protected]>
|
int r_jwe_set_full_unprotected_header_json_str(jwe_t * jwe, const char * str_unprotected_header) {
int ret;
json_t * j_unprotected_header = json_loads(str_unprotected_header, JSON_DECODE_ANY, NULL);
ret = r_jwe_set_full_unprotected_header_json_t(jwe, j_unprotected_header);
json_decref(j_unprotected_header);
return ret;
}
| 0 |
[
"CWE-787"
] |
rhonabwy
|
b4c2923a1ba4fabf9b55a89244127e153a3e549b
| 251,913,217,541,793,700,000,000,000,000,000,000,000 | 9 |
Fix buffer overflow on r_jwe_aesgcm_key_unwrap
|
static phys_addr_t pgd_pgtable_alloc(void)
{
void *ptr = (void *)__get_free_page(PGALLOC_GFP);
if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
BUG();
/* Ensure the zeroed page is visible to the page table walker */
dsb(ishst);
return __pa(ptr);
}
| 0 |
[] |
linux
|
15122ee2c515a253b0c66a3e618bc7ebe35105eb
| 9,787,432,075,830,255,000,000,000,000,000,000,000 | 10 |
arm64: Enforce BBM for huge IO/VMAP mappings
ioremap_page_range doesn't honour break-before-make and attempts to put
down huge mappings (using p*d_set_huge) over the top of pre-existing
table entries. This leads to us leaking page table memory and also gives
rise to TLB conflicts and spurious aborts, which have been seen in
practice on Cortex-A75.
Until this has been resolved, refuse to put block mappings when the
existing entry is found to be present.
Fixes: 324420bf91f60 ("arm64: add support for ioremap() block mappings")
Reported-by: Hanjun Guo <[email protected]>
Reported-by: Lei Li <[email protected]>
Acked-by: Ard Biesheuvel <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
Signed-off-by: Catalin Marinas <[email protected]>
|
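A schematic sketch (not arm64 page-table code) of the interim rule in this commit message: refuse to install a block mapping over a live entry rather than violate break-before-make.

#include <stdint.h>

/* Install a huge-mapping value only into an empty slot; a present
 * entry would require break-before-make, so refuse instead. */
static int set_huge_if_empty(uint64_t *entry, uint64_t val)
{
    if (*entry != 0)        /* existing entry present: refuse */
        return 0;
    *entry = val;
    return 1;
}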
verify_substring_values (v, value, substr, vtype, e1p, e2p)
SHELL_VAR *v;
char *value, *substr;
int vtype;
intmax_t *e1p, *e2p;
{
char *t, *temp1, *temp2;
arrayind_t len;
int expok;
#if defined (ARRAY_VARS)
ARRAY *a;
HASH_TABLE *h;
#endif
/* duplicate behavior of strchr(3) */
t = skiparith (substr, ':');
if (*t && *t == ':')
*t = '\0';
else
t = (char *)0;
temp1 = expand_arith_string (substr, Q_DOUBLE_QUOTES);
*e1p = evalexp (temp1, &expok);
free (temp1);
if (expok == 0)
return (0);
len = -1; /* paranoia */
switch (vtype)
{
case VT_VARIABLE:
case VT_ARRAYMEMBER:
len = MB_STRLEN (value);
break;
case VT_POSPARMS:
len = number_of_args () + 1;
if (*e1p == 0)
len++; /* add one arg if counting from $0 */
break;
#if defined (ARRAY_VARS)
case VT_ARRAYVAR:
/* For arrays, the first value deals with array indices. Negative
offsets count from one past the array's maximum index. Associative
arrays treat the number of elements as the maximum index. */
if (assoc_p (v))
{
h = assoc_cell (v);
len = assoc_num_elements (h) + (*e1p < 0);
}
else
{
a = (ARRAY *)value;
len = array_max_index (a) + (*e1p < 0); /* arrays index from 0 to n - 1 */
}
break;
#endif
}
if (len == -1) /* paranoia */
return -1;
if (*e1p < 0) /* negative offsets count from end */
*e1p += len;
if (*e1p > len || *e1p < 0)
return (-1);
#if defined (ARRAY_VARS)
/* For arrays, the second offset deals with the number of elements. */
if (vtype == VT_ARRAYVAR)
len = assoc_p (v) ? assoc_num_elements (h) : array_num_elements (a);
#endif
if (t)
{
t++;
temp2 = savestring (t);
temp1 = expand_arith_string (temp2, Q_DOUBLE_QUOTES);
free (temp2);
t[-1] = ':';
*e2p = evalexp (temp1, &expok);
free (temp1);
if (expok == 0)
return (0);
#if 1
if ((vtype == VT_ARRAYVAR || vtype == VT_POSPARMS) && *e2p < 0)
#else
/* bash-4.3: allow positional parameter length < 0 to count backwards
from end of positional parameters */
if (vtype == VT_ARRAYVAR && *e2p < 0)
#endif
{
internal_error (_("%s: substring expression < 0"), t);
return (0);
}
#if defined (ARRAY_VARS)
/* In order to deal with sparse arrays, push the intelligence about how
to deal with the number of elements desired down to the array-
specific functions. */
if (vtype != VT_ARRAYVAR)
#endif
{
if (*e2p < 0)
{
*e2p += len;
if (*e2p < 0 || *e2p < *e1p)
{
internal_error (_("%s: substring expression < 0"), t);
return (0);
}
}
else
*e2p += *e1p; /* want E2 chars starting at E1 */
if (*e2p > len)
*e2p = len;
}
}
else
*e2p = len;
return (1);
}
| 0 |
[] |
bash
|
955543877583837c85470f7fb8a97b7aa8d45e6c
| 202,268,796,990,387,200,000,000,000,000,000,000,000 | 122 |
bash-4.4-rc2 release
|
sg_remove_device(struct device *cl_dev, struct class_interface *cl_intf)
{
struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
Sg_device *sdp = dev_get_drvdata(cl_dev);
unsigned long iflags;
Sg_fd *sfp;
int val;
if (!sdp)
return;
/* want sdp->detaching non-zero as soon as possible */
val = atomic_inc_return(&sdp->detaching);
if (val > 1)
return; /* only want to do following once per device */
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"%s\n", __func__));
read_lock_irqsave(&sdp->sfd_lock, iflags);
list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
wake_up_interruptible_all(&sfp->read_wait);
kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
}
wake_up_interruptible_all(&sdp->open_wait);
read_unlock_irqrestore(&sdp->sfd_lock, iflags);
sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
cdev_del(sdp->cdev);
sdp->cdev = NULL;
kref_put(&sdp->d_ref, sg_device_destroy);
}
| 0 |
[
"CWE-190",
"CWE-189"
] |
linux
|
fdc81f45e9f57858da6351836507fbcf1b7583ee
| 106,160,273,077,868,630,000,000,000,000,000,000,000 | 33 |
sg_start_req(): use import_iovec()
Signed-off-by: Al Viro <[email protected]>
|
void ide_sector_write(IDEState *s)
{
int64_t sector_num;
int n;
s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
sector_num = ide_get_sector(s);
#if defined(DEBUG_IDE)
printf("sector=%" PRId64 "\n", sector_num);
#endif
n = s->nsector;
if (n > s->req_nb_sectors) {
n = s->req_nb_sectors;
}
s->iov.iov_base = s->io_buffer;
s->iov.iov_len = n * BDRV_SECTOR_SIZE;
qemu_iovec_init_external(&s->qiov, &s->iov, 1);
bdrv_acct_start(s->bs, &s->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
s->pio_aiocb = bdrv_aio_writev(s->bs, sector_num, &s->qiov, n,
ide_sector_write_cb, s);
}
| 0 |
[
"CWE-189"
] |
qemu
|
940973ae0b45c9b6817bab8e4cf4df99a9ef83d7
| 272,557,220,141,543,850,000,000,000,000,000,000,000 | 23 |
ide: Correct improper smart self test counter reset in ide core.
The SMART self test counter was incorrectly being reset to zero,
not 1. This had the effect that on every 21st SMART EXECUTE OFFLINE:
* We would write off the beginning of a dynamically allocated buffer
* We forgot the SMART history
Fix this.
Signed-off-by: Benoit Canet <[email protected]>
Message-id: [email protected]
Reviewed-by: Markus Armbruster <[email protected]>
Cc: [email protected]
Acked-by: Kevin Wolf <[email protected]>
[PMM: tweaked commit message as per suggestions from Markus]
Signed-off-by: Peter Maydell <[email protected]>
|
regexp (void)
{
branch();
while (tok == OR)
{
tok = lex();
branch();
addtok(OR);
}
}
| 0 |
[
"CWE-189"
] |
grep
|
cbbc1a45b9f843c811905c97c90a5d31f8e6c189
| 121,729,251,659,025,750,000,000,000,000,000,000,000 | 10 |
grep: fix some core dumps with long lines etc.
These problems mostly occur because the code attempts to stuff
sizes into int or into unsigned int; this doesn't work on most
64-bit hosts and the errors can lead to core dumps.
* NEWS: Document this.
* src/dfa.c (token): Typedef to ptrdiff_t, since the enum's
range could be as small as -128 .. 127 on practical hosts.
(position.index): Now size_t, not unsigned int.
(leaf_set.elems): Now size_t *, not unsigned int *.
(dfa_state.hash, struct mb_char_classes.nchars, .nch_classes)
(.nranges, .nequivs, .ncoll_elems, struct dfa.cindex, .calloc, .tindex)
(.talloc, .depth, .nleaves, .nregexps, .nmultibyte_prop, .nmbcsets):
(.mbcsets_alloc): Now size_t, not int.
(dfa_state.first_end): Now token, not int.
(state_num): New type.
(struct mb_char_classes.cset): Now ptrdiff_t, not int.
(struct dfa.utf8_anychar_classes): Now token[5], not int[5].
(struct dfa.sindex, .salloc, .tralloc): Now state_num, not int.
(struct dfa.trans, .realtrans, .fails): Now state_num **, not int **.
(struct dfa.newlines): Now state_num *, not int *.
(prtok): Don't assume 'token' is no wider than int.
(lexleft, parens, depth): Now size_t, not int.
(charclass_index, nsubtoks)
(parse_bracket_exp, addtok, copytoks, closure, insert, merge, delete)
(state_index, epsclosure, state_separate_contexts)
(dfaanalyze, dfastate, build_state, realloc_trans_if_necessary)
(transit_state_singlebyte, match_anychar, match_mb_charset)
(check_matching_with_multibyte_ops, transit_state_consume_1char)
(transit_state, dfaexec, free_mbdata, dfaoptimize, dfafree)
(freelist, enlist, addlists, inboth, dfamust):
Don't assume indexes fit in 'int'.
(lex): Avoid overflow in string-to-{hi,lo} conversions.
(dfaanalyze): Redo indexing so that it works with size_t values,
which cannot go negative.
* src/dfa.h (dfaexec): Count argument is now size_t *, not int *.
(dfastate): State numbers are now ptrdiff_t, not int.
* src/dfasearch.c: Include "intprops.h", for TYPE_MAXIMUM.
(kwset_exact_matches): Now size_t, not int.
(EGexecute): Don't assume indexes fit in 'int'.
Check for overflow before converting a ptrdiff_t to a regoff_t,
as regoff_t is narrower than ptrdiff_t in 64-bit glibc (contra POSIX).
Check for memory exhaustion in re_search rather than treating
it merely as failure to match; use xalloc_die () to report any error.
* src/kwset.c (struct trie.accepting): Now size_t, not unsigned int.
(struct kwset.words): Now ptrdiff_t, not int.
* src/kwset.h (struct kwsmatch.index): Now size_t, not int.
|
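A runnable illustration of the core failure mode in this changelog: a size stored in an int truncates, and typically goes negative, on 64-bit hosts once the value exceeds INT_MAX.

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    size_t big = (size_t)3 * 1024 * 1024 * 1024;    /* 3 GiB */
    int narrowed = (int)big;    /* implementation-defined truncation */
    printf("size_t %zu becomes int %d\n", big, narrowed);
    return 0;
}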
merge_parse_objectid(struct node *np, FILE * fp, char *name)
{
struct node *nnp;
/*
* printf("merge defval --> %s\n",np->defaultValue);
*/
nnp = parse_objectid(fp, name);
if (nnp) {
/*
* apply last OID sub-identifier data to the information
*/
/*
* already collected for this node.
*/
struct node *headp, *nextp;
int ncount = 0;
nextp = headp = nnp;
while (nnp->next) {
nextp = nnp;
ncount++;
nnp = nnp->next;
}
np->label = nnp->label;
np->subid = nnp->subid;
np->modid = nnp->modid;
np->parent = nnp->parent;
if (nnp->filename != NULL) {
free(nnp->filename);
}
free(nnp);
if (ncount) {
nextp->next = np;
np = headp;
}
} else {
free_node(np);
np = NULL;
}
return np;
}
| 0 |
[
"CWE-59",
"CWE-61"
] |
net-snmp
|
4fd9a450444a434a993bc72f7c3486ccce41f602
| 166,848,878,251,070,400,000,000,000,000,000,000,000 | 44 |
CHANGES: snmpd: Stop reading and writing the mib_indexes/* files
Caching directory contents is something the operating system should do
and is not something Net-SNMP should do. Instead of storing a copy of
the directory contents in ${tmp_dir}/mib_indexes/${n}, always scan a
MIB directory.
|
void vrend_set_streamout_targets(struct vrend_context *ctx,
UNUSED uint32_t append_bitmask,
uint32_t num_targets,
uint32_t *handles)
{
struct vrend_so_target *target;
uint i;
if (!has_feature(feat_transform_feedback))
return;
if (num_targets) {
bool found = false;
struct vrend_streamout_object *obj;
LIST_FOR_EACH_ENTRY(obj, &ctx->sub->streamout_list, head) {
if (obj->num_targets == num_targets) {
if (!memcmp(handles, obj->handles, num_targets * 4)) {
found = true;
break;
}
}
}
if (found) {
ctx->sub->current_so = obj;
glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, obj->id);
return;
}
obj = CALLOC_STRUCT(vrend_streamout_object);
if (has_feature(feat_transform_feedback2)) {
glGenTransformFeedbacks(1, &obj->id);
glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, obj->id);
}
obj->num_targets = num_targets;
for (i = 0; i < num_targets; i++) {
obj->handles[i] = handles[i];
if (handles[i] == 0)
continue;
target = vrend_object_lookup(ctx->sub->object_hash, handles[i], VIRGL_OBJECT_STREAMOUT_TARGET);
if (!target) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_HANDLE, handles[i]);
free(obj);
return;
}
vrend_so_target_reference(&obj->so_targets[i], target);
}
vrend_hw_emit_streamout_targets(ctx, obj);
list_addtail(&obj->head, &ctx->sub->streamout_list);
ctx->sub->current_so = obj;
obj->xfb_state = XFB_STATE_STARTED_NEED_BEGIN;
} else {
if (has_feature(feat_transform_feedback2))
glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, 0);
ctx->sub->current_so = NULL;
}
}
| 0 |
[
"CWE-787"
] |
virglrenderer
|
cbc8d8b75be360236cada63784046688aeb6d921
| 96,151,726,868,106,850,000,000,000,000,000,000,000 | 56 |
vrend: check transfer bounds for negative values too and report error
Closes #138
Signed-off-by: Gert Wollny <[email protected]>
Reviewed-by: Emil Velikov <[email protected]>
|
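A sketch of the kind of check named in this commit message (function and parameters invented here): validate a transfer region against the resource size, rejecting negative offsets and sizes before any conversion to unsigned types can hide them.

#include <stdint.h>

static int transfer_in_bounds(int64_t offset, int64_t size, int64_t res_size)
{
    if (offset < 0 || size < 0)     /* negative values too */
        return 0;
    if (size > res_size - offset)   /* overflow-safe upper bound */
        return 0;
    return 1;
}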
TIFFWriteDirectoryTagTransferfunction(TIFF* tif, uint32* ndir, TIFFDirEntry* dir)
{
static const char module[] = "TIFFWriteDirectoryTagTransferfunction";
uint32 m;
uint16 n;
uint16* o;
int p;
if (dir==NULL)
{
(*ndir)++;
return(1);
}
m=(1<<tif->tif_dir.td_bitspersample);
n=tif->tif_dir.td_samplesperpixel-tif->tif_dir.td_extrasamples;
/*
* Check if the table can be written as a single column,
* or if it must be written as 3 columns. Note that we
* write a 3-column tag if there are 2 samples/pixel and
* a single column of data won't suffice--hmm.
*/
if (n>3)
n=3;
if (n==3)
{
if (!_TIFFmemcmp(tif->tif_dir.td_transferfunction[0],tif->tif_dir.td_transferfunction[2],m*sizeof(uint16)))
n=2;
}
if (n==2)
{
if (!_TIFFmemcmp(tif->tif_dir.td_transferfunction[0],tif->tif_dir.td_transferfunction[1],m*sizeof(uint16)))
n=1;
}
if (n==0)
n=1;
o=_TIFFmalloc(n*m*sizeof(uint16));
if (o==NULL)
{
TIFFErrorExt(tif->tif_clientdata,module,"Out of memory");
return(0);
}
_TIFFmemcpy(&o[0],tif->tif_dir.td_transferfunction[0],m*sizeof(uint16));
if (n>1)
_TIFFmemcpy(&o[m],tif->tif_dir.td_transferfunction[1],m*sizeof(uint16));
if (n>2)
_TIFFmemcpy(&o[2*m],tif->tif_dir.td_transferfunction[2],m*sizeof(uint16));
p=TIFFWriteDirectoryTagCheckedShortArray(tif,ndir,dir,TIFFTAG_TRANSFERFUNCTION,n*m,o);
_TIFFfree(o);
return(p);
}
| 0 |
[
"CWE-617"
] |
libtiff
|
de144fd228e4be8aa484c3caf3d814b6fa88c6d9
| 115,948,368,230,912,520,000,000,000,000,000,000,000 | 49 |
TIFFWriteDirectorySec: avoid assertion. Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2795. CVE-2018-10963
|
static Image *ReadICONImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
IconFile
icon_file;
IconInfo
icon_info;
Image
*image;
MagickBooleanType
status;
MagickSizeType
extent;
register ssize_t
i,
x;
register Quantum
*q;
register unsigned char
*p;
size_t
bit,
byte,
bytes_per_line,
one,
scanline_pad;
ssize_t
count,
offset,
y;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"%s",image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
icon_file.reserved=(short) ReadBlobLSBShort(image);
icon_file.resource_type=(short) ReadBlobLSBShort(image);
icon_file.count=(short) ReadBlobLSBShort(image);
if ((icon_file.reserved != 0) ||
((icon_file.resource_type != 1) && (icon_file.resource_type != 2)) ||
(icon_file.count > MaxIcons))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
extent=0;
for (i=0; i < icon_file.count; i++)
{
icon_file.directory[i].width=(unsigned char) ReadBlobByte(image);
icon_file.directory[i].height=(unsigned char) ReadBlobByte(image);
icon_file.directory[i].colors=(unsigned char) ReadBlobByte(image);
icon_file.directory[i].reserved=(unsigned char) ReadBlobByte(image);
icon_file.directory[i].planes=(unsigned short) ReadBlobLSBShort(image);
icon_file.directory[i].bits_per_pixel=(unsigned short)
ReadBlobLSBShort(image);
icon_file.directory[i].size=ReadBlobLSBLong(image);
icon_file.directory[i].offset=ReadBlobLSBLong(image);
if (EOFBlob(image) != MagickFalse)
break;
extent=MagickMax(extent,icon_file.directory[i].size);
}
if ((EOFBlob(image) != MagickFalse) || (extent > GetBlobSize(image)))
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
one=1;
for (i=0; i < icon_file.count; i++)
{
/*
Verify Icon identifier.
*/
offset=(ssize_t) SeekBlob(image,(MagickOffsetType)
icon_file.directory[i].offset,SEEK_SET);
if (offset < 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
icon_info.size=ReadBlobLSBLong(image);
icon_info.width=(unsigned char) ReadBlobLSBSignedLong(image);
icon_info.height=(unsigned char) (ReadBlobLSBSignedLong(image)/2);
icon_info.planes=ReadBlobLSBShort(image);
icon_info.bits_per_pixel=ReadBlobLSBShort(image);
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
if (((icon_info.planes == 18505) && (icon_info.bits_per_pixel == 21060)) ||
(icon_info.size == 0x474e5089))
{
Image
*icon_image;
ImageInfo
*read_info;
size_t
length;
unsigned char
*png;
/*
Icon image encoded as a compressed PNG image.
*/
length=icon_file.directory[i].size;
if ((length < 16) || (~length < 16))
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
png=(unsigned char *) AcquireQuantumMemory(length+16,sizeof(*png));
if (png == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
(void) CopyMagickMemory(png,"\211PNG\r\n\032\n\000\000\000\015",12);
png[12]=(unsigned char) icon_info.planes;
png[13]=(unsigned char) (icon_info.planes >> 8);
png[14]=(unsigned char) icon_info.bits_per_pixel;
png[15]=(unsigned char) (icon_info.bits_per_pixel >> 8);
count=ReadBlob(image,length-16,png+16);
icon_image=(Image *) NULL;
if (count > 0)
{
read_info=CloneImageInfo(image_info);
(void) CopyMagickString(read_info->magick,"PNG",MagickPathExtent);
icon_image=BlobToImage(read_info,png,length+16,exception);
read_info=DestroyImageInfo(read_info);
}
png=(unsigned char *) RelinquishMagickMemory(png);
if (icon_image == (Image *) NULL)
{
if (count != (ssize_t) (length-16))
ThrowReaderException(CorruptImageError,
"InsufficientImageDataInFile");
image=DestroyImageList(image);
return((Image *) NULL);
}
DestroyBlob(icon_image);
icon_image->blob=ReferenceBlob(image->blob);
ReplaceImageInList(&image,icon_image);
}
else
{
if (icon_info.bits_per_pixel > 32)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
icon_info.compression=ReadBlobLSBLong(image);
icon_info.image_size=ReadBlobLSBLong(image);
icon_info.x_pixels=ReadBlobLSBLong(image);
icon_info.y_pixels=ReadBlobLSBLong(image);
icon_info.number_colors=ReadBlobLSBLong(image);
icon_info.colors_important=ReadBlobLSBLong(image);
image->alpha_trait=BlendPixelTrait;
image->columns=(size_t) icon_file.directory[i].width;
if ((ssize_t) image->columns > icon_info.width)
image->columns=(size_t) icon_info.width;
if (image->columns == 0)
image->columns=256;
image->rows=(size_t) icon_file.directory[i].height;
if ((ssize_t) image->rows > icon_info.height)
image->rows=(size_t) icon_info.height;
if (image->rows == 0)
image->rows=256;
image->depth=icon_info.bits_per_pixel;
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" scene = %.20g",(double) i);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" size = %.20g",(double) icon_info.size);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" width = %.20g",(double) icon_file.directory[i].width);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" height = %.20g",(double) icon_file.directory[i].height);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" colors = %.20g",(double ) icon_info.number_colors);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" planes = %.20g",(double) icon_info.planes);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" bpp = %.20g",(double) icon_info.bits_per_pixel);
}
if ((icon_info.number_colors != 0) || (icon_info.bits_per_pixel <= 16U))
{
image->storage_class=PseudoClass;
image->colors=icon_info.number_colors;
if (image->colors == 0)
image->colors=one << icon_info.bits_per_pixel;
}
if (image->storage_class == PseudoClass)
{
register ssize_t
i;
unsigned char
*icon_colormap;
/*
Read Icon raster colormap.
*/
if (AcquireImageColormap(image,image->colors,exception) ==
MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
icon_colormap=(unsigned char *) AcquireQuantumMemory((size_t)
image->colors,4UL*sizeof(*icon_colormap));
if (icon_colormap == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,(size_t) (4*image->colors),icon_colormap);
if (count != (ssize_t) (4*image->colors))
ThrowReaderException(CorruptImageError,
"InsufficientImageDataInFile");
p=icon_colormap;
for (i=0; i < (ssize_t) image->colors; i++)
{
image->colormap[i].blue=(Quantum) ScaleCharToQuantum(*p++);
image->colormap[i].green=(Quantum) ScaleCharToQuantum(*p++);
image->colormap[i].red=(Quantum) ScaleCharToQuantum(*p++);
p++;
}
icon_colormap=(unsigned char *) RelinquishMagickMemory(icon_colormap);
}
/*
Convert Icon raster image to pixel packets.
*/
if ((image_info->ping != MagickFalse) &&
(image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
bytes_per_line=(((image->columns*icon_info.bits_per_pixel)+31) &
~31) >> 3;
(void) bytes_per_line;
scanline_pad=((((image->columns*icon_info.bits_per_pixel)+31) & ~31)-
(image->columns*icon_info.bits_per_pixel)) >> 3;
switch (icon_info.bits_per_pixel)
{
case 1:
{
/*
Convert bitmap scanline.
*/
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) (image->columns-7); x+=8)
{
byte=(size_t) ReadBlobByte(image);
for (bit=0; bit < 8; bit++)
{
SetPixelIndex(image,((byte & (0x80 >> bit)) != 0 ? 0x01 :
0x00),q);
q+=GetPixelChannels(image);
}
}
if ((image->columns % 8) != 0)
{
byte=(size_t) ReadBlobByte(image);
for (bit=0; bit < (image->columns % 8); bit++)
{
SetPixelIndex(image,((byte & (0x80 >> bit)) != 0 ? 0x01 :
0x00),q);
q+=GetPixelChannels(image);
}
}
for (x=0; x < (ssize_t) scanline_pad; x++)
(void) ReadBlobByte(image);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,image->rows-y-1,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 4:
{
/*
Read 4-bit Icon scanline.
*/
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < ((ssize_t) image->columns-1); x+=2)
{
byte=(size_t) ReadBlobByte(image);
SetPixelIndex(image,((byte >> 4) & 0xf),q);
q+=GetPixelChannels(image);
SetPixelIndex(image,((byte) & 0xf),q);
q+=GetPixelChannels(image);
}
if ((image->columns % 2) != 0)
{
byte=(size_t) ReadBlobByte(image);
SetPixelIndex(image,((byte >> 4) & 0xf),q);
q+=GetPixelChannels(image);
}
for (x=0; x < (ssize_t) scanline_pad; x++)
(void) ReadBlobByte(image);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,image->rows-y-1,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 8:
{
/*
Convert PseudoColor scanline.
*/
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte=(size_t) ReadBlobByte(image);
SetPixelIndex(image,(Quantum) byte,q);
q+=GetPixelChannels(image);
}
for (x=0; x < (ssize_t) scanline_pad; x++)
(void) ReadBlobByte(image);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,image->rows-y-1,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 16:
{
/*
Convert PseudoColor scanline.
*/
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte=(size_t) ReadBlobByte(image);
byte|=(size_t) (ReadBlobByte(image) << 8);
SetPixelIndex(image,(Quantum) byte,q);
q+=GetPixelChannels(image);
}
for (x=0; x < (ssize_t) scanline_pad; x++)
(void) ReadBlobByte(image);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,image->rows-y-1,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 24:
case 32:
{
/*
Convert DirectColor scanline.
*/
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)),q);
SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)),q);
SetPixelRed(image,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)),q);
if (icon_info.bits_per_pixel == 32)
SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)),q);
q+=GetPixelChannels(image);
}
if (icon_info.bits_per_pixel == 24)
for (x=0; x < (ssize_t) scanline_pad; x++)
(void) ReadBlobByte(image);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,image->rows-y-1,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
default:
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
if ((image_info->ping == MagickFalse) &&
(icon_info.bits_per_pixel <= 16))
(void) SyncImage(image,exception);
if (icon_info.bits_per_pixel != 32)
{
/*
Read the ICON alpha mask.
*/
image->storage_class=DirectClass;
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < ((ssize_t) image->columns-7); x+=8)
{
byte=(size_t) ReadBlobByte(image);
for (bit=0; bit < 8; bit++)
{
SetPixelAlpha(image,(((byte & (0x80 >> bit)) != 0) ?
TransparentAlpha : OpaqueAlpha),q);
q+=GetPixelChannels(image);
}
}
if ((image->columns % 8) != 0)
{
byte=(size_t) ReadBlobByte(image);
for (bit=0; bit < (image->columns % 8); bit++)
{
SetPixelAlpha(image,(((byte & (0x80 >> bit)) != 0) ?
TransparentAlpha : OpaqueAlpha),q);
q+=GetPixelChannels(image);
}
}
if ((image->columns % 32) != 0)
for (x=0; x < (ssize_t) ((32-(image->columns % 32))/8); x++)
(void) ReadBlobByte(image);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
}
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
if (i < (ssize_t) (icon_file.count-1))
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
| 1 |
[
"CWE-772"
] |
ImageMagick
|
fbb14283450d3001403e7d9725566dd4fb2c3bb5
| 287,405,269,820,115,900,000,000,000,000,000,000,000 | 507 |
Fixed memory leak reported in #457.
|
u64 gf_sys_clock_high_res()
{
return OS_GetSysClockHR();
}
| 0 |
[
"CWE-787"
] |
gpac
|
f3698bb1bce62402805c3fda96551a23101a32f9
| 109,584,420,288,678,690,000,000,000,000,000,000,000 | 4 |
fix buffer overrun in gf_bin128_parse
closes #1204
closes #1205
|
static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
u32 interruptibility = interruptibility_old;
interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
if (mask & KVM_X86_SHADOW_INT_MOV_SS)
interruptibility |= GUEST_INTR_STATE_MOV_SS;
else if (mask & KVM_X86_SHADOW_INT_STI)
interruptibility |= GUEST_INTR_STATE_STI;
if ((interruptibility != interruptibility_old))
vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
}
| 0 |
[
"CWE-400"
] |
linux-2.6
|
9581d442b9058d3699b4be568b6e5eae38a41493
| 221,718,398,620,304,950,000,000,000,000,000,000,000 | 15 |
KVM: Fix fs/gs reload oops with invalid ldt
kvm reloads the host's fs and gs blindly, however the underlying segment
descriptors may be invalid due to the user modifying the ldt after loading
them.
Fix by using the safe accessors (loadsegment() and load_gs_index()) instead
of home grown unsafe versions.
This is CVE-2010-3698.
KVM-Stable-Tag.
Signed-off-by: Avi Kivity <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]>
|
get_user_id_native (u32 * keyid)
{
size_t rn;
char *p = get_user_id (keyid, &rn);
char *p2 = utf8_to_native (p, rn, 0);
xfree (p);
return p2;
}
| 0 |
[
"CWE-310"
] |
gnupg
|
4bde12206c5bf199dc6e12a74af8da4558ba41bf
| 241,328,443,314,502,800,000,000,000,000,000,000,000 | 8 |
gpg: Distinguish between missing and cleared key flags.
* include/cipher.h (PUBKEY_USAGE_NONE): New.
* g10/getkey.c (parse_key_usage): Set new flag.
--
We do not want to use the default capabilities (derived from the
algorithm) if any key flags are given in a signature. Thus if key
flags are used in any way, the default key capabilities are never
used.
This allows to create a key with key flags set to all zero so it can't
be used. This better reflects common sense.
|
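A hedged sketch of the distinction this commit message draws; the flag value and function are invented here (the real define lives in include/cipher.h per the message): missing key flags fall back to algorithm defaults, while flags that are present but all zero mean the key is deliberately unusable.

/* Hypothetical flag value for illustration only. */
#define PUBKEY_USAGE_NONE 0x40

static unsigned effective_usage(int flags_present, unsigned flags,
                                unsigned algo_defaults)
{
    if (!flags_present)
        return algo_defaults;   /* no subpacket: use defaults */
    return flags ? flags : PUBKEY_USAGE_NONE;   /* cleared: unusable */
}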
const VTermLineInfo *vterm_state_get_lineinfo(const VTermState *state, int row)
{
return state->lineinfo + row;
}
| 0 |
[
"CWE-476"
] |
vim
|
cd929f7ba8cc5b6d6dcf35c8b34124e969fed6b8
| 334,691,320,236,616,300,000,000,000,000,000,000 | 4 |
patch 8.1.0633: crash when out of memory while opening a terminal window
Problem: Crash when out of memory while opening a terminal window.
Solution: Handle out-of-memory more gracefully.
|
static void cil_reset_rangetransition(struct cil_rangetransition *rangetrans)
{
if (rangetrans->range_str == NULL) {
cil_reset_levelrange(rangetrans->range);
}
}
| 0 |
[
"CWE-416"
] |
selinux
|
f34d3d30c8325e4847a6b696fe7a3936a8a361f3
| 197,782,370,229,658,770,000,000,000,000,000,000,000 | 6 |
libsepol/cil: Destroy classperms list when resetting classpermission
Nicolas Iooss reports:
A few months ago, OSS-Fuzz found a crash in the CIL compiler, which
got reported as
https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=28648 (the title
is misleading, or is caused by another issue that conflicts with the
one I report in this message). Here is a minimized CIL policy which
reproduces the issue:
(class CLASS (PERM))
(classorder (CLASS))
(sid SID)
(sidorder (SID))
(user USER)
(role ROLE)
(type TYPE)
(category CAT)
(categoryorder (CAT))
(sensitivity SENS)
(sensitivityorder (SENS))
(sensitivitycategory SENS (CAT))
(allow TYPE self (CLASS (PERM)))
(roletype ROLE TYPE)
(userrole USER ROLE)
(userlevel USER (SENS))
(userrange USER ((SENS)(SENS (CAT))))
(sidcontext SID (USER ROLE TYPE ((SENS)(SENS))))
(classpermission CLAPERM)
(optional OPT
(roletype nonexistingrole nonexistingtype)
(classpermissionset CLAPERM (CLASS (PERM)))
)
The CIL policy fuzzer (which mimics secilc built with clang Address
Sanitizer) reports:
==36541==ERROR: AddressSanitizer: heap-use-after-free on address
0x603000004f98 at pc 0x56445134c842 bp 0x7ffe2a256590 sp
0x7ffe2a256588
READ of size 8 at 0x603000004f98 thread T0
#0 0x56445134c841 in __cil_verify_classperms
/selinux/libsepol/src/../cil/src/cil_verify.c:1620:8
#1 0x56445134a43e in __cil_verify_classpermission
/selinux/libsepol/src/../cil/src/cil_verify.c:1650:9
#2 0x56445134a43e in __cil_pre_verify_helper
/selinux/libsepol/src/../cil/src/cil_verify.c:1715:8
#3 0x5644513225ac in cil_tree_walk_core
/selinux/libsepol/src/../cil/src/cil_tree.c:272:9
#4 0x564451322ab1 in cil_tree_walk
/selinux/libsepol/src/../cil/src/cil_tree.c:316:7
#5 0x5644513226af in cil_tree_walk_core
/selinux/libsepol/src/../cil/src/cil_tree.c:284:9
#6 0x564451322ab1 in cil_tree_walk
/selinux/libsepol/src/../cil/src/cil_tree.c:316:7
#7 0x5644512b88fd in cil_pre_verify
/selinux/libsepol/src/../cil/src/cil_post.c:2510:7
#8 0x5644512b88fd in cil_post_process
/selinux/libsepol/src/../cil/src/cil_post.c:2524:7
#9 0x5644511856ff in cil_compile
/selinux/libsepol/src/../cil/src/cil.c:564:7
The classperms list of a classpermission rule is created and filled
in when classpermissionset rules are processed, so it doesn't own any
part of the list and shouldn't retain any of it when it is reset.
Destroy the classperms list (without destroying the data in it) when
resetting a classpermission rule.
Reported-by: Nicolas Iooss <[email protected]>
Signed-off-by: James Carter <[email protected]>
|
del_menutrans_vars(void)
{
hashitem_T *hi;
int todo;
hash_lock(&globvarht);
todo = (int)globvarht.ht_used;
for (hi = globvarht.ht_array; todo > 0 && !got_int; ++hi)
{
if (!HASHITEM_EMPTY(hi))
{
--todo;
if (STRNCMP(HI2DI(hi)->di_key, "menutrans_", 10) == 0)
delete_var(&globvarht, hi);
}
}
hash_unlock(&globvarht);
}
| 0 |
[
"CWE-476"
] |
vim
|
0f6e28f686dbb59ab3b562408ab9b2234797b9b1
| 174,144,574,563,817,430,000,000,000,000,000,000,000 | 18 |
patch 8.2.4428: crash when switching tabpage while in the cmdline window
Problem: Crash when switching tabpage while in the cmdline window.
Solution: Disallow switching tabpage when in the cmdline window.
|
static void item_unlink_q(item *it) {
pthread_mutex_lock(&lru_locks[it->slabs_clsid]);
do_item_unlink_q(it);
pthread_mutex_unlock(&lru_locks[it->slabs_clsid]);
}
| 0 |
[
"CWE-190"
] |
memcached
|
bd578fc34b96abe0f8d99c1409814a09f51ee71c
| 101,246,997,127,230,880,000,000,000,000,000,000,000 | 5 |
CVE reported by cisco talos
|
tiffcvt(TIFF* in, TIFF* out)
{
uint32 width, height; /* image width & height */
uint16 shortv;
float floatv;
char *stringv;
uint32 longv;
uint16 v[1];
TIFFGetField(in, TIFFTAG_IMAGEWIDTH, &width);
TIFFGetField(in, TIFFTAG_IMAGELENGTH, &height);
CopyField(TIFFTAG_SUBFILETYPE, longv);
TIFFSetField(out, TIFFTAG_IMAGEWIDTH, width);
TIFFSetField(out, TIFFTAG_IMAGELENGTH, height);
TIFFSetField(out, TIFFTAG_BITSPERSAMPLE, 8);
TIFFSetField(out, TIFFTAG_COMPRESSION, compression);
TIFFSetField(out, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB);
CopyField(TIFFTAG_FILLORDER, shortv);
TIFFSetField(out, TIFFTAG_ORIENTATION, ORIENTATION_TOPLEFT);
if( no_alpha )
TIFFSetField(out, TIFFTAG_SAMPLESPERPIXEL, 3);
else
TIFFSetField(out, TIFFTAG_SAMPLESPERPIXEL, 4);
if( !no_alpha )
{
v[0] = EXTRASAMPLE_ASSOCALPHA;
TIFFSetField(out, TIFFTAG_EXTRASAMPLES, 1, v);
}
CopyField(TIFFTAG_XRESOLUTION, floatv);
CopyField(TIFFTAG_YRESOLUTION, floatv);
CopyField(TIFFTAG_RESOLUTIONUNIT, shortv);
TIFFSetField(out, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG);
TIFFSetField(out, TIFFTAG_SOFTWARE, TIFFGetVersion());
CopyField(TIFFTAG_DOCUMENTNAME, stringv);
if (maxMalloc != 0 && TIFFStripSize(in) > maxMalloc)
{
TIFFError(TIFFFileName(in),
"Strip Size " TIFF_UINT64_FORMAT " over memory limit (" TIFF_UINT64_FORMAT ")",
(uint64)TIFFStripSize(in), (uint64)maxMalloc);
return 0;
}
if( process_by_block && TIFFIsTiled( in ) )
return( cvt_by_tile( in, out ) );
else if( process_by_block )
return( cvt_by_strip( in, out ) );
else
return( cvt_whole_image( in, out ) );
}
| 0 |
[
"CWE-119"
] |
libtiff
|
98a254f5b92cea22f5436555ff7fceb12afee84d
| 170,167,887,180,386,860,000,000,000,000,000,000,000 | 54 |
enforce (configurable) memory limit in tiff2rgba
fixes #207
fixes #209
|
static void mariadb_get_charset_info(MYSQL *mysql, MY_CHARSET_INFO *cs)
{
if (!cs)
return;
cs->number= mysql->charset->nr;
cs->csname= mysql->charset->csname;
cs->name= mysql->charset->name;
cs->state= 0;
cs->comment= NULL;
cs->dir= NULL;
cs->mbminlen= mysql->charset->char_minlen;
cs->mbmaxlen= mysql->charset->char_maxlen;
return;
}
| 0 |
[] |
mariadb-connector-c
|
27b2f3d1f1550dfaee0f63a331a406ab31c1b37e
| 234,853,578,981,854,850,000,000,000,000,000,000,000 | 16 |
various checks for corrupted packets in the protocol
also: check the return value of unpack_fields()
|
static void xdr_buf_tail_copy_left(const struct xdr_buf *buf, unsigned int base,
unsigned int len, unsigned int shift)
{
const struct kvec *tail = buf->tail;
if (base >= tail->iov_len)
return;
if (len > tail->iov_len - base)
len = tail->iov_len - base;
/* Shift data into head */
if (shift > buf->page_len + base) {
const struct kvec *head = buf->head;
unsigned int hdto =
head->iov_len + buf->page_len + base - shift;
unsigned int hdlen = len;
if (WARN_ONCE(shift > head->iov_len + buf->page_len + base,
"SUNRPC: Misaligned data.\n"))
return;
if (hdto + hdlen > head->iov_len)
hdlen = head->iov_len - hdto;
memcpy(head->iov_base + hdto, tail->iov_base + base, hdlen);
base += hdlen;
len -= hdlen;
if (!len)
return;
}
/* Shift data into pages */
if (shift > base) {
unsigned int pgto = buf->page_len + base - shift;
unsigned int pglen = len;
if (pgto + pglen > buf->page_len)
pglen = buf->page_len - pgto;
_copy_to_pages(buf->pages, buf->page_base + pgto,
tail->iov_base + base, pglen);
base += pglen;
len -= pglen;
if (!len)
return;
}
memmove(tail->iov_base + base - shift, tail->iov_base + base, len);
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
linux
|
6d1c0f3d28f98ea2736128ed3e46821496dc3a8c
| 165,365,942,419,942,170,000,000,000,000,000,000,000 | 43 |
sunrpc: Avoid a KASAN slab-out-of-bounds bug in xdr_set_page_base()
This seems to happen fairly easily during READ_PLUS testing on NFS v4.2.
I found that we could end up accessing xdr->buf->pages[pgnr] with a pgnr
greater than the number of pages in the array. So let's just return
early if we're setting base to a point at the end of the page data and
let xdr_set_tail_base() handle setting up the buffer pointers instead.
Signed-off-by: Anna Schumaker <[email protected]>
Fixes: 8d86e373b0ef ("SUNRPC: Clean up helpers xdr_set_iov() and xdr_set_page_base()")
Signed-off-by: Trond Myklebust <[email protected]>
|
static int check_tty_count(struct tty_struct *tty, const char *routine)
{
#ifdef CHECK_TTY_COUNT
struct list_head *p;
int count = 0;
spin_lock(&tty_files_lock);
list_for_each(p, &tty->tty_files) {
count++;
}
spin_unlock(&tty_files_lock);
if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_SLAVE &&
tty->link && tty->link->count)
count++;
if (tty->count != count) {
tty_warn(tty, "%s: tty->count(%d) != #fd's(%d)\n",
routine, tty->count, count);
return count;
}
#endif
return 0;
}
| 0 |
[
"CWE-200",
"CWE-362"
] |
linux
|
5c17c861a357e9458001f021a7afa7aab9937439
| 53,308,435,656,836,810,000,000,000,000,000,000,000 | 23 |
tty: Fix unsafe ldisc reference via ioctl(TIOCGETD)
ioctl(TIOCGETD) retrieves the line discipline id directly from the
ldisc because the line discipline id (c_line) in termios is untrustworthy;
userspace may have set termios via ioctl(TCSETS*) without actually
changing the line discipline via ioctl(TIOCSETD).
However, directly accessing the current ldisc via tty->ldisc is
unsafe; the ldisc ptr dereferenced may be stale if the line discipline
is changing via ioctl(TIOCSETD) or hangup.
Wait for the line discipline reference (just like read() or write())
to retrieve the "current" line discipline id.
Cc: <[email protected]>
Signed-off-by: Peter Hurley <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
static int rdn_modify_callback(struct ldb_request *req, struct ldb_reply *ares)
{
struct rename_context *ac;
ac = talloc_get_type(req->context, struct rename_context);
if (!ares) {
return ldb_module_done(ac->req, NULL, NULL,
LDB_ERR_OPERATIONS_ERROR);
}
if (ares->type == LDB_REPLY_REFERRAL) {
return ldb_module_send_referral(ac->req, ares->referral);
}
if (ares->error != LDB_SUCCESS) {
return ldb_module_done(ac->req, ares->controls,
ares->response, ares->error);
}
/* the only supported reply right now is a LDB_REPLY_DONE */
if (ares->type != LDB_REPLY_DONE) {
return ldb_module_done(ac->req, NULL, NULL,
LDB_ERR_OPERATIONS_ERROR);
}
/* send saved controls eventually */
return ldb_module_done(ac->req, ac->ares->controls,
ac->ares->response, LDB_SUCCESS);
}
| 0 |
[
"CWE-200"
] |
samba
|
0a3aa5f908e351201dc9c4d4807b09ed9eedff77
| 302,025,647,224,329,600,000,000,000,000,000,000,000 | 30 |
CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <[email protected]>
|
fill_evalarg_from_eap(evalarg_T *evalarg, exarg_T *eap, int skip)
{
init_evalarg(evalarg);
evalarg->eval_flags = skip ? 0 : EVAL_EVALUATE;
if (eap != NULL)
{
evalarg->eval_cstack = eap->cstack;
if (getline_equal(eap->getline, eap->cookie, getsourceline))
{
evalarg->eval_getline = eap->getline;
evalarg->eval_cookie = eap->cookie;
}
}
}
| 0 |
[
"CWE-122",
"CWE-787"
] |
vim
|
605ec91e5a7330d61be313637e495fa02a6dc264
| 99,457,302,818,775,640,000,000,000,000,000,000,000 | 14 |
patch 8.2.3847: illegal memory access when using a lambda with an error
Problem: Illegal memory access when using a lambda with an error.
Solution: Avoid skipping over the NUL after a string.
|
PHP_FUNCTION(fnmatch)
{
char *pattern, *filename;
int pattern_len, filename_len;
long flags = 0;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "ss|l", &pattern, &pattern_len, &filename, &filename_len, &flags) == FAILURE) {
return;
}
if (filename_len >= MAXPATHLEN) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Filename exceeds the maximum allowed length of %d characters", MAXPATHLEN);
RETURN_FALSE;
}
if (pattern_len >= MAXPATHLEN) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Pattern exceeds the maximum allowed length of %d characters", MAXPATHLEN);
RETURN_FALSE;
}
RETURN_BOOL( ! fnmatch( pattern, filename, flags ));
}
| 1 |
[] |
php-src
|
ce96fd6b0761d98353761bf78d5bfb55291179fd
| 272,621,086,094,644,100,000,000,000,000,000,000,000 | 21 |
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs). Patch by Rasmus
|
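A sketch of the kind of check this commit message describes (not the macro PHP eventually added): a caller-supplied length longer than strlen() means the string carries an embedded NUL and should be rejected, since the C-string APIs underneath would silently truncate it.

#include <string.h>

/* Returns nonzero if 's' contains a NUL before 'reported_len' bytes. */
static int has_embedded_nul(const char *s, size_t reported_len)
{
    return strlen(s) != reported_len;
}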
static int mymemwrite(RAnalEsil *esil, ut64 addr, const ut8 *buf, int len) {
RListIter *iter;
AeaMemItem *n;
r_list_foreach (mymemxsw, iter, n) {
if (addr == n->addr) {
return len;
}
}
if (!r_io_is_valid_offset (esil->anal->iob.io, addr, 0)) {
return false;
}
n = R_NEW (AeaMemItem);
if (n) {
n->addr = addr;
n->size = len;
r_list_push (mymemxsw, n);
}
return len;
}
| 0 |
[
"CWE-125",
"CWE-787"
] |
radare2
|
a1bc65c3db593530775823d6d7506a457ed95267
| 36,960,057,113,245,486,000,000,000,000,000,000,000 | 19 |
Fix #12375 - Crash in bd+ao (#12382)
|
void Server::handleCommand_Init2(NetworkPacket* pkt)
{
session_t peer_id = pkt->getPeerId();
verbosestream << "Server: Got TOSERVER_INIT2 from " << peer_id << std::endl;
m_clients.event(peer_id, CSE_GotInit2);
u16 protocol_version = m_clients.getProtocolVersion(peer_id);
std::string lang;
if (pkt->getSize() > 0)
*pkt >> lang;
/*
Send some initialization data
*/
infostream << "Server: Sending content to " << getPlayerName(peer_id) <<
std::endl;
// Send item definitions
SendItemDef(peer_id, m_itemdef, protocol_version);
// Send node definitions
SendNodeDef(peer_id, m_nodedef, protocol_version);
m_clients.event(peer_id, CSE_SetDefinitionsSent);
// Send media announcement
sendMediaAnnouncement(peer_id, lang);
RemoteClient *client = getClient(peer_id, CS_InitDone);
// Keep client language for server translations
client->setLangCode(lang);
// Send active objects
{
PlayerSAO *sao = getPlayerSAO(peer_id);
if (client && sao)
SendActiveObjectRemoveAdd(client, sao);
}
// Send detached inventories
sendDetachedInventories(peer_id, false);
// Send player movement settings
SendMovement(peer_id);
// Send time of day
u16 time = m_env->getTimeOfDay();
float time_speed = g_settings->getFloat("time_speed");
SendTimeOfDay(peer_id, time, time_speed);
SendCSMRestrictionFlags(peer_id);
// Warnings about protocol version can be issued here
if (client->net_proto_version < LATEST_PROTOCOL_VERSION) {
SendChatMessage(peer_id, ChatMessage(CHATMESSAGE_TYPE_SYSTEM,
L"# Server: WARNING: YOUR CLIENT'S VERSION MAY NOT BE FULLY COMPATIBLE "
L"WITH THIS SERVER!"));
}
}
| 0 |
[
"CWE-276"
] |
minetest
|
3693b6871eba268ecc79b3f52d00d3cefe761131
| 306,482,839,987,909,970,000,000,000,000,000,000,000 | 62 |
Prevent players accessing inventories of other players (#10341)
|
static inline void arch_dup_mmap(struct mm_struct *oldmm,
struct mm_struct *mm)
{
paravirt_arch_dup_mmap(oldmm, mm);
}
| 0 |
[
"CWE-362"
] |
linux
|
71b3c126e61177eb693423f2e18a1914205b165e
| 175,450,320,719,429,560,000,000,000,000,000,000,000 | 5 |
x86/mm: Add barriers and document switch_mm()-vs-flush synchronization
When switch_mm() activates a new PGD, it also sets a bit that
tells other CPUs that the PGD is in use so that TLB flush IPIs
will be sent. In order for that to work correctly, the bit
needs to be visible prior to loading the PGD and therefore
starting to fill the local TLB.
Document all the barriers that make this work correctly and add
a couple that were missing.
Signed-off-by: Andy Lutomirski <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Brian Gerst <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Ingo Molnar <[email protected]>
|
Pl_LZWDecoder::write(unsigned char* bytes, size_t len)
{
for (size_t i = 0; i < len; ++i)
{
this->buf[next++] = bytes[i];
if (this->next == 3)
{
this->next = 0;
}
this->bits_available += 8;
if (this->bits_available >= this->code_size)
{
sendNextCode();
}
}
}
| 0 |
[
"CWE-787"
] |
qpdf
|
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
| 275,606,982,036,743,500,000,000,000,000,000,000,000 | 16 |
Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition.
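A minimal standalone sketch of the range-checked conversion style the message describes (the helper name is hypothetical, not qpdf's actual API):
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Convert with an explicit range check instead of a silent narrowing cast. */
static int longlong_to_int(long long v)
{
    if (v < INT_MIN || v > INT_MAX) {
        fprintf(stderr, "integer value out of range for int\n");
        abort();                 /* fail loudly in the right spot */
    }
    return (int)v;
}

int main(void)
{
    printf("%d\n", longlong_to_int(123456));   /* fine */
    longlong_to_int(1LL << 40);                /* aborts instead of wrapping */
    return 0;
}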
|
static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
struct alloc_chunk_ctl *ctl,
struct btrfs_device_info *devices_info)
{
struct btrfs_fs_info *info = fs_devices->fs_info;
/*
* Round down to number of usable stripes, devs_increment can be any
* number so we can't use round_down() that requires power of 2, while
* rounddown is safe.
*/
ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment);
if (ctl->ndevs < ctl->devs_min) {
if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
btrfs_debug(info,
"%s: not enough devices with free space: have=%d minimum required=%d",
__func__, ctl->ndevs, ctl->devs_min);
}
return -ENOSPC;
}
ctl->ndevs = min(ctl->ndevs, ctl->devs_max);
switch (fs_devices->chunk_alloc_policy) {
case BTRFS_CHUNK_ALLOC_REGULAR:
return decide_stripe_size_regular(ctl, devices_info);
case BTRFS_CHUNK_ALLOC_ZONED:
return decide_stripe_size_zoned(ctl, devices_info);
default:
BUG();
}
}
| 0 |
[
"CWE-476",
"CWE-703"
] |
linux
|
e4571b8c5e9ffa1e85c0c671995bd4dcc5c75091
| 304,751,717,331,678,470,000,000,000,000,000,000,000 | 33 |
btrfs: fix NULL pointer dereference when deleting device by invalid id
[BUG]
It's easy to trigger a NULL pointer dereference, just by removing a
non-existing device id:
# mkfs.btrfs -f -m single -d single /dev/test/scratch1 \
/dev/test/scratch2
# mount /dev/test/scratch1 /mnt/btrfs
# btrfs device remove 3 /mnt/btrfs
Then we have the following kernel NULL pointer dereference:
BUG: kernel NULL pointer dereference, address: 0000000000000000
#PF: supervisor read access in kernel mode
#PF: error_code(0x0000) - not-present page
PGD 0 P4D 0
Oops: 0000 [#1] PREEMPT SMP NOPTI
CPU: 9 PID: 649 Comm: btrfs Not tainted 5.14.0-rc3-custom+ #35
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
RIP: 0010:btrfs_rm_device+0x4de/0x6b0 [btrfs]
btrfs_ioctl+0x18bb/0x3190 [btrfs]
? lock_is_held_type+0xa5/0x120
? find_held_lock.constprop.0+0x2b/0x80
? do_user_addr_fault+0x201/0x6a0
? lock_release+0xd2/0x2d0
? __x64_sys_ioctl+0x83/0xb0
__x64_sys_ioctl+0x83/0xb0
do_syscall_64+0x3b/0x90
entry_SYSCALL_64_after_hwframe+0x44/0xae
[CAUSE]
Commit a27a94c2b0c7 ("btrfs: Make btrfs_find_device_by_devspec return
btrfs_device directly") moves the "missing" device path check into
btrfs_rm_device().
But btrfs_rm_device() itself can have case where it only receives
@devid, with NULL as @device_path.
In that case, calling strcmp() on NULL will trigger the NULL pointer
dereference.
Before that commit, we handle the "missing" case inside
btrfs_find_device_by_devspec(), which will not check @device_path at all
if @devid is provided, thus no way to trigger the bug.
[FIX]
Before calling strcmp(), also make sure @device_path is not NULL.
Fixes: a27a94c2b0c7 ("btrfs: Make btrfs_find_device_by_devspec return btrfs_device directly")
CC: [email protected] # 5.4+
Reported-by: butt3rflyh4ck <[email protected]>
Reviewed-by: Anand Jain <[email protected]>
Signed-off-by: Qu Wenruo <[email protected]>
Reviewed-by: David Sterba <[email protected]>
Signed-off-by: David Sterba <[email protected]>
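A hedged sketch of the guard described under [FIX] — test the pointer before handing it to strcmp():
/* Sketch: @device_path may legitimately be NULL when only @devid is
 * given, so check it before dereferencing. */
if (device_path && strcmp(device_path, "missing") == 0) {
	/* handle the "missing" device case */
} else {
	/* fall back to lookup by devid as before */
}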
|
int snd_interval_ratnum(struct snd_interval *i,
unsigned int rats_count, const struct snd_ratnum *rats,
unsigned int *nump, unsigned int *denp)
{
unsigned int best_num, best_den;
int best_diff;
unsigned int k;
struct snd_interval t;
int err;
unsigned int result_num, result_den;
int result_diff;
best_num = best_den = best_diff = 0;
for (k = 0; k < rats_count; ++k) {
unsigned int num = rats[k].num;
unsigned int den;
unsigned int q = i->min;
int diff;
if (q == 0)
q = 1;
den = div_up(num, q);
if (den < rats[k].den_min)
continue;
if (den > rats[k].den_max)
den = rats[k].den_max;
else {
unsigned int r;
r = (den - rats[k].den_min) % rats[k].den_step;
if (r != 0)
den -= r;
}
diff = num - q * den;
if (diff < 0)
diff = -diff;
if (best_num == 0 ||
diff * best_den < best_diff * den) {
best_diff = diff;
best_den = den;
best_num = num;
}
}
if (best_den == 0) {
i->empty = 1;
return -EINVAL;
}
t.min = div_down(best_num, best_den);
t.openmin = !!(best_num % best_den);
result_num = best_num;
result_diff = best_diff;
result_den = best_den;
best_num = best_den = best_diff = 0;
for (k = 0; k < rats_count; ++k) {
unsigned int num = rats[k].num;
unsigned int den;
unsigned int q = i->max;
int diff;
if (q == 0) {
i->empty = 1;
return -EINVAL;
}
den = div_down(num, q);
if (den > rats[k].den_max)
continue;
if (den < rats[k].den_min)
den = rats[k].den_min;
else {
unsigned int r;
r = (den - rats[k].den_min) % rats[k].den_step;
if (r != 0)
den += rats[k].den_step - r;
}
diff = q * den - num;
if (diff < 0)
diff = -diff;
if (best_num == 0 ||
diff * best_den < best_diff * den) {
best_diff = diff;
best_den = den;
best_num = num;
}
}
if (best_den == 0) {
i->empty = 1;
return -EINVAL;
}
t.max = div_up(best_num, best_den);
t.openmax = !!(best_num % best_den);
t.integer = 0;
err = snd_interval_refine(i, &t);
if (err < 0)
return err;
if (snd_interval_single(i)) {
if (best_diff * result_den < result_diff * best_den) {
result_num = best_num;
result_den = best_den;
}
if (nump)
*nump = result_num;
if (denp)
*denp = result_den;
}
return err;
}
| 0 |
[
"CWE-416",
"CWE-362"
] |
linux
|
3aa02cb664c5fb1042958c8d1aa8c35055a2ebc4
| 212,473,843,118,736,860,000,000,000,000,000,000,000 | 105 |
ALSA: pcm : Call kill_fasync() in stream lock
Currently kill_fasync() is called outside the stream lock in
snd_pcm_period_elapsed(). This is potentially racy, since the stream
may get released even during the irq handler is running. Although
snd_pcm_release_substream() calls snd_pcm_drop(), this doesn't
guarantee that the irq handler finishes, thus the kill_fasync() call
outside the stream spin lock may be invoked after the substream is
detached, as recently reported by KASAN.
As a quick workaround, move kill_fasync() call inside the stream
lock. The fasync is rarely used interface, so this shouldn't have a
big impact from the performance POV.
Ideally, we should implement some sync mechanism for the proper finish
of stream and irq handler. But this oneliner should suffice for most
cases, so far.
Reported-by: Baozeng Ding <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]>
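A rough sketch of the workaround's shape (not the exact patch): issue the fasync notification while the stream spinlock is held, so the substream cannot be released underneath the irq handler.
snd_pcm_stream_lock_irqsave(substream, flags);
/* ... period bookkeeping ... */
kill_fasync(&runtime->fasync, SIGIO, POLL_IN);  /* now inside the lock */
snd_pcm_stream_unlock_irqrestore(substream, flags);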
|
collated_compare (const void *a, const void *b)
{
char *const *ps1 = a; char *s1 = *ps1;
char *const *ps2 = b; char *s2 = *ps2;
if (s1 == s2)
return 0;
if (s1 == NULL)
return 1;
if (s2 == NULL)
return -1;
return strcoll (s1, s2);
}
| 0 |
[
"CWE-119"
] |
gnulib
|
2d1bd71ec70a31b01d01b734faa66bb1ed28961f
| 18,861,264,397,592,540,000,000,000,000,000,000,000 | 13 |
glob: fix heap buffer overflow
* lib/glob.c (glob): Fix off-by-one error introduced into
glibc in commit dd7d45e838a42b0ed470c44b55901ea98d0c2bab
dated 1997-10-29 20:33:40. Problem reported by Tim Rühsen in:
https://sourceware.org/bugzilla/show_bug.cgi?id=22320
Fix suggested by Bruno Haible.
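The referenced bug class in a contrived standalone form: allocating one byte too few for a copied string, so the NUL terminator lands past the end of the heap block.
#include <stdlib.h>
#include <string.h>

int main(void)
{
    const char *home = "/home/someuser";
    /* Off-by-one: strlen() excludes the terminating NUL. */
    char *bad = malloc(strlen(home));          /* one byte short */
    /* strcpy(bad, home); */                   /* would write strlen+1 bytes:
                                                  heap buffer overflow */
    char *good = malloc(strlen(home) + 1);     /* room for the NUL */
    strcpy(good, home);
    free(bad);
    free(good);
    return 0;
}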
|
const char *h2_conn_mpm_name(void)
{
check_modules(0);
return mpm_module? mpm_module->name : "unknown";
}
| 0 |
[
"CWE-444"
] |
mod_h2
|
825de6a46027b2f4c30d7ff5a0c8b852d639c207
| 76,663,144,940,349,980,000,000,000,000,000,000,000 | 5 |
* Fixed keepalives counter on slave connections.
|
static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
struct flowi *fl,
unsigned int family)
{
return __xfrm_decode_session(skb, fl, family, 1);
}
| 0 |
[
"CWE-416"
] |
linux
|
dbb2483b2a46fbaf833cfb5deb5ed9cace9c7399
| 197,257,128,769,891,700,000,000,000,000,000,000,000 | 6 |
xfrm: clean up xfrm protocol checks
In commit 6a53b7593233 ("xfrm: check id proto in validate_tmpl()")
I introduced a check for xfrm protocol, but according to Herbert
IPSEC_PROTO_ANY should only be used as a wildcard for lookup, so
it should be removed from validate_tmpl().
And, IPSEC_PROTO_ANY is expected to only match 3 IPSec-specific
protocols, which is why xfrm_state_flush() could still miss
IPPROTO_ROUTING; as a result those entries are left in
net->xfrm.state_all before the net exits. Fix this by replacing
IPSEC_PROTO_ANY with zero.
This patch also extracts the check from validate_tmpl() to
xfrm_id_proto_valid() and uses it in parse_ipsecrequest().
With this, no other protocols should be added into xfrm.
Fixes: 6a53b7593233 ("xfrm: check id proto in validate_tmpl()")
Reported-by: [email protected]
Cc: Steffen Klassert <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: Cong Wang <[email protected]>
Acked-by: Herbert Xu <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]>
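The extracted helper, sketched from the description above (consult the tree for the authoritative version):
static inline int xfrm_id_proto_valid(u8 proto)
{
	switch (proto) {
	case IPPROTO_AH:
	case IPPROTO_ESP:
	case IPPROTO_COMP:
#if IS_ENABLED(CONFIG_IPV6)
	case IPPROTO_ROUTING:
	case IPPROTO_DSTOPTS:
#endif
		return 1;	/* the only protocols xfrm may carry */
	default:
		return 0;
	}
}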
|
worker_handle_service_reply(struct comm_point* c, void* arg, int error,
struct comm_reply* reply_info)
{
struct outbound_entry* e = (struct outbound_entry*)arg;
struct worker* worker = e->qstate->env->worker;
struct serviced_query *sq = e->qsent;
verbose(VERB_ALGO, "worker svcd callback for qstate %p", e->qstate);
if(error != 0) {
mesh_report_reply(worker->env.mesh, e, reply_info, error);
worker_mem_report(worker, sq);
return 0;
}
/* sanity check. */
if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer))
|| LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) !=
LDNS_PACKET_QUERY
|| LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) {
/* error becomes timeout for the module as if this reply
* never arrived. */
verbose(VERB_ALGO, "worker: bad reply handled as timeout");
mesh_report_reply(worker->env.mesh, e, reply_info,
NETEVENT_TIMEOUT);
worker_mem_report(worker, sq);
return 0;
}
mesh_report_reply(worker->env.mesh, e, reply_info, NETEVENT_NOERROR);
worker_mem_report(worker, sq);
return 0;
}
| 0 |
[
"CWE-613",
"CWE-703"
] |
unbound
|
f6753a0f1018133df552347a199e0362fc1dac68
| 85,289,851,692,409,970,000,000,000,000,000,000,000 | 30 |
- Fix the novel ghost domain issues CVE-2022-30698 and CVE-2022-30699.
|
static inline bool cpu_has_vmx_invvpid_individual_addr(void)
{
return vmx_capability.vpid & VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT;
}
| 0 |
[
"CWE-284"
] |
linux
|
727ba748e110b4de50d142edca9d6a9b7e6111d8
| 212,064,345,823,001,150,000,000,000,000,000,000,000 | 4 |
kvm: nVMX: Enforce cpl=0 for VMX instructions
VMX instructions executed inside a L1 VM will always trigger a VM exit
even when executed with cpl 3. This means we must perform the
privilege check in software.
Fixes: 70f3aac964ae("kvm: nVMX: Remove superfluous VMX instruction fault checks")
Cc: [email protected]
Signed-off-by: Felix Wilhelm <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
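A sketch of the software privilege check the message calls for, in the shape such VMX exit handlers typically use (simplified):
/* Sketch: VMX instructions must raise #GP when executed at CPL > 0. */
if (vmx_get_cpl(vcpu)) {
	kvm_inject_gp(vcpu, 0);
	return 1;	/* exit handled; the guest sees #GP(0) */
}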
|
int usbnet_manage_power(struct usbnet *dev, int on)
{
dev->intf->needs_remote_wakeup = on;
return 0;
}
| 0 |
[
"CWE-703"
] |
linux
|
1666984c8625b3db19a9abc298931d35ab7bc64b
| 212,930,104,058,748,000,000,000,000,000,000,000,000 | 5 |
usbnet: cleanup after bind() in probe()
If bind() succeeds but a later error forces probe() to bail out,
work may already be queued and a timer may be scheduled.
They must be killed. This fixes an error case related to
the double free reported in
http://www.spinics.net/lists/netdev/msg367669.html
and needs to go on top of Linus' fix to cdc-ncm.
Signed-off-by: Oliver Neukum <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
int write_data_suffix(SORT_INFO *sort_info, my_bool fix_datafile)
{
MI_INFO *info=sort_info->info;
if (info->s->options & HA_OPTION_COMPRESS_RECORD && fix_datafile)
{
uchar buff[MEMMAP_EXTRA_MARGIN];
bzero(buff,sizeof(buff));
if (my_b_write(&info->rec_cache,buff,sizeof(buff)))
{
mi_check_print_error(sort_info->param,
"%d when writing to datafile",my_errno);
return 1;
}
sort_info->param->read_cache.end_of_file+=sizeof(buff);
}
return 0;
}
| 0 |
[
"CWE-362"
] |
mysql-server
|
4e5473862e6852b0f3802b0cd0c6fa10b5253291
| 52,196,584,319,734,530,000,000,000,000,000,000,000 | 18 |
Bug#24388746: PRIVILEGE ESCALATION AND RACE CONDITION USING CREATE TABLE
During REPAIR TABLE of a MyISAM table, a temporary data file (.TMD)
is created. When repair finishes, this file is renamed to the original
.MYD file. The problem was that during this rename, we copied the
stats from the old file to the new file with chmod/chown. If a user
managed to replace the temporary file before chmod/chown was executed,
it was possible to get an arbitrary file with the privileges of the
mysql user.
This patch fixes the problem by not copying stats from the old
file to the new file. This is not needed as the new file was
created with the correct stats. This fix only changes server
behavior - external utilities such as myisamchk still does
chmod/chown.
No test case provided since the problem involves synchronization
with file system operations.
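A contrived sketch of the racy pattern the patch removes — path-based permission copying after a rename is a classic TOCTOU window (paths and mode are hypothetical):
#include <stdio.h>
#include <sys/stat.h>

/* Sketch: between rename() and chmod() an attacker who wins the race
 * can substitute another file, which then inherits the old file's
 * ownership and permissions. */
void racy_finish_repair(mode_t saved_mode)
{
    rename("t1.TMD", "t1.MYD");
    chmod("t1.MYD", saved_mode);   /* TOCTOU: path may point elsewhere now */
    /* Safer: create the temporary file with the final mode up front,
     * so no post-rename permission fixup is needed. */
}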
|
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
return(cache_info->type);
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
ImageMagick
|
aecd0ada163a4d6c769cec178955d5f3e9316f2f
| 58,223,125,318,446,485,000,000,000,000,000,000,000 | 12 |
Set pixel cache to undefined if any resource limit is exceeded
|
config_monitor(
config_tree *ptree
)
{
int_node *pfilegen_token;
const char *filegen_string;
const char *filegen_file;
FILEGEN *filegen;
filegen_node *my_node;
attr_val *my_opts;
int filegen_type;
int filegen_flag;
/* Set the statistics directory */
if (ptree->stats_dir)
stats_config(STATS_STATSDIR, ptree->stats_dir);
/* NOTE:
* Calling filegen_get is brain dead. Doing a string
* comparison to find the relevant filegen structure is
* expensive.
*
* Through the parser, we already know which filegen is
* being specified. Hence, we should either store a
* pointer to the specified structure in the syntax tree
* or an index into a filegen array.
*
* Need to change the filegen code to reflect the above.
*/
/* Turn on the specified statistics */
pfilegen_token = HEAD_PFIFO(ptree->stats_list);
for (; pfilegen_token != NULL; pfilegen_token = pfilegen_token->link) {
filegen_string = keyword(pfilegen_token->i);
filegen = filegen_get(filegen_string);
DPRINTF(4, ("enabling filegen for %s statistics '%s%s'\n",
filegen_string, filegen->prefix,
filegen->basename));
filegen->flag |= FGEN_FLAG_ENABLED;
}
/* Configure the statistics with the options */
my_node = HEAD_PFIFO(ptree->filegen_opts);
for (; my_node != NULL; my_node = my_node->link) {
filegen_file = keyword(my_node->filegen_token);
filegen = filegen_get(filegen_file);
/* Initialize the filegen variables to their pre-configuration states */
filegen_flag = filegen->flag;
filegen_type = filegen->type;
/* "filegen ... enabled" is the default (when filegen is used) */
filegen_flag |= FGEN_FLAG_ENABLED;
my_opts = HEAD_PFIFO(my_node->options);
for (; my_opts != NULL; my_opts = my_opts->link) {
switch (my_opts->attr) {
case T_File:
filegen_file = my_opts->value.s;
break;
case T_Type:
switch (my_opts->value.i) {
default:
NTP_INSIST(0);
break;
case T_None:
filegen_type = FILEGEN_NONE;
break;
case T_Pid:
filegen_type = FILEGEN_PID;
break;
case T_Day:
filegen_type = FILEGEN_DAY;
break;
case T_Week:
filegen_type = FILEGEN_WEEK;
break;
case T_Month:
filegen_type = FILEGEN_MONTH;
break;
case T_Year:
filegen_type = FILEGEN_YEAR;
break;
case T_Age:
filegen_type = FILEGEN_AGE;
break;
}
break;
case T_Flag:
switch (my_opts->value.i) {
case T_Link:
filegen_flag |= FGEN_FLAG_LINK;
break;
case T_Nolink:
filegen_flag &= ~FGEN_FLAG_LINK;
break;
case T_Enable:
filegen_flag |= FGEN_FLAG_ENABLED;
break;
case T_Disable:
filegen_flag &= ~FGEN_FLAG_ENABLED;
break;
default:
msyslog(LOG_ERR,
"Unknown filegen flag token %d",
my_opts->value.i);
exit(1);
}
break;
default:
msyslog(LOG_ERR,
"Unknown filegen option token %d",
my_opts->attr);
exit(1);
}
}
filegen_config(filegen, filegen_file, filegen_type,
filegen_flag);
}
}
| 1 |
[
"CWE-20"
] |
ntp
|
52e977d79a0c4ace997e5c74af429844da2f27be
| 195,872,773,758,886,800,000,000,000,000,000,000,000 | 137 |
[Bug 1773] openssl not detected during ./configure.
[Bug 1774] Segfaults if cryptostats enabled and built without OpenSSL.
|
static pack_t *FS_LoadZipFile(const char *zipfile, const char *basename)
{
fileInPack_t *buildBuffer;
pack_t *pack;
unzFile uf;
int err;
unz_global_info gi;
char filename_inzip[MAX_ZPATH];
unz_file_info file_info;
int i, len;
long hash;
int fs_numHeaderLongs;
int *fs_headerLongs;
char *namePtr;
fs_numHeaderLongs = 0;
uf = unzOpen(zipfile);
err = unzGetGlobalInfo (uf,&gi);
if (err != UNZ_OK)
return NULL;
len = 0;
unzGoToFirstFile(uf);
for (i = 0; i < gi.number_entry; i++)
{
err = unzGetCurrentFileInfo(uf, &file_info, filename_inzip, sizeof(filename_inzip), NULL, 0, NULL, 0);
if (err != UNZ_OK) {
break;
}
len += strlen(filename_inzip) + 1;
unzGoToNextFile(uf);
}
buildBuffer = Z_Malloc( (gi.number_entry * sizeof( fileInPack_t )) + len );
namePtr = ((char *) buildBuffer) + gi.number_entry * sizeof( fileInPack_t );
fs_headerLongs = Z_Malloc( ( gi.number_entry + 1 ) * sizeof(int) );
fs_headerLongs[ fs_numHeaderLongs++ ] = LittleLong( fs_checksumFeed );
// get the hash table size from the number of files in the zip
// because lots of custom pk3 files have less than 32 or 64 files
for (i = 1; i <= MAX_FILEHASH_SIZE; i <<= 1) {
if (i > gi.number_entry) {
break;
}
}
pack = Z_Malloc( sizeof( pack_t ) + i * sizeof(fileInPack_t *) );
pack->hashSize = i;
pack->hashTable = (fileInPack_t **) (((char *) pack) + sizeof( pack_t ));
for(i = 0; i < pack->hashSize; i++) {
pack->hashTable[i] = NULL;
}
Q_strncpyz( pack->pakFilename, zipfile, sizeof( pack->pakFilename ) );
Q_strncpyz( pack->pakBasename, basename, sizeof( pack->pakBasename ) );
// strip .pk3 if needed
if ( strlen( pack->pakBasename ) > 4 && !Q_stricmp( pack->pakBasename + strlen( pack->pakBasename ) - 4, ".pk3" ) ) {
pack->pakBasename[strlen( pack->pakBasename ) - 4] = 0;
}
pack->handle = uf;
pack->numfiles = gi.number_entry;
unzGoToFirstFile(uf);
for (i = 0; i < gi.number_entry; i++)
{
err = unzGetCurrentFileInfo(uf, &file_info, filename_inzip, sizeof(filename_inzip), NULL, 0, NULL, 0);
if (err != UNZ_OK) {
break;
}
if (file_info.uncompressed_size > 0) {
fs_headerLongs[fs_numHeaderLongs++] = LittleLong(file_info.crc);
}
Q_strlwr( filename_inzip );
hash = FS_HashFileName(filename_inzip, pack->hashSize);
buildBuffer[i].name = namePtr;
strcpy( buildBuffer[i].name, filename_inzip );
namePtr += strlen(filename_inzip) + 1;
// store the file position in the zip
buildBuffer[i].pos = unzGetOffset(uf);
buildBuffer[i].len = file_info.uncompressed_size;
buildBuffer[i].next = pack->hashTable[hash];
pack->hashTable[hash] = &buildBuffer[i];
unzGoToNextFile(uf);
}
pack->checksum = Com_BlockChecksum( &fs_headerLongs[ 1 ], sizeof(*fs_headerLongs) * ( fs_numHeaderLongs - 1 ) );
pack->pure_checksum = Com_BlockChecksum( fs_headerLongs, sizeof(*fs_headerLongs) * fs_numHeaderLongs );
pack->checksum = LittleLong( pack->checksum );
pack->pure_checksum = LittleLong( pack->pure_checksum );
Z_Free(fs_headerLongs);
pack->buildBuffer = buildBuffer;
return pack;
}
| 0 |
[
"CWE-269"
] |
ioq3
|
376267d534476a875d8b9228149c4ee18b74a4fd
| 131,857,655,445,534,430,000,000,000,000,000,000,000 | 99 |
Don't load .pk3s as .dlls, and don't load user config files from .pk3s.
|
static double y2scr_pos(ASS_Renderer *render_priv, double y)
{
return y * render_priv->orig_height / render_priv->track->PlayResY +
render_priv->settings.top_margin;
}
| 0 |
[
"CWE-125"
] |
libass
|
f4f48950788b91c6a30029cc28a240b834713ea7
| 279,793,767,688,325,600,000,000,000,000,000,000,000 | 5 |
Fix line wrapping mode 0/3 bugs
This fixes two separate bugs:
a) Don't move a linebreak into the first symbol. This results in an empty
line at the front, which does not help to equalize line lengths at all.
Instead, merge the line with the second one.
b) When moving a linebreak into a symbol that already is a break, the
number of lines must be decremented. Otherwise, uninitialized memory
is possibly used for later layout operations.
Found by fuzzer test case
id:000085,sig:11,src:003377+003350,op:splice,rep:8.
This might also affect and hopefully fix libass#229.
v2: change semantics according to review
|
static st32 getnummemendbang (const char *input) {
st32 res;
err = false;
if (!input || (strlen (input) < 2) || (input[strlen(input) - 2] != ']' || !r_str_endswith (input, "!"))) {
err = true;
return 0;
}
char *temp = r_str_ndup (input, strlen (input) - 2);
if (!temp) {
err = true;
return 0;
}
res = getnum (temp);
free (temp);
return res;
}
| 0 |
[
"CWE-125",
"CWE-787"
] |
radare2
|
e5c14c167b0dcf0a53d76bd50bacbbcc0dfc1ae7
| 321,250,880,244,037,620,000,000,000,000,000,000,000 | 16 |
Fix #12417/#12418 (arm assembler heap overflows)
|
static const char *server_hostname_port(cmd_parms *cmd, void *dummy, const char *arg)
{
const char *err = ap_check_cmd_context(cmd, NOT_IN_DIR_LOC_FILE);
const char *portstr, *part;
char *scheme;
int port;
if (err != NULL) {
return err;
}
if (apr_fnmatch_test(arg))
return apr_pstrcat(cmd->temp_pool, "Invalid ServerName \"", arg,
"\" use ServerAlias to set multiple server names.", NULL);
part = ap_strstr_c(arg, "://");
if (part) {
scheme = apr_pstrndup(cmd->pool, arg, part - arg);
ap_str_tolower(scheme);
cmd->server->server_scheme = (const char *)scheme;
part += 3;
} else {
part = arg;
}
portstr = ap_strchr_c(part, ':');
if (portstr) {
cmd->server->server_hostname = apr_pstrndup(cmd->pool, part,
portstr - part);
portstr++;
port = atoi(portstr);
if (port <= 0 || port >= 65536) { /* 65536 == 1<<16 */
return apr_pstrcat(cmd->temp_pool, "The port number \"", arg,
"\" is outside the appropriate range "
"(i.e., 1..65535).", NULL);
}
}
else {
cmd->server->server_hostname = apr_pstrdup(cmd->pool, part);
port = 0;
}
cmd->server->port = port;
return NULL;
}
| 0 |
[
"CWE-416",
"CWE-284"
] |
httpd
|
4cc27823899e070268b906ca677ee838d07cf67a
| 35,917,552,559,870,385,000,000,000,000,000,000,000 | 46 |
core: Disallow Methods' registration at run time (.htaccess), they may be
used only if registered at init time (httpd.conf).
Calling ap_method_register() in child processes is not the right scope
since it won't be shared for all requests.
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1807655 13f79535-47bb-0310-9956-ffa450edef68
|
void Item_allany_subselect::print(String *str, enum_query_type query_type)
{
if (test_strategy(SUBS_IN_TO_EXISTS))
str->append(STRING_WITH_LEN("<exists>"));
else
{
left_expr->print(str, query_type);
str->append(' ');
str->append(func->symbol(all));
str->append(all ? " all " : " any ", 5);
}
Item_subselect::print(str, query_type);
}
| 0 |
[
"CWE-89"
] |
server
|
3c209bfc040ddfc41ece8357d772547432353fd2
| 23,807,505,486,623,910,000,000,000,000,000,000,000 | 13 |
MDEV-25994: Crash with union of my_decimal type in ORDER BY clause
When a single-row subquery fails with "Subquery returns more than 1 row"
error, it will raise an error and return NULL.
On the other hand, Item_singlerow_subselect sets item->maybe_null=0
for table-less subqueries like "(SELECT not_null_value)" (*)
This discrepancy (item with maybe_null=0 returning NULL) causes the
code in Type_handler_decimal_result::make_sort_key_part() to crash.
Fixed this by allowing inference (*) only when the subquery is NOT a
UNION.
|
_copyCreateForeignTableStmt(const CreateForeignTableStmt *from)
{
CreateForeignTableStmt *newnode = makeNode(CreateForeignTableStmt);
CopyCreateStmtFields((const CreateStmt *) from, (CreateStmt *) newnode);
COPY_STRING_FIELD(servername);
COPY_NODE_FIELD(options);
return newnode;
}
| 0 |
[
"CWE-362"
] |
postgres
|
5f173040e324f6c2eebb90d86cf1b0cdb5890f0a
| 18,424,477,631,395,629,000,000,000,000,000,000,000 | 11 |
Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062
|
void HttpIntegrationTest::checkSimpleRequestSuccess(uint64_t expected_request_size,
uint64_t expected_response_size,
IntegrationStreamDecoder* response) {
EXPECT_TRUE(upstream_request_->complete());
EXPECT_EQ(expected_request_size, upstream_request_->bodyLength());
ASSERT_TRUE(response->complete());
EXPECT_EQ("200", response->headers().getStatusValue());
EXPECT_EQ(expected_response_size, response->body().size());
}
| 0 |
[
"CWE-400"
] |
envoy
|
0e49a495826ea9e29134c1bd54fdeb31a034f40c
| 127,278,987,846,285,380,000,000,000,000,000,000,000 | 10 |
http/2: add stats and stream flush timeout (#139)
This commit adds a new stream flush timeout to guard against a
remote server that does not open window once an entire stream has
been buffered for flushing. Additional stats have also been added
to better understand the codecs view of active streams as well as
amount of data buffered.
Signed-off-by: Matt Klein <[email protected]>
|
int tls12_get_sigandhash(unsigned char *p, const EVP_PKEY *pk, const EVP_MD *md)
{
int sig_id, md_id;
md_id = tls12_find_id(EVP_MD_type(md), tls12_md,
sizeof(tls12_md)/sizeof(tls12_lookup));
if (md_id == -1)
return 0;
sig_id = tls12_get_sigid(pk);
if (sig_id == -1)
return 0;
p[0] = (unsigned char)md_id;
p[1] = (unsigned char)sig_id;
return 1;
}
| 0 |
[] |
openssl
|
4817504d069b4c5082161b02a22116ad75f822b1
| 59,555,856,537,197,730,000,000,000,000,000,000,000 | 14 |
PR: 2658
Submitted by: Robin Seggelmann <[email protected]>
Reviewed by: steve
Support for TLS/DTLS heartbeats.
|
Compression_method *compression_method() const
{ return compression_method_ptr; }
| 0 |
[
"CWE-416",
"CWE-703"
] |
server
|
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
| 187,386,118,023,146,900,000,000,000,000,000,000,000 | 2 |
MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter prematurely freed item.
When the table is reopened from the cache, vcol_info contains a stale
expression. We refresh the expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]>
|
_equalConst(const Const *a, const Const *b)
{
COMPARE_SCALAR_FIELD(consttype);
COMPARE_SCALAR_FIELD(consttypmod);
COMPARE_SCALAR_FIELD(constcollid);
COMPARE_SCALAR_FIELD(constlen);
COMPARE_SCALAR_FIELD(constisnull);
COMPARE_SCALAR_FIELD(constbyval);
COMPARE_LOCATION_FIELD(location);
/*
* We treat all NULL constants of the same type as equal. Someday this
* might need to change? But datumIsEqual doesn't work on nulls, so...
*/
if (a->constisnull)
return true;
return datumIsEqual(a->constvalue, b->constvalue,
a->constbyval, a->constlen);
}
| 0 |
[
"CWE-362"
] |
postgres
|
5f173040e324f6c2eebb90d86cf1b0cdb5890f0a
| 245,589,810,028,187,960,000,000,000,000,000,000,000 | 19 |
Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062
|
static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
{
int ret;
/* memory barrier prior to reading state->n_* */
smp_rmb();
ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
if (ret != 0)
return ret;
ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
if (ret != 0)
return ret;
ret = nfs4_open_recover_helper(opendata, FMODE_READ);
if (ret != 0)
return ret;
/*
* We may have performed cached opens for all three recoveries.
* Check if we need to update the current stateid.
*/
if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
!nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
write_seqlock(&state->seqlock);
if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
nfs4_stateid_copy(&state->stateid, &state->open_stateid);
write_sequnlock(&state->seqlock);
}
return 0;
}
| 0 |
[
"CWE-787"
] |
linux
|
b4487b93545214a9db8cbf32e86411677b0cca21
| 257,932,162,284,368,900,000,000,000,000,000,000,000 | 28 |
nfs: Fix getxattr kernel panic and memory overflow
Move the buffer size check to decode_attr_security_label() before memcpy()
Only call memcpy() if the buffer is large enough
Fixes: aa9c2669626c ("NFS: Client implementation of Labeled-NFS")
Signed-off-by: Jeffrey Mitchell <[email protected]>
[Trond: clean up duplicate test of label->len != 0]
Signed-off-by: Trond Myklebust <[email protected]>
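A hedged sketch of the reordered decode path the message describes — validate the destination size before copying:
/* Sketch: check the caller's buffer first, copy second. */
if (label) {
	if (label->len < len)
		return -ERANGE;	/* too small: fail before touching memory */
	memcpy(label->label, p, len);
	label->len = len;
}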
|
SYSCALL_DEFINE2(ftruncate, unsigned int, fd, unsigned long, length)
{
long ret = do_sys_ftruncate(fd, length, 1);
/* avoid REGPARM breakage on x86: */
asmlinkage_protect(2, ret, fd, length);
return ret;
}
| 0 |
[
"CWE-732"
] |
linux-stable
|
e57712ebebbb9db7d8dcef216437b3171ddcf115
| 42,782,614,012,321,600,000,000,000,000,000,000,000 | 7 |
merge fchmod() and fchmodat() guts, kill ancient broken kludge
The kludge in question is undocumented and doesn't work for 32bit
binaries on amd64, sparc64 and s390. Passing (mode_t)-1 as
mode has (since 0.99.14v, and contrary to the behaviour of any
other Unix and the prescriptions of POSIX, SuS and our own manpages)
been a kinda-sorta no-op. Note that any software relying on
that (and looking for examples shows none) would be visibly
broken on sparc64, where practically all userland is built
32bit. No such complaints noticed...
Signed-off-by: Al Viro <[email protected]>
|
nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
struct tc_cls_u32_knode *knode,
__be16 proto, struct netlink_ext_ack *extack)
{
struct nfp_abm_u32_match *match = NULL, *iter;
unsigned int tos_off;
u8 mask, val;
int err;
if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack)) {
err = -EOPNOTSUPP;
goto err_delete;
}
tos_off = proto == htons(ETH_P_IP) ? 16 : 20;
/* Extract the DSCP Class Selector bits */
val = be32_to_cpu(knode->sel->keys[0].val) >> tos_off & 0xff;
mask = be32_to_cpu(knode->sel->keys[0].mask) >> tos_off & 0xff;
/* Check if there is no conflicting mapping and find match by handle */
list_for_each_entry(iter, &alink->dscp_map, list) {
u32 cmask;
if (iter->handle == knode->handle) {
match = iter;
continue;
}
cmask = iter->mask & mask;
if ((iter->val & cmask) == (val & cmask) &&
iter->band != knode->res->classid) {
NL_SET_ERR_MSG_MOD(extack, "conflict with already offloaded filter");
err = -EOPNOTSUPP;
goto err_delete;
}
}
if (!match) {
match = kzalloc(sizeof(*match), GFP_KERNEL);
if (!match) {
err = -ENOMEM;
goto err_delete;
}
list_add(&match->list, &alink->dscp_map);
}
match->handle = knode->handle;
match->band = knode->res->classid;
match->mask = mask;
match->val = val;
err = nfp_abm_update_band_map(alink);
if (err)
goto err_delete;
return 0;
err_delete:
nfp_abm_u32_knode_delete(alink, knode);
return err;
}
| 0 |
[
"CWE-400",
"CWE-401"
] |
linux
|
78beef629fd95be4ed853b2d37b832f766bd96ca
| 192,338,825,862,744,060,000,000,000,000,000,000,000 | 62 |
nfp: abm: fix memory leak in nfp_abm_u32_knode_replace
In nfp_abm_u32_knode_replace if the allocation for match fails it should
go to the error handling instead of returning. Updated other gotos to
have correct errno returned, too.
Signed-off-by: Navid Emamdoost <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
install_sublevel(void)
{
sublevel++;
}
| 0 |
[
"CWE-59",
"CWE-61"
] |
keepalived
|
04f2d32871bb3b11d7dc024039952f2fe2750306
| 52,485,863,151,012,580,000,000,000,000,000,000,000 | 4 |
When opening files for write, ensure they aren't symbolic links
Issue #1048 identified that if, for example, a non-privileged user
created a symbolic link from /etc/keepalived.data to /etc/passwd,
writing to /etc/keepalived.data (which could be invoked via DBus)
would cause /etc/passwd to be overwritten.
This commit stops keepalived writing to pathnames where the ultimate
component is a symbolic link, by setting O_NOFOLLOW whenever opening
a file for writing.
This might break some setups, where, for example, /etc/keepalived.data
was a symbolic link to /home/fred/keepalived.data. If this was the case,
instead create a symbolic link from /home/fred/keepalived.data to
/tmp/keepalived.data, so that the file is still accessible via
/home/fred/keepalived.data.
There doesn't appear to be a way around this backward incompatibility,
since even checking if the pathname is a symbolic link prior to opening
for writing would create a race condition.
Signed-off-by: Quentin Armitage <[email protected]>
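A minimal standalone sketch of the O_NOFOLLOW pattern the message describes:
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>

int open_for_write_nofollow(const char *path)
{
    /* O_NOFOLLOW makes open() fail with ELOOP when the final path
     * component is a symbolic link, closing the race window that a
     * pre-open stat()/lstat() check would leave. */
    int fd = open(path, O_WRONLY | O_CREAT | O_TRUNC | O_NOFOLLOW, 0644);
    if (fd == -1 && errno == ELOOP)
        fprintf(stderr, "%s is a symlink, refusing to write\n", path);
    return fd;
}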
|
int sldns_str2wire_loc_buf(const char* str, uint8_t* rd, size_t* len)
{
uint32_t latitude = 0;
uint32_t longitude = 0;
uint32_t altitude = 0;
uint32_t equator = (uint32_t)1<<31; /* 2**31 */
/* only support version 0 */
uint32_t h = 0;
uint32_t m = 0;
uint8_t size_b = 1, size_e = 2;
uint8_t horiz_pre_b = 1, horiz_pre_e = 6;
uint8_t vert_pre_b = 1, vert_pre_e = 3;
double s = 0.0;
int northerness;
int easterness;
char *my_str = (char *) str;
if (isdigit((unsigned char) *my_str)) {
h = (uint32_t) strtol(my_str, &my_str, 10);
} else {
return LDNS_WIREPARSE_ERR_INVALID_STR;
}
while (isblank((unsigned char) *my_str)) {
my_str++;
}
if (isdigit((unsigned char) *my_str)) {
m = (uint32_t) strtol(my_str, &my_str, 10);
} else if (*my_str == 'N' || *my_str == 'S') {
goto north;
} else {
return LDNS_WIREPARSE_ERR_INVALID_STR;
}
while (isblank((unsigned char) *my_str)) {
my_str++;
}
if (isdigit((unsigned char) *my_str)) {
s = strtod(my_str, &my_str);
}
/* skip blanks before northerness */
while (isblank((unsigned char) *my_str)) {
my_str++;
}
north:
if (*my_str == 'N') {
northerness = 1;
} else if (*my_str == 'S') {
northerness = 0;
} else {
return LDNS_WIREPARSE_ERR_INVALID_STR;
}
my_str++;
/* store number */
s = 1000.0 * s;
/* add a little to make floor in conversion a round */
s += 0.0005;
latitude = (uint32_t) s;
latitude += 1000 * 60 * m;
latitude += 1000 * 60 * 60 * h;
if (northerness) {
latitude = equator + latitude;
} else {
latitude = equator - latitude;
}
while (isblank((unsigned char)*my_str)) {
my_str++;
}
if (isdigit((unsigned char) *my_str)) {
h = (uint32_t) strtol(my_str, &my_str, 10);
} else {
return LDNS_WIREPARSE_ERR_INVALID_STR;
}
while (isblank((unsigned char) *my_str)) {
my_str++;
}
if (isdigit((unsigned char) *my_str)) {
m = (uint32_t) strtol(my_str, &my_str, 10);
} else if (*my_str == 'E' || *my_str == 'W') {
goto east;
} else {
return LDNS_WIREPARSE_ERR_INVALID_STR;
}
while (isblank((unsigned char)*my_str)) {
my_str++;
}
if (isdigit((unsigned char) *my_str)) {
s = strtod(my_str, &my_str);
}
/* skip blanks before easterness */
while (isblank((unsigned char)*my_str)) {
my_str++;
}
east:
if (*my_str == 'E') {
easterness = 1;
} else if (*my_str == 'W') {
easterness = 0;
} else {
return LDNS_WIREPARSE_ERR_INVALID_STR;
}
my_str++;
/* store number */
s *= 1000.0;
/* add a little to make floor in conversion a round */
s += 0.0005;
longitude = (uint32_t) s;
longitude += 1000 * 60 * m;
longitude += 1000 * 60 * 60 * h;
if (easterness) {
longitude += equator;
} else {
longitude = equator - longitude;
}
altitude = (uint32_t)(strtod(my_str, &my_str)*100.0 +
10000000.0 + 0.5);
if (*my_str == 'm' || *my_str == 'M') {
my_str++;
}
if (strlen(my_str) > 0) {
if(!loc_parse_cm(my_str, &my_str, &size_b, &size_e))
return LDNS_WIREPARSE_ERR_INVALID_STR;
}
if (strlen(my_str) > 0) {
if(!loc_parse_cm(my_str, &my_str, &horiz_pre_b, &horiz_pre_e))
return LDNS_WIREPARSE_ERR_INVALID_STR;
}
if (strlen(my_str) > 0) {
if(!loc_parse_cm(my_str, &my_str, &vert_pre_b, &vert_pre_e))
return LDNS_WIREPARSE_ERR_INVALID_STR;
}
if(*len < 16)
return LDNS_WIREPARSE_ERR_BUFFER_TOO_SMALL;
rd[0] = 0;
rd[1] = ((size_b << 4) & 0xf0) | (size_e & 0x0f);
rd[2] = ((horiz_pre_b << 4) & 0xf0) | (horiz_pre_e & 0x0f);
rd[3] = ((vert_pre_b << 4) & 0xf0) | (vert_pre_e & 0x0f);
sldns_write_uint32(rd + 4, latitude);
sldns_write_uint32(rd + 8, longitude);
sldns_write_uint32(rd + 12, altitude);
*len = 16;
return LDNS_WIREPARSE_ERR_OK;
}
| 0 |
[] |
unbound
|
3f3cadd416d6efa92ff2d548ac090f42cd79fee9
| 308,821,196,530,325,050,000,000,000,000,000,000,000 | 168 |
- Fix Out of Bounds Write in sldns_str2wire_str_buf(),
reported by X41 D-Sec.
|
bool got_fatal_error()
{
return unhandled_errors > 0;
}
| 0 |
[
"CWE-416"
] |
server
|
0beed9b5e933f0ff79b3bb346524f7a451d14e38
| 12,862,045,268,914,671,000,000,000,000,000,000,000 | 4 |
MDEV-28097 use-after-free when WHERE has subquery with an outer reference in HAVING
when resolving WHERE and ON clauses, do not look in
SELECT list/aliases.
|
AP_CORE_DECLARE(const char *) ap_add_if_conf(apr_pool_t *p,
core_dir_config *conf,
void *if_config)
{
void **new_space;
core_dir_config *new = ap_get_module_config(if_config, &core_module);
if (!conf->sec_if) {
conf->sec_if = apr_array_make(p, 2, sizeof(ap_conf_vector_t *));
}
if (new->condition_ifelse & AP_CONDITION_ELSE) {
int have_if = 0;
if (conf->sec_if->nelts > 0) {
core_dir_config *last;
ap_conf_vector_t *lastelt = APR_ARRAY_IDX(conf->sec_if,
conf->sec_if->nelts - 1,
ap_conf_vector_t *);
last = ap_get_module_config(lastelt, &core_module);
if (last->condition_ifelse & AP_CONDITION_IF)
have_if = 1;
}
if (!have_if)
return "<Else> or <ElseIf> section without previous <If> or "
"<ElseIf> section in same scope";
}
new_space = (void **)apr_array_push(conf->sec_if);
*new_space = if_config;
return NULL;
}
| 0 |
[
"CWE-416",
"CWE-284"
] |
httpd
|
4cc27823899e070268b906ca677ee838d07cf67a
| 113,515,551,757,317,940,000,000,000,000,000,000,000 | 30 |
core: Disallow Methods' registration at run time (.htaccess), they may be
used only if registered at init time (httpd.conf).
Calling ap_method_register() in child processes is not the right scope
since it won't be shared for all requests.
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1807655 13f79535-47bb-0310-9956-ffa450edef68
|
static void discard_swap_cluster(struct swap_info_struct *si,
pgoff_t start_page, pgoff_t nr_pages)
{
struct swap_extent *se = si->curr_swap_extent;
int found_extent = 0;
while (nr_pages) {
struct list_head *lh;
if (se->start_page <= start_page &&
start_page < se->start_page + se->nr_pages) {
pgoff_t offset = start_page - se->start_page;
sector_t start_block = se->start_block + offset;
sector_t nr_blocks = se->nr_pages - offset;
if (nr_blocks > nr_pages)
nr_blocks = nr_pages;
start_page += nr_blocks;
nr_pages -= nr_blocks;
if (!found_extent++)
si->curr_swap_extent = se;
start_block <<= PAGE_SHIFT - 9;
nr_blocks <<= PAGE_SHIFT - 9;
if (blkdev_issue_discard(si->bdev, start_block,
nr_blocks, GFP_NOIO, 0))
break;
}
lh = se->list.next;
se = list_entry(lh, struct swap_extent, list);
}
}
| 0 |
[
"CWE-264"
] |
linux-2.6
|
1a5a9906d4e8d1976b701f889d8f35d54b928f25
| 161,351,089,376,304,010,000,000,000,000,000,000,000 | 34 |
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem hold in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables to go away from under them, during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(&current->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[[email protected]: checkpatch fixes]
Reported-by: Ulrich Obergfell <[email protected]>
Signed-off-by: Andrea Arcangeli <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Dave Jones <[email protected]>
Acked-by: Larry Woodman <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: <[email protected]> [2.6.38+]
Cc: Mark Salter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
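A sketch of the stack-local read described above, modelled on the pmd_none_or_trans_huge_or_clear_bad() idea (only a compiler barrier is needed, no CPU barrier):
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
	pmd_t pmdval = *pmd;	/* read once into a local; later checks must
				 * not re-read the live pmd changing under us */
	barrier();		/* compiler barrier only */
	if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
		return 1;	/* none or huge: caller must not walk ptes */
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}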
|
inline void Http2Session::RemoveStream(Http2Stream* stream) {
if (streams_.empty() || stream == nullptr)
return; // Nothing to remove, item was never added?
streams_.erase(stream->id());
DecrementCurrentSessionMemory(sizeof(*stream));
}
| 0 |
[
"CWE-416"
] |
node
|
7f178663ebffc82c9f8a5a1b6bf2da0c263a30ed
| 337,559,811,940,118,030,000,000,000,000,000,000,000 | 6 |
src: use unique_ptr for WriteWrap
This commit attempts to avoid a use-after-free error by using unqiue_ptr
and passing a reference to it.
CVE-ID: CVE-2020-8265
Fixes: https://github.com/nodejs-private/node-private/issues/227
PR-URL: https://github.com/nodejs-private/node-private/pull/238
Reviewed-By: Michael Dawson <[email protected]>
Reviewed-By: Tobias Nießen <[email protected]>
Reviewed-By: Richard Lau <[email protected]>
|
ZEND_API int add_get_assoc_stringl_ex(zval *arg, const char *key, uint key_len, const char *str, uint length, void **dest, int duplicate) /* {{{ */
{
zval *tmp;
if (UNEXPECTED(length > INT_MAX)) {
zend_error_noreturn(E_ERROR, "String overflow, max size is %d", INT_MAX);
}
MAKE_STD_ZVAL(tmp);
ZVAL_STRINGL(tmp, str, length, duplicate);
return zend_symtable_update(Z_ARRVAL_P(arg), key, key_len, (void *) &tmp, sizeof(zval *), dest);
}
| 0 |
[
"CWE-416"
] |
php-src
|
0e6fe3a4c96be2d3e88389a5776f878021b4c59f
| 108,993,786,069,600,260,000,000,000,000,000,000,000 | 13 |
Fix bug #73147: Use After Free in PHP7 unserialize()
|
static void __loaded_vmcs_clear(void *arg)
{
struct loaded_vmcs *loaded_vmcs = arg;
int cpu = raw_smp_processor_id();
if (loaded_vmcs->cpu != cpu)
return; /* vcpu migration can race with cpu offline */
if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
per_cpu(current_vmcs, cpu) = NULL;
vmcs_clear(loaded_vmcs->vmcs);
if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
vmcs_clear(loaded_vmcs->shadow_vmcs);
list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
/*
* Ensure all writes to loaded_vmcs, including deleting it from its
* current percpu list, complete before setting loaded_vmcs->vcpu to
* -1, otherwise a different cpu can see vcpu == -1 first and add
* loaded_vmcs to its percpu list before it's deleted from this cpu's
* list. Pairs with the smp_rmb() in vmx_vcpu_load_vmcs().
*/
smp_wmb();
loaded_vmcs->cpu = -1;
loaded_vmcs->launched = 0;
}
| 0 |
[
"CWE-787"
] |
linux
|
04c4f2ee3f68c9a4bf1653d15f1a9a435ae33f7a
| 258,269,515,418,000,430,000,000,000,000,000,000,000 | 28 |
KVM: VMX: Don't use vcpu->run->internal.ndata as an array index
__vmx_handle_exit() uses vcpu->run->internal.ndata as an index for
an array access. Since vcpu->run is (can be) mapped to a user address
space with a writer permission, the 'ndata' could be updated by the
user process at anytime (the user process can set it to outside the
bounds of the array).
So, it is not safe that __vmx_handle_exit() uses the 'ndata' that way.
Fixes: 1aa561b1a4c0 ("kvm: x86: Add "last CPU" to some KVM_EXIT information")
Signed-off-by: Reiji Watanabe <[email protected]>
Reviewed-by: Jim Mattson <[email protected]>
Message-Id: <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]>
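A hedged sketch of the defensive shape: never index with a value the user can rewrite; build the counter locally and publish it last (exit_reason stands in for whatever was recorded earlier):
/* Sketch: vcpu->run is user-mapped, so its fields cannot be trusted
 * as array indices. */
u32 ndata = 0;					/* kernel-owned counter */
vcpu->run->internal.data[ndata++] = exit_reason;
vcpu->run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu;
vcpu->run->internal.ndata = ndata;		/* publish; never read back */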
|
static int micsetup(struct airo_info *ai) {
int i;
if (ai->tfm == NULL)
ai->tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(ai->tfm)) {
airo_print_err(ai->dev->name, "failed to load transform for AES");
ai->tfm = NULL;
return ERROR;
}
for (i=0; i < NUM_MODULES; i++) {
memset(&ai->mod[i].mCtx,0,sizeof(miccntx));
memset(&ai->mod[i].uCtx,0,sizeof(miccntx));
}
return SUCCESS;
}
| 0 |
[
"CWE-703",
"CWE-264"
] |
linux
|
550fd08c2cebad61c548def135f67aba284c6162
| 238,978,875,534,468,530,000,000,000,000,000,000,000 | 18 |
net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared
After the last patch, We are left in a state in which only drivers calling
ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real
hardware call ether_setup for their net_devices and don't hold any state in
their skbs. There are a handful of drivers that violate this assumption of
course, and need to be fixed up. This patch identifies those drivers, and marks
them as not being able to support the safe transmission of skbs by clearning the
IFF_TX_SKB_SHARING flag in priv_flags
Signed-off-by: Neil Horman <[email protected]>
CC: Karsten Keil <[email protected]>
CC: "David S. Miller" <[email protected]>
CC: Jay Vosburgh <[email protected]>
CC: Andy Gospodarek <[email protected]>
CC: Patrick McHardy <[email protected]>
CC: Krzysztof Halasa <[email protected]>
CC: "John W. Linville" <[email protected]>
CC: Greg Kroah-Hartman <[email protected]>
CC: Marcel Holtmann <[email protected]>
CC: Johannes Berg <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
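The per-driver change has a one-line shape; a hypothetical setup routine sketching it:
/* Sketch (driver name hypothetical): drivers that keep state in their
 * skbs must opt out of shared-skb transmission after ether_setup(). */
static void example_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
}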
|
qboolean FS_FileInPathExists(const char *testpath)
{
FILE *filep;
filep = Sys_FOpen(testpath, "rb");
if(filep)
{
fclose(filep);
return qtrue;
}
return qfalse;
}
| 0 |
[
"CWE-269"
] |
ioq3
|
376267d534476a875d8b9228149c4ee18b74a4fd
| 207,993,760,414,002,800,000,000,000,000,000,000,000 | 14 |
Don't load .pk3s as .dlls, and don't load user config files from .pk3s.
|
static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{
struct hstate *h = &default_hstate;
unsigned long tmp = h->max_huge_pages;
int ret;
if (!hugepages_supported())
return -EOPNOTSUPP;
ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
&tmp);
if (ret)
goto out;
if (write)
ret = __nr_hugepages_store_common(obey_mempolicy, h,
NUMA_NO_NODE, tmp, *length);
out:
return ret;
}
| 0 |
[
"CWE-362"
] |
linux
|
17743798d81238ab13050e8e2833699b54e15467
| 25,281,200,662,477,140,000,000,000,000,000,000,000 | 22 |
mm/hugetlb: fix a race between hugetlb sysctl handlers
There is a race between the assignment of `table->data` and the write of a
value through that pointer in __do_proc_doulongvec_minmax() on another
thread.
CPU0:                                  CPU1:
                                       proc_sys_write
hugetlb_sysctl_handler                   proc_sys_call_handler
hugetlb_sysctl_handler_common              hugetlb_sysctl_handler
  table->data = &tmp;                        hugetlb_sysctl_handler_common
                                               table->data = &tmp;
proc_doulongvec_minmax
do_proc_doulongvec_minmax              sysctl_head_finish
  __do_proc_doulongvec_minmax            unuse_table
    i = table->data;
    *i = val; // corrupt CPU1's stack
Fix this by duplicating the `table`, and only update the duplicate of
it. And introduce a helper of proc_hugetlb_doulongvec_minmax() to
simplify the code.
The following oops was seen:
BUG: kernel NULL pointer dereference, address: 0000000000000000
#PF: supervisor instruction fetch in kernel mode
#PF: error_code(0x0010) - not-present page
Code: Bad RIP value.
...
Call Trace:
? set_max_huge_pages+0x3da/0x4f0
? alloc_pool_huge_page+0x150/0x150
? proc_doulongvec_minmax+0x46/0x60
? hugetlb_sysctl_handler_common+0x1c7/0x200
? nr_hugepages_store+0x20/0x20
? copy_fd_bitmaps+0x170/0x170
? hugetlb_sysctl_handler+0x1e/0x20
? proc_sys_call_handler+0x2f1/0x300
? unregister_sysctl_table+0xb0/0xb0
? __fd_install+0x78/0x100
? proc_sys_write+0x14/0x20
? __vfs_write+0x4d/0x90
? vfs_write+0xef/0x240
? ksys_write+0xc0/0x160
? __ia32_sys_read+0x50/0x50
? __close_fd+0x129/0x150
? __x64_sys_write+0x43/0x50
? do_syscall_64+0x6c/0x200
? entry_SYSCALL_64_after_hwframe+0x44/0xa9
Fixes: e5ff215941d5 ("hugetlb: multiple hstates for multiple page sizes")
Signed-off-by: Muchun Song <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Reviewed-by: Mike Kravetz <[email protected]>
Cc: Andi Kleen <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
|
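The fix's core idea is that the shared ctl_table must never carry a pointer to one caller's stack. A simplified sketch of the duplicate-table helper the commit introduces (kernel-style; the real helper is proc_hugetlb_doulongvec_minmax):

#include <linux/sysctl.h>

/* Work on a private stack copy of the table so that two concurrent
 * writers each point ->data at their own local variable instead of
 * racing on the shared table entry. */
static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
                                          void *buffer, size_t *length,
                                          loff_t *ppos, unsigned long *out)
{
    struct ctl_table dup_table = *table;  /* per-call copy */

    dup_table.data = out;                 /* caller-local target */

    return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
}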
static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
{
int count;
if (!boot_cpu_has(X86_FEATURE_APIC))
return -ENODEV;
/*
* Note that the LAPIC address is obtained from the MADT (32-bit value)
* and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
*/
count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
acpi_parse_lapic_addr_ovr, 0);
if (count < 0) {
printk(KERN_ERR PREFIX
"Error parsing LAPIC address override entry\n");
return count;
}
register_lapic_address(acpi_lapic_addr);
return count;
}
| 0 |
[
"CWE-120"
] |
linux
|
dad5ab0db8deac535d03e3fe3d8f2892173fa6a4
| 98,359,716,565,094,590,000,000,000,000,000,000,000 | 24 |
x86/acpi: Prevent out of bound access caused by broken ACPI tables
The bus_irq argument of mp_override_legacy_irq() is used as the index into
the isa_irq_to_gsi[] array. The bus_irq argument originates from
ACPI_MADT_TYPE_IO_APIC and ACPI_MADT_TYPE_INTERRUPT items in the ACPI
tables, but is nowhere sanity checked.
That allows broken or malicious ACPI tables to overwrite memory, which
might cause malfunction, panic or arbitrary code execution.
Add a sanity check and emit a warning when that triggers.
[ tglx: Added warning and rewrote changelog ]
Signed-off-by: Seunghun Han <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: "Rafael J. Wysocki" <[email protected]>
Cc: [email protected]
Signed-off-by: Ingo Molnar <[email protected]>
|
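The added sanity check is the standard firmware-input rule: an index taken from an ACPI table is attacker-controlled until proven in bounds. A simplified kernel-style sketch (the function name is illustrative; NR_IRQS_LEGACY bounds the real isa_irq_to_gsi[] array):

/* bus_irq arrives straight from a MADT entry, so validate it before
 * using it to index isa_irq_to_gsi[]; warn so broken tables are visible. */
static void mp_override_legacy_irq_checked(u8 bus_irq, u32 gsi)
{
    if (bus_irq >= NR_IRQS_LEGACY) {
        pr_warn("ACPI: bus_irq %u out of range, ignoring override\n", bus_irq);
        return;
    }
    isa_irq_to_gsi[bus_irq] = gsi;   /* index is now provably in bounds */
}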
static void ehci_set_fetch_addr(EHCIState *s, int async, uint32_t addr)
{
if (async) {
s->a_fetch_addr = addr;
} else {
s->p_fetch_addr = addr;
}
}
| 0 |
[] |
qemu
|
791f97758e223de3290592d169f8e6339c281714
| 258,881,888,343,717,100,000,000,000,000,000,000,000 | 8 |
usb: ehci: fix memory leak in ehci_init_transfer
In the ehci_init_transfer function, if 'cpage' is bigger than 4, the
previously allocated 'p->sgl' is never freed, leading to a memory leak.
This patch avoids that.
Signed-off-by: Li Qiang <[email protected]>
Message-id: [email protected]
Signed-off-by: Gerd Hoffmann <[email protected]>
|
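The underlying discipline is that every early return after a successful allocation must release it. A runnable plain-C sketch of the fixed shape (malloc/free stand in for QEMU's qemu_sglist_init()/qemu_sglist_destroy() pair around p->sgl):

#include <stdlib.h>

struct sglist { void *entries; };

static int init_transfer(struct sglist *sgl, unsigned cpage)
{
    sgl->entries = malloc(64);       /* the allocation to balance */
    if (!sgl->entries)
        return -1;

    if (cpage > 4) {                 /* the invalid-input bail-out */
        free(sgl->entries);          /* this free was missing: the leak */
        sgl->entries = NULL;
        return -1;
    }
    return 0;
}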
static char* get_printer_config_path(const rdpSettings* settings, const WCHAR* name, size_t length)
{
char* dir = GetCombinedPath(settings->ConfigPath, "printers");
char* bname = crypto_base64_encode((const BYTE*)name, (int)length);
char* config = GetCombinedPath(dir, bname);
if (config && !PathFileExistsA(config))
{
if (!PathMakePathA(config, NULL))
{
free(config);
config = NULL;
}
}
free(dir);
free(bname);
return config;
}
| 0 |
[
"CWE-125"
] |
FreeRDP
|
6b485b146a1b9d6ce72dfd7b5f36456c166e7a16
| 58,361,929,982,522,140,000,000,000,000,000,000,000 | 19 |
Fixed oob read in irp_write and similar
|
int nla_server_recv(rdpNla* nla)
{
int status = -1;
wStream* s;
WINPR_ASSERT(nla);
s = nla_server_recv_stream(nla);
if (!s)
goto fail;
status = nla_decode_ts_request(nla, s);
fail:
Stream_Free(s, TRUE);
return status;
}
| 0 |
[] |
FreeRDP
|
479e891545473f01c187daffdfa05fc752b54b72
| 239,969,367,875,449,400,000,000,000,000,000,000,000 | 16 |
check return values for SetCredentialsAttributes, throw warnings for unsupported attributes
|
TEST(ComparisonMatchExpression, StringMatchingWithNullCollatorUsesBinaryComparison) {
BSONObj operand = BSON("a"
<< "string");
EqualityMatchExpression eq("a", operand["a"]);
ASSERT(!eq.matchesBSON(BSON("a"
<< "string2"),
NULL));
}
| 0 |
[] |
mongo
|
64095239f41e9f3841d8be9088347db56d35c891
| 260,578,883,203,634,800,000,000,000,000,000,000,000 | 8 |
SERVER-51083 Reject invalid UTF-8 from $regex match expressions
|
static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
{
void (*func)(struct perf_event *);
u32 flags = arg;
switch (cmd) {
case PERF_EVENT_IOC_ENABLE:
func = _perf_event_enable;
break;
case PERF_EVENT_IOC_DISABLE:
func = _perf_event_disable;
break;
case PERF_EVENT_IOC_RESET:
func = _perf_event_reset;
break;
case PERF_EVENT_IOC_REFRESH:
return _perf_event_refresh(event, arg);
case PERF_EVENT_IOC_PERIOD:
return perf_event_period(event, (u64 __user *)arg);
case PERF_EVENT_IOC_ID:
{
u64 id = primary_event_id(event);
if (copy_to_user((void __user *)arg, &id, sizeof(id)))
return -EFAULT;
return 0;
}
case PERF_EVENT_IOC_SET_OUTPUT:
{
int ret;
if (arg != -1) {
struct perf_event *output_event;
struct fd output;
ret = perf_fget_light(arg, &output);
if (ret)
return ret;
output_event = output.file->private_data;
ret = perf_event_set_output(event, output_event);
fdput(output);
} else {
ret = perf_event_set_output(event, NULL);
}
return ret;
}
case PERF_EVENT_IOC_SET_FILTER:
return perf_event_set_filter(event, (void __user *)arg);
default:
return -ENOTTY;
}
if (flags & PERF_IOC_FLAG_GROUP)
perf_event_for_each(event, func);
else
perf_event_for_each_child(event, func);
return 0;
}
| 0 |
[
"CWE-284",
"CWE-264"
] |
linux
|
f63a8daa5812afef4f06c962351687e1ff9ccb2b
| 23,477,174,522,279,830,000,000,000,000,000,000,000 | 63 |
perf: Fix event->ctx locking
There have been a few reported issues wrt. the lack of locking around
changing event->ctx. This patch tries to address those.
It avoids the whole rwsem thing; and while it appears to work, please
give it some thought in review.
What I did fail at is sensible runtime checks on the use of
event->ctx, the RCU use makes it very hard.
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Linus Torvalds <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
|
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
nodemask_t *nodes_allowed)
{
unsigned long min_count, ret;
if (h->order >= MAX_ORDER)
return h->max_huge_pages;
/*
* Increase the pool size
* First take pages out of surplus state. Then make up the
* remaining difference by allocating fresh huge pages.
*
* We might race with alloc_buddy_huge_page() here and be unable
* to convert a surplus huge page to a normal huge page. That is
* not critical, though, it just means the overall size of the
* pool might be one hugepage larger than it needs to be, but
* within all the constraints specified by the sysctls.
*/
spin_lock(&hugetlb_lock);
while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
if (!adjust_pool_surplus(h, nodes_allowed, -1))
break;
}
while (count > persistent_huge_pages(h)) {
/*
* If this allocation races such that we no longer need the
* page, free_huge_page will handle it by freeing the page
* and reducing the surplus.
*/
spin_unlock(&hugetlb_lock);
ret = alloc_fresh_huge_page(h, nodes_allowed);
spin_lock(&hugetlb_lock);
if (!ret)
goto out;
/* Bail for signals. Probably ctrl-c from user */
if (signal_pending(current))
goto out;
}
/*
* Decrease the pool size
* First return free pages to the buddy allocator (being careful
* to keep enough around to satisfy reservations). Then place
* pages into surplus state as needed so the pool will shrink
* to the desired size as pages become free.
*
* By placing pages into the surplus state independent of the
* overcommit value, we are allowing the surplus pool size to
* exceed overcommit. There are few sane options here. Since
* alloc_buddy_huge_page() is checking the global counter,
* though, we'll note that we're not allowed to exceed surplus
* and won't grow the pool anywhere else. Not until one of the
* sysctls are changed, or the surplus pages go out of use.
*/
min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
min_count = max(count, min_count);
try_to_free_low(h, min_count, nodes_allowed);
while (min_count < persistent_huge_pages(h)) {
if (!free_pool_huge_page(h, nodes_allowed, 0))
break;
}
while (count < persistent_huge_pages(h)) {
if (!adjust_pool_surplus(h, nodes_allowed, 1))
break;
}
out:
ret = persistent_huge_pages(h);
spin_unlock(&hugetlb_lock);
return ret;
}
| 0 |
[
"CWE-399"
] |
linux
|
90481622d75715bfcb68501280a917dbfe516029
| 76,561,286,071,227,450,000,000,000,000,000,000,000 | 73 |
hugepages: fix use after free bug in "quota" handling
hugetlbfs_{get,put}_quota() are badly named. They don't interact with the
general quota handling code, and they don't much resemble its behaviour.
Rather than being about maintaining limits on on-disk block usage by
particular users, they are instead about maintaining limits on in-memory
page usage (including anonymous MAP_PRIVATE copied-on-write pages)
associated with a particular hugetlbfs filesystem instance.
Worse, they work by having callbacks to the hugetlbfs filesystem code from
the low-level page handling code, in particular from free_huge_page().
This is a layering violation of itself, but more importantly, if the
kernel does a get_user_pages() on hugepages (which can happen from KVM
amongst others), then the free_huge_page() can be delayed until after the
associated inode has already been freed. If an unmount occurs at the
wrong time, even the hugetlbfs superblock where the "quota" limits are
stored may have been freed.
Andrew Barry proposed a patch to fix this by having hugepages, instead of
storing a pointer to their address_space and reaching the superblock from
there, had the hugepages store pointers directly to the superblock,
bumping the reference count as appropriate to avoid it being freed.
Andrew Morton rejected that version, however, on the grounds that it made
the existing layering violation worse.
This is a reworked version of Andrew's patch, which removes the extra, and
some of the existing, layering violation. It works by introducing the
concept of a hugepage "subpool" at the lower hugepage mm layer - that is a
finite logical pool of hugepages to allocate from. hugetlbfs now creates
a subpool for each filesystem instance with a page limit set, and a
pointer to the subpool gets added to each allocated hugepage, instead of
the address_space pointer used now. The subpool has its own lifetime and
is only freed once all pages in it _and_ all other references to it (i.e.
superblocks) are gone.
subpools are optional - a NULL subpool pointer is taken by the code to
mean that no subpool limits are in effect.
Previous discussion of this bug found in: "Fix refcounting in hugetlbfs
quota handling.". See: https://lkml.org/lkml/2011/8/11/28 or
http://marc.info/?l=linux-mm&m=126928970510627&w=1
v2: Fixed a bug spotted by Hillf Danton, and removed the extra parameter to
alloc_huge_page() - since it already takes the vma, it is not necessary.
Signed-off-by: Andrew Barry <[email protected]>
Signed-off-by: David Gibson <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Hillf Danton <[email protected]>
Cc: Paul Mackerras <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
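The subpool described above is essentially a refcounted accounting object with its own lifetime. A simplified kernel-style sketch of its shape (field names follow the commit's description of the real struct hugepage_subpool):

#include <linux/slab.h>
#include <linux/spinlock.h>

struct hugepage_subpool {
    spinlock_t lock;
    long count;        /* superblock ref + one ref while pages remain */
    long max_hpages;   /* page limit for this fs instance, or -1 = none */
    long used_hpages;  /* pages currently counted against the limit */
};

/* Called with spool->lock held: drop one reference and free the subpool
 * only when nothing (no superblock, no outstanding page) points at it,
 * so a page freed after unmount still finds valid accounting. */
static void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
    bool free_it = (--spool->count == 0);

    spin_unlock(&spool->lock);
    if (free_it)
        kfree(spool);
}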
static void print_tags(blkid_dev dev, char *show[], int output)
{
blkid_tag_iterate iter;
const char *type, *value, *devname;
int num = 1;
static int first = 1;
if (!dev)
return;
if (output & OUTPUT_PRETTY_LIST) {
pretty_print_dev(dev);
return;
}
devname = blkid_dev_devname(dev);
if (output & OUTPUT_DEVICE_ONLY) {
printf("%s\n", devname);
return;
}
iter = blkid_tag_iterate_begin(dev);
while (blkid_tag_next(iter, &type, &value) == 0) {
if (show[0] && !has_item(show, type))
continue;
if (num == 1 && !first &&
(output & (OUTPUT_UDEV_LIST | OUTPUT_EXPORT_LIST)))
/* add extra line between output from more devices */
fputc('\n', stdout);
print_value(output, num++, devname, value, type, strlen(value));
}
blkid_tag_iterate_end(iter);
if (num > 1) {
if (!(output & (OUTPUT_VALUE_ONLY | OUTPUT_UDEV_LIST |
OUTPUT_EXPORT_LIST)))
printf("\n");
first = 0;
}
}
| 0 |
[
"CWE-77"
] |
util-linux
|
89e90ae7b2826110ea28c1c0eb8e7c56c3907bdc
| 198,675,926,795,647,580,000,000,000,000,000,000,000 | 43 |
libblkid: care about unsafe chars in cache
The high-level libblkid API uses /run/blkid/blkid.tab cache to
store probing results. The cache format is
<device NAME="value" ...>devname</device>
and unfortunately the cache code does not escape quotation marks:
# mkfs.ext4 -L 'AAA"BBB'
# cat /run/blkid/blkid.tab
...
<device ... LABEL="AAA"BBB" ...>/dev/sdb1</device>
Such a string is later parsed incorrectly, and blkid(8) returns
nonsense. And for use-cases like
# eval $(blkid -o export /dev/sdb1)
it's also insecure.
Note that mount, udevd and blkid -p are based on low-level libblkid
API, it bypass the cache and directly read data from the devices.
The current udevd upstream does not depend on blkid(8) output at all,
it's directly linked with the library and all unsafe chars are encoded by
\x<hex> notation.
# mkfs.ext4 -L 'X"`/tmp/foo` "' /dev/sdb1
# udevadm info --export-db | grep LABEL
...
E: ID_FS_LABEL=X__/tmp/foo___
E: ID_FS_LABEL_ENC=X\x22\x60\x2ftmp\x2ffoo\x60\x20\x22
Signed-off-by: Karel Zak <[email protected]>
|
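The \x<hex> encoding mentioned above is easy to state precisely: anything outside a conservative whitelist is emitted as a two-digit hex escape, so quotes and shell metacharacters can never break the cache format or an eval of `blkid -o export`. A runnable illustrative sketch (not libblkid's exact API):

#include <ctype.h>
#include <stdio.h>

/* Print a tag value with every unsafe byte encoded as \xHH. */
static void print_safe_value(FILE *fp, const char *value)
{
    const unsigned char *p;

    for (p = (const unsigned char *)value; *p; p++) {
        if (isalnum(*p) || *p == '.' || *p == '-' || *p == '_' || *p == '/')
            fputc(*p, fp);
        else
            fprintf(fp, "\\x%02x", *p);   /* e.g. '"' becomes \x22 */
    }
}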
main(int argc, char *argv[])
{
int i, c;
FILE *ifp = 0, *ofp = 0;
const char *ifp_filename = "<stdin>";
const char *ofp_filename = "<stdout>";
const char *set_font_name = 0;
struct font_reader fr;
uint32_t rfork_len;
int raw = 0, macbinary = 1, applesingle = 0, appledouble = 0, binhex = 0;
Clp_Parser *clp =
Clp_NewParser(argc, (const char * const *)argv, sizeof(options) / sizeof(options[0]), options);
program_name = Clp_ProgramName(clp);
/* interpret command line arguments using CLP */
while (1) {
int opt = Clp_Next(clp);
switch (opt) {
case RAW_OPT:
raw = 1;
macbinary = applesingle = appledouble = binhex = 0;
break;
case MACBINARY_OPT:
macbinary = 1;
raw = applesingle = appledouble = binhex = 0;
break;
case APPLESINGLE_OPT:
applesingle = 1;
raw = macbinary = appledouble = binhex = 0;
break;
case APPLEDOUBLE_OPT:
appledouble = 1;
raw = macbinary = applesingle = binhex = 0;
break;
case BINHEX_OPT:
binhex = 1;
raw = macbinary = applesingle = appledouble = 0;
break;
output_file:
case OUTPUT_OPT:
if (ofp)
fatal_error("output file already specified");
if (strcmp(clp->vstr, "-") == 0)
ofp = stdout;
else {
ofp_filename = clp->vstr;
ofp = fopen(ofp_filename, "wb");
if (!ofp) fatal_error("%s: %s", ofp_filename, strerror(errno));
}
break;
case FILENAME_OPT:
if (set_font_name)
fatal_error("Macintosh font filename already specified");
set_font_name = clp->vstr;
break;
case HELP_OPT:
usage();
exit(0);
break;
case VERSION_OPT:
printf("t1mac (LCDF t1utils) %s\n", VERSION);
printf("Copyright (C) 2000-2010 Eddie Kohler et al.\n\
This is free software; see the source for copying conditions.\n\
There is NO warranty, not even for merchantability or fitness for a\n\
particular purpose.\n");
exit(0);
break;
case Clp_NotOption:
if (ifp && ofp)
fatal_error("too many arguments");
else if (ifp)
goto output_file;
if (strcmp(clp->vstr, "-") == 0)
ifp = stdin;
else {
ifp_filename = clp->vstr;
ifp = fopen(clp->vstr, "r");
if (!ifp) fatal_error("%s: %s", clp->vstr, strerror(errno));
}
break;
case Clp_Done:
goto done;
case Clp_BadOption:
short_usage();
exit(1);
break;
}
}
done:
if (!ifp) ifp = stdin;
if (!ofp) ofp = stdout;
#if defined(_MSDOS) || defined(_WIN32)
/* As we are processing a PFB (binary) output */
/* file, we must set its file mode to binary. */
_setmode(_fileno(ofp), _O_BINARY);
#endif
/* prepare font reader */
fr.output_ascii = t1mac_output_ascii;
fr.output_binary = t1mac_output_binary;
fr.output_end = t1mac_output_end;
/* prepare resource fork file */
rfork_f = tmpfile();
if (!rfork_f)
fatal_error("cannot open temorary file: %s", strerror(errno));
for (i = 0; i < RFORK_HEADERLEN; i++)
putc(0, rfork_f);
init_current_post();
/* peek at first byte to see if it is the PFB marker 0x80 */
c = getc(ifp);
ungetc(c, ifp);
/* do the file */
if (c == PFB_MARKER)
process_pfb(ifp, ifp_filename, &fr);
else if (c == '%')
process_pfa(ifp, ifp_filename, &fr);
else
fatal_error("%s does not start with font marker (`%%' or 0x80)", ifp_filename);
if (ifp != stdin)
fclose(ifp);
/* check if anything was read */
if (nrsrc == 0)
error("no POST resources written -- are you sure this was a font?");
/* output large B/W icon */
output_new_rsrc("ICN#", 256, 32, (const char *)icon_bw_data, 256);
/* output FREF */
output_new_rsrc("FREF", 256, 32, "LWFN\0\0\0", 7);
/* output BNDL */
output_new_rsrc("BNDL", 256, 32, "T1UT\0\0\0\1FREF\0\0\0\0\1\0ICN#\0\0\0\0\1\0", 28);
/* output other icons */
output_new_rsrc("icl8", 256, 32, (const char *)icon_8_data, 1024);
output_new_rsrc("icl4", 256, 32, (const char *)icon_4_data, 512);
output_new_rsrc("ics#", 256, 32, (const char *)small_icon_bw_data, 64);
output_new_rsrc("ics8", 256, 32, (const char *)small_icon_8_data, 256);
output_new_rsrc("ics4", 256, 32, (const char *)small_icon_4_data, 128);
/* output T1UT (signature) */
output_new_rsrc("T1UT", 0, 0, "DConverted by t1mac (t1utils) \251Eddie Kohler http://www.lcdf.org/type/", 69);
/* finish off resource file */
rfork_len = complete_rfork();
/* prepare font name */
if (!set_font_name && font_name) {
int part = 0, len = 0;
char *x, *s;
for (x = s = font_name; *s; s++)
if (isupper((unsigned char) *s) || isdigit((unsigned char) *s)) {
*x++ = *s;
part++;
len = 1;
} else if (islower((unsigned char) *s)) {
if (len < (part <= 1 ? 5 : 3))
*x++ = *s;
len++;
}
*x++ = 0;
set_font_name = font_name;
} else if (!set_font_name)
set_font_name = "Unknown Font";
/* now, output the file */
if (macbinary)
output_macbinary(rfork_f, rfork_len, set_font_name, ofp);
else if (raw)
output_raw(rfork_f, rfork_len, ofp);
else if (applesingle || appledouble)
output_applesingle(rfork_f, rfork_len, set_font_name, ofp, appledouble);
else if (binhex)
output_binhex(rfork_f, rfork_len, set_font_name, ofp);
else
fatal_error("strange output format");
fclose(rfork_f);
if (ofp != stdout)
fclose(ofp);
return 0;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
t1utils
|
6b9d1aafcb61a3663c883663eb19ccdbfcde8d33
| 19,834,615,380,377,593,000,000,000,000,000,000,000 | 198 |
Security fixes.
- Don't overflow the small cs_start buffer (reported by Niels
Thykier via the debian tracker (Jakub Wilk), found with a
fuzzer ("American fuzzy lop")).
- Cast arguments to <ctype.h> functions to unsigned char.
|
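The <ctype.h> part of this fix is worth spelling out, because it is a portable-C rule rather than a t1utils detail: these functions take an int that must be a value representable as unsigned char (or EOF), so on platforms where plain char is signed, a byte >= 0x80 becomes a negative argument and the behaviour is undefined. A minimal sketch of the cast pattern already visible in main() above:

#include <ctype.h>

/* Cast through unsigned char before classifying, exactly as the fixed
 * code does with isupper()/isdigit()/islower(). */
static int is_upper_or_digit(char c)
{
    return isupper((unsigned char)c) || isdigit((unsigned char)c);
}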
static void unregister_sched_domain_sysctl(void)
{
if (sd_sysctl_header)
unregister_sysctl_table(sd_sysctl_header);
sd_sysctl_header = NULL;
if (sd_ctl_dir[0].child)
sd_free_ctl_entry(&sd_ctl_dir[0].child);
}
| 0 |
[] |
linux-2.6
|
8f1bc385cfbab474db6c27b5af1e439614f3025c
| 231,818,791,789,479,600,000,000,000,000,000,000,000 | 8 |
sched: fair: weight calculations
In order to level the hierarchy, we need to calculate load based on the
root view. That is, each task's load is in the same unit.
        A
       / \
      B   1
     / \
    2   3

To compute 1's load we do:

    weight(1) / rq_weight(A)

To compute 2's load we do:

    (weight(2) / rq_weight(B)) * (weight(B) / rq_weight(A))

This yields load fractions in comparable units.
The consequence is that it changes virtual time. We used to have:

    vtime_{i} = time_{i} / weight_{i}

    vtime = \Sum vtime_{i} = time / rq_weight.
But with the new way of load calculation we get that vtime equals time.
Signed-off-by: Peter Zijlstra <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
|
static void __proc_set_tty(struct tty_struct *tty)
{
unsigned long flags;
spin_lock_irqsave(&tty->ctrl_lock, flags);
/*
* The session and fg pgrp references will be non-NULL if
* tiocsctty() is stealing the controlling tty
*/
put_pid(tty->session);
put_pid(tty->pgrp);
tty->pgrp = get_pid(task_pgrp(current));
spin_unlock_irqrestore(&tty->ctrl_lock, flags);
tty->session = get_pid(task_session(current));
if (current->signal->tty) {
tty_debug(tty, "current tty %s not NULL!!\n",
current->signal->tty->name);
tty_kref_put(current->signal->tty);
}
put_pid(current->signal->tty_old_pgrp);
current->signal->tty = tty_kref_get(tty);
current->signal->tty_old_pgrp = NULL;
}
| 1 |
[
"CWE-416"
] |
linux
|
c8bcd9c5be24fb9e6132e97da5a35e55a83e36b9
| 148,065,517,577,564,860,000,000,000,000,000,000,000 | 23 |
tty: Fix ->session locking
Currently, locking of ->session is very inconsistent; most places
protect it using the legacy tty mutex, but disassociate_ctty(),
__do_SAK(), tiocspgrp() and tiocgsid() don't.
Two of the writers hold the ctrl_lock (because they already need it for
->pgrp), but __proc_set_tty() doesn't do that yet.
On a PREEMPT=y system, an unprivileged user can theoretically abuse
this broken locking to read 4 bytes of freed memory via TIOCGSID if
tiocgsid() is preempted long enough at the right point. (Other things
might also go wrong, especially if root-only ioctls are involved; I'm
not sure about that.)
Change the locking on ->session such that:
- tty_lock() is held by all writers: By making disassociate_ctty()
hold it. This should be fine because the same lock can already be
taken through the call to tty_vhangup_session().
The tricky part is that we need to shorten the area covered by
siglock to be able to take tty_lock() without ugly retry logic; as
far as I can tell, this should be fine, since nothing in the
signal_struct is touched in the `if (tty)` branch.
- ctrl_lock is held by all writers: By changing __proc_set_tty() to
hold the lock a little longer.
- All readers that aren't holding tty_lock() hold ctrl_lock: By
adding locking to tiocgsid() and __do_SAK(), and expanding the area
covered by ctrl_lock in tiocspgrp().
Cc: [email protected]
Signed-off-by: Jann Horn <[email protected]>
Reviewed-by: Jiri Slaby <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
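The reader-side rule the commit establishes is concrete: a reader that does not hold tty_lock() must take ctrl_lock before touching ->session. A simplified kernel-style sketch of what tiocgsid() looks like under that rule (details trimmed from the real function):

/* Read ->session only under ctrl_lock so the struct pid cannot be freed
 * between the NULL check and pid_vnr(); writers hold the same lock. */
static int tiocgsid_sketch(struct tty_struct *real_tty, pid_t __user *p)
{
    unsigned long flags;
    pid_t sid;

    spin_lock_irqsave(&real_tty->ctrl_lock, flags);
    if (!real_tty->session) {
        spin_unlock_irqrestore(&real_tty->ctrl_lock, flags);
        return -ENOTTY;
    }
    sid = pid_vnr(real_tty->session);
    spin_unlock_irqrestore(&real_tty->ctrl_lock, flags);

    return put_user(sid, p);
}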
static int xfrm_expand_policies(const struct flowi *fl, u16 family,
struct xfrm_policy **pols,
int *num_pols, int *num_xfrms)
{
int i;
if (*num_pols == 0 || !pols[0]) {
*num_pols = 0;
*num_xfrms = 0;
return 0;
}
if (IS_ERR(pols[0]))
return PTR_ERR(pols[0]);
*num_xfrms = pols[0]->xfrm_nr;
#ifdef CONFIG_XFRM_SUB_POLICY
if (pols[0]->action == XFRM_POLICY_ALLOW &&
pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
XFRM_POLICY_TYPE_MAIN,
fl, family,
XFRM_POLICY_OUT,
pols[0]->if_id);
if (pols[1]) {
if (IS_ERR(pols[1])) {
xfrm_pols_put(pols, *num_pols);
return PTR_ERR(pols[1]);
}
(*num_pols)++;
(*num_xfrms) += pols[1]->xfrm_nr;
}
}
#endif
for (i = 0; i < *num_pols; i++) {
if (pols[i]->action != XFRM_POLICY_ALLOW) {
*num_xfrms = -1;
break;
}
}
return 0;
}
| 1 |
[
"CWE-703"
] |
linux
|
f85daf0e725358be78dfd208dea5fd665d8cb901
| 160,505,295,107,260,790,000,000,000,000,000,000,000 | 44 |
xfrm: xfrm_policy: fix a possible double xfrm_pols_put() in xfrm_bundle_lookup()
xfrm_policy_lookup() will call xfrm_pol_hold_rcu() to take a refcount on
pols[0]. This refcount can be dropped in xfrm_expand_policies() when
xfrm_expand_policies() returns an error; pols[0]'s refcount is balanced
there. But xfrm_bundle_lookup() will also call xfrm_pols_put() with
num_pols == 1 to drop this refcount when xfrm_expand_policies() returns
an error.
This patch also fixes an illegal address access: pols[0] holds an error
pointer when xfrm_policy_lookup fails, which leads xfrm_pols_put to
dereference an illegal address in xfrm_bundle_lookup's error path.
Fix these by setting num_pols = 0 in xfrm_expand_policies()'s error path.
Fixes: 80c802f3073e ("xfrm: cache bundles instead of policies for outgoing flows")
Signed-off-by: Hangyu Hua <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]>
|
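The invariant the fix enforces is small: whenever xfrm_expand_policies() fails, *num_pols must read 0, so the caller's cleanup can neither put pols[0] a second time nor treat a stored ERR_PTR as a live policy. An illustrative helper capturing that error path (not a function in the real tree):

/* Drop whatever references this function still owns, then zero the
 * caller-visible count so xfrm_bundle_lookup()'s error path sees
 * nothing left to put. */
static int expand_policies_fail(struct xfrm_policy **pols, int *num_pols,
                                int err)
{
    if (*num_pols > 0)
        xfrm_pols_put(pols, *num_pols);   /* refs dropped exactly once */
    *num_pols = 0;
    return err;
}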
static ut64 va2pa(uint64_t addr, ut32 n_maps, cache_map_t *maps, RzBuffer *cache_buf, ut64 slide, ut32 *offset, ut32 *left) {
ut64 res = UT64_MAX;
ut32 i;
addr -= slide;
for (i = 0; i < n_maps; i++) {
if (addr >= maps[i].address && addr < maps[i].address + maps[i].size) {
res = maps[i].fileOffset + addr - maps[i].address;
if (offset) {
*offset = addr - maps[i].address;
}
if (left) {
*left = maps[i].size - (addr - maps[i].address);
}
break;
}
}
return res;
}
| 0 |
[
"CWE-787"
] |
rizin
|
556ca2f9eef01ec0f4a76d1fbacfcf3a87a44810
| 66,824,782,201,858,095,000,000,000,000,000,000,000 | 21 |
Fix oob write in dyldcache
When the individual n_slide_infos were too high, the sum would overflow
and too few entries would be allocated.
|
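The fix amounts to summing untrusted per-map counts with an explicit wrap check before sizing the allocation. A runnable sketch of that check (illustrative names; the real code sums n_slide_infos across cache maps):

#include <stdbool.h>
#include <stdint.h>

/* Add two unsigned 64-bit counts, refusing the input if the sum would
 * wrap; a crafted file can then no longer make the allocation smaller
 * than the number of entries later written into it. */
static bool checked_add_u64(uint64_t a, uint64_t b, uint64_t *out)
{
    if (a > UINT64_MAX - b)
        return false;
    *out = a + b;
    return true;
}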
static long get_nr_files(void)
{
return percpu_counter_read_positive(&nr_files);
}
| 0 |
[
"CWE-17"
] |
linux
|
eee5cc2702929fd41cce28058dc6d6717f723f87
| 128,556,034,979,285,020,000,000,000,000,000,000,000 | 4 |
get rid of s_files and files_lock
The only thing we need it for is alt-sysrq-r (emergency remount r/o)
and these days we can do just as well without going through the
list of files.
Signed-off-by: Al Viro <[email protected]>
|
_equalCurrentOfExpr(const CurrentOfExpr *a, const CurrentOfExpr *b)
{
COMPARE_SCALAR_FIELD(cvarno);
COMPARE_STRING_FIELD(cursor_name);
COMPARE_SCALAR_FIELD(cursor_param);
return true;
}
| 0 |
[
"CWE-362"
] |
postgres
|
5f173040e324f6c2eebb90d86cf1b0cdb5890f0a
| 109,809,586,224,970,000,000,000,000,000,000,000,000 | 8 |
Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062
|
void _xml_endNamespaceDeclHandler(void *userData, const XML_Char *prefix)
{
xml_parser *parser = (xml_parser *)userData;
if (parser && parser->endNamespaceDeclHandler) {
zval *retval, *args[2];
args[0] = _xml_resource_zval(parser->index);
args[1] = _xml_xmlchar_zval(prefix, 0, parser->target_encoding);
if ((retval = xml_call_handler(parser, parser->endNamespaceDeclHandler, parser->endNamespaceDeclPtr, 2, args))) {
zval_ptr_dtor(&retval);
}
}
}
| 0 |
[
"CWE-787"
] |
php-src
|
7d163e8a0880ae8af2dd869071393e5dc07ef271
| 231,262,317,723,537,400,000,000,000,000,000,000,000 | 14 |
truncate results at depth of 255 to prevent corruption
|
_dbus_auth_get_buffer (DBusAuth *auth,
DBusString **buffer)
{
_dbus_assert (auth != NULL);
_dbus_assert (!auth->buffer_outstanding);
*buffer = &auth->incoming;
auth->buffer_outstanding = TRUE;
}
| 0 |
[
"CWE-59"
] |
dbus
|
47b1a4c41004bf494b87370987b222c934b19016
| 34,506,079,189,498,280,000,000,000,000,000,000,000 | 10 |
auth: Reject DBUS_COOKIE_SHA1 for users other than the server owner
The DBUS_COOKIE_SHA1 authentication mechanism aims to prove ownership
of a shared home directory by having the server write a secret "cookie"
into a .dbus-keyrings subdirectory of the desired identity's home
directory with 0700 permissions, and having the client prove that it can
read the cookie. This never actually worked for non-malicious clients in
the case where server uid != client uid (unless the server and client
both have privileges, such as Linux CAP_DAC_OVERRIDE or traditional
Unix uid 0) because an unprivileged server would fail to write out the
cookie, and an unprivileged client would be unable to read the resulting
file owned by the server.
Additionally, since dbus 1.7.10 we have checked that ~/.dbus-keyrings
is owned by the uid of the server (a side-effect of a check added to
harden our use of XDG_RUNTIME_DIR), further ruling out successful use
by a non-malicious client with a uid differing from the server's.
Joe Vennix of Apple Information Security discovered that the
implementation of DBUS_COOKIE_SHA1 was susceptible to a symbolic link
attack: a malicious client with write access to its own home directory
could manipulate a ~/.dbus-keyrings symlink to cause the DBusServer to
read and write in unintended locations. In the worst case this could
result in the DBusServer reusing a cookie that is known to the
malicious client, and treating that cookie as evidence that a subsequent
client connection came from an attacker-chosen uid, allowing
authentication bypass.
This is mitigated by the fact that by default, the well-known system
dbus-daemon (since 2003) and the well-known session dbus-daemon (in
stable releases since dbus 1.10.0 in 2015) only accept the EXTERNAL
authentication mechanism, and as a result will reject DBUS_COOKIE_SHA1
at an early stage, before manipulating cookies. As a result, this
vulnerability only applies to:
* system or session dbus-daemons with non-standard configuration
* third-party dbus-daemon invocations such as at-spi2-core (although
in practice at-spi2-core also only accepts EXTERNAL by default)
* third-party uses of DBusServer such as the one in Upstart
Avoiding symlink attacks in a portable way is difficult, because APIs
like openat() and Linux /proc/self/fd are not universally available.
However, because DBUS_COOKIE_SHA1 already doesn't work in practice for
a non-matching uid, we can solve this vulnerability in an easier way
without regressions, by rejecting it early (before looking at
~/.dbus-keyrings) whenever the requested identity doesn't match the
identity of the process hosting the DBusServer.
Signed-off-by: Simon McVittie <[email protected]>
Closes: https://gitlab.freedesktop.org/dbus/dbus/issues/269
Closes: CVE-2019-12749
|
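The "rejecting it early" step is the whole fix. A simplified sketch of the decision (DBusCredentials and _dbus_credentials_same_user() exist in libdbus; the wrapper function here is illustrative):

/* Before touching anything under ~/.dbus-keyrings, refuse the mechanism
 * unless the identity the client asked to authenticate as is the same
 * user the DBusServer itself runs as -- the only case in which
 * DBUS_COOKIE_SHA1 could legitimately succeed anyway. */
static dbus_bool_t cookie_sha1_may_proceed(DBusCredentials *desired_identity,
                                           DBusCredentials *my_credentials)
{
    return _dbus_credentials_same_user(desired_identity, my_credentials);
}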
GF_Err gf_isom_set_y3d_info(GF_ISOFile *movie, u32 trackNumber, u32 sampleDescriptionIndex, GF_ISOM_Y3D_Info *info)
{
GF_Err e;
u32 proj_type;
GF_SampleEntryBox *ent;
GF_TrackBox *trak;
e = CanAccessMovie(movie, GF_ISOM_OPEN_WRITE);
if (e) return e;
trak = gf_isom_get_track_from_file(movie, trackNumber);
if (!trak || !trak->Media || !info) return GF_BAD_PARAM;
ent = gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, sampleDescriptionIndex-1);
if (!ent) return GF_BAD_PARAM;
if (info->projection_type > GF_PROJ360_EQR) return GF_NOT_SUPPORTED;
GF_Stereo3DBox *st3d = (GF_Stereo3DBox *) gf_isom_box_find_child(ent->child_boxes, GF_ISOM_BOX_TYPE_ST3D);
if (st3d) {
if (info->stereo_type) {
st3d->stereo_type = info->stereo_type;
} else {
gf_isom_box_del_parent(&ent->child_boxes, (GF_Box *) st3d);
}
} else if (info->stereo_type) {
st3d = (GF_Stereo3DBox *) gf_isom_box_new_parent(&ent->child_boxes, GF_ISOM_BOX_TYPE_ST3D);
if (!st3d) return GF_OUT_OF_MEM;
st3d->stereo_type = info->stereo_type;
}
GF_Box *sv3d = gf_isom_box_find_child(ent->child_boxes, GF_ISOM_BOX_TYPE_SV3D);
if (sv3d && !info->projection_type) {
gf_isom_box_del_parent(&ent->child_boxes, sv3d);
return GF_OK;
}
if (!sv3d && !info->projection_type) {
return GF_OK;
}
if (!sv3d) {
sv3d = gf_isom_box_new_parent(&ent->child_boxes, GF_ISOM_BOX_TYPE_SV3D);
if (!sv3d) return GF_OUT_OF_MEM;
}
//svhd mandatory
GF_SphericalVideoInfoBox *svhd = (GF_SphericalVideoInfoBox *) gf_isom_box_find_child(sv3d->child_boxes, GF_ISOM_BOX_TYPE_SVHD);
if (svhd) {
if (svhd->string) gf_free(svhd->string);
} else {
svhd = (GF_SphericalVideoInfoBox *) gf_isom_box_new_parent(&sv3d->child_boxes, GF_ISOM_BOX_TYPE_SVHD);
if (!svhd) return GF_OUT_OF_MEM;
}
svhd->string = gf_strdup(info->meta_data ? info->meta_data : "");
//proj mandatory
GF_Box *proj = gf_isom_box_find_child(sv3d->child_boxes, GF_ISOM_BOX_TYPE_PROJ);
if (!proj) {
proj = (GF_Box *) gf_isom_box_new_parent(&sv3d->child_boxes, GF_ISOM_BOX_TYPE_PROJ);
if (!proj) return GF_OUT_OF_MEM;
}
GF_ProjectionHeaderBox *projh = (GF_ProjectionHeaderBox *) gf_isom_box_find_child(proj->child_boxes, GF_ISOM_BOX_TYPE_PRHD);
//prj header mandatory
if (!projh) {
projh = (GF_ProjectionHeaderBox *) gf_isom_box_new_parent(&proj->child_boxes, GF_ISOM_BOX_TYPE_PRHD);
if (!projh) return GF_OUT_OF_MEM;
}
projh->yaw = info->yaw;
projh->pitch = info->pitch;
projh->roll = info->roll;
proj_type = (info->projection_type==GF_PROJ360_CUBE_MAP) ? GF_ISOM_BOX_TYPE_CBMP : GF_ISOM_BOX_TYPE_EQUI;
GF_ProjectionTypeBox *projt = (GF_ProjectionTypeBox *) gf_isom_box_find_child(proj->child_boxes, proj_type);
if (!projt) {
projt = (GF_ProjectionTypeBox *) gf_isom_box_new_parent(&proj->child_boxes, proj_type);
if (!projt) return GF_OUT_OF_MEM;
}
if (info->projection_type==GF_PROJ360_CUBE_MAP) {
projt->layout = info->layout;
projt->padding = info->padding;
} else {
projt->bounds_top = info->top;
projt->bounds_bottom = info->bottom;
projt->bounds_left = info->left;
projt->bounds_right = info->right;
}
//remove other ones
GF_Box *b = gf_isom_box_new_parent(&proj->child_boxes, GF_ISOM_BOX_TYPE_MSHP);
if (b) gf_isom_box_del_parent(&proj->child_boxes, b);
if (info->projection_type==GF_PROJ360_CUBE_MAP) {
b = gf_isom_box_new_parent(&proj->child_boxes, GF_ISOM_BOX_TYPE_EQUI);
if (b) gf_isom_box_del_parent(&proj->child_boxes, b);
} else {
b = gf_isom_box_new_parent(&proj->child_boxes, GF_ISOM_BOX_TYPE_EQUI);
if (b) gf_isom_box_del_parent(&proj->child_boxes, b);
}
return GF_OK;
}
| 0 |
[
"CWE-476"
] |
gpac
|
ebfa346eff05049718f7b80041093b4c5581c24e
| 156,971,284,785,629,070,000,000,000,000,000,000,000 | 103 |
fixed #1706
|
static int print_media_desc(const pjmedia_sdp_media *m, char *buf, pj_size_t len)
{
char *p = buf;
char *end = buf+len;
unsigned i;
int printed;
/* check length for the "m=" line. */
if (len < (pj_size_t)m->desc.media.slen+m->desc.transport.slen+12+24) {
return -1;
}
*p++ = 'm'; /* m= */
*p++ = '=';
pj_memcpy(p, m->desc.media.ptr, m->desc.media.slen);
p += m->desc.media.slen;
*p++ = ' ';
printed = pj_utoa(m->desc.port, p);
p += printed;
if (m->desc.port_count > 1) {
*p++ = '/';
printed = pj_utoa(m->desc.port_count, p);
p += printed;
}
*p++ = ' ';
pj_memcpy(p, m->desc.transport.ptr, m->desc.transport.slen);
p += m->desc.transport.slen;
for (i=0; i<m->desc.fmt_count; ++i) {
*p++ = ' ';
pj_memcpy(p, m->desc.fmt[i].ptr, m->desc.fmt[i].slen);
p += m->desc.fmt[i].slen;
}
*p++ = '\r';
*p++ = '\n';
/* print connection info, if present. */
if (m->conn) {
printed = print_connection_info(m->conn, p, (int)(end-p));
if (printed < 0) {
return -1;
}
p += printed;
}
/* print optional bandwidth info. */
for (i=0; i<m->bandw_count; ++i) {
printed = (int)print_bandw(m->bandw[i], p, end-p);
if (printed < 0) {
return -1;
}
p += printed;
}
/* print attributes. */
for (i=0; i<m->attr_count; ++i) {
printed = (int)print_attr(m->attr[i], p, end-p);
if (printed < 0) {
return -1;
}
p += printed;
}
return (int)(p-buf);
}
| 1 |
[
"CWE-121",
"CWE-120",
"CWE-787"
] |
pjproject
|
560a1346f87aabe126509bb24930106dea292b00
| 133,454,390,800,145,620,000,000,000,000,000,000,000 | 63 |
Merge pull request from GHSA-f5qg-pqcg-765m
|
int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags)
{
int err = 0;
struct path realpath;
enum ovl_path_type type;
type = ovl_path_real(dentry, &realpath);
if (ovl_open_need_copy_up(file_flags, type, realpath.dentry)) {
err = ovl_want_write(dentry);
if (!err) {
if (file_flags & O_TRUNC)
err = ovl_copy_up_truncate(dentry);
else
err = ovl_copy_up(dentry);
ovl_drop_write(dentry);
}
}
return err;
}
| 0 |
[
"CWE-863"
] |
linux
|
c0ca3d70e8d3cf81e2255a217f7ca402f5ed0862
| 173,645,383,564,731,200,000,000,000,000,000,000,000 | 20 |
ovl: modify ovl_permission() to do checks on two inodes
Right now ovl_permission() calls __inode_permission(realinode), to do
permission checks on real inode and no checks are done on overlay inode.
Modify it to do checks both on overlay inode as well as underlying inode.
Checks on overlay inode will be done with the creds of calling task while
checks on underlying inode will be done with the creds of mounter.
Signed-off-by: Vivek Goyal <[email protected]>
Signed-off-by: Miklos Szeredi <[email protected]>
|
gst_date_time_new_now_local_time (void)
{
return gst_date_time_new_from_g_date_time (g_date_time_new_now_local ());
}
| 0 |
[
"CWE-125"
] |
gstreamer
|
9398b7f1a75b38844ae7050b5a7967e4cdebe24f
| 291,568,273,470,421,940,000,000,000,000,000,000,000 | 4 |
datetime: fix potential out-of-bound read on malformed datetime string
https://bugzilla.gnome.org/show_bug.cgi?id=777263
|
ldbm_config_internal_set(struct ldbminfo *li, char *attrname, char *value)
{
char err_buf[SLAPI_DSE_RETURNTEXT_SIZE];
struct berval bval;
bval.bv_val = value;
bval.bv_len = strlen(value);
if (ldbm_config_set((void *)li, attrname, ldbm_config, &bval,
err_buf, CONFIG_PHASE_INTERNAL, 1 /* apply */,
LDAP_MOD_REPLACE) != LDAP_SUCCESS) {
slapi_log_err(SLAPI_LOG_ERR,
"ldbm_config_internal_set", "Error setting instance config attr %s to %s: %s\n",
attrname, value, err_buf);
exit(1);
}
}
| 0 |
[
"CWE-399",
"CWE-203"
] |
389-ds-base
|
cc0f69283abc082488824702dae485b8eae938bc
| 53,530,010,728,837,190,000,000,000,000,000,000,000 | 17 |
Issue 4480 - Unexpected info returned to ldap request (#4491)
Bug description:
If the bind entry does not exist, the bind result info reports
'No such entry'. It should not reveal whether the target entry
exists or not
Fix description:
Does not return any additional information during a bind
relates: https://github.com/389ds/389-ds-base/issues/4480
Reviewed by: William Brown, Viktor Ashirov, Mark Reynolds (thank you all)
Platforms tested: F31
|
static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
struct kvm *kvm = vcpu->kvm;
switch (msr) {
case HV_X64_MSR_GUEST_OS_ID:
kvm->arch.hv_guest_os_id = data;
/* setting guest os id to zero disables hypercall page */
if (!kvm->arch.hv_guest_os_id)
kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
break;
case HV_X64_MSR_HYPERCALL: {
u64 gfn;
unsigned long addr;
u8 instructions[4];
/* if guest os id is not set hypercall should remain disabled */
if (!kvm->arch.hv_guest_os_id)
break;
if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
kvm->arch.hv_hypercall = data;
break;
}
gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
addr = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(addr))
return 1;
kvm_x86_ops->patch_hypercall(vcpu, instructions);
((unsigned char *)instructions)[3] = 0xc3; /* ret */
if (__copy_to_user((void __user *)addr, instructions, 4))
return 1;
kvm->arch.hv_hypercall = data;
break;
}
default:
vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
"data 0x%llx\n", msr, data);
return 1;
}
return 0;
}
| 0 |
[] |
linux
|
6d1068b3a98519247d8ba4ec85cd40ac136dbdf9
| 134,747,890,191,313,390,000,000,000,000,000,000,000 | 41 |
KVM: x86: invalid opcode oops on SET_SREGS with OSXSAVE bit set (CVE-2012-4461)
On hosts without the XSAVE support unprivileged local user can trigger
oops similar to the one below by setting X86_CR4_OSXSAVE bit in guest
cr4 register using KVM_SET_SREGS ioctl and later issuing KVM_RUN
ioctl.
invalid opcode: 0000 [#2] SMP
Modules linked in: tun ip6table_filter ip6_tables ebtable_nat ebtables
...
Pid: 24935, comm: zoog_kvm_monito Tainted: G D 3.2.0-3-686-pae
EIP: 0060:[<f8b9550c>] EFLAGS: 00210246 CPU: 0
EIP is at kvm_arch_vcpu_ioctl_run+0x92a/0xd13 [kvm]
EAX: 00000001 EBX: 000f387e ECX: 00000000 EDX: 00000000
ESI: 00000000 EDI: 00000000 EBP: ef5a0060 ESP: d7c63e70
DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068
Process zoog_kvm_monito (pid: 24935, ti=d7c62000 task=ed84a0c0
task.ti=d7c62000)
Stack:
00000001 f70a1200 f8b940a9 ef5a0060 00000000 00200202 f8769009 00000000
ef5a0060 000f387e eda5c020 8722f9c8 00015bae 00000000 ed84a0c0 ed84a0c0
c12bf02d 0000ae80 ef7f8740 fffffffb f359b740 ef5a0060 f8b85dc1 0000ae80
Call Trace:
[<f8b940a9>] ? kvm_arch_vcpu_ioctl_set_sregs+0x2fe/0x308 [kvm]
...
[<c12bfb44>] ? syscall_call+0x7/0xb
Code: 89 e8 e8 14 ee ff ff ba 00 00 04 00 89 e8 e8 98 48 ff ff 85 c0 74
1e 83 7d 48 00 75 18 8b 85 08 07 00 00 31 c9 8b 95 0c 07 00 00 <0f> 01
d1 c7 45 48 01 00 00 00 c7 45 1c 01 00 00 00 0f ae f0 89
EIP: [<f8b9550c>] kvm_arch_vcpu_ioctl_run+0x92a/0xd13 [kvm] SS:ESP
0068:d7c63e70
QEMU first retrieves the supported features via KVM_GET_SUPPORTED_CPUID
and then sets them later. So guest's X86_FEATURE_XSAVE should be masked
out on hosts without X86_FEATURE_XSAVE, making kvm_set_cr4 with
X86_CR4_OSXSAVE fail. Userspaces that allow specifying guest cpuid with
X86_FEATURE_XSAVE even on hosts that do not support it, might be
susceptible to this attack from inside the guest as well.
Allow setting X86_CR4_OSXSAVE bit only if host has XSAVE support.
Signed-off-by: Petr Matousek <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]>
|
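The fix is a guard in kvm_set_cr4(). A sketch of roughly how the added check reads (guest_cpuid_has_xsave() was the helper of that era; the guest's CPUID is in turn masked by host support via KVM_GET_SUPPORTED_CPUID):

/* Refuse X86_CR4_OSXSAVE unless the guest's CPUID advertises XSAVE;
 * returning 1 makes KVM inject #GP instead of letting the host oops. */
if ((cr4 & X86_CR4_OSXSAVE) && !guest_cpuid_has_xsave(vcpu))
    return 1;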
nautilus_file_update_name (NautilusFile *file, const char *name)
{
gboolean ret;
ret = update_name_internal (file, name, TRUE);
if (ret) {
update_links_if_target (file);
}
return ret;
}
| 0 |
[] |
nautilus
|
7632a3e13874a2c5e8988428ca913620a25df983
| 307,618,123,619,912,660,000,000,000,000,000,000,000 | 12 |
Check for trusted desktop file launchers.
2009-02-24 Alexander Larsson <[email protected]>
* libnautilus-private/nautilus-directory-async.c:
Check for trusted desktop file launchers.
* libnautilus-private/nautilus-file-private.h:
* libnautilus-private/nautilus-file.c:
* libnautilus-private/nautilus-file.h:
Add nautilus_file_is_trusted_link.
Allow unsetting of custom display name.
* libnautilus-private/nautilus-mime-actions.c:
Display dialog when trying to launch a non-trusted desktop file.
svn path=/trunk/; revision=15003
|
mailimf_unparsed_fields_parse(const char * message, size_t length,
size_t * indx,
struct mailimf_unparsed_fields ** result)
{
size_t cur_token;
clist * list;
struct mailimf_unparsed_fields * fields;
int r;
int res;
cur_token = * indx;
list = NULL;
r = mailimf_struct_multiple_parse(message, length, &cur_token,
&list,
(mailimf_struct_parser *)
mailimf_optional_field_parse,
(mailimf_struct_destructor *)
mailimf_optional_field_free);
/*
if ((r = MAILIMF_NO_ERROR) && (r != MAILIMF_ERROR_PARSE)) {
res = r;
goto err;
}
*/
switch (r) {
case MAILIMF_NO_ERROR:
/* do nothing */
break;
case MAILIMF_ERROR_PARSE:
list = clist_new();
if (list == NULL) {
res = MAILIMF_ERROR_MEMORY;
goto err;
}
break;
default:
res = r;
goto err;
}
fields = mailimf_unparsed_fields_new(list);
if (fields == NULL) {
res = MAILIMF_ERROR_MEMORY;
goto free;
}
* result = fields;
* indx = cur_token;
return MAILIMF_NO_ERROR;
free:
if (list != NULL) {
clist_foreach(list, (clist_func) mailimf_optional_field_free, NULL);
clist_free(list);
}
err:
return res;
}
| 0 |
[
"CWE-476"
] |
libetpan
|
1fe8fbc032ccda1db9af66d93016b49c16c1f22d
| 310,426,217,844,780,740,000,000,000,000,000,000,000 | 64 |
Fixed crash #274
|
buf_add_chunk_with_capacity(buf_t *buf, size_t capacity, int capped)
{
chunk_t *chunk;
struct timeval now;
if (CHUNK_ALLOC_SIZE(capacity) < buf->default_chunk_size) {
chunk = chunk_new_with_alloc_size(buf->default_chunk_size);
} else if (capped && CHUNK_ALLOC_SIZE(capacity) > MAX_CHUNK_ALLOC) {
chunk = chunk_new_with_alloc_size(MAX_CHUNK_ALLOC);
} else {
chunk = chunk_new_with_alloc_size(preferred_chunk_size(capacity));
}
tor_gettimeofday_cached_monotonic(&now);
chunk->inserted_time = (uint32_t)tv_to_msec(&now);
if (buf->tail) {
tor_assert(buf->head);
buf->tail->next = chunk;
buf->tail = chunk;
} else {
tor_assert(!buf->head);
buf->head = buf->tail = chunk;
}
check();
return chunk;
}
| 0 |
[] |
tor
|
19df037e53331ae528b876f225be08f198e0f8b6
| 297,650,065,430,345,660,000,000,000,000,000,000,000 | 26 |
Log malformed hostnames in socks5 request respecting SafeLogging
|
static RAnalValue *anal_pcrel_disp_mov(RAnal* anal, RAnalOp* op, ut8 disp, int size){
RAnalValue *ret = r_anal_value_new ();
if (size==2) {
ret->base = op->addr+4;
ret->delta = disp<<1;
} else {
ret->base = (op->addr+4) & ~0x03;
ret->delta = disp<<2;
}
return ret;
}
| 0 |
[
"CWE-125"
] |
radare2
|
77c47cf873dd55b396da60baa2ca83bbd39e4add
| 145,413,884,877,270,960,000,000,000,000,000,000,000 | 12 |
Fix #9903 - oobread in RAnal.sh
|
xfs_bulkstat_fmt(
struct xfs_ibulk *breq,
const struct xfs_bulkstat *bstat)
{
if (copy_to_user(breq->ubuffer, bstat, sizeof(struct xfs_bulkstat)))
return -EFAULT;
return xfs_ibulk_advance(breq, sizeof(struct xfs_bulkstat));
}
| 0 |
[
"CWE-125"
] |
linux
|
983d8e60f50806f90534cc5373d0ce867e5aaf79
| 315,944,173,172,825,240,000,000,000,000,000,000,000 | 8 |
xfs: map unwritten blocks in XFS_IOC_{ALLOC,FREE}SP just like fallocate
The old ALLOCSP/FREESP ioctls in XFS can be used to preallocate space at
the end of files, just like fallocate and RESVSP. Make the behavior
consistent with the other ioctls.
Reported-by: Kirill Tkhai <[email protected]>
Signed-off-by: Darrick J. Wong <[email protected]>
Signed-off-by: Darrick J. Wong <[email protected]>
Reviewed-by: Dave Chinner <[email protected]>
Reviewed-by: Eric Sandeen <[email protected]>
|