func
stringlengths 0
484k
| target
int64 0
1
| cwe
listlengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
void *merge_directory_configs(apr_pool_t *mp, void *_parent, void *_child)
{
directory_config *parent = (directory_config *)_parent;
directory_config *child = (directory_config *)_child;
directory_config *merged = create_directory_config(mp, NULL);
#ifdef DEBUG_CONF
ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, mp, "Merge parent %pp child %pp RESULT %pp", _parent, _child, merged);
#endif
if (merged == NULL) return NULL;
/* Use values from the child configuration where possible,
* otherwise use the parent's.
*/
merged->is_enabled = (child->is_enabled == NOT_SET
? parent->is_enabled : child->is_enabled);
/* IO parameters */
merged->reqbody_access = (child->reqbody_access == NOT_SET
? parent->reqbody_access : child->reqbody_access);
merged->reqbody_buffering = (child->reqbody_buffering == NOT_SET
? parent->reqbody_buffering : child->reqbody_buffering);
merged->reqbody_inmemory_limit = (child->reqbody_inmemory_limit == NOT_SET
? parent->reqbody_inmemory_limit : child->reqbody_inmemory_limit);
merged->reqbody_limit = (child->reqbody_limit == NOT_SET
? parent->reqbody_limit : child->reqbody_limit);
merged->reqbody_no_files_limit = (child->reqbody_no_files_limit == NOT_SET
? parent->reqbody_no_files_limit : child->reqbody_no_files_limit);
merged->resbody_access = (child->resbody_access == NOT_SET
? parent->resbody_access : child->resbody_access);
merged->of_limit = (child->of_limit == NOT_SET
? parent->of_limit : child->of_limit);
merged->if_limit_action = (child->if_limit_action == NOT_SET
? parent->if_limit_action : child->if_limit_action);
merged->of_limit_action = (child->of_limit_action == NOT_SET
? parent->of_limit_action : child->of_limit_action);
merged->reqintercept_oe = (child->reqintercept_oe == NOT_SET
? parent->reqintercept_oe : child->reqintercept_oe);
if (child->of_mime_types != NOT_SET_P) {
/* Child added to the table */
if (child->of_mime_types_cleared == 1) {
/* The list of MIME types was cleared in the child,
* which means the parent's MIME types went away and
* we should not take them into consideration here.
*/
merged->of_mime_types = child->of_mime_types;
merged->of_mime_types_cleared = 1;
} else {
/* Add MIME types defined in the child to those
* defined in the parent context.
*/
if (parent->of_mime_types == NOT_SET_P) {
merged->of_mime_types = child->of_mime_types;
merged->of_mime_types_cleared = NOT_SET;
} else {
merged->of_mime_types = apr_table_overlay(mp, parent->of_mime_types,
child->of_mime_types);
if (merged->of_mime_types == NULL) return NULL;
}
}
} else {
/* Child did not add to the table */
if (child->of_mime_types_cleared == 1) {
merged->of_mime_types_cleared = 1;
} else {
merged->of_mime_types = parent->of_mime_types;
merged->of_mime_types_cleared = parent->of_mime_types_cleared;
}
}
/* debug log */
if (child->debuglog_fd == NOT_SET_P) {
merged->debuglog_name = parent->debuglog_name;
merged->debuglog_fd = parent->debuglog_fd;
} else {
merged->debuglog_name = child->debuglog_name;
merged->debuglog_fd = child->debuglog_fd;
}
merged->debuglog_level = (child->debuglog_level == NOT_SET
? parent->debuglog_level : child->debuglog_level);
merged->cookie_format = (child->cookie_format == NOT_SET
? parent->cookie_format : child->cookie_format);
merged->argument_separator = (child->argument_separator == NOT_SET
? parent->argument_separator : child->argument_separator);
merged->cookiev0_separator = (child->cookiev0_separator == NOT_SET_P
? parent->cookiev0_separator : child->cookiev0_separator);
/* rule inheritance */
if ((child->rule_inheritance == NOT_SET)||(child->rule_inheritance == 1)) {
merged->rule_inheritance = parent->rule_inheritance;
if ((child->ruleset == NULL)&&(parent->ruleset == NULL)) {
#ifdef DEBUG_CONF
ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, mp, "No rules in this context.");
#endif
/* Do nothing, there are no rules in either context. */
} else
if (child->ruleset == NULL) {
#ifdef DEBUG_CONF
ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, mp, "Using parent rules in this context.");
#endif
/* Copy the rules from the parent context. */
merged->ruleset = msre_ruleset_create(parent->ruleset->engine, mp);
copy_rules(mp, parent->ruleset, merged->ruleset, child->rule_exceptions);
} else
if (parent->ruleset == NULL) {
#ifdef DEBUG_CONF
ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, mp, "Using child rules in this context.");
#endif
/* Copy child rules. */
merged->ruleset = msre_ruleset_create(child->ruleset->engine, mp);
merged->ruleset->phase_request_headers = apr_array_copy(mp,
child->ruleset->phase_request_headers);
merged->ruleset->phase_request_body = apr_array_copy(mp,
child->ruleset->phase_request_body);
merged->ruleset->phase_response_headers = apr_array_copy(mp,
child->ruleset->phase_response_headers);
merged->ruleset->phase_response_body = apr_array_copy(mp,
child->ruleset->phase_response_body);
merged->ruleset->phase_logging = apr_array_copy(mp,
child->ruleset->phase_logging);
} else {
#ifdef DEBUG_CONF
ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, mp, "Using parent then child rules in this context.");
#endif
/* Copy parent rules, then add child rules to it. */
merged->ruleset = msre_ruleset_create(parent->ruleset->engine, mp);
copy_rules(mp, parent->ruleset, merged->ruleset, child->rule_exceptions);
apr_array_cat(merged->ruleset->phase_request_headers,
child->ruleset->phase_request_headers);
apr_array_cat(merged->ruleset->phase_request_body,
child->ruleset->phase_request_body);
apr_array_cat(merged->ruleset->phase_response_headers,
child->ruleset->phase_response_headers);
apr_array_cat(merged->ruleset->phase_response_body,
child->ruleset->phase_response_body);
apr_array_cat(merged->ruleset->phase_logging,
child->ruleset->phase_logging);
}
} else {
merged->rule_inheritance = 0;
if (child->ruleset != NULL) {
/* Copy child rules. */
merged->ruleset = msre_ruleset_create(child->ruleset->engine, mp);
merged->ruleset->phase_request_headers = apr_array_copy(mp,
child->ruleset->phase_request_headers);
merged->ruleset->phase_request_body = apr_array_copy(mp,
child->ruleset->phase_request_body);
merged->ruleset->phase_response_headers = apr_array_copy(mp,
child->ruleset->phase_response_headers);
merged->ruleset->phase_response_body = apr_array_copy(mp,
child->ruleset->phase_response_body);
merged->ruleset->phase_logging = apr_array_copy(mp,
child->ruleset->phase_logging);
}
}
/* Merge rule exceptions. */
merged->rule_exceptions = apr_array_append(mp, parent->rule_exceptions,
child->rule_exceptions);
merged->hash_method = apr_array_append(mp, parent->hash_method,
child->hash_method);
/* audit log variables */
merged->auditlog_flag = (child->auditlog_flag == NOT_SET
? parent->auditlog_flag : child->auditlog_flag);
merged->auditlog_type = (child->auditlog_type == NOT_SET
? parent->auditlog_type : child->auditlog_type);
merged->max_rule_time = (child->max_rule_time == NOT_SET
? parent->max_rule_time : child->max_rule_time);
merged->auditlog_dirperms = (child->auditlog_dirperms == NOT_SET
? parent->auditlog_dirperms : child->auditlog_dirperms);
merged->auditlog_fileperms = (child->auditlog_fileperms == NOT_SET
? parent->auditlog_fileperms : child->auditlog_fileperms);
if (child->auditlog_fd != NOT_SET_P) {
merged->auditlog_fd = child->auditlog_fd;
merged->auditlog_name = child->auditlog_name;
} else {
merged->auditlog_fd = parent->auditlog_fd;
merged->auditlog_name = parent->auditlog_name;
}
if (child->auditlog2_fd != NOT_SET_P) {
merged->auditlog2_fd = child->auditlog2_fd;
merged->auditlog2_name = child->auditlog2_name;
} else {
merged->auditlog2_fd = parent->auditlog2_fd;
merged->auditlog2_name = parent->auditlog2_name;
}
merged->auditlog_storage_dir = (child->auditlog_storage_dir == NOT_SET_P
? parent->auditlog_storage_dir : child->auditlog_storage_dir);
merged->auditlog_parts = (child->auditlog_parts == NOT_SET_P
? parent->auditlog_parts : child->auditlog_parts);
merged->auditlog_relevant_regex = (child->auditlog_relevant_regex == NOT_SET_P
? parent->auditlog_relevant_regex : child->auditlog_relevant_regex);
/* Upload */
merged->tmp_dir = (child->tmp_dir == NOT_SET_P
? parent->tmp_dir : child->tmp_dir);
merged->upload_dir = (child->upload_dir == NOT_SET_P
? parent->upload_dir : child->upload_dir);
merged->upload_keep_files = (child->upload_keep_files == NOT_SET
? parent->upload_keep_files : child->upload_keep_files);
merged->upload_validates_files = (child->upload_validates_files == NOT_SET
? parent->upload_validates_files : child->upload_validates_files);
merged->upload_filemode = (child->upload_filemode == NOT_SET
? parent->upload_filemode : child->upload_filemode);
merged->upload_file_limit = (child->upload_file_limit == NOT_SET
? parent->upload_file_limit : child->upload_file_limit);
/* Misc */
merged->data_dir = (child->data_dir == NOT_SET_P
? parent->data_dir : child->data_dir);
merged->webappid = (child->webappid == NOT_SET_P
? parent->webappid : child->webappid);
merged->sensor_id = (child->sensor_id == NOT_SET_P
? parent->sensor_id : child->sensor_id);
merged->httpBlkey = (child->httpBlkey == NOT_SET_P
? parent->httpBlkey : child->httpBlkey);
/* Content injection. */
merged->content_injection_enabled = (child->content_injection_enabled == NOT_SET
? parent->content_injection_enabled : child->content_injection_enabled);
/* Stream inspection */
merged->stream_inbody_inspection = (child->stream_inbody_inspection == NOT_SET
? parent->stream_inbody_inspection : child->stream_inbody_inspection);
merged->stream_outbody_inspection = (child->stream_outbody_inspection == NOT_SET
? parent->stream_outbody_inspection : child->stream_outbody_inspection);
/* Geo Lookup */
merged->geo = (child->geo == NOT_SET_P
? parent->geo : child->geo);
/* Gsb Lookup */
merged->gsb = (child->gsb == NOT_SET_P
? parent->gsb : child->gsb);
/* Unicode Map */
merged->u_map = (child->u_map == NOT_SET_P
? parent->u_map : child->u_map);
/* Cache */
merged->cache_trans = (child->cache_trans == NOT_SET
? parent->cache_trans : child->cache_trans);
merged->cache_trans_incremental = (child->cache_trans_incremental == NOT_SET
? parent->cache_trans_incremental : child->cache_trans_incremental);
merged->cache_trans_min = (child->cache_trans_min == (apr_size_t)NOT_SET
? parent->cache_trans_min : child->cache_trans_min);
merged->cache_trans_max = (child->cache_trans_max == (apr_size_t)NOT_SET
? parent->cache_trans_max : child->cache_trans_max);
merged->cache_trans_maxitems = (child->cache_trans_maxitems == (apr_size_t)NOT_SET
? parent->cache_trans_maxitems : child->cache_trans_maxitems);
/* Merge component signatures. */
merged->component_signatures = apr_array_append(mp, parent->component_signatures,
child->component_signatures);
merged->request_encoding = (child->request_encoding == NOT_SET_P
? parent->request_encoding : child->request_encoding);
merged->disable_backend_compression = (child->disable_backend_compression == NOT_SET
? parent->disable_backend_compression : child->disable_backend_compression);
merged->col_timeout = (child->col_timeout == NOT_SET
? parent->col_timeout : child->col_timeout);
/* Hash */
merged->crypto_key = (child->crypto_key == NOT_SET_P
? parent->crypto_key : child->crypto_key);
merged->crypto_key_len = (child->crypto_key_len == NOT_SET
? parent->crypto_key_len : child->crypto_key_len);
merged->crypto_key_add = (child->crypto_key_add == NOT_SET
? parent->crypto_key_add : child->crypto_key_add);
merged->crypto_param_name = (child->crypto_param_name == NOT_SET_P
? parent->crypto_param_name : child->crypto_param_name);
merged->hash_is_enabled = (child->hash_is_enabled == NOT_SET
? parent->hash_is_enabled : child->hash_is_enabled);
merged->hash_enforcement = (child->hash_enforcement == NOT_SET
? parent->hash_enforcement : child->hash_enforcement);
merged->crypto_hash_href_rx = (child->crypto_hash_href_rx == NOT_SET
? parent->crypto_hash_href_rx : child->crypto_hash_href_rx);
merged->crypto_hash_faction_rx = (child->crypto_hash_faction_rx == NOT_SET
? parent->crypto_hash_faction_rx : child->crypto_hash_faction_rx);
merged->crypto_hash_location_rx = (child->crypto_hash_location_rx == NOT_SET
? parent->crypto_hash_location_rx : child->crypto_hash_location_rx);
merged->crypto_hash_iframesrc_rx = (child->crypto_hash_iframesrc_rx == NOT_SET
? parent->crypto_hash_iframesrc_rx : child->crypto_hash_iframesrc_rx);
merged->crypto_hash_framesrc_rx = (child->crypto_hash_framesrc_rx == NOT_SET
? parent->crypto_hash_framesrc_rx : child->crypto_hash_framesrc_rx);
merged->crypto_hash_href_pm = (child->crypto_hash_href_pm == NOT_SET
? parent->crypto_hash_href_pm : child->crypto_hash_href_pm);
merged->crypto_hash_faction_pm = (child->crypto_hash_faction_pm == NOT_SET
? parent->crypto_hash_faction_pm : child->crypto_hash_faction_pm);
merged->crypto_hash_location_pm = (child->crypto_hash_location_pm == NOT_SET
? parent->crypto_hash_location_pm : child->crypto_hash_location_pm);
merged->crypto_hash_iframesrc_pm = (child->crypto_hash_iframesrc_pm == NOT_SET
? parent->crypto_hash_iframesrc_pm : child->crypto_hash_iframesrc_pm);
merged->crypto_hash_framesrc_pm = (child->crypto_hash_framesrc_pm == NOT_SET
? parent->crypto_hash_framesrc_pm : child->crypto_hash_framesrc_pm);
/* xml external entity */
merged->xml_external_entity = (child->xml_external_entity == NOT_SET
? parent->xml_external_entity : child->xml_external_entity);
return merged;
}
| 0 |
[
"CWE-20",
"CWE-611"
] |
ModSecurity
|
d4d80b38aa85eccb26e3c61b04d16e8ca5de76fe
| 174,905,201,113,194,640,000,000,000,000,000,000,000 | 320 |
Added SecXmlExternalEntity
|
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
clear_bit(flag, bits);
}
| 0 |
[
"CWE-476",
"CWE-119"
] |
linux
|
715230a44310a8cf66fbfb5a46f9a62a9b2de424
| 149,738,681,535,258,100,000,000,000,000,000,000,000 | 4 |
tg3: fix length overflow in VPD firmware parsing
Commit 184b89044fb6e2a74611dafa69b1dce0d98612c6 ("tg3: Use VPD fw version
when present") introduced VPD parsing that contained a potential length
overflow.
Limit the hardware's reported firmware string length (max 255 bytes) to
stay inside the driver's firmware string length (32 bytes). On overflow,
truncate the formatted firmware string instead of potentially overwriting
portions of the tg3 struct.
http://cansecwest.com/slides/2013/PrivateCore%20CSW%202013.pdf
Signed-off-by: Kees Cook <[email protected]>
Reported-by: Oded Horovitz <[email protected]>
Reported-by: Brad Spengler <[email protected]>
Cc: [email protected]
Cc: Matt Carlson <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
void dcn20_populate_dml_writeback_from_context(
struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
{
int pipe_cnt, i;
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_writeback_info *wb_info = &res_ctx->pipe_ctx[i].stream->writeback_info[0];
if (!res_ctx->pipe_ctx[i].stream)
continue;
/* Set writeback information */
pipes[pipe_cnt].dout.wb_enable = (wb_info->wb_enabled == true) ? 1 : 0;
pipes[pipe_cnt].dout.num_active_wb++;
pipes[pipe_cnt].dout.wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_height;
pipes[pipe_cnt].dout.wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_width;
pipes[pipe_cnt].dout.wb.wb_dst_width = wb_info->dwb_params.dest_width;
pipes[pipe_cnt].dout.wb.wb_dst_height = wb_info->dwb_params.dest_height;
pipes[pipe_cnt].dout.wb.wb_htaps_luma = 1;
pipes[pipe_cnt].dout.wb.wb_vtaps_luma = 1;
pipes[pipe_cnt].dout.wb.wb_htaps_chroma = wb_info->dwb_params.scaler_taps.h_taps_c;
pipes[pipe_cnt].dout.wb.wb_vtaps_chroma = wb_info->dwb_params.scaler_taps.v_taps_c;
pipes[pipe_cnt].dout.wb.wb_hratio = 1.0;
pipes[pipe_cnt].dout.wb.wb_vratio = 1.0;
if (wb_info->dwb_params.out_format == dwb_scaler_mode_yuv420) {
if (wb_info->dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC)
pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_8;
else
pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_10;
} else
pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_444_32;
pipe_cnt++;
}
}
| 0 |
[
"CWE-400",
"CWE-703",
"CWE-401"
] |
linux
|
055e547478a11a6360c7ce05e2afc3e366968a12
| 187,849,459,837,157,730,000,000,000,000,000,000,000 | 36 |
drm/amd/display: memory leak
In dcn*_clock_source_create when dcn20_clk_src_construct fails allocated
clk_src needs release.
Signed-off-by: Navid Emamdoost <[email protected]>
Signed-off-by: Alex Deucher <[email protected]>
|
static ssize_t fuse_direct_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
ssize_t res;
struct inode *inode = file->f_path.dentry->d_inode;
if (is_bad_inode(inode))
return -EIO;
res = fuse_direct_io(file, buf, count, ppos, 0);
fuse_invalidate_attr(inode);
return res;
}
| 0 |
[] |
linux-2.6
|
0bd87182d3ab18a32a8e9175d3f68754c58e3432
| 213,990,682,764,256,540,000,000,000,000,000,000,000 | 15 |
fuse: fix kunmap in fuse_ioctl_copy_user
Looks like another victim of the confusing kmap() vs kmap_atomic() API
differences.
Reported-by: Todor Gyumyushev <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
Signed-off-by: Miklos Szeredi <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: [email protected]
|
vhost_user_get_inflight_fd(struct virtio_net **pdev,
struct vhu_msg_context *ctx,
int main_fd __rte_unused)
{
struct rte_vhost_inflight_info_packed *inflight_packed;
uint64_t pervq_inflight_size, mmap_size;
uint16_t num_queues, queue_size;
struct virtio_net *dev = *pdev;
int fd, i, j;
int numa_node = SOCKET_ID_ANY;
void *addr;
if (ctx->msg.size != sizeof(ctx->msg.payload.inflight)) {
VHOST_LOG_CONFIG(ERR, "(%s) invalid get_inflight_fd message size is %d\n",
dev->ifname, ctx->msg.size);
return RTE_VHOST_MSG_RESULT_ERR;
}
/*
* If VQ 0 has already been allocated, try to allocate on the same
* NUMA node. It can be reallocated later in numa_realloc().
*/
if (dev->nr_vring > 0)
numa_node = dev->virtqueue[0]->numa_node;
if (dev->inflight_info == NULL) {
dev->inflight_info = rte_zmalloc_socket("inflight_info",
sizeof(struct inflight_mem_info), 0, numa_node);
if (!dev->inflight_info) {
VHOST_LOG_CONFIG(ERR, "(%s) failed to alloc dev inflight area\n",
dev->ifname);
return RTE_VHOST_MSG_RESULT_ERR;
}
dev->inflight_info->fd = -1;
}
num_queues = ctx->msg.payload.inflight.num_queues;
queue_size = ctx->msg.payload.inflight.queue_size;
VHOST_LOG_CONFIG(INFO, "(%s) get_inflight_fd num_queues: %u\n",
dev->ifname, ctx->msg.payload.inflight.num_queues);
VHOST_LOG_CONFIG(INFO, "(%s) get_inflight_fd queue_size: %u\n",
dev->ifname, ctx->msg.payload.inflight.queue_size);
if (vq_is_packed(dev))
pervq_inflight_size = get_pervq_shm_size_packed(queue_size);
else
pervq_inflight_size = get_pervq_shm_size_split(queue_size);
mmap_size = num_queues * pervq_inflight_size;
addr = inflight_mem_alloc(dev, "vhost-inflight", mmap_size, &fd);
if (!addr) {
VHOST_LOG_CONFIG(ERR, "(%s) failed to alloc vhost inflight area\n", dev->ifname);
ctx->msg.payload.inflight.mmap_size = 0;
return RTE_VHOST_MSG_RESULT_ERR;
}
memset(addr, 0, mmap_size);
if (dev->inflight_info->addr) {
munmap(dev->inflight_info->addr, dev->inflight_info->size);
dev->inflight_info->addr = NULL;
}
if (dev->inflight_info->fd >= 0) {
close(dev->inflight_info->fd);
dev->inflight_info->fd = -1;
}
dev->inflight_info->addr = addr;
dev->inflight_info->size = ctx->msg.payload.inflight.mmap_size = mmap_size;
dev->inflight_info->fd = ctx->fds[0] = fd;
ctx->msg.payload.inflight.mmap_offset = 0;
ctx->fd_num = 1;
if (vq_is_packed(dev)) {
for (i = 0; i < num_queues; i++) {
inflight_packed =
(struct rte_vhost_inflight_info_packed *)addr;
inflight_packed->used_wrap_counter = 1;
inflight_packed->old_used_wrap_counter = 1;
for (j = 0; j < queue_size; j++)
inflight_packed->desc[j].next = j + 1;
addr = (void *)((char *)addr + pervq_inflight_size);
}
}
VHOST_LOG_CONFIG(INFO, "(%s) send inflight mmap_size: %"PRIu64"\n",
dev->ifname, ctx->msg.payload.inflight.mmap_size);
VHOST_LOG_CONFIG(INFO, "(%s) send inflight mmap_offset: %"PRIu64"\n",
dev->ifname, ctx->msg.payload.inflight.mmap_offset);
VHOST_LOG_CONFIG(INFO, "(%s) send inflight fd: %d\n", dev->ifname, ctx->fds[0]);
return RTE_VHOST_MSG_RESULT_REPLY;
}
| 1 |
[
"CWE-703"
] |
dpdk
|
af74f7db384ed149fe42b21dbd7975f8a54ef227
| 33,623,247,974,536,320,000,000,000,000,000,000,000 | 94 |
vhost: fix FD leak with inflight messages
Even if unlikely, a buggy vhost-user master might attach fds to inflight
messages. Add checks like for other types of vhost-user messages.
Fixes: d87f1a1cb7b6 ("vhost: support inflight info sharing")
Cc: [email protected]
Signed-off-by: David Marchand <[email protected]>
Reviewed-by: Maxime Coquelin <[email protected]>
|
void sched_idle_next(void)
{
int this_cpu = smp_processor_id();
struct rq *rq = cpu_rq(this_cpu);
struct task_struct *p = rq->idle;
unsigned long flags;
/* cpu has to be offline */
BUG_ON(cpu_online(this_cpu));
/*
* Strictly not necessary since rest of the CPUs are stopped by now
* and interrupts disabled on the current cpu.
*/
spin_lock_irqsave(&rq->lock, flags);
__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
update_rq_clock(rq);
activate_task(rq, p, 0);
spin_unlock_irqrestore(&rq->lock, flags);
}
| 0 |
[] |
linux-2.6
|
8f1bc385cfbab474db6c27b5af1e439614f3025c
| 250,992,093,315,132,000,000,000,000,000,000,000,000 | 23 |
sched: fair: weight calculations
In order to level the hierarchy, we need to calculate load based on the
root view. That is, each task's load is in the same unit.
A
/ \
B 1
/ \
2 3
To compute 1's load we do:
weight(1)
--------------
rq_weight(A)
To compute 2's load we do:
weight(2) weight(B)
------------ * -----------
rq_weight(B) rw_weight(A)
This yields load fractions in comparable units.
The consequence is that it changes virtual time. We used to have:
time_{i}
vtime_{i} = ------------
weight_{i}
vtime = \Sum vtime_{i} = time / rq_weight.
But with the new way of load calculation we get that vtime equals time.
Signed-off-by: Peter Zijlstra <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
|
*/
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
struct sk_buff_head *head)
{
if (!skb_queue_empty(list)) {
__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
head->qlen += list->qlen;
__skb_queue_head_init(list);
}
| 0 |
[
"CWE-20"
] |
linux
|
2b16f048729bf35e6c28a40cbfad07239f9dcd90
| 233,819,840,886,779,000,000,000,000,000,000,000,000 | 9 |
net: create skb_gso_validate_mac_len()
If you take a GSO skb, and split it into packets, will the MAC
length (L2 + L3 + L4 headers + payload) of those packets be small
enough to fit within a given length?
Move skb_gso_mac_seglen() to skbuff.h with other related functions
like skb_gso_network_seglen() so we can use it, and then create
skb_gso_validate_mac_len to do the full calculation.
Signed-off-by: Daniel Axtens <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
Binary::Binary() {
format_ = LIEF::EXE_FORMATS::FORMAT_MACHO;
}
| 0 |
[
"CWE-703"
] |
LIEF
|
7acf0bc4224081d4f425fcc8b2e361b95291d878
| 334,470,605,167,625,260,000,000,000,000,000,000,000 | 3 |
Resolve #764
|
void ZrtpStateClass::evDetect(void) {
DEBUGOUT((cout << "Checking for match in Detect.\n"));
char *msg, first, last;
uint8_t *pkt;
uint32_t errorCode = 0;
/*
* First check the general event type, then discrimnate
* the real event.
*/
if (event->type == ZrtpPacket) {
pkt = event->packet;
msg = (char *)pkt + 4;
first = tolower(*msg);
last = tolower(*(msg+7));
/*
* HelloAck:
* - our peer acknowledged our Hello packet, we have not seen the peer's Hello yet
* - cancel timer T1 to stop resending Hello
* - switch to state AckDetected, wait for peer's Hello (F3)
*
* When we receive an HelloAck this also means that out partner accepted our protocol version.
*/
if (first == 'h' && last =='k') {
cancelTimer();
sentPacket = NULL;
nextState(AckDetected);
return;
}
/*
* Hello:
* - send HelloAck packet to acknowledge the received Hello packet if versions match.
* Otherweise negotiate ZRTP versions.
* - use received Hello packet to prepare own Commit packet. We need to
* do it at this point because we need the hash value computed from
* peer's Hello packet. Follwing states my use the prepared Commit.
* - switch to new state AckSent which sends own Hello packet until
* peer acknowledges this
* - Don't clear sentPacket, points to Hello
*/
if (first == 'h' && last ==' ') {
ZrtpPacketHello hpkt(pkt);
cancelTimer();
/*
* Check and negotiate the ZRTP protocol version first.
*
* This selection mechanism relies on the fact that we sent the highest supported protocol version in
* the initial Hello packet with as stated in RFC6189, section 4.1.1
*/
int32_t recvVersion = hpkt.getVersionInt();
if (recvVersion > sentVersion) { // We don't support this version, stay in state with timer active
if (startTimer(&T1) <= 0) {
timerFailed(SevereNoTimer); // returns to state Initial
}
return;
}
/*
* The versions don't match. Start negotiating versions. This negotiation stays in the Detect state.
* Only if the received version matches our own sent version we start to send a HelloAck.
*/
if (recvVersion != sentVersion) {
ZRtp::HelloPacketVersion* hpv = parent->helloPackets;
int32_t index;
for (index = 0; hpv->packet && hpv->packet != parent->currentHelloPacket; hpv++, index++) // Find current sent Hello
;
for(; index >= 0 && hpv->version > recvVersion; hpv--, index--) // find a supported version less-equal to received version
;
if (index < 0) {
sendErrorPacket(UnsuppZRTPVersion);
return;
}
parent->currentHelloPacket = hpv->packet;
sentVersion = parent->currentHelloPacket->getVersionInt();
// remember packet for easy resend in case timer triggers
sentPacket = static_cast<ZrtpPacketBase *>(parent->currentHelloPacket);
if (!parent->sendPacketZRTP(sentPacket)) {
sendFailed(); // returns to state Initial
return;
}
if (startTimer(&T1) <= 0) {
timerFailed(SevereNoTimer); // returns to state Initial
return;
}
return;
}
ZrtpPacketHelloAck* helloAck = parent->prepareHelloAck();
if (!parent->sendPacketZRTP(static_cast<ZrtpPacketBase *>(helloAck))) {
parent->zrtpNegotiationFailed(Severe, SevereCannotSend);
return;
}
// Use peer's Hello packet to create my commit packet, store it
// for possible later usage in state AckSent
commitPkt = parent->prepareCommit(&hpkt, &errorCode);
nextState(AckSent);
if (commitPkt == NULL) {
sendErrorPacket(errorCode); // switches to Error state
return;
}
if (startTimer(&T1) <= 0) { // restart own Hello timer/counter
timerFailed(SevereNoTimer); // returns to state Initial
}
T1.maxResend = 60; // more retries to extend time, see chap. 6
}
return; // unknown packet for this state - Just ignore it
}
// Timer event triggered - this is Timer T1 to resend Hello
else if (event->type == Timer) {
if (!parent->sendPacketZRTP(sentPacket)) {
sendFailed(); // returns to state Initial
return;
}
if (nextTimer(&T1) <= 0) {
commitPkt = NULL;
parent->zrtpNotSuppOther();
nextState(Detect);
}
}
// If application calls zrtpStart() to restart discovery
else if (event->type == ZrtpInitial) {
cancelTimer();
if (!parent->sendPacketZRTP(sentPacket)) {
sendFailed(); // returns to state Initial
return;
}
if (startTimer(&T1) <= 0) {
timerFailed(SevereNoTimer); // returns to state Initial
}
}
else { // unknown Event type for this state (covers Error and ZrtpClose)
if (event->type != ZrtpClose) {
parent->zrtpNegotiationFailed(Severe, SevereProtocolError);
}
sentPacket = NULL;
nextState(Initial);
}
}
| 0 |
[
"CWE-119"
] |
ZRTPCPP
|
c8617100f359b217a974938c5539a1dd8a120b0e
| 32,885,919,697,249,452,000,000,000,000,000,000,000 | 149 |
Fix vulnerabilities found and reported by Mark Dowd
- limit length of memcpy
- limit number of offered algorithms in Hello packet
- length check in PING packet
- fix a small coding error
|
print_distance_range(FILE* f, OnigDistance a, OnigDistance b)
{
if (a == ONIG_INFINITE_DISTANCE)
fputs("inf", f);
else
fprintf(f, "(%u)", a);
fputs("-", f);
if (b == ONIG_INFINITE_DISTANCE)
fputs("inf", f);
else
fprintf(f, "(%u)", b);
}
| 0 |
[
"CWE-125"
] |
php-src
|
c6e34d91b88638966662caac62c4d0e90538e317
| 215,382,974,063,760,570,000,000,000,000,000,000,000 | 14 |
Fix bug #77371 (heap buffer overflow in mb regex functions - compile_string_node)
|
R_API int r_socket_close(RSocket *s) {
int ret = false;
if (!s) {
return false;
}
if (s->fd != R_INVALID_SOCKET) {
#if __UNIX__
shutdown (s->fd, SHUT_RDWR);
#endif
#if __WINDOWS__
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms740481(v=vs.85).aspx
shutdown (s->fd, SD_SEND);
if (r_socket_ready (s, 0, 250)) {
do {
char buf = 0;
ret = recv (s->fd, &buf, 1, 0);
} while (ret != 0 && ret != SOCKET_ERROR);
}
ret = closesocket (s->fd);
#else
ret = close (s->fd);
#endif
s->fd = R_INVALID_SOCKET;
}
#if HAVE_LIB_SSL
if (s->is_ssl && s->sfd) {
SSL_free (s->sfd);
s->sfd = NULL;
}
#endif
return ret;
}
| 0 |
[
"CWE-78"
] |
radare2
|
04edfa82c1f3fa2bc3621ccdad2f93bdbf00e4f9
| 7,698,250,400,327,171,000,000,000,000,000,000,000 | 32 |
Fix command injection on PDB download (#16966)
* Fix r_sys_mkdirp with absolute path on Windows
* Fix build with --with-openssl
* Use RBuffer in r_socket_http_answer()
* r_socket_http_answer: Fix read for big responses
* Implement r_str_escape_sh()
* Cleanup r_socket_connect() on Windows
* Fix socket being created without a protocol
* Fix socket connect with SSL ##socket
* Use select() in r_socket_ready()
* Fix read failing if received only protocol answer
* Fix double-free
* r_socket_http_get: Fail if req. SSL with no support
* Follow redirects in r_socket_http_answer()
* Fix r_socket_http_get result length with R2_CURL=1
* Also follow redirects
* Avoid using curl for downloading PDBs
* Use r_socket_http_get() on UNIXs
* Use WinINet API on Windows for r_socket_http_get()
* Fix command injection
* Fix r_sys_cmd_str_full output for binary data
* Validate GUID on PDB download
* Pass depth to socket_http_get_recursive()
* Remove 'r_' and '__' from static function names
* Fix is_valid_guid
* Fix for comments
|
void APar_ShowMPEG4AACProfileInfo(TrackInfo *track_info) {
if (track_info->descriptor_object_typeID == 1) {
fprintf(stdout, " MPEG-4 AAC Main Profile");
} else if (track_info->descriptor_object_typeID == 2) {
fprintf(
stdout,
" MPEG-4 AAC Low Complexity/LC Profile"); // most files will land here
} else if (track_info->descriptor_object_typeID == 3) {
fprintf(stdout, " MPEG-4 AAC Scaleable Sample Rate/SSR Profile");
} else if (track_info->descriptor_object_typeID == 4) {
fprintf(stdout, " MPEG-4 AAC Long Term Prediction Profile");
} else if (track_info->descriptor_object_typeID == 5) {
fprintf(stdout, " MPEG-4 AAC High Efficiency/HE Profile");
} else if (track_info->descriptor_object_typeID == 6) {
fprintf(stdout, " MPEG-4 AAC Scalable Profile");
} else if (track_info->descriptor_object_typeID == 7) {
fprintf(stdout,
" MPEG-4 AAC Transform domain Weighted INterleave Vector "
"Quantization/TwinVQ Profile");
} else if (track_info->descriptor_object_typeID == 8) {
fprintf(stdout, " MPEG-4 AAC Code Excited Linear Predictive/CELP Profile");
} else if (track_info->descriptor_object_typeID == 9) {
fprintf(stdout, " MPEG-4 AAC HVXC Profile");
} else if (track_info->descriptor_object_typeID == 12) {
fprintf(stdout, " MPEG-4 AAC TTSI Profile");
} else if (track_info->descriptor_object_typeID == 13) {
fprintf(stdout, " MPEG-4 AAC Main Synthesis Profile");
} else if (track_info->descriptor_object_typeID == 14) {
fprintf(stdout, " MPEG-4 AAC Wavetable Synthesis Profile");
} else if (track_info->descriptor_object_typeID == 15) {
fprintf(stdout, " MPEG-4 AAC General MIDI Profile");
} else if (track_info->descriptor_object_typeID == 16) {
fprintf(stdout, " MPEG-4 AAC Algorithmic Synthesis & Audio FX Profile");
} else if (track_info->descriptor_object_typeID == 17) {
fprintf(stdout,
" MPEG-4 AAC AAC Low Complexity/LC (+error recovery) Profile");
} else if (track_info->descriptor_object_typeID == 19) {
fprintf(stdout,
" MPEG-4 AAC Long Term Prediction (+error recovery) Profile");
} else if (track_info->descriptor_object_typeID == 20) {
fprintf(stdout, " MPEG-4 AAC Scalable (+error recovery) Profile");
} else if (track_info->descriptor_object_typeID == 21) {
fprintf(stdout,
" MPEG-4 AAC Transform domain Weighted INterleave Vector "
"Quantization/TwinVQ (+error recovery) Profile");
} else if (track_info->descriptor_object_typeID == 22) {
fprintf(stdout,
" MPEG-4 AAC Bit Sliced Arithmetic Coding/BSAC (+error "
"recovery) Profile");
} else if (track_info->descriptor_object_typeID == 23) {
fprintf(stdout, " MPEG-4 AAC Low Delay/LD (+error recovery) Profile");
} else if (track_info->descriptor_object_typeID == 24) {
fprintf(stdout,
" MPEG-4 AAC Code Excited Linear Predictive/CELP (+error "
"recovery) Profile");
} else if (track_info->descriptor_object_typeID == 25) {
fprintf(stdout, " MPEG-4 AAC HXVC (+error recovery) Profile");
} else if (track_info->descriptor_object_typeID == 26) {
fprintf(stdout,
" MPEG-4 AAC Harmonic and Individual Lines plus "
"Noise/HILN (+error recovery) Profile");
} else if (track_info->descriptor_object_typeID == 27) {
fprintf(stdout, " MPEG-4 AAC Parametric (+error recovery) Profile");
} else if (track_info->descriptor_object_typeID == 31) {
fprintf(
stdout,
" MPEG-4 ALS Audio Lossless Coding"); // I think that mp4alsRM18 writes
// the channels wrong after
// objectedID: 0xF880 has 0
// channels; 0xF890 is 2ch
} else {
fprintf(stdout,
" MPEG-4 Unknown profile: 0x%X",
track_info->descriptor_object_typeID);
}
return;
}
| 0 |
[
"CWE-787"
] |
atomicparsley
|
d72ccf06c98259d7261e0f3ac4fd8717778782c1
| 165,598,504,010,110,280,000,000,000,000,000,000,000 | 80 |
Avoid stack overflow
refs: https://github.com/wez/atomicparsley/issues/32
|
static int pgx_gethdr(jas_stream_t *in, pgx_hdr_t *hdr)
{
int c;
uchar buf[2];
if ((c = jas_stream_getc(in)) == EOF) {
goto error;
}
buf[0] = c;
if ((c = jas_stream_getc(in)) == EOF) {
goto error;
}
buf[1] = c;
hdr->magic = buf[0] << 8 | buf[1];
if (hdr->magic != PGX_MAGIC) {
jas_eprintf("invalid PGX signature\n");
goto error;
}
if ((c = pgx_getc(in)) == EOF || !isspace(c)) {
goto error;
}
if (pgx_getbyteorder(in, &hdr->bigendian)) {
jas_eprintf("cannot get byte order\n");
goto error;
}
if (pgx_getsgnd(in, &hdr->sgnd)) {
jas_eprintf("cannot get signedness\n");
goto error;
}
if (pgx_getuint32(in, &hdr->prec)) {
jas_eprintf("cannot get precision\n");
goto error;
}
if (pgx_getuint32(in, &hdr->width)) {
jas_eprintf("cannot get width\n");
goto error;
}
if (pgx_getuint32(in, &hdr->height)) {
jas_eprintf("cannot get height\n");
goto error;
}
return 0;
error:
return -1;
}
| 1 |
[
"CWE-20",
"CWE-190"
] |
jasper
|
d42b2388f7f8e0332c846675133acea151fc557a
| 92,248,045,301,257,220,000,000,000,000,000,000,000 | 46 |
The generation of the configuration file jas_config.h has been completely
reworked in order to avoid pollution of the global namespace.
Some problematic types like uchar, ulong, and friends have been replaced
with names with a jas_ prefix.
An option max_samples has been added to the BMP and JPEG decoders to
restrict the maximum size of image that they can decode. This change
was made as a (possibly temporary) fix to address security concerns.
A max_samples command-line option has also been added to imginfo.
Whether an image component (for jas_image_t) is stored in memory or on
disk is now based on the component size (rather than the image size).
Some debug log message were added.
Some new integer overflow checks were added.
Some new safe integer add/multiply functions were added.
More pre-C99 cruft was removed. JasPer has numerous "hacks" to
handle pre-C99 compilers. JasPer now assumes C99 support. So, this
pre-C99 cruft is unnecessary and can be removed.
The regression jasper-doublefree-mem_close.jpg has been re-enabled.
Theoretically, it should work more predictably now.
|
static int fuse_fsync(struct file *file, loff_t start, loff_t end,
int datasync)
{
struct inode *inode = file->f_mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
int err;
if (is_bad_inode(inode))
return -EIO;
inode_lock(inode);
/*
* Start writeback against all dirty pages of the inode, then
* wait for all outstanding writes, before sending the FSYNC
* request.
*/
err = file_write_and_wait_range(file, start, end);
if (err)
goto out;
fuse_sync_writes(inode);
/*
* Due to implementation of fuse writeback
* file_write_and_wait_range() does not catch errors.
* We have to do this directly after fuse_sync_writes()
*/
err = file_check_and_advance_wb_err(file);
if (err)
goto out;
err = sync_inode_metadata(inode, 1);
if (err)
goto out;
if (fc->no_fsync)
goto out;
err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNC);
if (err == -ENOSYS) {
fc->no_fsync = 1;
err = 0;
}
out:
inode_unlock(inode);
return err;
}
| 1 |
[
"CWE-459"
] |
linux
|
5d069dbe8aaf2a197142558b6fb2978189ba3454
| 167,431,050,107,389,430,000,000,000,000,000,000,000 | 49 |
fuse: fix bad inode
Jan Kara's analysis of the syzbot report (edited):
The reproducer opens a directory on FUSE filesystem, it then attaches
dnotify mark to the open directory. After that a fuse_do_getattr() call
finds that attributes returned by the server are inconsistent, and calls
make_bad_inode() which, among other things does:
inode->i_mode = S_IFREG;
This then confuses dnotify which doesn't tear down its structures
properly and eventually crashes.
Avoid calling make_bad_inode() on a live inode: switch to a private flag on
the fuse inode. Also add the test to ops which the bad_inode_ops would
have caught.
This bug goes back to the initial merge of fuse in 2.6.14...
Reported-by: [email protected]
Signed-off-by: Miklos Szeredi <[email protected]>
Tested-by: Jan Kara <[email protected]>
Cc: <[email protected]>
|
CImg<Tfloat> get_RGBtoXYZ(const bool use_D65=true) const {
return CImg<Tfloat>(*this,false).RGBtoXYZ(use_D65);
}
| 0 |
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
| 254,584,378,258,595,680,000,000,000,000,000,000,000 | 3 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
|
static inline void io_req_complete(struct io_kiocb *req, long res)
{
__io_req_complete(req, 0, res, 0);
}
| 0 |
[
"CWE-667"
] |
linux
|
3ebba796fa251d042be42b929a2d916ee5c34a49
| 224,832,253,714,481,100,000,000,000,000,000,000,000 | 4 |
io_uring: ensure that SQPOLL thread is started for exit
If we create it in a disabled state because IORING_SETUP_R_DISABLED is
set on ring creation, we need to ensure that we've kicked the thread if
we're exiting before it's been explicitly disabled. Otherwise we can run
into a deadlock where exit is waiting go park the SQPOLL thread, but the
SQPOLL thread itself is waiting to get a signal to start.
That results in the below trace of both tasks hung, waiting on each other:
INFO: task syz-executor458:8401 blocked for more than 143 seconds.
Not tainted 5.11.0-next-20210226-syzkaller #0
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:syz-executor458 state:D stack:27536 pid: 8401 ppid: 8400 flags:0x00004004
Call Trace:
context_switch kernel/sched/core.c:4324 [inline]
__schedule+0x90c/0x21a0 kernel/sched/core.c:5075
schedule+0xcf/0x270 kernel/sched/core.c:5154
schedule_timeout+0x1db/0x250 kernel/time/timer.c:1868
do_wait_for_common kernel/sched/completion.c:85 [inline]
__wait_for_common kernel/sched/completion.c:106 [inline]
wait_for_common kernel/sched/completion.c:117 [inline]
wait_for_completion+0x168/0x270 kernel/sched/completion.c:138
io_sq_thread_park fs/io_uring.c:7115 [inline]
io_sq_thread_park+0xd5/0x130 fs/io_uring.c:7103
io_uring_cancel_task_requests+0x24c/0xd90 fs/io_uring.c:8745
__io_uring_files_cancel+0x110/0x230 fs/io_uring.c:8840
io_uring_files_cancel include/linux/io_uring.h:47 [inline]
do_exit+0x299/0x2a60 kernel/exit.c:780
do_group_exit+0x125/0x310 kernel/exit.c:922
__do_sys_exit_group kernel/exit.c:933 [inline]
__se_sys_exit_group kernel/exit.c:931 [inline]
__x64_sys_exit_group+0x3a/0x50 kernel/exit.c:931
do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x43e899
RSP: 002b:00007ffe89376d48 EFLAGS: 00000246 ORIG_RAX: 00000000000000e7
RAX: ffffffffffffffda RBX: 00000000004af2f0 RCX: 000000000043e899
RDX: 000000000000003c RSI: 00000000000000e7 RDI: 0000000000000000
RBP: 0000000000000000 R08: ffffffffffffffc0 R09: 0000000010000000
R10: 0000000000008011 R11: 0000000000000246 R12: 00000000004af2f0
R13: 0000000000000001 R14: 0000000000000000 R15: 0000000000000001
INFO: task iou-sqp-8401:8402 can't die for more than 143 seconds.
task:iou-sqp-8401 state:D stack:30272 pid: 8402 ppid: 8400 flags:0x00004004
Call Trace:
context_switch kernel/sched/core.c:4324 [inline]
__schedule+0x90c/0x21a0 kernel/sched/core.c:5075
schedule+0xcf/0x270 kernel/sched/core.c:5154
schedule_timeout+0x1db/0x250 kernel/time/timer.c:1868
do_wait_for_common kernel/sched/completion.c:85 [inline]
__wait_for_common kernel/sched/completion.c:106 [inline]
wait_for_common kernel/sched/completion.c:117 [inline]
wait_for_completion+0x168/0x270 kernel/sched/completion.c:138
io_sq_thread+0x27d/0x1ae0 fs/io_uring.c:6717
ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:294
INFO: task iou-sqp-8401:8402 blocked for more than 143 seconds.
Reported-by: [email protected]
Signed-off-by: Jens Axboe <[email protected]>
|
void AbstractSqlMigrator::dumpStatus()
{
qWarning() << " executed Query:";
qWarning() << qPrintable(executedQuery());
qWarning() << " bound Values:";
QList<QVariant> list = boundValues();
for (int i = 0; i < list.size(); ++i)
qWarning() << i << ": " << list.at(i).toString().toAscii().data();
qWarning() << " Error Number:" << lastError().number();
qWarning() << " Error Message:" << lastError().text();
}
| 0 |
[
"CWE-89"
] |
quassel
|
aa1008be162cb27da938cce93ba533f54d228869
| 278,182,301,496,813,160,000,000,000,000,000,000,000 | 11 |
Fixing security vulnerability with Qt 4.8.5+ and PostgreSQL.
Properly detects whether Qt performs slash escaping in SQL queries or
not, and then configures PostgreSQL accordingly. This bug was a
introduced due to a bugfix in Qt 4.8.5 disables slash escaping when
binding queries: https://bugreports.qt-project.org/browse/QTBUG-30076
Thanks to brot and Tucos.
[Fixes #1244]
|
static void ep_free(struct eventpoll *ep)
{
struct rb_node *rbp;
struct epitem *epi;
/* We need to release all tasks waiting for these file */
if (waitqueue_active(&ep->poll_wait))
ep_poll_safewake(&ep->poll_wait);
/*
* We need to lock this because we could be hit by
* eventpoll_release_file() while we're freeing the "struct eventpoll".
* We do not need to hold "ep->mtx" here because the epoll file
* is on the way to be removed and no one has references to it
* anymore. The only hit might come from eventpoll_release_file() but
* holding "epmutex" is sufficient here.
*/
mutex_lock(&epmutex);
/*
* Walks through the whole tree by unregistering poll callbacks.
*/
for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
epi = rb_entry(rbp, struct epitem, rbn);
ep_unregister_pollwait(ep, epi);
}
/*
* Walks through the whole tree by freeing each "struct epitem". At this
* point we are sure no poll callbacks will be lingering around, and also by
* holding "epmutex" we can be sure that no file cleanup code will hit
* us during this operation. So we can avoid the lock on "ep->lock".
*/
while ((rbp = rb_first(&ep->rbr)) != NULL) {
epi = rb_entry(rbp, struct epitem, rbn);
ep_remove(ep, epi);
}
mutex_unlock(&epmutex);
mutex_destroy(&ep->mtx);
free_uid(ep->user);
kfree(ep);
}
| 0 |
[] |
linux-2.6
|
28d82dc1c4edbc352129f97f4ca22624d1fe61de
| 322,347,267,337,918,430,000,000,000,000,000,000,000 | 44 |
epoll: limit paths
The current epoll code can be tickled to run basically indefinitely in
both loop detection path check (on ep_insert()), and in the wakeup paths.
The programs that tickle this behavior set up deeply linked networks of
epoll file descriptors that cause the epoll algorithms to traverse them
indefinitely. A couple of these sample programs have been previously
posted in this thread: https://lkml.org/lkml/2011/2/25/297.
To fix the loop detection path check algorithms, I simply keep track of
the epoll nodes that have been already visited. Thus, the loop detection
becomes proportional to the number of epoll file descriptor and links.
This dramatically decreases the run-time of the loop check algorithm. In
one diabolical case I tried it reduced the run-time from 15 mintues (all
in kernel time) to .3 seconds.
Fixing the wakeup paths could be done at wakeup time in a similar manner
by keeping track of nodes that have already been visited, but the
complexity is harder, since there can be multiple wakeups on different
cpus...Thus, I've opted to limit the number of possible wakeup paths when
the paths are created.
This is accomplished, by noting that the end file descriptor points that
are found during the loop detection pass (from the newly added link), are
actually the sources for wakeup events. I keep a list of these file
descriptors and limit the number and length of these paths that emanate
from these 'source file descriptors'. In the current implemetation I
allow 1000 paths of length 1, 500 of length 2, 100 of length 3, 50 of
length 4 and 10 of length 5. Note that it is sufficient to check the
'source file descriptors' reachable from the newly added link, since no
other 'source file descriptors' will have newly added links. This allows
us to check only the wakeup paths that may have gotten too long, and not
re-check all possible wakeup paths on the system.
In terms of the path limit selection, I think its first worth noting that
the most common case for epoll, is probably the model where you have 1
epoll file descriptor that is monitoring n number of 'source file
descriptors'. In this case, each 'source file descriptor' has a 1 path of
length 1. Thus, I believe that the limits I'm proposing are quite
reasonable and in fact may be too generous. Thus, I'm hoping that the
proposed limits will not prevent any workloads that currently work to
fail.
In terms of locking, I have extended the use of the 'epmutex' to all
epoll_ctl add and remove operations. Currently its only used in a subset
of the add paths. I need to hold the epmutex, so that we can correctly
traverse a coherent graph, to check the number of paths. I believe that
this additional locking is probably ok, since its in the setup/teardown
paths, and doesn't affect the running paths, but it certainly is going to
add some extra overhead. Also, worth noting is that the epmuex was
recently added to the ep_ctl add operations in the initial path loop
detection code using the argument that it was not on a critical path.
Another thing to note here, is the length of epoll chains that is allowed.
Currently, eventpoll.c defines:
/* Maximum number of nesting allowed inside epoll sets */
#define EP_MAX_NESTS 4
This basically means that I am limited to a graph depth of 5 (EP_MAX_NESTS
+ 1). However, this limit is currently only enforced during the loop
check detection code, and only when the epoll file descriptors are added
in a certain order. Thus, this limit is currently easily bypassed. The
newly added check for wakeup paths, stricly limits the wakeup paths to a
length of 5, regardless of the order in which ep's are linked together.
Thus, a side-effect of the new code is a more consistent enforcement of
the graph depth.
Thus far, I've tested this, using the sample programs previously
mentioned, which now either return quickly or return -EINVAL. I've also
testing using the piptest.c epoll tester, which showed no difference in
performance. I've also created a number of different epoll networks and
tested that they behave as expectded.
I believe this solves the original diabolical test cases, while still
preserving the sane epoll nesting.
Signed-off-by: Jason Baron <[email protected]>
Cc: Nelson Elhage <[email protected]>
Cc: Davide Libenzi <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
void dname_pkt_copy(sldns_buffer* pkt, uint8_t* to, uint8_t* dname)
{
/* copy over the dname and decompress it at the same time */
size_t len = 0;
uint8_t lablen;
lablen = *dname++;
while(lablen) {
if(LABEL_IS_PTR(lablen)) {
/* follow pointer */
dname = sldns_buffer_at(pkt, PTR_OFFSET(lablen, *dname));
lablen = *dname++;
continue;
}
if(lablen > LDNS_MAX_LABELLEN) {
*to = 0; /* end the result prematurely */
return;
}
log_assert(lablen <= LDNS_MAX_LABELLEN);
len += (size_t)lablen+1;
if(len >= LDNS_MAX_DOMAINLEN) {
*to = 0; /* end the result prematurely */
log_err("bad dname in dname_pkt_copy");
return;
}
*to++ = lablen;
memmove(to, dname, lablen);
dname += lablen;
to += lablen;
lablen = *dname++;
}
/* copy last \0 */
*to = 0;
}
| 1 |
[
"CWE-835"
] |
unbound
|
2d444a5037acff6024630b88092d9188f2f5d8fe
| 274,904,599,341,244,770,000,000,000,000,000,000,000 | 33 |
- Fix Insufficient Handling of Compressed Names in dname_pkt_copy(),
reported by X41 D-Sec.
|
connection_write_chunkqueue (connection * const con, chunkqueue * const restrict cq, off_t max_bytes)
{
/*assert(!chunkqueue_is_empty(cq));*//* checked by callers */
con->write_request_ts = log_epoch_secs;
max_bytes = connection_write_throttle(con, max_bytes);
if (0 == max_bytes) return 1;
off_t written = cq->bytes_out;
int ret;
#ifdef TCP_CORK
int corked = 0;
#endif
/* walk chunkqueue up to first FILE_CHUNK (if present)
* This may incur memory load misses for pointer chasing, but effectively
* preloads part of the chunkqueue, something which used to be a side effect
* of a previous (less efficient) version of chunkqueue_length() which
* walked the entire chunkqueue (on each and every call). The loads here
* make a measurable difference in performance in underlying call to
* con->network_write() */
if (cq->first->next) {
const chunk *c = cq->first;
while (c->type == MEM_CHUNK && NULL != (c = c->next)) ;
#ifdef TCP_CORK
/* Linux: put a cork into socket as we want to combine write() calls
* but only if we really have multiple chunks including non-MEM_CHUNK
* (or if multiple chunks and TLS), and only if TCP socket */
if (NULL != c || con->is_ssl_sock) {
const int sa_family = sock_addr_get_family(&con->srv_socket->addr);
if (sa_family == AF_INET || sa_family == AF_INET6) {
corked = 1;
(void)setsockopt(con->fd, IPPROTO_TCP, TCP_CORK,
&corked, sizeof(corked));
}
}
#endif
}
ret = con->network_write(con, cq, max_bytes);
if (ret >= 0) {
ret = chunkqueue_is_empty(cq) ? 0 : 1;
}
#ifdef TCP_CORK
if (corked) {
corked = 0;
(void)setsockopt(con->fd, IPPROTO_TCP, TCP_CORK,
&corked, sizeof(corked));
}
#endif
written = cq->bytes_out - written;
con->bytes_written += written;
con->bytes_written_cur_second += written;
request_st * const r = &con->request;
if (r->conf.global_bytes_per_second_cnt_ptr)
*(r->conf.global_bytes_per_second_cnt_ptr) += written;
return ret;
}
| 0 |
[
"CWE-703"
] |
lighttpd1.4
|
b03b86f47b0d5a553137f081fadc482b4af1372d
| 267,932,140,397,626,840,000,000,000,000,000,000,000 | 63 |
[core] fix merging large headers across mult reads (fixes #3059)
(thx mitd)
x-ref:
"Connections stuck in Close_Wait causing 100% cpu usage"
https://redmine.lighttpd.net/issues/3059
|
void mem_cgroup_replace_page_cache(struct page *oldpage,
struct page *newpage)
{
struct mem_cgroup *memcg;
struct page_cgroup *pc;
enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
if (mem_cgroup_disabled())
return;
pc = lookup_page_cgroup(oldpage);
/* fix accounting on old pages */
lock_page_cgroup(pc);
memcg = pc->mem_cgroup;
mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1);
ClearPageCgroupUsed(pc);
unlock_page_cgroup(pc);
if (PageSwapBacked(oldpage))
type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
/*
* Even if newpage->mapping was NULL before starting replacement,
* the newpage may be on LRU(or pagevec for LRU) already. We lock
* LRU while we overwrite pc->mem_cgroup.
*/
__mem_cgroup_commit_charge_lrucare(newpage, memcg, type);
}
| 0 |
[
"CWE-476",
"CWE-415"
] |
linux
|
371528caec553785c37f73fa3926ea0de84f986f
| 28,816,713,359,882,840,000,000,000,000,000,000,000 | 28 |
mm: memcg: Correct unregistring of events attached to the same eventfd
There is an issue when memcg unregisters events that were attached to
the same eventfd:
- On the first call mem_cgroup_usage_unregister_event() removes all
events attached to a given eventfd, and if there were no events left,
thresholds->primary would become NULL;
- Since there were several events registered, cgroups core will call
mem_cgroup_usage_unregister_event() again, but now kernel will oops,
as the function doesn't expect that threshold->primary may be NULL.
That's a good question whether mem_cgroup_usage_unregister_event()
should actually remove all events in one go, but nowadays it can't
do any better as cftype->unregister_event callback doesn't pass
any private event-associated cookie. So, let's fix the issue by
simply checking for threshold->primary.
FWIW, w/o the patch the following oops may be observed:
BUG: unable to handle kernel NULL pointer dereference at 0000000000000004
IP: [<ffffffff810be32c>] mem_cgroup_usage_unregister_event+0x9c/0x1f0
Pid: 574, comm: kworker/0:2 Not tainted 3.3.0-rc4+ #9 Bochs Bochs
RIP: 0010:[<ffffffff810be32c>] [<ffffffff810be32c>] mem_cgroup_usage_unregister_event+0x9c/0x1f0
RSP: 0018:ffff88001d0b9d60 EFLAGS: 00010246
Process kworker/0:2 (pid: 574, threadinfo ffff88001d0b8000, task ffff88001de91cc0)
Call Trace:
[<ffffffff8107092b>] cgroup_event_remove+0x2b/0x60
[<ffffffff8103db94>] process_one_work+0x174/0x450
[<ffffffff8103e413>] worker_thread+0x123/0x2d0
Cc: stable <[email protected]>
Signed-off-by: Anton Vorontsov <[email protected]>
Acked-by: KAMEZAWA Hiroyuki <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: Michal Hocko <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
RList *r_bin_wasm_get_types (RBinWasmObj *bin) {
RBinWasmSection *type = NULL;
RList *types = NULL;
if (!bin || !bin->g_sections) {
return NULL;
}
if (bin->g_types) {
return bin->g_types;
}
if (!(types = r_bin_wasm_get_sections_by_id (bin->g_sections,
R_BIN_WASM_SECTION_TYPE))) {
return r_list_new();
}
// support for multiple export sections against spec
if (!(type = (RBinWasmSection*) r_list_first (types))) {
return r_list_new();
}
bin->g_types = r_bin_wasm_get_type_entries (bin, type);
return bin->g_types;
}
| 0 |
[
"CWE-125",
"CWE-787"
] |
radare2
|
d2632f6483a3ceb5d8e0a5fb11142c51c43978b4
| 223,190,246,305,411,270,000,000,000,000,000,000,000 | 26 |
Fix crash in fuzzed wasm r2_hoobr_consume_init_expr
|
static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
void *data)
{
struct nfs4_accessres *res = data;
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status != 0)
goto out;
status = decode_access(xdr, &res->supported, &res->access);
if (status != 0)
goto out;
if (res->fattr)
decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
| 0 |
[
"CWE-787"
] |
linux
|
b4487b93545214a9db8cbf32e86411677b0cca21
| 57,922,618,835,498,235,000,000,000,000,000,000,000 | 24 |
nfs: Fix getxattr kernel panic and memory overflow
Move the buffer size check to decode_attr_security_label() before memcpy()
Only call memcpy() if the buffer is large enough
Fixes: aa9c2669626c ("NFS: Client implementation of Labeled-NFS")
Signed-off-by: Jeffrey Mitchell <[email protected]>
[Trond: clean up duplicate test of label->len != 0]
Signed-off-by: Trond Myklebust <[email protected]>
|
wc_get_ucs_table(wc_ccs ccs)
{
int f = WC_CCS_INDEX(ccs);
switch (WC_CCS_TYPE(ccs)) {
case WC_CCS_A_CS94:
if (f < WC_F_ISO_BASE || f > WC_F_CS94_END)
return NULL;
return &ucs_cs94_table[f - WC_F_ISO_BASE];
case WC_CCS_A_CS94W:
if (f < WC_F_ISO_BASE || f > WC_F_CS94W_END)
return NULL;
return &ucs_cs94w_table[f - WC_F_ISO_BASE];
case WC_CCS_A_CS96:
if (f < WC_F_ISO_BASE || f > WC_F_CS96_END)
return NULL;
return &ucs_cs96_table[f - WC_F_ISO_BASE];
case WC_CCS_A_CS96W:
if (f < WC_F_ISO_BASE || f > WC_F_CS96W_END)
return NULL;
return &ucs_cs96w_table[f - WC_F_ISO_BASE];
case WC_CCS_A_CS942:
if (f < WC_F_ISO_BASE || f > WC_F_CS942_END)
return NULL;
return &ucs_cs942_table[f - WC_F_ISO_BASE];
case WC_CCS_A_PCS:
if (f < WC_F_PCS_BASE || f > WC_F_PCS_END)
return NULL;
return &ucs_pcs_table[f - WC_F_PCS_BASE];
case WC_CCS_A_PCSW:
if (f < WC_F_PCS_BASE || f > WC_F_PCSW_END)
return NULL;
return &ucs_pcsw_table[f - WC_F_PCS_BASE];
default:
return NULL;
}
}
| 0 |
[
"CWE-119"
] |
w3m
|
716bc126638393c733399d11d3228edb82877faa
| 51,518,017,312,520,240,000,000,000,000,000,000,000 | 37 |
Prevent global-buffer-overflow in wc_any_to_ucs()
Bug-Debian: https://github.com/tats/w3m/issues/43
|
static void account_event(struct perf_event *event)
{
bool inc = false;
if (event->parent)
return;
if (event->attach_state & PERF_ATTACH_TASK)
inc = true;
if (event->attr.mmap || event->attr.mmap_data)
atomic_inc(&nr_mmap_events);
if (event->attr.comm)
atomic_inc(&nr_comm_events);
if (event->attr.task)
atomic_inc(&nr_task_events);
if (event->attr.freq)
account_freq_event();
if (event->attr.context_switch) {
atomic_inc(&nr_switch_events);
inc = true;
}
if (has_branch_stack(event))
inc = true;
if (is_cgroup_event(event))
inc = true;
if (inc) {
if (atomic_inc_not_zero(&perf_sched_count))
goto enabled;
mutex_lock(&perf_sched_mutex);
if (!atomic_read(&perf_sched_count)) {
static_branch_enable(&perf_sched_events);
/*
* Guarantee that all CPUs observe they key change and
* call the perf scheduling hooks before proceeding to
* install events that need them.
*/
synchronize_sched();
}
/*
* Now that we have waited for the sync_sched(), allow further
* increments to by-pass the mutex.
*/
atomic_inc(&perf_sched_count);
mutex_unlock(&perf_sched_mutex);
}
enabled:
account_event_cpu(event, event->cpu);
}
| 0 |
[
"CWE-667"
] |
linux
|
79c9ce57eb2d5f1497546a3946b4ae21b6fdc438
| 46,958,709,560,764,990,000,000,000,000,000,000,000 | 51 |
perf/core: Fix perf_event_open() vs. execve() race
Jann reported that the ptrace_may_access() check in
find_lively_task_by_vpid() is racy against exec().
Specifically:
perf_event_open() execve()
ptrace_may_access()
commit_creds()
... if (get_dumpable() != SUID_DUMP_USER)
perf_event_exit_task();
perf_install_in_context()
would result in installing a counter across the creds boundary.
Fix this by wrapping lots of perf_event_open() in cred_guard_mutex.
This should be fine as perf_event_exit_task() is already called with
cred_guard_mutex held, so all perf locks already nest inside it.
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Alexander Shishkin <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Stephane Eranian <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Vince Weaver <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
|
static uint32_t xhci_nec_challenge(uint32_t hi, uint32_t lo)
{
uint32_t val;
val = rotl(lo - 0x49434878, 32 - ((hi>>8) & 0x1F));
val += rotl(lo + 0x49434878, hi & 0x1F);
val -= rotl(hi ^ 0x49434878, (lo >> 16) & 0x1F);
return ~val;
}
| 0 |
[
"CWE-835"
] |
qemu
|
96d87bdda3919bb16f754b3d3fd1227e1f38f13c
| 118,602,399,513,186,760,000,000,000,000,000,000,000 | 8 |
xhci: guard xhci_kick_epctx against recursive calls
Track xhci_kick_epctx processing being active in a variable. Check the
variable before calling xhci_kick_epctx from xhci_kick_ep. Add an
assert to make sure we don't call recursively into xhci_kick_epctx.
Cc: [email protected]
Fixes: 94b037f2a451b3dc855f9f2c346e5049a361bd55
Reported-by: Fabian Lesniak <[email protected]>
Signed-off-by: Gerd Hoffmann <[email protected]>
Message-id: [email protected]
Message-id: [email protected]
|
static const char *uvc_print_chain(struct uvc_video_chain *chain)
{
static char buffer[43];
char *p = buffer;
p += uvc_print_terms(&chain->entities, UVC_TERM_INPUT, p);
p += sprintf(p, " -> ");
uvc_print_terms(&chain->entities, UVC_TERM_OUTPUT, p);
return buffer;
}
| 0 |
[
"CWE-269"
] |
linux
|
68035c80e129c4cfec659aac4180354530b26527
| 15,936,165,866,801,200,000,000,000,000,000,000,000 | 11 |
media: uvcvideo: Avoid cyclic entity chains due to malformed USB descriptors
Way back in 2017, fuzzing the 4.14-rc2 USB stack with syzkaller kicked
up the following WARNING from the UVC chain scanning code:
| list_add double add: new=ffff880069084010, prev=ffff880069084010,
| next=ffff880067d22298.
| ------------[ cut here ]------------
| WARNING: CPU: 1 PID: 1846 at lib/list_debug.c:31 __list_add_valid+0xbd/0xf0
| Modules linked in:
| CPU: 1 PID: 1846 Comm: kworker/1:2 Not tainted
| 4.14.0-rc2-42613-g1488251d1a98 #238
| Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
| Workqueue: usb_hub_wq hub_event
| task: ffff88006b01ca40 task.stack: ffff880064358000
| RIP: 0010:__list_add_valid+0xbd/0xf0 lib/list_debug.c:29
| RSP: 0018:ffff88006435ddd0 EFLAGS: 00010286
| RAX: 0000000000000058 RBX: ffff880067d22298 RCX: 0000000000000000
| RDX: 0000000000000058 RSI: ffffffff85a58800 RDI: ffffed000c86bbac
| RBP: ffff88006435dde8 R08: 1ffff1000c86ba52 R09: 0000000000000000
| R10: 0000000000000002 R11: 0000000000000000 R12: ffff880069084010
| R13: ffff880067d22298 R14: ffff880069084010 R15: ffff880067d222a0
| FS: 0000000000000000(0000) GS:ffff88006c900000(0000) knlGS:0000000000000000
| CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
| CR2: 0000000020004ff2 CR3: 000000006b447000 CR4: 00000000000006e0
| Call Trace:
| __list_add ./include/linux/list.h:59
| list_add_tail+0x8c/0x1b0 ./include/linux/list.h:92
| uvc_scan_chain_forward.isra.8+0x373/0x416
| drivers/media/usb/uvc/uvc_driver.c:1471
| uvc_scan_chain drivers/media/usb/uvc/uvc_driver.c:1585
| uvc_scan_device drivers/media/usb/uvc/uvc_driver.c:1769
| uvc_probe+0x77f2/0x8f00 drivers/media/usb/uvc/uvc_driver.c:2104
Looking into the output from usbmon, the interesting part is the
following data packet:
ffff880069c63e00 30710169 C Ci:1:002:0 0 143 = 09028f00 01030080
00090403 00000e01 00000924 03000103 7c003328 010204db
If we drop the lead configuration and interface descriptors, we're left
with an output terminal descriptor describing a generic display:
/* Output terminal descriptor */
buf[0] 09
buf[1] 24
buf[2] 03 /* UVC_VC_OUTPUT_TERMINAL */
buf[3] 00 /* ID */
buf[4] 01 /* type == 0x0301 (UVC_OTT_DISPLAY) */
buf[5] 03
buf[6] 7c
buf[7] 00 /* source ID refers to self! */
buf[8] 33
The problem with this descriptor is that it is self-referential: the
source ID of 0 matches itself! This causes the 'struct uvc_entity'
representing the display to be added to its chain list twice during
'uvc_scan_chain()': once via 'uvc_scan_chain_entity()' when it is
processed directly from the 'dev->entities' list and then again
immediately afterwards when trying to follow the source ID in
'uvc_scan_chain_forward()'
Add a check before adding an entity to a chain list to ensure that the
entity is not already part of a chain.
Link: https://lore.kernel.org/linux-media/CAAeHK+z+Si69jUR+N-SjN9q4O+o5KFiNManqEa-PjUta7EOb7A@mail.gmail.com/
Cc: <[email protected]>
Fixes: c0efd232929c ("V4L/DVB (8145a): USB Video Class driver")
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
Signed-off-by: Laurent Pinchart <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]>
|
void btreeFree(struct BTREE *btree) {
free(btree->records);
}
| 0 |
[
"CWE-20",
"CWE-703"
] |
libmysofa
|
d39a171e9c6a1c44dbdf43f9db6c3fbd887e38c1
| 202,541,792,151,252,130,000,000,000,000,000,000,000 | 3 |
Fixed security issue 1
|
backsql_process_filter_list( backsql_srch_info *bsi, Filter *f, int op )
{
int res;
if ( !f ) {
return 0;
}
backsql_strfcat_x( &bsi->bsi_flt_where,
bsi->bsi_op->o_tmpmemctx, "c", '(' /* ) */ );
while ( 1 ) {
res = backsql_process_filter( bsi, f );
if ( res < 0 ) {
/*
* TimesTen : If the query has no answers,
* don't bother to run the query.
*/
return -1;
}
f = f->f_next;
if ( f == NULL ) {
break;
}
switch ( op ) {
case LDAP_FILTER_AND:
backsql_strfcat_x( &bsi->bsi_flt_where,
bsi->bsi_op->o_tmpmemctx, "l",
(ber_len_t)STRLENOF( " AND " ),
" AND " );
break;
case LDAP_FILTER_OR:
backsql_strfcat_x( &bsi->bsi_flt_where,
bsi->bsi_op->o_tmpmemctx, "l",
(ber_len_t)STRLENOF( " OR " ),
" OR " );
break;
}
}
backsql_strfcat_x( &bsi->bsi_flt_where,
bsi->bsi_op->o_tmpmemctx, "c", /* ( */ ')' );
return 1;
}
| 0 |
[
"CWE-89"
] |
openldap
|
40f3ae4f5c9a8baf75b237220f62c436a571d66e
| 260,044,009,336,828,070,000,000,000,000,000,000,000 | 48 |
ITS#9815 slapd-sql: escape filter values
Escape filter values to slapd-sql (CVE-2022-29155)
|
struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
int pinned)
{
if (pinned || !base->migration_enabled)
return base;
return &per_cpu(hrtimer_bases, get_nohz_timer_target());
}
| 0 |
[
"CWE-200"
] |
tip
|
dfb4357da6ddbdf57d583ba64361c9d792b0e0b1
| 105,149,527,321,885,480,000,000,000,000,000,000,000 | 7 |
time: Remove CONFIG_TIMER_STATS
Currently CONFIG_TIMER_STATS exposes process information across namespaces:
kernel/time/timer_list.c print_timer():
SEQ_printf(m, ", %s/%d", tmp, timer->start_pid);
/proc/timer_list:
#11: <0000000000000000>, hrtimer_wakeup, S:01, do_nanosleep, cron/2570
Given that the tracer can give the same information, this patch entirely
removes CONFIG_TIMER_STATS.
Suggested-by: Thomas Gleixner <[email protected]>
Signed-off-by: Kees Cook <[email protected]>
Acked-by: John Stultz <[email protected]>
Cc: Nicolas Pitre <[email protected]>
Cc: [email protected]
Cc: Lai Jiangshan <[email protected]>
Cc: Shuah Khan <[email protected]>
Cc: Xing Gao <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Jessica Frazelle <[email protected]>
Cc: [email protected]
Cc: Nicolas Iooss <[email protected]>
Cc: "Paul E. McKenney" <[email protected]>
Cc: Petr Mladek <[email protected]>
Cc: Richard Cochran <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Michal Marek <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: "Eric W. Biederman" <[email protected]>
Cc: Olof Johansson <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: [email protected]
Cc: Arjan van de Ven <[email protected]>
Link: http://lkml.kernel.org/r/20170208192659.GA32582@beast
Signed-off-by: Thomas Gleixner <[email protected]>
|
static int id3_has_changed_values(struct playlist *pls, AVDictionary *metadata,
ID3v2ExtraMetaAPIC *apic)
{
AVDictionaryEntry *entry = NULL;
AVDictionaryEntry *oldentry;
/* check that no keys have changed values */
while ((entry = av_dict_get(metadata, "", entry, AV_DICT_IGNORE_SUFFIX))) {
oldentry = av_dict_get(pls->id3_initial, entry->key, NULL, AV_DICT_MATCH_CASE);
if (!oldentry || strcmp(oldentry->value, entry->value) != 0)
return 1;
}
/* check if apic appeared */
if (apic && (pls->ctx->nb_streams != 2 || !pls->ctx->streams[1]->attached_pic.data))
return 1;
if (apic) {
int size = pls->ctx->streams[1]->attached_pic.size;
if (size != apic->buf->size - AV_INPUT_BUFFER_PADDING_SIZE)
return 1;
if (memcmp(apic->buf->data, pls->ctx->streams[1]->attached_pic.data, size) != 0)
return 1;
}
return 0;
}
| 0 |
[
"CWE-416"
] |
FFmpeg
|
6959358683c7533f586c07a766acc5fe9544d8b2
| 183,970,058,749,584,140,000,000,000,000,000,000,000 | 27 |
avformat/hls: check segment duration value of EXTINF
fix ticket: 8673
set the default EXTINF duration to 1ms if duration is smaller than 1ms
Signed-off-by: Steven Liu <[email protected]>
(cherry picked from commit 9dfb19baeb86a8bb02c53a441682c6e9a6e104cc)
|
~DelayedDelivery() override {
discard();
}
| 0 |
[
"CWE-287",
"CWE-284"
] |
ceph
|
5ead97120e07054d80623dada90a5cc764c28468
| 200,658,035,353,410,850,000,000,000,000,000,000,000 | 3 |
auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random()
|
static int do_proc_dointvec_minmax_conv(bool *negp, unsigned long *lvalp,
int *valp,
int write, void *data)
{
struct do_proc_dointvec_minmax_conv_param *param = data;
if (write) {
int val = *negp ? -*lvalp : *lvalp;
if ((param->min && *param->min > val) ||
(param->max && *param->max < val))
return -EINVAL;
*valp = val;
} else {
int val = *valp;
if (val < 0) {
*negp = true;
*lvalp = (unsigned long)-val;
} else {
*negp = false;
*lvalp = (unsigned long)val;
}
}
return 0;
}
| 0 |
[
"CWE-284",
"CWE-264"
] |
linux
|
bfdc0b497faa82a0ba2f9dddcf109231dd519fcc
| 254,587,992,864,343,960,000,000,000,000,000,000,000 | 23 |
sysctl: restrict write access to dmesg_restrict
When dmesg_restrict is set to 1 CAP_SYS_ADMIN is needed to read the kernel
ring buffer. But a root user without CAP_SYS_ADMIN is able to reset
dmesg_restrict to 0.
This is an issue when e.g. LXC (Linux Containers) are used and complete
user space is running without CAP_SYS_ADMIN. A unprivileged and jailed
root user can bypass the dmesg_restrict protection.
With this patch writing to dmesg_restrict is only allowed when root has
CAP_SYS_ADMIN.
Signed-off-by: Richard Weinberger <[email protected]>
Acked-by: Dan Rosenberg <[email protected]>
Acked-by: Serge E. Hallyn <[email protected]>
Cc: Eric Paris <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: James Morris <[email protected]>
Cc: Eugene Teo <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
SkipDataBlocks(gif_source_ptr sinfo)
/* Skip a series of data blocks, until a block terminator is found */
{
U_CHAR buf[256];
while (GetDataBlock(sinfo, buf) > 0)
/* skip */;
}
| 0 |
[
"CWE-369"
] |
libjpeg-turbo
|
1719d12e51641cce5c77e259516649ba5ef6303c
| 41,639,473,888,115,520,000,000,000,000,000,000,000 | 8 |
cjpeg: Fix FPE when compressing 0-width GIF
Fixes #493
|
static void audio_add(VncState *vs)
{
struct audio_capture_ops ops;
if (vs->audio_cap) {
error_report("audio already running");
return;
}
ops.notify = audio_capture_notify;
ops.destroy = audio_capture_destroy;
ops.capture = audio_capture;
vs->audio_cap = AUD_add_capture(vs->vd->audio_state, &vs->as, &ops, vs);
if (!vs->audio_cap) {
error_report("Failed to add audio capture");
}
}
| 0 |
[
"CWE-401"
] |
qemu
|
6bf21f3d83e95bcc4ba35a7a07cc6655e8b010b0
| 32,625,888,824,014,160,000,000,000,000,000,000,000 | 18 |
vnc: fix memory leak when vnc disconnect
Currently when qemu receives a vnc connect, it creates a 'VncState' to
represent this connection. In 'vnc_worker_thread_loop' it creates a
local 'VncState'. The connection 'VcnState' and local 'VncState' exchange
data in 'vnc_async_encoding_start' and 'vnc_async_encoding_end'.
In 'zrle_compress_data' it calls 'deflateInit2' to allocate the libz library
opaque data. The 'VncState' used in 'zrle_compress_data' is the local
'VncState'. In 'vnc_zrle_clear' it calls 'deflateEnd' to free the libz
library opaque data. The 'VncState' used in 'vnc_zrle_clear' is the connection
'VncState'. In currently implementation there will be a memory leak when the
vnc disconnect. Following is the asan output backtrack:
Direct leak of 29760 byte(s) in 5 object(s) allocated from:
0 0xffffa67ef3c3 in __interceptor_calloc (/lib64/libasan.so.4+0xd33c3)
1 0xffffa65071cb in g_malloc0 (/lib64/libglib-2.0.so.0+0x571cb)
2 0xffffa5e968f7 in deflateInit2_ (/lib64/libz.so.1+0x78f7)
3 0xaaaacec58613 in zrle_compress_data ui/vnc-enc-zrle.c:87
4 0xaaaacec58613 in zrle_send_framebuffer_update ui/vnc-enc-zrle.c:344
5 0xaaaacec34e77 in vnc_send_framebuffer_update ui/vnc.c:919
6 0xaaaacec5e023 in vnc_worker_thread_loop ui/vnc-jobs.c:271
7 0xaaaacec5e5e7 in vnc_worker_thread ui/vnc-jobs.c:340
8 0xaaaacee4d3c3 in qemu_thread_start util/qemu-thread-posix.c:502
9 0xffffa544e8bb in start_thread (/lib64/libpthread.so.0+0x78bb)
10 0xffffa53965cb in thread_start (/lib64/libc.so.6+0xd55cb)
This is because the opaque allocated in 'deflateInit2' is not freed in
'deflateEnd'. The reason is that the 'deflateEnd' calls 'deflateStateCheck'
and in the latter will check whether 's->strm != strm'(libz's data structure).
This check will be true so in 'deflateEnd' it just return 'Z_STREAM_ERROR' and
not free the data allocated in 'deflateInit2'.
The reason this happens is that the 'VncState' contains the whole 'VncZrle',
so when calling 'deflateInit2', the 's->strm' will be the local address.
So 's->strm != strm' will be true.
To fix this issue, we need to make 'zrle' of 'VncState' to be a pointer.
Then the connection 'VncState' and local 'VncState' exchange mechanism will
work as expection. The 'tight' of 'VncState' has the same issue, let's also turn
it to a pointer.
Reported-by: Ying Fang <[email protected]>
Signed-off-by: Li Qiang <[email protected]>
Message-id: [email protected]
Signed-off-by: Gerd Hoffmann <[email protected]>
|
static Image *ReadHDRImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
char
format[MaxTextExtent],
keyword[MaxTextExtent],
tag[MaxTextExtent],
value[MaxTextExtent];
double
gamma;
Image
*image;
int
c;
MagickBooleanType
status,
value_expected;
register Quantum
*q;
register ssize_t
i,
x;
register unsigned char
*p;
ssize_t
count,
y;
unsigned char
*end,
pixel[4],
*pixels;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Decode image header.
*/
image->columns=0;
image->rows=0;
*format='\0';
c=ReadBlobByte(image);
if (c == EOF)
{
image=DestroyImage(image);
return((Image *) NULL);
}
while (isgraph(c) && (image->columns == 0) && (image->rows == 0))
{
if (c == (int) '#')
{
char
*comment;
register char
*p;
size_t
length;
/*
Read comment-- any text between # and end-of-line.
*/
length=MaxTextExtent;
comment=AcquireString((char *) NULL);
for (p=comment; comment != (char *) NULL; p++)
{
c=ReadBlobByte(image);
if ((c == EOF) || (c == (int) '\n'))
break;
if ((size_t) (p-comment+1) >= length)
{
*p='\0';
length<<=1;
comment=(char *) ResizeQuantumMemory(comment,length+
MaxTextExtent,sizeof(*comment));
if (comment == (char *) NULL)
break;
p=comment+strlen(comment);
}
*p=(char) c;
}
if (comment == (char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
*p='\0';
(void) SetImageProperty(image,"comment",comment,exception);
comment=DestroyString(comment);
c=ReadBlobByte(image);
}
else
if (isalnum(c) == MagickFalse)
c=ReadBlobByte(image);
else
{
register char
*p;
/*
Determine a keyword and its value.
*/
p=keyword;
do
{
if ((size_t) (p-keyword) < (MaxTextExtent-1))
*p++=c;
c=ReadBlobByte(image);
} while (isalnum(c) || (c == '_'));
*p='\0';
value_expected=MagickFalse;
while ((isspace((int) ((unsigned char) c)) != 0) || (c == '='))
{
if (c == '=')
value_expected=MagickTrue;
c=ReadBlobByte(image);
}
if (LocaleCompare(keyword,"Y") == 0)
value_expected=MagickTrue;
if (value_expected == MagickFalse)
continue;
p=value;
while ((c != '\n') && (c != '\0') && (c != EOF))
{
if ((size_t) (p-value) < (MaxTextExtent-1))
*p++=c;
c=ReadBlobByte(image);
}
*p='\0';
/*
Assign a value to the specified keyword.
*/
switch (*keyword)
{
case 'F':
case 'f':
{
if (LocaleCompare(keyword,"format") == 0)
{
(void) CopyMagickString(format,value,MaxTextExtent);
break;
}
(void) FormatLocaleString(tag,MaxTextExtent,"hdr:%s",keyword);
(void) SetImageProperty(image,tag,value,exception);
break;
}
case 'G':
case 'g':
{
if (LocaleCompare(keyword,"gamma") == 0)
{
image->gamma=StringToDouble(value,(char **) NULL);
break;
}
(void) FormatLocaleString(tag,MaxTextExtent,"hdr:%s",keyword);
(void) SetImageProperty(image,tag,value,exception);
break;
}
case 'P':
case 'p':
{
if (LocaleCompare(keyword,"primaries") == 0)
{
float
chromaticity[6],
white_point[2];
if (sscanf(value,"%g %g %g %g %g %g %g %g",&chromaticity[0],
&chromaticity[1],&chromaticity[2],&chromaticity[3],
&chromaticity[4],&chromaticity[5],&white_point[0],
&white_point[1]) == 8)
{
image->chromaticity.red_primary.x=chromaticity[0];
image->chromaticity.red_primary.y=chromaticity[1];
image->chromaticity.green_primary.x=chromaticity[2];
image->chromaticity.green_primary.y=chromaticity[3];
image->chromaticity.blue_primary.x=chromaticity[4];
image->chromaticity.blue_primary.y=chromaticity[5];
image->chromaticity.white_point.x=white_point[0],
image->chromaticity.white_point.y=white_point[1];
}
break;
}
(void) FormatLocaleString(tag,MaxTextExtent,"hdr:%s",keyword);
(void) SetImageProperty(image,tag,value,exception);
break;
}
case 'Y':
case 'y':
{
char
target[] = "Y";
if (strcmp(keyword,target) == 0)
{
int
height,
width;
if (sscanf(value,"%d +X %d",&height,&width) == 2)
{
image->columns=(size_t) width;
image->rows=(size_t) height;
}
break;
}
(void) FormatLocaleString(tag,MaxTextExtent,"hdr:%s",keyword);
(void) SetImageProperty(image,tag,value,exception);
break;
}
default:
{
(void) FormatLocaleString(tag,MaxTextExtent,"hdr:%s",keyword);
(void) SetImageProperty(image,tag,value,exception);
break;
}
}
}
if ((image->columns == 0) && (image->rows == 0))
while (isspace((int) ((unsigned char) c)) != 0)
c=ReadBlobByte(image);
}
if ((LocaleCompare(format,"32-bit_rle_rgbe") != 0) &&
(LocaleCompare(format,"32-bit_rle_xyze") != 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if ((image->columns == 0) || (image->rows == 0))
ThrowReaderException(CorruptImageError,"NegativeOrZeroImageSize");
(void) SetImageColorspace(image,RGBColorspace,exception);
if (LocaleCompare(format,"32-bit_rle_xyze") == 0)
(void) SetImageColorspace(image,XYZColorspace,exception);
image->compression=(image->columns < 8) || (image->columns > 0x7ffff) ?
NoCompression : RLECompression;
if (image_info->ping != MagickFalse)
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
/*
Read RGBE (red+green+blue+exponent) pixels.
*/
pixels=(unsigned char *) AcquireQuantumMemory(image->columns,4*
sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
for (y=0; y < (ssize_t) image->rows; y++)
{
if (image->compression != RLECompression)
{
count=ReadBlob(image,4*image->columns*sizeof(*pixels),pixels);
if (count != (ssize_t) (4*image->columns*sizeof(*pixels)))
break;
}
else
{
count=ReadBlob(image,4*sizeof(*pixel),pixel);
if (count != 4)
break;
if ((size_t) ((((size_t) pixel[2]) << 8) | pixel[3]) != image->columns)
{
(void) memcpy(pixels,pixel,4*sizeof(*pixel));
count=ReadBlob(image,4*(image->columns-1)*sizeof(*pixels),pixels+4);
image->compression=NoCompression;
}
else
{
p=pixels;
for (i=0; i < 4; i++)
{
end=&pixels[(i+1)*image->columns];
while (p < end)
{
count=ReadBlob(image,2*sizeof(*pixel),pixel);
if (count < 1)
break;
if (pixel[0] > 128)
{
count=(ssize_t) pixel[0]-128;
if ((count == 0) || (count > (ssize_t) (end-p)))
break;
while (count-- > 0)
*p++=pixel[1];
}
else
{
count=(ssize_t) pixel[0];
if ((count == 0) || (count > (ssize_t) (end-p)))
break;
*p++=pixel[1];
if (--count > 0)
{
count=ReadBlob(image,(size_t) count*sizeof(*p),p);
if (count < 1)
break;
p+=count;
}
}
}
}
}
}
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
i=0;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (image->compression == RLECompression)
{
pixel[0]=pixels[x];
pixel[1]=pixels[x+image->columns];
pixel[2]=pixels[x+2*image->columns];
pixel[3]=pixels[x+3*image->columns];
}
else
{
pixel[0]=pixels[i++];
pixel[1]=pixels[i++];
pixel[2]=pixels[i++];
pixel[3]=pixels[i++];
}
SetPixelRed(image,0,q);
SetPixelGreen(image,0,q);
SetPixelBlue(image,0,q);
if (pixel[3] != 0)
{
gamma=pow(2.0,pixel[3]-(128.0+8.0));
SetPixelRed(image,ClampToQuantum(QuantumRange*gamma*pixel[0]),q);
SetPixelGreen(image,ClampToQuantum(QuantumRange*gamma*pixel[1]),q);
SetPixelBlue(image,ClampToQuantum(QuantumRange*gamma*pixel[2]),q);
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
if (EOFBlob(image) != MagickFalse)
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
| 0 |
[
"CWE-20",
"CWE-703",
"CWE-835"
] |
ImageMagick
|
97aa7d7cfd2027f6ba7ce42caf8b798541b9cdc6
| 168,062,288,779,636,910,000,000,000,000,000,000,000 | 369 |
Fixed infinite loop and added checks for the sscanf result.
|
CryptRsaDecrypt(
TPM2B *dOut, // OUT: the decrypted data
TPM2B *cIn, // IN: the data to decrypt
OBJECT *key, // IN: the key to use for decryption
TPMT_RSA_DECRYPT *scheme, // IN: the padding scheme
const TPM2B *label // IN: in case it is needed for the scheme
)
{
TPM_RC retVal;
EVP_PKEY *pkey = NULL;
EVP_PKEY_CTX *ctx = NULL;
const EVP_MD *md = NULL;
const char *digestname;
size_t outlen;
unsigned char *tmp = NULL;
// Make sure that the necessary parameters are provided
pAssert(cIn != NULL && dOut != NULL && key != NULL);
// Size is checked to make sure that the encrypted value is the right size
if(cIn->size != key->publicArea.unique.rsa.t.size)
ERROR_RETURN(TPM_RC_SIZE);
TEST(scheme->scheme);
retVal = InitOpenSSLRSAPrivateKey(key, &pkey);
if (retVal != TPM_RC_SUCCESS)
return retVal;
ctx = EVP_PKEY_CTX_new(pkey, NULL);
if (ctx == NULL ||
EVP_PKEY_decrypt_init(ctx) <= 0)
ERROR_RETURN(TPM_RC_FAILURE);
switch(scheme->scheme)
{
case ALG_NULL_VALUE: // 'raw' encryption
if (EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_NO_PADDING) <= 0)
ERROR_RETURN(TPM_RC_FAILURE);
break;
case ALG_RSAES_VALUE:
if (EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PADDING) <= 0)
ERROR_RETURN(TPM_RC_FAILURE);
break;
case ALG_OAEP_VALUE:
digestname = GetDigestNameByHashAlg(scheme->details.oaep.hashAlg);
if (digestname == NULL)
ERROR_RETURN(TPM_RC_VALUE);
md = EVP_get_digestbyname(digestname);
if (md == NULL ||
EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_OAEP_PADDING) <= 0 ||
EVP_PKEY_CTX_set_rsa_oaep_md(ctx, md) <= 0)
ERROR_RETURN(TPM_RC_FAILURE);
if (label->size > 0) {
tmp = malloc(label->size);
if (tmp == NULL)
ERROR_RETURN(TPM_RC_FAILURE);
memcpy(tmp, label->buffer, label->size);
}
if (EVP_PKEY_CTX_set0_rsa_oaep_label(ctx, tmp, label->size) <= 0)
ERROR_RETURN(TPM_RC_FAILURE);
tmp = NULL;
break;
default:
ERROR_RETURN(TPM_RC_SCHEME);
break;
}
outlen = cIn->size;
if (EVP_PKEY_decrypt(ctx, dOut->buffer, &outlen,
cIn->buffer, cIn->size) <= 0)
ERROR_RETURN(TPM_RC_FAILURE);
dOut->size = outlen;
retVal = TPM_RC_SUCCESS;
Exit:
EVP_PKEY_free(pkey);
EVP_PKEY_CTX_free(ctx);
free(tmp);
return retVal;
}
| 1 |
[
"CWE-787"
] |
libtpms
|
505ef841c00b4c096b1977c667cb957bec3a1d8b
| 336,056,029,318,474,660,000,000,000,000,000,000,000 | 85 |
tpm2: Fix output buffer parameter and size for RSA decyrption
For the RSA decryption we have to use an output buffer of the size of the
(largest possible) RSA key for the decryption to always work.
This fixes a stack corruption bug that caused a SIGBUS and termination of
'swtpm'.
Signed-off-by: Stefan Berger <[email protected]>
|
bool fuse_write_update_size(struct inode *inode, loff_t pos)
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
bool ret = false;
spin_lock(&fi->lock);
fi->attr_version = atomic64_inc_return(&fc->attr_version);
if (pos > inode->i_size) {
i_size_write(inode, pos);
ret = true;
}
spin_unlock(&fi->lock);
return ret;
}
| 0 |
[
"CWE-459"
] |
linux
|
5d069dbe8aaf2a197142558b6fb2978189ba3454
| 222,262,658,567,487,700,000,000,000,000,000,000,000 | 16 |
fuse: fix bad inode
Jan Kara's analysis of the syzbot report (edited):
The reproducer opens a directory on FUSE filesystem, it then attaches
dnotify mark to the open directory. After that a fuse_do_getattr() call
finds that attributes returned by the server are inconsistent, and calls
make_bad_inode() which, among other things does:
inode->i_mode = S_IFREG;
This then confuses dnotify which doesn't tear down its structures
properly and eventually crashes.
Avoid calling make_bad_inode() on a live inode: switch to a private flag on
the fuse inode. Also add the test to ops which the bad_inode_ops would
have caught.
This bug goes back to the initial merge of fuse in 2.6.14...
Reported-by: [email protected]
Signed-off-by: Miklos Szeredi <[email protected]>
Tested-by: Jan Kara <[email protected]>
Cc: <[email protected]>
|
WandPrivate MagickBooleanType CLIListOperatorImages(MagickCLI *cli_wand,
const char *option,const char *arg1n,const char *arg2n)
{
const char /* percent escaped versions of the args */
*arg1,
*arg2;
Image
*new_images;
MagickStatusType
status;
ssize_t
parse;
#define _image_info (cli_wand->wand.image_info)
#define _images (cli_wand->wand.images)
#define _exception (cli_wand->wand.exception)
#define _draw_info (cli_wand->draw_info)
#define _quantize_info (cli_wand->quantize_info)
#define _process_flags (cli_wand->process_flags)
#define _option_type ((CommandOptionFlags) cli_wand->command->flags)
#define IfNormalOp (*option=='-')
#define IfPlusOp (*option!='-')
#define IsNormalOp IfNormalOp ? MagickTrue : MagickFalse
assert(cli_wand != (MagickCLI *) NULL);
assert(cli_wand->signature == MagickWandSignature);
assert(cli_wand->wand.signature == MagickWandSignature);
assert(_images != (Image *) NULL); /* _images must be present */
if (cli_wand->wand.debug != MagickFalse)
(void) CLILogEvent(cli_wand,CommandEvent,GetMagickModule(),
"- List Operator: %s \"%s\" \"%s\"", option,
arg1n == (const char *) NULL ? "null" : arg1n,
arg2n == (const char *) NULL ? "null" : arg2n);
arg1 = arg1n;
arg2 = arg2n;
/* Interpret Percent Escapes in Arguments - using first image */
if ( (((_process_flags & ProcessInterpretProperities) != 0 )
|| ((_option_type & AlwaysInterpretArgsFlag) != 0)
) && ((_option_type & NeverInterpretArgsFlag) == 0) ) {
/* Interpret Percent escapes in argument 1 */
if (arg1n != (char *) NULL) {
arg1=InterpretImageProperties(_image_info,_images,arg1n,_exception);
if (arg1 == (char *) NULL) {
CLIWandException(OptionWarning,"InterpretPropertyFailure",option);
arg1=arg1n; /* use the given argument as is */
}
}
if (arg2n != (char *) NULL) {
arg2=InterpretImageProperties(_image_info,_images,arg2n,_exception);
if (arg2 == (char *) NULL) {
CLIWandException(OptionWarning,"InterpretPropertyFailure",option);
arg2=arg2n; /* use the given argument as is */
}
}
}
#undef _process_flags
#undef _option_type
status=MagickTrue;
new_images=NewImageList();
switch (*(option+1))
{
case 'a':
{
if (LocaleCompare("append",option+1) == 0)
{
new_images=AppendImages(_images,IsNormalOp,_exception);
break;
}
if (LocaleCompare("average",option+1) == 0)
{
CLIWandWarnReplaced("-evaluate-sequence Mean");
(void) CLIListOperatorImages(cli_wand,"-evaluate-sequence","Mean",
NULL);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'c':
{
if (LocaleCompare("channel-fx",option+1) == 0)
{
new_images=ChannelFxImage(_images,arg1,_exception);
break;
}
if (LocaleCompare("clut",option+1) == 0)
{
Image
*clut_image;
/* FUTURE - make this a compose option, and thus can be used
with layers compose or even compose last image over all other
_images.
*/
new_images=RemoveFirstImageFromList(&_images);
clut_image=RemoveFirstImageFromList(&_images);
/* FUTURE - produce Exception, rather than silent fail */
if (clut_image == (Image *) NULL)
{
(void) ThrowMagickException(_exception,GetMagickModule(),
OptionError,"ImageSequenceRequired","`%s'",option);
new_images=DestroyImage(new_images);
status=MagickFalse;
break;
}
(void) ClutImage(new_images,clut_image,new_images->interpolate,
_exception);
clut_image=DestroyImage(clut_image);
break;
}
if (LocaleCompare("coalesce",option+1) == 0)
{
new_images=CoalesceImages(_images,_exception);
break;
}
if (LocaleCompare("combine",option+1) == 0)
{
parse=(ssize_t) _images->colorspace;
if (_images->number_channels < GetImageListLength(_images))
parse=sRGBColorspace;
if ( IfPlusOp )
parse=ParseCommandOption(MagickColorspaceOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedColorspace",option,
arg1);
new_images=CombineImages(_images,(ColorspaceType) parse,_exception);
break;
}
if (LocaleCompare("compare",option+1) == 0)
{
double
distortion;
Image
*image,
*reconstruct_image;
MetricType
metric;
/*
Mathematically and visually annotate the difference between an
image and its reconstruction.
*/
image=RemoveFirstImageFromList(&_images);
reconstruct_image=RemoveFirstImageFromList(&_images);
/* FUTURE - produce Exception, rather than silent fail */
if (reconstruct_image == (Image *) NULL)
{
(void) ThrowMagickException(_exception,GetMagickModule(),
OptionError,"ImageSequenceRequired","`%s'",option);
image=DestroyImage(image);
status=MagickFalse;
break;
}
metric=UndefinedErrorMetric;
option=GetImageOption(_image_info,"metric");
if (option != (const char *) NULL)
metric=(MetricType) ParseCommandOption(MagickMetricOptions,
MagickFalse,option);
new_images=CompareImages(image,reconstruct_image,metric,&distortion,
_exception);
(void) distortion;
reconstruct_image=DestroyImage(reconstruct_image);
image=DestroyImage(image);
break;
}
if (LocaleCompare("complex",option+1) == 0)
{
parse=ParseCommandOption(MagickComplexOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedEvaluateOperator",
option,arg1);
new_images=ComplexImages(_images,(ComplexOperator) parse,_exception);
break;
}
if (LocaleCompare("composite",option+1) == 0)
{
CompositeOperator
compose;
const char*
value;
MagickBooleanType
clip_to_self;
Image
*mask_image,
*source_image;
RectangleInfo
geometry;
/* Compose value from "-compose" option only */
value=GetImageOption(_image_info,"compose");
if (value == (const char *) NULL)
compose=OverCompositeOp; /* use Over not source_image->compose */
else
compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
MagickFalse,value);
/* Get "clip-to-self" expert setting (false is normal) */
clip_to_self=GetCompositeClipToSelf(compose);
value=GetImageOption(_image_info,"compose:clip-to-self");
if (value != (const char *) NULL)
clip_to_self=IsStringTrue(value);
value=GetImageOption(_image_info,"compose:outside-overlay");
if (value != (const char *) NULL)
clip_to_self=IsStringFalse(value); /* deprecated */
new_images=RemoveFirstImageFromList(&_images);
source_image=RemoveFirstImageFromList(&_images);
if (source_image == (Image *) NULL)
{
(void) ThrowMagickException(_exception,GetMagickModule(),
OptionError,"ImageSequenceRequired","`%s'",option);
new_images=DestroyImage(new_images);
status=MagickFalse;
break;
}
/* FUTURE - this should not be here! - should be part of -geometry */
if (source_image->geometry != (char *) NULL)
{
RectangleInfo
resize_geometry;
(void) ParseRegionGeometry(source_image,source_image->geometry,
&resize_geometry,_exception);
if ((source_image->columns != resize_geometry.width) ||
(source_image->rows != resize_geometry.height))
{
Image
*resize_image;
resize_image=ResizeImage(source_image,resize_geometry.width,
resize_geometry.height,source_image->filter,_exception);
if (resize_image != (Image *) NULL)
{
source_image=DestroyImage(source_image);
source_image=resize_image;
}
}
}
SetGeometry(source_image,&geometry);
(void) ParseAbsoluteGeometry(source_image->geometry,&geometry);
GravityAdjustGeometry(new_images->columns,new_images->rows,
new_images->gravity, &geometry);
mask_image=RemoveFirstImageFromList(&_images);
if (mask_image == (Image *) NULL)
status&=CompositeImage(new_images,source_image,compose,clip_to_self,
geometry.x,geometry.y,_exception);
else
{
if ((compose == DisplaceCompositeOp) ||
(compose == DistortCompositeOp))
{
status&=CompositeImage(source_image,mask_image,
CopyGreenCompositeOp,MagickTrue,0,0,_exception);
status&=CompositeImage(new_images,source_image,compose,
clip_to_self,geometry.x,geometry.y,_exception);
}
else
{
Image
*clone_image;
clone_image=CloneImage(new_images,0,0,MagickTrue,_exception);
if (clone_image == (Image *) NULL)
break;
status&=CompositeImage(new_images,source_image,compose,
clip_to_self,geometry.x,geometry.y,_exception);
status&=CompositeImage(new_images,mask_image,
CopyAlphaCompositeOp,MagickTrue,0,0,_exception);
status&=CompositeImage(clone_image,new_images,OverCompositeOp,
clip_to_self,0,0,_exception);
new_images=DestroyImageList(new_images);
new_images=clone_image;
}
mask_image=DestroyImage(mask_image);
}
source_image=DestroyImage(source_image);
break;
}
if (LocaleCompare("copy",option+1) == 0)
{
Image
*source_image;
OffsetInfo
offset;
RectangleInfo
geometry;
/*
Copy image pixels.
*/
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if (IsGeometry(arg2) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) ParsePageGeometry(_images,arg2,&geometry,_exception);
offset.x=geometry.x;
offset.y=geometry.y;
source_image=_images;
if (source_image->next != (Image *) NULL)
source_image=source_image->next;
(void) ParsePageGeometry(source_image,arg1,&geometry,_exception);
(void) CopyImagePixels(_images,source_image,&geometry,&offset,
_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'd':
{
if (LocaleCompare("deconstruct",option+1) == 0)
{
CLIWandWarnReplaced("-layer CompareAny");
(void) CLIListOperatorImages(cli_wand,"-layer","CompareAny",NULL);
break;
}
if (LocaleCompare("delete",option+1) == 0)
{
if (IfNormalOp)
DeleteImages(&_images,arg1,_exception);
else
DeleteImages(&_images,"-1",_exception);
break;
}
if (LocaleCompare("duplicate",option+1) == 0)
{
if (IfNormalOp)
{
const char
*p;
size_t
number_duplicates;
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,
arg1);
number_duplicates=(size_t) StringToLong(arg1);
p=strchr(arg1,',');
if (p == (const char *) NULL)
new_images=DuplicateImages(_images,number_duplicates,"-1",
_exception);
else
new_images=DuplicateImages(_images,number_duplicates,p,
_exception);
}
else
new_images=DuplicateImages(_images,1,"-1",_exception);
AppendImageToList(&_images, new_images);
new_images=(Image *) NULL;
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'e':
{
if (LocaleCompare("evaluate-sequence",option+1) == 0)
{
parse=ParseCommandOption(MagickEvaluateOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedEvaluateOperator",
option,arg1);
new_images=EvaluateImages(_images,(MagickEvaluateOperator) parse,
_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'f':
{
if (LocaleCompare("fft",option+1) == 0)
{
new_images=ForwardFourierTransformImage(_images,IsNormalOp,
_exception);
break;
}
if (LocaleCompare("flatten",option+1) == 0)
{
/* REDIRECTED to use -layers flatten instead */
(void) CLIListOperatorImages(cli_wand,"-layers",option+1,NULL);
break;
}
if (LocaleCompare("fx",option+1) == 0)
{
new_images=FxImage(_images,arg1,_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'h':
{
if (LocaleCompare("hald-clut",option+1) == 0)
{
/* FUTURE - make this a compose option (and thus layers compose )
or perhaps compose last image over all other _images.
*/
Image
*hald_image;
new_images=RemoveFirstImageFromList(&_images);
hald_image=RemoveLastImageFromList(&_images);
if (hald_image == (Image *) NULL)
{
(void) ThrowMagickException(_exception,GetMagickModule(),
OptionError,"ImageSequenceRequired","`%s'",option);
new_images=DestroyImage(new_images);
status=MagickFalse;
break;
}
(void) HaldClutImage(new_images,hald_image,_exception);
hald_image=DestroyImage(hald_image);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'i':
{
if (LocaleCompare("ift",option+1) == 0)
{
Image
*magnitude_image,
*phase_image;
magnitude_image=RemoveFirstImageFromList(&_images);
phase_image=RemoveFirstImageFromList(&_images);
if (phase_image == (Image *) NULL)
{
(void) ThrowMagickException(_exception,GetMagickModule(),
OptionError,"ImageSequenceRequired","`%s'",option);
magnitude_image=DestroyImage(magnitude_image);
status=MagickFalse;
break;
}
new_images=InverseFourierTransformImage(magnitude_image,phase_image,
IsNormalOp,_exception);
magnitude_image=DestroyImage(magnitude_image);
phase_image=DestroyImage(phase_image);
break;
}
if (LocaleCompare("insert",option+1) == 0)
{
Image
*insert_image,
*index_image;
ssize_t
index;
if (IfNormalOp && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
index=0;
insert_image=RemoveLastImageFromList(&_images);
if (IfNormalOp)
index=(ssize_t) StringToLong(arg1);
index_image=insert_image;
if (index == 0)
PrependImageToList(&_images,insert_image);
else if (index == (ssize_t) GetImageListLength(_images))
AppendImageToList(&_images,insert_image);
else
{
index_image=GetImageFromList(_images,index-1);
if (index_image == (Image *) NULL)
{
insert_image=DestroyImage(insert_image);
CLIWandExceptArgBreak(OptionError,"NoSuchImage",option,arg1);
}
InsertImageInList(&index_image,insert_image);
}
_images=GetFirstImageInList(index_image);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'l':
{
if (LocaleCompare("layers",option+1) == 0)
{
parse=ParseCommandOption(MagickLayerOptions,MagickFalse,arg1);
if ( parse < 0 )
CLIWandExceptArgBreak(OptionError,"UnrecognizedLayerMethod",
option,arg1);
switch ((LayerMethod) parse)
{
case CoalesceLayer:
{
new_images=CoalesceImages(_images,_exception);
break;
}
case CompareAnyLayer:
case CompareClearLayer:
case CompareOverlayLayer:
default:
{
new_images=CompareImagesLayers(_images,(LayerMethod) parse,
_exception);
break;
}
case MergeLayer:
case FlattenLayer:
case MosaicLayer:
case TrimBoundsLayer:
{
new_images=MergeImageLayers(_images,(LayerMethod) parse,
_exception);
break;
}
case DisposeLayer:
{
new_images=DisposeImages(_images,_exception);
break;
}
case OptimizeImageLayer:
{
new_images=OptimizeImageLayers(_images,_exception);
break;
}
case OptimizePlusLayer:
{
new_images=OptimizePlusImageLayers(_images,_exception);
break;
}
case OptimizeTransLayer:
{
OptimizeImageTransparency(_images,_exception);
break;
}
case RemoveDupsLayer:
{
RemoveDuplicateLayers(&_images,_exception);
break;
}
case RemoveZeroLayer:
{
RemoveZeroDelayLayers(&_images,_exception);
break;
}
case OptimizeLayer:
{ /* General Purpose, GIF Animation Optimizer. */
new_images=CoalesceImages(_images,_exception);
if (new_images == (Image *) NULL)
break;
_images=DestroyImageList(_images);
_images=OptimizeImageLayers(new_images,_exception);
if (_images == (Image *) NULL)
break;
new_images=DestroyImageList(new_images);
OptimizeImageTransparency(_images,_exception);
(void) RemapImages(_quantize_info,_images,(Image *) NULL,
_exception);
break;
}
case CompositeLayer:
{
Image
*source;
RectangleInfo
geometry;
CompositeOperator
compose;
const char*
value;
value=GetImageOption(_image_info,"compose");
compose=OverCompositeOp; /* Default to Over */
if (value != (const char *) NULL)
compose=(CompositeOperator) ParseCommandOption(
MagickComposeOptions,MagickFalse,value);
/* Split image sequence at the first 'NULL:' image. */
source=_images;
while (source != (Image *) NULL)
{
source=GetNextImageInList(source);
if ((source != (Image *) NULL) &&
(LocaleCompare(source->magick,"NULL") == 0))
break;
}
if (source != (Image *) NULL)
{
if ((GetPreviousImageInList(source) == (Image *) NULL) ||
(GetNextImageInList(source) == (Image *) NULL))
source=(Image *) NULL;
else
{ /* Separate the two lists, junk the null: image. */
source=SplitImageList(source->previous);
DeleteImageFromList(&source);
}
}
if (source == (Image *) NULL)
{
(void) ThrowMagickException(_exception,GetMagickModule(),
OptionError,"MissingNullSeparator","layers Composite");
break;
}
/* Adjust offset with gravity and virtual canvas. */
SetGeometry(_images,&geometry);
(void) ParseAbsoluteGeometry(_images->geometry,&geometry);
geometry.width=source->page.width != 0 ?
source->page.width : source->columns;
geometry.height=source->page.height != 0 ?
source->page.height : source->rows;
GravityAdjustGeometry(_images->page.width != 0 ?
_images->page.width : _images->columns,
_images->page.height != 0 ? _images->page.height :
_images->rows,_images->gravity,&geometry);
/* Compose the two image sequences together */
CompositeLayers(_images,compose,source,geometry.x,geometry.y,
_exception);
source=DestroyImageList(source);
break;
}
}
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'm':
{
if (LocaleCompare("map",option+1) == 0)
{
CLIWandWarnReplaced("+remap");
(void) RemapImages(_quantize_info,_images,(Image *) NULL,_exception);
break;
}
if (LocaleCompare("metric",option+1) == 0)
{
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("morph",option+1) == 0)
{
Image
*morph_image;
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
morph_image=MorphImages(_images,StringToUnsignedLong(arg1),
_exception);
if (morph_image == (Image *) NULL)
break;
_images=DestroyImageList(_images);
_images=morph_image;
break;
}
if (LocaleCompare("mosaic",option+1) == 0)
{
/* REDIRECTED to use -layers mosaic instead */
(void) CLIListOperatorImages(cli_wand,"-layers",option+1,NULL);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'p':
{
if (LocaleCompare("poly",option+1) == 0)
{
double
*args;
ssize_t
count;
/* convert argument string into an array of doubles */
args = StringToArrayOfDoubles(arg1,&count,_exception);
if (args == (double *) NULL )
CLIWandExceptArgBreak(OptionError,"InvalidNumberList",option,arg1);
new_images=PolynomialImage(_images,(size_t) (count >> 1),args,
_exception);
args=(double *) RelinquishMagickMemory(args);
break;
}
if (LocaleCompare("process",option+1) == 0)
{
/* FUTURE: better parsing using ScriptToken() from string ??? */
char
**arguments;
int
j,
number_arguments;
arguments=StringToArgv(arg1,&number_arguments);
if (arguments == (char **) NULL)
break;
if (strchr(arguments[1],'=') != (char *) NULL)
{
char
breaker,
quote,
*token;
const char
*arguments;
int
next,
status;
size_t
length;
TokenInfo
*token_info;
/*
Support old style syntax, filter="-option arg1".
*/
assert(arg1 != (const char *) NULL);
length=strlen(arg1);
token=(char *) NULL;
if (~length >= (MagickPathExtent-1))
token=(char *) AcquireQuantumMemory(length+MagickPathExtent,
sizeof(*token));
if (token == (char *) NULL)
break;
next=0;
arguments=arg1;
token_info=AcquireTokenInfo();
status=Tokenizer(token_info,0,token,length,arguments,"","=",
"\"",'\0',&breaker,&next,"e);
token_info=DestroyTokenInfo(token_info);
if (status == 0)
{
const char
*argv;
argv=(&(arguments[next]));
(void) InvokeDynamicImageFilter(token,&_images,1,&argv,
_exception);
}
token=DestroyString(token);
break;
}
(void) SubstituteString(&arguments[1],"-","");
(void) InvokeDynamicImageFilter(arguments[1],&_images,
number_arguments-2,(const char **) arguments+2,_exception);
for (j=0; j < number_arguments; j++)
arguments[j]=DestroyString(arguments[j]);
arguments=(char **) RelinquishMagickMemory(arguments);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'r':
{
if (LocaleCompare("remap",option+1) == 0)
{
(void) RemapImages(_quantize_info,_images,(Image *) NULL,_exception);
break;
}
if (LocaleCompare("reverse",option+1) == 0)
{
ReverseImageList(&_images);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 's':
{
if (LocaleCompare("smush",option+1) == 0)
{
/* FUTURE: this option needs more work to make better */
ssize_t
offset;
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
offset=(ssize_t) StringToLong(arg1);
new_images=SmushImages(_images,IsNormalOp,offset,_exception);
break;
}
if (LocaleCompare("subimage",option+1) == 0)
{
Image
*base_image,
*compare_image;
const char
*value;
MetricType
metric;
double
similarity;
RectangleInfo
offset;
base_image=GetImageFromList(_images,0);
compare_image=GetImageFromList(_images,1);
/* Comparision Metric */
metric=UndefinedErrorMetric;
value=GetImageOption(_image_info,"metric");
if (value != (const char *) NULL)
metric=(MetricType) ParseCommandOption(MagickMetricOptions,
MagickFalse,value);
new_images=SimilarityImage(base_image,compare_image,metric,0.0,
&offset,&similarity,_exception);
if (new_images != (Image *) NULL)
{
char
result[MagickPathExtent];
(void) FormatLocaleString(result,MagickPathExtent,"%lf",
similarity);
(void) SetImageProperty(new_images,"subimage:similarity",result,
_exception);
(void) FormatLocaleString(result,MagickPathExtent,"%+ld",(long)
offset.x);
(void) SetImageProperty(new_images,"subimage:x",result,
_exception);
(void) FormatLocaleString(result,MagickPathExtent,"%+ld",(long)
offset.y);
(void) SetImageProperty(new_images,"subimage:y",result,
_exception);
(void) FormatLocaleString(result,MagickPathExtent,
"%lux%lu%+ld%+ld",(unsigned long) offset.width,(unsigned long)
offset.height,(long) offset.x,(long) offset.y);
(void) SetImageProperty(new_images,"subimage:offset",result,
_exception);
}
break;
}
if (LocaleCompare("swap",option+1) == 0)
{
Image
*p,
*q,
*swap;
ssize_t
index,
swap_index;
index=(-1);
swap_index=(-2);
if (IfNormalOp) {
GeometryInfo
geometry_info;
MagickStatusType
flags;
swap_index=(-1);
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
index=(ssize_t) geometry_info.rho;
if ((flags & SigmaValue) != 0)
swap_index=(ssize_t) geometry_info.sigma;
}
p=GetImageFromList(_images,index);
q=GetImageFromList(_images,swap_index);
if ((p == (Image *) NULL) || (q == (Image *) NULL)) {
if (IfNormalOp)
CLIWandExceptArgBreak(OptionError,"InvalidImageIndex",option,arg1)
else
CLIWandExceptionBreak(OptionError,"TwoOrMoreImagesRequired",option);
}
if (p == q)
CLIWandExceptArgBreak(OptionError,"InvalidImageIndex",option,arg1);
swap=CloneImage(p,0,0,MagickTrue,_exception);
if (swap == (Image *) NULL)
CLIWandExceptArgBreak(ResourceLimitError,"MemoryAllocationFailed",
option,GetExceptionMessage(errno));
ReplaceImageInList(&p,CloneImage(q,0,0,MagickTrue,_exception));
ReplaceImageInList(&q,swap);
_images=GetFirstImageInList(q);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
default:
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
/* clean up percent escape interpreted strings */
if (arg1 != arg1n )
arg1=DestroyString((char *)arg1);
if (arg2 != arg2n )
arg2=DestroyString((char *)arg2);
/* if new image list generated, replace existing image list */
if (new_images == (Image *) NULL)
return(status == 0 ? MagickFalse : MagickTrue);
_images=DestroyImageList(_images);
_images=GetFirstImageInList(new_images);
return(status == 0 ? MagickFalse : MagickTrue);
#undef _image_info
#undef _images
#undef _exception
#undef _draw_info
#undef _quantize_info
#undef IfNormalOp
#undef IfPlusOp
#undef IsNormalOp
}
| 0 |
[
"CWE-399",
"CWE-401"
] |
ImageMagick
|
4a334bbf5584de37c6f5a47c380a531c8c4b140a
| 166,259,257,869,100,790,000,000,000,000,000,000,000 | 922 |
https://github.com/ImageMagick/ImageMagick/issues/1623
|
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
struct thread_struct *prev = &prev_p->thread;
struct thread_struct *next = &next_p->thread;
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(init_tss, cpu);
unsigned fsindex, gsindex;
fpu_switch_t fpu;
fpu = switch_fpu_prepare(prev_p, next_p, cpu);
/*
* Reload esp0, LDT and the page table pointer:
*/
load_sp0(tss, next);
/*
* Switch DS and ES.
* This won't pick up thread selector changes, but I guess that is ok.
*/
savesegment(es, prev->es);
if (unlikely(next->es | prev->es))
loadsegment(es, next->es);
savesegment(ds, prev->ds);
if (unlikely(next->ds | prev->ds))
loadsegment(ds, next->ds);
/* We must save %fs and %gs before load_TLS() because
* %fs and %gs may be cleared by load_TLS().
*
* (e.g. xen_load_tls())
*/
savesegment(fs, fsindex);
savesegment(gs, gsindex);
load_TLS(next, cpu);
/*
* Leave lazy mode, flushing any hypercalls made here.
* This must be done before restoring TLS segments so
* the GDT and LDT are properly updated, and must be
* done before math_state_restore, so the TS bit is up
* to date.
*/
arch_end_context_switch(next_p);
/*
* Switch FS and GS.
*
* Segment register != 0 always requires a reload. Also
* reload when it has changed. When prev process used 64bit
* base always reload to avoid an information leak.
*/
if (unlikely(fsindex | next->fsindex | prev->fs)) {
loadsegment(fs, next->fsindex);
/*
* Check if the user used a selector != 0; if yes
* clear 64bit base, since overloaded base is always
* mapped to the Null selector
*/
if (fsindex)
prev->fs = 0;
}
/* when next process has a 64bit base use it */
if (next->fs)
wrmsrl(MSR_FS_BASE, next->fs);
prev->fsindex = fsindex;
if (unlikely(gsindex | next->gsindex | prev->gs)) {
load_gs_index(next->gsindex);
if (gsindex)
prev->gs = 0;
}
if (next->gs)
wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
prev->gsindex = gsindex;
switch_fpu_finish(next_p, fpu);
/*
* Switch the PDA and FPU contexts.
*/
prev->usersp = this_cpu_read(old_rsp);
this_cpu_write(old_rsp, next->usersp);
this_cpu_write(current_task, next_p);
/*
* If it were not for PREEMPT_ACTIVE we could guarantee that the
* preempt_count of all tasks was equal here and this would not be
* needed.
*/
task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
this_cpu_write(kernel_stack,
(unsigned long)task_stack_page(next_p) +
THREAD_SIZE - KERNEL_STACK_OFFSET);
/*
* Now maybe reload the debug registers and handle I/O bitmaps
*/
if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
__switch_to_xtra(prev_p, next_p, tss);
return prev_p;
}
| 1 |
[
"CWE-200",
"CWE-401"
] |
linux
|
f647d7c155f069c1a068030255c300663516420e
| 92,234,122,397,960,570,000,000,000,000,000,000,000 | 109 |
x86_64, switch_to(): Load TLS descriptors before switching DS and ES
Otherwise, if buggy user code points DS or ES into the TLS
array, they would be corrupted after a context switch.
This also significantly improves the comments and documents some
gotchas in the code.
Before this patch, the both tests below failed. With this
patch, the es test passes, although the gsbase test still fails.
----- begin es test -----
/*
* Copyright (c) 2014 Andy Lutomirski
* GPL v2
*/
static unsigned short GDT3(int idx)
{
return (idx << 3) | 3;
}
static int create_tls(int idx, unsigned int base)
{
struct user_desc desc = {
.entry_number = idx,
.base_addr = base,
.limit = 0xfffff,
.seg_32bit = 1,
.contents = 0, /* Data, grow-up */
.read_exec_only = 0,
.limit_in_pages = 1,
.seg_not_present = 0,
.useable = 0,
};
if (syscall(SYS_set_thread_area, &desc) != 0)
err(1, "set_thread_area");
return desc.entry_number;
}
int main()
{
int idx = create_tls(-1, 0);
printf("Allocated GDT index %d\n", idx);
unsigned short orig_es;
asm volatile ("mov %%es,%0" : "=rm" (orig_es));
int errors = 0;
int total = 1000;
for (int i = 0; i < total; i++) {
asm volatile ("mov %0,%%es" : : "rm" (GDT3(idx)));
usleep(100);
unsigned short es;
asm volatile ("mov %%es,%0" : "=rm" (es));
asm volatile ("mov %0,%%es" : : "rm" (orig_es));
if (es != GDT3(idx)) {
if (errors == 0)
printf("[FAIL]\tES changed from 0x%hx to 0x%hx\n",
GDT3(idx), es);
errors++;
}
}
if (errors) {
printf("[FAIL]\tES was corrupted %d/%d times\n", errors, total);
return 1;
} else {
printf("[OK]\tES was preserved\n");
return 0;
}
}
----- end es test -----
----- begin gsbase test -----
/*
* gsbase.c, a gsbase test
* Copyright (c) 2014 Andy Lutomirski
* GPL v2
*/
static unsigned char *testptr, *testptr2;
static unsigned char read_gs_testvals(void)
{
unsigned char ret;
asm volatile ("movb %%gs:%1, %0" : "=r" (ret) : "m" (*testptr));
return ret;
}
int main()
{
int errors = 0;
testptr = mmap((void *)0x200000000UL, 1, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
if (testptr == MAP_FAILED)
err(1, "mmap");
testptr2 = mmap((void *)0x300000000UL, 1, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
if (testptr2 == MAP_FAILED)
err(1, "mmap");
*testptr = 0;
*testptr2 = 1;
if (syscall(SYS_arch_prctl, ARCH_SET_GS,
(unsigned long)testptr2 - (unsigned long)testptr) != 0)
err(1, "ARCH_SET_GS");
usleep(100);
if (read_gs_testvals() == 1) {
printf("[OK]\tARCH_SET_GS worked\n");
} else {
printf("[FAIL]\tARCH_SET_GS failed\n");
errors++;
}
asm volatile ("mov %0,%%gs" : : "r" (0));
if (read_gs_testvals() == 0) {
printf("[OK]\tWriting 0 to gs worked\n");
} else {
printf("[FAIL]\tWriting 0 to gs failed\n");
errors++;
}
usleep(100);
if (read_gs_testvals() == 0) {
printf("[OK]\tgsbase is still zero\n");
} else {
printf("[FAIL]\tgsbase was corrupted\n");
errors++;
}
return errors == 0 ? 0 : 1;
}
----- end gsbase test -----
Signed-off-by: Andy Lutomirski <[email protected]>
Cc: <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: Linus Torvalds <[email protected]>
Link: http://lkml.kernel.org/r/509d27c9fec78217691c3dad91cec87e1006b34a.1418075657.git.luto@amacapital.net
Signed-off-by: Ingo Molnar <[email protected]>
|
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
__bad_area(regs, error_code, address, SEGV_ACCERR);
}
| 0 |
[
"CWE-264"
] |
linux
|
548acf19234dbda5a52d5a8e7e205af46e9da840
| 152,000,543,461,170,400,000,000,000,000,000,000,000 | 5 |
x86/mm: Expand the exception table logic to allow new handling options
Huge amounts of help from Andy Lutomirski and Borislav Petkov to
produce this. Andy provided the inspiration to add classes to the
exception table with a clever bit-squeezing trick, Boris pointed
out how much cleaner it would all be if we just had a new field.
Linus Torvalds blessed the expansion with:
' I'd rather not be clever in order to save just a tiny amount of space
in the exception table, which isn't really criticial for anybody. '
The third field is another relative function pointer, this one to a
handler that executes the actions.
We start out with three handlers:
1: Legacy - just jumps the to fixup IP
2: Fault - provide the trap number in %ax to the fixup code
3: Cleaned up legacy for the uaccess error hack
Signed-off-by: Tony Luck <[email protected]>
Reviewed-by: Borislav Petkov <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/f6af78fcbd348cf4939875cfda9c19689b5e50b8.1455732970.git.tony.luck@intel.com
Signed-off-by: Ingo Molnar <[email protected]>
|
inline int32_t QuantizeSoftmaxOutput(float prob_rescaled, int32_t zero_point) {
const int32_t prob_rnd = static_cast<int32_t>(std::round(prob_rescaled));
return prob_rnd + zero_point;
}
| 0 |
[
"CWE-476",
"CWE-369"
] |
tensorflow
|
15691e456c7dc9bd6be203b09765b063bf4a380c
| 316,745,207,296,189,680,000,000,000,000,000,000,000 | 4 |
Prevent dereferencing of null pointers in TFLite's `add.cc`.
PiperOrigin-RevId: 387244946
Change-Id: I56094233327fbd8439b92e1dbb1262176e00eeb9
|
findoprnd(QueryItem *ptr, int size)
{
uint32 pos;
pos = 0;
findoprnd_recurse(ptr, &pos, size);
if (pos != size)
elog(ERROR, "malformed tsquery: extra nodes");
}
| 0 |
[
"CWE-703",
"CWE-189"
] |
postgres
|
31400a673325147e1205326008e32135a78b4d8a
| 144,980,761,052,676,170,000,000,000,000,000,000,000 | 10 |
Predict integer overflow to avoid buffer overruns.
Several functions, mostly type input functions, calculated an allocation
size such that the calculation wrapped to a small positive value when
arguments implied a sufficiently-large requirement. Writes past the end
of the inadvertent small allocation followed shortly thereafter.
Coverity identified the path_in() vulnerability; code inspection led to
the rest. In passing, add check_stack_depth() to prevent stack overflow
in related functions.
Back-patch to 8.4 (all supported versions). The non-comment hstore
changes touch code that did not exist in 8.4, so that part stops at 9.0.
Noah Misch and Heikki Linnakangas, reviewed by Tom Lane.
Security: CVE-2014-0064
|
CImg<T> get_threshold(const T& value, const bool soft_threshold=false, const bool strict_threshold=false) const {
return (+*this).threshold(value,soft_threshold,strict_threshold);
}
| 0 |
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
| 170,385,158,191,784,200,000,000,000,000,000,000,000 | 3 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
|
static int recv_pkt(git_pkt **out, gitno_buffer *buf)
{
const char *ptr = buf->data, *line_end = ptr;
git_pkt *pkt = NULL;
int pkt_type, error = 0, ret;
do {
if (buf->offset > 0)
error = git_pkt_parse_line(&pkt, ptr, &line_end, buf->offset);
else
error = GIT_EBUFS;
if (error == 0)
break; /* return the pkt */
if (error < 0 && error != GIT_EBUFS)
return error;
if ((ret = gitno_recv(buf)) < 0) {
return ret;
} else if (ret == 0) {
giterr_set(GITERR_NET, "early EOF");
return GIT_EEOF;
}
} while (error);
gitno_consume(buf, line_end);
pkt_type = pkt->type;
if (out != NULL)
*out = pkt;
else
git__free(pkt);
return pkt_type;
}
| 0 |
[
"CWE-476",
"CWE-119"
] |
libgit2
|
2fdef641fd0dd2828bd948234ae86de75221a11a
| 146,776,508,880,194,530,000,000,000,000,000,000,000 | 35 |
smart_pkt: treat empty packet lines as error
The Git protocol does not specify what should happen in the case
of an empty packet line (that is a packet line "0004"). We
currently indicate success, but do not return a packet in the
case where we hit an empty line. The smart protocol was not
prepared to handle such packets in all cases, though, resulting
in a `NULL` pointer dereference.
Fix the issue by returning an error instead. As such kind of
packets is not even specified by upstream, this is the right
thing to do.
|
static inline int sk_has_account(struct sock *sk)
{
/* return true if protocol supports memory accounting */
return !!sk->sk_prot->memory_allocated;
}
| 0 |
[
"CWE-400"
] |
linux-2.6
|
c377411f2494a931ff7facdbb3a6839b1266bcf6
| 146,144,118,591,462,640,000,000,000,000,000,000,000 | 5 |
net: sk_add_backlog() take rmem_alloc into account
Current socket backlog limit is not enough to really stop DDOS attacks,
because user thread spend many time to process a full backlog each
round, and user might crazy spin on socket lock.
We should add backlog size and receive_queue size (aka rmem_alloc) to
pace writers, and let user run without being slow down too much.
Introduce a sk_rcvqueues_full() helper, to avoid taking socket lock in
stress situations.
Under huge stress from a multiqueue/RPS enabled NIC, a single flow udp
receiver can now process ~200.000 pps (instead of ~100 pps before the
patch) on a 8 core machine.
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
MagickExport MagickBooleanType AnnotateImage(Image *image,
const DrawInfo *draw_info)
{
char
*p,
primitive[MaxTextExtent],
*text,
**textlist;
DrawInfo
*annotate,
*annotate_info;
GeometryInfo
geometry_info;
MagickBooleanType
status;
PointInfo
offset;
RectangleInfo
geometry;
register ssize_t
i;
TypeMetric
metrics;
size_t
height,
number_lines;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (draw_info->text == (char *) NULL)
return(MagickFalse);
if (*draw_info->text == '\0')
return(MagickTrue);
annotate=CloneDrawInfo((ImageInfo *) NULL,draw_info);
text=annotate->text;
annotate->text=(char *) NULL;
annotate_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
number_lines=1;
for (p=text; *p != '\0'; p++)
if (*p == '\n')
number_lines++;
textlist=AcquireQuantumMemory(number_lines+1,sizeof(*textlist));
if (textlist == (char **) NULL)
return(MagickFalse);
p=text;
for (i=0; i < number_lines; i++)
{
char
*q;
textlist[i]=p;
for (q=p; *q != '\0'; q++)
if ((*q == '\r') || (*q == '\n'))
break;
if (*q == '\r')
{
*q='\0';
q++;
}
*q='\0';
p=q+1;
}
textlist[i]=(char *) NULL;
if (textlist == (char **) NULL)
return(MagickFalse);
SetGeometry(image,&geometry);
SetGeometryInfo(&geometry_info);
if (annotate_info->geometry != (char *) NULL)
{
(void) ParsePageGeometry(image,annotate_info->geometry,&geometry,
&image->exception);
(void) ParseGeometry(annotate_info->geometry,&geometry_info);
}
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace);
status=MagickTrue;
(void) memset(&metrics,0,sizeof(metrics));
for (i=0; textlist[i] != (char *) NULL; i++)
{
if (*textlist[i] == '\0')
continue;
/*
Position text relative to image.
*/
annotate_info->affine.tx=geometry_info.xi-image->page.x;
annotate_info->affine.ty=geometry_info.psi-image->page.y;
(void) CloneString(&annotate->text,textlist[i]);
if ((metrics.width == 0) || (annotate->gravity != NorthWestGravity))
(void) GetTypeMetrics(image,annotate,&metrics);
height=(ssize_t) (metrics.ascent-metrics.descent+
draw_info->interline_spacing+0.5);
switch (annotate->gravity)
{
case UndefinedGravity:
default:
{
offset.x=annotate_info->affine.tx+i*annotate_info->affine.ry*height;
offset.y=annotate_info->affine.ty+i*annotate_info->affine.sy*height;
break;
}
case NorthWestGravity:
{
offset.x=(geometry.width == 0 ? -1.0 : 1.0)*annotate_info->affine.tx+i*
annotate_info->affine.ry*height+annotate_info->affine.ry*
(metrics.ascent+metrics.descent);
offset.y=(geometry.height == 0 ? -1.0 : 1.0)*annotate_info->affine.ty+i*
annotate_info->affine.sy*height+annotate_info->affine.sy*
metrics.ascent;
break;
}
case NorthGravity:
{
offset.x=(geometry.width == 0 ? -1.0 : 1.0)*annotate_info->affine.tx+
geometry.width/2.0+i*annotate_info->affine.ry*height-
annotate_info->affine.sx*metrics.width/2.0+annotate_info->affine.ry*
(metrics.ascent+metrics.descent);
offset.y=(geometry.height == 0 ? -1.0 : 1.0)*annotate_info->affine.ty+i*
annotate_info->affine.sy*height+annotate_info->affine.sy*
metrics.ascent-annotate_info->affine.rx*metrics.width/2.0;
break;
}
case NorthEastGravity:
{
offset.x=(geometry.width == 0 ? 1.0 : -1.0)*annotate_info->affine.tx+
geometry.width+i*annotate_info->affine.ry*height-
annotate_info->affine.sx*metrics.width+annotate_info->affine.ry*
(metrics.ascent+metrics.descent)-1.0;
offset.y=(geometry.height == 0 ? -1.0 : 1.0)*annotate_info->affine.ty+i*
annotate_info->affine.sy*height+annotate_info->affine.sy*
metrics.ascent-annotate_info->affine.rx*metrics.width;
break;
}
case WestGravity:
{
offset.x=(geometry.width == 0 ? -1.0 : 1.0)*annotate_info->affine.tx+i*
annotate_info->affine.ry*height+annotate_info->affine.ry*
(metrics.ascent+metrics.descent-(number_lines-1.0)*height)/2.0;
offset.y=(geometry.height == 0 ? -1.0 : 1.0)*annotate_info->affine.ty+
geometry.height/2.0+i*annotate_info->affine.sy*height+
annotate_info->affine.sy*(metrics.ascent+metrics.descent-
(number_lines-1.0)*height)/2.0;
break;
}
case StaticGravity:
case CenterGravity:
{
offset.x=(geometry.width == 0 ? -1.0 : 1.0)*annotate_info->affine.tx+
geometry.width/2.0+i*annotate_info->affine.ry*height-
annotate_info->affine.sx*metrics.width/2.0+annotate_info->affine.ry*
(metrics.ascent+metrics.descent-(number_lines-1)*height)/2.0;
offset.y=(geometry.height == 0 ? -1.0 : 1.0)*annotate_info->affine.ty+
geometry.height/2.0+i*annotate_info->affine.sy*height-
annotate_info->affine.rx*metrics.width/2.0+annotate_info->affine.sy*
(metrics.ascent+metrics.descent-(number_lines-1.0)*height)/2.0;
break;
}
case EastGravity:
{
offset.x=(geometry.width == 0 ? 1.0 : -1.0)*annotate_info->affine.tx+
geometry.width+i*annotate_info->affine.ry*height-
annotate_info->affine.sx*metrics.width+annotate_info->affine.ry*
(metrics.ascent+metrics.descent-(number_lines-1.0)*height)/2.0-1.0;
offset.y=(geometry.height == 0 ? -1.0 : 1.0)*annotate_info->affine.ty+
geometry.height/2.0+i*annotate_info->affine.sy*height-
annotate_info->affine.rx*metrics.width+annotate_info->affine.sy*
(metrics.ascent+metrics.descent-(number_lines-1.0)*height)/2.0;
break;
}
case SouthWestGravity:
{
offset.x=(geometry.width == 0 ? -1.0 : 1.0)*annotate_info->affine.tx+i*
annotate_info->affine.ry*height-annotate_info->affine.ry*
(number_lines-1.0)*height;
offset.y=(geometry.height == 0 ? 1.0 : -1.0)*annotate_info->affine.ty+
geometry.height+i*annotate_info->affine.sy*height-
annotate_info->affine.sy*(number_lines-1.0)*height+metrics.descent;
break;
}
case SouthGravity:
{
offset.x=(geometry.width == 0 ? -1.0 : 1.0)*annotate_info->affine.tx+
geometry.width/2.0+i*annotate_info->affine.ry*height-
annotate_info->affine.sx*metrics.width/2.0-annotate_info->affine.ry*
(number_lines-1.0)*height/2.0;
offset.y=(geometry.height == 0 ? 1.0 : -1.0)*annotate_info->affine.ty+
geometry.height+i*annotate_info->affine.sy*height-
annotate_info->affine.rx*metrics.width/2.0-annotate_info->affine.sy*
(number_lines-1.0)*height+metrics.descent;
break;
}
case SouthEastGravity:
{
offset.x=(geometry.width == 0 ? 1.0 : -1.0)*annotate_info->affine.tx+
geometry.width+i*annotate_info->affine.ry*height-
annotate_info->affine.sx*metrics.width-annotate_info->affine.ry*
(number_lines-1.0)*height-1.0;
offset.y=(geometry.height == 0 ? 1.0 : -1.0)*annotate_info->affine.ty+
geometry.height+i*annotate_info->affine.sy*height-
annotate_info->affine.rx*metrics.width-annotate_info->affine.sy*
(number_lines-1.0)*height+metrics.descent;
break;
}
}
switch (annotate->align)
{
case LeftAlign:
{
offset.x=annotate_info->affine.tx+i*annotate_info->affine.ry*height;
offset.y=annotate_info->affine.ty+i*annotate_info->affine.sy*height;
break;
}
case CenterAlign:
{
offset.x=annotate_info->affine.tx+i*annotate_info->affine.ry*height-
annotate_info->affine.sx*metrics.width/2.0;
offset.y=annotate_info->affine.ty+i*annotate_info->affine.sy*height-
annotate_info->affine.rx*metrics.width/2.0;
break;
}
case RightAlign:
{
offset.x=annotate_info->affine.tx+i*annotate_info->affine.ry*height-
annotate_info->affine.sx*metrics.width;
offset.y=annotate_info->affine.ty+i*annotate_info->affine.sy*height-
annotate_info->affine.rx*metrics.width;
break;
}
default:
break;
}
if (draw_info->undercolor.opacity != TransparentOpacity)
{
DrawInfo
*undercolor_info;
/*
Text box.
*/
undercolor_info=CloneDrawInfo((ImageInfo *) NULL,(DrawInfo *) NULL);
undercolor_info->fill=draw_info->undercolor;
undercolor_info->affine=draw_info->affine;
undercolor_info->affine.tx=offset.x-draw_info->affine.ry*metrics.ascent;
undercolor_info->affine.ty=offset.y-draw_info->affine.sy*metrics.ascent;
(void) FormatLocaleString(primitive,MaxTextExtent,
"rectangle 0.0,0.0 %g,%g",metrics.origin.x,(double) height);
(void) CloneString(&undercolor_info->primitive,primitive);
(void) DrawImage(image,undercolor_info);
(void) DestroyDrawInfo(undercolor_info);
}
annotate_info->affine.tx=offset.x;
annotate_info->affine.ty=offset.y;
(void) FormatLocaleString(primitive,MaxTextExtent,"stroke-width %g "
"line 0,0 %g,0",metrics.underline_thickness,metrics.width);
if (annotate->decorate == OverlineDecoration)
{
annotate_info->affine.ty-=(draw_info->affine.sy*(metrics.ascent+
metrics.descent-metrics.underline_position));
(void) CloneString(&annotate_info->primitive,primitive);
(void) DrawImage(image,annotate_info);
}
else
if (annotate->decorate == UnderlineDecoration)
{
annotate_info->affine.ty-=(draw_info->affine.sy*
metrics.underline_position);
(void) CloneString(&annotate_info->primitive,primitive);
(void) DrawImage(image,annotate_info);
}
/*
Annotate image with text.
*/
status=RenderType(image,annotate,&offset,&metrics);
if (status == MagickFalse)
break;
if (annotate->decorate == LineThroughDecoration)
{
annotate_info->affine.ty-=(draw_info->affine.sy*(height+
metrics.underline_position+metrics.descent)/2.0);
(void) CloneString(&annotate_info->primitive,primitive);
(void) DrawImage(image,annotate_info);
}
}
/*
Relinquish resources.
*/
annotate_info=DestroyDrawInfo(annotate_info);
annotate=DestroyDrawInfo(annotate);
textlist=(char **) RelinquishMagickMemory(textlist);
return(status);
}
| 1 |
[
"CWE-401"
] |
ImageMagick6
|
0b7d3675438cbcde824e751895847a0794406e08
| 43,271,922,742,203,170,000,000,000,000,000,000,000 | 304 |
https://github.com/ImageMagick/ImageMagick/issues/1589
|
void utf2char(utf16_t *str, char *buffer, unsigned bufsz)
{
if(bufsz<1) return;
buffer[bufsz-1] = 0;
char *b = buffer;
while (*str != 0x00 && --bufsz>0)
{
char *chr = (char *)str;
*b++ = *chr;
str++;
}
*b = 0;
}
| 0 |
[
"CWE-787"
] |
LibRaw
|
efd8cfabb93fd0396266a7607069901657c082e3
| 6,051,803,650,346,116,000,000,000,000,000,000,000 | 14 |
X3F parser possible buffer overrun
|
MONGO_EXPORT mongo_cursor* mongo_cursor_create( void ) {
return (mongo_cursor*)bson_malloc(sizeof(mongo_cursor));
}
| 0 |
[
"CWE-190"
] |
mongo-c-driver-legacy
|
1a1f5e26a4309480d88598913f9eebf9e9cba8ca
| 134,654,247,628,877,280,000,000,000,000,000,000,000 | 3 |
don't mix up int and size_t (first pass to fix that)
|
void ConnectionManagerImpl::ActiveStream::refreshDurationTimeout() {
if (!filter_manager_.streamInfo().route() ||
!filter_manager_.streamInfo().route()->routeEntry() || !request_headers_) {
return;
}
const auto& route = filter_manager_.streamInfo().route()->routeEntry();
auto grpc_timeout = Grpc::Common::getGrpcTimeout(*request_headers_);
std::chrono::milliseconds timeout;
bool disable_timer = false;
if (!grpc_timeout || !route->grpcTimeoutHeaderMax()) {
// Either there is no grpc-timeout header or special timeouts for it are not
// configured. Use stream duration.
if (route->maxStreamDuration()) {
timeout = route->maxStreamDuration().value();
if (timeout == std::chrono::milliseconds(0)) {
// Explicitly configured 0 means no timeout.
disable_timer = true;
}
} else {
// Fall back to HCM config. If no HCM duration limit exists, disable
// timers set by any prior route configuration.
const auto max_stream_duration = connection_manager_.config_.maxStreamDuration();
if (max_stream_duration.has_value() && max_stream_duration.value().count()) {
timeout = max_stream_duration.value();
} else {
disable_timer = true;
}
}
} else {
// Start with the timeout equal to the gRPC timeout header.
timeout = grpc_timeout.value();
// If there's a valid cap, apply it.
if (timeout > route->grpcTimeoutHeaderMax().value() &&
route->grpcTimeoutHeaderMax().value() != std::chrono::milliseconds(0)) {
timeout = route->grpcTimeoutHeaderMax().value();
}
// Apply the configured offset.
if (timeout != std::chrono::milliseconds(0) && route->grpcTimeoutHeaderOffset()) {
const auto offset = route->grpcTimeoutHeaderOffset().value();
if (offset < timeout) {
timeout -= offset;
} else {
timeout = std::chrono::milliseconds(0);
}
}
}
// Disable any existing timer if configured to do so.
if (disable_timer) {
if (max_stream_duration_timer_) {
max_stream_duration_timer_->disableTimer();
if (route->usingNewTimeouts() && Grpc::Common::isGrpcRequestHeaders(*request_headers_)) {
request_headers_->removeGrpcTimeout();
}
}
return;
}
// Set the header timeout before doing used-time adjustments.
// This may result in the upstream not getting the latest results, but also
// avoids every request getting a custom timeout based on envoy think time.
if (route->usingNewTimeouts() && Grpc::Common::isGrpcRequestHeaders(*request_headers_)) {
Grpc::Common::toGrpcTimeout(std::chrono::milliseconds(timeout), *request_headers_);
}
// See how long this stream has been alive, and adjust the timeout
// accordingly.
std::chrono::duration time_used = std::chrono::duration_cast<std::chrono::milliseconds>(
connection_manager_.timeSource().monotonicTime() -
filter_manager_.streamInfo().startTimeMonotonic());
if (timeout > time_used) {
timeout -= time_used;
} else {
timeout = std::chrono::milliseconds(0);
}
// Finally create (if necessary) and enable the timer.
if (!max_stream_duration_timer_) {
max_stream_duration_timer_ =
connection_manager_.read_callbacks_->connection().dispatcher().createTimer(
[this]() -> void { onStreamMaxDurationReached(); });
}
max_stream_duration_timer_->enableTimer(timeout);
}
| 0 |
[
"CWE-416"
] |
envoy
|
148de954ed3585d8b4298b424aa24916d0de6136
| 71,523,045,684,460,130,000,000,000,000,000,000,000 | 87 |
CVE-2021-43825
Response filter manager crash
Signed-off-by: Yan Avlasov <[email protected]>
|
static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
{
if (acb) {
/* stop adapter background rebuild */
if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
uint32_t intmask_org;
acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
intmask_org = arcmsr_disable_outbound_ints(acb);
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
arcmsr_enable_outbound_ints(acb, intmask_org);
}
}
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
linux
|
7bc2b55a5c030685b399bb65b6baa9ccc3d1f167
| 184,298,711,433,661,430,000,000,000,000,000,000,000 | 14 |
scsi: arcmsr: Buffer overflow in arcmsr_iop_message_xfer()
We need to put an upper bound on "user_len" so the memcpy() doesn't
overflow.
Cc: <[email protected]>
Reported-by: Marco Grassi <[email protected]>
Signed-off-by: Dan Carpenter <[email protected]>
Reviewed-by: Tomas Henzl <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]>
|
virDomainParseMemory(const char *xpath,
const char *units_xpath,
xmlXPathContextPtr ctxt,
unsigned long long *mem,
bool required,
bool capped)
{
unsigned long long bytes, max;
max = virMemoryMaxValue(capped);
if (virDomainParseScaledValue(xpath, units_xpath, ctxt,
&bytes, 1024, max, required) < 0)
return -1;
/* Yes, we really do use kibibytes for our internal sizing. */
*mem = VIR_DIV_UP(bytes, 1024);
if (*mem >= VIR_DIV_UP(max, 1024)) {
virReportError(VIR_ERR_OVERFLOW, "%s", _("size value too large"));
return -1;
}
return 0;
}
| 0 |
[
"CWE-212"
] |
libvirt
|
a5b064bf4b17a9884d7d361733737fb614ad8979
| 211,972,771,008,039,650,000,000,000,000,000,000,000 | 24 |
conf: Don't format http cookies unless VIR_DOMAIN_DEF_FORMAT_SECURE is used
Starting with 3b076391befc3fe72deb0c244ac6c2b4c100b410
(v6.1.0-122-g3b076391be) we support http cookies. Since they may contain
somewhat sensitive information we should not format them into the XML
unless VIR_DOMAIN_DEF_FORMAT_SECURE is asserted.
Reported-by: Han Han <[email protected]>
Signed-off-by: Peter Krempa <[email protected]>
Reviewed-by: Erik Skultety <[email protected]>
|
njs_typed_array_prototype_index_of(njs_vm_t *vm, njs_value_t *args,
njs_uint_t nargs, njs_index_t type)
{
double v;
int64_t i, i64, from, to, index, increment, offset, length;
njs_int_t ret, integer;
njs_value_t *this;
const float *f32;
const double *f64;
const uint8_t *u8;
const uint16_t *u16;
const uint32_t *u32;
njs_typed_array_t *array;
njs_array_buffer_t *buffer;
this = njs_argument(args, 0);
if (njs_slow_path(!njs_is_typed_array(this))) {
njs_type_error(vm, "this is not a typed array");
return NJS_ERROR;
}
index = -1;
array = njs_typed_array(this);
length = njs_typed_array_length(array);
if (!njs_is_number(njs_arg(args, nargs, 1)) || length == 0) {
goto done;
}
if (type & 2) {
/* lastIndexOf(). */
if (nargs > 2) {
ret = njs_value_to_integer(vm, njs_arg(args, nargs, 2), &from);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
} else {
from = length - 1;
}
if (from >= 0) {
from = njs_min(from, length - 1);
} else if (from < 0) {
from += length;
}
to = -1;
increment = -1;
if (from <= to) {
goto done;
}
} else {
/* indexOf(), includes(). */
ret = njs_value_to_integer(vm, njs_arg(args, nargs, 2), &from);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
if (from < 0) {
from += length;
if (from < 0) {
from = 0;
}
}
to = length;
increment = 1;
if (from >= to) {
goto done;
}
}
if (njs_slow_path(njs_is_detached_buffer(array->buffer))) {
njs_type_error(vm, "detached buffer");
return NJS_ERROR;
}
v = njs_number(njs_argument(args, 1));
i64 = v;
integer = (v == i64);
buffer = array->buffer;
offset = array->offset;
switch (array->type) {
case NJS_OBJ_TYPE_INT8_ARRAY:
if (integer && ((int8_t) i64 == i64)) {
goto search8;
}
break;
case NJS_OBJ_TYPE_UINT8_CLAMPED_ARRAY:
case NJS_OBJ_TYPE_UINT8_ARRAY:
if (integer && ((uint8_t) i64 == i64)) {
search8:
u8 = &buffer->u.u8[0];
for (i = from; i != to; i += increment) {
if (u8[offset + i] == (uint8_t) i64) {
index = i;
break;
}
}
}
break;
case NJS_OBJ_TYPE_INT16_ARRAY:
if (integer && ((int16_t) i64 == i64)) {
goto search16;
}
break;
case NJS_OBJ_TYPE_UINT16_ARRAY:
if (integer && ((uint16_t) i64 == i64)) {
search16:
u16 = &buffer->u.u16[0];
for (i = from; i != to; i += increment) {
if (u16[offset + i] == (uint16_t) i64) {
index = i;
break;
}
}
}
break;
case NJS_OBJ_TYPE_INT32_ARRAY:
if (integer && ((int32_t) i64 == i64)) {
goto search32;
}
break;
case NJS_OBJ_TYPE_UINT32_ARRAY:
if (integer && ((uint32_t) i64 == i64)) {
search32:
u32 = &buffer->u.u32[0];
for (i = from; i != to; i += increment) {
if (u32[offset + i] == (uint32_t) i64) {
index = i;
break;
}
}
}
break;
case NJS_OBJ_TYPE_FLOAT32_ARRAY:
f32 = &buffer->u.f32[0];
if (((float) v == v)) {
for (i = from; i != to; i += increment) {
if (f32[offset + i] == (float) v) {
index = i;
break;
}
}
} else if ((type & 1) && isnan(v)) {
/* includes() handles NaN. */
for (i = from; i != to; i += increment) {
if (isnan(f32[offset + i])) {
index = i;
break;
}
}
}
break;
default:
/* NJS_OBJ_TYPE_FLOAT64_ARRAY. */
f64 = &buffer->u.f64[0];
if ((type & 1) && isnan(v)) {
/* includes() handles NaN. */
for (i = from; i != to; i += increment) {
if (isnan(f64[offset + i])) {
index = i;
break;
}
}
} else {
for (i = from; i != to; i += increment) {
if (f64[offset + i] == v) {
index = i;
break;
}
}
}
}
done:
/* Default values. */
if (type & 1) {
njs_set_boolean(&vm->retval, index != -1);
} else {
njs_set_number(&vm->retval, index);
}
return NJS_OK;
}
| 0 |
[
"CWE-703"
] |
njs
|
5c6130a2a0b4c41ab415f6b8992aa323636338b9
| 87,997,912,113,384,600,000,000,000,000,000,000,000 | 221 |
Fixed Array.prototype.fill() for typed-arrays.
This closes #478 issue on Github.
|
static void get_object_list(int ac, const char **av)
{
struct rev_info revs;
char line[1000];
int flags = 0;
init_revisions(&revs, NULL);
save_commit_buffer = 0;
setup_revisions(ac, av, &revs, NULL);
/* make sure shallows are read */
is_repository_shallow();
while (fgets(line, sizeof(line), stdin) != NULL) {
int len = strlen(line);
if (len && line[len - 1] == '\n')
line[--len] = 0;
if (!len)
break;
if (*line == '-') {
if (!strcmp(line, "--not")) {
flags ^= UNINTERESTING;
write_bitmap_index = 0;
continue;
}
if (starts_with(line, "--shallow ")) {
unsigned char sha1[20];
if (get_sha1_hex(line + 10, sha1))
die("not an SHA-1 '%s'", line + 10);
register_shallow(sha1);
use_bitmap_index = 0;
continue;
}
die("not a rev '%s'", line);
}
if (handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))
die("bad revision '%s'", line);
}
if (use_bitmap_index && !get_object_list_from_bitmap(&revs))
return;
if (prepare_revision_walk(&revs))
die("revision walk setup failed");
mark_edges_uninteresting(&revs, show_edge);
traverse_commit_list(&revs, show_commit, show_object, NULL);
if (unpack_unreachable_expiration) {
revs.ignore_missing_links = 1;
if (add_unseen_recent_objects_to_traversal(&revs,
unpack_unreachable_expiration))
die("unable to add recent objects");
if (prepare_revision_walk(&revs))
die("revision walk setup failed");
traverse_commit_list(&revs, record_recent_commit,
record_recent_object, NULL);
}
if (keep_unreachable)
add_objects_in_unpacked_packs(&revs);
if (unpack_unreachable)
loosen_unused_packed_objects(&revs);
sha1_array_clear(&recent_objects);
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
git
|
de1e67d0703894cb6ea782e36abb63976ab07e60
| 263,807,123,757,459,030,000,000,000,000,000,000,000 | 65 |
list-objects: pass full pathname to callbacks
When we find a blob at "a/b/c", we currently pass this to
our show_object_fn callbacks as two components: "a/b/" and
"c". Callbacks which want the full value then call
path_name(), which concatenates the two. But this is an
inefficient interface; the path is a strbuf, and we could
simply append "c" to it temporarily, then roll back the
length, without creating a new copy.
So we could improve this by teaching the callsites of
path_name() this trick (and there are only 3). But we can
also notice that no callback actually cares about the
broken-down representation, and simply pass each callback
the full path "a/b/c" as a string. The callback code becomes
even simpler, then, as we do not have to worry about freeing
an allocated buffer, nor rolling back our modification to
the strbuf.
This is theoretically less efficient, as some callbacks
would not bother to format the final path component. But in
practice this is not measurable. Since we use the same
strbuf over and over, our work to grow it is amortized, and
we really only pay to memcpy a few bytes.
Signed-off-by: Jeff King <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]>
|
select_union_direct(THD *thd_arg, select_result *result_arg,
SELECT_LEX *last_select_lex_arg):
select_union(thd_arg), result(result_arg),
last_select_lex(last_select_lex_arg),
done_send_result_set_metadata(false), done_initialize_tables(false),
limit_found_rows(0)
{ send_records= 0; }
| 0 |
[
"CWE-416"
] |
server
|
4681b6f2d8c82b4ec5cf115e83698251963d80d5
| 264,390,808,304,949,050,000,000,000,000,000,000,000 | 7 |
MDEV-26281 ASAN use-after-poison when complex conversion is involved in blob
the bug was that in_vector array in Item_func_in was allocated in the
statement arena, not in the table->expr_arena.
revert part of the 5acd391e8b2d. Instead, change the arena correctly
in fix_all_session_vcol_exprs().
Remove TABLE_ARENA, that was introduced in 5acd391e8b2d to force
item tree changes to be rolled back (because they were allocated in the
wrong arena and didn't persist. now they do)
|
void CLASS bad_pixels (const char *cfname)
{
FILE *fp=0;
#ifndef LIBRAW_LIBRARY_BUILD
char *fname, *cp, line[128];
int len, time, row, col, r, c, rad, tot, n, fixed=0;
#else
char *cp, line[128];
int time, row, col, r, c, rad, tot, n;
#ifdef DCRAW_VERBOSE
int fixed = 0;
#endif
#endif
if (!filters) return;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_BAD_PIXELS,0,2);
#endif
if (cfname)
fp = fopen (cfname, "r");
#line 3674 "dcraw/dcraw.c"
if (!fp)
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_BADPIXELMAP;
#endif
return;
}
while (fgets (line, 128, fp)) {
cp = strchr (line, '#');
if (cp) *cp = 0;
if (sscanf (line, "%d %d %d", &col, &row, &time) != 3) continue;
if ((unsigned) col >= width || (unsigned) row >= height) continue;
if (time > timestamp) continue;
for (tot=n=0, rad=1; rad < 3 && n==0; rad++)
for (r = row-rad; r <= row+rad; r++)
for (c = col-rad; c <= col+rad; c++)
if ((unsigned) r < height && (unsigned) c < width &&
(r != row || c != col) && fc(r,c) == fc(row,col)) {
tot += BAYER2(r,c);
n++;
}
BAYER2(row,col) = tot/n;
#ifdef DCRAW_VERBOSE
if (verbose) {
if (!fixed++)
fprintf (stderr,_("Fixed dead pixels at:"));
fprintf (stderr, " %d,%d", col, row);
}
#endif
}
#ifdef DCRAW_VERBOSE
if (fixed) fputc ('\n', stderr);
#endif
fclose (fp);
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_BAD_PIXELS,1,2);
#endif
}
| 0 |
[] |
LibRaw
|
c4e374ea6c979a7d1d968f5082b7d0ea8cd27202
| 18,694,756,394,670,820,000,000,000,000,000,000,000 | 59 |
additional data checks backported from 0.15.4
|
EIGEN_STRONG_INLINE QInt32 operator*(const QInt8 a, const QUInt8 b) {
return QInt32(static_cast<int32_t>(a.value) * static_cast<int32_t>(b.value));
}
| 0 |
[
"CWE-908",
"CWE-787"
] |
tensorflow
|
ace0c15a22f7f054abcc1f53eabbcb0a1239a9e2
| 93,212,524,288,380,080,000,000,000,000,000,000,000 | 3 |
Default initialize fixed point Eigen types.
In certain cases, tensors are filled with default values of the type. But, for these fixed point types, these values were uninitialized. Thus, we would have uninitialized memory access bugs, some of which were caught by MSAN.
PiperOrigin-RevId: 344101137
Change-Id: I14555fda74dca3b5f1582da9008901937e3f14e2
|
static bool checkreturn find_extension_field(pb_field_iterator_t *iter)
{
unsigned start = iter->field_index;
do {
if (PB_LTYPE(iter->pos->type) == PB_LTYPE_EXTENSION)
return true;
(void)pb_field_next(iter);
} while (iter->field_index != start);
return false;
}
| 0 |
[
"CWE-125"
] |
nanopb
|
7b396821ddd06df8e39143f16e1dc0a4645b89a3
| 22,561,736,283,133,600,000,000,000,000,000,000,000 | 12 |
Fix invalid free() after failed realloc() (GHSA-gcx3-7m76-287p)
|
static int remove_uuid(struct btd_adapter *adapter, uuid_t *uuid)
{
struct mgmt_cp_remove_uuid cp;
uuid_t uuid128;
uint128_t uint128;
if (!is_supported_uuid(uuid)) {
btd_warn(adapter->dev_id,
"Ignoring unsupported UUID for removal");
return 0;
}
uuid_to_uuid128(&uuid128, uuid);
ntoh128((uint128_t *) uuid128.value.uuid128.data, &uint128);
htob128(&uint128, (uint128_t *) cp.uuid);
DBG("sending remove uuid command for index %u", adapter->dev_id);
if (mgmt_send(adapter->mgmt, MGMT_OP_REMOVE_UUID,
adapter->dev_id, sizeof(cp), &cp,
remove_uuid_complete, adapter, NULL) > 0)
return 0;
btd_error(adapter->dev_id, "Failed to remove UUID for index %u",
adapter->dev_id);
return -EIO;
}
| 0 |
[
"CWE-862",
"CWE-863"
] |
bluez
|
b497b5942a8beb8f89ca1c359c54ad67ec843055
| 228,668,654,098,269,050,000,000,000,000,000,000,000 | 29 |
adapter: Fix storing discoverable setting
discoverable setting shall only be store when changed via Discoverable
property and not when discovery client set it as that be considered
temporary just for the lifetime of the discovery.
|
static uint16_t nvme_set_zd_ext(NvmeNamespace *ns, NvmeZone *zone)
{
uint16_t status;
uint8_t state = nvme_get_zone_state(zone);
if (state == NVME_ZONE_STATE_EMPTY) {
status = nvme_aor_check(ns, 1, 0);
if (status) {
return status;
}
nvme_aor_inc_active(ns);
zone->d.za |= NVME_ZA_ZD_EXT_VALID;
nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_CLOSED);
return NVME_SUCCESS;
}
return NVME_ZONE_INVAL_TRANSITION;
}
| 0 |
[] |
qemu
|
736b01642d85be832385063f278fe7cd4ffb5221
| 244,825,451,436,646,100,000,000,000,000,000,000,000 | 18 |
hw/nvme: fix CVE-2021-3929
This fixes CVE-2021-3929 "locally" by denying DMA to the iomem of the
device itself. This still allows DMA to MMIO regions of other devices
(e.g. doing P2P DMA to the controller memory buffer of another NVMe
device).
Fixes: CVE-2021-3929
Reported-by: Qiuhao Li <[email protected]>
Reviewed-by: Keith Busch <[email protected]>
Reviewed-by: Philippe Mathieu-Daudé <[email protected]>
Signed-off-by: Klaus Jensen <[email protected]>
|
static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
__s32 *array, int bytes)
{
BUG_ON(bytes < (DEVCONF_MAX * 4));
memset(array, 0, bytes);
array[DEVCONF_FORWARDING] = cnf->forwarding;
array[DEVCONF_HOPLIMIT] = cnf->hop_limit;
array[DEVCONF_MTU6] = cnf->mtu6;
array[DEVCONF_ACCEPT_RA] = cnf->accept_ra;
array[DEVCONF_ACCEPT_REDIRECTS] = cnf->accept_redirects;
array[DEVCONF_AUTOCONF] = cnf->autoconf;
array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits;
array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits;
array[DEVCONF_RTR_SOLICIT_INTERVAL] =
jiffies_to_msecs(cnf->rtr_solicit_interval);
array[DEVCONF_RTR_SOLICIT_DELAY] =
jiffies_to_msecs(cnf->rtr_solicit_delay);
array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] =
jiffies_to_msecs(cnf->mldv1_unsolicited_report_interval);
array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
jiffies_to_msecs(cnf->mldv2_unsolicited_report_interval);
array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft;
array[DEVCONF_TEMP_PREFERED_LFT] = cnf->temp_prefered_lft;
array[DEVCONF_REGEN_MAX_RETRY] = cnf->regen_max_retry;
array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
#ifdef CONFIG_IPV6_ROUTER_PREF
array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
array[DEVCONF_RTR_PROBE_INTERVAL] =
jiffies_to_msecs(cnf->rtr_probe_interval);
#ifdef CONFIG_IPV6_ROUTE_INFO
array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
#endif
#endif
array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic;
#endif
#ifdef CONFIG_IPV6_MROUTE
array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding;
#endif
array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6;
array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
}
| 0 |
[
"CWE-20"
] |
linux
|
77751427a1ff25b27d47a4c36b12c3c8667855ac
| 139,606,240,346,285,590,000,000,000,000,000,000,000 | 56 |
ipv6: addrconf: validate new MTU before applying it
Currently we don't check if the new MTU is valid or not and this allows
one to configure a smaller than minimum allowed by RFCs or even bigger
than interface own MTU, which is a problem as it may lead to packet
drops.
If you have a daemon like NetworkManager running, this may be exploited
by remote attackers by forging RA packets with an invalid MTU, possibly
leading to a DoS. (NetworkManager currently only validates for values
too small, but not for too big ones.)
The fix is just to make sure the new value is valid. That is, between
IPV6_MIN_MTU and interface's MTU.
Note that similar check is already performed at
ndisc_router_discovery(), for when kernel itself parses the RA.
Signed-off-by: Marcelo Ricardo Leitner <[email protected]>
Signed-off-by: Sabrina Dubroca <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
void InstanceKlass::print_class_load_logging(ClassLoaderData* loader_data,
const char* module_name,
const ClassFileStream* cfs) const {
if (!log_is_enabled(Info, class, load)) {
return;
}
ResourceMark rm;
LogMessage(class, load) msg;
stringStream info_stream;
// Name and class hierarchy info
info_stream.print("%s", external_name());
// Source
if (cfs != NULL) {
if (cfs->source() != NULL) {
if (module_name != NULL) {
if (ClassLoader::is_modules_image(cfs->source())) {
info_stream.print(" source: jrt:/%s", module_name);
} else {
info_stream.print(" source: %s", cfs->source());
}
} else {
info_stream.print(" source: %s", cfs->source());
}
} else if (loader_data == ClassLoaderData::the_null_class_loader_data()) {
Thread* THREAD = Thread::current();
Klass* caller =
THREAD->is_Java_thread()
? ((JavaThread*)THREAD)->security_get_caller_class(1)
: NULL;
// caller can be NULL, for example, during a JVMTI VM_Init hook
if (caller != NULL) {
info_stream.print(" source: instance of %s", caller->external_name());
} else {
// source is unknown
}
} else {
oop class_loader = loader_data->class_loader();
info_stream.print(" source: %s", class_loader->klass()->external_name());
}
} else {
info_stream.print(" source: shared objects file");
}
msg.info("%s", info_stream.as_string());
if (log_is_enabled(Debug, class, load)) {
stringStream debug_stream;
// Class hierarchy info
debug_stream.print(" klass: " INTPTR_FORMAT " super: " INTPTR_FORMAT,
p2i(this), p2i(superklass()));
// Interfaces
if (local_interfaces() != NULL && local_interfaces()->length() > 0) {
debug_stream.print(" interfaces:");
int length = local_interfaces()->length();
for (int i = 0; i < length; i++) {
debug_stream.print(" " INTPTR_FORMAT,
p2i(InstanceKlass::cast(local_interfaces()->at(i))));
}
}
// Class loader
debug_stream.print(" loader: [");
loader_data->print_value_on(&debug_stream);
debug_stream.print("]");
// Classfile checksum
if (cfs) {
debug_stream.print(" bytes: %d checksum: %08x",
cfs->length(),
ClassLoader::crc32(0, (const char*)cfs->buffer(),
cfs->length()));
}
msg.debug("%s", debug_stream.as_string());
}
}
| 0 |
[] |
jdk11u-dev
|
41825fa33d605f8501164f9296572e4378e8183b
| 190,269,224,035,620,220,000,000,000,000,000,000,000 | 81 |
8270386: Better verification of scan methods
Reviewed-by: mbaesken
Backport-of: ac329cef45979bd0159ecd1347e36f7129bb2ce4
|
book_backend_ldap_refresh_sync (EBookBackend *backend,
GCancellable *cancellable,
GError **error)
{
EBookBackendLDAP *ldap_backend = E_BOOK_BACKEND_LDAP (backend);
g_return_val_if_fail (ldap_backend != NULL, FALSE);
g_return_val_if_fail (ldap_backend->priv != NULL, FALSE);
if (!ldap_backend->priv->cache || !ldap_backend->priv->marked_for_offline ||
ldap_backend->priv->generate_cache_in_progress)
return TRUE;
e_book_backend_cache_set_time (ldap_backend->priv->cache, "");
generate_cache (ldap_backend);
return TRUE;
}
| 0 |
[] |
evolution-data-server
|
34bad61738e2127736947ac50e0c7969cc944972
| 218,762,692,655,178,600,000,000,000,000,000,000,000 | 18 |
Bug 796174 - strcat() considered unsafe for buffer overflow
|
static CURLcode smtp_disconnect(struct connectdata *conn,
bool dead_connection)
{
struct smtp_conn *smtpc = &conn->proto.smtpc;
/* We cannot send quit unconditionally. If this connection is stale or
bad in any way, sending quit and waiting around here will make the
disconnect wait in vain and cause more problems than we need to.
*/
/* The SMTP session may or may not have been allocated/setup at this
point! */
if(!dead_connection && smtpc->pp.conn)
(void)smtp_quit(conn); /* ignore errors on the LOGOUT */
Curl_pp_disconnect(&smtpc->pp);
#ifdef USE_NTLM
/* Cleanup the ntlm structure */
if(smtpc->authused == SMTP_AUTH_NTLM) {
Curl_ntlm_sspi_cleanup(&conn->ntlm);
}
#endif
/* This won't already be freed in some error cases */
Curl_safefree(smtpc->domain);
smtpc->domain = NULL;
return CURLE_OK;
}
| 0 |
[
"CWE-89"
] |
curl
|
75ca568fa1c19de4c5358fed246686de8467c238
| 110,249,443,628,201,340,000,000,000,000,000,000,000 | 30 |
URL sanitize: reject URLs containing bad data
Protocols (IMAP, POP3 and SMTP) that use the path part of a URL in a
decoded manner now use the new Curl_urldecode() function to reject URLs
with embedded control codes (anything that is or decodes to a byte value
less than 32).
URLs containing such codes could easily otherwise be used to do harm and
allow users to do unintended actions with otherwise innocent tools and
applications. Like for example using a URL like
pop3://pop3.example.com/1%0d%0aDELE%201 when the app wants a URL to get
a mail and instead this would delete one.
This flaw is considered a security vulnerability: CVE-2012-0036
Security advisory at: http://curl.haxx.se/docs/adv_20120124.html
Reported by: Dan Fandrich
|
int __weak ftrace_arch_code_modify_prepare(void)
{
return 0;
}
| 0 |
[
"CWE-703"
] |
linux
|
6a76f8c0ab19f215af2a3442870eeb5f0e81998d
| 170,520,214,581,441,720,000,000,000,000,000,000,000 | 4 |
tracing: Fix possible NULL pointer dereferences
Currently set_ftrace_pid and set_graph_function files use seq_lseek
for their fops. However seq_open() is called only for FMODE_READ in
the fops->open() so that if an user tries to seek one of those file
when she open it for writing, it sees NULL seq_file and then panic.
It can be easily reproduced with following command:
$ cd /sys/kernel/debug/tracing
$ echo 1234 | sudo tee -a set_ftrace_pid
In this example, GNU coreutils' tee opens the file with fopen(, "a")
and then the fopen() internally calls lseek().
Link: http://lkml.kernel.org/r/[email protected]
Cc: Frederic Weisbecker <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Namhyung Kim <[email protected]>
Cc: [email protected]
Signed-off-by: Namhyung Kim <[email protected]>
Signed-off-by: Steven Rostedt <[email protected]>
|
bool is_empty() { return change_list.is_empty(); }
| 0 |
[
"CWE-416"
] |
server
|
4681b6f2d8c82b4ec5cf115e83698251963d80d5
| 259,296,706,010,508,460,000,000,000,000,000,000,000 | 1 |
MDEV-26281 ASAN use-after-poison when complex conversion is involved in blob
the bug was that in_vector array in Item_func_in was allocated in the
statement arena, not in the table->expr_arena.
revert part of the 5acd391e8b2d. Instead, change the arena correctly
in fix_all_session_vcol_exprs().
Remove TABLE_ARENA, that was introduced in 5acd391e8b2d to force
item tree changes to be rolled back (because they were allocated in the
wrong arena and didn't persist. now they do)
|
static void read_binary_date(MYSQL_TIME *tm, uchar **pos)
{
uint length= net_field_length(pos);
if (length)
{
uchar *to= *pos;
tm->year = (uint) sint2korr(to);
tm->month= (uint) to[2];
tm->day= (uint) to[3];
tm->hour= tm->minute= tm->second= 0;
tm->second_part= 0;
tm->neg= 0;
tm->time_type= MYSQL_TIMESTAMP_DATE;
*pos+= length;
}
else
set_zero_time(tm, MYSQL_TIMESTAMP_DATE);
}
| 0 |
[] |
mysql-server
|
3d8134d2c9b74bc8883ffe2ef59c168361223837
| 218,224,312,851,683,900,000,000,000,000,000,000,000 | 21 |
Bug#25988681: USE-AFTER-FREE IN MYSQL_STMT_CLOSE()
Description: If mysql_stmt_close() encountered error,
it recorded error in prepared statement
but then frees memory assigned to prepared
statement. If mysql_stmt_error() is used
to get error information, it will result
into use after free.
In all cases where mysql_stmt_close() can
fail, error would have been set by
cli_advanced_command in MYSQL structure.
Solution: Don't copy error from MYSQL using set_stmt_errmsg.
There is no automated way to test the fix since
it is in mysql_stmt_close() which does not expect
any reply from server.
Reviewed-By: Georgi Kodinov <[email protected]>
Reviewed-By: Ramil Kalimullin <[email protected]>
|
flatpak_remote_state_get_main_metadata (FlatpakRemoteState *self)
{
VarSummaryRef summary;
VarSummaryIndexRef index;
VarMetadataRef meta;
if (self->index)
{
index = var_summary_index_from_gvariant (self->index);
meta = var_summary_index_get_metadata (index);
}
else if (self->summary)
{
summary = var_summary_from_gvariant (self->summary);
meta = var_summary_get_metadata (summary);
}
else
g_assert_not_reached ();
return meta;
}
| 0 |
[
"CWE-74"
] |
flatpak
|
fb473cad801c6b61706353256cab32330557374a
| 29,614,623,840,403,643,000,000,000,000,000,000,000 | 21 |
dir: Pass environment via bwrap --setenv when running apply_extra
This means we can systematically pass the environment variables
through bwrap(1), even if it is setuid and thus is filtering out
security-sensitive environment variables. bwrap ends up being
run with an empty environment instead.
As with the previous commit, this regressed while fixing CVE-2021-21261.
Fixes: 6d1773d2 "run: Convert all environment variables into bwrap arguments"
Signed-off-by: Simon McVittie <[email protected]>
|
TEST(EqOp, MatchesElement) {
BSONObj operand = BSON("a" << 5);
BSONObj match = BSON("a" << 5.0);
BSONObj notMatch = BSON("a" << 6);
EqualityMatchExpression eq("", operand["a"]);
ASSERT(eq.matchesSingleElement(match.firstElement()));
ASSERT(!eq.matchesSingleElement(notMatch.firstElement()));
ASSERT(eq.equivalent(&eq));
}
| 0 |
[] |
mongo
|
64095239f41e9f3841d8be9088347db56d35c891
| 303,876,289,915,203,850,000,000,000,000,000,000,000 | 11 |
SERVER-51083 Reject invalid UTF-8 from $regex match expressions
|
BGD_DECLARE(int) gdTransformAffineGetImage(gdImagePtr *dst,
const gdImagePtr src,
gdRectPtr src_area,
const double affine[6])
{
int res;
double m[6];
gdRect bbox;
gdRect area_full;
if (src_area == NULL) {
area_full.x = 0;
area_full.y = 0;
area_full.width = gdImageSX(src);
area_full.height = gdImageSY(src);
src_area = &area_full;
}
gdTransformAffineBoundingBox(src_area, affine, &bbox);
*dst = gdImageCreateTrueColor(bbox.width, bbox.height);
if (*dst == NULL) {
return GD_FALSE;
}
(*dst)->saveAlphaFlag = 1;
if (!src->trueColor) {
gdImagePaletteToTrueColor(src);
}
/* Translate to dst origin (0,0) */
gdAffineTranslate(m, -bbox.x, -bbox.y);
gdAffineConcat(m, affine, m);
gdImageAlphaBlending(*dst, 0);
res = gdTransformAffineCopy(*dst,
0,0,
src,
src_area,
m);
if (res != GD_TRUE) {
gdImageDestroy(*dst);
dst = NULL;
return GD_FALSE;
} else {
return GD_TRUE;
}
}
| 0 |
[
"CWE-399"
] |
libgd
|
4751b606fa38edc456d627140898a7ec679fcc24
| 232,321,569,154,353,000,000,000,000,000,000,000,000 | 50 |
gdImageScaleTwoPass memory leak fix
Fixing memory leak in gdImageScaleTwoPass, as reported by @cmb69 and
confirmed by @vapier. This bug actually bit me in production and I'm
very thankful that it was reported with an easy fix.
Fixes #173.
|
xmlValidateNamesValueInternal(xmlDocPtr doc, const xmlChar *value) {
const xmlChar *cur;
int val, len;
if (value == NULL) return(0);
cur = value;
val = xmlStringCurrentChar(NULL, cur, &len);
cur += len;
if (!xmlIsDocNameStartChar(doc, val))
return(0);
val = xmlStringCurrentChar(NULL, cur, &len);
cur += len;
while (xmlIsDocNameChar(doc, val)) {
val = xmlStringCurrentChar(NULL, cur, &len);
cur += len;
}
/* Should not test IS_BLANK(val) here -- see erratum E20*/
while (val == 0x20) {
while (val == 0x20) {
val = xmlStringCurrentChar(NULL, cur, &len);
cur += len;
}
if (!xmlIsDocNameStartChar(doc, val))
return(0);
val = xmlStringCurrentChar(NULL, cur, &len);
cur += len;
while (xmlIsDocNameChar(doc, val)) {
val = xmlStringCurrentChar(NULL, cur, &len);
cur += len;
}
}
if (val != 0) return(0);
return(1);
}
| 0 |
[] |
libxml2
|
932cc9896ab41475d4aa429c27d9afd175959d74
| 111,668,422,976,453,460,000,000,000,000,000,000,000 | 42 |
Fix buffer size checks in xmlSnprintfElementContent
xmlSnprintfElementContent failed to correctly check the available
buffer space in two locations.
Fixes bug 781333 (CVE-2017-9047) and bug 781701 (CVE-2017-9048).
Thanks to Marcel Böhme and Thuan Pham for the report.
|
static int fill_ext_compat(struct usb_configuration *c, u8 *buf)
{
int i, count;
count = 16;
buf += 16;
for (i = 0; i < c->next_interface_id; ++i) {
struct usb_function *f;
int j;
f = c->interface[i];
for (j = 0; j < f->os_desc_n; ++j) {
struct usb_os_desc *d;
if (i != f->os_desc_table[j].if_id)
continue;
d = f->os_desc_table[j].os_desc;
if (d && d->ext_compat_id) {
*buf++ = i;
*buf++ = 0x01;
memcpy(buf, d->ext_compat_id, 16);
buf += 22;
} else {
++buf;
*buf = 0x01;
buf += 23;
}
count += 24;
if (count + 24 >= USB_COMP_EP0_OS_DESC_BUFSIZ)
return count;
}
}
return count;
}
| 0 |
[
"CWE-476"
] |
linux
|
75e5b4849b81e19e9efe1654b30d7f3151c33c2c
| 166,397,339,931,793,830,000,000,000,000,000,000,000 | 35 |
USB: gadget: validate interface OS descriptor requests
Stall the control endpoint in case provided index exceeds array size of
MAX_CONFIG_INTERFACES or when the retrieved function pointer is null.
Signed-off-by: Szymon Heidrich <[email protected]>
Cc: [email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
static connection *connections_get_new_connection(server *srv) {
connections * const conns = &srv->conns;
size_t i;
if (conns->size == conns->used) {
conns->size += srv->max_conns >= 128 ? 128 : srv->max_conns > 16 ? 16 : srv->max_conns;
conns->ptr = realloc(conns->ptr, sizeof(*conns->ptr) * conns->size);
force_assert(NULL != conns->ptr);
for (i = conns->used; i < conns->size; i++) {
conns->ptr[i] = connection_init(srv);
connection_reset(conns->ptr[i]);
}
}
conns->ptr[conns->used]->ndx = conns->used;
return conns->ptr[conns->used++];
}
| 0 |
[
"CWE-703"
] |
lighttpd1.4
|
b03b86f47b0d5a553137f081fadc482b4af1372d
| 189,708,008,353,797,450,000,000,000,000,000,000,000 | 18 |
[core] fix merging large headers across mult reads (fixes #3059)
(thx mitd)
x-ref:
"Connections stuck in Close_Wait causing 100% cpu usage"
https://redmine.lighttpd.net/issues/3059
|
check_cursor_lnum(void)
{
if (curwin->w_cursor.lnum > curbuf->b_ml.ml_line_count)
{
#ifdef FEAT_FOLDING
// If there is a closed fold at the end of the file, put the cursor in
// its first line. Otherwise in the last line.
if (!hasFolding(curbuf->b_ml.ml_line_count,
&curwin->w_cursor.lnum, NULL))
#endif
curwin->w_cursor.lnum = curbuf->b_ml.ml_line_count;
}
if (curwin->w_cursor.lnum <= 0)
curwin->w_cursor.lnum = 1;
}
| 0 |
[
"CWE-120"
] |
vim
|
7ce5b2b590256ce53d6af28c1d203fb3bc1d2d97
| 123,300,629,347,740,980,000,000,000,000,000,000,000 | 15 |
patch 8.2.4969: changing text in Visual mode may cause invalid memory access
Problem: Changing text in Visual mode may cause invalid memory access.
Solution: Check the Visual position after making a change.
|
static char *format_duration(u64 dur, u32 timescale, char *szDur)
{
u32 h, m, s, ms;
if (!timescale) return NULL;
dur = (u32) (( ((Double) (s64) dur)/timescale)*1000);
h = (u32) (dur / 3600000);
dur -= h*3600000;
m = (u32) (dur / 60000);
dur -= m*60000;
s = (u32) (dur/1000);
dur -= s*1000;
ms = (u32) (dur);
if (h<=24) {
sprintf(szDur, "%02d:%02d:%02d.%03d", h, m, s, ms);
} else {
u32 d = (u32) (dur / 3600000 / 24);
h = (u32) (dur/3600000)-24*d;
if (d<=365) {
sprintf(szDur, "%d Days, %02d:%02d:%02d.%03d", d, h, m, s, ms);
} else {
u32 y=0;
while (d>365) {
y++;
d-=365;
if (y%4) d--;
}
sprintf(szDur, "%d Years %d Days, %02d:%02d:%02d.%03d", y, d, h, m, s, ms);
}
}
return szDur;
}
| 0 |
[
"CWE-787"
] |
gpac
|
ea1eca00fd92fa17f0e25ac25652622924a9a6a0
| 42,525,287,213,034,460,000,000,000,000,000,000,000 | 32 |
fixed #2138
|
int cli_bytecode_done(struct cli_all_bc *allbc)
{
return cli_bytecode_done_jit(allbc, 0);
}
| 0 |
[
"CWE-189"
] |
clamav-devel
|
3d664817f6ef833a17414a4ecea42004c35cc42f
| 126,765,269,260,833,430,000,000,000,000,000,000,000 | 4 |
fix recursion level crash (bb #3706).
Thanks to Stephane Chazelas for the analysis.
|
void CServer::RegisterCommands()
{
// register console commands
Console()->Register("kick", "i[id] ?r[reason]", CFGFLAG_SERVER, ConKick, this, "Kick player with specified id for any reason");
Console()->Register("status", "", CFGFLAG_SERVER, ConStatus, this, "List players");
Console()->Register("shutdown", "", CFGFLAG_SERVER, ConShutdown, this, "Shut down");
Console()->Register("logout", "", CFGFLAG_SERVER|CFGFLAG_BASICACCESS, ConLogout, this, "Logout of rcon");
Console()->Register("record", "?s[file]", CFGFLAG_SERVER|CFGFLAG_STORE, ConRecord, this, "Record to a file");
Console()->Register("stoprecord", "", CFGFLAG_SERVER, ConStopRecord, this, "Stop recording");
Console()->Register("reload", "", CFGFLAG_SERVER, ConMapReload, this, "Reload the map");
Console()->Chain("sv_name", ConchainSpecialInfoupdate, this);
Console()->Chain("password", ConchainSpecialInfoupdate, this);
Console()->Chain("sv_player_slots", ConchainPlayerSlotsUpdate, this);
Console()->Chain("sv_max_clients", ConchainMaxclientsUpdate, this);
Console()->Chain("sv_max_clients", ConchainSpecialInfoupdate, this);
Console()->Chain("sv_max_clients_per_ip", ConchainMaxclientsperipUpdate, this);
Console()->Chain("mod_command", ConchainModCommandUpdate, this);
Console()->Chain("console_output_level", ConchainConsoleOutputLevelUpdate, this);
Console()->Chain("sv_rcon_password", ConchainRconPasswordSet, this);
// register console commands in sub parts
m_ServerBan.InitServerBan(Console(), Storage(), this);
m_pGameServer->OnConsoleInit();
}
| 0 |
[
"CWE-20",
"CWE-703",
"CWE-400"
] |
teeworlds
|
c68402fa7e279d42886d5951d1ea8ac2facc1ea5
| 7,230,852,647,582,441,000,000,000,000,000,000,000 | 28 |
changed a check
|
static inline int cpu_time_before(const clockid_t which_clock,
union cpu_time_count now,
union cpu_time_count then)
{
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
return now.sched < then.sched;
} else {
return cputime_lt(now.cpu, then.cpu);
}
}
| 0 |
[
"CWE-189"
] |
linux
|
f8bd2258e2d520dff28c855658bd24bdafb5102d
| 108,037,543,700,994,000,000,000,000,000,000,000,000 | 10 |
remove div_long_long_rem
x86 is the only arch right now, which provides an optimized for
div_long_long_rem and it has the downside that one has to be very careful that
the divide doesn't overflow.
The API is a little akward, as the arguments for the unsigned divide are
signed. The signed version also doesn't handle a negative divisor and
produces worse code on 64bit archs.
There is little incentive to keep this API alive, so this converts the few
users to the new API.
Signed-off-by: Roman Zippel <[email protected]>
Cc: Ralf Baechle <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: john stultz <[email protected]>
Cc: Christoph Lameter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
u32 val;
if (len < sizeof(u32))
return -EINVAL;
len = sizeof(u32);
val = sctp_sk(sk)->pd_point;
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
| 0 |
[
"CWE-20"
] |
linux
|
726bc6b092da4c093eb74d13c07184b18c1af0f1
| 15,991,317,210,869,460,000,000,000,000,000,000,000 | 19 |
net/sctp: Validate parameter size for SCTP_GET_ASSOC_STATS
Building sctp may fail with:
In function ‘copy_from_user’,
inlined from ‘sctp_getsockopt_assoc_stats’ at
net/sctp/socket.c:5656:20:
arch/x86/include/asm/uaccess_32.h:211:26: error: call to
‘copy_from_user_overflow’ declared with attribute error: copy_from_user()
buffer size is not provably correct
if built with W=1 due to a missing parameter size validation
before the call to copy_from_user.
Signed-off-by: Guenter Roeck <[email protected]>
Acked-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static void esp_free_tcp_sk(struct rcu_head *head)
{
struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);
sock_put(esk->sk);
kfree(esk);
}
| 0 |
[
"CWE-787"
] |
linux
|
ebe48d368e97d007bfeb76fcb065d6cfc4c96645
| 4,720,978,883,545,403,000,000,000,000,000,000,000 | 7 |
esp: Fix possible buffer overflow in ESP transformation
The maximum message size that can be send is bigger than
the maximum site that skb_page_frag_refill can allocate.
So it is possible to write beyond the allocated buffer.
Fix this by doing a fallback to COW in that case.
v2:
Avoid get get_order() costs as suggested by Linus Torvalds.
Fixes: cac2661c53f3 ("esp4: Avoid skb_cow_data whenever possible")
Fixes: 03e2a30f6a27 ("esp6: Avoid skb_cow_data whenever possible")
Reported-by: valis <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]>
|
rad_put_vendor_attr(struct rad_handle *h, int vendor, int type,
const void *value, size_t len)
{
struct vendor_attribute *attr;
int res;
if (!h->request_created) {
generr(h, "Please call rad_create_request()");
return -1;
}
if ((attr = malloc(len + 6)) == NULL) {
generr(h, "malloc failure (%d bytes)", len + 6);
return -1;
}
attr->vendor_value = htonl(vendor);
attr->attrib_type = type;
attr->attrib_len = len + 2;
memcpy(attr->attrib_data, value, len);
res = put_raw_attr(h, RAD_VENDOR_SPECIFIC, attr, len + 6);
free(attr);
if (res == 0 && vendor == RAD_VENDOR_MICROSOFT
&& (type == RAD_MICROSOFT_MS_CHAP_RESPONSE
|| type == RAD_MICROSOFT_MS_CHAP2_RESPONSE)) {
h->chap_pass = 1;
}
return (res);
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
php-radius
|
13c149b051f82b709e8d7cc32111e84b49d57234
| 306,249,147,668,903,270,000,000,000,000,000,000,000 | 30 |
Fix a security issue in radius_get_vendor_attr().
The underlying rad_get_vendor_attr() function assumed that it would always be
given valid VSA data. Indeed, the buffer length wasn't even passed in; the
assumption was that the length field within the VSA structure would be valid.
This could result in denial of service by providing a length that would be
beyond the memory limit, or potential arbitrary memory access by providing a
length greater than the actual data given.
rad_get_vendor_attr() has been changed to require the raw data length be
provided, and this is then used to check that the VSA is valid.
Conflicts:
radlib_vs.h
|
bool operator==(const Component& other) const { return begin == other.begin && len == other.len; }
| 0 |
[] |
envoy
|
3b5acb2f43548862dadb243de7cf3994986a8e04
| 184,901,940,652,775,160,000,000,000,000,000,000,000 | 1 |
http, url: Bring back chromium_url and http_parser_parse_url (#198)
* Revert GURL as HTTP URL parser utility
This reverts:
1. commit c9c4709c844b90b9bb2935d784a428d667c9df7d
2. commit d828958b591a6d79f4b5fa608ece9962b7afbe32
3. commit 2d69e30c51f2418faf267aaa6c1126fce9948c62
Signed-off-by: Dhi Aurrahman <[email protected]>
|
static uint64_t reencrypt_length(struct crypt_device *cd,
struct luks2_hdr *hdr,
struct luks2_reencrypt *rh,
uint64_t keyslot_area_length,
uint64_t length_max)
{
unsigned long dummy, optimal_alignment;
uint64_t length, soft_mem_limit;
if (rh->rp.type == REENC_PROTECTION_NONE)
length = length_max ?: LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH;
else if (rh->rp.type == REENC_PROTECTION_CHECKSUM)
length = (keyslot_area_length / rh->rp.p.csum.hash_size) * rh->alignment;
else if (rh->rp.type == REENC_PROTECTION_DATASHIFT)
return reencrypt_data_shift(hdr);
else
length = keyslot_area_length;
/* hard limit */
if (length > LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH)
length = LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH;
/* soft limit is 1/4 of system memory */
soft_mem_limit = crypt_getphysmemory_kb() << 8; /* multiply by (1024/4) */
if (soft_mem_limit && length > soft_mem_limit)
length = soft_mem_limit;
if (length_max && length > length_max)
length = length_max;
length -= (length % rh->alignment);
/* Emits error later */
if (!length)
return length;
device_topology_alignment(cd, crypt_data_device(cd), &optimal_alignment, &dummy, length);
/* we have to stick with encryption sector size alignment */
if (optimal_alignment % rh->alignment)
return length;
/* align to opt-io size only if remaining size allows it */
if (length > optimal_alignment)
length -= (length % optimal_alignment);
return length;
}
| 0 |
[
"CWE-345"
] |
cryptsetup
|
0113ac2d889c5322659ad0596d4cfc6da53e356c
| 88,482,418,784,901,100,000,000,000,000,000,000,000 | 49 |
Fix CVE-2021-4122 - LUKS2 reencryption crash recovery attack
Fix possible attacks against data confidentiality through LUKS2 online
reencryption extension crash recovery.
An attacker can modify on-disk metadata to simulate decryption in
progress with crashed (unfinished) reencryption step and persistently
decrypt part of the LUKS device.
This attack requires repeated physical access to the LUKS device but
no knowledge of user passphrases.
The decryption step is performed after a valid user activates
the device with a correct passphrase and modified metadata.
There are no visible warnings for the user that such recovery happened
(except using the luksDump command). The attack can also be reversed
afterward (simulating crashed encryption from a plaintext) with
possible modification of revealed plaintext.
The problem was caused by reusing a mechanism designed for actual
reencryption operation without reassessing the security impact for new
encryption and decryption operations. While the reencryption requires
calculating and verifying both key digests, no digest was needed to
initiate decryption recovery if the destination is plaintext (no
encryption key). Also, some metadata (like encryption cipher) is not
protected, and an attacker could change it. Note that LUKS2 protects
visible metadata only when a random change occurs. It does not protect
against intentional modification but such modification must not cause
a violation of data confidentiality.
The fix introduces additional digest protection of reencryption
metadata. The digest is calculated from known keys and critical
reencryption metadata. Now an attacker cannot create correct metadata
digest without knowledge of a passphrase for used keyslots.
For more details, see LUKS2 On-Disk Format Specification version 1.1.0.
|
finish_input_tga (j_compress_ptr cinfo, cjpeg_source_ptr sinfo)
{
/* no work */
}
| 0 |
[
"CWE-369"
] |
libjpeg-turbo
|
82923eb93a2eacf4a593e00e3e672bbb86a8a3a0
| 156,876,948,428,426,860,000,000,000,000,000,000,000 | 4 |
Check image size when reading targa file
Throw an error when image width or height is 0.
Fixes mozilla/mozjpeg#140, closes #7.
|
ByteVector ByteVector::operator+(const ByteVector &v) const
{
ByteVector sum(*this);
sum.append(v);
return sum;
}
| 0 |
[
"CWE-189"
] |
taglib
|
dcdf4fd954e3213c355746fa15b7480461972308
| 164,016,540,697,953,200,000,000,000,000,000,000,000 | 6 |
Avoid uint overflow in case the length + index is over UINT_MAX
|
c_pdf14trans_is_friendly(const gs_composite_t * composite_action, byte cmd0, byte cmd1)
{
gs_pdf14trans_t *pct0 = (gs_pdf14trans_t *)composite_action;
int op0 = pct0->params.pdf14_op;
if (op0 == PDF14_PUSH_DEVICE || op0 == PDF14_END_TRANS_GROUP ||
op0 == PDF14_END_TRANS_TEXT_GROUP) {
/* Halftone commands are always passed to the target printer device,
because transparency buffers are always contone.
So we're safe to execute them before queued transparency compositors. */
if (cmd0 == cmd_opv_extend && (cmd1 == cmd_opv_ext_put_halftone ||
cmd1 == cmd_opv_ext_put_ht_seg))
return true;
if (cmd0 == cmd_opv_set_misc && (cmd1 >> 6) == (cmd_set_misc_map >> 6))
return true;
}
return false;
}
| 0 |
[] |
ghostpdl
|
c432131c3fdb2143e148e8ba88555f7f7a63b25e
| 126,487,890,272,269,210,000,000,000,000,000,000,000 | 18 |
Bug 699661: Avoid sharing pointers between pdf14 compositors
If a copdevice is triggered when the pdf14 compositor is the device, we make
a copy of the device, then throw an error because, by default we're only allowed
to copy the device prototype - then freeing it calls the finalize, which frees
several pointers shared with the parent.
Make a pdf14 specific finish_copydevice() which NULLs the relevant pointers,
before, possibly, throwing the same error as the default method.
This also highlighted a problem with reopening the X11 devices, where a custom
error handler could be replaced with itself, meaning it also called itself,
and infifite recursion resulted.
Keep a note of if the handler replacement has been done, and don't do it a
second time.
|
static void lsi_scsi_realize(PCIDevice *dev, Error **errp)
{
LSIState *s = LSI53C895A(dev);
DeviceState *d = DEVICE(dev);
uint8_t *pci_conf;
pci_conf = dev->config;
/* PCI latency timer = 255 */
pci_conf[PCI_LATENCY_TIMER] = 0xff;
/* Interrupt pin A */
pci_conf[PCI_INTERRUPT_PIN] = 0x01;
memory_region_init_io(&s->mmio_io, OBJECT(s), &lsi_mmio_ops, s,
"lsi-mmio", 0x400);
memory_region_init_io(&s->ram_io, OBJECT(s), &lsi_ram_ops, s,
"lsi-ram", 0x2000);
memory_region_init_io(&s->io_io, OBJECT(s), &lsi_io_ops, s,
"lsi-io", 256);
address_space_init(&s->pci_io_as, pci_address_space_io(dev), "lsi-pci-io");
qdev_init_gpio_out(d, &s->ext_irq, 1);
pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->io_io);
pci_register_bar(dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->mmio_io);
pci_register_bar(dev, 2, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->ram_io);
QTAILQ_INIT(&s->queue);
scsi_bus_new(&s->bus, sizeof(s->bus), d, &lsi_scsi_info, NULL);
}
| 0 |
[
"CWE-835"
] |
qemu
|
de594e47659029316bbf9391efb79da0a1a08e08
| 110,324,845,795,501,840,000,000,000,000,000,000,000 | 30 |
scsi: lsi: exit infinite loop while executing script (CVE-2019-12068)
When executing script in lsi_execute_script(), the LSI scsi adapter
emulator advances 's->dsp' index to read next opcode. This can lead
to an infinite loop if the next opcode is empty. Move the existing
loop exit after 10k iterations so that it covers no-op opcodes as
well.
Reported-by: Bugs SysSec <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
Signed-off-by: Prasad J Pandit <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
void TNEFInitAttachment(Attachment *p) {
INITDTR(p->Date);
INITVARLENGTH(p->Title);
INITVARLENGTH(p->MetaFile);
INITDTR(p->CreateDate);
INITDTR(p->ModifyDate);
INITVARLENGTH(p->TransportFilename);
INITVARLENGTH(p->FileData);
INITVARLENGTH(p->IconData);
memset(&(p->RenderData), 0, sizeof(renddata));
TNEFInitMapi(&(p->MAPI));
p->next = NULL;
}
| 0 |
[
"CWE-399",
"CWE-125"
] |
ytnef
|
3cb0f914d6427073f262e1b2b5fd973e3043cdf7
| 302,615,200,646,317,340,000,000,000,000,000,000,000 | 13 |
BugFix - Potential OOB with Fields of Size 0
Thanks to @hannob for contributing a malformed TNEF stream with
a Version field of size 0. Now such files will return an error
indicating invalid data.
|
int Item_func_nullif::compare()
{
if (m_cache)
m_cache->cache_value();
return cmp.compare();
}
| 0 |
[
"CWE-617"
] |
server
|
807945f2eb5fa22e6f233cc17b85a2e141efe2c8
| 119,128,696,255,467,920,000,000,000,000,000,000,000 | 6 |
MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item.
|
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
static unsigned alloc_hint = 16;
if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
map->nodes_nb_alloc = MAX(alloc_hint, map->nodes_nb + nodes);
map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
alloc_hint = map->nodes_nb_alloc;
}
}
| 0 |
[
"CWE-787"
] |
qemu
|
4bfb024bc76973d40a359476dc0291f46e435442
| 273,432,137,934,066,600,000,000,000,000,000,000,000 | 9 |
memory: clamp cached translation in case it points to an MMIO region
In using the address_space_translate_internal API, address_space_cache_init
forgot one piece of advice that can be found in the code for
address_space_translate_internal:
/* MMIO registers can be expected to perform full-width accesses based only
* on their address, without considering adjacent registers that could
* decode to completely different MemoryRegions. When such registers
* exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
* regions overlap wildly. For this reason we cannot clamp the accesses
* here.
*
* If the length is small (as is the case for address_space_ldl/stl),
* everything works fine. If the incoming length is large, however,
* the caller really has to do the clamping through memory_access_size.
*/
address_space_cache_init is exactly one such case where "the incoming length
is large", therefore we need to clamp the resulting length---not to
memory_access_size though, since we are not doing an access yet, but to
the size of the resulting section. This ensures that subsequent accesses
to the cached MemoryRegionSection will be in range.
With this patch, the enclosed testcase notices that the used ring does
not fit into the MSI-X table and prints a "qemu-system-x86_64: Cannot map used"
error.
Signed-off-by: Paolo Bonzini <[email protected]>
|
apr_byte_t oidc_util_json_merge(request_rec *r, json_t *src, json_t *dst) {
const char *key;
json_t *value = NULL;
void *iter = NULL;
if ((src == NULL) || (dst == NULL))
return FALSE;
oidc_debug(r, "src=%s, dst=%s",
oidc_util_encode_json_object(r, src, JSON_COMPACT),
oidc_util_encode_json_object(r, dst, JSON_COMPACT));
iter = json_object_iter(src);
while (iter) {
key = json_object_iter_key(iter);
value = json_object_iter_value(iter);
json_object_set(dst, key, value);
iter = json_object_iter_next(src, iter);
}
oidc_debug(r, "result dst=%s",
oidc_util_encode_json_object(r, dst, JSON_COMPACT));
return TRUE;
}
| 0 |
[
"CWE-79"
] |
mod_auth_openidc
|
55ea0a085290cd2c8cdfdd960a230cbc38ba8b56
| 248,331,733,074,072,570,000,000,000,000,000,000,000 | 26 |
Add a function to escape Javascript characters
|
static int selinux_setprocattr(struct task_struct *p,
char *name, void *value, size_t size)
{
struct task_security_struct *tsec;
struct task_struct *tracer;
struct cred *new;
u32 sid = 0, ptsid;
int error;
char *str = value;
if (current != p) {
/* SELinux only allows a process to change its own
security attributes. */
return -EACCES;
}
/*
* Basic control over ability to set these attributes at all.
* current == p, but we'll pass them separately in case the
* above restriction is ever removed.
*/
if (!strcmp(name, "exec"))
error = current_has_perm(p, PROCESS__SETEXEC);
else if (!strcmp(name, "fscreate"))
error = current_has_perm(p, PROCESS__SETFSCREATE);
else if (!strcmp(name, "keycreate"))
error = current_has_perm(p, PROCESS__SETKEYCREATE);
else if (!strcmp(name, "sockcreate"))
error = current_has_perm(p, PROCESS__SETSOCKCREATE);
else if (!strcmp(name, "current"))
error = current_has_perm(p, PROCESS__SETCURRENT);
else
error = -EINVAL;
if (error)
return error;
/* Obtain a SID for the context, if one was specified. */
if (size && str[1] && str[1] != '\n') {
if (str[size-1] == '\n') {
str[size-1] = 0;
size--;
}
error = security_context_to_sid(value, size, &sid, GFP_KERNEL);
if (error == -EINVAL && !strcmp(name, "fscreate")) {
if (!capable(CAP_MAC_ADMIN)) {
struct audit_buffer *ab;
size_t audit_size;
/* We strip a nul only if it is at the end, otherwise the
* context contains a nul and we should audit that */
if (str[size - 1] == '\0')
audit_size = size - 1;
else
audit_size = size;
ab = audit_log_start(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR);
audit_log_format(ab, "op=fscreate invalid_context=");
audit_log_n_untrustedstring(ab, value, audit_size);
audit_log_end(ab);
return error;
}
error = security_context_to_sid_force(value, size,
&sid);
}
if (error)
return error;
}
new = prepare_creds();
if (!new)
return -ENOMEM;
/* Permission checking based on the specified context is
performed during the actual operation (execve,
open/mkdir/...), when we know the full context of the
operation. See selinux_bprm_set_creds for the execve
checks and may_create for the file creation checks. The
operation will then fail if the context is not permitted. */
tsec = new->security;
if (!strcmp(name, "exec")) {
tsec->exec_sid = sid;
} else if (!strcmp(name, "fscreate")) {
tsec->create_sid = sid;
} else if (!strcmp(name, "keycreate")) {
error = may_create_key(sid, p);
if (error)
goto abort_change;
tsec->keycreate_sid = sid;
} else if (!strcmp(name, "sockcreate")) {
tsec->sockcreate_sid = sid;
} else if (!strcmp(name, "current")) {
error = -EINVAL;
if (sid == 0)
goto abort_change;
/* Only allow single threaded processes to change context */
error = -EPERM;
if (!current_is_single_threaded()) {
error = security_bounded_transition(tsec->sid, sid);
if (error)
goto abort_change;
}
/* Check permissions for the transition. */
error = avc_has_perm(tsec->sid, sid, SECCLASS_PROCESS,
PROCESS__DYNTRANSITION, NULL);
if (error)
goto abort_change;
/* Check for ptracing, and update the task SID if ok.
Otherwise, leave SID unchanged and fail. */
ptsid = 0;
rcu_read_lock();
tracer = ptrace_parent(p);
if (tracer)
ptsid = task_sid(tracer);
rcu_read_unlock();
if (tracer) {
error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS,
PROCESS__PTRACE, NULL);
if (error)
goto abort_change;
}
tsec->sid = sid;
} else {
error = -EINVAL;
goto abort_change;
}
commit_creds(new);
return size;
abort_change:
abort_creds(new);
return error;
}
| 0 |
[
"CWE-264"
] |
linux
|
7b0d0b40cd78cadb525df760ee4cac151533c2b5
| 133,941,023,534,540,780,000,000,000,000,000,000,000 | 138 |
selinux: Permit bounded transitions under NO_NEW_PRIVS or NOSUID.
If the callee SID is bounded by the caller SID, then allowing
the transition to occur poses no risk of privilege escalation and we can
therefore safely allow the transition to occur. Add this exemption
for both the case where a transition was explicitly requested by the
application and the case where an automatic transition is defined in
policy.
Signed-off-by: Stephen Smalley <[email protected]>
Reviewed-by: Andy Lutomirski <[email protected]>
Signed-off-by: Paul Moore <[email protected]>
|
static void io_poll_remove_one(struct io_kiocb *req)
{
struct io_poll_iocb *poll = &req->poll;
spin_lock(&poll->head->lock);
WRITE_ONCE(poll->canceled, true);
if (!list_empty(&poll->wait.entry)) {
list_del_init(&poll->wait.entry);
io_queue_async_work(req);
}
spin_unlock(&poll->head->lock);
hash_del(&req->hash_node);
}
| 0 |
[] |
linux
|
ff002b30181d30cdfbca316dadd099c3ca0d739c
| 150,421,245,021,699,950,000,000,000,000,000,000,000 | 13 |
io_uring: grab ->fs as part of async preparation
This passes it in to io-wq, so it assumes the right fs_struct when
executing async work that may need to do lookups.
Cc: [email protected] # 5.3+
Signed-off-by: Jens Axboe <[email protected]>
|
int compat_ip_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
int err;
if (level != SOL_IP)
return -ENOPROTOOPT;
if (optname >= MCAST_JOIN_GROUP && optname <= MCAST_MSFILTER)
return compat_mc_setsockopt(sk, level, optname, optval, optlen,
ip_setsockopt);
err = do_ip_setsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except default case */
if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
optname != IP_IPSEC_POLICY &&
optname != IP_XFRM_POLICY &&
!ip_mroute_opt(optname))
err = compat_nf_setsockopt(sk, PF_INET, optname, optval,
optlen);
#endif
return err;
}
| 0 |
[
"CWE-476"
] |
net
|
425aa0e1d01513437668fa3d4a971168bbaa8515
| 28,435,027,104,186,198,000,000,000,000,000,000,000 | 24 |
ip_sockglue: Fix missing-check bug in ip_ra_control()
In function ip_ra_control(), the pointer new_ra is allocated a memory
space via kmalloc(). And it is used in the following codes. However,
when there is a memory allocation error, kmalloc() fails. Thus null
pointer dereference may happen. And it will cause the kernel to crash.
Therefore, we should check the return value and handle the error.
Signed-off-by: Gen Zhang <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
MOBI_RET mobi_search_links_kf7(MOBIResult *result, const unsigned char *data_start, const unsigned char *data_end) {
if (!result) {
debug_print("Result structure is null%s", "\n");
return MOBI_PARAM_ERR;
}
result->start = result->end = NULL;
*(result->value) = '\0';
if (!data_start || !data_end) {
debug_print("Data is null%s", "\n");
return MOBI_PARAM_ERR;
}
const char *needle1 = "filepos=";
const char *needle2 = "recindex=";
const size_t needle1_length = strlen(needle1);
const size_t needle2_length = strlen(needle2);
const size_t needle_length = max(needle1_length,needle2_length);
if (data_start + needle_length > data_end) {
return MOBI_SUCCESS;
}
unsigned char *data = (unsigned char *) data_start;
const unsigned char tag_open = '<';
const unsigned char tag_close = '>';
unsigned char last_border = tag_open;
while (data <= data_end) {
if (*data == tag_open || *data == tag_close) {
last_border = *data;
}
if (data + needle_length <= data_end &&
(memcmp(data, needle1, needle1_length) == 0 ||
memcmp(data, needle2, needle2_length) == 0)) {
/* found match */
if (last_border != tag_open) {
/* opening char not found, not an attribute */
data += needle_length;
continue;
}
/* go to attribute beginning */
while (data >= data_start && !isspace(*data) && *data != tag_open) {
data--;
}
result->start = ++data;
/* now go forward */
int i = 0;
while (data <= data_end && !isspace(*data) && *data != tag_close && i < MOBI_ATTRVALUE_MAXSIZE) {
result->value[i++] = (char) *data++;
}
/* self closing tag '/>' */
if (*(data - 1) == '/' && *data == '>') {
--data; --i;
}
result->end = data;
result->value[i] = '\0';
return MOBI_SUCCESS;
}
data++;
}
return MOBI_SUCCESS;
}
| 0 |
[
"CWE-703",
"CWE-125"
] |
libmobi
|
fb1ab50e448ddbed746fd27ae07469bc506d838b
| 51,230,431,518,316,820,000,000,000,000,000,000,000 | 58 |
Fix array boundary check when parsing inflections which could result in buffer over-read with corrupt input
|
table_map Item_default_value::used_tables() const
{
if (!field || !field->default_value)
return static_cast<table_map>(0);
if (!field->default_value->expr) // not fully parsed field
return static_cast<table_map>(RAND_TABLE_BIT);
return field->default_value->expr->used_tables();
}
| 0 |
[
"CWE-416"
] |
server
|
c02ebf3510850ba78a106be9974c94c3b97d8585
| 75,142,770,513,191,800,000,000,000,000,000,000,000 | 8 |
MDEV-24176 Preparations
1. moved fix_vcol_exprs() call to open_table()
mysql_alter_table() doesn't do lock_tables() so it cannot win from
fix_vcol_exprs() from there. Tests affected: main.default_session
2. Vanilla cleanups and comments.
|
agoo_con_http_write(agooCon c) {
agooRes res = agoo_con_res_pop(c);
agooText message = agoo_res_message_peek(res);
ssize_t cnt = 0;
if (NULL == message) {
return true;
}
c->timeout = dtime() + CON_TIMEOUT;
if (0 == c->wcnt) {
if (agoo_resp_cat.on) {
char buf[4096];
char *hend = strstr(message->text, "\r\n\r\n");
if (NULL == hend) {
hend = message->text + message->len;
}
if ((long)sizeof(buf) <= hend - message->text) {
hend = message->text + sizeof(buf) - 1;
}
memcpy(buf, message->text, hend - message->text);
buf[hend - message->text] = '\0';
agoo_log_cat(&agoo_resp_cat, "%s %llu: %s", agoo_con_kind_str(c->bind->kind), (unsigned long long)c->id, buf);
}
if (agoo_debug_cat.on) {
agoo_log_cat(&agoo_debug_cat, "%s response on %llu: %s", agoo_con_kind_str(c->bind->kind), (unsigned long long)c->id, message->text);
}
}
if (AGOO_CON_HTTPS == c->bind->kind) {
#ifdef HAVE_OPENSSL_SSL_H
if (0 >= (cnt = SSL_write(c->ssl, message->text + c->wcnt, (int)(message->len - c->wcnt)))) {
unsigned long e = ERR_get_error();
if (0 == e) {
return true;
}
con_ssl_error(c, "write", (unsigned long)e, __FILE__, __LINE__);
c->dead = true;
return false;
}
#else
agoo_log_cat(&agoo_error_cat, "SSL not included in the build.");
c->dead = true;
#endif
} else {
if (0 > (cnt = send(c->sock, message->text + c->wcnt, message->len - c->wcnt, MSG_DONTWAIT))) {
if (EAGAIN == errno) {
return true;
}
agoo_log_cat(&agoo_error_cat, "Socket error @ %llu.", (unsigned long long)c->id);
c->dead = true;
return false;
}
}
c->wcnt += cnt;
if (c->wcnt == message->len) { // finished
agooText next = agoo_res_message_next(res);
c->wcnt = 0;
if (NULL == next && res->final) {
bool done = res->close;
agoo_res_destroy(res);
return !done;
}
}
agoo_con_res_prepend(c, res);
return true;
}
| 0 |
[
"CWE-444",
"CWE-61"
] |
agoo
|
23d03535cf7b50d679a60a953a0cae9519a4a130
| 59,979,485,829,615,300,000,000,000,000,000,000,000 | 73 |
Remote addr (#99)
* REMOTE_ADDR added
* Ready for merge
|
static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
if (vcpu->arch.guest_state_protected)
return;
get_debugreg(vcpu->arch.db[0], 0);
get_debugreg(vcpu->arch.db[1], 1);
get_debugreg(vcpu->arch.db[2], 2);
get_debugreg(vcpu->arch.db[3], 3);
/*
* We cannot reset svm->vmcb->save.dr6 to DR6_ACTIVE_LOW here,
* because db_interception might need it. We can do it before vmentry.
*/
vcpu->arch.dr6 = svm->vmcb->save.dr6;
vcpu->arch.dr7 = svm->vmcb->save.dr7;
vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
set_dr_intercepts(svm);
}
| 0 |
[
"CWE-862"
] |
kvm
|
0f923e07124df069ba68d8bb12324398f4b6b709
| 241,831,822,438,426,730,000,000,000,000,000,000,000 | 20 |
KVM: nSVM: avoid picking up unsupported bits from L2 in int_ctl (CVE-2021-3653)
* Invert the mask of bits that we pick from L2 in
nested_vmcb02_prepare_control
* Invert and explicitly use VIRQ related bits bitmask in svm_clear_vintr
This fixes a security issue that allowed a malicious L1 to run L2 with
AVIC enabled, which allowed the L2 to exploit the uninitialized and enabled
AVIC to read/write the host physical memory at some offsets.
Fixes: 3d6368ef580a ("KVM: SVM: Add VMRUN handler")
Signed-off-by: Maxim Levitsky <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
static double mp_list_ioff(_cimg_math_parser& mp) {
const unsigned int
ind = (unsigned int)cimg::mod((int)_mp_arg(2),mp.imglist.width()),
boundary_conditions = (unsigned int)_mp_arg(4);
const CImg<T> &img = mp.imglist[ind];
const longT
off = (longT)_mp_arg(3),
whds = (longT)img.size();
if (off>=0 && off<whds) return (double)img[off];
if (img._data) switch (boundary_conditions) {
case 3 : { // Mirror
const longT whds2 = 2*whds, moff = cimg::mod(off,whds2);
return (double)img[moff<whds?moff:whds2 - moff - 1];
}
case 2 : // Periodic
return (double)img[cimg::mod(off,whds)];
case 1 : // Neumann
return (double)img[off<0?0:whds - 1];
default : // Dirichlet
return 0;
}
return 0;
}
| 0 |
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
| 214,027,813,402,354,970,000,000,000,000,000,000,000 | 23 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
|
const char *FLTGetBBOX(FilterEncodingNode *psFilterNode, rectObj *psRect)
{
const char *pszReturn = NULL;
if (!psFilterNode || !psRect)
return NULL;
if (psFilterNode->pszValue && strcasecmp(psFilterNode->pszValue, "BBOX") == 0) {
if (psFilterNode->psRightNode && psFilterNode->psRightNode->pOther) {
rectObj* pRect= (rectObj *)psFilterNode->psRightNode->pOther;
psRect->minx = pRect->minx;
psRect->miny = pRect->miny;
psRect->maxx = pRect->maxx;
psRect->maxy = pRect->maxy;
return psFilterNode->pszSRS;
}
} else {
pszReturn = FLTGetBBOX(psFilterNode->psLeftNode, psRect);
if (pszReturn)
return pszReturn;
else
return FLTGetBBOX(psFilterNode->psRightNode, psRect);
}
return pszReturn;
}
| 0 |
[
"CWE-200",
"CWE-119"
] |
mapserver
|
e52a436c0e1c5e9f7ef13428dba83194a800f4df
| 100,028,346,210,295,360,000,000,000,000,000,000,000 | 28 |
security fix (patch by EvenR)
|
Subsets and Splits
CWE 416 & 19
The query filters records related to specific CWEs (Common Weakness Enumerations), providing a basic overview of entries with these vulnerabilities but without deeper analysis.
CWE Frequency in Train Set
Counts the occurrences of each CWE (Common Weakness Enumeration) in the dataset, providing a basic distribution but limited insight.