func
stringlengths 0
484k
| target
int64 0
1
| cwe
listlengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
/*
 * Architecture abort(): BUG() is expected to terminate the current
 * thread; if execution somehow continues past it, fall back to a full
 * panic() so we never return to the caller.
 */
void abort(void)
{
BUG();
/* if that doesn't kill us, halt */
panic("Oops failed to kill thread");
}
"CWE-284",
"CWE-264"
]
| linux | a4780adeefd042482f624f5e0d577bf9cdcbb760 | 96,305,323,783,772,680,000,000,000,000,000,000,000 | 7 | ARM: 7735/2: Preserve the user r/w register TPIDRURW on context switch and fork
Since commit 6a1c53124aa1 the user writeable TLS register was zeroed to
prevent it from being used as a covert channel between two tasks.
There are more and more applications coming to Windows RT,
Wine could support them, but mostly they expect to have
the thread environment block (TEB) in TPIDRURW.
This patch preserves that register per thread instead of clearing it.
Unlike the TPIDRURO, which is already switched, the TPIDRURW
can be updated from userspace so needs careful treatment in the case that we
modify TPIDRURW and call fork(). To avoid this we must always read
TPIDRURW in copy_thread.
Signed-off-by: André Hentschel <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
Signed-off-by: Jonathan Austin <[email protected]>
Signed-off-by: Russell King <[email protected]> |
/*
 * hw_params constraint rule: refine the interval of rule->var to the
 * product of its two dependency intervals (deps[0] * deps[1]).
 * Returns the result of snd_interval_refine() on the target interval.
 */
static int snd_pcm_hw_rule_mul(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
/* temporary interval holding the computed product */
struct snd_interval t;
snd_interval_mul(hw_param_interval_c(params, rule->deps[0]),
hw_param_interval_c(params, rule->deps[1]), &t);
return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}
"CWE-125"
]
| linux | 92ee3c60ec9fe64404dc035e7c41277d74aa26cb | 172,562,302,012,258,000,000,000,000,000,000,000,000 | 8 | ALSA: pcm: Fix races among concurrent hw_params and hw_free calls
Currently we have neither proper check nor protection against the
concurrent calls of PCM hw_params and hw_free ioctls, which may result
in a UAF. Since the existing PCM stream lock can't be used for
protecting the whole ioctl operations, we need a new mutex to protect
those racy calls.
This patch introduced a new mutex, runtime->buffer_mutex, and applies
it to both hw_params and hw_free ioctl code paths. Along with it, the
both functions are slightly modified (the mmap_count check is moved
into the state-check block) for code simplicity.
Reported-by: Hu Jiahui <[email protected]>
Cc: <[email protected]>
Reviewed-by: Jaroslav Kysela <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Takashi Iwai <[email protected]> |
/*
 * inode_change_ok - permission check for the attribute changes listed in
 * attr->ia_valid (truncate, chown, chgrp, chmod, utimes) against @inode.
 *
 * Returns 0 when the caller is allowed to proceed, a negative errno
 * (-EPERM, or whatever inode_newsize_ok() reports) otherwise.  As a side
 * effect the chmod branch may strip S_ISGID from attr->ia_mode when the
 * caller is not in the target group and lacks CAP_FSETID.
 */
int inode_change_ok(const struct inode *inode, struct iattr *attr)
{
unsigned int ia_valid = attr->ia_valid;
/*
* First check size constraints. These can't be overridden using
* ATTR_FORCE.
*/
if (ia_valid & ATTR_SIZE) {
int error = inode_newsize_ok(inode, attr->ia_size);
if (error)
return error;
}
/* If force is set do it anyway. */
if (ia_valid & ATTR_FORCE)
return 0;
/* Make sure a caller can chown. */
if ((ia_valid & ATTR_UID) &&
(!uid_eq(current_fsuid(), inode->i_uid) ||
!uid_eq(attr->ia_uid, inode->i_uid)) &&
!capable_wrt_inode_uidgid(inode, CAP_CHOWN))
return -EPERM;
/* Make sure caller can chgrp. */
if ((ia_valid & ATTR_GID) &&
(!uid_eq(current_fsuid(), inode->i_uid) ||
(!in_group_p(attr->ia_gid) && !gid_eq(attr->ia_gid, inode->i_gid))) &&
!capable_wrt_inode_uidgid(inode, CAP_CHOWN))
return -EPERM;
/* Make sure a caller can chmod. */
if (ia_valid & ATTR_MODE) {
if (!inode_owner_or_capable(inode))
return -EPERM;
/* Also check the setgid bit! */
if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid :
inode->i_gid) &&
!capable_wrt_inode_uidgid(inode, CAP_FSETID))
attr->ia_mode &= ~S_ISGID;
}
/* Check for setting the inode time. */
if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) {
if (!inode_owner_or_capable(inode))
return -EPERM;
}
return 0;
}
"CWE-284",
"CWE-264"
]
| linux | 23adbe12ef7d3d4195e80800ab36b37bee28cd03 | 45,455,037,669,885,760,000,000,000,000,000,000,000 | 51 | fs,userns: Change inode_capable to capable_wrt_inode_uidgid
The kernel has no concept of capabilities with respect to inodes; inodes
exist independently of namespaces. For example, inode_capable(inode,
CAP_LINUX_IMMUTABLE) would be nonsense.
This patch changes inode_capable to check for uid and gid mappings and
renames it to capable_wrt_inode_uidgid, which should make it more
obvious what it does.
Fixes CVE-2014-4014.
Cc: Theodore Ts'o <[email protected]>
Cc: Serge Hallyn <[email protected]>
Cc: "Eric W. Biederman" <[email protected]>
Cc: Dave Chinner <[email protected]>
Cc: [email protected]
Signed-off-by: Andy Lutomirski <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
/*
 * Open a git:// TCP connection to "host" and expose it as a pipe-style
 * fd pair: fd[0] is the socket itself, fd[1] a dup() of it.  Returns
 * the shared no_fork sentinel since no child process is spawned for
 * this transport.
 * NOTE(review): the dup() return value is unchecked - on failure fd[1]
 * would be -1; confirm callers tolerate that (or that
 * git_tcp_connect_sock() dies on error before we get here).
 */
static struct child_process *git_tcp_connect(int fd[2], char *host, int flags)
{
int sockfd = git_tcp_connect_sock(host, flags);
fd[0] = sockfd;
fd[1] = dup(sockfd);
return &no_fork;
}
| git | a02ea577174ab8ed18f847cf1693f213e0b9c473 | 226,615,579,757,961,120,000,000,000,000,000,000,000 | 9 | git_connect_git(): forbid newlines in host and path
When we connect to a git:// server, we send an initial request that
looks something like:
002dgit-upload-pack repo.git\0host=example.com
If the repo path contains a newline, then it's included literally, and
we get:
002egit-upload-pack repo
.git\0host=example.com
This works fine if you really do have a newline in your repository name;
the server side uses the pktline framing to parse the string, not
newlines. However, there are many _other_ protocols in the wild that do
parse on newlines, such as HTTP. So a carefully constructed git:// URL
can actually turn into a valid HTTP request. For example:
git://localhost:1234/%0d%0a%0d%0aGET%20/%20HTTP/1.1 %0d%0aHost:localhost%0d%0a%0d%0a
becomes:
0050git-upload-pack /
GET / HTTP/1.1
Host:localhost
host=localhost:1234
on the wire. Again, this isn't a problem for a real Git server, but it
does mean that feeding a malicious URL to Git (e.g., through a
submodule) can cause it to make unexpected cross-protocol requests.
Since repository names with newlines are presumably quite rare (and
indeed, we already disallow them in git-over-http), let's just disallow
them over this protocol.
Hostnames could likewise inject a newline, but this is unlikely a
problem in practice; we'd try resolving the hostname with a newline in
it, which wouldn't work. Still, it doesn't hurt to err on the side of
caution there, since we would not expect them to work in the first
place.
The ssh and local code paths are unaffected by this patch. In both cases
we're trying to run upload-pack via a shell, and will quote the newline
so that it makes it intact. An attacker can point an ssh url at an
arbitrary port, of course, but unless there's an actual ssh server
there, we'd never get as far as sending our shell command anyway. We
_could_ similarly restrict newlines in those protocols out of caution,
but there seems little benefit to doing so.
The new test here is run alongside the git-daemon tests, which cover the
same protocol, but it shouldn't actually contact the daemon at all. In
theory we could make the test more robust by setting up an actual
repository with a newline in it (so that our clone would succeed if our
new check didn't kick in). But a repo directory with newline in it is
likely not portable across all filesystems. Likewise, we could check
git-daemon's log that it was not contacted at all, but we do not
currently record the log (and anyway, it would make the test racy with
the daemon's log write). We'll just check the client-side stderr to make
sure we hit the expected code path.
Reported-by: Harold Kim <[email protected]>
Signed-off-by: Jeff King <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]> |
/*
 * Parse an ISOBMFF 'ctts' (composition time offset) box from the
 * bitstream into a GF_CompositionOffsetBox.  The entry count is
 * validated against the remaining box size (8 bytes per entry) before
 * allocation, and max_ts_delta tracks the largest absolute offset seen.
 * Returns GF_OK, GF_ISOM_INVALID_FILE or GF_OUT_OF_MEM.
 */
GF_Err ctts_box_read(GF_Box *s, GF_BitStream *bs)
{
u32 i;
u32 sampleCount;
GF_CompositionOffsetBox *ptr = (GF_CompositionOffsetBox *)s;
/* ISOM_DECREASE_SIZE consumes bytes from ptr->size and is presumably
   what keeps the nb_entries bound below in sync - TODO confirm */
ISOM_DECREASE_SIZE(ptr, 4);
ptr->nb_entries = gf_bs_read_u32(bs);
if (ptr->nb_entries > ptr->size / 8) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in ctts\n", ptr->nb_entries));
return GF_ISOM_INVALID_FILE;
}
ptr->alloc_size = ptr->nb_entries;
/* NOTE(review): nb_entries == 0 gives gf_malloc(0); if that returns
   NULL this is reported as out-of-memory - verify intended */
ptr->entries = (GF_DttsEntry *)gf_malloc(sizeof(GF_DttsEntry)*ptr->alloc_size);
if (!ptr->entries) return GF_OUT_OF_MEM;
sampleCount = 0;
for (i=0; i<ptr->nb_entries; i++) {
ISOM_DECREASE_SIZE(ptr, 8);
ptr->entries[i].sampleCount = gf_bs_read_u32(bs);
/* version 1 offsets are signed, version 0 read as unsigned then cast */
if (ptr->version)
ptr->entries[i].decodingOffset = gf_bs_read_int(bs, 32);
else
ptr->entries[i].decodingOffset = (s32) gf_bs_read_u32(bs);
sampleCount += ptr->entries[i].sampleCount;
if (ptr->max_ts_delta < ABS(ptr->entries[i].decodingOffset))
ptr->max_ts_delta = ABS(ptr->entries[i].decodingOffset);
}
#ifndef GPAC_DISABLE_ISOM_WRITE
ptr->w_LastSampleNumber = sampleCount;
#endif
return GF_OK;
}
"CWE-787"
]
| gpac | 388ecce75d05e11fc8496aa4857b91245007d26e | 173,374,887,041,150,600,000,000,000,000,000,000,000 | 35 | fixed #1587 |
/*
 * phpinfo() handler for ext/xmlrpc: prints a table with the bundled
 * xmlrpc-epi core library version, the PHP extension version and
 * project credits.
 */
PHP_MINFO_FUNCTION(xmlrpc)
{
php_info_print_table_start();
php_info_print_table_row(2, "core library version", XMLRPC_GetVersionString());
php_info_print_table_row(2, "php extension version", PHP_XMLRPC_VERSION);
php_info_print_table_row(2, "author", "Dan Libby");
php_info_print_table_row(2, "homepage", "http://xmlrpc-epi.sourceforge.net");
php_info_print_table_row(2, "open sourced by", "Epinions.com");
php_info_print_table_end();
}
| php-src | f3c1863aa2721343245b63ac7bd68cfdc3dd41f3 | 4,619,556,654,505,992,500,000,000,000,000,000,000 | 10 | Fixed #70728
Conflicts:
ext/xmlrpc/xmlrpc-epi-php.c |
/*
 * check_header: validate the binlog magic at the start of `file` and
 * probe the first events so the correct Format_description_log_event
 * (3.23 / 4.x / 5.0+) ends up in the global glob_description_event
 * before real event reading starts.  The original file position is
 * restored before returning.
 * Returns OK_CONTINUE on success, ERROR_STOP on any unrecoverable
 * error (stat/read failure, bad magic, malformed descriptor event).
 */
static Exit_status check_header(IO_CACHE* file,
PRINT_EVENT_INFO *print_event_info,
const char* logname)
{
DBUG_ENTER("check_header");
uchar header[BIN_LOG_HEADER_SIZE];
uchar buf[PROBE_HEADER_LEN];
my_off_t tmp_pos, pos;
MY_STAT my_file_stat;
/* Start from a fresh v3 descriptor; it is replaced below if probing
   finds a 3.23 (format 1) or 5.0+ descriptor in the log itself. */
delete glob_description_event;
if (!(glob_description_event= new Format_description_log_event(3)))
{
error("Failed creating Format_description_log_event; out of memory?");
DBUG_RETURN(ERROR_STOP);
}
pos= my_b_tell(file);
/* fstat the file to check if the file is a regular file. */
if (my_fstat(file->file, &my_file_stat, MYF(0)) == -1)
{
error("Unable to stat the file.");
DBUG_RETURN(ERROR_STOP);
}
/* Only seekable regular files are rewound; pipes/FIFOs are read as-is. */
if ((my_file_stat.st_mode & S_IFMT) == S_IFREG)
my_b_seek(file, (my_off_t)0);
if (my_b_read(file, header, sizeof(header)))
{
error("Failed reading header; probably an empty file.");
DBUG_RETURN(ERROR_STOP);
}
if (memcmp(header, BINLOG_MAGIC, sizeof(header)))
{
error("File is not a binary log file.");
DBUG_RETURN(ERROR_STOP);
}
/*
Imagine we are running with --start-position=1000. We still need
to know the binlog format's. So we still need to find, if there is
one, the Format_desc event, or to know if this is a 3.23
binlog. So we need to first read the first events of the log,
those around offset 4. Even if we are reading a 3.23 binlog from
the start (no --start-position): we need to know the header length
(which is 13 in 3.23, 19 in 4.x) to be able to successfully print
the first event (Start_log_event_v3). So even in this case, we
need to "probe" the first bytes of the log *before* we do a real
read_log_event(). Because read_log_event() needs to know the
header's length to work fine.
*/
for(;;)
{
tmp_pos= my_b_tell(file); /* should be 4 the first time */
if (my_b_read(file, buf, sizeof(buf)))
{
if (file->error)
{
error("Could not read entry at offset %llu: "
"Error in log format or read error.", (ulonglong)tmp_pos);
DBUG_RETURN(ERROR_STOP);
}
/*
Otherwise this is just EOF : this log currently contains 0-2
events. Maybe it's going to be filled in the next
milliseconds; then we are going to have a problem if this a
3.23 log (imagine we are locally reading a 3.23 binlog which
is being written presently): we won't know it in
read_log_event() and will fail(). Similar problems could
happen with hot relay logs if --start-position is used (but a
--start-position which is posterior to the current size of the log).
These are rare problems anyway (reading a hot log + when we
read the first events there are not all there yet + when we
read a bit later there are more events + using a strange
--start-position).
*/
break;
}
else
{
DBUG_PRINT("info",("buf[EVENT_TYPE_OFFSET=%d]=%d",
EVENT_TYPE_OFFSET, buf[EVENT_TYPE_OFFSET]));
/* always test for a Start_v3, even if no --start-position */
if (buf[EVENT_TYPE_OFFSET] == START_EVENT_V3)
{
/* This is 3.23 or 4.x */
if (uint4korr(buf + EVENT_LEN_OFFSET) <
(LOG_EVENT_MINIMAL_HEADER_LEN + START_V3_HEADER_LEN))
{
/* This is 3.23 (format 1) */
delete glob_description_event;
if (!(glob_description_event= new Format_description_log_event(1)))
{
error("Failed creating Format_description_log_event; "
"out of memory?");
DBUG_RETURN(ERROR_STOP);
}
}
break;
}
else if (tmp_pos >= start_position)
break;
else if (buf[EVENT_TYPE_OFFSET] == FORMAT_DESCRIPTION_EVENT)
{
/* This is 5.0 */
Format_description_log_event *new_description_event;
my_b_seek(file, tmp_pos); /* seek back to event's start */
if (!(new_description_event= (Format_description_log_event*)
Log_event::read_log_event(file, glob_description_event,
opt_verify_binlog_checksum)))
/* EOF can't be hit here normally, so it's a real error */
{
error("Could not read a Format_description_log_event event at "
"offset %llu; this could be a log format error or read error.",
(ulonglong)tmp_pos);
DBUG_RETURN(ERROR_STOP);
}
if (opt_base64_output_mode == BASE64_OUTPUT_AUTO)
{
/*
process_event will delete *description_event and set it to
the new one, so we should not do it ourselves in this
case.
*/
Exit_status retval= process_event(print_event_info,
new_description_event, tmp_pos,
logname);
if (retval != OK_CONTINUE)
DBUG_RETURN(retval);
}
else
{
delete glob_description_event;
glob_description_event= new_description_event;
}
DBUG_PRINT("info",("Setting description_event"));
}
else if (buf[EVENT_TYPE_OFFSET] == ROTATE_EVENT)
{
Log_event *ev;
my_b_seek(file, tmp_pos); /* seek back to event's start */
if (!(ev= Log_event::read_log_event(file, glob_description_event,
opt_verify_binlog_checksum)))
{
/* EOF can't be hit here normally, so it's a real error */
error("Could not read a Rotate_log_event event at offset %llu;"
" this could be a log format error or read error.",
(ulonglong)tmp_pos);
DBUG_RETURN(ERROR_STOP);
}
delete ev;
}
else
break;
}
}
/* Restore the caller's position before handing the file back. */
my_b_seek(file, pos);
DBUG_RETURN(OK_CONTINUE);
}
"CWE-284",
"CWE-295"
]
| mysql-server | 3bd5589e1a5a93f9c224badf983cd65c45215390 | 22,359,544,269,455,440,000,000,000,000,000,000,000 | 160 | WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to unencrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options |
/*
 * Queue the driver-autoload work and wait (flush_work) until it has
 * completed, so all enumerated sequencer device drivers are loaded
 * before we return to the caller.
 */
void snd_seq_device_load_drivers(void)
{
queue_autoload_drivers();
flush_work(&autoload_work);
}
"CWE-416",
"CWE-401"
]
| linux | fc27fe7e8deef2f37cba3f2be2d52b6ca5eb9d57 | 56,360,941,092,001,800,000,000,000,000,000,000,000 | 5 | ALSA: seq: Cancel pending autoload work at unbinding device
ALSA sequencer core has a mechanism to load the enumerated devices
automatically, and it's performed in an off-load work. This seems
to cause a race when a sequencer is removed while the pending
autoload work is running. As syzkaller spotted, it may lead to some
use-after-free:
BUG: KASAN: use-after-free in snd_rawmidi_dev_seq_free+0x69/0x70
sound/core/rawmidi.c:1617
Write of size 8 at addr ffff88006c611d90 by task kworker/2:1/567
CPU: 2 PID: 567 Comm: kworker/2:1 Not tainted 4.13.0+ #29
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
Workqueue: events autoload_drivers
Call Trace:
__dump_stack lib/dump_stack.c:16 [inline]
dump_stack+0x192/0x22c lib/dump_stack.c:52
print_address_description+0x78/0x280 mm/kasan/report.c:252
kasan_report_error mm/kasan/report.c:351 [inline]
kasan_report+0x230/0x340 mm/kasan/report.c:409
__asan_report_store8_noabort+0x1c/0x20 mm/kasan/report.c:435
snd_rawmidi_dev_seq_free+0x69/0x70 sound/core/rawmidi.c:1617
snd_seq_dev_release+0x4f/0x70 sound/core/seq_device.c:192
device_release+0x13f/0x210 drivers/base/core.c:814
kobject_cleanup lib/kobject.c:648 [inline]
kobject_release lib/kobject.c:677 [inline]
kref_put include/linux/kref.h:70 [inline]
kobject_put+0x145/0x240 lib/kobject.c:694
put_device+0x25/0x30 drivers/base/core.c:1799
klist_devices_put+0x36/0x40 drivers/base/bus.c:827
klist_next+0x264/0x4a0 lib/klist.c:403
next_device drivers/base/bus.c:270 [inline]
bus_for_each_dev+0x17e/0x210 drivers/base/bus.c:312
autoload_drivers+0x3b/0x50 sound/core/seq_device.c:117
process_one_work+0x9fb/0x1570 kernel/workqueue.c:2097
worker_thread+0x1e4/0x1350 kernel/workqueue.c:2231
kthread+0x324/0x3f0 kernel/kthread.c:231
ret_from_fork+0x25/0x30 arch/x86/entry/entry_64.S:425
The fix is simply to assure canceling the autoload work at removing
the device.
Reported-by: Andrey Konovalov <[email protected]>
Tested-by: Andrey Konovalov <[email protected]>
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]> |
/*
** Debug-build sanity check: assert that the VDBE opcode array already
** has room for N more opcodes, i.e. that adding them will not require
** a reallocation.  A no-op when assert() is compiled out.
*/
void sqlite3VdbeVerifyNoMallocRequired(Vdbe *p, int N){
assert( p->nOp + N <= p->nOpAlloc );
}
"CWE-755"
]
| sqlite | 8654186b0236d556aa85528c2573ee0b6ab71be3 | 249,197,346,315,687,800,000,000,000,000,000,000,000 | 3 | When an error occurs while rewriting the parser tree for window functions
in the sqlite3WindowRewrite() routine, make sure that pParse->nErr is set,
and make sure that this shuts down any subsequent code generation that might
depend on the transformations that were implemented. This fixes a problem
discovered by the Yongheng and Rui fuzzer.
FossilOrigin-Name: e2bddcd4c55ba3cbe0130332679ff4b048630d0ced9a8899982edb5a3569ba7f |
/*
 * Resolve a session name (e.g. "gnome") to the command of its
 * "<name>.desktop" file by delegating to get_session_command_for_file().
 * On success *command receives the command string (caller-owned per the
 * delegate's contract - TODO confirm) and the result is TRUE.
 * NOTE(review): the return-type line is not visible in this chunk; the
 * body returns a gboolean.
 */
get_session_command_for_name (GdmSession *self,
const char *name,
char **command)
{
gboolean res;
char *filename;
/* build "<name>.desktop"; freed before returning */
filename = g_strdup_printf ("%s.desktop", name);
res = get_session_command_for_file (self, filename, command);
g_free (filename);
return res;
}
| gdm | 5ac224602f1d603aac5eaa72e1760d3e33a26f0a | 85,719,880,169,542,200,000,000,000,000,000,000,000 | 13 | session: disconnect signals from worker proxy when conversation is freed
We don't want an outstanding reference on the worker proxy to lead to
signal handlers getting dispatched after the conversation is freed.
https://bugzilla.gnome.org/show_bug.cgi?id=758032 |
BSONObj BSONObj::filterFieldsUndotted( const BSONObj &filter, bool inFilter ) const {
    /* Build a copy of this object keeping exactly the top-level fields
     * whose presence in `filter` matches `inFilter`:
     *   inFilter == true  -> keep fields that appear in filter
     *   inFilter == false -> keep fields absent from filter
     * Field names are matched undotted (no nested-path semantics). */
    BSONObjBuilder builder;
    BSONObjIterator it( *this );
    while ( it.moreWithEOO() ) {
        BSONElement elem = it.next();
        if ( elem.eoo() )
            break;
        bool present = !filter.getField( elem.fieldName() ).eoo();
        if ( present == inFilter )
            builder.append( elem );
    }
    return builder.obj();
}
"CWE-20"
]
| mongo | f9817a6cf64bdba8e1e1cef30a798110df746b58 | 240,200,454,242,104,920,000,000,000,000,000,000,000 | 14 | SERVER-7769 - turn objcheck on by default and use new fast bson validate |
/*
  Recompute the column's default value (calculate()) and then delegate
  to Item_field's result-set accessor for the numeric value.
*/
double Item_default_value::val_result()
{
calculate();
return Item_field::val_result();
}
"CWE-416"
]
| server | c02ebf3510850ba78a106be9974c94c3b97d8585 | 80,065,761,319,430,490,000,000,000,000,000,000,000 | 5 | MDEV-24176 Preparations
1. moved fix_vcol_exprs() call to open_table()
mysql_alter_table() doesn't do lock_tables() so it cannot win from
fix_vcol_exprs() from there. Tests affected: main.default_session
2. Vanilla cleanups and comments. |
/*
 * Handle a received DREP (disconnect reply) MAD.  Looks up the local
 * cm_id from the comm IDs in the message; if the connection is in a
 * disconnect state, moves it to timewait, cancels the outstanding DREQ
 * MAD, and either processes the work inline or queues it behind work
 * already in flight for this cm_id.  Returns 0 on success, -EINVAL if
 * the cm_id is unknown or in an unexpected state.
 */
static int cm_drep_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_drep_msg *drep_msg;
int ret;
drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
/* cm_acquire_id takes a reference; every exit path drops it */
cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
drep_msg->local_comm_id);
if (!cm_id_priv)
return -EINVAL;
work->cm_event.private_data = &drep_msg->private_data;
spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
spin_unlock_irq(&cm_id_priv->lock);
goto out;
}
cm_enter_timewait(cm_id_priv);
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
/* work_count going to zero means no other work is pending: process
   inline; otherwise queue behind the in-flight work */
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
else
cm_deref_id(cm_id_priv);
return 0;
out:
cm_deref_id(cm_id_priv);
return -EINVAL;
}
"CWE-20"
]
| linux | b2853fd6c2d0f383dbdf7427e263eb576a633867 | 240,606,931,682,809,120,000,000,000,000,000,000,000 | 37 | IB/core: Don't resolve passive side RoCE L2 address in CMA REQ handler
The code that resolves the passive side source MAC within the rdma_cm
connection request handler was both redundant and buggy, so remove it.
It was redundant since later, when an RC QP is modified to RTR state,
the resolution will take place in the ib_core module. It was buggy
because this callback also deals with UD SIDR exchange, for which we
incorrectly looked at the REQ member of the CM event and dereferenced
a random value.
Fixes: dd5f03beb4f7 ("IB/core: Ethernet L2 attributes in verbs/cm structures")
Signed-off-by: Moni Shoua <[email protected]>
Signed-off-by: Or Gerlitz <[email protected]>
Signed-off-by: Roland Dreier <[email protected]> |
/*
 * Config handler for the "garp_lower_prio_delay <seconds>" keyword of
 * the vrrp_instance currently being parsed (list tail).  The value is
 * bounded to UINT_MAX / TIMER_HZ so the conversion to TIMER_HZ units
 * below cannot overflow; invalid input is reported and ignored.
 * NOTE(review): the return-type line is not visible in this chunk.
 */
vrrp_garp_lower_prio_delay_handler(vector_t *strvec)
{
vrrp_t *vrrp = LIST_TAIL_DATA(vrrp_data->vrrp);
unsigned delay;
if (!read_unsigned_strvec(strvec, 1, &delay, 0, UINT_MAX / TIMER_HZ, true)) {
report_config_error(CONFIG_GENERAL_ERROR, "(%s): garp_lower_prio_delay '%s' invalid - ignoring", vrrp->iname, FMT_STR_VSLOT(strvec, 1));
return;
}
/* stored internally in TIMER_HZ ticks, not seconds */
vrrp->garp_lower_prio_delay = delay * TIMER_HZ;
}
"CWE-59",
"CWE-61"
]
| keepalived | 04f2d32871bb3b11d7dc024039952f2fe2750306 | 68,251,768,706,417,320,000,000,000,000,000,000,000 | 12 | When opening files for write, ensure they aren't symbolic links
Issue #1048 identified that if, for example, a non privileged user
created a symbolic link from /etc/keepalived.data to /etc/passwd,
writing to /etc/keepalived.data (which could be invoked via DBus)
would cause /etc/passwd to be overwritten.
This commit stops keepalived writing to pathnames where the ultimate
component is a symbolic link, by setting O_NOFOLLOW whenever opening
a file for writing.
This might break some setups, where, for example, /etc/keepalived.data
was a symbolic link to /home/fred/keepalived.data. If this was the case,
instead create a symbolic link from /home/fred/keepalived.data to
/tmp/keepalived.data, so that the file is still accessible via
/home/fred/keepalived.data.
There doesn't appear to be a way around this backward incompatibility,
since even checking if the pathname is a symbolic link prior to opening
for writing would create a race condition.
Signed-off-by: Quentin Armitage <[email protected]> |
// Returns the extent of dimension i.  Bounds are checked only in debug
// builds (TFLITE_DCHECK_*).  Shapes with more than kMaxSmallSize
// dimensions are stored out-of-line via dims_pointer_; smaller shapes
// use the inline dims_ array.
inline int32_t Dims(int i) const {
TFLITE_DCHECK_GE(i, 0);
TFLITE_DCHECK_LT(i, size_);
return size_ > kMaxSmallSize ? dims_pointer_[i] : dims_[i];
}
"CWE-125",
"CWE-787"
]
| tensorflow | 8ee24e7949a203d234489f9da2c5bf45a7d5157d | 261,762,772,535,595,930,000,000,000,000,000,000,000 | 5 | [tflite] Ensure `MatchingDim` does not allow buffer overflow.
We check in `MatchingDim` that both arguments have the same dimensionality, however that is a `DCHECK` only enabled if building in debug mode. Hence, it could be possible to cause buffer overflows by passing in a tensor with larger dimensions as the second argument. To fix, we now make `MatchingDim` return the minimum of the two sizes.
A much better fix would be to return a status object but that requires refactoring a large part of the codebase for minor benefits.
PiperOrigin-RevId: 332526127
Change-Id: If627d0d2c80a685217b6e0d1e64b0872dbf1c5e4 |
/*
 * Decode a JSON array (current jsmn token) into a newly allocated
 * UA array of `type` elements at *dst, dispatching each member through
 * decodeJsonJumpTable.  On any member failure the partial array is
 * freed and *dst reset to NULL.
 * NOTE(review): the return-type line is not visible in this chunk; the
 * body returns a `status` code.
 */
Array_decodeJson_internal(void **dst, const UA_DataType *type,
CtxJson *ctx, ParseCtx *parseCtx, UA_Boolean moveToken) {
(void) moveToken;
status ret;
if(parseCtx->tokenArray[parseCtx->index].type != JSMN_ARRAY)
return UA_STATUSCODE_BADDECODINGERROR;
size_t length = (size_t)parseCtx->tokenArray[parseCtx->index].size;
/* Save the length of the array */
/* presumably dst points into a struct whose preceding size_t member is
   the array-length field (open62541 array layout) - TODO confirm */
size_t *p = (size_t*) dst - 1;
*p = length;
/* Return early for empty arrays */
if(length == 0) {
*dst = UA_EMPTY_ARRAY_SENTINEL;
return UA_STATUSCODE_GOOD;
}
/* Allocate memory */
*dst = UA_calloc(length, type->memSize);
if(*dst == NULL)
return UA_STATUSCODE_BADOUTOFMEMORY;
parseCtx->index++; /* We go to first Array member!*/
/* Decode array members */
uintptr_t ptr = (uintptr_t)*dst;
for(size_t i = 0; i < length; ++i) {
ret = decodeJsonJumpTable[type->typeKind]((void*)ptr, type, ctx, parseCtx, true);
if(ret != UA_STATUSCODE_GOOD) {
/* i+1 covers the partially-decoded current member as well */
UA_Array_delete(*dst, i+1, type);
*dst = NULL;
return ret;
}
ptr += type->memSize;
}
return UA_STATUSCODE_GOOD;
}
"CWE-703",
"CWE-787"
]
| open62541 | c800e2987b10bb3af6ef644b515b5d6392f8861d | 131,271,798,762,729,260,000,000,000,000,000,000,000 | 40 | fix(json): Check max recursion depth in more places |
/*
 * Release all_q_mutex, re-enabling blk-mq CPU-hotplug handling.
 * Presumably pairs with a blk_mq_disable_hotplug() that takes the
 * mutex - confirm against the rest of blk-mq.c.
 */
void blk_mq_enable_hotplug(void)
{
mutex_unlock(&all_q_mutex);
}
"CWE-362",
"CWE-264"
]
| linux | 0048b4837affd153897ed1222283492070027aa9 | 209,017,170,494,490,660,000,000,000,000,000,000,000 | 4 | blk-mq: fix race between timeout and freeing request
Inside timeout handler, blk_mq_tag_to_rq() is called
to retrieve the request from one tag. This way is obviously
wrong because the request can be freed any time and some
fields of the request can't be trusted, then kernel oops
might be triggered[1].
Currently wrt. blk_mq_tag_to_rq(), the only special case is
that the flush request can share same tag with the request
cloned from, and the two requests can't be active at the same
time, so this patch fixes the above issue by updating tags->rqs[tag]
with the active request(either flush rq or the request cloned
from) of the tag.
Also blk_mq_tag_to_rq() gets much simplified with this patch.
Given blk_mq_tag_to_rq() is mainly for drivers and the caller must
make sure the request can't be freed, so in bt_for_each() this
helper is replaced with tags->rqs[tag].
[1] kernel oops log
[ 439.696220] BUG: unable to handle kernel NULL pointer dereference at 0000000000000158^M
[ 439.697162] IP: [<ffffffff812d89ba>] blk_mq_tag_to_rq+0x21/0x6e^M
[ 439.700653] PGD 7ef765067 PUD 7ef764067 PMD 0 ^M
[ 439.700653] Oops: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC ^M
[ 439.700653] Dumping ftrace buffer:^M
[ 439.700653] (ftrace buffer empty)^M
[ 439.700653] Modules linked in: nbd ipv6 kvm_intel kvm serio_raw^M
[ 439.700653] CPU: 6 PID: 2779 Comm: stress-ng-sigfd Not tainted 4.2.0-rc5-next-20150805+ #265^M
[ 439.730500] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011^M
[ 439.730500] task: ffff880605308000 ti: ffff88060530c000 task.ti: ffff88060530c000^M
[ 439.730500] RIP: 0010:[<ffffffff812d89ba>] [<ffffffff812d89ba>] blk_mq_tag_to_rq+0x21/0x6e^M
[ 439.730500] RSP: 0018:ffff880819203da0 EFLAGS: 00010283^M
[ 439.730500] RAX: ffff880811b0e000 RBX: ffff8800bb465f00 RCX: 0000000000000002^M
[ 439.730500] RDX: 0000000000000000 RSI: 0000000000000202 RDI: 0000000000000000^M
[ 439.730500] RBP: ffff880819203db0 R08: 0000000000000002 R09: 0000000000000000^M
[ 439.730500] R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000202^M
[ 439.730500] R13: ffff880814104800 R14: 0000000000000002 R15: ffff880811a2ea00^M
[ 439.730500] FS: 00007f165b3f5740(0000) GS:ffff880819200000(0000) knlGS:0000000000000000^M
[ 439.730500] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b^M
[ 439.730500] CR2: 0000000000000158 CR3: 00000007ef766000 CR4: 00000000000006e0^M
[ 439.730500] Stack:^M
[ 439.730500] 0000000000000008 ffff8808114eed90 ffff880819203e00 ffffffff812dc104^M
[ 439.755663] ffff880819203e40 ffffffff812d9f5e 0000020000000000 ffff8808114eed80^M
[ 439.755663] Call Trace:^M
[ 439.755663] <IRQ> ^M
[ 439.755663] [<ffffffff812dc104>] bt_for_each+0x6e/0xc8^M
[ 439.755663] [<ffffffff812d9f5e>] ? blk_mq_rq_timed_out+0x6a/0x6a^M
[ 439.755663] [<ffffffff812d9f5e>] ? blk_mq_rq_timed_out+0x6a/0x6a^M
[ 439.755663] [<ffffffff812dc1b3>] blk_mq_tag_busy_iter+0x55/0x5e^M
[ 439.755663] [<ffffffff812d88b4>] ? blk_mq_bio_to_request+0x38/0x38^M
[ 439.755663] [<ffffffff812d8911>] blk_mq_rq_timer+0x5d/0xd4^M
[ 439.755663] [<ffffffff810a3e10>] call_timer_fn+0xf7/0x284^M
[ 439.755663] [<ffffffff810a3d1e>] ? call_timer_fn+0x5/0x284^M
[ 439.755663] [<ffffffff812d88b4>] ? blk_mq_bio_to_request+0x38/0x38^M
[ 439.755663] [<ffffffff810a46d6>] run_timer_softirq+0x1ce/0x1f8^M
[ 439.755663] [<ffffffff8104c367>] __do_softirq+0x181/0x3a4^M
[ 439.755663] [<ffffffff8104c76e>] irq_exit+0x40/0x94^M
[ 439.755663] [<ffffffff81031482>] smp_apic_timer_interrupt+0x33/0x3e^M
[ 439.755663] [<ffffffff815559a4>] apic_timer_interrupt+0x84/0x90^M
[ 439.755663] <EOI> ^M
[ 439.755663] [<ffffffff81554350>] ? _raw_spin_unlock_irq+0x32/0x4a^M
[ 439.755663] [<ffffffff8106a98b>] finish_task_switch+0xe0/0x163^M
[ 439.755663] [<ffffffff8106a94d>] ? finish_task_switch+0xa2/0x163^M
[ 439.755663] [<ffffffff81550066>] __schedule+0x469/0x6cd^M
[ 439.755663] [<ffffffff8155039b>] schedule+0x82/0x9a^M
[ 439.789267] [<ffffffff8119b28b>] signalfd_read+0x186/0x49a^M
[ 439.790911] [<ffffffff8106d86a>] ? wake_up_q+0x47/0x47^M
[ 439.790911] [<ffffffff811618c2>] __vfs_read+0x28/0x9f^M
[ 439.790911] [<ffffffff8117a289>] ? __fget_light+0x4d/0x74^M
[ 439.790911] [<ffffffff811620a7>] vfs_read+0x7a/0xc6^M
[ 439.790911] [<ffffffff8116292b>] SyS_read+0x49/0x7f^M
[ 439.790911] [<ffffffff81554c17>] entry_SYSCALL_64_fastpath+0x12/0x6f^M
[ 439.790911] Code: 48 89 e5 e8 a9 b8 e7 ff 5d c3 0f 1f 44 00 00 55 89
f2 48 89 e5 41 54 41 89 f4 53 48 8b 47 60 48 8b 1c d0 48 8b 7b 30 48 8b
53 38 <48> 8b 87 58 01 00 00 48 85 c0 75 09 48 8b 97 88 0c 00 00 eb 10
^M
[ 439.790911] RIP [<ffffffff812d89ba>] blk_mq_tag_to_rq+0x21/0x6e^M
[ 439.790911] RSP <ffff880819203da0>^M
[ 439.790911] CR2: 0000000000000158^M
[ 439.790911] ---[ end trace d40af58949325661 ]---^M
Cc: <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
void Dispatcher::newparamchar( const Parser::Param *act )
{
  /* Accumulate one numeric-parameter character (a digit or ';') into
     the pending parameter string, ignoring input past the 100-char cap
     (enough for 16 five-char params plus 15 semicolons).  Any new
     character invalidates the previously parsed parameter list. */
  assert( act->char_present );
  const char c = act->ch;
  assert( c == ';' || ( c >= '0' && c <= '9' ) );
  if ( params.length() < 100 ) {
    params.push_back( c );
    act->handled = true;
  }
  parsed = false;
}
"CWE-399"
]
| mosh | 9791768705528e911bfca6c4d8aa88139035060e | 73,696,567,353,642,665,000,000,000,000,000,000,000 | 11 | Cap escape sequence parameters to prevent long loops.
Fixes #271 github issue. |
/* {{{ proto bool chdir(string directory)
   Change the current working directory to `directory`.  The "p" parse
   spec rejects paths containing NUL bytes; the path is also checked
   against open_basedir.  On success any cached (l)stat entries that
   were recorded with relative paths are invalidated, since they are no
   longer meaningful under the new cwd.  Returns true/false. */
PHP_FUNCTION(chdir)
{
char *str;
int ret, str_len;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "p", &str, &str_len) == FAILURE) {
RETURN_FALSE;
}
if (php_check_open_basedir(str TSRMLS_CC)) {
RETURN_FALSE;
}
ret = VCWD_CHDIR(str);
if (ret != 0) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s (errno %d)", strerror(errno), errno);
RETURN_FALSE;
}
/* drop stat-cache entries keyed by now-stale relative paths */
if (BG(CurrentStatFile) && !IS_ABSOLUTE_PATH(BG(CurrentStatFile), strlen(BG(CurrentStatFile)))) {
efree(BG(CurrentStatFile));
BG(CurrentStatFile) = NULL;
}
if (BG(CurrentLStatFile) && !IS_ABSOLUTE_PATH(BG(CurrentLStatFile), strlen(BG(CurrentLStatFile)))) {
efree(BG(CurrentLStatFile));
BG(CurrentLStatFile) = NULL;
}
RETURN_TRUE;
}
"CWE-19"
]
| php-src | be9b2a95adb504abd5acdc092d770444ad6f6854 | 215,803,266,267,490,900,000,000,000,000,000,000,000 | 30 | Fixed bug #69418 - more s->p fixes for filenames |
// Parse one EXR header (its attribute list) from buf/size into *info.
//
// Returns TINYEXR_SUCCESS on success.  On failure a TINYEXR_ERROR_* code is
// returned and, when err is non-NULL, a human-readable message is appended.
// For multipart files an empty header (a single NUL byte) terminates the
// header list: *empty_header is set to true and TINYEXR_SUCCESS is returned
// without touching *info.
static int ParseEXRHeader(HeaderInfo *info, bool *empty_header,
                          const EXRVersion *version, std::string *err,
                          const unsigned char *buf, size_t size) {
  const char *marker = reinterpret_cast<const char *>(&buf[0]);

  if (empty_header) {
    (*empty_header) = false;
  }

  if (version->multipart) {
    if (size > 0 && marker[0] == '\0') {
      // End of header list.
      if (empty_header) {
        (*empty_header) = true;
      }
      return TINYEXR_SUCCESS;
    }
  }

  // According to the spec, the header of every OpenEXR file must contain at
  // least the following attributes:
  //
  // channels chlist
  // compression compression
  // dataWindow box2i
  // displayWindow box2i
  // lineOrder lineOrder
  // pixelAspectRatio float
  // screenWindowCenter v2f
  // screenWindowWidth float
  bool has_channels = false;
  bool has_compression = false;
  bool has_data_window = false;
  bool has_display_window = false;
  bool has_line_order = false;
  bool has_pixel_aspect_ratio = false;
  bool has_screen_window_center = false;
  bool has_screen_window_width = false;

  info->data_window[0] = 0;
  info->data_window[1] = 0;
  info->data_window[2] = 0;
  info->data_window[3] = 0;
  info->line_order = 0;  // @fixme
  info->display_window[0] = 0;
  info->display_window[1] = 0;
  info->display_window[2] = 0;
  info->display_window[3] = 0;
  info->screen_window_center[0] = 0.0f;
  info->screen_window_center[1] = 0.0f;
  info->screen_window_width = -1.0f;
  info->pixel_aspect_ratio = -1.0f;

  info->tile_size_x = -1;
  info->tile_size_y = -1;
  info->tile_level_mode = -1;
  info->tile_rounding_mode = -1;

  info->attributes.clear();

  // Read attributes until the terminating NUL byte (or attribute cap).
  size_t orig_size = size;
  for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) {
    if (0 == size) {
      if (err) {
        (*err) += "Insufficient data size for attributes.\n";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    } else if (marker[0] == '\0') {
      size--;
      break;
    }

    std::string attr_name;
    std::string attr_type;
    std::vector<unsigned char> data;
    size_t marker_size;
    if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
                                marker, size)) {
      if (err) {
        (*err) += "Failed to read attribute.\n";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += marker_size;
    size -= marker_size;

    if (version->tiled && attr_name.compare("tiles") == 0) {
      unsigned int x_size, y_size;
      unsigned char tile_mode;
      // FIX: the payload comes from untrusted input, so a runtime size
      // check is required.  The previous assert(data.size() == 9) compiled
      // away in release builds and the memcpy below read out of bounds.
      if (data.size() != 9) {
        if (err) {
          (*err) += "Invalid \"tiles\" attribute size.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
      memcpy(&x_size, &data.at(0), sizeof(int));
      memcpy(&y_size, &data.at(4), sizeof(int));
      tile_mode = data[8];
      tinyexr::swap4(&x_size);
      tinyexr::swap4(&y_size);

      info->tile_size_x = static_cast<int>(x_size);
      info->tile_size_y = static_cast<int>(y_size);

      // mode = levelMode + roundingMode * 16
      info->tile_level_mode = tile_mode & 0x3;
      info->tile_rounding_mode = (tile_mode >> 4) & 0x1;

    } else if (attr_name.compare("compression") == 0) {
      // FIX: guard against a zero-length payload before reading data[0].
      if (data.empty()) {
        if (err) {
          (*err) += "Invalid \"compression\" attribute size.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
      bool ok = false;
      if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) {
        ok = true;
      }

      if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
        ok = true;
#else
        if (err) {
          (*err) = "PIZ compression is not supported.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
      }

      if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
        ok = true;
#else
        if (err) {
          (*err) = "ZFP compression is not supported.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
      }

      if (!ok) {
        if (err) {
          (*err) = "Unknown compression type.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
      }

      info->compression_type = static_cast<int>(data[0]);
      has_compression = true;

    } else if (attr_name.compare("channels") == 0) {
      // name: zero-terminated string, from 1 to 255 bytes long
      // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
      // pLinear: unsigned char, possible values are 0 and 1
      // reserved: three chars, should be zero
      // xSampling: int
      // ySampling: int

      if (!ReadChannelInfo(info->channels, data)) {
        if (err) {
          (*err) += "Failed to parse channel info.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      if (info->channels.size() < 1) {
        if (err) {
          (*err) += "# of channels is zero.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      has_channels = true;

    } else if (attr_name.compare("dataWindow") == 0) {
      if (data.size() >= 16) {
        memcpy(&info->data_window[0], &data.at(0), sizeof(int));
        memcpy(&info->data_window[1], &data.at(4), sizeof(int));
        memcpy(&info->data_window[2], &data.at(8), sizeof(int));
        memcpy(&info->data_window[3], &data.at(12), sizeof(int));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[0]));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[1]));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[2]));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[3]));
        has_data_window = true;
      }
    } else if (attr_name.compare("displayWindow") == 0) {
      if (data.size() >= 16) {
        memcpy(&info->display_window[0], &data.at(0), sizeof(int));
        memcpy(&info->display_window[1], &data.at(4), sizeof(int));
        memcpy(&info->display_window[2], &data.at(8), sizeof(int));
        memcpy(&info->display_window[3], &data.at(12), sizeof(int));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->display_window[0]));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->display_window[1]));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->display_window[2]));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->display_window[3]));
        has_display_window = true;
      }
    } else if (attr_name.compare("lineOrder") == 0) {
      if (data.size() >= 1) {
        info->line_order = static_cast<int>(data[0]);
        has_line_order = true;
      }
    } else if (attr_name.compare("pixelAspectRatio") == 0) {
      if (data.size() >= sizeof(float)) {
        memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->pixel_aspect_ratio));
        has_pixel_aspect_ratio = true;
      }
    } else if (attr_name.compare("screenWindowCenter") == 0) {
      if (data.size() >= 8) {
        memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float));
        memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->screen_window_center[0]));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->screen_window_center[1]));
        has_screen_window_center = true;
      }
    } else if (attr_name.compare("screenWindowWidth") == 0) {
      if (data.size() >= sizeof(float)) {
        memcpy(&info->screen_window_width, &data.at(0), sizeof(float));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->screen_window_width));
        has_screen_window_width = true;
      }
    } else if (attr_name.compare("chunkCount") == 0) {
      if (data.size() >= sizeof(int)) {
        memcpy(&info->chunk_count, &data.at(0), sizeof(int));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->chunk_count));
      }
    } else {
      // Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES)
      if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
        EXRAttribute attrib;
#ifdef _MSC_VER
        strncpy_s(attrib.name, attr_name.c_str(), 255);
        strncpy_s(attrib.type, attr_type.c_str(), 255);
#else
        strncpy(attrib.name, attr_name.c_str(), 255);
        strncpy(attrib.type, attr_type.c_str(), 255);
#endif
        attrib.name[255] = '\0';
        attrib.type[255] = '\0';
        attrib.size = static_cast<int>(data.size());
        // FIX: an empty payload previously triggered data.at(0) on an empty
        // vector (std::out_of_range); also check the malloc result.
        if (data.empty()) {
          attrib.value = NULL;
        } else {
          attrib.value = static_cast<unsigned char *>(malloc(data.size()));
          if (attrib.value == NULL) {
            if (err) {
              (*err) += "Failed to allocate memory for attribute value.\n";
            }
            return TINYEXR_ERROR_INVALID_DATA;
          }
          memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0),
                 data.size());
        }
        info->attributes.push_back(attrib);
      }
    }
  }

  // Check if required attributes exist
  {
    std::stringstream ss_err;

    if (!has_compression) {
      ss_err << "\"compression\" attribute not found in the header."
             << std::endl;
    }

    if (!has_channels) {
      ss_err << "\"channels\" attribute not found in the header." << std::endl;
    }

    if (!has_line_order) {
      ss_err << "\"lineOrder\" attribute not found in the header." << std::endl;
    }

    if (!has_display_window) {
      ss_err << "\"displayWindow\" attribute not found in the header."
             << std::endl;
    }

    if (!has_data_window) {
      ss_err << "\"dataWindow\" attribute not found in the header or invalid."
             << std::endl;
    }

    if (!has_pixel_aspect_ratio) {
      ss_err << "\"pixelAspectRatio\" attribute not found in the header."
             << std::endl;
    }

    if (!has_screen_window_width) {
      ss_err << "\"screenWindowWidth\" attribute not found in the header."
             << std::endl;
    }

    if (!has_screen_window_center) {
      ss_err << "\"screenWindowCenter\" attribute not found in the header."
             << std::endl;
    }

    if (!(ss_err.str().empty())) {
      if (err) {
        (*err) += ss_err.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }
  }

  // Bytes consumed by this header (everything up to and including the
  // terminating NUL).
  info->header_len = static_cast<unsigned int>(orig_size - size);

  return TINYEXR_SUCCESS;
}
"CWE-20",
"CWE-190"
]
| tinyexr | a685e3332f61cd4e59324bf3f669d36973d64270 | 4,114,788,920,812,478,500,000,000,000,000,000,000 | 306 | Make line_no with too large value(2**20) invalid. Fixes #124 |
/* Default read callback for memory-backed streams: consume `count` bytes
 * from the buffer pointed to by stream->state.  A NULL `buf` means
 * "skip the bytes without storing them". */
static bool checkreturn buf_read(pb_istream_t *stream, pb_byte_t *buf, size_t count)
{
    const pb_byte_t *src = (const pb_byte_t*)stream->state;

    /* Advance the stream position past the consumed bytes. */
    stream->state = (pb_byte_t*)stream->state + count;

    if (buf != NULL)
    {
        size_t idx = 0;
        while (idx < count)
        {
            buf[idx] = src[idx];
            ++idx;
        }
    }

    return true;
}
"CWE-20",
"CWE-119"
]
| nanopb | 4fe23595732b6f1254cfc11a9b8d6da900b55b0c | 114,756,554,477,416,320,000,000,000,000,000,000,000 | 14 | Fix memory leak with oneofs and PB_ENABLE_MALLOC (#615)
Nanopb would leak memory when all of the following conditions were true:
- PB_ENABLE_MALLOC is defined at the compile time
- Message definitions contains an oneof field,
the oneof contains a static submessage, and
the static submessage contains a pointer field.
- Data being decoded contains two values for the submessage.
The logic in pb_release_union_field would detect that the same
submessage occurs twice, and wouldn't release it because keeping
the old values is necessary to match the C++ library behavior
regarding message merges.
But then decode_static_field() would go to memset() the whole
submessage to zero, because it unconditionally assumed it to
be uninitialized memory. This would normally happen when the
contents of the union field is switched to a different oneof
item, instead of merging with the same one.
This commit changes it so that the field is memset() only when
`which_field` contains a different tag. |
/* build_object:
 * GoaProvider vfunc that (re)builds the D-Bus interface skeletons for an
 * ownCloud-style account from its keyfile group: PasswordBased plus the
 * optional Calendar (CalDAV), Contacts (CardDAV) and Files (WebDAV)
 * interfaces, depending on the *Enabled keys.  Returns TRUE on success,
 * FALSE with *error set when chaining up to the parent class fails. */
build_object (GoaProvider *provider,
              GoaObjectSkeleton *object,
              GKeyFile *key_file,
              const gchar *group,
              GDBusConnection *connection,
              gboolean just_added,
              GError **error)
{
  GoaAccount *account;
  GoaCalendar *calendar;
  GoaContacts *contacts;
  GoaFiles *files;
  GoaPasswordBased *password_based;
  SoupURI *uri;
  gboolean accept_ssl_errors;
  gboolean calendar_enabled;
  gboolean contacts_enabled;
  gboolean files_enabled;
  gboolean ret;
  const gchar *identity;
  gchar *uri_string;

  account = NULL;
  calendar = NULL;
  contacts = NULL;
  files = NULL;
  password_based = NULL;
  uri = NULL;
  uri_string = NULL;
  ret = FALSE;

  /* Chain up */
  if (!GOA_PROVIDER_CLASS (goa_owncloud_provider_parent_class)->build_object (provider,
                                                                              object,
                                                                              key_file,
                                                                              group,
                                                                              connection,
                                                                              just_added,
                                                                              error))
    goto out;

  /* Attach (once) the interface answering GetPassword over D-Bus. */
  password_based = goa_object_get_password_based (GOA_OBJECT (object));
  if (password_based == NULL)
    {
      password_based = goa_password_based_skeleton_new ();
      /* Ensure D-Bus method invocations run in their own thread */
      g_dbus_interface_skeleton_set_flags (G_DBUS_INTERFACE_SKELETON (password_based),
                                           G_DBUS_INTERFACE_SKELETON_FLAGS_HANDLE_METHOD_INVOCATIONS_IN_THREAD);
      goa_object_skeleton_set_password_based (object, password_based);
      g_signal_connect (password_based,
                        "handle-get-password",
                        G_CALLBACK (on_handle_get_password),
                        NULL);
    }

  /* Base server URI from the keyfile; the account identity becomes the
   * user component of every derived endpoint URI below. */
  account = goa_object_get_account (GOA_OBJECT (object));
  identity = goa_account_get_identity (account);
  uri_string = g_key_file_get_string (key_file, group, "Uri", NULL);
  uri = soup_uri_new (uri_string);
  if (uri != NULL)
    soup_uri_set_user (uri, identity);

  accept_ssl_errors = g_key_file_get_boolean (key_file, group, "AcceptSslErrors", NULL);

  /* Calendar */
  calendar = goa_object_get_calendar (GOA_OBJECT (object));
  calendar_enabled = g_key_file_get_boolean (key_file, group, "CalendarEnabled", NULL);
  if (calendar_enabled)
    {
      if (calendar == NULL)
        {
          gchar *uri_caldav;

          uri_caldav = NULL;

          if (uri != NULL)
            {
              gchar *uri_tmp;

              uri_tmp = soup_uri_to_string (uri, FALSE);
              uri_caldav = g_strconcat (uri_tmp, CALDAV_ENDPOINT, NULL);
              g_free (uri_tmp);
            }

          calendar = goa_calendar_skeleton_new ();
          g_object_set (G_OBJECT (calendar),
                        "accept-ssl-errors", accept_ssl_errors,
                        "uri", uri_caldav,
                        NULL);
          goa_object_skeleton_set_calendar (object, calendar);
          g_free (uri_caldav);
        }
    }
  else
    {
      if (calendar != NULL)
        goa_object_skeleton_set_calendar (object, NULL);
    }

  /* Contacts */
  contacts = goa_object_get_contacts (GOA_OBJECT (object));
  contacts_enabled = g_key_file_get_boolean (key_file, group, "ContactsEnabled", NULL);
  if (contacts_enabled)
    {
      if (contacts == NULL)
        {
          gchar *uri_carddav;

          uri_carddav = NULL;

          if (uri != NULL)
            {
              gchar *uri_tmp;

              uri_tmp = soup_uri_to_string (uri, FALSE);
              uri_carddav = g_strconcat (uri_tmp, CARDDAV_ENDPOINT, NULL);
              g_free (uri_tmp);
            }

          contacts = goa_contacts_skeleton_new ();
          g_object_set (G_OBJECT (contacts),
                        "accept-ssl-errors", accept_ssl_errors,
                        "uri", uri_carddav,
                        NULL);
          goa_object_skeleton_set_contacts (object, contacts);
          g_free (uri_carddav);
        }
    }
  else
    {
      if (contacts != NULL)
        goa_object_skeleton_set_contacts (object, NULL);
    }

  /* Files.
   * NOTE: this branch rewrites the scheme of the shared `uri` to dav/davs
   * in place, so it must stay AFTER the Calendar and Contacts branches,
   * which need the original http/https scheme. */
  files = goa_object_get_files (GOA_OBJECT (object));
  files_enabled = g_key_file_get_boolean (key_file, group, "FilesEnabled", NULL);
  if (files_enabled)
    {
      if (files == NULL)
        {
          gchar *uri_webdav;

          uri_webdav = NULL;

          if (uri != NULL)
            {
              const gchar *scheme;
              gchar *uri_tmp;

              scheme = soup_uri_get_scheme (uri);
              if (g_strcmp0 (scheme, SOUP_URI_SCHEME_HTTPS) == 0)
                soup_uri_set_scheme (uri, "davs");
              else
                soup_uri_set_scheme (uri, "dav");
              uri_tmp = soup_uri_to_string (uri, FALSE);
              uri_webdav = g_strconcat (uri_tmp, WEBDAV_ENDPOINT, NULL);
              g_free (uri_tmp);
            }

          files = goa_files_skeleton_new ();
          g_object_set (G_OBJECT (files),
                        "accept-ssl-errors", accept_ssl_errors,
                        "uri", uri_webdav,
                        NULL);
          goa_object_skeleton_set_files (object, files);
          g_free (uri_webdav);
        }
    }
  else
    {
      if (files != NULL)
        goa_object_skeleton_set_files (object, NULL);
    }

  /* For a freshly created account, seed the *Disabled properties and keep
   * them synced back to the keyfile when the user toggles them. */
  if (just_added)
    {
      goa_account_set_calendar_disabled (account, !calendar_enabled);
      goa_account_set_contacts_disabled (account, !contacts_enabled);
      goa_account_set_files_disabled (account, !files_enabled);

      g_signal_connect (account,
                        "notify::calendar-disabled",
                        G_CALLBACK (goa_util_account_notify_property_cb),
                        "CalendarEnabled");
      g_signal_connect (account,
                        "notify::contacts-disabled",
                        G_CALLBACK (goa_util_account_notify_property_cb),
                        "ContactsEnabled");
      g_signal_connect (account,
                        "notify::files-disabled",
                        G_CALLBACK (goa_util_account_notify_property_cb),
                        "FilesEnabled");
    }

  ret = TRUE;

 out:
  g_clear_object (&calendar);
  g_clear_object (&contacts);
  g_clear_object (&files);
  g_clear_object (&password_based);
  g_clear_pointer (&uri, (GDestroyNotify *) soup_uri_free);
  g_free (uri_string);
  return ret;
}
"CWE-310"
]
| gnome-online-accounts | edde7c63326242a60a075341d3fea0be0bc4d80e | 223,538,503,652,261,040,000,000,000,000,000,000,000 | 208 | Guard against invalid SSL certificates
None of the branded providers (eg., Google, Facebook and Windows Live)
should ever have an invalid certificate. So set "ssl-strict" on the
SoupSession object being used by GoaWebView.
Providers like ownCloud and Exchange might have to deal with
certificates that are not up to the mark. eg., self-signed
certificates. For those, show a warning when the account is being
created, and only proceed if the user decides to ignore it. In any
case, save the status of the certificate that was used to create the
account. So an account created with a valid certificate will never
work with an invalid one, and one created with an invalid certificate
will not throw any further warnings.
Fixes: CVE-2013-0240 |
//! Compute the arccosine of each pixel value \newinstance.
CImg<Tfloat> get_acos() const {
return CImg<Tfloat>(*this,false).acos(); | 0 | [
"CWE-125"
]
| CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 314,776,298,751,068,300,000,000,000,000,000,000,000 | 3 | Fix other issues in 'CImg<T>::load_bmp()'. |
/* try_dlopen:
   Core of lt_dlopen: resolve FILENAME (optionally with extension EXT
   appended) to a module handle in *PHANDLE, honouring ADVISE hints.
   A NULL FILENAME opens the running program itself.  Handles preloaded
   modules, libtool .la archives and plain native modules.  Returns the
   number of errors (0 on success); on success the handle is registered
   on the global `handles` list. */
try_dlopen (lt_dlhandle *phandle, const char *filename, const char *ext,
            lt_dladvise advise)
{
  const char *saved_error = 0;
  char *archive_name = 0;
  char *canonical = 0;
  char *base_name = 0;
  char *dir = 0;
  char *name = 0;
  char *attempt = 0;
  int errors = 0;
  lt_dlhandle newhandle;

  assert (phandle);
  assert (*phandle == 0);

#ifdef LT_DEBUG_LOADERS
  fprintf (stderr, "try_dlopen (%s, %s)\n",
           filename ? filename : "(null)",
           ext ? ext : "(null)");
#endif

  /* Preserve the caller-visible error state across the probing below. */
  LT__GETERROR (saved_error);

  /* dlopen self? */
  if (!filename)
    {
      *phandle = (lt_dlhandle) lt__zalloc (sizeof (struct lt__handle));
      if (*phandle == 0)
        return 1;

      newhandle = *phandle;

      /* lt_dlclose()ing yourself is very bad!  Disallow it.  */
      newhandle->info.is_resident = 1;

      if (tryall_dlopen (&newhandle, 0, advise, 0) != 0)
        {
          FREE (*phandle);
          return 1;
        }

      goto register_handle;
    }

  assert (filename && *filename);

  /* Build the first filename to attempt: FILENAME with EXT appended,
     or a plain copy when no extension was requested. */
  if (ext)
    {
      attempt = MALLOC (char, LT_STRLEN (filename) + LT_STRLEN (ext) + 1);
      if (!attempt)
        return 1;

      sprintf(attempt, "%s%s", filename, ext);
    }
  else
    {
      attempt = lt__strdup (filename);
      if (!attempt)
        return 1;
    }

  /* Doing this immediately allows internal functions to safely
     assume only canonicalized paths are passed.  */
  if (canonicalize_path (attempt, &canonical) != 0)
    {
      ++errors;
      goto cleanup;
    }

  /* If the canonical module name is a path (relative or absolute)
     then split it into a directory part and a name part.  */
  base_name = strrchr (canonical, '/');
  if (base_name)
    {
      size_t dirlen = (1+ base_name) - canonical;

      dir = MALLOC (char, 1+ dirlen);
      if (!dir)
        {
          ++errors;
          goto cleanup;
        }

      strncpy (dir, canonical, dirlen);
      dir[dirlen] = LT_EOS_CHAR;

      ++base_name;
    }
  else
    /* base_name takes over ownership of canonical's buffer. */
    MEMREASSIGN (base_name, canonical);

  assert (base_name && *base_name);

  /* Locate the extension; an absent extension is treated as an empty
     one at the end of base_name. */
  ext = strrchr (base_name, '.');
  if (!ext)
    {
      ext = base_name + LT_STRLEN (base_name);
    }

  /* extract the module name from the file name */
  name = MALLOC (char, ext - base_name + 1);
  if (!name)
    {
      ++errors;
      goto cleanup;
    }

  /* canonicalize the module name: non-alphanumerics become '_' so the
     result is usable as a C symbol prefix. */
  {
    int i;
    for (i = 0; i < ext - base_name; ++i)
      {
        if (isalnum ((unsigned char)(base_name[i])))
          {
            name[i] = base_name[i];
          }
        else
          {
            name[i] = '_';
          }
      }
    name[ext - base_name] = LT_EOS_CHAR;
  }

  /* Before trawling through the filesystem in search of a module,
     check whether we are opening a preloaded module.  */
  if (!dir)
    {
      const lt_dlvtable *vtable = lt_dlloader_find ("lt_preopen");

      if (vtable)
        {
          /* name + "." + libext + NULL */
          archive_name = MALLOC (char, LT_STRLEN (name) + strlen (libext) + 2);
          *phandle = (lt_dlhandle) lt__zalloc (sizeof (struct lt__handle));

          if ((*phandle == NULL) || (archive_name == NULL))
            {
              ++errors;
              goto cleanup;
            }
          newhandle = *phandle;

          /* Preloaded modules are always named according to their old
             archive name.  */
          sprintf (archive_name, "%s.%s", name, libext);

          if (tryall_dlopen (&newhandle, archive_name, advise, vtable) == 0)
            {
              goto register_handle;
            }

          /* If we're still here, there was no matching preloaded module,
             so put things back as we found them, and continue searching.  */
          FREE (*phandle);
          newhandle = NULL;
        }
    }

  /* If we are allowing only preloaded modules, and we didn't find
     anything yet, give up on the search here.  */
  if (advise && advise->try_preload_only)
    {
      goto cleanup;
    }

  /* Check whether we are opening a libtool module (.la extension).  */
  if (ext && streq (ext, archive_ext))
    {
      /* this seems to be a libtool module */
      FILE *file = 0;
      char *dlname = 0;
      char *old_name = 0;
      char *libdir = 0;
      char *deplibs = 0;

      /* if we can't find the installed flag, it is probably an
         installed libtool archive, produced with an old version
         of libtool */
      int installed = 1;

      /* Now try to open the .la file.  If there is no directory name
         component, try to find it first in user_search_path and then other
         prescribed paths.  Otherwise (or in any case if the module was not
         yet found) try opening just the module name as passed.  */
      if (!dir)
        {
          const char *search_path = user_search_path;

          if (search_path)
            file = find_file (user_search_path, base_name, &dir);

          if (!file)
            {
              search_path = getenv (LTDL_SEARCHPATH_VAR);
              if (search_path)
                file = find_file (search_path, base_name, &dir);
            }

#if defined(LT_MODULE_PATH_VAR)
          if (!file)
            {
              search_path = getenv (LT_MODULE_PATH_VAR);
              if (search_path)
                file = find_file (search_path, base_name, &dir);
            }
#endif
#if defined(LT_DLSEARCH_PATH)
          if (!file && *sys_dlsearch_path)
            {
              file = find_file (sys_dlsearch_path, base_name, &dir);
            }
#endif
        }
      else
        {
          file = fopen (attempt, LT_READTEXT_MODE);
        }

      /* If we didn't find the file by now, it really isn't there.  Set
         the status flag, and bail out.  */
      if (!file)
        {
          LT__SETERROR (FILE_NOT_FOUND);
          ++errors;
          goto cleanup;
        }

      /* read the .la file */
      if (parse_dotla_file(file, &dlname, &libdir, &deplibs,
                           &old_name, &installed) != 0)
        ++errors;

      fclose (file);

      /* allocate the handle */
      *phandle = (lt_dlhandle) lt__zalloc (sizeof (struct lt__handle));
      if (*phandle == 0)
        ++errors;

      if (errors)
        {
          FREE (dlname);
          FREE (old_name);
          FREE (libdir);
          FREE (deplibs);
          FREE (*phandle);
          goto cleanup;
        }

      assert (*phandle);

      if (load_deplibs (*phandle, deplibs) == 0)
        {
          newhandle = *phandle;
          /* find_module may replace newhandle */
          if (find_module (&newhandle, dir, libdir, dlname, old_name,
                           installed, advise))
            {
              unload_deplibs (*phandle);
              ++errors;
            }
        }
      else
        {
          ++errors;
        }

      FREE (dlname);
      FREE (old_name);
      FREE (libdir);
      FREE (deplibs);

      if (errors)
        {
          FREE (*phandle);
          goto cleanup;
        }

      if (*phandle != newhandle)
        {
          unload_deplibs (*phandle);
        }
    }
  else
    {
      /* not a libtool module */
      *phandle = (lt_dlhandle) lt__zalloc (sizeof (struct lt__handle));
      if (*phandle == 0)
        {
          ++errors;
          goto cleanup;
        }

      newhandle = *phandle;

      /* If the module has no directory name component, try to find it
         first in user_search_path and then other prescribed paths.
         Otherwise (or in any case if the module was not yet found) try
         opening just the module name as passed.  */
      if ((dir || (!find_handle (user_search_path, base_name,
                                 &newhandle, advise)
                   && !find_handle (getenv (LTDL_SEARCHPATH_VAR), base_name,
                                    &newhandle, advise)
#if defined(LT_MODULE_PATH_VAR)
                   && !find_handle (getenv (LT_MODULE_PATH_VAR), base_name,
                                    &newhandle, advise)
#endif
#if defined(LT_DLSEARCH_PATH)
                   && !find_handle (sys_dlsearch_path, base_name,
                                    &newhandle, advise)
#endif
                   )))
        {
          if (tryall_dlopen (&newhandle, attempt, advise, 0) != 0)
            {
              newhandle = NULL;
            }
        }

      if (!newhandle)
        {
          FREE (*phandle);
          ++errors;
          goto cleanup;
        }
    }

 register_handle:
  /* Hand ownership of the loaded handle to *phandle and, on first open,
     put it on the global handle list with a reference count of 1. */
  MEMREASSIGN (*phandle, newhandle);

  if ((*phandle)->info.ref_count == 0)
    {
      (*phandle)->info.ref_count = 1;
      MEMREASSIGN ((*phandle)->info.name, name);

      (*phandle)->next = handles;
      handles = *phandle;
    }

  /* Restore the error state saved on entry. */
  LT__SETERRORSTR (saved_error);

 cleanup:
  FREE (dir);
  FREE (attempt);
  FREE (name);
  if (!canonical)               /* was MEMREASSIGNed */
    FREE (base_name);
  FREE (canonical);

  FREE (archive_name);

  return errors;
}
| libtool | e91f7b960032074a55fc91273c1917e3082b5338 | 231,937,319,562,821,930,000,000,000,000,000,000,000 | 354 | Don't load module.la from current directory by default.
* libltdl/ltdl.c (try_dlopen): Do not attempt to load an
unqualified module.la file from the current directory (by
default) since doing so is insecure and is not compliant with
the documentation.
* tests/testsuite.at: Qualify access to module.la file in
current directory so that test passes. |
/* Compute the total byte length of a LYB-encoded data blob by walking its
 * structure: the "lyb" magic, one header byte, the used-models list, the
 * chunked subtree data, and a terminating zero byte.  Returns the length
 * in bytes, or -1 when data is NULL or the magic number does not match.
 *
 * NOTE(review): this walk performs no bounds checking whatsoever — it
 * assumes `data` is a complete, well-formed LYB dump.  Truncated or
 * malformed input makes the pointer run past the end of the buffer
 * (out-of-bounds reads).  The API takes no buffer size, so callers must
 * guarantee validity — TODO confirm/document this contract at the caller. */
lyd_lyb_data_length(const char *data)
{
    const char *ptr;
    uint16_t i, mod_count, str_len;
    uint8_t tmp_buf[2];
    LYB_META meta;

    if (!data) {
        return -1;
    }

    ptr = data;

    /* magic number: the 3 bytes "lyb" */
    if ((ptr[0] != 'l') || (ptr[1] != 'y') || (ptr[2] != 'b')) {
        return -1;
    }
    ptr += 3;

    /* header: one flags byte, skipped */
    ++ptr;

    /* models: little-endian 16-bit model count ... */
    memcpy(tmp_buf, ptr, 2);
    ptr += 2;
    mod_count = tmp_buf[0] | (tmp_buf[1] << 8);

    /* ... followed by (name-length, name, 2-byte revision) per model */
    for (i = 0; i < mod_count; ++i) {
        /* model name: little-endian 16-bit length, then the name bytes */
        memcpy(tmp_buf, ptr, 2);
        ptr += 2;
        str_len = tmp_buf[0] | (tmp_buf[1] << 8);
        ptr += str_len;

        /* revision */
        ptr += 2;
    }

    if (ptr[0]) {
        /* subtrees: chunks prefixed by LYB_META (chunk size byte followed
         * by an inner-chunk count byte); a full-size chunk (LYB_SIZE_MAX)
         * signals continuation in the next chunk */
        do {
            memcpy(&meta, ptr, LYB_META_BYTES);
            ptr += LYB_META_BYTES;

            /* read whole subtree (chunk size) */
            ptr += *((uint8_t *)&meta);
            /* skip inner chunks (inner chunk count) */
            ptr += *(((uint8_t *)&meta) + LYB_SIZE_BYTES) * LYB_META_BYTES;
        } while ((*((uint8_t *)&meta) == LYB_SIZE_MAX) || ptr[0]);
    }

    /* ending zero */
    ++ptr;

    return ptr - data;
}
"CWE-119"
]
| libyang | 32fb4993bc8bb49e93e84016af3c10ea53964be5 | 182,150,359,994,137,640,000,000,000,000,000,000,000 | 57 | schema tree BUGFIX do not check features while still resolving schema
Fixes #723 |
/* Menu callback for Edit->Clear: dispatch to the generic clear
 * implementation for the font view stashed in the window's user data. */
static void FVMenuClear(GWindow gw, struct gmenuitem *UNUSED(mi), GEvent *UNUSED(e)) {
    FVClear( (FontViewBase *) GDrawGetUserData(gw) );
}
"CWE-119",
"CWE-787"
]
| fontforge | 626f751752875a0ddd74b9e217b6f4828713573c | 8,372,594,998,219,005,000,000,000,000,000,000,000 | 4 | Warn users before discarding their unsaved scripts (#3852)
* Warn users before discarding their unsaved scripts
This closes #3846. |
gs_window_real_hide (GtkWidget *widget)
{
        GSWindow *window = GS_WINDOW (widget);

        /* Stop snooping X events and cancel the popup watchdog before
           the window disappears. */
        gdk_window_remove_filter (NULL, (GdkFilterFunc) xevent_filter, window);
        remove_watchdog_timer (window);

        /* Chain up to the parent class's hide implementation, if any. */
        if (GTK_WIDGET_CLASS (gs_window_parent_class)->hide != NULL) {
                GTK_WIDGET_CLASS (gs_window_parent_class)->hide (widget);
        }
}
| gnome-screensaver | a5f66339be6719c2b8fc478a1d5fc6545297d950 | 101,501,555,791,190,300,000,000,000,000,000,000,000 | 14 | Ensure keyboard grab and unlock dialog exist after monitor removal
gnome-screensaver currently doesn't deal with monitors getting
removed properly. If the unlock dialog is on the removed monitor
then the unlock dialog and its associated keyboard grab are not
moved to an existing monitor when the monitor removal is processed.
This means that users can gain access to the locked system by placing
the mouse pointer on an external monitor and then disconnect the
external monitor.
CVE-2010-0414
https://bugzilla.gnome.org/show_bug.cgi?id=609337 |
/* Install the application callback invoked for each <!ELEMENT ...>
   declaration encountered while parsing the DTD.  Passing NULL removes
   any previously set handler.
   NOTE(review): `elementDeclHandler` is presumably an expat macro
   expanding to a field of `parser` (expat hides parser-struct access
   behind such macros), so `parser` is not actually unused — confirm
   against the macro definitions earlier in this file. */
XML_SetElementDeclHandler(XML_Parser parser,
                          XML_ElementDeclHandler eldecl)
{
  elementDeclHandler = eldecl;
}
"CWE-119"
]
| libexpat | ba0f9c3b40c264b8dd392e02a7a060a8fa54f032 | 210,668,412,658,247,360,000,000,000,000,000,000,000 | 5 | CVE-2015-1283 Sanity check size calculations. r=peterv, a=abillings
https://sourceforge.net/p/expat/bugs/528/ |
/*
 * Create a new ppp unit (network interface) in namespace @net.
 *
 * @unit < 0 requests automatic unit-number allocation; otherwise the
 * specific number is claimed (-EEXIST if already taken — pppd then
 * retries asking for a new one).  @file becomes the owning /dev/ppp
 * instance.  On success returns the new unit and sets *retp to 0;
 * on failure returns NULL with the error code in *retp.
 */
static struct ppp *ppp_create_interface(struct net *net, int unit,
					struct file *file, int *retp)
{
	struct ppp *ppp;
	struct ppp_net *pn;
	struct net_device *dev = NULL;
	int ret = -ENOMEM;
	int i;

	/* netdev with struct ppp as private data; name filled in below */
	dev = alloc_netdev(sizeof(struct ppp), "", NET_NAME_ENUM, ppp_setup);
	if (!dev)
		goto out1;

	pn = ppp_pernet(net);

	ppp = netdev_priv(dev);
	ppp->dev = dev;
	ppp->mru = PPP_MRU;
	init_ppp_file(&ppp->file, INTERFACE);
	ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */
	ppp->owner = file;
	/* default every network protocol to pass-through */
	for (i = 0; i < NUM_NP; ++i)
		ppp->npmode[i] = NPMODE_PASS;
	INIT_LIST_HEAD(&ppp->channels);
	spin_lock_init(&ppp->rlock);
	spin_lock_init(&ppp->wlock);
#ifdef CONFIG_PPP_MULTILINK
	ppp->minseq = -1;
	skb_queue_head_init(&ppp->mrq);
#endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER
	ppp->pass_filter = NULL;
	ppp->active_filter = NULL;
#endif /* CONFIG_PPP_FILTER */

	/*
	 * drum roll: don't forget to set
	 * the net device is belong to
	 */
	dev_net_set(dev, net);

	rtnl_lock();
	mutex_lock(&pn->all_ppp_mutex);

	if (unit < 0) {
		/* let the idr pick the lowest free unit number */
		unit = unit_get(&pn->units_idr, ppp);
		if (unit < 0) {
			ret = unit;
			goto out2;
		}
	} else {
		ret = -EEXIST;
		if (unit_find(&pn->units_idr, unit))
			goto out2; /* unit already exists */
		/*
		 * if caller need a specified unit number
		 * lets try to satisfy him, otherwise --
		 * he should better ask us for new unit number
		 *
		 * NOTE: yes I know that returning EEXIST it's not
		 * fair but at least pppd will ask us to allocate
		 * new unit in this case so user is happy :)
		 */
		unit = unit_set(&pn->units_idr, ppp, unit);
		if (unit < 0)
			goto out2;
	}

	/* Initialize the new ppp unit */
	ppp->file.index = unit;
	sprintf(dev->name, "ppp%d", unit);

	ret = register_netdevice(dev);
	if (ret != 0) {
		/* registration failed: give back the claimed unit number */
		unit_put(&pn->units_idr, unit);
		netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n",
			   dev->name, ret);
		goto out2;
	}

	/* NOTE(review): this stores a raw netns pointer without taking a
	 * reference — presumably relying on the netdev keeping the netns
	 * alive; verify against ppp channel teardown paths. */
	ppp->ppp_net = net;

	atomic_inc(&ppp_unit_count);
	mutex_unlock(&pn->all_ppp_mutex);
	rtnl_unlock();

	*retp = 0;
	return ppp;

out2:
	mutex_unlock(&pn->all_ppp_mutex);
	rtnl_unlock();
	free_netdev(dev);
out1:
	*retp = ret;
	return NULL;
}
"CWE-416"
]
| linux | 1f461dcdd296eecedaffffc6bae2bfa90bd7eb89 | 237,096,503,577,630,520,000,000,000,000,000,000,000 | 97 | ppp: take reference on channels netns
Let channels hold a reference on their network namespace.
Some channel types, like ppp_async and ppp_synctty, can have their
userspace controller running in a different namespace. Therefore they
can't rely on them to preclude their netns from being removed from
under them.
==================================================================
BUG: KASAN: use-after-free in ppp_unregister_channel+0x372/0x3a0 at
addr ffff880064e217e0
Read of size 8 by task syz-executor/11581
=============================================================================
BUG net_namespace (Not tainted): kasan: bad access detected
-----------------------------------------------------------------------------
Disabling lock debugging due to kernel taint
INFO: Allocated in copy_net_ns+0x6b/0x1a0 age=92569 cpu=3 pid=6906
[< none >] ___slab_alloc+0x4c7/0x500 kernel/mm/slub.c:2440
[< none >] __slab_alloc+0x4c/0x90 kernel/mm/slub.c:2469
[< inline >] slab_alloc_node kernel/mm/slub.c:2532
[< inline >] slab_alloc kernel/mm/slub.c:2574
[< none >] kmem_cache_alloc+0x23a/0x2b0 kernel/mm/slub.c:2579
[< inline >] kmem_cache_zalloc kernel/include/linux/slab.h:597
[< inline >] net_alloc kernel/net/core/net_namespace.c:325
[< none >] copy_net_ns+0x6b/0x1a0 kernel/net/core/net_namespace.c:360
[< none >] create_new_namespaces+0x2f6/0x610 kernel/kernel/nsproxy.c:95
[< none >] copy_namespaces+0x297/0x320 kernel/kernel/nsproxy.c:150
[< none >] copy_process.part.35+0x1bf4/0x5760 kernel/kernel/fork.c:1451
[< inline >] copy_process kernel/kernel/fork.c:1274
[< none >] _do_fork+0x1bc/0xcb0 kernel/kernel/fork.c:1723
[< inline >] SYSC_clone kernel/kernel/fork.c:1832
[< none >] SyS_clone+0x37/0x50 kernel/kernel/fork.c:1826
[< none >] entry_SYSCALL_64_fastpath+0x16/0x7a kernel/arch/x86/entry/entry_64.S:185
INFO: Freed in net_drop_ns+0x67/0x80 age=575 cpu=2 pid=2631
[< none >] __slab_free+0x1fc/0x320 kernel/mm/slub.c:2650
[< inline >] slab_free kernel/mm/slub.c:2805
[< none >] kmem_cache_free+0x2a0/0x330 kernel/mm/slub.c:2814
[< inline >] net_free kernel/net/core/net_namespace.c:341
[< none >] net_drop_ns+0x67/0x80 kernel/net/core/net_namespace.c:348
[< none >] cleanup_net+0x4e5/0x600 kernel/net/core/net_namespace.c:448
[< none >] process_one_work+0x794/0x1440 kernel/kernel/workqueue.c:2036
[< none >] worker_thread+0xdb/0xfc0 kernel/kernel/workqueue.c:2170
[< none >] kthread+0x23f/0x2d0 kernel/drivers/block/aoe/aoecmd.c:1303
[< none >] ret_from_fork+0x3f/0x70 kernel/arch/x86/entry/entry_64.S:468
INFO: Slab 0xffffea0001938800 objects=3 used=0 fp=0xffff880064e20000
flags=0x5fffc0000004080
INFO: Object 0xffff880064e20000 @offset=0 fp=0xffff880064e24200
CPU: 1 PID: 11581 Comm: syz-executor Tainted: G B 4.4.0+
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS
rel-1.8.2-0-g33fbe13 by qemu-project.org 04/01/2014
00000000ffffffff ffff8800662c7790 ffffffff8292049d ffff88003e36a300
ffff880064e20000 ffff880064e20000 ffff8800662c77c0 ffffffff816f2054
ffff88003e36a300 ffffea0001938800 ffff880064e20000 0000000000000000
Call Trace:
[< inline >] __dump_stack kernel/lib/dump_stack.c:15
[<ffffffff8292049d>] dump_stack+0x6f/0xa2 kernel/lib/dump_stack.c:50
[<ffffffff816f2054>] print_trailer+0xf4/0x150 kernel/mm/slub.c:654
[<ffffffff816f875f>] object_err+0x2f/0x40 kernel/mm/slub.c:661
[< inline >] print_address_description kernel/mm/kasan/report.c:138
[<ffffffff816fb0c5>] kasan_report_error+0x215/0x530 kernel/mm/kasan/report.c:236
[< inline >] kasan_report kernel/mm/kasan/report.c:259
[<ffffffff816fb4de>] __asan_report_load8_noabort+0x3e/0x40 kernel/mm/kasan/report.c:280
[< inline >] ? ppp_pernet kernel/include/linux/compiler.h:218
[<ffffffff83ad71b2>] ? ppp_unregister_channel+0x372/0x3a0 kernel/drivers/net/ppp/ppp_generic.c:2392
[< inline >] ppp_pernet kernel/include/linux/compiler.h:218
[<ffffffff83ad71b2>] ppp_unregister_channel+0x372/0x3a0 kernel/drivers/net/ppp/ppp_generic.c:2392
[< inline >] ? ppp_pernet kernel/drivers/net/ppp/ppp_generic.c:293
[<ffffffff83ad6f26>] ? ppp_unregister_channel+0xe6/0x3a0 kernel/drivers/net/ppp/ppp_generic.c:2392
[<ffffffff83ae18f3>] ppp_asynctty_close+0xa3/0x130 kernel/drivers/net/ppp/ppp_async.c:241
[<ffffffff83ae1850>] ? async_lcp_peek+0x5b0/0x5b0 kernel/drivers/net/ppp/ppp_async.c:1000
[<ffffffff82c33239>] tty_ldisc_close.isra.1+0x99/0xe0 kernel/drivers/tty/tty_ldisc.c:478
[<ffffffff82c332c0>] tty_ldisc_kill+0x40/0x170 kernel/drivers/tty/tty_ldisc.c:744
[<ffffffff82c34943>] tty_ldisc_release+0x1b3/0x260 kernel/drivers/tty/tty_ldisc.c:772
[<ffffffff82c1ef21>] tty_release+0xac1/0x13e0 kernel/drivers/tty/tty_io.c:1901
[<ffffffff82c1e460>] ? release_tty+0x320/0x320 kernel/drivers/tty/tty_io.c:1688
[<ffffffff8174de36>] __fput+0x236/0x780 kernel/fs/file_table.c:208
[<ffffffff8174e405>] ____fput+0x15/0x20 kernel/fs/file_table.c:244
[<ffffffff813595ab>] task_work_run+0x16b/0x200 kernel/kernel/task_work.c:115
[< inline >] exit_task_work kernel/include/linux/task_work.h:21
[<ffffffff81307105>] do_exit+0x8b5/0x2c60 kernel/kernel/exit.c:750
[<ffffffff813fdd20>] ? debug_check_no_locks_freed+0x290/0x290 kernel/kernel/locking/lockdep.c:4123
[<ffffffff81306850>] ? mm_update_next_owner+0x6f0/0x6f0 kernel/kernel/exit.c:357
[<ffffffff813215e6>] ? __dequeue_signal+0x136/0x470 kernel/kernel/signal.c:550
[<ffffffff8132067b>] ? recalc_sigpending_tsk+0x13b/0x180 kernel/kernel/signal.c:145
[<ffffffff81309628>] do_group_exit+0x108/0x330 kernel/kernel/exit.c:880
[<ffffffff8132b9d4>] get_signal+0x5e4/0x14f0 kernel/kernel/signal.c:2307
[< inline >] ? kretprobe_table_lock kernel/kernel/kprobes.c:1113
[<ffffffff8151d355>] ? kprobe_flush_task+0xb5/0x450 kernel/kernel/kprobes.c:1158
[<ffffffff8115f7d3>] do_signal+0x83/0x1c90 kernel/arch/x86/kernel/signal.c:712
[<ffffffff8151d2a0>] ? recycle_rp_inst+0x310/0x310 kernel/include/linux/list.h:655
[<ffffffff8115f750>] ? setup_sigcontext+0x780/0x780 kernel/arch/x86/kernel/signal.c:165
[<ffffffff81380864>] ? finish_task_switch+0x424/0x5f0 kernel/kernel/sched/core.c:2692
[< inline >] ? finish_lock_switch kernel/kernel/sched/sched.h:1099
[<ffffffff81380560>] ? finish_task_switch+0x120/0x5f0 kernel/kernel/sched/core.c:2678
[< inline >] ? context_switch kernel/kernel/sched/core.c:2807
[<ffffffff85d794e9>] ? __schedule+0x919/0x1bd0 kernel/kernel/sched/core.c:3283
[<ffffffff81003901>] exit_to_usermode_loop+0xf1/0x1a0 kernel/arch/x86/entry/common.c:247
[< inline >] prepare_exit_to_usermode kernel/arch/x86/entry/common.c:282
[<ffffffff810062ef>] syscall_return_slowpath+0x19f/0x210 kernel/arch/x86/entry/common.c:344
[<ffffffff85d88022>] int_ret_from_sys_call+0x25/0x9f kernel/arch/x86/entry/entry_64.S:281
Memory state around the buggy address:
ffff880064e21680: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff880064e21700: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
>ffff880064e21780: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
^
ffff880064e21800: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff880064e21880: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
==================================================================
Fixes: 273ec51dd7ce ("net: ppp_generic - introduce net-namespace functionality v2")
Reported-by: Baozeng Ding <[email protected]>
Signed-off-by: Guillaume Nault <[email protected]>
Reviewed-by: Cyrill Gorcunov <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
/* Lua binding: push the nDPI breed name of the protocol id given as the
 * first Lua argument, or a fixed label for the pseudo-protocol
 * HOST_FAMILY_ID.  Pushes nil when no interface is bound to this Lua
 * context.  Returns CONST_LUA_OK on success, CONST_LUA_ERROR if the
 * argument is not a number.
 *
 * Fix: removed the unused local `nDPIStats stats;` left over from a
 * copy-paste of a sibling binding. */
static int ntop_get_ndpi_protocol_breed(lua_State* vm) {
  NetworkInterfaceView *ntop_interface = getCurrentInterface(vm);
  int proto;

  ntop->getTrace()->traceEvent(TRACE_INFO, "%s() called", __FUNCTION__);

  /* First Lua argument must be a numeric protocol id. */
  if(ntop_lua_check(vm, __FUNCTION__, 1, LUA_TNUMBER)) return(CONST_LUA_ERROR);
  proto = (u_int32_t)lua_tonumber(vm, 1);

  if(proto == HOST_FAMILY_ID)
    lua_pushstring(vm, "Unrated-to-Host Contact");
  else {
    if(ntop_interface)
      lua_pushstring(vm, ntop_interface->getFirst()->get_ndpi_proto_breed_name(proto));
    else
      lua_pushnil(vm); /* no interface selected for this context */
  }

  return(CONST_LUA_OK);
}
"CWE-254"
]
| ntopng | 2e0620be3410f5e22c9aa47e261bc5a12be692c6 | 196,556,603,044,887,420,000,000,000,000,000,000,000 | 21 | Added security fix to avoid escalating privileges to non-privileged users
Many thanks to Dolev Farhi for reporting it |
/* Select the global text direction used for shaping (an hb_direction_t
 * value).  Returns 0 when HarfBuzz support is compiled in, -1 otherwise
 * (the argument is ignored in that case). */
int TTF_SetDirection(int direction) /* hb_direction_t */
{
#if !TTF_USE_HARFBUZZ
    (void) direction;
    return -1;
#else
    g_hb_direction = direction;
    return 0;
#endif
}
"CWE-190",
"CWE-787"
]
| SDL_ttf | db1b41ab8bde6723c24b866e466cad78c2fa0448 | 33,962,135,869,533,550,000,000,000,000,000,000,000 | 10 | More integer overflow (see bug #187)
Make sure that 'width + alignment' doesn't overflow, otherwise
it could create a SDL_Surface of 'width' but with wrong 'pitch' |
const CImg<T>& _display_graph(CImgDisplay &disp, const char *const title=0,
const unsigned int plot_type=1, const unsigned int vertex_type=1,
const char *const labelx=0, const double xmin=0, const double xmax=0,
const char *const labely=0, const double ymin=0, const double ymax=0,
const bool exit_on_anykey=false) const {
if (is_empty())
throw CImgInstanceException(_cimg_instance
"display_graph(): Empty instance.",
cimg_instance);
if (!disp) disp.assign(cimg_fitscreen(CImgDisplay::screen_width()/2,CImgDisplay::screen_height()/2,1),0,0).
set_title(title?"%s":"CImg<%s>",title?title:pixel_type());
const ulongT siz = (ulongT)_width*_height*_depth, siz1 = std::max((ulongT)1,siz - 1);
const unsigned int old_normalization = disp.normalization();
disp.show().flush()._normalization = 0;
double y0 = ymin, y1 = ymax, nxmin = xmin, nxmax = xmax;
if (nxmin==nxmax) { nxmin = 0; nxmax = siz1; }
int x0 = 0, x1 = width()*height()*depth() - 1, key = 0;
for (bool reset_view = true; !key && !disp.is_closed(); ) {
if (reset_view) { x0 = 0; x1 = width()*height()*depth() - 1; y0 = ymin; y1 = ymax; reset_view = false; }
CImg<T> zoom(x1 - x0 + 1,1,1,spectrum());
cimg_forC(*this,c) zoom.get_shared_channel(c) = CImg<T>(data(x0,0,0,c),x1 - x0 + 1,1,1,1,true);
if (y0==y1) { y0 = zoom.min_max(y1); const double dy = y1 - y0; y0-=dy/20; y1+=dy/20; }
if (y0==y1) { --y0; ++y1; }
const CImg<intT> selection = zoom.get_select_graph(disp,plot_type,vertex_type,
labelx,
nxmin + x0*(nxmax - nxmin)/siz1,
nxmin + x1*(nxmax - nxmin)/siz1,
labely,y0,y1,true);
const int mouse_x = disp.mouse_x(), mouse_y = disp.mouse_y();
if (selection[0]>=0) {
if (selection[2]<0) reset_view = true;
else {
x1 = x0 + selection[2]; x0+=selection[0];
if (selection[1]>=0 && selection[3]>=0) {
y0 = y1 - selection[3]*(y1 - y0)/(disp.height() - 32);
y1-=selection[1]*(y1 - y0)/(disp.height() - 32);
}
}
} else {
bool go_in = false, go_out = false, go_left = false, go_right = false, go_up = false, go_down = false;
switch (key = (int)disp.key()) {
case cimg::keyHOME : reset_view = true; key = 0; disp.set_key(); break;
case cimg::keyPADADD : go_in = true; go_out = false; key = 0; disp.set_key(); break;
case cimg::keyPADSUB : go_out = true; go_in = false; key = 0; disp.set_key(); break;
case cimg::keyARROWLEFT : case cimg::keyPAD4 : go_left = true; go_right = false; key = 0; disp.set_key();
break;
case cimg::keyARROWRIGHT : case cimg::keyPAD6 : go_right = true; go_left = false; key = 0; disp.set_key();
break;
case cimg::keyARROWUP : case cimg::keyPAD8 : go_up = true; go_down = false; key = 0; disp.set_key(); break;
case cimg::keyARROWDOWN : case cimg::keyPAD2 : go_down = true; go_up = false; key = 0; disp.set_key(); break;
case cimg::keyPAD7 : go_left = true; go_up = true; key = 0; disp.set_key(); break;
case cimg::keyPAD9 : go_right = true; go_up = true; key = 0; disp.set_key(); break;
case cimg::keyPAD1 : go_left = true; go_down = true; key = 0; disp.set_key(); break;
case cimg::keyPAD3 : go_right = true; go_down = true; key = 0; disp.set_key(); break;
}
if (disp.wheel()) {
if (disp.is_keyCTRLLEFT() || disp.is_keyCTRLRIGHT()) go_up = !(go_down = disp.wheel()<0);
else if (disp.is_keySHIFTLEFT() || disp.is_keySHIFTRIGHT()) go_left = !(go_right = disp.wheel()>0);
else go_out = !(go_in = disp.wheel()>0);
key = 0;
}
if (go_in) {
const int
xsiz = x1 - x0,
mx = (mouse_x - 16)*xsiz/(disp.width() - 32),
cx = x0 + cimg::cut(mx,0,xsiz);
if (x1 - x0>4) {
x0 = cx - 7*(cx - x0)/8; x1 = cx + 7*(x1 - cx)/8;
if (disp.is_keyCTRLLEFT() || disp.is_keyCTRLRIGHT()) {
const double
ysiz = y1 - y0,
my = (mouse_y - 16)*ysiz/(disp.height() - 32),
cy = y1 - cimg::cut(my,0.,ysiz);
y0 = cy - 7*(cy - y0)/8; y1 = cy + 7*(y1 - cy)/8;
} else y0 = y1 = 0;
}
}
if (go_out) {
if (x0>0 || x1<(int)siz1) {
const int delta_x = (x1 - x0)/8, ndelta_x = delta_x?delta_x:(siz>1);
const double ndelta_y = (y1 - y0)/8;
x0-=ndelta_x; x1+=ndelta_x;
y0-=ndelta_y; y1+=ndelta_y;
if (x0<0) { x1-=x0; x0 = 0; if (x1>=(int)siz) x1 = (int)siz1; }
if (x1>=(int)siz) { x0-=(x1 - siz1); x1 = (int)siz1; if (x0<0) x0 = 0; }
}
}
if (go_left) {
const int delta = (x1 - x0)/5, ndelta = delta?delta:1;
if (x0 - ndelta>=0) { x0-=ndelta; x1-=ndelta; }
else { x1-=x0; x0 = 0; }
go_left = false;
}
if (go_right) {
const int delta = (x1 - x0)/5, ndelta = delta?delta:1;
if (x1 + ndelta<(int)siz) { x0+=ndelta; x1+=ndelta; }
else { x0+=(siz1 - x1); x1 = (int)siz1; }
go_right = false;
}
if (go_up) {
const double delta = (y1 - y0)/10, ndelta = delta?delta:1;
y0+=ndelta; y1+=ndelta;
go_up = false;
}
if (go_down) {
const double delta = (y1 - y0)/10, ndelta = delta?delta:1;
y0-=ndelta; y1-=ndelta;
go_down = false;
}
}
if (!exit_on_anykey && key && key!=(int)cimg::keyESC &&
(key!=(int)cimg::keyW || (!disp.is_keyCTRLLEFT() && !disp.is_keyCTRLRIGHT()))) {
disp.set_key(key,false);
key = 0;
}
}
disp._normalization = old_normalization;
return *this; | 0 | [
"CWE-119",
"CWE-787"
]
| CImg | ac8003393569aba51048c9d67e1491559877b1d1 | 293,053,983,454,589,920,000,000,000,000,000,000,000 | 123 | . |
/*
 * Update the CFS bandwidth limits (period/quota, both in ns) for a task
 * group and propagate the new enable state to every online CPU's cfs_rq.
 * Returns 0 on success, -EINVAL for invalid input, or the result of the
 * schedulability check.
 */
static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
{
	int i, ret = 0, runtime_enabled, runtime_was_enabled;
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
	/* The root group's bandwidth cannot be limited. */
	if (tg == &root_task_group)
		return -EINVAL;
	/*
	 * Ensure we have at least some amount of bandwidth every period.
	 * This is to prevent reaching a state of large arrears when
	 * throttled via entity_tick() resulting in prolonged exit starvation.
	 */
	if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
		return -EINVAL;
	/*
	 * Likewise, bound things on the other side by preventing insane
	 * quota periods.  This also allows us to normalize in computing
	 * quota feasibility.
	 */
	if (period > max_cfs_quota_period)
		return -EINVAL;
	/*
	 * Prevent race between setting of cfs_rq->runtime_enabled and
	 * unthrottle_offline_cfs_rqs().
	 */
	get_online_cpus();
	mutex_lock(&cfs_constraints_mutex);
	/* Verify the hierarchy remains schedulable under the new limits. */
	ret = __cfs_schedulable(tg, period, quota);
	if (ret)
		goto out_unlock;
	runtime_enabled = quota != RUNTIME_INF;
	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
	/*
	 * If we need to toggle cfs_bandwidth_used, off->on must occur
	 * before making related changes, and on->off must occur afterwards
	 */
	if (runtime_enabled && !runtime_was_enabled)
		cfs_bandwidth_usage_inc();
	raw_spin_lock_irq(&cfs_b->lock);
	cfs_b->period = ns_to_ktime(period);
	cfs_b->quota = quota;
	__refill_cfs_bandwidth_runtime(cfs_b);
	/* restart the period timer (if active) to handle new period expiry */
	if (runtime_enabled)
		start_cfs_bandwidth(cfs_b);
	raw_spin_unlock_irq(&cfs_b->lock);
	/* Push the new enable state down to each online CPU's cfs_rq. */
	for_each_online_cpu(i) {
		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
		struct rq *rq = cfs_rq->rq;
		raw_spin_lock_irq(&rq->lock);
		cfs_rq->runtime_enabled = runtime_enabled;
		cfs_rq->runtime_remaining = 0;
		/* Runqueues throttled under the old limits get a fresh start. */
		if (cfs_rq->throttled)
			unthrottle_cfs_rq(cfs_rq);
		raw_spin_unlock_irq(&rq->lock);
	}
	if (runtime_was_enabled && !runtime_enabled)
		cfs_bandwidth_usage_dec();
out_unlock:
	mutex_unlock(&cfs_constraints_mutex);
	put_online_cpus();
	return ret;
}
"CWE-119"
]
| linux | 29d6455178a09e1dc340380c582b13356227e8df | 123,583,851,642,524,210,000,000,000,000,000,000,000 | 72 | sched: panic on corrupted stack end
Until now, hitting this BUG_ON caused a recursive oops (because oops
handling involves do_exit(), which calls into the scheduler, which in
turn raises an oops), which caused stuff below the stack to be
overwritten until a panic happened (e.g. via an oops in interrupt
context, caused by the overwritten CPU index in the thread_info).
Just panic directly.
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
PJ_DEF(int) pjsua_handle_events(unsigned msec_timeout)
{
#if defined(PJ_SYMBIAN) && PJ_SYMBIAN != 0
return pj_symbianos_poll(-1, msec_timeout);
#else
unsigned count = 0;
pj_time_val tv;
pj_status_t status;
tv.sec = 0;
tv.msec = msec_timeout;
pj_time_val_normalize(&tv);
status = pjsip_endpt_handle_events2(pjsua_var.endpt, &tv, &count);
if (status != PJ_SUCCESS)
return -status;
return count;
#endif
} | 0 | [
"CWE-120",
"CWE-787"
]
| pjproject | d27f79da11df7bc8bb56c2f291d71e54df8d2c47 | 325,200,508,498,385,950,000,000,000,000,000,000,000 | 25 | Use PJ_ASSERT_RETURN() on pjsip_auth_create_digest() and pjsua_init_tpselector() (#3009)
* Use PJ_ASSERT_RETURN on pjsip_auth_create_digest
* Use PJ_ASSERT_RETURN on pjsua_init_tpselector()
* Fix incorrect check.
* Add return value to pjsip_auth_create_digest() and pjsip_auth_create_digestSHA256()
* Modification based on comments. |
/* Decide whether the alternate secondary drawing order identified by
 * orderType is enabled by the current session settings, then hand the
 * verdict to check_order_activated() for the final activation check.
 * Unknown order types are logged and treated as unsupported. */
static BOOL check_alt_order_supported(wLog* log, rdpSettings* settings, BYTE orderType,
                                      const char* orderName)
{
	BOOL supported;

	switch (orderType)
	{
		/* Offscreen surface orders require offscreen cache support. */
		case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP:
		case ORDER_TYPE_SWITCH_SURFACE:
			supported = (settings->OffscreenSupportLevel != 0);
			break;

		case ORDER_TYPE_CREATE_NINE_GRID_BITMAP:
			supported = settings->DrawNineGridEnabled;
			break;

		case ORDER_TYPE_FRAME_MARKER:
			supported = settings->FrameMarkerCommandEnabled;
			break;

		/* All GDI+ orders hinge on the GDI+ cache setting. */
		case ORDER_TYPE_GDIPLUS_FIRST:
		case ORDER_TYPE_GDIPLUS_NEXT:
		case ORDER_TYPE_GDIPLUS_END:
		case ORDER_TYPE_GDIPLUS_CACHE_FIRST:
		case ORDER_TYPE_GDIPLUS_CACHE_NEXT:
		case ORDER_TYPE_GDIPLUS_CACHE_END:
			supported = settings->DrawGdiPlusCacheEnabled;
			break;

		case ORDER_TYPE_WINDOW:
			supported = (settings->RemoteWndSupportLevel != WINDOW_LEVEL_NOT_SUPPORTED);
			break;

		/* Always available. */
		case ORDER_TYPE_STREAM_BITMAP_FIRST:
		case ORDER_TYPE_STREAM_BITMAP_NEXT:
		case ORDER_TYPE_COMPDESK_FIRST:
			supported = TRUE;
			break;

		default:
			WLog_Print(log, WLOG_WARN, "%s - Alternate Secondary Drawing Order UNKNOWN", orderName);
			supported = FALSE;
			break;
	}

	return check_order_activated(log, settings, orderName, supported);
}
"CWE-415"
]
| FreeRDP | 67c2aa52b2ae0341d469071d1bc8aab91f8d2ed8 | 217,111,307,651,461,060,000,000,000,000,000,000,000 | 47 | Fixed #6013: Check new length is > 0 |
/* If the ACK indicated the peer reneged on previously SACKed data
 * (FLAG_SACK_RENEGING), arm a conservative retransmit timer instead of
 * retransmitting immediately.  Returns true when reneging was detected
 * and the timer was armed, false otherwise. */
static bool tcp_check_sack_reneging(struct sock *sk, int flag)
{
	struct tcp_sock *tp;
	unsigned long delay;

	if (!(flag & FLAG_SACK_RENEGING))
		return false;

	tp = tcp_sk(sk);
	/* Wait roughly RTT/16, but never less than 10ms, before probing. */
	delay = max(usecs_to_jiffies(tp->srtt_us >> 4),
		    msecs_to_jiffies(10));
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  delay, TCP_RTO_MAX);
	return true;
}
"CWE-703",
"CWE-189"
]
| linux | 8b8a321ff72c785ed5e8b4cf6eda20b35d427390 | 141,504,107,173,136,600,000,000,000,000,000,000,000 | 13 | tcp: fix zero cwnd in tcp_cwnd_reduction
Patch 3759824da87b ("tcp: PRR uses CRB mode by default and SS mode
conditionally") introduced a bug that cwnd may become 0 when both
inflight and sndcnt are 0 (cwnd = inflight + sndcnt). This may lead
to a div-by-zero if the connection starts another cwnd reduction
phase by setting tp->prior_cwnd to the current cwnd (0) in
tcp_init_cwnd_reduction().
To prevent this we skip PRR operation when nothing is acked or
sacked. Then cwnd must be positive in all cases as long as ssthresh
is positive:
1) The proportional reduction mode
inflight > ssthresh > 0
2) The reduction bound mode
a) inflight == ssthresh > 0
b) inflight < ssthresh
sndcnt > 0 since newly_acked_sacked > 0 and inflight < ssthresh
Therefore in all cases inflight and sndcnt can not both be 0.
We check invalid tp->prior_cwnd to avoid potential div0 bugs.
In reality this bug is triggered only with a sequence of less common
events. For example, the connection is terminating an ECN-triggered
cwnd reduction with an inflight 0, then it receives reordered/old
ACKs or DSACKs from prior transmission (which acks nothing). Or the
connection is in fast recovery stage that marks everything lost,
but fails to retransmit due to local issues, then receives data
packets from other end which acks nothing.
Fixes: 3759824da87b ("tcp: PRR uses CRB mode by default and SS mode conditionally")
Reported-by: Oleksandr Natalenko <[email protected]>
Signed-off-by: Yuchung Cheng <[email protected]>
Signed-off-by: Neal Cardwell <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
/* bcscale(int scale): set the default number of decimal places used by
 * all subsequent bc math operations.  Negative values are clamped to 0.
 * Always returns TRUE once the argument parses. */
PHP_FUNCTION(bcscale)
{
	long new_scale;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &new_scale) == FAILURE) {
		return;
	}

	/* Fix: compare as long *before* any narrowing.  The previous code
	 * tested `(int)new_scale < 0`, so a large positive long (e.g. 2^31)
	 * wrapped to a negative int and was clamped, while other huge values
	 * whose low 32 bits looked non-negative slipped through and were
	 * silently truncated into bc_precision. */
	BCG(bc_precision) = (new_scale < 0) ? 0 : new_scale;

	RETURN_TRUE;
}
"CWE-20"
]
| php-src | ed52bcb3dcb2e7dbc009ef8c6579fb1276ca73c1 | 19,851,923,768,377,131,000,000,000,000,000,000,000 | 12 | Fix bug #72093: bcpowmod accepts negative scale and corrupts _one_ definition
We cannot modify result since it can be a copy of _zero_ or _one_, etc., and
"copy" in bcmath is just bumping the refcount.
Conflicts:
main/php_version.h |
/*
 * Store the maximum number of records that may be exchanged while a
 * renegotiation request is outstanding before enforcement kicks in.
 * NOTE(review): the meaning of special values (e.g. a negative count
 * disabling enforcement) is decided by the code reading
 * renego_max_records — confirm against the header's documentation.
 */
void ssl_set_renegotiation_enforced( ssl_context *ssl, int max_records )
{
    ssl->renego_max_records = max_records;
}
"CWE-119"
]
| mbedtls | c988f32adde62a169ba340fee0da15aecd40e76e | 136,593,167,398,554,370,000,000,000,000,000,000,000 | 4 | Added max length checking of hostname |
/*
 * Verify that the document's linearization (fast web view) data is
 * usable: the hint tables must parse and every page listed by them must
 * resolve to a valid /Page dictionary.
 *
 * The result is cached in linearizationState:
 *   0 = not checked yet, 1 = valid, 2 = invalid.
 * Returns true only when linearization data exists and checks out.
 */
bool PDFDoc::checkLinearization() {
  if (linearization == nullptr)
    return false;
  /* Cached verdict from a previous call. */
  if (linearizationState == 1)
    return true;
  if (linearizationState == 2)
    return false;
  /* Lazily parse the hint stream the first time we are asked. */
  if (!hints) {
    hints = new Hints(str, linearization, getXRef(), secHdlr);
  }
  if (!hints->isOk()) {
    linearizationState = 2;
    return false;
  }
  /* Every page advertised by the hint tables must be a real Page object. */
  for (int page = 1; page <= linearization->getNumPages(); page++) {
    Ref pageRef;
    pageRef.num = hints->getPageObjectNum(page);
    if (!pageRef.num) {
      linearizationState = 2;
      return false;
    }
    // check for bogus ref - this can happen in corrupted PDF files
    if (pageRef.num < 0 || pageRef.num >= xref->getNumObjects()) {
      linearizationState = 2;
      return false;
    }
    pageRef.gen = xref->getEntry(pageRef.num)->gen;
    Object obj = xref->fetch(pageRef.num, pageRef.gen);
    if (!obj.isDict("Page")) {
      linearizationState = 2;
      return false;
    }
  }
  linearizationState = 1;
  return true;
}
"CWE-20"
]
| poppler | 9fd5ec0e6e5f763b190f2a55ceb5427cfe851d5f | 11,016,114,271,184,260,000,000,000,000,000,000,000 | 39 | PDFDoc::setup: Fix return value
At that point xref can have gone wrong since extractPDFSubtype() can
have caused a reconstruct that broke stuff so instead of unconditionally
returning true, return xref->isOk()
Fixes #706 |
/* Device-manager callback: stop the drive redirection worker thread and
 * release all resources owned by the DRIVE_DEVICE.  Returns
 * ERROR_INVALID_PARAMETER for a NULL device, the Win32 error code if
 * joining the worker thread failed, or the result of drive_free_int(). */
static UINT drive_free(DEVICE* device)
{
	UINT rc;
	DRIVE_DEVICE* drive = (DRIVE_DEVICE*)device;

	if (!drive)
		return ERROR_INVALID_PARAMETER;

	/* Ask the IRP worker to quit, then wait for the thread to exit. */
	if (MessageQueue_PostQuit(drive->IrpQueue, 0) &&
	    (WaitForSingleObject(drive->thread, INFINITE) == WAIT_FAILED))
	{
		rc = GetLastError();
		WLog_ERR(TAG, "WaitForSingleObject failed with error %" PRIu32 "", rc);
		return rc;
	}

	return drive_free_int(drive);
}
"CWE-125"
]
| FreeRDP | 6b485b146a1b9d6ce72dfd7b5f36456c166e7a16 | 36,388,455,068,416,647,000,000,000,000,000,000,000 | 18 | Fixed oob read in irp_write and similar |
/*
 * Write-side driver of the SMTP notification state machine.  Called by
 * the scheduler when the SMTP socket becomes writable (or the write
 * times out): it emits the command for the current FSM stage, then
 * re-registers a read thread to collect the server's reply.
 */
smtp_send_thread(thread_t * thread)
{
	smtp_t *smtp = THREAD_ARG(thread);

	/* On write timeout, abandon the session by issuing QUIT. */
	if (thread->type == THREAD_WRITE_TIMEOUT) {
		log_message(LOG_INFO, "Timeout sending data to remote SMTP server %s."
				    , FMT_SMTP_HOST());
		SMTP_FSM_READ(QUIT, thread, 0);
		return 0;
	}

	/* Emit the command corresponding to the current FSM stage. */
	SMTP_FSM_SEND(smtp->stage, thread);

	/* Handle END command */
	if (smtp->stage == END) {
		SMTP_FSM_READ(QUIT, thread, 0);
		return 0;
	}

	/* Registering next smtp command processing thread */
	if (smtp->stage != ERROR) {
		thread_add_read(thread->master, smtp_read_thread, smtp,
				thread->u.fd, global_data->smtp_connection_to);
		thread_del_write(thread);
	} else {
		/* Send failed: terminate the session with QUIT. */
		log_message(LOG_INFO, "Can not send data to remote SMTP server %s."
				    , FMT_SMTP_HOST());
		SMTP_FSM_READ(QUIT, thread, 0);
	}

	return 0;
}
"CWE-59",
"CWE-61"
]
| keepalived | 04f2d32871bb3b11d7dc024039952f2fe2750306 | 8,719,721,086,356,986,000,000,000,000,000,000,000 | 32 | When opening files for write, ensure they aren't symbolic links
Issue #1048 identified that if, for example, a non-privileged user
created a symbolic link from /etc/keepalived.data to /etc/passwd,
writing to /etc/keepalived.data (which could be invoked via DBus)
would cause /etc/passwd to be overwritten.
This commit stops keepalived writing to pathnames where the ultimate
component is a symbolic link, by setting O_NOFOLLOW whenever opening
a file for writing.
This might break some setups, where, for example, /etc/keepalived.data
was a symbolic link to /home/fred/keepalived.data. If this was the case,
instead create a symbolic link from /home/fred/keepalived.data to
/tmp/keepalived.data, so that the file is still accessible via
/home/fred/keepalived.data.
There doesn't appear to be a way around this backward incompatibility,
since even checking if the pathname is a symbolic link prior to opening
for writing would create a race condition.
Signed-off-by: Quentin Armitage <[email protected]> |
/*
 * Boot-time initcall: register the "wakeup" device class (backing
 * /sys/class/wakeup).  Returns 0 on success, or the errno carried in
 * the ERR_PTR returned by class_create() on failure.
 */
static int __init wakeup_sources_sysfs_init(void)
{
	wakeup_class = class_create(THIS_MODULE, "wakeup");

	return PTR_ERR_OR_ZERO(wakeup_class);
}
"CWE-787"
]
| linux | aa838896d87af561a33ecefea1caa4c15a68bc47 | 118,640,007,965,201,340,000,000,000,000,000,000,000 | 6 | drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions
Convert the various sprintf fmaily calls in sysfs device show functions
to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety.
Done with:
$ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 .
And cocci script:
$ cat sysfs_emit_dev.cocci
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- strcpy(buf, chr);
+ sysfs_emit(buf, chr);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
- len += scnprintf(buf + len, PAGE_SIZE - len,
+ len += sysfs_emit_at(buf, len,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
...
- strcpy(buf, chr);
- return strlen(buf);
+ return sysfs_emit(buf, chr);
}
Signed-off-by: Joe Perches <[email protected]>
Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
/*
 * Process all pending vcpu requests before (re-)entering SIE.
 * Each handled request jumps back to 'retry' because servicing one
 * request may raise another.  Returns 0 when no request remains, or a
 * negative error code if re-arming the prefix-page ipte notifier failed.
 */
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	if (!kvm_request_pending(vcpu))
		return 0;
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_mprotect_notify(vcpu->arch.gmap,
					  kvm_s390_get_prefix(vcpu),
					  PAGE_SIZE * 2, PROT_WRITE);
		if (rc) {
			/* Leave the request pending so the next entry retries. */
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			return rc;
		}
		goto retry;
	}
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		/* Force a TLB flush for this vcpu's SIE control block. */
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}
	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
		}
		goto retry;
	}
	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
		}
		goto retry;
	}
	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
		/* Intercept operation exceptions so they can be emulated. */
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
		goto retry;
	}
	if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
		/*
		 * Disable CMM virtualization; we will emulate the ESSA
		 * instruction manually, in order to provide additional
		 * functionalities needed for live migration.
		 */
		vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
		goto retry;
	}
	if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
		/*
		 * Re-enable CMM virtualization if CMMA is available and
		 * CMM has been used.
		 */
		if ((vcpu->kvm->arch.use_cmma) &&
		    (vcpu->kvm->mm->context.uses_cmm))
			vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		goto retry;
	}
	/* nothing to do, just clear the request */
	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
	/* we left the vsie handler, nothing to do, just clear the request */
	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
	return 0;
}
"CWE-416"
]
| linux | 0774a964ef561b7170d8d1b1bfe6f88002b6d219 | 113,530,936,744,472,530,000,000,000,000,000,000,000 | 79 | KVM: Fix out of range accesses to memslots
Reset the LRU slot if it becomes invalid when deleting a memslot to fix
an out-of-bounds/use-after-free access when searching through memslots.
Explicitly check for there being no used slots in search_memslots(), and
in the caller of s390's approximation variant.
Fixes: 36947254e5f9 ("KVM: Dynamically size memslot array based on number of used slots")
Reported-by: Qian Cai <[email protected]>
Cc: Peter Xu <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
Message-Id: <[email protected]>
Acked-by: Christian Borntraeger <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
/*
 * Boot-time initcall: describe the fixed user-mapped region between
 * FIXADDR_USER_START and FIXADDR_USER_END as a read/execute-only
 * "gate" VMA.  Always returns 0 (initcall convention).
 */
static int __init gate_vma_init(void)
{
	gate_vma.vm_mm = NULL;	/* not owned by any particular mm */
	gate_vma.vm_start = FIXADDR_USER_START;
	gate_vma.vm_end = FIXADDR_USER_END;
	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
	gate_vma.vm_page_prot = __P101;	/* r-x protection bits */

	return 0;
}
"CWE-703",
"CWE-189"
]
| linux | b4cbb197c7e7a68dbad0d491242e3ca67420c13e | 173,073,415,126,447,700,000,000,000,000,000,000,000 | 10 | vm: add vm_iomap_memory() helper function
Various drivers end up replicating the code to mmap() their memory
buffers into user space, and our core memory remapping function may be
very flexible but it is unnecessarily complicated for the common cases
to use.
Our internal VM uses pfn's ("page frame numbers") which simplifies
things for the VM, and allows us to pass physical addresses around in a
denser and more efficient format than passing a "phys_addr_t" around,
and having to shift it up and down by the page size. But it just means
that drivers end up doing that shifting instead at the interface level.
It also means that drivers end up mucking around with internal VM things
like the vma details (vm_pgoff, vm_start/end) way more than they really
need to.
So this just exports a function to map a certain physical memory range
into user space (using a phys_addr_t based interface that is much more
natural for a driver) and hides all the complexity from the driver.
Some drivers will still end up tweaking the vm_page_prot details for
things like prefetching or cacheability etc, but that's actually
relevant to the driver, rather than caring about what the page offset of
the mapping is into the particular IO memory region.
Acked-by: Greg Kroah-Hartman <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
/* Push character c back onto the tokenizer's underlying FILE stream so
 * the next read returns it again (thin wrapper over ungetc()). */
static void fp_ungetc(int c, struct tok_state *tok) {
    ungetc(c, tok->fp);
}
"CWE-125"
]
| cpython | dcfcd146f8e6fc5c2fc16a4c192a0c5f5ca8c53c | 155,094,745,070,367,190,000,000,000,000,000,000,000 | 3 | bpo-35766: Merge typed_ast back into CPython (GH-11645) |
/* ENUMERATE handler: scan up to five consecutive valid opcodes starting
 * at action n, emit a placeholder comment, and deliberately return one
 * less than the number scanned so that decompileIF() can consume the
 * remaining code and build the for-var-in loop.  is_type2 is unused. */
decompileENUMERATE(int n, SWF_ACTION *actions, int maxn, int is_type2)
{
  int scanned;

  for (scanned = 0; scanned < maxn && scanned < 5; scanned++) {
    if (!OpCode(actions, n + scanned, maxn))
      break;
  }

  INDENT
  println("/* a for-var-in loop should follow below: */" );

  return scanned - 1; /* leave the tail for decompileIF() */
}
"CWE-119",
"CWE-125"
]
| libming | da9d86eab55cbf608d5c916b8b690f5b76bca462 | 75,148,536,674,251,270,000,000,000,000,000,000,000 | 10 | decompileAction: Prevent heap buffer overflow and underflow with using OpCode |
/* Parse a DataEntryUrlBox ('url ') payload: the remaining box bytes are
 * a single NUL-terminated location string.  An empty box (size 0) is
 * valid and leaves ptr->location NULL.
 * Fix: the invalid-size error message referred to "svhd box" (copy-paste
 * from another reader); it now correctly names the url box. */
GF_Err url_box_read(GF_Box *s, GF_BitStream *bs)
{
	GF_DataEntryURLBox *ptr = (GF_DataEntryURLBox *)s;
	if (ptr->size) {
		/* size is u64; the (u32) narrowing yields 0 only when the
		 * declared size is a multiple of 2^32 — rejected below. */
		u32 location_size = (u32) ptr->size;
		if (location_size < 1) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid size %llu in url box\n", ptr->size));
			return GF_ISOM_INVALID_FILE;
		}
		ptr->location = (char*)gf_malloc(location_size);
		if (! ptr->location) return GF_OUT_OF_MEM;
		gf_bs_read_data(bs, ptr->location, location_size);
		/* The payload must end with a NUL terminator. */
		if (ptr->location[location_size-1]) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] url box location is not 0-terminated\n" ));
			return GF_ISOM_INVALID_FILE;
		}
	}
	return GF_OK;
}
"CWE-787"
]
| gpac | 77510778516803b7f7402d7423c6d6bef50254c3 | 207,219,707,406,923,700,000,000,000,000,000,000,000 | 20 | fixed #2255 |
/* Get or set the color of a flag item.  With color == NULL this is a
 * pure getter; otherwise the previous color is freed and replaced (an
 * empty string clears the color to NULL).  Returns the item's current
 * color, or NULL on bad arguments / cleared color. */
R_API const char *r_flag_color(RFlag *f, RFlagItem *it, const char *color) {
	if (!f || !it) {
		return NULL;
	}
	if (!color) {
		return it->color;
	}
	free (it->color);
	if (*color) {
		it->color = strdup (color);
	} else {
		it->color = NULL;
	}
	return it->color;
}
"CWE-125",
"CWE-787"
]
| radare2 | 52b1526443c1f433087928291d1c3d37a5600515 | 170,103,096,149,638,720,000,000,000,000,000,000,000 | 7 | Fix crash in wasm disassembler |
/* Release the global abrt configuration strings and reset the pointers to
 * NULL so a subsequent configuration reload starts from a clean state.
 * free(NULL) is a no-op, so this is safe even if nothing was loaded. */
void free_abrt_conf_data()
{
    free(g_settings_dump_location);
    g_settings_dump_location = NULL;

    free(g_settings_sWatchCrashdumpArchiveDir);
    g_settings_sWatchCrashdumpArchiveDir = NULL;
}
"CWE-200"
]
| abrt | 8939398b82006ba1fec4ed491339fc075f43fc7c | 262,819,632,262,306,040,000,000,000,000,000,000,000 | 8 | make the dump directories owned by root by default
It was discovered that the abrt event scripts create a user-readable
copy of a sosreport file in abrt problem directories, and include
excerpts of /var/log/messages selected by the user-controlled process
name, leading to an information disclosure.
This issue was discovered by Florian Weimer of Red Hat Product Security.
Related: #1212868
Signed-off-by: Jakub Filak <[email protected]> |
/*
 * Old-style sigsuspend(2) entry point: the first two arguments are unused
 * ABI padding; only the legacy mask word matters. Builds a sigset_t from
 * the mask and delegates to the common sigsuspend() helper.
 */
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
"CWE-119",
"CWE-787"
]
| linux | 4ea77014af0d6205b05503d1c7aac6eace11d473 | 38,306,401,845,800,533,000,000,000,000,000,000,000 | 6 | kernel/signal.c: avoid undefined behaviour in kill_something_info
When running kill(72057458746458112, 0) in userspace I hit the following
issue.
UBSAN: Undefined behaviour in kernel/signal.c:1462:11
negation of -2147483648 cannot be represented in type 'int':
CPU: 226 PID: 9849 Comm: test Tainted: G B ---- ------- 3.10.0-327.53.58.70.x86_64_ubsan+ #116
Hardware name: Huawei Technologies Co., Ltd. RH8100 V3/BC61PBIA, BIOS BLHSV028 11/11/2014
Call Trace:
dump_stack+0x19/0x1b
ubsan_epilogue+0xd/0x50
__ubsan_handle_negate_overflow+0x109/0x14e
SYSC_kill+0x43e/0x4d0
SyS_kill+0xe/0x10
system_call_fastpath+0x16/0x1b
Add code to avoid the UBSAN detection.
[[email protected]: tweak comment]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: zhongjiang <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Xishi Qiu <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
//! Construct an image of size (size_x,size_y,size_z,size_c) from a raw buffer
//! of a *different* pixel type t, copying and converting each value to T.
//! A shared construction is impossible across pixel types, so is_shared==true
//! always throws. On any failure the instance is left empty (all dims 0).
CImg(const t *const values, const unsigned int size_x, const unsigned int size_y=1,
     const unsigned int size_z=1, const unsigned int size_c=1, const bool is_shared=false):_is_shared(false) {
  if (is_shared) {
    // Cross-type sharing is rejected up front; reset to an empty image first.
    _width = _height = _depth = _spectrum = 0; _data = 0;
    throw CImgArgumentException(_cimg_instance
                                "CImg(): Invalid construction request of a (%u,%u,%u,%u) shared instance "
                                "from a (%s*) buffer (pixel types are different).",
                                cimg_instance,
                                size_x,size_y,size_z,size_c,CImg<t>::pixel_type());
  }
  // safe_size() computes the element count (presumably with overflow checks — confirm).
  const size_t siz = safe_size(size_x,size_y,size_z,size_c);
  if (values && siz) {
    _width = size_x; _height = size_y; _depth = size_z; _spectrum = size_c;
    try { _data = new T[siz]; } catch (...) {
      // Allocation failed: leave the image empty before reporting.
      _width = _height = _depth = _spectrum = 0; _data = 0;
      throw CImgInstanceException(_cimg_instance
                                  "CImg(): Failed to allocate memory (%s) for image (%u,%u,%u,%u).",
                                  cimg_instance,
                                  cimg::strbuffersize(sizeof(T)*size_x*size_y*size_z*size_c),
                                  size_x,size_y,size_z,size_c);
    }
    // Element-wise copy with implicit t -> T conversion.
    const t *ptrs = values; cimg_for(*this,ptrd,T) *ptrd = (T)*(ptrs++);
  } else { _width = _height = _depth = _spectrum = 0; _data = 0; }
}
"CWE-770"
]
| cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 101,447,120,900,380,520,000,000,000,000,000,000,000 | 25 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
/*
 * Emulate 'ERASE BINARY' for the Authentic card by overwriting the target
 * range of the currently selected EF with zero bytes via sc_update_binary().
 *
 * @offs  byte offset into the file where erasing starts
 * @count number of bytes to erase; zero is rejected as unsupported
 * @flags passed through to sc_update_binary()
 * Returns SC_SUCCESS or a negative SC_ERROR_* code.
 */
authentic_erase_binary(struct sc_card *card, unsigned int offs, size_t count, unsigned long flags)
{
	struct sc_context *ctx = card->ctx;
	int rv;
	unsigned char *buf_zero = NULL;

	LOG_FUNC_CALLED(ctx);
	if (!count)
		LOG_TEST_RET(ctx, SC_ERROR_NOT_SUPPORTED, "'ERASE BINARY' with ZERO count not supported");

	/* Debug aid: show which EF is currently cached as selected. */
	if (card->cache.valid && card->cache.current_ef)
		sc_log(ctx, "current_ef(type=%i) %s", card->cache.current_ef->path.type,
		       sc_print_path(&card->cache.current_ef->path));

	/* calloc gives an all-zero buffer to write over the erased range. */
	buf_zero = calloc(1, count);
	if (!buf_zero)
		LOG_TEST_RET(ctx, SC_ERROR_OUT_OF_MEMORY, "cannot allocate buff 'zero'");

	rv = sc_update_binary(card, offs, buf_zero, count, flags);
	free(buf_zero);
	/* NOTE: LOG_TEST_RET only returns on error; success falls through. */
	LOG_TEST_RET(ctx, rv, "'ERASE BINARY' failed");

	LOG_FUNC_RETURN(ctx, SC_SUCCESS);
}
"CWE-125"
]
| OpenSC | 8fe377e93b4b56060e5bbfb6f3142ceaeca744fa | 245,587,138,321,446,270,000,000,000,000,000,000,000 | 24 | fixed out of bounds reads
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting and suggesting security fixes. |
/* Regression test: a copy-transfer whose source is a staging buffer with no
 * backing iov must be rejected by the renderer with EINVAL rather than
 * reading from nowhere. */
START_TEST(virgl_test_copy_transfer_from_staging_without_iov_fails)
{
   static const unsigned bufsize = 50;
   static const unsigned synchronized = 1;
   struct virgl_context ctx = {0};
   struct virgl_resource src_res = {0};
   struct virgl_resource dst_res = {0};
   struct pipe_box box = {.width = bufsize, .height = 1, .depth = 1};
   int ret;

   ret = testvirgl_init_ctx_cmdbuf(&ctx);
   ck_assert_int_eq(ret, 0);

   /* Source: staging buffer deliberately created WITHOUT a backing iov. */
   ret = testvirgl_create_unbacked_simple_buffer(&src_res, 1, bufsize, VIRGL_BIND_STAGING);
   ck_assert_int_eq(ret, 0);
   virgl_renderer_ctx_attach_resource(ctx.ctx_id, src_res.handle);

   /* Destination: ordinary backed vertex buffer of the same size. */
   ret = testvirgl_create_backed_simple_buffer(&dst_res, 2, bufsize, VIRGL_BIND_VERTEX_BUFFER);
   ck_assert_int_eq(ret, 0);
   virgl_renderer_ctx_attach_resource(ctx.ctx_id, dst_res.handle);

   box.width = bufsize;
   virgl_encoder_copy_transfer(&ctx, &dst_res, 0, 0, &box, &src_res, 0, synchronized);

   /* Submitting the command must fail with EINVAL (missing source iov). */
   ret = virgl_renderer_submit_cmd(ctx.cbuf->buf, ctx.ctx_id, ctx.cbuf->cdw);
   ck_assert_int_eq(ret, EINVAL);

   /* Teardown: detach and release both resources, then the context. */
   virgl_renderer_ctx_detach_resource(ctx.ctx_id, src_res.handle);
   virgl_renderer_ctx_detach_resource(ctx.ctx_id, dst_res.handle);
   virgl_renderer_resource_unref(src_res.handle);
   testvirgl_destroy_backed_res(&dst_res);
   testvirgl_fini_ctx_cmdbuf(&ctx);
}
"CWE-909"
]
| virglrenderer | b05bb61f454eeb8a85164c8a31510aeb9d79129c | 266,044,627,644,561,830,000,000,000,000,000,000,000 | 33 | vrend: clear memory when allocating a host-backed memory resource
Closes: #249
Signed-off-by: Gert Wollny <[email protected]>
Reviewed-by: Chia-I Wu <[email protected]> |
/*
 * Tear down a Ghostscript interpreter instance: disable background printing,
 * close the current page device, flush stdout/stderr, restore all memory
 * "past the bottom" save level, release plugins/io devices, remove temp
 * files, and finalize the library. Returns the (possibly updated)
 * exit_status.
 *
 * NOTE(review): if the job left the nulldevice installed as the current
 * device, the close path below may end up freeing the nulldevice the
 * interpreter still relies on — confirm against bug 699664.
 */
gs_main_finit(gs_main_instance * minst, int exit_status, int code)
{
    i_ctx_t *i_ctx_p = minst->i_ctx_p;
    gs_dual_memory_t dmem = {0};
    int exit_code;
    ref error_object;
    char *tempnames;
    /* NB: need to free gs_name_table
     */
    /*
     * Previous versions of this code closed the devices in the
     * device list here. Since these devices are now prototypes,
     * they cannot be opened, so they do not need to be closed;
     * alloc_restore_all will close dynamically allocated devices.
     */
    tempnames = gs_main_tempnames(minst);
    /* by the time we get here, we *must* avoid any random redefinitions of
     * operators etc, so we push systemdict onto the top of the dict stack.
     * We do this in C to avoid running into any other re-defininitions in the
     * Postscript world.
     */
    gs_finit_push_systemdict(i_ctx_p);
    /* We have to disable BGPrint before we call interp_reclaim() to prevent the
     * parent rendering thread initialising for the next page, whilst we are
     * removing objects it may want to access - for example, the I/O device table.
     * We also have to mess with the BeginPage/EndPage procs so that we don't
     * trigger a spurious extra page to be emitted.
     */
    if (minst->init_done >= 2) {
        gs_main_run_string(minst,
            "/BGPrint /GetDeviceParam .special_op \
            {{ <</BeginPage {pop} /EndPage {pop pop //false } \
            /BGPrint false /NumRenderingThreads 0>> setpagedevice} if} if \
            serverdict /.jobsavelevel get 0 eq {/quit} {/stop} ifelse \
            .systemvar exec",
            0 , &exit_code, &error_object);
    }
    /*
     * Close the "main" device, because it may need to write out
     * data before destruction. pdfwrite needs so.
     */
    if (minst->init_done >= 2) {
        int code = 0;
        if (idmemory->reclaim != 0) {
            code = interp_reclaim(&minst->i_ctx_p, avm_global);
            if (code < 0) {
                ref error_name;
                if (tempnames)
                    free(tempnames);
                if (gs_errorname(i_ctx_p, code, &error_name) >= 0) {
                    char err_str[32] = {0};
                    name_string_ref(imemory, &error_name, &error_name);
                    memcpy(err_str, error_name.value.const_bytes, r_size(&error_name));
                    emprintf2(imemory, "ERROR: %s (%d) reclaiming the memory while the interpreter finalization.\n", err_str, code);
                }
                else {
                    emprintf1(imemory, "UNKNOWN ERROR %d reclaiming the memory while the interpreter finalization.\n", code);
                }
#ifdef MEMENTO_SQUEEZE_BUILD
                if (code != gs_error_VMerror ) return gs_error_Fatal;
#else
                return gs_error_Fatal;
#endif
            }
            i_ctx_p = minst->i_ctx_p; /* interp_reclaim could change it. */
        }
        if (i_ctx_p->pgs != NULL && i_ctx_p->pgs->device != NULL) {
            gx_device *pdev = i_ctx_p->pgs->device;
            const char * dname = pdev->dname;
            /* make sure device doesn't isn't freed by .uninstalldevice */
            rc_adjust(pdev, 1, "gs_main_finit");
            /* deactivate the device just before we close it for the last time */
            gs_main_run_string(minst,
                /* we need to do the 'quit' so we don't loop for input (double quit) */
                ".uninstallpagedevice serverdict \
                /.jobsavelevel get 0 eq {/quit} {/stop} ifelse .systemvar exec",
                0 , &exit_code, &error_object);
            code = gs_closedevice(pdev);
            if (code < 0) {
                ref error_name;
                if (gs_errorname(i_ctx_p, code, &error_name) >= 0) {
                    char err_str[32] = {0};
                    name_string_ref(imemory, &error_name, &error_name);
                    memcpy(err_str, error_name.value.const_bytes, r_size(&error_name));
                    emprintf3(imemory, "ERROR: %s (%d) on closing %s device.\n", err_str, code, dname);
                }
                else {
                    emprintf2(imemory, "UNKNOWN ERROR %d closing %s device.\n", code, dname);
                }
            }
            rc_decrement(pdev, "gs_main_finit"); /* device might be freed */
            if (exit_status == 0 || exit_status == gs_error_Quit)
                exit_status = code;
        }
        /* Flush stdout and stderr */
        gs_main_run_string(minst,
            "(%stdout) (w) file closefile (%stderr) (w) file closefile \
            serverdict /.jobsavelevel get 0 eq {/quit} {/stop} ifelse .systemexec \
            systemdict /savedinitialgstate .forceundef",
            0 , &exit_code, &error_object);
    }
    gp_readline_finit(minst->readline_data);
    i_ctx_p = minst->i_ctx_p; /* get current interp context */
    if (gs_debug_c(':')) {
        print_resource_usage(minst, &gs_imemory, "Final");
        dmprintf1(minst->heap, "%% Exiting instance 0x%p\n", minst);
    }
    /* Do the equivalent of a restore "past the bottom". */
    /* This will release all memory, close all open files, etc. */
    if (minst->init_done >= 1) {
        gs_memory_t *mem_raw = i_ctx_p->memory.current->non_gc_memory;
        i_plugin_holder *h = i_ctx_p->plugin_list;
        dmem = *idmemory;
        code = alloc_restore_all(i_ctx_p);
        if (code < 0)
            emprintf1(mem_raw,
                      "ERROR %d while the final restore. See gs/psi/ierrors.h for code explanation.\n",
                      code);
        i_iodev_finit(&dmem);
        i_plugin_finit(mem_raw, h);
    }
    /* clean up redirected stdout */
    if (minst->heap->gs_lib_ctx->fstdout2
        && (minst->heap->gs_lib_ctx->fstdout2 != minst->heap->gs_lib_ctx->fstdout)
        && (minst->heap->gs_lib_ctx->fstdout2 != minst->heap->gs_lib_ctx->fstderr)) {
        fclose(minst->heap->gs_lib_ctx->fstdout2);
        minst->heap->gs_lib_ctx->fstdout2 = (FILE *)NULL;
    }
    minst->heap->gs_lib_ctx->stdout_is_redirected = 0;
    minst->heap->gs_lib_ctx->stdout_to_stderr = 0;
    /* remove any temporary files, after ghostscript has closed files */
    if (tempnames) {
        char *p = tempnames;
        while (*p) {
            unlink(p);
            p += strlen(p) + 1;
        }
        free(tempnames);
    }
    gs_lib_finit(exit_status, code, minst->heap);
    gs_free_object(minst->heap, minst->lib_path.container.value.refs, "lib_path array");
    ialloc_finit(&dmem);
    return exit_status;
}
| ghostpdl | 241d91112771a6104de10b3948c3f350d6690c1d | 111,754,590,613,958,970,000,000,000,000,000,000,000 | 159 | Bug 699664: Ensure the correct is in place before cleanup
If the PS job replaces the device and leaves that graphics state in place, we
wouldn't cleanup the default device in the normal way, but rely on the garbage
collector.
This works (but isn't ideal), *except* when the job replaces the device with
the null device (using the nulldevice operator) - this means that
.uninstallpagedevice doesn't replace the existing device with the nulldevice
(since it is already installed), the device from the graphics ends up being
freed - and as it is the nulldevice, which we rely on, memory corruption
and a segfault can happen.
We avoid this by checking if the current device is the nulldevice, and if so,
restoring it away, before continuing with the device cleanup. |
/*
 * Stop perfmon monitoring for an attached context.
 *
 * In system-wide mode this must run on the monitored CPU and clears the PMU
 * state directly (dcr.pp, psr.pp); in per-task mode it clears psr.up either
 * in the current task or in the target task's saved registers.
 * Returns 0 on success, -EINVAL if the context is not loaded, or -EBUSY when
 * called from the wrong CPU for a system-wide session.
 */
pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct pt_regs *tregs;
	struct task_struct *task = PFM_CTX_TASK(ctx);
	int state, is_system;
	state = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;
	/*
	 * context must be attached to issue the stop command (includes LOADED,MASKED,ZOMBIE)
	 */
	if (state == PFM_CTX_UNLOADED) return -EINVAL;
	/*
	 * In system wide and when the context is loaded, access can only happen
	 * when the caller is running on the CPU being monitored by the session.
	 * It does not have to be the owner (ctx_task) of the context per se.
	 */
	if (is_system && ctx->ctx_cpu != smp_processor_id()) {
		DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
		return -EBUSY;
	}
	DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
		PFM_CTX_TASK(ctx)->pid,
		state,
		is_system));
	/*
	 * in system mode, we need to update the PMU directly
	 * and the user level state of the caller, which may not
	 * necessarily be the creator of the context.
	 */
	if (is_system) {
		/*
		 * Update local PMU first
		 *
		 * disable dcr pp
		 */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
		ia64_srlz_i();
		/*
		 * update local cpuinfo
		 */
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
		/*
		 * stop monitoring, does srlz.i
		 */
		pfm_clear_psr_pp();
		/*
		 * stop monitoring in the caller
		 */
		ia64_psr(regs)->pp = 0;
		return 0;
	}
	/*
	 * per-task mode
	 */
	if (task == current) {
		/* stop monitoring at kernel level */
		pfm_clear_psr_up();
		/*
		 * stop monitoring at the user level
		 */
		ia64_psr(regs)->up = 0;
	} else {
		tregs = task_pt_regs(task);
		/*
		 * stop monitoring at the user level
		 */
		ia64_psr(tregs)->up = 0;
		/*
		 * monitoring disabled in kernel at next reschedule
		 */
		ctx->ctx_saved_psr_up = 0;
		DPRINT(("task=[%d]\n", task->pid));
	}
	return 0;
}
| linux-2.6 | 41d5e5d73ecef4ef56b7b4cde962929a712689b4 | 139,250,890,971,695,180,000,000,000,000,000,000,000 | 86 | [IA64] permon use-after-free fix
Perfmon associates vmalloc()ed memory with a file descriptor, and installs
a vma mapping that memory. Unfortunately, the vm_file field is not filled
in, so processes with mappings to that memory do not prevent the file from
being closed and the memory freed. This results in use-after-free bugs and
multiple freeing of pages, etc.
I saw this bug on an Altix on SLES9. Haven't reproduced upstream but it
looks like the same issue is there.
Signed-off-by: Nick Piggin <[email protected]>
Cc: Stephane Eranian <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Tony Luck <[email protected]> |
/*
 * Batch queued IN packets on a pipelined bulk endpoint into combined
 * transfers and submit them to the device. A combined transfer is closed
 * (and submitted) when a short packet is allowed, the queue ends, or size
 * limits are hit; packets already submitted async are skipped, and a halted
 * endpoint drains its queue with USB_RET_REMOVE_FROM_QUEUE.
 */
void usb_ep_combine_input_packets(USBEndpoint *ep)
{
    USBPacket *p, *u, *next, *prev = NULL, *first = NULL;
    USBPort *port = ep->dev->port;
    int totalsize;

    assert(ep->pipeline);
    assert(ep->pid == USB_TOKEN_IN);

    QTAILQ_FOREACH_SAFE(p, &ep->queue, queue, next) {
        /* Empty the queue on a halt */
        if (ep->halted) {
            p->status = USB_RET_REMOVE_FROM_QUEUE;
            port->ops->complete(port, p);
            continue;
        }

        /* Skip packets already submitted to the device */
        if (p->state == USB_PACKET_ASYNC) {
            prev = p;
            continue;
        }
        usb_packet_check_state(p, USB_PACKET_QUEUED);

        /*
         * If the previous (combined) packet has the short_not_ok flag set
         * stop, as we must not submit packets to the device after a transfer
         * ending with short_not_ok packet.
         */
        if (prev && prev->short_not_ok) {
            break;
        }

        if (first) {
            /* Lazily allocate the combined-packet container on 2nd packet. */
            if (first->combined == NULL) {
                USBCombinedPacket *combined = g_new0(USBCombinedPacket, 1);

                combined->first = first;
                QTAILQ_INIT(&combined->packets);
                qemu_iovec_init(&combined->iov, 2);
                usb_combined_packet_add(combined, first);
            }
            usb_combined_packet_add(first->combined, p);
        } else {
            first = p;
        }

        /* Is this packet the last one of a (combined) transfer? */
        totalsize = (p->combined) ? p->combined->iov.size : p->iov.size;
        if ((p->iov.size % ep->max_packet_size) != 0 || !p->short_not_ok ||
                next == NULL ||
                /* Work around for Linux usbfs bulk splitting + migration */
                (totalsize == (16 * KiB - 36) && p->int_req) ||
                /* Next package may grow combined package over 1MiB */
                totalsize > 1 * MiB - ep->max_packet_size) {
            usb_device_handle_data(ep->dev, first);
            assert(first->status == USB_RET_ASYNC);
            if (first->combined) {
                /* Mark every member of the combined transfer in-flight. */
                QTAILQ_FOREACH(u, &first->combined->packets, combined_entry) {
                    usb_packet_set_state(u, USB_PACKET_ASYNC);
                }
            } else {
                usb_packet_set_state(first, USB_PACKET_ASYNC);
            }
            first = NULL;
            prev = p;
        }
    }
}
"CWE-770"
]
| qemu | 05a40b172e4d691371534828078be47e7fff524c | 212,121,965,605,421,000,000,000,000,000,000,000,000 | 69 | usb: limit combined packets to 1 MiB (CVE-2021-3527)
usb-host and usb-redirect try to batch bulk transfers by combining many
small usb packets into a single, large transfer request, to reduce the
overhead and improve performance.
This patch adds a size limit of 1 MiB for those combined packets to
restrict the host resources the guest can bind that way.
Signed-off-by: Gerd Hoffmann <[email protected]>
Message-Id: <[email protected]> |
/* Floating-point front end for setting the gAMA chunk value: converts the
 * double to libpng's fixed-point representation and delegates to
 * png_set_gAMA_fixed(). */
png_set_gAMA(png_const_structrp png_ptr, png_inforp info_ptr, double file_gamma)
{
   png_set_gAMA_fixed(png_ptr, info_ptr, png_fixed(png_ptr, file_gamma,
       "png_set_gAMA"));
}
"CWE-120"
]
| libpng | a901eb3ce6087e0afeef988247f1a1aa208cb54d | 315,197,403,134,495,650,000,000,000,000,000,000,000 | 5 | [libpng16] Prevent reading over-length PLTE chunk (Cosmin Truta). |
/* Clear any capability state on the binprm and report success.
 * NOTE(review): presumably the stub used when file capabilities are not
 * compiled in — clearing ensures exec never starts with stale caps; confirm
 * against the CONFIG_SECURITY_FILE_CAPABILITIES variant. */
static inline int get_file_caps(struct linux_binprm *bprm)
{
	bprm_clear_caps(bprm);
	return 0;
}
| linux-2.6 | 3318a386e4ca68c76e0294363d29bdc46fcad670 | 183,133,645,747,739,020,000,000,000,000,000,000,000 | 5 | file caps: always start with clear bprm->caps_*
While Linux doesn't honor setuid on scripts. However, it mistakenly
behaves differently for file capabilities.
This patch fixes that behavior by making sure that get_file_caps()
begins with empty bprm->caps_*. That way when a script is loaded,
its bprm->caps_* may be filled when binfmt_misc calls prepare_binprm(),
but they will be cleared again when binfmt_elf calls prepare_binprm()
next to read the interpreter's file capabilities.
Signed-off-by: Serge Hallyn <[email protected]>
Acked-by: David Howells <[email protected]>
Acked-by: Andrew G. Morgan <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
/* Per-request/global initializer for the bcmath extension: default the
 * working precision to 0 decimal places and initialize the bc library's
 * internal number constants. */
static PHP_GINIT_FUNCTION(bcmath)
{
	bcmath_globals->bc_precision = 0;
	bc_init_numbers(TSRMLS_C);
}
"CWE-20"
]
| php-src | ed52bcb3dcb2e7dbc009ef8c6579fb1276ca73c1 | 333,718,619,124,626,020,000,000,000,000,000,000,000 | 5 | Fix bug #72093: bcpowmod accepts negative scale and corrupts _one_ definition
We can not modify result since it can be copy of _zero_ or _one_, etc. and
"copy" in bcmath is just bumping the refcount.
Conflicts:
main/php_version.h |
/* Read a single character from stdin without waiting for Enter and without
 * echoing it, by temporarily putting the terminal into non-canonical,
 * no-echo mode. The original terminal settings are restored before
 * returning. Returns the character read (or EOF). */
int mygetch( ) {
	struct termios saved;
	struct termios raw;
	int key;

	/* Remember the current settings so they can be restored afterwards. */
	tcgetattr( STDIN_FILENO, &saved );
	raw = saved;
	raw.c_lflag &= ~( ICANON | ECHO );
	tcsetattr( STDIN_FILENO, TCSANOW, &raw );

	key = getchar();

	tcsetattr( STDIN_FILENO, TCSANOW, &saved );
	return key;
}
"CWE-787"
]
| aircrack-ng | ff70494dd389ba570dbdbf36f217c28d4381c6b5 | 42,807,154,637,359,880,000,000,000,000,000,000,000 | 12 | Airodump-ng: Fixed GPS stack overflow (Closes #13 on GitHub).
git-svn-id: http://svn.aircrack-ng.org/trunk@2416 28c6078b-6c39-48e3-add9-af49d547ecab |
/* Switch the archive window between flat and as-directory list modes.
 * No-op when the mode is unchanged. Flat mode resets the navigation
 * history to the root. The chosen mode is persisted to GSettings and the
 * file list, directory tree and location bar are refreshed. */
fr_window_set_list_mode (FrWindow         *window,
			 FrWindowListMode  list_mode)
{
	g_return_if_fail (window != NULL);

	if (list_mode == window->priv->list_mode)
		return;

	window->priv->list_mode = window->priv->last_list_mode = list_mode;
	if (window->priv->list_mode == FR_WINDOW_LIST_MODE_FLAT) {
		/* Flat view has no hierarchy, so collapse history to "/". */
		fr_window_history_clear (window);
		fr_window_history_add (window, "/");
	}

	/* Persist the preference before refreshing the UI. */
	g_settings_set_enum (window->priv->settings_listing, PREF_LISTING_LIST_MODE, window->priv->last_list_mode);
	g_settings_set_boolean (window->priv->settings_listing, PREF_LISTING_SHOW_PATH, (window->priv->list_mode == FR_WINDOW_LIST_MODE_FLAT));

	fr_window_update_file_list (window, TRUE);
	fr_window_update_dir_tree (window);
	fr_window_update_current_location (window);
}
"CWE-22"
]
| file-roller | b147281293a8307808475e102a14857055f81631 | 96,488,467,668,509,270,000,000,000,000,000,000,000 | 21 | libarchive: sanitize filenames before extracting |
/* SQL-callable point-on-line test: true when the point satisfies the line
 * equation A*x + B*y + C = 0, using FPzero for fuzzy float comparison. */
on_pl(PG_FUNCTION_ARGS)
{
	Point	   *pt = PG_GETARG_POINT_P(0);
	LINE	   *line = PG_GETARG_LINE_P(1);

	PG_RETURN_BOOL(FPzero(line->A * pt->x + line->B * pt->y + line->C));
}
"CWE-703",
"CWE-189"
]
| postgres | 31400a673325147e1205326008e32135a78b4d8a | 328,542,499,266,202,100,000,000,000,000,000,000,000 | 7 | Predict integer overflow to avoid buffer overruns.
Several functions, mostly type input functions, calculated an allocation
size such that the calculation wrapped to a small positive value when
arguments implied a sufficiently-large requirement. Writes past the end
of the inadvertent small allocation followed shortly thereafter.
Coverity identified the path_in() vulnerability; code inspection led to
the rest. In passing, add check_stack_depth() to prevent stack overflow
in related functions.
Back-patch to 8.4 (all supported versions). The non-comment hstore
changes touch code that did not exist in 8.4, so that part stops at 9.0.
Noah Misch and Heikki Linnakangas, reviewed by Tom Lane.
Security: CVE-2014-0064 |
/*
 * execveat(2) entry point: resolves the pathname relative to fd, honouring
 * AT_EMPTY_PATH by allowing an empty filename (LOOKUP_EMPTY), then hands
 * off to the common do_execveat() implementation.
 */
SYSCALL_DEFINE5(execveat,
		int, fd, const char __user *, filename,
		const char __user *const __user *, argv,
		const char __user *const __user *, envp,
		int, flags)
{
	int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;

	return do_execveat(fd,
			   getname_flags(filename, lookup_flags, NULL),
			   argv, envp, flags);
}
"CWE-362"
]
| linux | 8b01fc86b9f425899f8a3a8fc1c47d73c2c20543 | 200,875,434,797,700,140,000,000,000,000,000,000,000 | 12 | fs: take i_mutex during prepare_binprm for set[ug]id executables
This prevents a race between chown() and execve(), where chowning a
setuid-user binary to root would momentarily make the binary setuid
root.
This patch was mostly written by Linus Torvalds.
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
/*
 * Map/pin the guest pages that vmcs12 points at (APIC-access page, virtual
 * APIC page, posted-interrupt descriptor) and program the corresponding
 * vmcs02 fields with their host physical addresses. Any page pinned or
 * mapped by a previous nested entry is released first. Translation failures
 * are tolerated per the comments below rather than failing the entry.
 */
static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
				    struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct page *page;
	u64 hpa;

	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
		/*
		 * Translate L1 physical address to host physical
		 * address for vmcs02. Keep the page pinned, so this
		 * physical address remains valid. We keep a reference
		 * to it so we can release it later.
		 */
		if (vmx->nested.apic_access_page) { /* shouldn't happen */
			kvm_release_page_dirty(vmx->nested.apic_access_page);
			vmx->nested.apic_access_page = NULL;
		}
		page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
		/*
		 * If translation failed, no matter: This feature asks
		 * to exit when accessing the given address, and if it
		 * can never be accessed, this feature won't do
		 * anything anyway.
		 */
		if (!is_error_page(page)) {
			vmx->nested.apic_access_page = page;
			hpa = page_to_phys(vmx->nested.apic_access_page);
			vmcs_write64(APIC_ACCESS_ADDR, hpa);
		} else {
			vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
					SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
		}
	} else if (!(nested_cpu_has_virt_x2apic_mode(vmcs12)) &&
		   cpu_need_virtualize_apic_accesses(&vmx->vcpu)) {
		vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
			      SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
		kvm_vcpu_reload_apic_access_page(vcpu);
	}

	if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
		if (vmx->nested.virtual_apic_page) { /* shouldn't happen */
			kvm_release_page_dirty(vmx->nested.virtual_apic_page);
			vmx->nested.virtual_apic_page = NULL;
		}
		page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->virtual_apic_page_addr);

		/*
		 * If translation failed, VM entry will fail because
		 * prepare_vmcs02 set VIRTUAL_APIC_PAGE_ADDR to -1ull.
		 * Failing the vm entry is _not_ what the processor
		 * does but it's basically the only possibility we
		 * have. We could still enter the guest if CR8 load
		 * exits are enabled, CR8 store exits are enabled, and
		 * virtualize APIC access is disabled; in this case
		 * the processor would never use the TPR shadow and we
		 * could simply clear the bit from the execution
		 * control. But such a configuration is useless, so
		 * let's keep the code simple.
		 */
		if (!is_error_page(page)) {
			vmx->nested.virtual_apic_page = page;
			hpa = page_to_phys(vmx->nested.virtual_apic_page);
			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa);
		}
	}

	if (nested_cpu_has_posted_intr(vmcs12)) {
		if (vmx->nested.pi_desc_page) { /* shouldn't happen */
			kunmap(vmx->nested.pi_desc_page);
			kvm_release_page_dirty(vmx->nested.pi_desc_page);
			vmx->nested.pi_desc_page = NULL;
		}
		page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr);
		if (is_error_page(page))
			return;
		vmx->nested.pi_desc_page = page;
		vmx->nested.pi_desc = kmap(vmx->nested.pi_desc_page);
		/* The descriptor may start at any offset within the page. */
		vmx->nested.pi_desc =
			(struct pi_desc *)((void *)vmx->nested.pi_desc +
			(unsigned long)(vmcs12->posted_intr_desc_addr &
			(PAGE_SIZE - 1)));
		vmcs_write64(POSTED_INTR_DESC_ADDR,
			page_to_phys(vmx->nested.pi_desc_page) +
			(unsigned long)(vmcs12->posted_intr_desc_addr &
			(PAGE_SIZE - 1)));
	}
	if (cpu_has_vmx_msr_bitmap() &&
	    nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS) &&
	    nested_vmx_merge_msr_bitmap(vcpu, vmcs12))
		;
	else
		vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
				CPU_BASED_USE_MSR_BITMAPS);
}
"CWE-20",
"CWE-617"
]
| linux | 3a8b0677fc6180a467e26cc32ce6b0c09a32f9bb | 311,643,440,552,758,730,000,000,000,000,000,000,000 | 95 | KVM: VMX: Do not BUG() on out-of-bounds guest IRQ
The value of the guest_irq argument to vmx_update_pi_irte() is
ultimately coming from a KVM_IRQFD API call. Do not BUG() in
vmx_update_pi_irte() if the value is out-of bounds. (Especially,
since KVM as a whole seems to hang after that.)
Instead, print a message only once if we find that we don't have a
route for a certain IRQ (which can be out-of-bounds or within the
array).
This fixes CVE-2017-1000252.
Fixes: efc644048ecde54 ("KVM: x86: Update IRTE for posted-interrupts")
Signed-off-by: Jan H. Schönherr <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
/* Async-signal-safe vararg logger: formats into a fixed stack buffer with
 * the signal-safe vpnprintf and writes via LogSWrite, prefixing the
 * message-type string when one exists for (type, verb). A line truncated by
 * the buffer limit is forced to end in '\n'. */
LogVMessageVerbSigSafe(MessageType type, int verb, const char *format, va_list args)
{
    const char *type_str;
    char buf[1024];
    int len;
    Bool newline;

    type_str = LogMessageTypeVerbString(type, verb);
    if (!type_str)
        return;

    /* if type_str is not "", prepend it and ' ', to message */
    if (type_str[0] != '\0') {
        LogSWrite(verb, type_str, strlen_sigsafe(type_str), FALSE);
        LogSWrite(verb, " ", 1, FALSE);
    }

    len = vpnprintf(buf, sizeof(buf), format, args);

    /* Force '\n' at end of truncated line */
    if (sizeof(buf) - len == 1)
        buf[len - 1] = '\n';

    newline = (len > 0 && buf[len - 1] == '\n');
    LogSWrite(verb, buf, len, newline);
}
"CWE-863"
]
| xserver | da15c7413916f754708c62c2089265528cd661e2 | 220,707,624,441,965,200,000,000,000,000,000,000,000 | 26 | LogFilePrep: add a comment to the unsafe format string.
CVE-2018-14665 also made it possible to exploit this to access
memory. With -logfile forbidden when running with elevated privileges
this is no longer an issue.
Signed-off-by: Matthieu Herrb <[email protected]>
Reviewed-by: Adam Jackson <[email protected]>
(cherry picked from commit 248d164eae27f1f310266d78e52f13f64362f81e) |
/*
 * string_strip: return a newly allocated copy of "string" with any
 * characters contained in "chars" removed from the left end (if left is
 * non-zero) and/or the right end (if right is non-zero).
 *
 * Returns NULL when string is NULL; otherwise the caller owns the result
 * and must free it. Stripping everything yields an empty string.
 */
string_strip (const char *string, int left, int right, const char *chars)
{
    const char *begin, *end;

    if (!string)
        return NULL;
    if (!string[0])
        return strdup (string);

    begin = string;
    end = string + strlen (string) - 1;

    if (left)
    {
        /* advance past leading characters that belong to the strip set */
        while (begin[0] && strchr (chars, begin[0]))
        {
            begin++;
        }
        /* everything was stripped: return an empty copy */
        if (!begin[0])
            return strdup (begin);
    }

    if (right)
    {
        /* back up over trailing characters that belong to the strip set */
        while ((end >= begin) && strchr (chars, end[0]))
        {
            end--;
        }
        if (end < begin)
            return strdup ("");
    }

    return string_strndup (begin, end - begin + 1);
}
"CWE-20"
]
| weechat | efb795c74fe954b9544074aafcebb1be4452b03a | 121,688,566,604,697,720,000,000,000,000,000,000,000 | 35 | core: do not call shell to execute command in hook_process (fix security problem when a plugin/script gives untrusted command) (bug #37764) |
// Construct a JSStream wrapper bound to the given JS object: registers it as
// an async resource of type PROVIDER_JSSTREAM and makes the handle weak so
// GC can reclaim it when the JS side drops its references.
JSStream::JSStream(Environment* env, Local<Object> obj)
    : AsyncWrap(env, obj, AsyncWrap::PROVIDER_JSSTREAM),
      StreamBase(env) {
  MakeWeak();
}
"CWE-416"
]
| node | 7f178663ebffc82c9f8a5a1b6bf2da0c263a30ed | 201,940,101,737,821,200,000,000,000,000,000,000,000 | 5 | src: use unique_ptr for WriteWrap
This commit attempts to avoid a use-after-free error by using unqiue_ptr
and passing a reference to it.
CVE-ID: CVE-2020-8265
Fixes: https://github.com/nodejs-private/node-private/issues/227
PR-URL: https://github.com/nodejs-private/node-private/pull/238
Reviewed-By: Michael Dawson <[email protected]>
Reviewed-By: Tobias Nießen <[email protected]>
Reviewed-By: Richard Lau <[email protected]> |
/* Set the pixel traits for the image's index channel by writing the traits
 * entry of IndexPixelChannel in the image's channel map. */
static inline void SetPixelIndexTraits(Image *image,const PixelTrait traits)
{
  image->channel_map[IndexPixelChannel].traits=traits;
}
"CWE-119",
"CWE-787"
]
| ImageMagick | 450bd716ed3b9186dd10f9e60f630a3d9eeea2a4 | 270,473,897,589,511,540,000,000,000,000,000,000,000 | 4 | |
/*
 * Compute windowed statistics of an 8 bpp image over a (2*wc+1) x (2*hc+1)
 * window: optionally the mean (*ppixm), mean square (*ppixms), variance
 * (*pfpixv) and root variance (*pfpixrv). Any output pointer may be NULL,
 * but at least one must be requested. If hasborder is 0, a mirrored border
 * is added internally so the window is valid at the image edges.
 * Returns 0 on success, 1 on error.
 */
pixWindowedStats(PIX     *pixs,
                 l_int32  wc,
                 l_int32  hc,
                 l_int32  hasborder,
                 PIX    **ppixm,
                 PIX    **ppixms,
                 FPIX   **pfpixv,
                 FPIX   **pfpixrv)
{
PIX  *pixb, *pixm, *pixms;

    PROCNAME("pixWindowedStats");

    if (!ppixm && !ppixms && !pfpixv && !pfpixrv)
        return ERROR_INT("no output requested", procName, 1);
    if (ppixm) *ppixm = NULL;
    if (ppixms) *ppixms = NULL;
    if (pfpixv) *pfpixv = NULL;
    if (pfpixrv) *pfpixrv = NULL;
    if (!pixs || pixGetDepth(pixs) != 8)
        return ERROR_INT("pixs not defined or not 8 bpp", procName, 1);
    if (wc < 2 || hc < 2)
        return ERROR_INT("wc and hc not >= 2", procName, 1);

        /* Add border if requested */
    if (!hasborder)
        pixb = pixAddBorderGeneral(pixs, wc + 1, wc + 1, hc + 1, hc + 1, 0);
    else
        pixb = pixClone(pixs);

    /* Fast path: no variance outputs needed, compute only what was asked. */
    if (!pfpixv && !pfpixrv) {
        if (ppixm) *ppixm = pixWindowedMean(pixb, wc, hc, 1, 1);
        if (ppixms) *ppixms = pixWindowedMeanSquare(pixb, wc, hc, 1);
        pixDestroy(&pixb);
        return 0;
    }

    /* Variance requires both mean and mean-square intermediates. */
    pixm = pixWindowedMean(pixb, wc, hc, 1, 1);
    pixms = pixWindowedMeanSquare(pixb, wc, hc, 1);
    pixWindowedVariance(pixm, pixms, pfpixv, pfpixrv);
    /* Hand the intermediates to the caller if requested, else free them. */
    if (ppixm)
        *ppixm = pixm;
    else
        pixDestroy(&pixm);
    if (ppixms)
        *ppixms = pixms;
    else
        pixDestroy(&pixms);
    pixDestroy(&pixb);
    return 0;
}
| leptonica | 480f5e74c24fdc2003c42a4e15d1f24c9e6ea469 | 194,864,446,196,355,400,000,000,000,000,000,000,000 | 51 | Fixed issue 21972 (oss-fuzz) Divide by zero in pixBlockconvGray(). |
/* Remove all recorded accesses of a variable: first drop every inverse
 * reference to this var from the function's per-instruction access table
 * (fcn->inst_vars), then clear the var's own access vector, and mark the
 * owning analysis dirty. */
R_API void r_anal_var_clear_accesses(RAnalVar *var) {
	r_return_if_fail (var);
	RAnalFunction *fcn = var->fcn;
	if (fcn->inst_vars) {
		// remove all inverse references to the var's accesses
		RAnalVarAccess *acc;
		r_vector_foreach (&var->accesses, acc) {
			RPVector *inst_accesses = ht_up_find (fcn->inst_vars, (ut64)acc->offset, NULL);
			if (!inst_accesses) {
				continue;
			}
			r_pvector_remove_data (inst_accesses, var);
		}
	}
	r_vector_clear (&var->accesses);
	R_DIRTY (var->fcn->anal);
}
} | 0 | [
"CWE-416"
]
| radare2 | a7ce29647fcb38386d7439696375e16e093d6acb | 264,254,301,046,859,200,000,000,000,000,000,000,000 | 17 | Fix UAF in aaaa on arm/thumb switching ##crash
* Reported by @peacock-doris via huntr.dev
* Reproducer tests_65185
* This is a logic fix, but not the fully safe as changes in the code
can result on UAF again, to properly protect r2 from crashing we
need to break the ABI and add refcounting to RRegItem, which can't
happen in 5.6.x because of abi-compat rules |
/* BPF ringbuf maps have no per-key elements, so the generic map
 * update operation is not supported; always report -ENOTSUPP. */
static int ringbuf_map_update_elem(struct bpf_map *map, void *key, void *value,
				   u64 flags)
{
	return -ENOTSUPP;
}
"CWE-787"
]
| bpf | 4b81ccebaeee885ab1aa1438133f2991e3a2b6ea | 265,086,318,199,041,200,000,000,000,000,000,000,000 | 5 | bpf, ringbuf: Deny reserve of buffers larger than ringbuf
A BPF program might try to reserve a buffer larger than the ringbuf size.
If the consumer pointer is way ahead of the producer, that would be
successfully reserved, allowing the BPF program to read or write out of
the ringbuf allocated area.
Reported-by: Ryota Shiga (Flatt Security)
Fixes: 457f44363a88 ("bpf: Implement BPF ring buffer and verifier support for it")
Signed-off-by: Thadeu Lima de Souza Cascardo <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Andrii Nakryiko <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]> |
void ReturnRaggedTensor(OpKernelContext* context,
const RaggedTensorVariant& ragged_tensor) {
int ragged_rank = ragged_tensor.ragged_rank();
OpOutputList splits_out;
OP_REQUIRES_OK(context,
context->output_list("output_nested_splits", &splits_out));
for (int i = 0; i < ragged_rank; i++) {
splits_out.set(i, ragged_tensor.splits(i));
}
context->set_output(ragged_rank, ragged_tensor.values());
} | 0 | [
"CWE-703",
"CWE-681"
]
| tensorflow | 4e2565483d0ffcadc719bd44893fb7f609bb5f12 | 90,108,146,849,207,550,000,000,000,000,000,000,000 | 11 | Fix bug that could cause map_fn to produce incorrect results (rather than an error)
when mapping over a ragged tensor with an inappropriate fn_output_signature. (Note: there are cases where the default value for fn_output_signature is not appropriate, so the user needs to explicitly specify the correct output signature.)
PiperOrigin-RevId: 387606546
Change-Id: Ib4ea27b9634e6ab413f211cfe809a69a90f0e2cd |
var2fpos(
typval_T *varp,
int dollar_lnum, // TRUE when $ is last line
int *fnum, // set to fnum for '0, 'A, etc.
int charcol) // return character column
{
char_u *name;
static pos_T pos;
pos_T *pp;
// Argument can be [lnum, col, coladd].
if (varp->v_type == VAR_LIST)
{
list_T *l;
int len;
int error = FALSE;
listitem_T *li;
l = varp->vval.v_list;
if (l == NULL)
return NULL;
// Get the line number
pos.lnum = list_find_nr(l, 0L, &error);
if (error || pos.lnum <= 0 || pos.lnum > curbuf->b_ml.ml_line_count)
return NULL; // invalid line number
if (charcol)
len = (long)mb_charlen(ml_get(pos.lnum));
else
len = (long)STRLEN(ml_get(pos.lnum));
// Get the column number
// We accept "$" for the column number: last column.
li = list_find(l, 1L);
if (li != NULL && li->li_tv.v_type == VAR_STRING
&& li->li_tv.vval.v_string != NULL
&& STRCMP(li->li_tv.vval.v_string, "$") == 0)
{
pos.col = len + 1;
}
else
{
pos.col = list_find_nr(l, 1L, &error);
if (error)
return NULL;
}
// Accept a position up to the NUL after the line.
if (pos.col == 0 || (int)pos.col > len + 1)
return NULL; // invalid column number
--pos.col;
// Get the virtual offset. Defaults to zero.
pos.coladd = list_find_nr(l, 2L, &error);
if (error)
pos.coladd = 0;
return &pos;
}
if (in_vim9script() && check_for_string_arg(varp, 0) == FAIL)
return NULL;
name = tv_get_string_chk(varp);
if (name == NULL)
return NULL;
if (name[0] == '.') // cursor
{
pos = curwin->w_cursor;
if (charcol)
pos.col = buf_byteidx_to_charidx(curbuf, pos.lnum, pos.col);
return &pos;
}
if (name[0] == 'v' && name[1] == NUL) // Visual start
{
if (VIsual_active)
pos = VIsual;
else
pos = curwin->w_cursor;
if (charcol)
pos.col = buf_byteidx_to_charidx(curbuf, pos.lnum, pos.col);
return &pos;
}
if (name[0] == '\'') // mark
{
pp = getmark_buf_fnum(curbuf, name[1], FALSE, fnum);
if (pp == NULL || pp == (pos_T *)-1 || pp->lnum <= 0)
return NULL;
if (charcol)
pp->col = buf_byteidx_to_charidx(curbuf, pp->lnum, pp->col);
return pp;
}
pos.coladd = 0;
if (name[0] == 'w' && dollar_lnum)
{
pos.col = 0;
if (name[1] == '0') // "w0": first visible line
{
update_topline();
// In silent Ex mode topline is zero, but that's not a valid line
// number; use one instead.
pos.lnum = curwin->w_topline > 0 ? curwin->w_topline : 1;
return &pos;
}
else if (name[1] == '$') // "w$": last visible line
{
validate_botline();
// In silent Ex mode botline is zero, return zero then.
pos.lnum = curwin->w_botline > 0 ? curwin->w_botline - 1 : 0;
return &pos;
}
}
else if (name[0] == '$') // last column or line
{
if (dollar_lnum)
{
pos.lnum = curbuf->b_ml.ml_line_count;
pos.col = 0;
}
else
{
pos.lnum = curwin->w_cursor.lnum;
if (charcol)
pos.col = (colnr_T)mb_charlen(ml_get_curline());
else
pos.col = (colnr_T)STRLEN(ml_get_curline());
}
return &pos;
}
if (in_vim9script())
semsg(_(e_invalid_value_for_line_number_str), name);
return NULL;
} | 0 | [
"CWE-122",
"CWE-787"
]
| vim | 605ec91e5a7330d61be313637e495fa02a6dc264 | 107,676,063,694,300,480,000,000,000,000,000,000,000 | 135 | patch 8.2.3847: illegal memory access when using a lambda with an error
Problem: Illegal memory access when using a lambda with an error.
Solution: Avoid skipping over the NUL after a string. |
SPL_METHOD(SplObjectStorage, rewind)
{
spl_SplObjectStorage *intern = (spl_SplObjectStorage*)zend_object_store_get_object(getThis() TSRMLS_CC);
if (zend_parse_parameters_none() == FAILURE) {
return;
}
zend_hash_internal_pointer_reset_ex(&intern->storage, &intern->pos);
intern->index = 0;
} /* }}} */ | 0 | [
"CWE-416"
]
| php-src | c2e197e4efc663ca55f393bf0e799848842286f3 | 207,361,601,867,277,550,000,000,000,000,000,000,000 | 11 | Fix bug #70168 - Use After Free Vulnerability in unserialize() with SplObjectStorage |
*/
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
unsigned long bitmask = napi->gro_bitmask;
unsigned int i, base = ~0U;
while ((i = ffs(bitmask)) != 0) {
bitmask >>= i;
base += i;
__napi_gro_flush_chain(napi, base, flush_old);
} | 0 | [
"CWE-416"
]
| linux | a4270d6795b0580287453ea55974d948393e66ef | 115,147,949,264,005,450,000,000,000,000,000,000,000 | 11 | net-gro: fix use-after-free read in napi_gro_frags()
If a network driver provides to napi_gro_frags() an
skb with a page fragment of exactly 14 bytes, the call
to gro_pull_from_frag0() will 'consume' the fragment
by calling skb_frag_unref(skb, 0), and the page might
be freed and reused.
Reading eth->h_proto at the end of napi_frags_skb() might
read mangled data, or crash under specific debugging features.
BUG: KASAN: use-after-free in napi_frags_skb net/core/dev.c:5833 [inline]
BUG: KASAN: use-after-free in napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841
Read of size 2 at addr ffff88809366840c by task syz-executor599/8957
CPU: 1 PID: 8957 Comm: syz-executor599 Not tainted 5.2.0-rc1+ #32
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0x172/0x1f0 lib/dump_stack.c:113
print_address_description.cold+0x7c/0x20d mm/kasan/report.c:188
__kasan_report.cold+0x1b/0x40 mm/kasan/report.c:317
kasan_report+0x12/0x20 mm/kasan/common.c:614
__asan_report_load_n_noabort+0xf/0x20 mm/kasan/generic_report.c:142
napi_frags_skb net/core/dev.c:5833 [inline]
napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841
tun_get_user+0x2f3c/0x3ff0 drivers/net/tun.c:1991
tun_chr_write_iter+0xbd/0x156 drivers/net/tun.c:2037
call_write_iter include/linux/fs.h:1872 [inline]
do_iter_readv_writev+0x5f8/0x8f0 fs/read_write.c:693
do_iter_write fs/read_write.c:970 [inline]
do_iter_write+0x184/0x610 fs/read_write.c:951
vfs_writev+0x1b3/0x2f0 fs/read_write.c:1015
do_writev+0x15b/0x330 fs/read_write.c:1058
Fixes: a50e233c50db ("net-gro: restore frag0 optimization")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: syzbot <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
inline void LocalResponseNormalization(
const tflite::LocalResponseNormalizationParams& op_params,
const RuntimeShape& input_shape, const float* input_data,
const RuntimeShape& output_shape, float* output_data) {
ruy::profiler::ScopeLabel label("LocalResponseNormalization");
MatchingFlatSize(input_shape, output_shape);
const auto data_in = MapAsMatrixWithLastDimAsRows(input_data, input_shape);
auto data_out = MapAsMatrixWithLastDimAsRows(output_data, output_shape);
// Carry out local response normalization, vector by vector.
// Since the data are stored column major, making row-wise operation
// probably not memory efficient anyway, we do an explicit for loop over
// the columns.
const int double_range = op_params.range * 2;
Eigen::VectorXf padded_square(data_in.rows() + double_range);
padded_square.setZero();
const float bias = op_params.bias;
for (int r = 0; r < data_in.cols(); ++r) {
// Do local response normalization for data_in(:, r)
// first, compute the square and store them in buffer for repeated use
padded_square.block(op_params.range, 0, data_in.rows(), 1) =
data_in.col(r).cwiseProduct(data_in.col(r)) * op_params.alpha;
// Then, compute the scale and writes them to data_out
float accumulated_scale = 0;
for (int i = 0; i < double_range; ++i) {
accumulated_scale += padded_square(i);
}
for (int i = 0; i < data_in.rows(); ++i) {
accumulated_scale += padded_square(i + double_range);
data_out(i, r) = bias + accumulated_scale;
accumulated_scale -= padded_square(i);
}
}
// In a few cases, the pow computation could benefit from speedups.
if (op_params.beta == 1) {
data_out.array() = data_in.array() * data_out.array().inverse();
} else if (op_params.beta == 0.5f) {
data_out.array() = data_in.array() * data_out.array().sqrt().inverse();
} else {
data_out.array() = data_in.array() * data_out.array().pow(-op_params.beta);
}
} | 0 | [
"CWE-476",
"CWE-369"
]
| tensorflow | 15691e456c7dc9bd6be203b09765b063bf4a380c | 293,585,008,637,242,000,000,000,000,000,000,000,000 | 44 | Prevent dereferencing of null pointers in TFLite's `add.cc`.
PiperOrigin-RevId: 387244946
Change-Id: I56094233327fbd8439b92e1dbb1262176e00eeb9 |
conn_key_extract(struct conntrack *ct, struct dp_packet *pkt, ovs_be16 dl_type,
struct conn_lookup_ctx *ctx, uint16_t zone)
{
const struct eth_header *l2 = dp_packet_eth(pkt);
const struct ip_header *l3 = dp_packet_l3(pkt);
const char *l4 = dp_packet_l4(pkt);
bool ok;
memset(ctx, 0, sizeof *ctx);
if (!l2 || !l3 || !l4) {
return false;
}
ctx->key.zone = zone;
/* XXX In this function we parse the packet (again, it has already
* gone through miniflow_extract()) for two reasons:
*
* 1) To extract the l3 addresses and l4 ports.
* We already have the l3 and l4 headers' pointers. Extracting
* the l3 addresses and the l4 ports is really cheap, since they
* can be found at fixed locations.
* 2) To extract the l4 type.
* Extracting the l4 types, for IPv6 can be quite expensive, because
* it's not at a fixed location.
*
* Here's a way to avoid (2) with the help of the datapath.
* The datapath doesn't keep the packet's extracted flow[1], so
* using that is not an option. We could use the packet's matching
* megaflow, but we have to make sure that the l4 type (nw_proto)
* is unwildcarded. This means either:
*
* a) dpif-netdev unwildcards the l4 type when a new flow is installed
* if the actions contains ct().
*
* b) ofproto-dpif-xlate unwildcards the l4 type when translating a ct()
* action. This is already done in different actions, but it's
* unnecessary for the kernel.
*
* ---
* [1] The reasons for this are that keeping the flow increases
* (slightly) the cache footprint and increases computation
* time as we move the packet around. Most importantly, the flow
* should be updated by the actions and this can be slow, as
* we use a sparse representation (miniflow).
*
*/
ctx->key.dl_type = dl_type;
if (ctx->key.dl_type == htons(ETH_TYPE_IP)) {
bool hwol_bad_l3_csum = dp_packet_ip_checksum_bad(pkt);
if (hwol_bad_l3_csum) {
ok = false;
} else {
bool hwol_good_l3_csum = dp_packet_ip_checksum_valid(pkt);
/* Validate the checksum only when hwol is not supported. */
ok = extract_l3_ipv4(&ctx->key, l3, dp_packet_l3_size(pkt), NULL,
!hwol_good_l3_csum);
}
} else if (ctx->key.dl_type == htons(ETH_TYPE_IPV6)) {
ok = extract_l3_ipv6(&ctx->key, l3, dp_packet_l3_size(pkt), NULL);
} else {
ok = false;
}
if (ok) {
bool hwol_bad_l4_csum = dp_packet_l4_checksum_bad(pkt);
if (!hwol_bad_l4_csum) {
bool hwol_good_l4_csum = dp_packet_l4_checksum_valid(pkt);
/* Validate the checksum only when hwol is not supported. */
if (extract_l4(&ctx->key, l4, dp_packet_l4_size(pkt),
&ctx->icmp_related, l3, !hwol_good_l4_csum,
NULL)) {
ctx->hash = conn_key_hash(&ctx->key, ct->hash_basis);
return true;
}
}
}
return false;
} | 0 | [
"CWE-400"
]
| ovs | 35c280072c1c3ed58202745b7d27fbbd0736999b | 281,109,027,509,751,730,000,000,000,000,000,000,000 | 82 | flow: Support extra padding length.
Although not required, padding can be optionally added until
the packet length is MTU bytes. A packet with extra padding
currently fails sanity checks.
Vulnerability: CVE-2020-35498
Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.")
Reported-by: Joakim Hindersson <[email protected]>
Acked-by: Ilya Maximets <[email protected]>
Signed-off-by: Flavio Leitner <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]> |
static void enable_no_etm(gnutls_priority_t c)
{
c->no_etm = 1;
} | 0 | [
"CWE-310"
]
| gnutls | 21f89efad7014a5ee0debd4cd3d59e27774b29e6 | 263,547,447,688,602,900,000,000,000,000,000,000,000 | 4 | handshake: add FALLBACK_SCSV priority option
This allows clients to enable the TLS_FALLBACK_SCSV mechanism during
the handshake, as defined in RFC7507. |
static CURLcode parseurlandfillconn(struct Curl_easy *data,
struct connectdata *conn)
{
CURLcode result;
CURLU *uh;
CURLUcode uc;
char *hostname;
Curl_up_free(data); /* cleanup previous leftovers first */
/* parse the URL */
uh = data->state.uh = curl_url();
if(!uh)
return CURLE_OUT_OF_MEMORY;
if(data->set.str[STRING_DEFAULT_PROTOCOL] &&
!Curl_is_absolute_url(data->change.url, NULL, MAX_SCHEME_LEN)) {
char *url;
if(data->change.url_alloc)
free(data->change.url);
url = aprintf("%s://%s", data->set.str[STRING_DEFAULT_PROTOCOL],
data->change.url);
if(!url)
return CURLE_OUT_OF_MEMORY;
data->change.url = url;
data->change.url_alloc = TRUE;
}
uc = curl_url_set(uh, CURLUPART_URL, data->change.url,
CURLU_GUESS_SCHEME |
CURLU_NON_SUPPORT_SCHEME |
(data->set.disallow_username_in_url ?
CURLU_DISALLOW_USER : 0) |
(data->set.path_as_is ? CURLU_PATH_AS_IS : 0));
if(uc)
return Curl_uc_to_curlcode(uc);
uc = curl_url_get(uh, CURLUPART_SCHEME, &data->state.up.scheme, 0);
if(uc)
return Curl_uc_to_curlcode(uc);
result = findprotocol(data, conn, data->state.up.scheme);
if(result)
return result;
uc = curl_url_get(uh, CURLUPART_USER, &data->state.up.user,
CURLU_URLDECODE);
if(!uc) {
conn->user = strdup(data->state.up.user);
if(!conn->user)
return CURLE_OUT_OF_MEMORY;
conn->bits.user_passwd = TRUE;
}
else if(uc != CURLUE_NO_USER)
return Curl_uc_to_curlcode(uc);
uc = curl_url_get(uh, CURLUPART_PASSWORD, &data->state.up.password,
CURLU_URLDECODE);
if(!uc) {
conn->passwd = strdup(data->state.up.password);
if(!conn->passwd)
return CURLE_OUT_OF_MEMORY;
conn->bits.user_passwd = TRUE;
}
else if(uc != CURLUE_NO_PASSWORD)
return Curl_uc_to_curlcode(uc);
uc = curl_url_get(uh, CURLUPART_OPTIONS, &data->state.up.options,
CURLU_URLDECODE);
if(!uc) {
conn->options = strdup(data->state.up.options);
if(!conn->options)
return CURLE_OUT_OF_MEMORY;
}
else if(uc != CURLUE_NO_OPTIONS)
return Curl_uc_to_curlcode(uc);
uc = curl_url_get(uh, CURLUPART_HOST, &data->state.up.hostname, 0);
if(uc) {
if(!strcasecompare("file", data->state.up.scheme))
return CURLE_OUT_OF_MEMORY;
}
uc = curl_url_get(uh, CURLUPART_PATH, &data->state.up.path, 0);
if(uc)
return Curl_uc_to_curlcode(uc);
uc = curl_url_get(uh, CURLUPART_PORT, &data->state.up.port,
CURLU_DEFAULT_PORT);
if(uc) {
if(!strcasecompare("file", data->state.up.scheme))
return CURLE_OUT_OF_MEMORY;
}
else {
unsigned long port = strtoul(data->state.up.port, NULL, 10);
conn->remote_port = curlx_ultous(port);
}
(void)curl_url_get(uh, CURLUPART_QUERY, &data->state.up.query, 0);
hostname = data->state.up.hostname;
if(!hostname)
/* this is for file:// transfers, get a dummy made */
hostname = (char *)"";
if(hostname[0] == '[') {
/* This looks like an IPv6 address literal. See if there is an address
scope. */
char *percent = strchr(++hostname, '%');
conn->bits.ipv6_ip = TRUE;
if(percent) {
unsigned int identifier_offset = 3;
char *endp;
unsigned long scope;
if(strncmp("%25", percent, 3) != 0) {
infof(data,
"Please URL encode %% as %%25, see RFC 6874.\n");
identifier_offset = 1;
}
scope = strtoul(percent + identifier_offset, &endp, 10);
if(*endp == ']') {
/* The address scope was well formed. Knock it out of the
hostname. */
memmove(percent, endp, strlen(endp) + 1);
conn->scope_id = (unsigned int)scope;
}
else {
/* Zone identifier is not numeric */
#if defined(HAVE_NET_IF_H) && defined(IFNAMSIZ) && defined(HAVE_IF_NAMETOINDEX)
char ifname[IFNAMSIZ + 2];
char *square_bracket;
unsigned int scopeidx = 0;
strncpy(ifname, percent + identifier_offset, IFNAMSIZ + 2);
/* Ensure nullbyte termination */
ifname[IFNAMSIZ + 1] = '\0';
square_bracket = strchr(ifname, ']');
if(square_bracket) {
/* Remove ']' */
*square_bracket = '\0';
scopeidx = if_nametoindex(ifname);
if(scopeidx == 0) {
infof(data, "Invalid network interface: %s; %s\n", ifname,
strerror(errno));
}
}
if(scopeidx > 0) {
char *p = percent + identifier_offset + strlen(ifname);
/* Remove zone identifier from hostname */
memmove(percent, p, strlen(p) + 1);
conn->scope_id = scopeidx;
}
else
#endif /* HAVE_NET_IF_H && IFNAMSIZ */
infof(data, "Invalid IPv6 address format\n");
}
}
percent = strchr(hostname, ']');
if(percent)
/* terminate IPv6 numerical at end bracket */
*percent = 0;
}
/* make sure the connect struct gets its own copy of the host name */
conn->host.rawalloc = strdup(hostname);
if(!conn->host.rawalloc)
return CURLE_OUT_OF_MEMORY;
conn->host.name = conn->host.rawalloc;
if(data->set.scope_id)
/* Override any scope that was set above. */
conn->scope_id = data->set.scope_id;
return CURLE_OK;
} | 0 | [
"CWE-416"
]
| curl | 81d135d67155c5295b1033679c606165d4e28f3f | 313,712,119,772,349,350,000,000,000,000,000,000,000 | 175 | Curl_close: clear data->multi_easy on free to avoid use-after-free
Regression from b46cfbc068 (7.59.0)
CVE-2018-16840
Reported-by: Brian Carpenter (Geeknik Labs)
Bug: https://curl.haxx.se/docs/CVE-2018-16840.html |
void vmalloc_sync_all(void)
{
sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END, 0);
} | 0 | [
"CWE-264"
]
| linux | 548acf19234dbda5a52d5a8e7e205af46e9da840 | 145,261,921,573,971,180,000,000,000,000,000,000,000 | 4 | x86/mm: Expand the exception table logic to allow new handling options
Huge amounts of help from Andy Lutomirski and Borislav Petkov to
produce this. Andy provided the inspiration to add classes to the
exception table with a clever bit-squeezing trick, Boris pointed
out how much cleaner it would all be if we just had a new field.
Linus Torvalds blessed the expansion with:
' I'd rather not be clever in order to save just a tiny amount of space
in the exception table, which isn't really criticial for anybody. '
The third field is another relative function pointer, this one to a
handler that executes the actions.
We start out with three handlers:
1: Legacy - just jumps the to fixup IP
2: Fault - provide the trap number in %ax to the fixup code
3: Cleaned up legacy for the uaccess error hack
Signed-off-by: Tony Luck <[email protected]>
Reviewed-by: Borislav Petkov <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/f6af78fcbd348cf4939875cfda9c19689b5e50b8.1455732970.git.tony.luck@intel.com
Signed-off-by: Ingo Molnar <[email protected]> |
get_address(const char *value, /* I - Value string */
int defport) /* I - Default port */
{
char buffer[1024], /* Hostname + port number buffer */
defpname[255], /* Default port name */
*hostname, /* Hostname or IP */
*portname; /* Port number or name */
http_addrlist_t *addrlist; /* Address list */
/*
* Check for an empty value...
*/
if (!*value)
{
cupsdLogMessage(CUPSD_LOG_ERROR, "Bad (empty) address.");
return (NULL);
}
/*
* Grab a hostname and port number; if there is no colon and the port name
* is only digits, then we have a port number by itself...
*/
strlcpy(buffer, value, sizeof(buffer));
if ((portname = strrchr(buffer, ':')) != NULL && !strchr(portname, ']'))
{
*portname++ = '\0';
hostname = buffer;
}
else
{
for (portname = buffer; isdigit(*portname & 255); portname ++);
if (*portname)
{
/*
* Use the default port...
*/
sprintf(defpname, "%d", defport);
portname = defpname;
hostname = buffer;
}
else
{
/*
* The buffer contains just a port number...
*/
portname = buffer;
hostname = NULL;
}
}
if (hostname && !strcmp(hostname, "*"))
hostname = NULL;
/*
* Now lookup the address using httpAddrGetList()...
*/
if ((addrlist = httpAddrGetList(hostname, AF_UNSPEC, portname)) == NULL)
cupsdLogMessage(CUPSD_LOG_ERROR, "Hostname lookup for \"%s\" failed.",
hostname ? hostname : "(nil)");
return (addrlist);
} | 0 | []
| cups | d47f6aec436e0e9df6554436e391471097686ecc | 315,878,551,902,184,800,000,000,000,000,000,000,000 | 70 | Fix local privilege escalation to root and sandbox bypasses in scheduler
(rdar://37836779, rdar://37836995, rdar://37837252, rdar://37837581) |
WORK_STATE tls_post_process_server_certificate(SSL *s, WORK_STATE wst)
{
X509 *x;
EVP_PKEY *pkey = NULL;
const SSL_CERT_LOOKUP *clu;
size_t certidx;
int i;
i = ssl_verify_cert_chain(s, s->session->peer_chain);
if (i == -1) {
s->rwstate = SSL_RETRY_VERIFY;
return WORK_MORE_A;
}
/*
* The documented interface is that SSL_VERIFY_PEER should be set in order
* for client side verification of the server certificate to take place.
* However, historically the code has only checked that *any* flag is set
* to cause server verification to take place. Use of the other flags makes
* no sense in client mode. An attempt to clean up the semantics was
* reverted because at least one application *only* set
* SSL_VERIFY_FAIL_IF_NO_PEER_CERT. Prior to the clean up this still caused
* server verification to take place, after the clean up it silently did
* nothing. SSL_CTX_set_verify()/SSL_set_verify() cannot validate the flags
* sent to them because they are void functions. Therefore, we now use the
* (less clean) historic behaviour of performing validation if any flag is
* set. The *documented* interface remains the same.
*/
if (s->verify_mode != SSL_VERIFY_NONE && i == 0) {
SSLfatal(s, ssl_x509err2alert(s->verify_result),
SSL_R_CERTIFICATE_VERIFY_FAILED);
return WORK_ERROR;
}
ERR_clear_error(); /* but we keep s->verify_result */
/*
* Inconsistency alert: cert_chain does include the peer's certificate,
* which we don't include in statem_srvr.c
*/
x = sk_X509_value(s->session->peer_chain, 0);
pkey = X509_get0_pubkey(x);
if (pkey == NULL || EVP_PKEY_missing_parameters(pkey)) {
SSLfatal(s, SSL_AD_INTERNAL_ERROR,
SSL_R_UNABLE_TO_FIND_PUBLIC_KEY_PARAMETERS);
return WORK_ERROR;
}
if ((clu = ssl_cert_lookup_by_pkey(pkey, &certidx)) == NULL) {
SSLfatal(s, SSL_AD_ILLEGAL_PARAMETER, SSL_R_UNKNOWN_CERTIFICATE_TYPE);
return WORK_ERROR;
}
/*
* Check certificate type is consistent with ciphersuite. For TLS 1.3
* skip check since TLS 1.3 ciphersuites can be used with any certificate
* type.
*/
if (!SSL_IS_TLS13(s)) {
if ((clu->amask & s->s3.tmp.new_cipher->algorithm_auth) == 0) {
SSLfatal(s, SSL_AD_ILLEGAL_PARAMETER, SSL_R_WRONG_CERTIFICATE_TYPE);
return WORK_ERROR;
}
}
X509_free(s->session->peer);
X509_up_ref(x);
s->session->peer = x;
s->session->verify_result = s->verify_result;
/* Save the current hash state for when we receive the CertificateVerify */
if (SSL_IS_TLS13(s)
&& !ssl_handshake_hash(s, s->cert_verify_hash,
sizeof(s->cert_verify_hash),
&s->cert_verify_hash_len)) {
/* SSLfatal() already called */;
return WORK_ERROR;
}
return WORK_FINISHED_CONTINUE;
} | 0 | [
"CWE-835"
]
| openssl | 758754966791c537ea95241438454aa86f91f256 | 268,768,344,572,141,200,000,000,000,000,000,000,000 | 79 | Fix invalid handling of verify errors in libssl
In the event that X509_verify() returned an internal error result then
libssl would mishandle this and set rwstate to SSL_RETRY_VERIFY. This
subsequently causes SSL_get_error() to return SSL_ERROR_WANT_RETRY_VERIFY.
That return code is supposed to only ever be returned if an application
is using an app verify callback to complete replace the use of
X509_verify(). Applications may not be written to expect that return code
and could therefore crash (or misbehave in some other way) as a result.
CVE-2021-4044
Reviewed-by: Tomas Mraz <[email protected]> |
static void i2c_w_mask(struct sd *sd,
u8 reg,
u8 value,
u8 mask)
{
int rc;
u8 oldval;
value &= mask; /* Enforce mask on value */
rc = i2c_r(sd, reg);
if (rc < 0)
return;
oldval = rc & ~mask; /* Clear the masked bits */
value |= oldval; /* Set the desired bits */
i2c_w(sd, reg, value);
} | 0 | [
"CWE-476"
]
| linux | 998912346c0da53a6dbb71fab3a138586b596b30 | 201,035,238,427,018,400,000,000,000,000,000,000,000 | 16 | media: ov519: add missing endpoint sanity checks
Make sure to check that we have at least one endpoint before accessing
the endpoint array to avoid dereferencing a NULL-pointer on stream
start.
Note that these sanity checks are not redundant as the driver is mixing
looking up altsettings by index and by number, which need not coincide.
Fixes: 1876bb923c98 ("V4L/DVB (12079): gspca_ov519: add support for the ov511 bridge")
Fixes: b282d87332f5 ("V4L/DVB (12080): gspca_ov519: Fix ov518+ with OV7620AE (Trust spacecam 320)")
Cc: stable <[email protected]> # 2.6.31
Cc: Hans de Goede <[email protected]>
Signed-off-by: Johan Hovold <[email protected]>
Signed-off-by: Hans Verkuil <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]> |
tr_find(unsigned int c, char table[256], VALUE del, VALUE nodel)
{
if (c < 256) {
return table[c] != 0;
}
else {
VALUE v = UINT2NUM(c);
if (del && !NIL_P(rb_hash_lookup(del, v))) {
if (!nodel || NIL_P(rb_hash_lookup(nodel, v))) {
return TRUE;
}
}
return FALSE;
}
} | 0 | [
"CWE-119"
]
| ruby | 1c2ef610358af33f9ded3086aa2d70aac03dcac5 | 318,913,905,709,135,600,000,000,000,000,000,000,000 | 16 | * string.c (rb_str_justify): CVE-2009-4124.
Fixes a bug reported by
Emmanouel Kellinis <Emmanouel.Kellinis AT kpmg.co.uk>, KPMG London;
Patch by nobu.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@26038 b2dd03c8-39d4-4d8f-98ff-823fe69b080e |
R_API RBinJavaElementValuePair *r_bin_java_element_pair_new(ut8 *buffer, ut64 sz, ut64 buf_offset) {
if (!buffer || sz < 4) {
return NULL;
}
RBinJavaElementValuePair *evp = R_NEW0 (RBinJavaElementValuePair);
if (!evp) {
return NULL;
}
// TODO: What is the signifigance of evp element
evp->element_name_idx = R_BIN_JAVA_USHORT (buffer, 0);
ut64 offset = 2;
evp->file_offset = buf_offset;
evp->name = r_bin_java_get_utf8_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, evp->element_name_idx);
if (!evp->name) {
// TODO: eprintf unable to find the name for the given index
eprintf ("ElementValue Name is invalid.\n");
evp->name = strdup ("UNKNOWN");
}
if (offset >= sz) {
free (evp);
return NULL;
}
evp->value = r_bin_java_element_value_new (buffer + offset, sz - offset, buf_offset + offset);
offset += evp->value->size;
if (offset >= sz) {
free (evp->value);
free (evp);
return NULL;
}
evp->size = offset;
return evp;
} | 0 | [
"CWE-787"
]
| radare2 | 9650e3c352f675687bf6c6f65ff2c4a3d0e288fa | 256,394,024,147,906,200,000,000,000,000,000,000,000 | 32 | Fix oobread segfault in java arith8.class ##crash
* Reported by Cen Zhang via huntr.dev |
longlong val_int(void) { return val_int_from_real(false); } | 0 | [
"CWE-416",
"CWE-703"
]
| server | 08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917 | 227,377,851,108,006,720,000,000,000,000,000,000,000 | 1 | MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter prematurely freed item.
When table is reopened from cache vcol_info contains stale
expression. We refresh expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]> |
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_surface *srf;
void *cmd;
if (res->func->destroy == vmw_gb_surface_destroy) {
(void) vmw_gb_surface_destroy(res);
return;
}
if (res->id != -1) {
cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"destruction.\n");
return;
}
vmw_surface_destroy_encode(res->id, cmd);
vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
/*
* used_memory_size_atomic, or separate lock
* to avoid taking dev_priv::cmdbuf_mutex in
* the destroy path.
*/
mutex_lock(&dev_priv->cmdbuf_mutex);
srf = vmw_res_to_srf(res);
dev_priv->used_memory_size -= res->backup_size;
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
vmw_fifo_resource_dec(dev_priv);
} | 0 | [
"CWE-20"
]
| linux | ee9c4e681ec4f58e42a83cb0c22a0289ade1aacf | 303,616,059,614,719,620,000,000,000,000,000,000,000 | 37 | drm/vmwgfx: limit the number of mip levels in vmw_gb_surface_define_ioctl()
The 'req->mip_levels' parameter in vmw_gb_surface_define_ioctl() is
a user-controlled 'uint32_t' value which is used as a loop count limit.
This can lead to a kernel lockup and DoS. Add check for 'req->mip_levels'.
References:
https://bugzilla.redhat.com/show_bug.cgi?id=1437431
Cc: <[email protected]>
Signed-off-by: Vladis Dronov <[email protected]>
Reviewed-by: Sinclair Yeh <[email protected]> |
dnname_string(netdissect_options *ndo, u_short dnaddr)
{
#ifdef HAVE_DNET_HTOA
struct dn_naddr dna;
char *dnname;
dna.a_len = sizeof(short);
memcpy((char *)dna.a_addr, (char *)&dnaddr, sizeof(short));
dnname = dnet_htoa(&dna);
if(dnname != NULL)
return (strdup(dnname));
else
return(dnnum_string(ndo, dnaddr));
#else
return(dnnum_string(ndo, dnaddr)); /* punt */
#endif
} | 0 | [
"CWE-125"
]
| tcpdump | c6e0531b5def26ecf912e8de6ade86cbdaed3751 | 308,213,804,771,812,850,000,000,000,000,000,000,000 | 17 | CVE-2017-12899/DECnet: Fix bounds checking.
If we're skipping over padding before the *real* flags, check whether
the real flags are in the captured data before fetching it. This fixes
a buffer over-read discovered by Kamil Frankowicz.
Note one place where we don't need to do bounds checking as it's already
been done.
Add a test using the capture file supplied by the reporter(s). |
static int fake_panic_set(void *data, u64 val)
{
mce_reset();
fake_panic = val;
return 0;
} | 0 | [
"CWE-362"
]
| linux | b3b7c4795ccab5be71f080774c45bbbcc75c2aaf | 203,154,872,959,675,660,000,000,000,000,000,000,000 | 6 | x86/MCE: Serialize sysfs changes
The check_interval file in
/sys/devices/system/machinecheck/machinecheck<cpu number>
directory is a global timer value for MCE polling. If it is changed by one
CPU, mce_restart() broadcasts the event to other CPUs to delete and restart
the MCE polling timer and __mcheck_cpu_init_timer() reinitializes the
mce_timer variable.
If more than one CPU writes a specific value to the check_interval file
concurrently, mce_timer is not protected from such concurrent accesses and
all kinds of explosions happen. Since only root can write to those sysfs
variables, the issue is not a big deal security-wise.
However, concurrent writes to these configuration variables is void of
reason so the proper thing to do is to serialize the access with a mutex.
Boris:
- Make store_int_with_restart() use device_store_ulong() to filter out
negative intervals
- Limit min interval to 1 second
- Correct locking
- Massage commit message
Signed-off-by: Seunghun Han <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: Greg Kroah-Hartman <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: linux-edac <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected] |
/*
 * Decode one NetFlow v9 data flowset into nfdump's internal common_record_t
 * format and append the resulting records to fs->nffile's output block.
 *
 * exporter     - per-exporter state (id, sampler chain, boot_time, stats).
 * data_flowset - raw flowset, starting at the 4-byte flowset header.
 * fs           - flow source; carries the output file/block and first/last seen.
 * table        - translation table built from the matching template; holds the
 *                copy "sequence" program plus various field offsets.
 *
 * NOTE(review): offsets taken from the translation table (sampler_offset,
 * ICMP_offset, and the per-sequence input_offset values) are used to index
 * the input buffer without being re-checked against size_left here —
 * presumably validated when the template was parsed; confirm. This function
 * is from a version later patched for exactly such bounds issues.
 */
static inline void Process_v9_data(exporter_v9_domain_t *exporter, void *data_flowset, FlowSource_t *fs, input_translation_t *table ){
uint64_t start_time, end_time, sampling_rate;
uint32_t size_left;
uint8_t *in, *out;
int i;
char *string;
size_left = GET_FLOWSET_LENGTH(data_flowset) - 4; // -4 for data flowset header -> id and length
// map input buffer as a byte array
in = (uint8_t *)(data_flowset + 4); // skip flowset header
dbg_printf("[%u] Process data flowset size: %u\n", exporter->info.id, size_left);
if ( table->sampler_offset )
dbg_printf("table sampler offset: %u\n", table->sampler_offset);
dbg_printf("[%u] Exporter is 0x%llu\n", exporter->info.id, (long long unsigned)exporter);
dbg_printf("[%u] Exporter has sampler: %s\n", exporter->info.id, exporter->sampler ? "yes" : "no");
// Check if sampling is announced
// Resolve the effective sampling rate: per-record sampler id if the template
// carries one, otherwise the exporter's standard sampler (id -1), otherwise
// the configured default; 'overwrite_sampling' trumps everything below.
if ( table->sampler_offset && exporter->sampler ) {
generic_sampler_t *sampler = exporter->sampler;
uint32_t sampler_id;
if ( table->sampler_size == 2 ) {
sampler_id = Get_val16((void *)&in[table->sampler_offset]);
} else {
sampler_id = in[table->sampler_offset];
}
dbg_printf("Extract sampler: %u\n", sampler_id);
// usually not that many samplers, so following a chain is not too expensive.
while ( sampler && sampler->info.id != sampler_id )
sampler = sampler->next;
if ( sampler ) {
sampling_rate = sampler->info.interval;
dbg_printf("[%u] Sampling ID %u available\n", exporter->info.id, sampler_id);
dbg_printf("[%u] Sampler_offset : %u\n", exporter->info.id, table->sampler_offset);
dbg_printf("[%u] Sampler Data : %s\n", exporter->info.id, exporter->sampler == NULL ? "not available" : "available");
dbg_printf("[%u] Sampling rate: %llu\n", exporter->info.id, (long long unsigned)sampling_rate);
} else {
sampling_rate = default_sampling;
dbg_printf("[%u] Sampling ID %u not (yet) available\n", exporter->info.id, sampler_id);
}
} else {
generic_sampler_t *sampler = exporter->sampler;
while ( sampler && sampler->info.id != -1 )
sampler = sampler->next;
if ( sampler ) {
sampling_rate = sampler->info.interval;
dbg_printf("[%u] Std sampling available for this flow source: Rate: %llu\n", exporter->info.id, (long long unsigned)sampling_rate);
} else {
sampling_rate = default_sampling;
dbg_printf("[%u] No Sampling record found\n", exporter->info.id);
}
}
if ( overwrite_sampling > 0 ) {
sampling_rate = overwrite_sampling;
dbg_printf("[%u] Hard overwrite sampling rate: %llu\n", exporter->info.id, (long long unsigned)sampling_rate);
}
if ( sampling_rate != 1 )
SetFlag(table->flags, FLAG_SAMPLED);
// Main loop: each iteration consumes one fixed-size input record
// (table->input_record_size bytes) and emits one output record.
while (size_left) {
common_record_t *data_record;
// Less than a full record left: treat up to 3 bytes as padding,
// anything larger as a corrupt flowset; either way stop here.
if ( (size_left < table->input_record_size) ) {
if ( size_left > 3 ) {
syslog(LOG_WARNING,"Process_v9: Corrupt data flowset? Pad bytes: %u", size_left);
dbg_printf("Process_v9: Corrupt data flowset? Pad bytes: %u, table record_size: %u\n",
size_left, table->input_record_size);
}
size_left = 0;
continue;
}
// check for enough space in output buffer
if ( !CheckBufferSpace(fs->nffile, table->output_record_size) ) {
// this should really never occur, because the buffer gets flushed ealier
syslog(LOG_ERR,"Process_v9: output buffer size error. Abort v9 record processing");
dbg_printf("Process_v9: output buffer size error. Abort v9 record processing");
return;
}
processed_records++;
// map file record to output buffer
data_record = (common_record_t *)fs->nffile->buff_ptr;
// map output buffer as a byte array
out = (uint8_t *)data_record;
dbg_printf("[%u] Process data record: %u addr: %llu, in record size: %u, buffer size_left: %u\n",
exporter->info.id, processed_records, (long long unsigned)((ptrdiff_t)in - (ptrdiff_t)data_flowset),
table->input_record_size, size_left);
// fill the data record
data_record->flags = table->flags;
data_record->size = table->output_record_size;
data_record->type = CommonRecordType;
data_record->ext_map = table->extension_info.map->map_id;
data_record->exporter_sysid = exporter->info.sysid;
data_record->reserved = 0;
// Per-record accumulators; filled by the *_sampling/Time sequences below
// via the 'stack' pointers stored in the translation table.
table->packets = 0;
table->bytes = 0;
table->out_packets = 0;
table->out_bytes = 0;
dbg_printf("%u] Process data record: MapID: %u\n", exporter->info.id, table->extension_info.map->map_id);
// apply copy and processing sequence
// Interpret the translation "program": each sequence entry copies or
// zeroes one field from in[input_offset] to out[output_offset].
for ( i=0; i<table->number_of_sequences; i++ ) {
int input_offset = table->sequence[i].input_offset;
int output_offset = table->sequence[i].output_offset;
void *stack = table->sequence[i].stack;
switch (table->sequence[i].id) {
case nop:
break;
case move8:
out[output_offset] = in[input_offset];
break;
case move16:
*((uint16_t *)&out[output_offset]) = Get_val16((void *)&in[input_offset]);
break;
case move32:
*((uint32_t *)&out[output_offset]) = Get_val32((void *)&in[input_offset]);
break;
case move40:
/* 64bit access to potentially unaligned output buffer. use 2 x 32bit for _LP64 CPUs */
{ type_mask_t t;
t.val.val64 = Get_val40((void *)&in[input_offset]);
*((uint32_t *)&out[output_offset]) = t.val.val32[0];
*((uint32_t *)&out[output_offset+4]) = t.val.val32[1];
}
break;
case move48:
/* 64bit access to potentially unaligned output buffer. use 2 x 32bit for _LP64 CPUs */
{ type_mask_t t;
t.val.val64 = Get_val48((void *)&in[input_offset]);
*((uint32_t *)&out[output_offset]) = t.val.val32[0];
*((uint32_t *)&out[output_offset+4]) = t.val.val32[1];
}
break;
case move56:
/* 64bit access to potentially unaligned output buffer. use 2 x 32bit for _LP64 CPUs */
{ type_mask_t t;
t.val.val64 = Get_val56((void *)&in[input_offset]);
*((uint32_t *)&out[output_offset]) = t.val.val32[0];
*((uint32_t *)&out[output_offset+4]) = t.val.val32[1];
}
break;
case move64:
{ type_mask_t t;
t.val.val64 = Get_val64((void *)&in[input_offset]);
*((uint32_t *)&out[output_offset]) = t.val.val32[0];
*((uint32_t *)&out[output_offset+4]) = t.val.val32[1];
} break;
case move96:
{ *((uint32_t *)&out[output_offset]) = Get_val32((void *)&in[input_offset]);
*((uint32_t *)&out[output_offset+4]) = Get_val32((void *)&in[input_offset+4]);
*((uint32_t *)&out[output_offset+8]) = Get_val32((void *)&in[input_offset+8]);
} break;
case move128:
/* 64bit access to potentially unaligned output buffer. use 2 x 32bit for _LP64 CPUs */
{ type_mask_t t;
t.val.val64 = Get_val64((void *)&in[input_offset]);
*((uint32_t *)&out[output_offset]) = t.val.val32[0];
*((uint32_t *)&out[output_offset+4]) = t.val.val32[1];
t.val.val64 = Get_val64((void *)&in[input_offset+8]);
*((uint32_t *)&out[output_offset+8]) = t.val.val32[0];
*((uint32_t *)&out[output_offset+12]) = t.val.val32[1];
} break;
case move32_sampling:
/* 64bit access to potentially unaligned output buffer. use 2 x 32bit for _LP64 CPUs */
// Counter field: scale by the sampling rate and remember the scaled
// value (via 'stack') for the per-protocol statistics below.
{ type_mask_t t;
t.val.val64 = Get_val32((void *)&in[input_offset]);
t.val.val64 *= sampling_rate;
*((uint32_t *)&out[output_offset]) = t.val.val32[0];
*((uint32_t *)&out[output_offset+4]) = t.val.val32[1];
*(uint64_t *)stack = t.val.val64;
} break;
case move64_sampling:
/* 64bit access to potentially unaligned output buffer. use 2 x 32bit for _LP64 CPUs */
{ type_mask_t t;
t.val.val64 = Get_val64((void *)&in[input_offset]);
t.val.val64 *= sampling_rate;
*((uint32_t *)&out[output_offset]) = t.val.val32[0];
*((uint32_t *)&out[output_offset+4]) = t.val.val32[1];
*(uint64_t *)stack = t.val.val64;
} break;
case move_mac:
/* 64bit access to potentially unaligned output buffer. use 2 x 32bit for _LP64 CPUs */
{ type_mask_t t;
t.val.val64 = Get_val48((void *)&in[input_offset]);
*((uint32_t *)&out[output_offset]) = t.val.val32[0];
*((uint32_t *)&out[output_offset+4]) = t.val.val32[1];
}
break;
case move_mpls:
*((uint32_t *)&out[output_offset]) = Get_val24((void *)&in[input_offset]);
break;
case move_ulatency:
/* 64bit access to potentially unaligned output buffer. use 2 x 32bit for _LP64 CPUs */
// Accumulates into the output field (read-modify-write), unlike the
// plain move* cases.
{ type_mask_t t;
t.val.val32[0] = *((uint32_t *)&out[output_offset]);
t.val.val32[1] = *((uint32_t *)&out[output_offset+4]);
t.val.val64 += Get_val32((void *)&in[input_offset]);
*((uint32_t *)&out[output_offset]) = t.val.val32[0];
*((uint32_t *)&out[output_offset+4]) = t.val.val32[1];
} break;
case move_slatency:
/* 64bit access to potentially unaligned output buffer. use 2 x 32bit for _LP64 CPUs */
{ type_mask_t t;
t.val.val32[0] = *((uint32_t *)&out[output_offset]);
t.val.val32[1] = *((uint32_t *)&out[output_offset+4]);
// update sec to usec
t.val.val64 += 1000000 * Get_val32((void *)&in[input_offset]);
*((uint32_t *)&out[output_offset]) = t.val.val32[0];
*((uint32_t *)&out[output_offset+4]) = t.val.val32[1];
} break;
case move_user_20:
memcpy((void *)&out[output_offset],(void *)&in[input_offset],20);
out[output_offset+20] = 0; // trailing 0 for string
break;
case move_user_65:
memcpy((void *)&out[output_offset],(void *)&in[input_offset],65);
out[output_offset+65] = 0; // trailing 0 for string
break;
case TimeMsec:
{ uint64_t DateMiliseconds = Get_val64((void *)&in[input_offset]);
*(uint64_t *)stack = DateMiliseconds;
} break;
case PushTimeMsec:
{ type_mask_t t;
t.val.val64 = Get_val64((void *)&in[input_offset]);
*(uint64_t *)stack = t.val.val64;
*((uint32_t *)&out[output_offset]) = t.val.val32[0];
*((uint32_t *)&out[output_offset+4]) = t.val.val32[1];
} break;
// zero sequences for unavailable elements
case zero8:
out[output_offset] = 0;
break;
case zero16:
*((uint16_t *)&out[output_offset]) = 0;
break;
case zero32:
*((uint32_t *)&out[output_offset]) = 0;
break;
case zero64: {
*((uint32_t *)&out[output_offset]) = 0;
*((uint32_t *)&out[output_offset+4]) = 0;
} break;
case zero96:
{ *((uint32_t *)&out[output_offset]) = 0;
*((uint32_t *)&out[output_offset+4]) = 0;
*((uint32_t *)&out[output_offset+8]) = 0;
} break;
case zero128: {
*((uint32_t *)&out[output_offset]) = 0;
*((uint32_t *)&out[output_offset+4]) = 0;
*((uint32_t *)&out[output_offset+8]) = 0;
*((uint32_t *)&out[output_offset+12]) = 0;
} break;
default:
syslog(LOG_ERR, "Process_v9: Software bug! Unknown Sequence: %u. at %s line %d",
table->sequence[i].id, __FILE__, __LINE__);
dbg_printf("Software bug! Unknown Sequence: %u. at %s line %d",
table->sequence[i].id, __FILE__, __LINE__);
}
}
// Ungly ICMP hack for v9, because some IOS version are lazzy
// most of them send ICMP in dst port field some don't some have both
if ( data_record->prot == IPPROTO_ICMP || data_record->prot == IPPROTO_ICMPV6 ) {
if ( table->ICMP_offset ) {
data_record->dstport = Get_val16((void *)&in[table->ICMP_offset]);
}
if ( data_record->dstport == 0 && data_record->srcport != 0 ) {
// some IOSes are even lazzier and map ICMP code in src port - ughh
data_record->dstport = data_record->srcport;
data_record->srcport = 0;
}
}
// Check for NSEL/NEL Event time
// Time handling, in priority order: NSEL flow_start/flow_end (absolute
// msec), then an event timestamp, then no time at all, and finally the
// classic v9 sysuptime-relative first/last which needs boot_time.
if ( table->flow_start ) {
data_record->first = table->flow_start / 1000;
data_record->msec_first = table->flow_start % 1000;
start_time = table->flow_start;
// test for tags 152/153
if ( table->flow_end ) {
data_record->last = table->flow_end / 1000;
data_record->msec_last = table->flow_end % 1000;
end_time = table->flow_end;
} else {
data_record->last = data_record->first;
data_record->msec_last = data_record->msec_first;
end_time = table->flow_start;
}
dbg_printf("Found time flow start MSEC: %llu\n", table->EventTimeMsec);
} else if ( table->EventTimeMsec && data_record->first == 0 ) {
data_record->first = table->EventTimeMsec / 1000;
data_record->msec_first = table->EventTimeMsec % 1000;
data_record->last = data_record->first;
data_record->msec_last = data_record->msec_first;
start_time = table->EventTimeMsec;
end_time = table->EventTimeMsec;
dbg_printf("Found Time Event MSEC: %llu\n", table->EventTimeMsec);
} else if ( data_record->first == 0 && data_record->last == 0 ) {
// hmm - a record with no time at all ..
data_record->first = 0;
data_record->msec_last = 0;
start_time = 0;
end_time = 0;
} else {
// First > Last indicates the 32-bit msec counter wrapped between
// start and end; compensate with one full 2^32 period.
uint32_t First = data_record->first;
uint32_t Last = data_record->last;
if ( First > Last )
/* First in msec, in case of msec overflow, between start and end */
start_time = exporter->boot_time - 0x100000000LL + (uint64_t)First;
else
start_time = (uint64_t)First + exporter->boot_time;
/* end time in msecs */
end_time = (uint64_t)Last + exporter->boot_time;
if ( (end_time - start_time) > 0xffc00000 && table->bytes < 2000 ) {
dbg_printf("CISCO bugfix!\n");
start_time += 0xffc00000;
}
data_record->first = start_time/1000;
data_record->msec_first = start_time - data_record->first*1000;
data_record->last = end_time/1000;
data_record->msec_last = end_time - data_record->last*1000;
if ( data_record->first == 0 && data_record->last == 0 )
data_record->last = 0;
}
// update first_seen, last_seen
if ( start_time < fs->first_seen )
fs->first_seen = start_time;
if ( end_time > fs->last_seen )
fs->last_seen = end_time;
// check if we need to record the router IP address
if ( table->router_ip_offset ) {
int output_offset = table->router_ip_offset;
if ( exporter->info.sa_family == PF_INET6 ) {
/* 64bit access to potentially unaligned output buffer. use 2 x 32bit for _LP64 CPUs */
type_mask_t t;
t.val.val64 = exporter->info.ip.v6[0];
*((uint32_t *)&out[output_offset]) = t.val.val32[0];
*((uint32_t *)&out[output_offset+4]) = t.val.val32[1];
t.val.val64 = exporter->info.ip.v6[1];
*((uint32_t *)&out[output_offset+8]) = t.val.val32[0];
*((uint32_t *)&out[output_offset+12]) = t.val.val32[1];
} else {
*((uint32_t *)&out[output_offset]) = exporter->info.ip.v4;
}
}
// Ugly hack. CISCO never really implemented #38/#39 tags in the records - so take it from the
// header, unless some data is filled in
if ( table->engine_offset ) {
if ( *((uint32_t *)&out[table->engine_offset]) == 0 ) {
tpl_ext_25_t *tpl = (tpl_ext_25_t *)&out[table->engine_offset];
tpl->engine_type = ( exporter->info.id >> 8 ) & 0xFF;
tpl->engine_id = exporter->info.id & 0xFF;
}
}
// check, if we need to store the packet received time
if ( table->received_offset ) {
type_mask_t t;
t.val.val64 = (uint64_t)((uint64_t)fs->received.tv_sec * 1000LL) + (uint64_t)((uint64_t)fs->received.tv_usec / 1000LL);
*((uint32_t *)&out[table->received_offset]) = t.val.val32[0];
*((uint32_t *)&out[table->received_offset+4]) = t.val.val32[1];
}
// Per-protocol statistics, fed from the accumulators filled by the
// *_sampling sequences above.
switch (data_record->prot ) { // switch protocol of
case IPPROTO_ICMP:
fs->nffile->stat_record->numflows_icmp++;
fs->nffile->stat_record->numpackets_icmp += table->packets;
fs->nffile->stat_record->numbytes_icmp += table->bytes;
fs->nffile->stat_record->numpackets_icmp += table->out_packets;
fs->nffile->stat_record->numbytes_icmp += table->out_bytes;
break;
case IPPROTO_TCP:
fs->nffile->stat_record->numflows_tcp++;
fs->nffile->stat_record->numpackets_tcp += table->packets;
fs->nffile->stat_record->numbytes_tcp += table->bytes;
fs->nffile->stat_record->numpackets_tcp += table->out_packets;
fs->nffile->stat_record->numbytes_tcp += table->out_bytes;
break;
case IPPROTO_UDP:
fs->nffile->stat_record->numflows_udp++;
fs->nffile->stat_record->numpackets_udp += table->packets;
fs->nffile->stat_record->numbytes_udp += table->bytes;
fs->nffile->stat_record->numpackets_udp += table->out_packets;
fs->nffile->stat_record->numbytes_udp += table->out_bytes;
break;
default:
fs->nffile->stat_record->numflows_other++;
fs->nffile->stat_record->numpackets_other += table->packets;
fs->nffile->stat_record->numbytes_other += table->bytes;
fs->nffile->stat_record->numpackets_other += table->out_packets;
fs->nffile->stat_record->numbytes_other += table->out_bytes;
}
exporter->flows++;
fs->nffile->stat_record->numflows++;
fs->nffile->stat_record->numpackets += table->packets;
fs->nffile->stat_record->numbytes += table->bytes;
fs->nffile->stat_record->numpackets += table->out_packets;
fs->nffile->stat_record->numbytes += table->out_bytes;
// Optional extended statistics: bytes-per-packet and port histograms.
if ( fs->xstat ) {
uint32_t bpp = table->packets ? table->bytes/table->packets : 0;
if ( bpp > MAX_BPP )
bpp = MAX_BPP;
if ( data_record->prot == IPPROTO_TCP ) {
fs->xstat->bpp_histogram->tcp.bpp[bpp]++;
fs->xstat->bpp_histogram->tcp.count++;
fs->xstat->port_histogram->src_tcp.port[data_record->srcport]++;
fs->xstat->port_histogram->dst_tcp.port[data_record->dstport]++;
fs->xstat->port_histogram->src_tcp.count++;
fs->xstat->port_histogram->dst_tcp.count++;
} else if ( data_record->prot == IPPROTO_UDP ) {
fs->xstat->bpp_histogram->udp.bpp[bpp]++;
fs->xstat->bpp_histogram->udp.count++;
fs->xstat->port_histogram->src_udp.port[data_record->srcport]++;
fs->xstat->port_histogram->dst_udp.port[data_record->dstport]++;
fs->xstat->port_histogram->src_udp.count++;
fs->xstat->port_histogram->dst_udp.count++;
}
}
if ( verbose ) {
master_record_t master_record;
ExpandRecord_v2((common_record_t *)data_record, &(table->extension_info), &(exporter->info), &master_record);
format_file_block_record(&master_record, &string, 0);
printf("%s\n", string);
}
// Commit the record to the output block and advance both cursors.
fs->nffile->block_header->size += data_record->size;
fs->nffile->block_header->NumRecords++;
fs->nffile->buff_ptr = (common_record_t *)((pointer_addr_t)data_record + data_record->size);
// advance input
size_left -= table->input_record_size;
in += table->input_record_size;
// buffer size sanity check
if ( fs->nffile->block_header->size > BUFFSIZE ) {
// should never happen
syslog(LOG_ERR,"### Software error ###: %s line %d", __FILE__, __LINE__);
syslog(LOG_ERR,"Process v9: Output buffer overflow! Flush buffer and skip records.");
syslog(LOG_ERR,"Buffer size: %u > %u", fs->nffile->block_header->size, BUFFSIZE);
// reset buffer
fs->nffile->block_header->size = 0;
fs->nffile->block_header->NumRecords = 0;
fs->nffile->buff_ptr = (void *)((pointer_addr_t)fs->nffile->block_header + sizeof(data_block_header_t) );
return;
}
}
} // End of Process_v9_data | 1 | []
| nfdump | ff0e855bd1f51bed9fc5d8559c64d3cfb475a5d8 | 141,032,412,999,564,350,000,000,000,000,000,000,000 | 489 | Fix security issues in netflow_v9.c and ipfix.c |
ptaGetArrays(PTA   *pta,
             NUMA **pnax,
             NUMA **pnay)
{
l_int32  i, npts;
NUMA    *nx, *ny;

    PROCNAME("ptaGetArrays");

        /* At least one of the two output arrays must be requested. */
    if (!pnax && !pnay)
        return ERROR_INT("no output requested", procName, 1);
    if (pnax) *pnax = NULL;
    if (pnay) *pnay = NULL;
    if (!pta)
        return ERROR_INT("pta not defined", procName, 1);
    npts = ptaGetCount(pta);
    if (npts == 0)
        return ERROR_INT("pta is empty", procName, 1);

        /* Copy the x coordinates, if requested. */
    if (pnax) {
        if ((nx = numaCreate(npts)) == NULL)
            return ERROR_INT("nax not made", procName, 1);
        for (i = 0; i < npts; i++)
            nx->array[i] = pta->x[i];
        nx->n = npts;
        *pnax = nx;
    }

        /* Copy the y coordinates, if requested. */
    if (pnay) {
        if ((ny = numaCreate(npts)) == NULL)
            return ERROR_INT("nay not made", procName, 1);
        for (i = 0; i < npts; i++)
            ny->array[i] = pta->y[i];
        ny->n = npts;
        *pnay = ny;
    }
    return 0;
} | 0 | [
"CWE-119",
"CWE-787"
]
| leptonica | ee301cb2029db8a6289c5295daa42bba7715e99a | 54,386,159,667,490,200,000,000,000,000,000,000,000 | 36 | Security fixes: expect final changes for release 1.75.3.
* Fixed a debian security issue with fscanf() reading a string with
possible buffer overflow.
* There were also a few similar situations with sscanf(). |
bgp_attr_unintern_sub (struct attr *attr)
{
  /* Release the interned sub-attributes of 'attr' and clear their
     presence flags.  The flags are cleared unconditionally; the
     unintern calls run only when the corresponding pointer is set.  */
  if (attr->aspath)
    aspath_unintern (&attr->aspath);
  UNSET_FLAG (attr->flag, BGP_ATTR_AS_PATH);

  if (attr->community)
    community_unintern (&attr->community);
  UNSET_FLAG (attr->flag, BGP_ATTR_COMMUNITIES);

  /* Attributes stored in the optional 'extra' block.  */
  if (attr->extra)
    {
      if (attr->extra->ecommunity)
        ecommunity_unintern (&attr->extra->ecommunity);
      UNSET_FLAG (attr->flag, BGP_ATTR_EXT_COMMUNITIES);

      if (attr->extra->cluster)
        cluster_unintern (attr->extra->cluster);
      UNSET_FLAG (attr->flag, BGP_ATTR_CLUSTER_LIST);

      if (attr->extra->transit)
        transit_unintern (attr->extra->transit);
    }
} | 0 | []
| quagga | 8794e8d229dc9fe29ea31424883433d4880ef408 | 199,091,067,759,877,500,000,000,000,000,000,000,000 | 25 | bgpd: Fix regression in args consolidation, total should be inited from args
* bgp_attr.c: (bgp_attr_unknown) total should be initialised from the args. |
ephy_string_commandline_args_to_uris (char **arguments,
                                      GError **error)
{
  /* Convert command-line arguments to URIs: existing local files become
   * file:// URIs; anything else is passed through converted to UTF-8.
   * Returns a newly allocated NULL-terminated strv (caller frees with
   * g_strfreev()), or NULL on conversion error (with *error set) or when
   * 'arguments' is NULL. */
  gchar **args;
  GFile *file;
  guint i;

  if (arguments == NULL)
    return NULL;

  args = g_malloc0 (sizeof (gchar *) * (g_strv_length (arguments) + 1));

  for (i = 0; arguments[i] != NULL; ++i) {
    file = g_file_new_for_commandline_arg (arguments [i]);
    if (g_file_is_native (file) && g_file_query_exists (file, NULL)) {
      args[i] = g_file_get_uri (file);
    } else {
      args[i] = g_locale_to_utf8 (arguments [i], -1,
                                  NULL, NULL, error);
      if (error && *error) {
        /* Fix: the GFile was previously leaked on this error path. */
        g_object_unref (file);
        g_strfreev (args);
        return NULL;
      }
    }
    g_object_unref (file);
  }

  return args;
} | 0 | [
"CWE-787"
]
| epiphany | 486da133569ebfc436c959a7419565ab102e8525 | 97,849,480,803,823,360,000,000,000,000,000,000,000 | 29 | Fix memory corruption in ephy_string_shorten()
This fixes a regression that I introduced in 232c613472b38ff0d0d97338f366024ddb9cd228.
I got my browser stuck in a crash loop today while visiting a website
with a page title greater than ephy-embed.c's MAX_TITLE_LENGTH, the only
condition in which ephy_string_shorten() is ever used. Turns out this
commit is wrong: an ellipses is a multibyte character (three bytes in
UTF-8) and so we're writing past the end of the buffer when calling
strcat() here. Ooops.
Shame it took nearly four years to notice and correct this.
Part-of: <https://gitlab.gnome.org/GNOME/epiphany/-/merge_requests/1106> |
static int write_tree_extension(git_index *index, git_filebuf *file)
{
	/* Serialize the index's cache-tree (if any) as a TREE extension. */
	struct index_extension header;
	git_buf payload = GIT_BUF_INIT;
	int error;

	/* No cache-tree loaded: nothing to write, not an error. */
	if (index->tree == NULL)
		return 0;

	error = git_tree_cache_write(&payload, index->tree);
	if (error < 0)
		return error;

	memset(&header, 0, sizeof(header));
	memcpy(&header.signature, INDEX_EXT_TREECACHE_SIG, 4);
	header.extension_size = (uint32_t)payload.size;

	error = write_extension(file, &header, &payload);

	git_buf_free(&payload);
	return error;
} | 0 | [
"CWE-415",
"CWE-190"
]
| libgit2 | 3db1af1f370295ad5355b8f64b865a2a357bcac0 | 271,451,825,390,718,070,000,000,000,000,000,000,000 | 22 | index: error out on unreasonable prefix-compressed path lengths
When computing the complete path length from the encoded
prefix-compressed path, we end up just allocating the complete path
without ever checking what the encoded path length actually is. This can
easily lead to a denial of service by just encoding an unreasonable long
path name inside of the index. Git already enforces a maximum path
length of 4096 bytes. As we also have that enforcement ready in some
places, just make sure that the resulting path is smaller than
GIT_PATH_MAX.
Reported-by: Krishna Ram Prakash R <[email protected]>
Reported-by: Vivek Parikh <[email protected]> |
mwifiex_uap_bss_wep(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
{
	/* Append one WEP-key TLV per configured WEP40/WEP104 key to the
	 * command buffer, advancing *tlv_buf and *param_size accordingly. */
	struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
	struct host_cmd_tlv_wep_key *wep_key;
	u8 *tlv = *tlv_buf;
	u16 cmd_size = *param_size;
	int i;

	for (i = 0; i < NUM_WEP_KEYS; i++) {
		/* Skip unset keys and keys of unsupported length. */
		if (!bss_cfg->wep_cfg[i].length)
			continue;
		if (bss_cfg->wep_cfg[i].length != WLAN_KEY_LEN_WEP40 &&
		    bss_cfg->wep_cfg[i].length != WLAN_KEY_LEN_WEP104)
			continue;

		wep_key = (struct host_cmd_tlv_wep_key *)tlv;
		wep_key->header.type = cpu_to_le16(TLV_TYPE_UAP_WEP_KEY);
		/* TLV length covers key_index + is_default + key bytes. */
		wep_key->header.len =
			cpu_to_le16(bss_cfg->wep_cfg[i].length + 2);
		wep_key->key_index = bss_cfg->wep_cfg[i].key_index;
		wep_key->is_default = bss_cfg->wep_cfg[i].is_default;
		memcpy(wep_key->key, bss_cfg->wep_cfg[i].key,
		       bss_cfg->wep_cfg[i].length);

		cmd_size += sizeof(struct mwifiex_ie_types_header) + 2 +
			    bss_cfg->wep_cfg[i].length;
		tlv += sizeof(struct mwifiex_ie_types_header) + 2 +
		       bss_cfg->wep_cfg[i].length;
	}

	*param_size = cmd_size;
	*tlv_buf = tlv;
} | 0 | [
"CWE-120",
"CWE-787"
]
| linux | 7caac62ed598a196d6ddf8d9c121e12e082cac3a | 66,997,266,128,007,870,000,000,000,000,000,000,000 | 33 | mwifiex: Fix three heap overflow at parsing element in cfg80211_ap_settings
mwifiex_update_vs_ie(),mwifiex_set_uap_rates() and
mwifiex_set_wmm_params() call memcpy() without checking
the destination size.Since the source is given from
user-space, this may trigger a heap buffer overflow.
Fix them by putting the length check before performing memcpy().
This fix addresses CVE-2019-14814,CVE-2019-14815,CVE-2019-14816.
Signed-off-by: Wen Huang <[email protected]>
Acked-by: Ganapathi Bhat <[email protected]>
Signed-off-by: Kalle Valo <[email protected]> |
/*
 * Initialize a client-side TLS session on an already-connected RELP socket.
 *
 * Steps: temporarily switch the socket to blocking mode, perform one-time
 * GnuTLS global init, create the session, install either anonymous or
 * certificate credentials, run the TLS handshake (arming a retry if GnuTLS
 * reports EAGAIN/EINTR), and finally restore non-blocking mode.
 *
 * Returns a RELP_RET_* status via the ENTER/LEAVE_RELPFUNC mechanism.
 */
relpTcpConnectTLSInit(relpTcp_t *pThis)
{
int r;
int sockflags;
ENTER_RELPFUNC;
RELPOBJ_assert(pThis, Tcp);
/* We expect a non blocking socket to establish a tls session */
if((sockflags = fcntl(pThis->sock, F_GETFL)) != -1) {
sockflags &= ~O_NONBLOCK;
sockflags = fcntl(pThis->sock, F_SETFL, sockflags);
}
if(sockflags == -1) {
pThis->pEngine->dbgprint("error %d unsetting fcntl(O_NONBLOCK) on relp socket", errno);
ABORT_FINALIZE(RELP_RET_IO_ERR);
}
/* One-time process-wide GnuTLS initialization, guarded by a flag.
 * NOTE(review): the guard is a plain variable — presumably all connects run
 * on one thread; confirm if this can race. */
if(!called_gnutls_global_init) {
#if GNUTLS_VERSION_NUMBER <= 0x020b00
/* gcry_control must be called first, so that the thread system is correctly set up */
gcry_control (GCRYCTL_SET_THREAD_CBS, &gcry_threads_pthread);
#endif
gnutls_global_init();
/* uncomment for (very intense) debug help
 * gnutls_global_set_log_function(logFunction);
 * gnutls_global_set_log_level(10); // 0 (no) to 9 (most), 10 everything
 */
pThis->pEngine->dbgprint("DDDD: gnutls_global_init() called\n");
called_gnutls_global_init = 1;
}
r = gnutls_init(&pThis->session, GNUTLS_CLIENT);
if(chkGnutlsCode(pThis, "Failed to initialize GnuTLS", RELP_RET_ERR_TLS_SETUP, r)) {
ABORT_FINALIZE(RELP_RET_ERR_TLS_SETUP);
}
/* Stash pThis so callbacks (e.g. certificate verification) can find it. */
gnutls_session_set_ptr(pThis->session, pThis);
CHKRet(relpTcpTLSSetPrio(pThis));
if(isAnonAuth(pThis)) {
r = gnutls_anon_allocate_client_credentials(&pThis->anoncred);
if(chkGnutlsCode(pThis, "Failed to allocate client credentials", RELP_RET_ERR_TLS_SETUP, r)) {
ABORT_FINALIZE(RELP_RET_ERR_TLS_SETUP);
}
/* put the anonymous credentials to the current session */
r = gnutls_credentials_set(pThis->session, GNUTLS_CRD_ANON, pThis->anoncred);
if(chkGnutlsCode(pThis, "Failed to set credentials", RELP_RET_ERR_TLS_SETUP, r)) {
ABORT_FINALIZE(RELP_RET_ERR_TLS_SETUP);
}
} else {
/* Certificate-based authentication: requires a GnuTLS with the
 * per-credential verify-callback API; otherwise fail with NO_TLS_AUTH. */
#ifdef HAVE_GNUTLS_CERTIFICATE_SET_VERIFY_FUNCTION
r = gnutls_certificate_allocate_credentials(&pThis->xcred);
if(chkGnutlsCode(pThis, "Failed to allocate certificate credentials", RELP_RET_ERR_TLS_SETUP, r)) {
ABORT_FINALIZE(RELP_RET_ERR_TLS_SETUP);
}
if(pThis->caCertFile != NULL) {
r = gnutls_certificate_set_x509_trust_file(pThis->xcred,
pThis->caCertFile, GNUTLS_X509_FMT_PEM);
if(r < 0) {
chkGnutlsCode(pThis, "Failed to set certificate trust file", RELP_RET_ERR_TLS_SETUP, r);
ABORT_FINALIZE(RELP_RET_ERR_TLS_SETUP);
}
pThis->pEngine->dbgprint("librelp: obtained %d certificates from %s\n", r, pThis->caCertFile);
}
if(pThis->ownCertFile != NULL) {
r = gnutls_certificate_set_x509_key_file (pThis->xcred,
pThis->ownCertFile, pThis->privKeyFile, GNUTLS_X509_FMT_PEM);
if(chkGnutlsCode(pThis, "Failed to set certificate key file", RELP_RET_ERR_TLS_SETUP, r)) {
ABORT_FINALIZE(RELP_RET_ERR_TLS_SETUP);
}
}
r = gnutls_credentials_set(pThis->session, GNUTLS_CRD_CERTIFICATE, pThis->xcred);
if(chkGnutlsCode(pThis, "Failed to set credentials", RELP_RET_ERR_TLS_SETUP, r)) {
ABORT_FINALIZE(RELP_RET_ERR_TLS_SETUP);
}
/* With certificates configured, "no auth" degrades to fingerprint mode. */
if(pThis->authmode == eRelpAuthMode_None)
pThis->authmode = eRelpAuthMode_Fingerprint;
gnutls_certificate_set_verify_function(pThis->xcred, relpTcpVerifyCertificateCallback);
# else /* #ifdef HAVE_GNUTLS_CERTIFICATE_SET_VERIFY_FUNCTION */
ABORT_FINALIZE(RELP_RET_ERR_NO_TLS_AUTH);
# endif /* #ifdef HAVE_GNUTLS_CERTIFICATE_SET_VERIFY_FUNCTION */
}
gnutls_transport_set_ptr(pThis->session, (gnutls_transport_ptr_t) pThis->sock);
//gnutls_handshake_set_timeout(pThis->session, GNUTLS_DEFAULT_HANDSHAKE_TIMEOUT);
/* Perform the TLS handshake */
/* Single-shot attempt: on EINTR/EAGAIN the retry is recorded in rtryOp
 * and presumably resumed elsewhere by the engine; confirm. */
do {
r = gnutls_handshake(pThis->session);
pThis->pEngine->dbgprint("DDDD: gnutls_handshake: %d: %s\n", r, gnutls_strerror(r));
if(r == GNUTLS_E_INTERRUPTED || r == GNUTLS_E_AGAIN) {
pThis->pEngine->dbgprint("librelp: gnutls_handshake must be retried\n");
pThis->rtryOp = relpTCP_RETRY_handshake;
} else if(r != GNUTLS_E_SUCCESS) {
chkGnutlsCode(pThis, "TLS handshake failed", RELP_RET_ERR_TLS_SETUP, r);
ABORT_FINALIZE(RELP_RET_ERR_TLS_SETUP);
}
}
while(0);
//while (r < 0 && gnutls_error_is_fatal(r) == 0);
/* set the socket to non-blocking IO (we do this on the recv() for non-TLS */
if((sockflags = fcntl(pThis->sock, F_GETFL)) != -1) {
sockflags |= O_NONBLOCK;
/* SETFL could fail too, so get it caught by the subsequent
 * error check. */
sockflags = fcntl(pThis->sock, F_SETFL, sockflags);
}
finalize_it:
LEAVE_RELPFUNC;
} | 0 | [
"CWE-787"
]
| librelp | 2cfe657672636aa5d7d2a14cfcb0a6ab9d1f00cf | 207,668,871,005,323,050,000,000,000,000,000,000,000 | 111 | unify error message generation |
gstd_get_mech(gss_OID mech_oid)
{
	/*
	 * Return a malloc'd, printable name for a GSS mechanism OID.
	 * The well-known krb5 OID maps to "krb5"; other OIDs are rendered
	 * via gss_oid_to_str() when available, otherwise "".
	 * Caller must free() the result.  Returns NULL on failure.
	 */
#ifdef HAVE_GSS_OID_TO_STR
	OM_uint32	 maj;
	OM_uint32	 min;
	gss_buffer_desc	 buf;
#endif
	char		*ret;

	/* Fast path: recognise the well-known krb5 mechanism OID. */
	if (mech_oid->length == sizeof(KNC_KRB5_MECH_OID) - 1 &&
	    memcmp(mech_oid->elements, KNC_KRB5_MECH_OID,
	    sizeof(KNC_KRB5_MECH_OID) - 1) == 0) {
		if ((ret = strdup("krb5")) == NULL) {
			LOG(LOG_ERR, ("unable to malloc"));
			return NULL;
		}
		return ret;
	}

#ifdef HAVE_GSS_OID_TO_STR
	maj = gss_oid_to_str(&min, mech_oid, &buf);
	if (maj != GSS_S_COMPLETE) {
		LOG(LOG_ERR, ("unable to display mechanism OID"));
		return NULL;
	}
	ret = strndup(buf.value, buf.length);
	/* Fix: release the GSS-allocated buffer; it was previously leaked. */
	gss_release_buffer(&min, &buf);
#else
	ret = strdup("");
#endif
	if (!ret)
		LOG(LOG_ERR, ("unable to malloc"));
	return ret;
} | 0 | [
"CWE-400",
"CWE-703"
]
| knc | f237f3e09ecbaf59c897f5046538a7b1a3fa40c1 | 125,308,245,722,582,750,000,000,000,000,000,000,000 | 36 | knc: fix a couple of memory leaks.
One of these can be remotely triggered during the authentication
phase which leads to a remote DoS possibility.
Pointed out by: Imre Rad <[email protected]> |
/*
 * Fill in font metadata for a PostScript font.
 *
 * First delegates to gs_default_font_info() for everything except the four
 * string members handled here, then fetches Copyright/Notice/FamilyName/
 * FullName and the FSType embedding rights from the font's /FontInfo dict.
 * Each member successfully found is flagged in info->members.
 *
 * Returns the code from gs_default_font_info(), or a typecheck error when
 * /FSType is present but not an integer.
 */
zfont_info(gs_font *font, const gs_point *pscale, int members,
gs_font_info_t *info)
{
/* Strip the string members from the delegated request; they are
 * resolved below from the FontInfo dictionary instead. */
int code = gs_default_font_info(font, pscale, members &
~(FONT_INFO_COPYRIGHT | FONT_INFO_NOTICE |
FONT_INFO_FAMILY_NAME | FONT_INFO_FULL_NAME),
info);
const ref *pfdict;
ref *pfontinfo, *pvalue;

if (code < 0)
return code;
pfdict = &pfont_data(font)->dict;
/* No (dictionary-typed) FontInfo: report only what the default pass found. */
if (dict_find_string(pfdict, "FontInfo", &pfontinfo) <= 0 ||
!r_has_type(pfontinfo, t_dictionary))
return 0;
if ((members & FONT_INFO_COPYRIGHT) &&
zfont_info_has(pfontinfo, "Copyright", &info->Copyright))
info->members |= FONT_INFO_COPYRIGHT;
if ((members & FONT_INFO_NOTICE) &&
zfont_info_has(pfontinfo, "Notice", &info->Notice))
info->members |= FONT_INFO_NOTICE;
if ((members & FONT_INFO_FAMILY_NAME) &&
zfont_info_has(pfontinfo, "FamilyName", &info->FamilyName))
info->members |= FONT_INFO_FAMILY_NAME;
if ((members & FONT_INFO_FULL_NAME) &&
zfont_info_has(pfontinfo, "FullName", &info->FullName))
info->members |= FONT_INFO_FULL_NAME;
/* FSType (embedding rights) must be an integer; anything else is an error. */
if ((members & FONT_INFO_EMBEDDING_RIGHTS)
&& (dict_find_string(pfontinfo, "FSType", &pvalue) > 0)) {
if (r_type(pvalue) != t_integer)
return gs_note_error(gs_error_typecheck);
info->EmbeddingRights = pvalue->value.intval;
info->members |= FONT_INFO_EMBEDDING_RIGHTS;
}
return code;
} | 0 | [
"CWE-704"
]
| ghostpdl | 548bb434e81dadcc9f71adf891a3ef5bea8e2b4e | 307,574,980,181,806,440,000,000,000,000,000,000,000 | 38 | PS interpreter - add some type checking
These were 'probably' safe anyway, since they mostly treat the objects
as integers without checking, which at least can't result in a crash.
Nevertheless, we ought to check.
The return from comparedictkeys could be wrong if one of the keys had
a value which was not an array, it could incorrectly decide the two
were in fact the same. |
int adapter_create_bonding(struct btd_adapter *adapter, const bdaddr_t *bdaddr,
				uint8_t addr_type, uint8_t io_cap)
{
	/* Only one pairing attempt may be outstanding per adapter. */
	if (adapter->pair_device_id > 0) {
		btd_error(adapter->dev_id,
			"Unable pair since another pairing is in progress");
		return -EBUSY;
	}

	/* Pause any running discovery before starting the bond attempt. */
	suspend_discovery(adapter);

	return adapter_bonding_attempt(adapter, bdaddr, addr_type, io_cap);
} | 0 | [
"CWE-862",
"CWE-863"
]
| bluez | b497b5942a8beb8f89ca1c359c54ad67ec843055 | 218,327,169,477,293,860,000,000,000,000,000,000,000 | 13 | adapter: Fix storing discoverable setting
The discoverable setting shall only be stored when changed via the Discoverable
property and not when a discovery client sets it, as that is considered
temporary, just for the lifetime of the discovery.
print_import_ok (PKT_public_key *pk, unsigned int reason)
{
  /* Emit an IMPORT_OK status line: "<reason> <hex fingerprint>".  */
  byte fpr[MAX_FINGERPRINT_LEN];
  char buf[MAX_FINGERPRINT_LEN*2+30];
  char *p;
  size_t i, fprlen;

  snprintf (buf, sizeof buf, "%u ", reason);
  p = buf + strlen (buf);

  /* Append the key fingerprint as uppercase hex, two chars per byte.  */
  fingerprint_from_pk (pk, fpr, &fprlen);
  for (i = 0; i < fprlen; i++, p += 2)
    sprintf (p, "%02X", fpr[i]);

  write_status_text (STATUS_IMPORT_OK, buf);
} | 0 | [
"CWE-20"
]
| gnupg | f0b33b6fb8e0586e9584a7a409dcc31263776a67 | 37,954,558,749,098,210,000,000,000,000,000,000,000 | 16 | gpg: Import only packets which are allowed in a keyblock.
* g10/import.c (valid_keyblock_packet): New.
(read_block): Store only valid packets.
--
A corrupted key, which for example included a mangled public key
encrypted packet, used to corrupt the keyring. This change skips all
packets which are not allowed in a keyblock.
GnuPG-bug-id: 1455
(cherry-picked from commit f795a0d59e197455f8723c300eebf59e09853efa) |
Subsets and Splits