func
stringlengths 0
484k
| target
int64 0
1
| cwe
listlengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
*/
static void re_yyensure_buffer_stack (yyscan_t yyscanner)
{
yy_size_t num_to_alloc;
struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
if (!yyg->yy_buffer_stack) {
/* First allocation is just for 2 elements, since we don't know if this
* scanner will even need a stack. We use 2 instead of 1 to avoid an
* immediate realloc on the next call.
*/
num_to_alloc = 1; // After all that talk, this was set to 1 anyways...
yyg->yy_buffer_stack = (struct yy_buffer_state**)re_yyalloc
(num_to_alloc * sizeof(struct yy_buffer_state*)
, yyscanner);
if ( ! yyg->yy_buffer_stack )
YY_FATAL_ERROR( "out of dynamic memory in re_yyensure_buffer_stack()" );
memset(yyg->yy_buffer_stack, 0, num_to_alloc * sizeof(struct yy_buffer_state*));
yyg->yy_buffer_stack_max = num_to_alloc;
yyg->yy_buffer_stack_top = 0;
return;
}
if (yyg->yy_buffer_stack_top >= (yyg->yy_buffer_stack_max) - 1){
/* Increase the buffer to prepare for a possible push. */
yy_size_t grow_size = 8 /* arbitrary grow size */;
num_to_alloc = yyg->yy_buffer_stack_max + grow_size;
yyg->yy_buffer_stack = (struct yy_buffer_state**)re_yyrealloc
(yyg->yy_buffer_stack,
num_to_alloc * sizeof(struct yy_buffer_state*)
, yyscanner);
if ( ! yyg->yy_buffer_stack )
YY_FATAL_ERROR( "out of dynamic memory in re_yyensure_buffer_stack()" );
/* zero only the new slots.*/
memset(yyg->yy_buffer_stack + yyg->yy_buffer_stack_max, 0, grow_size * sizeof(struct yy_buffer_state*));
yyg->yy_buffer_stack_max = num_to_alloc;
}
| 0 |
[
"CWE-476",
"CWE-703",
"CWE-125"
] |
yara
|
3119b232c9c453c98d8fa8b6ae4e37ba18117cd4
| 165,543,874,067,529,490,000,000,000,000,000,000,000 | 43 |
re_lexer: Make reading escape sequences more robust (#586)
* Add test for issue #503
* re_lexer: Make reading escape sequences more robust
This commit fixes parsing incomplete escape sequences at the end of a
regular expression and parsing things like \xxy (invalid hex digits)
which before were silently turned into (char)255.
Close #503
* Update re_lexer.c
|
/*
 * Append a "{lnum} lines, {nchars} characters" style message to the end of
 * IObuff (after any text already there).  With 'shortmess' flag SHM_LINES a
 * compact "NL, NC" form is used instead.
 * insert_space: prepend one space before the text when non-zero.
 * lnum:         number of lines (singular form used when it equals 1).
 * nchars:       number of characters/bytes (singular form when it equals 1).
 */
msg_add_lines(
int insert_space,
long lnum,
off_T nchars)
{
char_u *p;
/* Write at the current end of IObuff. */
p = IObuff + STRLEN(IObuff);
if (insert_space)
*p++ = ' ';
if (shortmess(SHM_LINES))
/* Short form, e.g. "12L, 345C"; bounded by the remaining IOSIZE space. */
vim_snprintf((char *)p, IOSIZE - (p - IObuff),
"%ldL, %lldC", lnum, (varnumber_T)nchars);
else
{
/* Long form with translated singular/plural messages. */
if (lnum == 1)
STRCPY(p, _("1 line, "));
else
sprintf((char *)p, _("%ld lines, "), lnum);
p += STRLEN(p);
if (nchars == 1)
STRCPY(p, _("1 character"));
else
vim_snprintf((char *)p, IOSIZE - (p - IObuff),
_("%lld characters"), (varnumber_T)nchars);
}
}
| 0 |
[
"CWE-200",
"CWE-668"
] |
vim
|
5a73e0ca54c77e067c3b12ea6f35e3e8681e8cf8
| 90,024,264,915,422,480,000,000,000,000,000,000,000 | 28 |
patch 8.0.1263: others can read the swap file if a user is careless
Problem: Others can read the swap file if a user is careless with his
primary group.
Solution: If the group permission allows for reading but the world
permissions doesn't, make sure the group is right.
|
/*
 * Move the HTTP/2 session to state `nstate`, logging the transition and
 * performing per-state housekeeping (idle timeouts, scoreboard status).
 * `action` is only used as a label in the transition log line.
 * No-op when the session is already in `nstate`.
 */
static void transit(h2_session *session, const char *action, h2_session_state nstate)
{
apr_time_t timeout;
int ostate, loglvl;
const char *s;
if (session->state != nstate) {
ostate = session->state;
session->state = nstate;
/* BUSY<->WAIT flips are frequent; demote them to trace level. */
loglvl = APLOG_DEBUG;
if ((ostate == H2_SESSION_ST_BUSY && nstate == H2_SESSION_ST_WAIT)
|| (ostate == H2_SESSION_ST_WAIT && nstate == H2_SESSION_ST_BUSY)){
loglvl = APLOG_TRACE1;
}
ap_log_cerror(APLOG_MARK, loglvl, 0, session->c,
H2_SSSN_LOG(APLOGNO(03078), session,
"transit [%s] -- %s --> [%s]"),
h2_session_state_str(ostate), action,
h2_session_state_str(nstate));
switch (session->state) {
case H2_SESSION_ST_IDLE:
if (!session->remote.emitted_count) {
/* on fresh connections, with async mpm, do not return
* to mpm for a second. This gives the first request a better
* chance to arrive (and the connection to leave IDLE state).
* If we return to mpm right away, this connection has the
* same chance of being cleaned up by the mpm as connections
* that already served requests - not fair. */
session->idle_sync_until = apr_time_now() + apr_time_from_sec(1);
s = "timeout";
timeout = session->s->timeout;
update_child_status(session, SERVER_BUSY_READ, "idle");
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
H2_SSSN_LOG("", session, "enter idle, timeout = %d sec"),
(int)apr_time_sec(H2MAX(session->s->timeout, session->s->keep_alive_timeout)));
}
else if (session->open_streams) {
/* streams still in flight: keep the regular read timeout. */
s = "timeout";
timeout = session->s->timeout;
update_child_status(session, SERVER_BUSY_READ, "idle");
}
else {
/* normal keepalive setup */
s = "keepalive";
timeout = session->s->keep_alive_timeout;
update_child_status(session, SERVER_BUSY_KEEPALIVE, "idle");
}
/* Arm the idle deadline chosen above; `s` labels it in the log. */
session->idle_until = apr_time_now() + timeout;
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
H2_SSSN_LOG("", session, "enter idle, %s = %d sec"),
s, (int)apr_time_sec(timeout));
break;
case H2_SESSION_ST_DONE:
update_child_status(session, SERVER_CLOSING, "done");
break;
default:
/* nop */
break;
}
}
}
| 0 |
[
"CWE-770"
] |
mod_h2
|
dd05d49abe0f67512ce9ed5ba422d7711effecfb
| 29,471,569,424,109,870,000,000,000,000,000,000,000 | 63 |
* fixes Timeout vs. KeepAliveTimeout behaviour, see PR 63534 (for trunk now,
mpm event backport to 2.4.x up for vote).
* Fixes stream cleanup when connection throttling is in place.
* Counts stream resets by client on streams initiated by client as cause
for connection throttling.
* Header length checks are now logged similar to HTTP/1.1 protocol handler (thanks @mkaufmann)
* Header length is checked also on the merged value from several header instances
and results in a 431 response.
|
/*
 * One-time selection of the Win32 file I/O wrapper functions.
 * On NT-based Windows (VER_PLATFORM_WIN32_NT) the UTF-8-aware wrappers are
 * installed; otherwise (Win9x or GetVersionEx failure) the native "ANSI"
 * wrappers are used.  Subsequent calls are no-ops via the static guard.
 * NOTE(review): the guard is not thread-safe; presumably callers serialize
 * initialization — confirm against libxml2's init path.
 */
xmlInitPlatformSpecificIo(void)
{
static int xmlPlatformIoInitialized = 0;
OSVERSIONINFO osvi;
if(xmlPlatformIoInitialized)
return;
osvi.dwOSVersionInfoSize = sizeof(osvi);
if(GetVersionEx(&osvi) && (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT)) {
/* NT and later: filenames are handled as UTF-8. */
xmlWrapStat = xmlWrapStatUtf8;
xmlWrapOpen = xmlWrapOpenUtf8;
#ifdef HAVE_ZLIB_H
xmlWrapGzOpen = xmlWrapGzOpenUtf8;
#endif
} else {
/* Win9x or version query failure: fall back to native codepage I/O. */
xmlWrapStat = xmlWrapStatNative;
xmlWrapOpen = xmlWrapOpenNative;
#ifdef HAVE_ZLIB_H
xmlWrapGzOpen = gzopen;
#endif
}
xmlPlatformIoInitialized = 1;
return;
}
| 0 |
[
"CWE-134"
] |
libxml2
|
4472c3a5a5b516aaf59b89be602fbce52756c3e9
| 94,717,480,677,700,250,000,000,000,000,000,000,000 | 27 |
Fix some format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
Decorate every method in libxml2 with the appropriate
LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups
following the reports.
|
/*
 * Python-level factory for a PCX encoder object.
 * Args tuple: (mode, rawmode[, bits]) — `bits` defaults to 8 and is parsed
 * but not otherwise used in this function.
 * Returns a new encoder object, or NULL with a Python exception set.
 */
PyImaging_PcxEncoderNew(PyObject* self, PyObject* args)
{
ImagingEncoderObject* encoder;
char *mode;
char *rawmode;
int bits = 8;
if (!PyArg_ParseTuple(args, "ss|ii", &mode, &rawmode, &bits))
return NULL;
/* 0: no extra context buffer needed by this encoder. */
encoder = PyImaging_EncoderNew(0);
if (encoder == NULL)
return NULL;
/* NOTE(review): if get_packer fails, `encoder` appears to be leaked here
* (no decref/free before returning NULL) — confirm the cleanup contract
* of PyImaging_EncoderNew. */
if (get_packer(encoder, mode, rawmode) < 0)
return NULL;
encoder->encode = ImagingPcxEncode;
return (PyObject*) encoder;
}
| 0 |
[
"CWE-119"
] |
Pillow
|
a130c45990578a1bb0a6a000ed1b110e27324910
| 333,255,223,331,922,760,000,000,000,000,000,000,000 | 21 |
add several TIFF decoders and encoders
|
/* Parser callback invoked at end of input; prints an "-EOF-" marker
 * (std::endl also flushes stdout). */
ParserCallbacks::handleEOF()
{
std::cout << "-EOF-" << std::endl;
}
| 0 |
[
"CWE-125"
] |
qpdf
|
1868a10f8b06631362618bfc85ca8646da4b4b71
| 26,477,124,140,464,260,000,000,000,000,000,000,000 | 4 |
Replace all atoi calls with QUtil::string_to_int
The latter catches underflow/overflow.
|
/*
 * DH_METHOD callback for modular exponentiation: r = a^p mod m.
 * Thin wrapper that forwards directly to BN_mod_exp_mont, reusing the
 * caller-supplied Montgomery context.  Returns the BN_mod_exp_mont result
 * (1 on success, 0 on error).
 */
static int dh_bn_mod_exp(const DH *dh, BIGNUM *r,
const BIGNUM *a, const BIGNUM *p,
const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx)
{
return BN_mod_exp_mont(r, a, p, m, ctx, m_ctx);
}
| 0 |
[
"CWE-320"
] |
openssl
|
ea7abeeabf92b7aca160bdd0208636d4da69f4f4
| 160,939,561,809,542,270,000,000,000,000,000,000,000 | 6 |
Reject excessively large primes in DH key generation.
CVE-2018-0732
Signed-off-by: Guido Vranken <[email protected]>
(cherry picked from commit 91f7361f47b082ae61ffe1a7b17bb2adf213c7fe)
Reviewed-by: Tim Hudson <[email protected]>
Reviewed-by: Matt Caswell <[email protected]>
(Merged from https://github.com/openssl/openssl/pull/6457)
|
// Emit LIR for an atomic exchange at `addr`: stores `value` and returns the
// previous contents in a fresh register.  Only T_INT, object references and
// (on 64-bit) T_LONG are supported, enforced by the assert.
LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
bool is_oop = is_reference_type(type);
LIR_Opr result = new_register(type);
value.load_item();
// Because we want a 2-arg form of xchg and xadd
__ move(value.result(), result);
assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
__ xchg(addr, result, result, LIR_OprFact::illegalOpr);
return result;
}
| 0 |
[] |
jdk17u
|
268c0159253b3de5d72eb826ef2329b27bb33fea
| 212,215,943,509,987,800,000,000,000,000,000,000,000 | 10 |
8272014: Better array indexing
Reviewed-by: thartmann
Backport-of: 937c31d896d05aa24543b74e98a2ea9f05b5d86f
|
/* GC root-pushing hook: marks the typed-allocation descriptor table
 * (GC_ext_descriptors) so it is traced during collection. */
STATIC void GC_push_typed_structures_proc(void)
{
GC_PUSH_ALL_SYM(GC_ext_descriptors);
}
| 0 |
[
"CWE-119"
] |
bdwgc
|
4e1a6f9d8f2a49403bbd00b8c8e5324048fb84d4
| 218,054,945,457,686,950,000,000,000,000,000,000,000 | 4 |
Fix calloc_explicitly_typed in case of lb*n overflow
* typd_mlc.c: Include limits.h (for SIZE_MAX).
* typd_mlc.c (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): New macro (same as in
malloc.c).
* typd_mlc.c (GC_calloc_explicitly_typed): Return NULL if lb * n
overflows (same algorithm as in calloc defined in malloc.c); eliminate
lb *= n code duplication.
|
/* Add a kcontrol to the card, failing if an element with the same id
 * already exists (CTL_ADD_EXCLUSIVE).  Thin wrapper around
 * snd_ctl_add_replace(); returns its error code (0 on success). */
int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
{
return snd_ctl_add_replace(card, kcontrol, CTL_ADD_EXCLUSIVE);
}
| 0 |
[
"CWE-416",
"CWE-125"
] |
linux
|
6ab55ec0a938c7f943a4edba3d6514f775983887
| 97,906,825,209,137,040,000,000,000,000,000,000,000 | 4 |
ALSA: control: Fix an out-of-bounds bug in get_ctl_id_hash()
Since the user can control the arguments provided to the kernel by the
ioctl() system call, an out-of-bounds bug occurs when the 'id->name'
provided by the user does not end with '\0'.
The following log can reveal it:
[ 10.002313] BUG: KASAN: stack-out-of-bounds in snd_ctl_find_id+0x36c/0x3a0
[ 10.002895] Read of size 1 at addr ffff888109f5fe28 by task snd/439
[ 10.004934] Call Trace:
[ 10.007140] snd_ctl_find_id+0x36c/0x3a0
[ 10.007489] snd_ctl_ioctl+0x6cf/0x10e0
Fix this by checking the bound of 'id->name' in the loop.
Fixes: c27e1efb61c5 ("ALSA: control: Use xarray for faster lookups")
Signed-off-by: Zheyu Ma <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Takashi Iwai <[email protected]>
|
// Accessor: returns the stored horizontal Cb (chroma) subsampling value.
uint64_t cb_subsampling_horz() const { return cb_subsampling_horz_; }
| 0 |
[
"CWE-20"
] |
libvpx
|
f00890eecdf8365ea125ac16769a83aa6b68792d
| 221,556,822,339,226,370,000,000,000,000,000,000,000 | 1 |
update libwebm to libwebm-1.0.0.27-352-g6ab9fcf
https://chromium.googlesource.com/webm/libwebm/+log/af81f26..6ab9fcf
Change-Id: I9d56e1fbaba9b96404b4fbabefddc1a85b79c25d
|
/*
 * debugfs write handler for installing a custom ACPI method (AML table).
 * The table may arrive across multiple write() calls: the first chunk
 * (*ppos == 0) must contain at least a full ACPI table header, whose
 * `length` field sizes the staging buffer; subsequent chunks append at
 * *ppos until the whole table has been received, at which point it is
 * handed to acpi_install_method().
 *
 * SECURITY NOTE: per the fix commit, this interface lets root patch
 * arbitrary ACPI methods and thereby modify kernel memory, defeating
 * restricted-root configurations (CWE-284) — it was split into an
 * explicitly-enabled driver upstream.
 * NOTE(review): the function-static buf/max_size/uncopied_bytes are shared
 * by all writers with no locking — concurrent writes race; also, a new
 * write starting at *ppos == 0 while a previous upload is unfinished
 * leaks the old `buf`.  Confirm callers are serialized.
 */
static ssize_t cm_write(struct file *file, const char __user * user_buf,
size_t count, loff_t *ppos)
{
static char *buf;
static u32 max_size;
static u32 uncopied_bytes;
struct acpi_table_header table;
acpi_status status;
if (!(*ppos)) {
/* parse the table header to get the table length */
if (count <= sizeof(struct acpi_table_header))
return -EINVAL;
if (copy_from_user(&table, user_buf,
sizeof(struct acpi_table_header)))
return -EFAULT;
uncopied_bytes = max_size = table.length;
buf = kzalloc(max_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
}
if (buf == NULL)
return -EINVAL;
/* Reject out-of-bounds offsets, offset+count overflow, and writes past
 * the number of bytes still expected for this table. */
if ((*ppos > max_size) ||
(*ppos + count > max_size) ||
(*ppos + count < count) ||
(count > uncopied_bytes))
return -EINVAL;
if (copy_from_user(buf + (*ppos), user_buf, count)) {
kfree(buf);
buf = NULL;
return -EFAULT;
}
uncopied_bytes -= count;
*ppos += count;
/* Whole table received: install it and release the staging buffer. */
if (!uncopied_bytes) {
status = acpi_install_method(buf);
kfree(buf);
buf = NULL;
if (ACPI_FAILURE(status))
return -EINVAL;
/* Mark the kernel tainted: firmware tables were overridden. */
add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
}
return count;
}
| 1 |
[
"CWE-284",
"CWE-264"
] |
linux
|
526b4af47f44148c9d665e57723ed9f86634c6e3
| 254,651,266,262,673,260,000,000,000,000,000,000,000 | 52 |
ACPI: Split out custom_method functionality into an own driver
With /sys/kernel/debug/acpi/custom_method root can write
to arbitrary memory and increase his priveleges, even if
these are restricted.
-> Make this an own debug .config option and warn about the
security issue in the config description.
-> Still keep acpi/debugfs.c which now only creates an empty
/sys/kernel/debug/acpi directory. There might be other
users of it later.
Signed-off-by: Thomas Renninger <[email protected]>
Acked-by: Rafael J. Wysocki <[email protected]>
Acked-by: [email protected]
Signed-off-by: Len Brown <[email protected]>
|
/*
 * SCSI error-handler abort callback for the aacraid adapter.
 * For a small set of commands, scans the adapter's FIB array for entries
 * whose callback data matches and flags them FIB_CONTEXT_FLAG_TIMED_OUT so
 * the EH path completes them instead of the normal path.
 * Returns SUCCESS if the command's FIB was found and flagged, else FAILED.
 */
static int aac_eh_abort(struct scsi_cmnd* cmd)
{
struct scsi_device * dev = cmd->device;
struct Scsi_Host * host = dev->host;
struct aac_dev * aac = (struct aac_dev *)host->hostdata;
int count;
int ret = FAILED;
printk(KERN_ERR "%s: Host adapter abort request (%d,%d,%d,%d)\n",
AAC_DRIVERNAME,
host->host_no, sdev_channel(dev), sdev_id(dev), dev->lun);
switch (cmd->cmnd[0]) {
case SERVICE_ACTION_IN:
/* Only handled when it is a READ CAPACITY(16) on a 64-bit
 * raw-I/O capable adapter; otherwise nothing to do. */
if (!(aac->raw_io_interface) ||
!(aac->raw_io_64) ||
((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
break;
/* fallthrough: treat like INQUIRY/READ CAPACITY below */
case INQUIRY:
case READ_CAPACITY:
/* Mark associated FIB to not complete, eh handler does this */
for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
struct fib * fib = &aac->fibs[count];
if (fib->hw_fib_va->header.XferState &&
(fib->flags & FIB_CONTEXT_FLAG) &&
(fib->callback_data == cmd)) {
fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
ret = SUCCESS;
}
}
break;
case TEST_UNIT_READY:
/* Mark associated FIB to not complete, eh handler does this */
/* Wider match: flag every async FIB targeting the same device,
 * not just the one carrying `cmd`. */
for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
struct scsi_cmnd * command;
struct fib * fib = &aac->fibs[count];
if ((fib->hw_fib_va->header.XferState & cpu_to_le32(Async | NoResponseExpected)) &&
(fib->flags & FIB_CONTEXT_FLAG) &&
((command = fib->callback_data)) &&
(command->device == cmd->device)) {
fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
if (command == cmd)
ret = SUCCESS;
}
}
}
return ret;
}
| 0 |
[
"CWE-284",
"CWE-264"
] |
linux
|
f856567b930dfcdbc3323261bf77240ccdde01f5
| 325,855,550,852,192,500,000,000,000,000,000,000,000 | 49 |
aacraid: missing capable() check in compat ioctl
In commit d496f94d22d1 ('[SCSI] aacraid: fix security weakness') we
added a check on CAP_SYS_RAWIO to the ioctl. The compat ioctls need the
check as well.
Signed-off-by: Dan Carpenter <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>
|
// Classify an IRC message target: empty -> the network status buffer,
// channel-looking names -> channel buffer, anything else -> private query.
BufferInfo::Type CoreBasicHandler::typeByTarget(const QString &target) const
{
    if (target.isEmpty()) {
        return BufferInfo::StatusBuffer;
    }
    return network()->isChannelName(target) ? BufferInfo::ChannelBuffer
                                            : BufferInfo::QueryBuffer;
}
| 0 |
[
"CWE-399"
] |
quassel
|
b5e38970ffd55e2dd9f706ce75af9a8d7730b1b8
| 266,430,682,044,989,660,000,000,000,000,000,000,000 | 10 |
Improve the message-splitting algorithm for PRIVMSG and CTCP
This introduces a new message splitting algorithm based on
QTextBoundaryFinder. It works by first starting with the entire
message to be sent, encoding it, and checking to see if it is over
the maximum message length. If it is, it uses QTBF to find the
word boundary most immediately preceding the maximum length. If no
suitable boundary can be found, it falls back to searching for
grapheme boundaries. It repeats this process until the entire
message has been sent.
Unlike what it replaces, the new splitting code is not recursive
and cannot cause stack overflows. Additionally, if it is unable
to split a string, it will give up gracefully and not crash the
core or cause a thread to run away.
This patch fixes two bugs. The first is garbage characters caused
by accidentally splitting the string in the middle of a multibyte
character. Since the new code splits at a character level instead
of a byte level, this will no longer be an issue. The second is
the core crash caused by sending an overlength CTCP query ("/me")
containing only multibyte characters. This bug was caused by the
old CTCP splitter using the byte index from lastParamOverrun() as
a character index for a QString.
|
/*
 * Invoke `fn` on every socket currently in the connected-socket hash table.
 * The whole walk runs under vsock_table_lock (BH-disabled spinlock), so
 * `fn` must not sleep and must not take locks that rank above it.
 */
void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
{
int i;
spin_lock_bh(&vsock_table_lock);
/* Walk each hash bucket of the connected table. */
for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
struct vsock_sock *vsk;
list_for_each_entry(vsk, &vsock_connected_table[i],
connected_table)
fn(sk_vsock(vsk));
}
spin_unlock_bh(&vsock_table_lock);
}
| 0 |
[
"CWE-667"
] |
linux
|
c518adafa39f37858697ac9309c6cf1805581446
| 40,425,707,195,747,730,000,000,000,000,000,000,000 | 15 |
vsock: fix the race conditions in multi-transport support
There are multiple similar bugs implicitly introduced by the
commit c0cfa2d8a788fcf4 ("vsock: add multi-transports support") and
commit 6a2c0962105ae8ce ("vsock: prevent transport modules unloading").
The bug pattern:
[1] vsock_sock.transport pointer is copied to a local variable,
[2] lock_sock() is called,
[3] the local variable is used.
VSOCK multi-transport support introduced the race condition:
vsock_sock.transport value may change between [1] and [2].
Let's copy vsock_sock.transport pointer to local variables after
the lock_sock() call.
Fixes: c0cfa2d8a788fcf4 ("vsock: add multi-transports support")
Signed-off-by: Alexander Popov <[email protected]>
Reviewed-by: Stefano Garzarella <[email protected]>
Reviewed-by: Jorgen Hansen <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
|
template<typename t>
explicit CImgList(const CImg<t>& img, const bool is_shared=false):
_width(0),_allocated_width(0),_data(0) {
assign(1);
_data[0].assign(img,is_shared);
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 154,647,600,721,385,400,000,000,000,000,000,000,000 | 5 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
/*
 * Record `rval` in the unserializer's destructor list so its refcount is
 * released when unserialization finishes.  Entries are stored in
 * fixed-size var_dtor_entries chunks (VAR_ENTRIES_MAX slots each) chained
 * via `next`; a new chunk is allocated when the current one is full or no
 * chunk exists yet.  No-op if the unserialize context is absent.
 */
PHPAPI void var_push_dtor(php_unserialize_data_t *var_hashx, zval *rval)
{
var_dtor_entries *var_hash;
if (!var_hashx || !*var_hashx) {
return;
}
var_hash = (*var_hashx)->last_dtor;
#if VAR_ENTRIES_DBG
fprintf(stderr, "var_push_dtor(%ld): %d\n", var_hash?var_hash->used_slots:-1L, Z_TYPE_PP(rval));
#endif
if (!var_hash || var_hash->used_slots == VAR_ENTRIES_MAX) {
/* Current chunk full (or none yet): allocate and link a fresh one. */
var_hash = emalloc(sizeof(var_dtor_entries));
var_hash->used_slots = 0;
var_hash->next = 0;
if (!(*var_hashx)->first_dtor) {
(*var_hashx)->first_dtor = var_hash;
} else {
((var_dtor_entries *) (*var_hashx)->last_dtor)->next = var_hash;
}
(*var_hashx)->last_dtor = var_hash;
}
/* Take a reference on rval and stash it in the next free slot. */
ZVAL_COPY(&var_hash->data[var_hash->used_slots], rval);
var_hash->used_slots++;
}
| 0 |
[] |
php-src
|
d76b293ac71aa5bd4e9a433192afef6e0dd5a4ee
| 116,343,381,848,499,200,000,000,000,000,000,000,000 | 30 |
forgot to commit this one
|
/*
 * Decode and dispatch one fully-received message frame (msgr v2).
 * Preconditions: throttling already done (state == THROTTLE_DONE) and the
 * frame's segments are in rx_segments_data.
 * Flow: decode frame -> rebuild legacy header/footer -> decode_message ->
 * seq-number sanity checks -> fast-dispatch or queue -> ack bookkeeping.
 * Returns the next continuation (read_frame), or nullptr when the message
 * was dropped / the connection was reused; _fault() on decode failure.
 */
CtPtr ProtocolV2::handle_message() {
ldout(cct, 20) << __func__ << dendl;
ceph_assert(state == THROTTLE_DONE);
#if defined(WITH_LTTNG) && defined(WITH_EVENTTRACE)
ltt_recv_stamp = ceph_clock_now();
#endif
recv_stamp = ceph_clock_now();
// we need to get the size before std::moving segments data
const size_t cur_msg_size = get_current_msg_size();
auto msg_frame = MessageFrame::Decode(std::move(rx_segments_data));
// XXX: paranoid copy just to avoid oops
ceph_msg_header2 current_header = msg_frame.header();
ldout(cct, 5) << __func__
<< " got " << msg_frame.front_len()
<< " + " << msg_frame.middle_len()
<< " + " << msg_frame.data_len()
<< " byte message."
<< " envelope type=" << current_header.type
<< " src " << peer_name
<< " off " << current_header.data_off
<< dendl;
INTERCEPT(16);
// Rebuild the legacy (v1-style) header expected by decode_message from
// the v2 envelope; crc fields are zeroed (v2 integrity is frame-level).
ceph_msg_header header{current_header.seq,
current_header.tid,
current_header.type,
current_header.priority,
current_header.version,
init_le32(msg_frame.front_len()),
init_le32(msg_frame.middle_len()),
init_le32(msg_frame.data_len()),
current_header.data_off,
peer_name,
current_header.compat_version,
current_header.reserved,
init_le32(0)};
ceph_msg_footer footer{init_le32(0), init_le32(0),
init_le32(0), init_le64(0), current_header.flags};
Message *message = decode_message(cct, 0, header, footer,
msg_frame.front(),
msg_frame.middle(),
msg_frame.data(),
connection);
if (!message) {
ldout(cct, 1) << __func__ << " decode message failed " << dendl;
return _fault();
} else {
state = READ_MESSAGE_COMPLETE;
}
INTERCEPT(17);
message->set_byte_throttler(connection->policy.throttler_bytes);
message->set_message_throttler(connection->policy.throttler_messages);
// store reservation size in message, so we don't get confused
// by messages entering the dispatch queue through other paths.
message->set_dispatch_throttle_size(cur_msg_size);
message->set_recv_stamp(recv_stamp);
message->set_throttle_stamp(throttle_stamp);
message->set_recv_complete_stamp(ceph_clock_now());
// check received seq#. if it is old, drop the message.
// note that incoming messages may skip ahead. this is convenient for the
// client side queueing because messages can't be renumbered, but the (kernel)
// client will occasionally pull a message out of the sent queue to send
// elsewhere. in that case it doesn't matter if we "got" it or not.
uint64_t cur_seq = in_seq;
if (message->get_seq() <= cur_seq) {
ldout(cct, 0) << __func__ << " got old message " << message->get_seq()
<< " <= " << cur_seq << " " << message << " " << *message
<< ", discarding" << dendl;
message->put();
if (connection->has_feature(CEPH_FEATURE_RECONNECT_SEQ) &&
cct->_conf->ms_die_on_old_message) {
ceph_assert(0 == "old msgs despite reconnect_seq feature");
}
return nullptr;
}
if (message->get_seq() > cur_seq + 1) {
// Gap in sequence numbers: allowed (see comment above), optionally fatal
// under the ms_die_on_skipped_message debug option.
ldout(cct, 0) << __func__ << " missed message? skipped from seq "
<< cur_seq << " to " << message->get_seq() << dendl;
if (cct->_conf->ms_die_on_skipped_message) {
ceph_assert(0 == "skipped incoming seq");
}
}
#if defined(WITH_LTTNG) && defined(WITH_EVENTTRACE)
// Event tracing: record decode latency for OSD op / op-reply messages.
if (message->get_type() == CEPH_MSG_OSD_OP ||
message->get_type() == CEPH_MSG_OSD_OPREPLY) {
utime_t ltt_processed_stamp = ceph_clock_now();
double usecs_elapsed =
(ltt_processed_stamp.to_nsec() - ltt_recv_stamp.to_nsec()) / 1000;
ostringstream buf;
if (message->get_type() == CEPH_MSG_OSD_OP)
OID_ELAPSED_WITH_MSG(message, usecs_elapsed, "TIME_TO_DECODE_OSD_OP",
false);
else
OID_ELAPSED_WITH_MSG(message, usecs_elapsed, "TIME_TO_DECODE_OSD_OPREPLY",
false);
}
#endif
// note last received message.
in_seq = message->get_seq();
ldout(cct, 5) << __func__ << " received message m=" << message
<< " seq=" << message->get_seq()
<< " from=" << message->get_source() << " type=" << header.type
<< " " << *message << dendl;
// Lossless peers get an ack for every received message.
bool need_dispatch_writer = false;
if (!connection->policy.lossy) {
ack_left++;
need_dispatch_writer = true;
}
state = READY;
connection->logger->inc(l_msgr_recv_messages);
connection->logger->inc(
l_msgr_recv_bytes,
cur_msg_size + sizeof(ceph_msg_header) + sizeof(ceph_msg_footer));
messenger->ms_fast_preprocess(message);
auto fast_dispatch_time = ceph::mono_clock::now();
connection->logger->tinc(l_msgr_running_recv_time,
fast_dispatch_time - connection->recv_start_time);
if (connection->delay_state) {
// Debug injection: optionally delay delivery of this message.
double delay_period = 0;
if (rand() % 10000 < cct->_conf->ms_inject_delay_probability * 10000.0) {
delay_period =
cct->_conf->ms_inject_delay_max * (double)(rand() % 10000) / 10000.0;
ldout(cct, 1) << "queue_received will delay after "
<< (ceph_clock_now() + delay_period) << " on " << message
<< " " << *message << dendl;
}
connection->delay_state->queue(delay_period, message);
} else if (messenger->ms_can_fast_dispatch(message)) {
// Fast path: dispatch inline, dropping the connection lock around the
// callback to avoid lock inversions with dispatch handlers.
connection->lock.unlock();
connection->dispatch_queue->fast_dispatch(message);
connection->recv_start_time = ceph::mono_clock::now();
connection->logger->tinc(l_msgr_running_fast_dispatch_time,
connection->recv_start_time - fast_dispatch_time);
connection->lock.lock();
} else {
connection->dispatch_queue->enqueue(message, message->get_priority(),
connection->conn_id);
}
handle_message_ack(current_header.ack_seq);
// we might have been reused by another connection
// let's check if that is the case
if (state != READY) {
// yes, that was the case, let's do nothing
return nullptr;
}
if (need_dispatch_writer && connection->is_connected()) {
connection->center->dispatch_event_external(connection->write_handler);
}
return CONTINUE(read_frame);
}
| 0 |
[
"CWE-323"
] |
ceph
|
47c7e623546a7a33bd6bbddfb899fa9c9a40f40a
| 189,806,447,877,585,000,000,000,000,000,000,000,000 | 170 |
msg/async/ProtocolV2: avoid AES-GCM nonce reuse vulnerabilities
The secure mode uses AES-128-GCM with 96-bit nonces consisting of a
32-bit counter followed by a 64-bit salt. The counter is incremented
after processing each frame, the salt is fixed for the duration of
the session. Both are initialized from the session key generated
during session negotiation, so the counter starts with essentially
a random value. It is allowed to wrap, and, after 2**32 frames, it
repeats, resulting in nonce reuse (the actual sequence numbers that
the messenger works with are 64-bit, so the session continues on).
Because of how GCM works, this completely breaks both confidentiality
and integrity aspects of the secure mode. A single nonce reuse reveals
the XOR of two plaintexts and almost completely reveals the subkey
used for producing authentication tags. After a few nonces get used
twice, all confidentiality and integrity goes out the window and the
attacker can potentially encrypt-authenticate plaintext of their
choice.
We can't easily change the nonce format to extend the counter to
64 bits (and possibly XOR it with a longer salt). Instead, just
remember the initial nonce and cut the session before it repeats,
forcing renegotiation.
Signed-off-by: Ilya Dryomov <[email protected]>
Reviewed-by: Radoslaw Zarzynski <[email protected]>
Reviewed-by: Sage Weil <[email protected]>
Conflicts:
src/msg/async/ProtocolV2.cc [ context: commit 697aafa2aad2
("msg/async/ProtocolV2: remove unused parameter") not in
nautilus ]
src/msg/async/ProtocolV2.h [ context: commit ed3ec4c01d17
("msg: Build target 'common' without using namespace in
headers") not in nautilus ]
|
/**
 * Strips trailing CR/LF characters (and, when \p stripSpace is true, also
 * trailing spaces/tabs) from the string *s in place, then optionally
 * advances *s past leading spaces/tabs.
 *
 * @param s          In/out pointer to a NUL-terminated string; may be
 *                   advanced past leading whitespace.
 * @param stripSpace Also strip spaces/tabs (trailing and leading).
 * @return true if *s is non-NULL after processing (false only for a NULL
 *         pointer, which callers should not pass anyway).
 */
bool DL_Dxf::stripWhiteSpace(char** s, bool stripSpace) {
    // Index of the last character.  strlen() returns size_t, so the naive
    // "strlen(*s) - 1" wraps to SIZE_MAX for an empty string before the
    // implementation-defined narrowing to int (CWE-191 integer underflow);
    // guard the empty case explicitly instead.
    size_t len = strlen(*s);
    int lastChar = (len == 0) ? -1 : (int)(len - 1);

    // Trim trailing LF (10) / CR (13), and optionally spaces/tabs.
    while ( (lastChar >= 0) &&
            (((*s)[lastChar] == 10) || ((*s)[lastChar] == 13) ||
             (stripSpace && ((*s)[lastChar] == ' ' || ((*s)[lastChar] == '\t')))) ) {
        (*s)[lastChar] = '\0';
        lastChar--;
    }

    // Skip whitespace, excluding \n, at beginning of line.
    if (stripSpace) {
        while ((*s)[0]==' ' || (*s)[0]=='\t') {
            ++(*s);
        }
    }

    return ((*s) ? true : false);
}
| 0 |
[
"CWE-191"
] |
qcad
|
1eeffc5daf5a06cf6213ffc19e95923cdebb2eb8
| 167,063,737,978,175,150,000,000,000,000,000,000,000 | 21 |
check vertexIndex which might be -1 for broken DXF
|
/* Serialize a BinaryXML ('bxml') box: write the full-box header first,
 * then the raw binary payload if any. Returns GF_OK on success, an
 * error code from the header write, or GF_BAD_PARAM for a NULL box. */
GF_Err bxml_box_write(GF_Box *s, GF_BitStream *bs)
{
	GF_BinaryXMLBox *ptr;
	GF_Err e;

	if (!s)
		return GF_BAD_PARAM;

	/* downcast only after the NULL check */
	ptr = (GF_BinaryXMLBox *)s;
	e = gf_isom_full_box_write(s, bs);
	if (e)
		return e;
	if (ptr->data_length)
		gf_bs_write_data(bs, ptr->data, ptr->data_length);
	return GF_OK;
}
| 0 |
[
"CWE-401",
"CWE-787"
] |
gpac
|
ec64c7b8966d7e4642d12debb888be5acf18efb9
| 279,424,846,225,925,320,000,000,000,000,000,000,000 | 10 |
fixed #1786 (fuzz)
|
/* Quote `src` into `dest` (at most `dlen` bytes), escaping double quotes,
 * backslashes AND backquotes — the extra backquote escaping matters when
 * the result is embedded in a muttrc "mailboxes" command. Thin wrapper
 * over _imap_quote_string with the extended escape set. */
void imap_quote_string_and_backquotes (char *dest, size_t dlen, const char *src)
{
_imap_quote_string (dest, dlen, src, "\"\\`");
}
| 0 |
[
"CWE-78"
] |
mutt
|
185152818541f5cdc059cbff3f3e8b654fc27c1d
| 190,458,951,550,878,500,000,000,000,000,000,000,000 | 4 |
Properly quote IMAP mailbox names when (un)subscribing.
When handling automatic subscription (via $imap_check_subscribed), or
manual subscribe/unsubscribe commands, mutt generating a "mailboxes"
command but failed to properly escape backquotes.
Thanks to Jeriko One for the detailed bug report and patch, which this
commit is based upon.
|
/* Push the netdev's CAN bittiming to the adapter, if this adapter model
 * provides a dev_set_bittiming handler; adapters without one succeed
 * trivially. Returns 0 or the handler's error code (also logged). */
static int peak_usb_set_bittiming(struct net_device *netdev)
{
	struct peak_usb_device *dev = netdev_priv(netdev);
	const struct peak_usb_adapter *pa = dev->adapter;
	struct can_bittiming *bt;
	int err;

	if (!pa->dev_set_bittiming)
		return 0;

	bt = &dev->can.bittiming;
	err = pa->dev_set_bittiming(dev, bt);
	if (err)
		netdev_info(netdev, "couldn't set bitrate (err %d)\n",
			    err);
	return err;
}
| 0 |
[
"CWE-909"
] |
linux
|
f7a1337f0d29b98733c8824e165fca3371d7d4fd
| 275,440,946,420,136,580,000,000,000,000,000,000,000 | 17 |
can: peak_usb: fix slab info leak
Fix a small slab info leak due to a failure to clear the command buffer
at allocation.
The first 16 bytes of the command buffer are always sent to the device
in pcan_usb_send_cmd() even though only the first two may have been
initialised in case no argument payload is provided (e.g. when waiting
for a response).
Fixes: bb4785551f64 ("can: usb: PEAK-System Technik USB adapters driver core")
Cc: stable <[email protected]> # 3.4
Reported-by: [email protected]
Signed-off-by: Johan Hovold <[email protected]>
Signed-off-by: Marc Kleine-Budde <[email protected]>
|
/*
 * connect() handler for CAN broadcast-manager sockets: binds the socket to
 * a CAN interface (addr->can_ifindex, or 0 for "any" device) and, with
 * procfs enabled, creates a per-socket proc entry named after the socket's
 * inode number.  Runs under lock_sock(); a socket can only be bound once
 * (-EISCONN on repeat).  Returns 0 or a negative errno.
 */
static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
int flags)
{
struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
struct sock *sk = sock->sk;
struct bcm_sock *bo = bcm_sk(sk);
struct net *net = sock_net(sk);
int ret = 0;
if (len < BCM_MIN_NAMELEN)
return -EINVAL;
lock_sock(sk);
if (bo->bound) {
ret = -EISCONN;
goto fail;
}
/* bind a device to this socket */
if (addr->can_ifindex) {
struct net_device *dev;
dev = dev_get_by_index(net, addr->can_ifindex);
if (!dev) {
ret = -ENODEV;
goto fail;
}
/* Only genuine CAN interfaces are acceptable. */
if (dev->type != ARPHRD_CAN) {
dev_put(dev);
ret = -ENODEV;
goto fail;
}
/* Keep only the ifindex; the device reference is dropped here. */
bo->ifindex = dev->ifindex;
dev_put(dev);
} else {
/* no interface reference for ifindex = 0 ('any' CAN device) */
bo->ifindex = 0;
}
#if IS_ENABLED(CONFIG_PROC_FS)
if (net->can.bcmproc_dir) {
/* unique socket address as filename */
sprintf(bo->procname, "%lu", sock_i_ino(sk));
bo->bcm_proc_read = proc_create_net_single(bo->procname, 0644,
net->can.bcmproc_dir,
bcm_proc_show, sk);
if (!bo->bcm_proc_read) {
ret = -ENOMEM;
goto fail;
}
}
#endif /* CONFIG_PROC_FS */
bo->bound = 1;
fail:
release_sock(sk);
return ret;
}
| 0 |
[
"CWE-362"
] |
linux
|
d5f9023fa61ee8b94f37a93f08e94b136cf1e463
| 272,770,630,294,466,200,000,000,000,000,000,000,000 | 63 |
can: bcm: delay release of struct bcm_op after synchronize_rcu()
can_rx_register() callbacks may be called concurrently to the call to
can_rx_unregister(). The callbacks and callback data, though, are
protected by RCU and the struct sock reference count.
So the callback data is really attached to the life of sk, meaning
that it should be released on sk_destruct. However, bcm_remove_op()
calls tasklet_kill(), and RCU callbacks may be called under RCU
softirq, so that cannot be used on kernels before the introduction of
HRTIMER_MODE_SOFT.
However, bcm_rx_handler() is called under RCU protection, so after
calling can_rx_unregister(), we may call synchronize_rcu() in order to
wait for any RCU read-side critical sections to finish. That is,
bcm_rx_handler() won't be called anymore for those ops. So, we only
free them, after we do that synchronize_rcu().
Fixes: ffd980f976e7 ("[CAN]: Add broadcast manager (bcm) protocol")
Link: https://lore.kernel.org/r/[email protected]
Cc: linux-stable <[email protected]>
Reported-by: [email protected]
Reported-by: Norbert Slusarek <[email protected]>
Signed-off-by: Thadeu Lima de Souza Cascardo <[email protected]>
Acked-by: Oliver Hartkopp <[email protected]>
Signed-off-by: Marc Kleine-Budde <[email protected]>
|
get_contour_point(hb_font_t *font, void *font_data, hb_codepoint_t glyph,
unsigned int point_index, hb_position_t *x,
hb_position_t *y, void *user_data)
{
  /* HarfBuzz font callback: report the (x, y) position of one outline
   * point of `glyph` from the FreeType face.  Returns false when the
   * glyph cannot be loaded or the index is out of range; otherwise fills
   * *x / *y and returns true. */
  FT_Face face = font_data;
  FT_Outline *outline;
  int load_flags = FT_LOAD_DEFAULT | FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH
                   | FT_LOAD_IGNORE_TRANSFORM;

  if (FT_Load_Glyph(face, glyph, load_flags) != 0)
    return false;

  outline = &face->glyph->outline;
  if (point_index >= (unsigned)outline->n_points)
    return false;

  *x = outline->points[point_index].x;
  *y = outline->points[point_index].y;
  return true;
}
| 0 |
[
"CWE-399"
] |
libass
|
aa54e0b59200a994d50a346b5d7ac818ebcf2d4b
| 338,102,659,822,471,450,000,000,000,000,000,000,000 | 18 |
shaper: fix reallocation
Update the variable that tracks the allocated size. This potentially
improves performance and avoid some side effects, which lead to
undefined behavior in some cases.
Fixes fuzzer test case id:000051,sig:11,sync:fuzzer3,src:004221.
|
int pgpPubkeyFingerprint(const uint8_t *h, size_t hlen,
uint8_t **fp, size_t *fplen)
{
int rc = -1; /* assume failure */
const uint8_t *se;
const uint8_t *pend = h + hlen;
uint8_t version = 0;
if (pgpVersion(h, hlen, &version))
return rc;
/* We only permit V4 keys, V3 keys are long long since deprecated */
switch (version) {
case 4:
{ pgpPktKeyV4 v = (pgpPktKeyV4) (h);
int mpis = -1;
/* Packet must be larger than v to have room for the required MPIs */
if (hlen > sizeof(*v)) {
switch (v->pubkey_algo) {
case PGPPUBKEYALGO_RSA:
mpis = 2;
break;
case PGPPUBKEYALGO_DSA:
mpis = 4;
break;
case PGPPUBKEYALGO_EDDSA:
mpis = 1;
break;
}
}
se = (uint8_t *)(v + 1);
/* EdDSA has a curve id before the MPIs */
if (v->pubkey_algo == PGPPUBKEYALGO_EDDSA) {
if (se < pend && se[0] != 0x00 && se[0] != 0xff)
se += 1 + se[0];
else
se = pend; /* error out when reading the MPI */
}
while (se < pend && mpis-- > 0)
se += pgpMpiLen(se);
/* Does the size and number of MPI's match our expectations? */
if (se == pend && mpis == 0) {
DIGEST_CTX ctx = rpmDigestInit(PGPHASHALGO_SHA1, RPMDIGEST_NONE);
uint8_t *d = NULL;
size_t dlen = 0;
int i = se - h;
uint8_t in[3] = { 0x99, (i >> 8), i };
(void) rpmDigestUpdate(ctx, in, 3);
(void) rpmDigestUpdate(ctx, h, i);
(void) rpmDigestFinal(ctx, (void **)&d, &dlen, 0);
if (dlen == 20) {
rc = 0;
*fp = d;
*fplen = dlen;
} else {
free(d);
}
}
} break;
default:
rpmlog(RPMLOG_WARNING, _("Unsupported version of key: V%d\n"), version);
}
return rc;
}
| 0 |
[
"CWE-347",
"CWE-284"
] |
rpm
|
bd36c5dc9fb6d90c46fbfed8c2d67516fc571ec8
| 48,882,020,473,691,730,000,000,000,000,000,000,000 | 70 |
Validate and require subkey binding signatures on PGP public keys
All subkeys must be followed by a binding signature by the primary key
as per the OpenPGP RFC, enforce the presence and validity in the parser.
The implementation is as kludgey as they come to work around our
simple-minded parser structure without touching API, to maximise
backportability. Store all the raw packets internally as we decode them
to be able to access previous elements at will, needed to validate ordering
and access the actual data. Add testcases for manipulated keys whose
import previously would succeed.
Depends on the two previous commits:
7b399fcb8f52566e6f3b4327197a85facd08db91 and
236b802a4aa48711823a191d1b7f753c82a89ec5
Fixes CVE-2021-3521.
|
PHP_FUNCTION(openssl_digest)
{
zend_bool raw_output = 0;
char *data, *method;
int data_len, method_len;
const EVP_MD *mdtype;
EVP_MD_CTX md_ctx;
int siglen;
unsigned char *sigbuf;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "ss|b", &data, &data_len, &method, &method_len, &raw_output) == FAILURE) {
return;
}
mdtype = EVP_get_digestbyname(method);
if (!mdtype) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unknown signature algorithm");
RETURN_FALSE;
}
siglen = EVP_MD_size(mdtype);
sigbuf = emalloc(siglen + 1);
EVP_DigestInit(&md_ctx, mdtype);
EVP_DigestUpdate(&md_ctx, (unsigned char *)data, data_len);
if (EVP_DigestFinal (&md_ctx, (unsigned char *)sigbuf, (unsigned int *)&siglen)) {
if (raw_output) {
sigbuf[siglen] = '\0';
RETVAL_STRINGL((char *)sigbuf, siglen, 0);
} else {
int digest_str_len = siglen * 2;
char *digest_str = emalloc(digest_str_len + 1);
make_digest_ex(digest_str, sigbuf, siglen);
efree(sigbuf);
RETVAL_STRINGL(digest_str, digest_str_len, 0);
}
} else {
efree(sigbuf);
RETVAL_FALSE;
}
}
| 0 |
[
"CWE-200"
] |
php-src
|
270a406ac94b5fc5cc9ef59fc61e3b4b95648a3e
| 290,417,917,237,249,340,000,000,000,000,000,000,000 | 41 |
Fix bug #61413 ext\openssl\tests\openssl_encrypt_crash.phpt fails 5.3 only
|
static int ok_inflater_decode_distance(ok_inflater *inflater,
const ok_inflater_huffman_tree *tree) {
if (!ok_inflater_load_bits(inflater, tree->bits)) {
return -1;
}
uint32_t p = ok_inflater_peek_bits(inflater, tree->bits);
uint16_t tree_value = tree->lookup_table[p];
int value = tree_value & VALUE_BIT_MASK;
unsigned int value_bits = tree_value >> VALUE_BITS;
if (value < 4) {
ok_inflater_read_bits(inflater, value_bits);
return value + 1;
} else if (value >= OK_INFLATER_DISTANCE_TABLE_LENGTH) {
ok_inflater_error(inflater, "Invalid distance");
return -1;
} else {
unsigned int extra_bits = (unsigned int)((value >> 1) - 1);
if (!ok_inflater_can_read_bits(inflater, value_bits + extra_bits)) {
return -1;
}
// Make sure to load no more than 32 bits at once
ok_inflater_read_bits(inflater, value_bits);
ok_inflater_load_bits(inflater, extra_bits);
int d = (int)ok_inflater_read_bits(inflater, extra_bits);
return OK_INFLATER_DISTANCE_TABLE[value] + d;
}
}
| 0 |
[
"CWE-787"
] |
ok-file-formats
|
e49cdfb84fb5eca2a6261f3c51a3c793fab9f62e
| 157,135,087,003,536,280,000,000,000,000,000,000,000 | 27 |
ok_png: Disallow multiple IHDR chunks (#15)
|
static void printMAPOpcodeVersion1(const uint8_t *buf)
{
char map_addr[INET6_ADDRSTRLEN];
syslog(LOG_DEBUG, "PCP MAP: v1 Opcode specific information. \n");
syslog(LOG_DEBUG, "MAP protocol: \t\t %d\n", (int)buf[0] );
syslog(LOG_DEBUG, "MAP int port: \t\t %d\n", (int)READNU16(buf+4));
syslog(LOG_DEBUG, "MAP ext port: \t\t %d\n", (int)READNU16(buf+6));
syslog(LOG_DEBUG, "MAP Ext IP: \t\t %s\n", inet_ntop(AF_INET6,
buf+8, map_addr, INET6_ADDRSTRLEN));
}
| 0 |
[
"CWE-476"
] |
miniupnp
|
cb8a02af7a5677cf608e86d57ab04241cf34e24f
| 186,035,383,603,670,320,000,000,000,000,000,000,000 | 10 |
pcpserver.c: copyIPv6IfDifferent() check for NULL src argument
|
free_connection (struct connection *conn)
{
if (!conn)
return;
threadlocal_set_conn (NULL);
conn->close (conn);
if (listen_stdin) {
int fd;
/* Restore something to stdin/out so the rest of our code can
* continue to assume that all new fds will be above stderr.
* Swap directions to get EBADF on improper use of stdin/out.
*/
fd = open ("/dev/null", O_WRONLY | O_CLOEXEC);
assert (fd == 0);
fd = open ("/dev/null", O_RDONLY | O_CLOEXEC);
assert (fd == 1);
}
/* Don't call the plugin again if quit has been set because the main
* thread will be in the process of unloading it. The plugin.unload
* callback should always be called.
*/
if (!quit && connection_get_handle (conn, 0)) {
lock_request (conn);
backend->close (backend, conn);
unlock_request (conn);
}
if (conn->status_pipe[0] >= 0) {
close (conn->status_pipe[0]);
close (conn->status_pipe[1]);
}
pthread_mutex_destroy (&conn->request_lock);
pthread_mutex_destroy (&conn->read_lock);
pthread_mutex_destroy (&conn->write_lock);
pthread_mutex_destroy (&conn->status_lock);
free (conn->handles);
free (conn->exportname);
free (conn);
}
| 1 |
[
"CWE-406"
] |
nbdkit
|
a6b88b195a959b17524d1c8353fd425d4891dc5f
| 222,872,785,554,992,430,000,000,000,000,000,000,000 | 44 |
server: Fix regression for NBD_OPT_INFO before NBD_OPT_GO
Most known NBD clients do not bother with NBD_OPT_INFO (except for
clients like 'qemu-nbd --list' that don't ever intend to connect), but
go straight to NBD_OPT_GO. However, it's not too hard to hack up qemu
to add in an extra client step (whether info on the same name, or more
interestingly, info on a different name), as a patch against qemu
commit 6f214b30445:
| diff --git i/nbd/client.c w/nbd/client.c
| index f6733962b49b..425292ac5ea9 100644
| --- i/nbd/client.c
| +++ w/nbd/client.c
| @@ -1038,6 +1038,14 @@ int nbd_receive_negotiate(AioContext *aio_context, QIOChannel *ioc,
| * TLS). If it is not available, fall back to
| * NBD_OPT_LIST for nicer error messages about a missing
| * export, then use NBD_OPT_EXPORT_NAME. */
| + if (getenv ("HACK"))
| + info->name[0]++;
| + result = nbd_opt_info_or_go(ioc, NBD_OPT_INFO, info, errp);
| + if (getenv ("HACK"))
| + info->name[0]--;
| + if (result < 0) {
| + return -EINVAL;
| + }
| result = nbd_opt_info_or_go(ioc, NBD_OPT_GO, info, errp);
| if (result < 0) {
| return -EINVAL;
This works just fine in 1.14.0, where we call .open only once (so the
INFO and GO repeat calls into the same plugin handle), but in 1.14.1
it regressed into causing an assertion failure: we are now calling
.open a second time on a connection that is already opened:
$ nbdkit -rfv null &
$ hacked-qemu-io -f raw -r nbd://localhost -c quit
...
nbdkit: null[1]: debug: null: open readonly=1
nbdkit: backend.c:179: backend_open: Assertion `h->handle == NULL' failed.
Worse, on the mainline development, we have recently made it possible
for plugins to actively report different information for different
export names; for example, a plugin may choose to report different
answers for .can_write on export A than for export B; but if we share
cached handles, then an NBD_OPT_INFO on one export prevents correct
answers for NBD_OPT_GO on the second export name. (The HACK envvar in
my qemu modifications can be used to demonstrate cross-name requests,
which are even less likely in a real client).
The solution is to call .close after NBD_OPT_INFO, coupled with enough
glue logic to reset cached connection handles back to the state
expected by .open. This in turn means factoring out another backend_*
function, but also gives us an opportunity to change
backend_set_handle to no longer accept NULL.
The assertion failure is, to some extent, a possible denial of service
attack (one client can force nbdkit to exit by merely sending OPT_INFO
before OPT_GO, preventing the next client from connecting), although
this is mitigated by using TLS to weed out untrusted clients. Still,
the fact that we introduced a potential DoS attack while trying to fix
a traffic amplification security bug is not very nice.
Sadly, as there are no known clients that easily trigger this mode of
operation (OPT_INFO before OPT_GO), there is no easy way to cover this
via a testsuite addition. I may end up hacking something into libnbd.
Fixes: c05686f957
Signed-off-by: Eric Blake <[email protected]>
|
static void free_nitem(Dt_t * d, namev_t * np, Dtdisc_t * disc)
{
free(np->unique_name);
free(np);
}
| 0 |
[
"CWE-476"
] |
graphviz
|
839085f8026afd6f6920a0c31ad2a9d880d97932
| 80,525,528,217,527,315,000,000,000,000,000,000,000 | 5 |
attempted fix for null pointer deference on malformed input
|
Double gf_bs_read_double(GF_BitStream *bs)
{
char buf [8] = "\0\0\0\0\0\0\0";
s32 i;
for (i = 0; i < 64; i++)
buf[7-i/8] |= gf_bs_read_bit(bs) << (7 - i%8);
return (* (Double *) buf);
}
| 0 |
[
"CWE-617",
"CWE-703"
] |
gpac
|
9ea93a2ec8f555ceed1ee27294cf94822f14f10f
| 244,307,980,012,760,750,000,000,000,000,000,000,000 | 8 |
fixed #2165
|
st_init_numtable(void)
{
return st_init_table(&type_numhash);
}
| 0 |
[
"CWE-125"
] |
oniguruma
|
65a9b1aa03c9bc2dc01b074295b9603232cb3b78
| 149,788,621,264,952,780,000,000,000,000,000,000,000 | 4 |
onig-5.9.2
|
xmlParseCDSect(xmlParserCtxtPtr ctxt) {
xmlChar *buf = NULL;
int len = 0;
int size = XML_PARSER_BUFFER_SIZE;
int r, rl;
int s, sl;
int cur, l;
int count = 0;
/* Check 2.6.0 was NXT(0) not RAW */
if (CMP9(CUR_PTR, '<', '!', '[', 'C', 'D', 'A', 'T', 'A', '[')) {
SKIP(9);
} else
return;
ctxt->instate = XML_PARSER_CDATA_SECTION;
r = CUR_CHAR(rl);
if (!IS_CHAR(r)) {
xmlFatalErr(ctxt, XML_ERR_CDATA_NOT_FINISHED, NULL);
ctxt->instate = XML_PARSER_CONTENT;
return;
}
NEXTL(rl);
s = CUR_CHAR(sl);
if (!IS_CHAR(s)) {
xmlFatalErr(ctxt, XML_ERR_CDATA_NOT_FINISHED, NULL);
ctxt->instate = XML_PARSER_CONTENT;
return;
}
NEXTL(sl);
cur = CUR_CHAR(l);
buf = (xmlChar *) xmlMallocAtomic(size * sizeof(xmlChar));
if (buf == NULL) {
xmlErrMemory(ctxt, NULL);
return;
}
while (IS_CHAR(cur) &&
((r != ']') || (s != ']') || (cur != '>'))) {
if (len + 5 >= size) {
xmlChar *tmp;
size *= 2;
tmp = (xmlChar *) xmlRealloc(buf, size * sizeof(xmlChar));
if (tmp == NULL) {
xmlFree(buf);
xmlErrMemory(ctxt, NULL);
return;
}
buf = tmp;
}
COPY_BUF(rl,buf,len,r);
r = s;
rl = sl;
s = cur;
sl = l;
count++;
if (count > 50) {
GROW;
count = 0;
}
NEXTL(l);
cur = CUR_CHAR(l);
}
buf[len] = 0;
ctxt->instate = XML_PARSER_CONTENT;
if (cur != '>') {
xmlFatalErrMsgStr(ctxt, XML_ERR_CDATA_NOT_FINISHED,
"CData section not finished\n%.50s\n", buf);
xmlFree(buf);
return;
}
NEXTL(l);
/*
* OK the buffer is to be consumed as cdata.
*/
if ((ctxt->sax != NULL) && (!ctxt->disableSAX)) {
if (ctxt->sax->cdataBlock != NULL)
ctxt->sax->cdataBlock(ctxt->userData, buf, len);
else if (ctxt->sax->characters != NULL)
ctxt->sax->characters(ctxt->userData, buf, len);
}
xmlFree(buf);
}
| 0 |
[
"CWE-125"
] |
libxml2
|
77404b8b69bc122d12231807abf1a837d121b551
| 259,341,539,031,381,060,000,000,000,000,000,000,000 | 84 |
Make sure the parser returns when getting a Stop order
patch backported from chromiun bug fixes, assuming author is Chris
|
void Compute(OpKernelContext* ctx) override {
const Tensor* inputs;
const Tensor* seq_len;
Tensor* log_prob = nullptr;
OpOutputList decoded_indices;
OpOutputList decoded_values;
OpOutputList decoded_shape;
OP_REQUIRES_OK(ctx, decode_helper_.ValidateInputsGenerateOutputs(
ctx, &inputs, &seq_len, &log_prob, &decoded_indices,
&decoded_values, &decoded_shape));
const TensorShape& inputs_shape = inputs->shape();
std::vector<typename TTypes<T>::UnalignedConstMatrix> input_list_t;
const int64 max_time = inputs_shape.dim_size(0);
const int64 batch_size = inputs_shape.dim_size(1);
const int64 num_classes_raw = inputs_shape.dim_size(2);
OP_REQUIRES(
ctx, FastBoundsCheck(num_classes_raw, std::numeric_limits<int>::max()),
errors::InvalidArgument("num_classes cannot exceed max int"));
const int num_classes = static_cast<const int>(num_classes_raw);
auto inputs_t = inputs->tensor<T, 3>();
input_list_t.reserve(max_time);
for (std::size_t t = 0; t < max_time; ++t) {
input_list_t.emplace_back(inputs_t.data() + t * batch_size * num_classes,
batch_size, num_classes);
}
auto seq_len_t = seq_len->vec<int32>();
auto log_prob_t = log_prob->matrix<T>();
log_prob_t.setZero();
// Assumption: the blank index is num_classes - 1
int blank_index = num_classes - 1;
// Perform best path decoding
std::vector<std::vector<std::vector<int> > > sequences(batch_size);
auto decode = [&](const int64 begin, const int64 end) {
for (int b = begin; b < end; ++b) {
sequences[b].resize(1);
auto &sequence = sequences[b][0];
int prev_indices = -1;
for (int t = 0; t < seq_len_t(b); ++t) {
int max_class_indices;
OP_REQUIRES(ctx, input_list_t[t].dimension(1) > 0,
errors::InvalidArgument("Invalid input dimensions."));
log_prob_t(b, 0) +=
-RowMax<T>(input_list_t[t], b, &max_class_indices);
if (max_class_indices != blank_index &&
!(merge_repeated_ && max_class_indices == prev_indices)) {
sequence.push_back(max_class_indices);
}
prev_indices = max_class_indices;
}
}
};
const int64 kCostPerUnit = 50 * max_time * num_classes;
const int64 total = batch_size;
const DeviceBase::CpuWorkerThreads& worker_threads =
*ctx->device()->tensorflow_cpu_worker_threads();
Shard(worker_threads.num_threads, worker_threads.workers, total,
kCostPerUnit, decode);
OP_REQUIRES_OK(
ctx, decode_helper_.StoreAllDecodedSequences(
sequences, &decoded_indices, &decoded_values, &decoded_shape));
}
| 0 |
[
"CWE-369",
"CWE-908"
] |
tensorflow
|
b1b323042264740c398140da32e93fb9c2c9f33e
| 209,677,972,704,506,140,000,000,000,000,000,000,000 | 70 |
Fix SEGV in CTC ops
PiperOrigin-RevId: 372430279
Change-Id: I7ec2ad9d6f4d0980c33de45d27c6b17df5c6e26f
|
void tty_init_termios(struct tty_struct *tty)
{
struct ktermios *tp;
int idx = tty->index;
if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS)
tty->termios = tty->driver->init_termios;
else {
/* Check for lazy saved data */
tp = tty->driver->termios[idx];
if (tp != NULL) {
tty->termios = *tp;
tty->termios.c_line = tty->driver->init_termios.c_line;
} else
tty->termios = tty->driver->init_termios;
}
/* Compatibility until drivers always set this */
tty->termios.c_ispeed = tty_termios_input_baud_rate(&tty->termios);
tty->termios.c_ospeed = tty_termios_baud_rate(&tty->termios);
}
| 0 |
[
"CWE-416"
] |
linux
|
c8bcd9c5be24fb9e6132e97da5a35e55a83e36b9
| 189,209,743,279,904,440,000,000,000,000,000,000,000 | 20 |
tty: Fix ->session locking
Currently, locking of ->session is very inconsistent; most places
protect it using the legacy tty mutex, but disassociate_ctty(),
__do_SAK(), tiocspgrp() and tiocgsid() don't.
Two of the writers hold the ctrl_lock (because they already need it for
->pgrp), but __proc_set_tty() doesn't do that yet.
On a PREEMPT=y system, an unprivileged user can theoretically abuse
this broken locking to read 4 bytes of freed memory via TIOCGSID if
tiocgsid() is preempted long enough at the right point. (Other things
might also go wrong, especially if root-only ioctls are involved; I'm
not sure about that.)
Change the locking on ->session such that:
- tty_lock() is held by all writers: By making disassociate_ctty()
hold it. This should be fine because the same lock can already be
taken through the call to tty_vhangup_session().
The tricky part is that we need to shorten the area covered by
siglock to be able to take tty_lock() without ugly retry logic; as
far as I can tell, this should be fine, since nothing in the
signal_struct is touched in the `if (tty)` branch.
- ctrl_lock is held by all writers: By changing __proc_set_tty() to
hold the lock a little longer.
- All readers that aren't holding tty_lock() hold ctrl_lock: By
adding locking to tiocgsid() and __do_SAK(), and expanding the area
covered by ctrl_lock in tiocspgrp().
Cc: [email protected]
Signed-off-by: Jann Horn <[email protected]>
Reviewed-by: Jiri Slaby <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
suffix_requires_dir_check (char const *end)
{
/* If END does not start with a slash, the suffix is OK. */
while (ISSLASH (*end))
{
/* Two or more slashes act like a single slash. */
do
end++;
while (ISSLASH (*end));
switch (*end++)
{
default: return false; /* An ordinary file name component is OK. */
case '\0': return true; /* Trailing "/" is trouble. */
case '.': break; /* Possibly "." or "..". */
}
/* Trailing "/.", or "/.." even if not trailing, is trouble. */
if (!*end || (*end == '.' && (!end[1] || ISSLASH (end[1]))))
return true;
}
return false;
}
| 0 |
[
"CWE-125"
] |
glibc
|
ee8d5e33adb284601c00c94687bc907e10aec9bb
| 127,785,888,010,518,320,000,000,000,000,000,000,000 | 23 |
realpath: Set errno to ENAMETOOLONG for result larger than PATH_MAX [BZ #28770]
realpath returns an allocated string when the result exceeds PATH_MAX,
which is unexpected when its second argument is not NULL. This results
in the second argument (resolved) being uninitialized and also results
in a memory leak since the caller expects resolved to be the same as the
returned value.
Return NULL and set errno to ENAMETOOLONG if the result exceeds
PATH_MAX. This fixes [BZ #28770], which is CVE-2021-3998.
Reviewed-by: Adhemerval Zanella <[email protected]>
Signed-off-by: Siddhesh Poyarekar <[email protected]>
|
static int fts3DoAutoincrmerge(
Fts3Table *p, /* FTS3 table handle */
const char *zParam /* Nul-terminated string containing boolean */
){
int rc = SQLITE_OK;
sqlite3_stmt *pStmt = 0;
p->nAutoincrmerge = fts3Getint(&zParam);
if( p->nAutoincrmerge==1 || p->nAutoincrmerge>FTS3_MERGE_COUNT ){
p->nAutoincrmerge = 8;
}
if( !p->bHasStat ){
assert( p->bFts4==0 );
sqlite3Fts3CreateStatTable(&rc, p);
if( rc ) return rc;
}
rc = fts3SqlStmt(p, SQL_REPLACE_STAT, &pStmt, 0);
if( rc ) return rc;
sqlite3_bind_int(pStmt, 1, FTS_STAT_AUTOINCRMERGE);
sqlite3_bind_int(pStmt, 2, p->nAutoincrmerge);
sqlite3_step(pStmt);
rc = sqlite3_reset(pStmt);
return rc;
}
| 1 |
[
"CWE-787"
] |
sqlite
|
c72f2fb7feff582444b8ffdc6c900c69847ce8a9
| 120,148,002,406,359,820,000,000,000,000,000,000,000 | 23 |
More improvements to shadow table corruption detection in FTS3.
FossilOrigin-Name: 51525f9c3235967bc00a090e84c70a6400698c897aa4742e817121c725b8c99d
|
static int unix_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
if (protocol && protocol != PF_UNIX)
return -EPROTONOSUPPORT;
sock->state = SS_UNCONNECTED;
switch (sock->type) {
case SOCK_STREAM:
sock->ops = &unix_stream_ops;
break;
/*
* Believe it or not BSD has AF_UNIX, SOCK_RAW though
* nothing uses it.
*/
case SOCK_RAW:
sock->type = SOCK_DGRAM;
fallthrough;
case SOCK_DGRAM:
sock->ops = &unix_dgram_ops;
break;
case SOCK_SEQPACKET:
sock->ops = &unix_seqpacket_ops;
break;
default:
return -ESOCKTNOSUPPORT;
}
return unix_create1(net, sock, kern) ? 0 : -ENOMEM;
}
| 0 |
[
"CWE-362"
] |
linux
|
cbcf01128d0a92e131bd09f1688fe032480b65ca
| 105,538,005,485,733,680,000,000,000,000,000,000,000 | 31 |
af_unix: fix garbage collect vs MSG_PEEK
unix_gc() assumes that candidate sockets can never gain an external
reference (i.e. be installed into an fd) while the unix_gc_lock is
held. Except for MSG_PEEK this is guaranteed by modifying inflight
count under the unix_gc_lock.
MSG_PEEK does not touch any variable protected by unix_gc_lock (file
count is not), yet it needs to be serialized with garbage collection.
Do this by locking/unlocking unix_gc_lock:
1) increment file count
2) lock/unlock barrier to make sure incremented file count is visible
to garbage collection
3) install file into fd
This is a lock barrier (unlike smp_mb()) that ensures that garbage
collection is run completely before or completely after the barrier.
Cc: <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
Signed-off-by: Miklos Szeredi <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
ar6000_channel_change_event(struct ar6_softc *ar, u16 oldChannel,
u16 newChannel)
{
A_PRINTF("Channel Change notification\nOld Channel: %d, New Channel: %d\n",
oldChannel, newChannel);
}
| 0 |
[
"CWE-703",
"CWE-264"
] |
linux
|
550fd08c2cebad61c548def135f67aba284c6162
| 304,856,811,823,720,200,000,000,000,000,000,000,000 | 6 |
net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared
After the last patch, We are left in a state in which only drivers calling
ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real
hardware call ether_setup for their net_devices and don't hold any state in
their skbs. There are a handful of drivers that violate this assumption of
course, and need to be fixed up. This patch identifies those drivers, and marks
them as not being able to support the safe transmission of skbs by clearning the
IFF_TX_SKB_SHARING flag in priv_flags
Signed-off-by: Neil Horman <[email protected]>
CC: Karsten Keil <[email protected]>
CC: "David S. Miller" <[email protected]>
CC: Jay Vosburgh <[email protected]>
CC: Andy Gospodarek <[email protected]>
CC: Patrick McHardy <[email protected]>
CC: Krzysztof Halasa <[email protected]>
CC: "John W. Linville" <[email protected]>
CC: Greg Kroah-Hartman <[email protected]>
CC: Marcel Holtmann <[email protected]>
CC: Johannes Berg <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
utfc_ptr2char(
char_u *p,
int *pcc) // return: composing chars, last one is 0
{
int len;
int c;
int cc;
int i = 0;
c = utf_ptr2char(p);
len = utf_ptr2len(p);
// Only accept a composing char when the first char isn't illegal.
if ((len > 1 || *p < 0x80)
&& p[len] >= 0x80
&& UTF_COMPOSINGLIKE(p, p + len))
{
cc = utf_ptr2char(p + len);
for (;;)
{
pcc[i++] = cc;
if (i == MAX_MCO)
break;
len += utf_ptr2len(p + len);
if (p[len] < 0x80 || !utf_iscomposing(cc = utf_ptr2char(p + len)))
break;
}
}
if (i < MAX_MCO) // last composing char must be 0
pcc[i] = 0;
return c;
}
| 0 |
[
"CWE-122",
"CWE-787"
] |
vim
|
f6d39c31d2177549a986d170e192d8351bd571e2
| 306,874,752,942,038,300,000,000,000,000,000,000,000 | 34 |
patch 9.0.0220: invalid memory access with for loop over NULL string
Problem: Invalid memory access with for loop over NULL string.
Solution: Make sure mb_ptr2len() consistently returns zero for NUL.
|
SYSCALL_DEFINE3(open_by_handle_at, int, mountdirfd,
struct file_handle __user *, handle,
int, flags)
{
long ret;
if (force_o_largefile())
flags |= O_LARGEFILE;
ret = do_handle_open(mountdirfd, handle, flags);
return ret;
}
| 0 |
[
"CWE-362"
] |
linux
|
161f873b89136eb1e69477c847d5a5033239d9ba
| 119,916,571,620,328,730,000,000,000,000,000,000,000 | 12 |
vfs: read file_handle only once in handle_to_path
We used to read file_handle twice. Once to get the amount of extra
bytes, and once to fetch the entire structure.
This may be problematic since we do size verifications only after the
first read, so if the number of extra bytes changes in userspace between
the first and second calls, we'll have an incoherent view of
file_handle.
Instead, read the constant size once, and copy that over to the final
structure without having to re-read it again.
Signed-off-by: Sasha Levin <[email protected]>
Cc: Al Viro <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>
|
scroll_csr_backward(NCURSES_SP_DCLx
int n,
int top,
int bot,
int miny,
int maxy,
NCURSES_CH_T blank)
{
int i;
if (n == 1 && scroll_reverse && top == miny && bot == maxy) {
GoTo(NCURSES_SP_ARGx top, 0);
UpdateAttrs(SP_PARM, blank);
NCURSES_PUTP2("scroll_reverse", scroll_reverse);
} else if (n == 1 && insert_line && bot == maxy) {
GoTo(NCURSES_SP_ARGx top, 0);
UpdateAttrs(SP_PARM, blank);
NCURSES_PUTP2("insert_line", insert_line);
} else if (parm_rindex && top == miny && bot == maxy) {
GoTo(NCURSES_SP_ARGx top, 0);
UpdateAttrs(SP_PARM, blank);
TPUTS_TRACE("parm_rindex");
NCURSES_SP_NAME(tputs) (NCURSES_SP_ARGx
TPARM_2(parm_rindex, n, 0),
n,
NCURSES_SP_NAME(_nc_outch));
} else if (parm_insert_line && bot == maxy) {
GoTo(NCURSES_SP_ARGx top, 0);
UpdateAttrs(SP_PARM, blank);
TPUTS_TRACE("parm_insert_line");
NCURSES_SP_NAME(tputs) (NCURSES_SP_ARGx
TPARM_2(parm_insert_line, n, 0),
n,
NCURSES_SP_NAME(_nc_outch));
} else if (scroll_reverse && top == miny && bot == maxy) {
GoTo(NCURSES_SP_ARGx top, 0);
UpdateAttrs(SP_PARM, blank);
for (i = 0; i < n; i++) {
NCURSES_PUTP2("scroll_reverse", scroll_reverse);
}
} else if (insert_line && bot == maxy) {
GoTo(NCURSES_SP_ARGx top, 0);
UpdateAttrs(SP_PARM, blank);
for (i = 0; i < n; i++) {
NCURSES_PUTP2("insert_line", insert_line);
}
} else
return ERR;
#if NCURSES_EXT_FUNCS
if (FILL_BCE(SP_PARM)) {
int j;
for (i = 0; i < n; i++) {
GoTo(NCURSES_SP_ARGx top + i, 0);
for (j = 0; j < screen_columns(SP_PARM); j++)
PutChar(NCURSES_SP_ARGx CHREF(blank));
}
}
#endif
return OK;
}
| 1 |
[] |
ncurses
|
790a85dbd4a81d5f5d8dd02a44d84f01512ef443
| 188,091,578,380,745,600,000,000,000,000,000,000,000 | 61 |
ncurses 6.2 - patch 20200531
+ correct configure version-check/warnng for g++ to allow for 10.x
+ re-enable "bel" in konsole-base (report by Nia Huang)
+ add linux-s entry (patch by Alexandre Montaron).
+ drop long-obsolete convert_configure.pl
+ add test/test_parm.c, for checking tparm changes.
+ improve parameter-checking for tparm, adding function _nc_tiparm() to
handle the most-used case, which accepts only numeric parameters
(report/testcase by "puppet-meteor").
+ use a more conservative estimate of the buffer-size in lib_tparm.c's
save_text() and save_number(), in case the sprintf() function
passes-through unexpected characters from a format specifier
(report/testcase by "puppet-meteor").
+ add a check for end-of-string in cvtchar to handle a malformed
string in infotocap (report/testcase by "puppet-meteor").
|
std::unique_ptr<HttpResponse> HttpConnection::receiveResponse()
{
if (outstandingHttpRequests_.empty()) {
throw DL_ABORT_EX(EX_NO_HTTP_REQUEST_ENTRY_FOUND);
}
if (socketRecvBuffer_->bufferEmpty()) {
if (socketRecvBuffer_->recv() == 0 && !socket_->wantRead() &&
!socket_->wantWrite()) {
throw DL_RETRY_EX(EX_GOT_EOF);
}
}
const auto& proc = outstandingHttpRequests_.front()->getHttpHeaderProcessor();
if (proc->parse(socketRecvBuffer_->getBuffer(),
socketRecvBuffer_->getBufferLength())) {
A2_LOG_INFO(fmt(MSG_RECEIVE_RESPONSE, cuid_,
eraseConfidentialInfo(proc->getHeaderString()).c_str()));
auto result = proc->getResult();
if (result->getStatusCode() / 100 == 1) {
socketRecvBuffer_->drain(proc->getLastBytesProcessed());
outstandingHttpRequests_.front()->resetHttpHeaderProcessor();
return nullptr;
}
auto httpResponse = make_unique<HttpResponse>();
httpResponse->setCuid(cuid_);
httpResponse->setHttpHeader(std::move(result));
httpResponse->setHttpRequest(
outstandingHttpRequests_.front()->popHttpRequest());
socketRecvBuffer_->drain(proc->getLastBytesProcessed());
outstandingHttpRequests_.pop_front();
return httpResponse;
}
socketRecvBuffer_->drain(proc->getLastBytesProcessed());
return nullptr;
}
| 0 |
[
"CWE-532"
] |
aria2
|
37368130ca7de5491a75fd18a20c5c5cc641824a
| 191,517,797,518,241,530,000,000,000,000,000,000,000 | 37 |
Mask headers
|
static NTSTATUS dcesrv_lsa_QueryTrustedDomainInfo(struct dcesrv_call_state *dce_call, TALLOC_CTX *mem_ctx,
struct lsa_QueryTrustedDomainInfo *r)
{
union lsa_TrustedDomainInfo *info = NULL;
struct dcesrv_handle *h;
struct lsa_trusted_domain_state *trusted_domain_state;
struct ldb_message *msg;
int ret;
struct ldb_message **res;
const char *attrs[] = {
"flatname",
"trustPartner",
"securityIdentifier",
"trustDirection",
"trustType",
"trustAttributes",
"msDs-supportedEncryptionTypes",
NULL
};
DCESRV_PULL_HANDLE(h, r->in.trustdom_handle, LSA_HANDLE_TRUSTED_DOMAIN);
trusted_domain_state = talloc_get_type(h->data, struct lsa_trusted_domain_state);
/* pull all the user attributes */
ret = gendb_search_dn(trusted_domain_state->policy->sam_ldb, mem_ctx,
trusted_domain_state->trusted_domain_dn, &res, attrs);
if (ret != 1) {
return NT_STATUS_INTERNAL_DB_CORRUPTION;
}
msg = res[0];
info = talloc_zero(mem_ctx, union lsa_TrustedDomainInfo);
if (!info) {
return NT_STATUS_NO_MEMORY;
}
*r->out.info = info;
switch (r->in.level) {
case LSA_TRUSTED_DOMAIN_INFO_NAME:
info->name.netbios_name.string
= ldb_msg_find_attr_as_string(msg, "flatname", NULL);
break;
case LSA_TRUSTED_DOMAIN_INFO_POSIX_OFFSET:
info->posix_offset.posix_offset
= ldb_msg_find_attr_as_uint(msg, "posixOffset", 0);
break;
#if 0 /* Win2k3 doesn't implement this */
case LSA_TRUSTED_DOMAIN_INFO_BASIC:
r->out.info->info_basic.netbios_name.string
= ldb_msg_find_attr_as_string(msg, "flatname", NULL);
r->out.info->info_basic.sid
= samdb_result_dom_sid(mem_ctx, msg, "securityIdentifier");
break;
#endif
case LSA_TRUSTED_DOMAIN_INFO_INFO_EX:
return fill_trust_domain_ex(mem_ctx, msg, &info->info_ex);
case LSA_TRUSTED_DOMAIN_INFO_FULL_INFO:
ZERO_STRUCT(info->full_info);
return fill_trust_domain_ex(mem_ctx, msg, &info->full_info.info_ex);
case LSA_TRUSTED_DOMAIN_INFO_FULL_INFO_2_INTERNAL:
ZERO_STRUCT(info->full_info2_internal);
info->full_info2_internal.posix_offset.posix_offset
= ldb_msg_find_attr_as_uint(msg, "posixOffset", 0);
return fill_trust_domain_ex(mem_ctx, msg, &info->full_info2_internal.info.info_ex);
case LSA_TRUSTED_DOMAIN_SUPPORTED_ENCRYPTION_TYPES:
info->enc_types.enc_types
= ldb_msg_find_attr_as_uint(msg, "msDs-supportedEncryptionTypes", KERB_ENCTYPE_RC4_HMAC_MD5);
break;
case LSA_TRUSTED_DOMAIN_INFO_CONTROLLERS:
case LSA_TRUSTED_DOMAIN_INFO_INFO_EX2_INTERNAL:
/* oops, we don't want to return the info after all */
talloc_free(info);
*r->out.info = NULL;
return NT_STATUS_INVALID_PARAMETER;
default:
/* oops, we don't want to return the info after all */
talloc_free(info);
*r->out.info = NULL;
return NT_STATUS_INVALID_INFO_CLASS;
}
return NT_STATUS_OK;
}
| 0 |
[
"CWE-200"
] |
samba
|
0a3aa5f908e351201dc9c4d4807b09ed9eedff77
| 203,503,341,335,620,800,000,000,000,000,000,000,000 | 87 |
CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <[email protected]>
|
njs_vm_array_length(njs_vm_t *vm, njs_value_t *value, int64_t *length)
{
if (njs_fast_path(njs_is_array(value))) {
*length = njs_array(value)->length;
}
return njs_object_length(vm, value, length);
}
| 0 |
[
"CWE-416"
] |
njs
|
6a07c2156a07ef307b6dcf3c2ca8571a5f1af7a6
| 284,993,815,249,909,900,000,000,000,000,000,000,000 | 8 |
Fixed recursive async function calls.
Previously, PromiseCapability record was stored (function->context)
directly in function object during a function invocation. This is
not correct, because PromiseCapability record should be linked to
current execution context. As a result, function->context is
overwritten with consecutive recursive calls which results in
use-after-free.
This closes #451 issue on Github.
|
/* Execute COMMAND in a child process that has already been forked off as a
   subshell.  Sets up the subshell environment (signals, job control, piping,
   redirections), runs the command, optionally inverts its exit status, and
   runs the EXIT trap for user subshells.  Returns the command's exit status.
   ASYNCHRONOUS is nonzero for background (`&') execution; PIPE_IN/PIPE_OUT
   are the file descriptors for any surrounding pipeline; FDS_TO_CLOSE is a
   bitmap of descriptors the child should close. */
execute_in_subshell (command, asynchronous, pipe_in, pipe_out, fds_to_close)
     COMMAND *command;
     int asynchronous;
     int pipe_in, pipe_out;
     struct fd_bitmap *fds_to_close;
{
  int user_subshell, return_code, function_value, should_redir_stdin, invert;
  int ois, user_coproc;
  int result;
  volatile COMMAND *tcom;

  /* USE_VAR prevents these from being clobbered across the setjmp below. */
  USE_VAR(user_subshell);
  USE_VAR(user_coproc);
  USE_VAR(invert);
  USE_VAR(tcom);
  USE_VAR(asynchronous);

  subshell_level++;

  /* An async command with no explicit stdin redirection gets its stdin
     redirected to /dev/null later (POSIX behavior for `&'). */
  should_redir_stdin = (asynchronous && (command->flags & CMD_STDIN_REDIR) &&
			  pipe_in == NO_PIPE &&
			  stdin_redirects (command->redirects) == 0);

  invert = (command->flags & CMD_INVERT_RETURN) != 0;
  user_subshell = command->type == cm_subshell || ((command->flags & CMD_WANT_SUBSHELL) != 0);
  user_coproc = command->type == cm_coproc;

  /* These flags have been consumed by forking; clear them so the command
     is not re-subshelled or re-inverted below. */
  command->flags &= ~(CMD_FORCE_SUBSHELL | CMD_WANT_SUBSHELL | CMD_INVERT_RETURN);

  /* If a command is asynchronous in a subshell (like ( foo ) & or
     the special case of an asynchronous GROUP command where the
     the subshell bit is turned on down in case cm_group: below),
     turn off `asynchronous', so that two subshells aren't spawned.
     XXX - asynchronous used to be set to 0 in this block, but that
     means that setup_async_signals was never run.  Now it's set to
     0 after subshell_environment is set appropriately and setup_async_signals
     is run.

     This seems semantically correct to me.  For example,
     ( foo ) & seems to say ``do the command `foo' in a subshell
     environment, but don't wait for that subshell to finish'',
     and "{ foo ; bar ; } &" seems to me to be like functions or
     builtins in the background, which executed in a subshell
     environment.  I just don't see the need to fork two subshells. */

  /* Don't fork again, we are already in a subshell.  A `doubly
     async' shell is not interactive, however. */
  if (asynchronous)
    {
#if defined (JOB_CONTROL)
      /* If a construct like ( exec xxx yyy ) & is given while job
	 control is active, we want to prevent exec from putting the
	 subshell back into the original process group, carefully
	 undoing all the work we just did in make_child. */
      original_pgrp = -1;
#endif /* JOB_CONTROL */
      ois = interactive_shell;
      interactive_shell = 0;
      /* This test is to prevent alias expansion by interactive shells that
	 run `(command) &' but to allow scripts that have enabled alias
	 expansion with `shopt -s expand_alias' to continue to expand
	 aliases. */
      if (ois != interactive_shell)
	expand_aliases = 0;
    }

  /* Subshells are neither login nor interactive. */
  login_shell = interactive = 0;

  /* Record what kind of subshell environment this is for later tests
     (e.g. by builtins and trap handling). */
  if (user_subshell)
    subshell_environment = SUBSHELL_PAREN;
  else
    {
      subshell_environment = 0;			/* XXX */
      if (asynchronous)
	subshell_environment |= SUBSHELL_ASYNC;
      if (pipe_in != NO_PIPE || pipe_out != NO_PIPE)
	subshell_environment |= SUBSHELL_PIPE;
      if (user_coproc)
	subshell_environment |= SUBSHELL_COPROC;
    }

  reset_terminating_signals ();		/* in sig.c */
  /* Cancel traps, in trap.c. */
  /* Reset the signal handlers in the child, but don't free the
     trap strings.  Set a flag noting that we have to free the
     trap strings if we run trap to change a signal disposition. */
  reset_signal_handlers ();
  subshell_environment |= SUBSHELL_RESETTRAP;

  /* Make sure restore_original_signals doesn't undo the work done by
     make_child to ensure that asynchronous children are immune to SIGINT
     and SIGQUIT.  Turn off asynchronous to make sure more subshells are
     not spawned. */
  if (asynchronous)
    {
      setup_async_signals ();
      asynchronous = 0;
    }

#if defined (JOB_CONTROL)
  set_sigchld_handler ();
#endif /* JOB_CONTROL */

  set_sigint_handler ();

#if defined (JOB_CONTROL)
  /* Delete all traces that there were any jobs running.  This is
     only for subshells. */
  without_job_control ();
#endif /* JOB_CONTROL */

  if (fds_to_close)
    close_fd_bitmap (fds_to_close);

  do_piping (pipe_in, pipe_out);

#if defined (COPROCESS_SUPPORT)
  coproc_closeall ();
#endif

  /* If this is a user subshell, set a flag if stdin was redirected.
     This is used later to decide whether to redirect fd 0 to
     /dev/null for async commands in the subshell.  This adds more
     sh compatibility, but I'm not sure it's the right thing to do. */
  if (user_subshell)
    {
      stdin_redir = stdin_redirects (command->redirects);
      restore_default_signal (0);
    }

  /* If this is an asynchronous command (command &), we want to
     redirect the standard input from /dev/null in the absence of
     any specific redirection involving stdin. */
  if (should_redir_stdin && stdin_redir == 0)
    async_redirect_stdin ();

  /* Do redirections, then dispose of them before recursive call. */
  if (command->redirects)
    {
      if (do_redirections (command->redirects, RX_ACTIVE) != 0)
	exit (invert ? EXECUTION_SUCCESS : EXECUTION_FAILURE);

      dispose_redirects (command->redirects);
      command->redirects = (REDIRECT *)NULL;
    }

  /* TCOM is the command actually executed: unwrap subshell/coproc nodes. */
  if (command->type == cm_subshell)
    tcom = command->value.Subshell->command;
  else if (user_coproc)
    tcom = command->value.Coproc->command;
  else
    tcom = command;

  /* Propagate timing flags from the wrapper to the inner command. */
  if (command->flags & CMD_TIME_PIPELINE)
    tcom->flags |= CMD_TIME_PIPELINE;
  if (command->flags & CMD_TIME_POSIX)
    tcom->flags |= CMD_TIME_POSIX;

  /* Make sure the subshell inherits any CMD_IGNORE_RETURN flag. */
  if ((command->flags & CMD_IGNORE_RETURN) && tcom != command)
    tcom->flags |= CMD_IGNORE_RETURN;

  /* If this is a simple command, tell execute_disk_command that it
     might be able to get away without forking and simply exec.
     This means things like ( sleep 10 ) will only cause one fork.
     If we're timing the command or inverting its return value, however,
     we cannot do this optimization. */
  if ((user_subshell || user_coproc) && (tcom->type == cm_simple || tcom->type == cm_subshell) &&
      ((tcom->flags & CMD_TIME_PIPELINE) == 0) &&
      ((tcom->flags & CMD_INVERT_RETURN) == 0))
    {
      tcom->flags |= CMD_NO_FORK;
      if (tcom->type == cm_simple)
	tcom->value.Simple->flags |= CMD_NO_FORK;
    }

  /* Re-read the inversion flag from TCOM; it is handled here, not in the
     recursive call below. */
  invert = (tcom->flags & CMD_INVERT_RETURN) != 0;
  tcom->flags &= ~CMD_INVERT_RETURN;

  /* Non-local exits (e.g. `exit', fatal errors) land here via longjmp. */
  result = setjmp (top_level);

  /* If we're inside a function while executing this subshell, we
     need to handle a possible `return'. */
  function_value = 0;
  if (return_catch_flag)
    function_value = setjmp (return_catch);

  /* If we're going to exit the shell, we don't want to invert the return
     status. */
  if (result == EXITPROG)
    invert = 0, return_code = last_command_exit_value;
  else if (result)
    return_code = EXECUTION_FAILURE;
  else if (function_value)
    return_code = return_catch_value;
  else
    return_code = execute_command_internal ((COMMAND *)tcom, asynchronous, NO_PIPE, NO_PIPE, fds_to_close);

  /* If we are asked to, invert the return value. */
  if (invert)
    return_code = (return_code == EXECUTION_SUCCESS) ? EXECUTION_FAILURE
						     : EXECUTION_SUCCESS;

  /* If we were explicitly placed in a subshell with (), we need
     to do the `shell cleanup' things, such as running traps[0]. */
  if (user_subshell && signal_is_trapped (0))
    {
      last_command_exit_value = return_code;
      return_code = run_exit_trap ();
    }

  subshell_level--;
  return (return_code);
  /* NOTREACHED */
}
| 0 |
[] |
bash
|
863d31ae775d56b785dc5b0105b6d251515d81d5
| 59,213,835,028,366,460,000,000,000,000,000,000,000 | 215 |
commit bash-20120224 snapshot
|
/*
 * Parse the national-facilities portion of a ROSE frame into *facilities.
 *
 * @p:          start of the facilities bytes
 * @facilities: output structure; digipeater lists, rand, fail_call and
 *              fail_addr fields are filled in as parameters are recognised
 * @len:        number of bytes available at @p
 *
 * The top two bits of each parameter code select its encoding:
 *   0x00 - code + 1 value byte   (2 bytes total)
 *   0x40 - code + 2 value bytes  (3 bytes total)
 *   0x80 - code + 3 value bytes  (4 bytes total)
 *   0xC0 - code + length byte l + l value bytes
 *
 * Returns the number of bytes consumed, or -1 if any parameter would
 * overrun the @len bytes available or carries an invalid length.
 */
static int rose_parse_national(unsigned char *p, struct rose_facilities_struct *facilities, int len)
{
	unsigned char *pt;
	unsigned char l, lg, n = 0;
	int fac_national_digis_received = 0;

	do {
		switch (*p & 0xC0) {
		case 0x00:
			/* Single value byte; contents ignored. */
			if (len < 2)
				return -1;
			p   += 2;
			n   += 2;
			len -= 2;
			break;

		case 0x40:
			/* Two value bytes; only RAND is interpreted. */
			if (len < 3)
				return -1;
			if (*p == FAC_NATIONAL_RAND)
				facilities->rand = ((p[1] << 8) & 0xFF00) + ((p[2] << 0) & 0x00FF);
			p   += 3;
			n   += 3;
			len -= 3;
			break;

		case 0x80:
			/* Three value bytes; contents ignored. */
			if (len < 4)
				return -1;
			p   += 4;
			n   += 4;
			len -= 4;
			break;

		case 0xC0:
			/* Variable-length parameter: l = payload length. */
			if (len < 2)
				return -1;
			l = p[1];
			if (len < 2 + l)
				return -1;
			/* NOTE(review): DEST_DIGI fills source_digis and
			 * SRC_DIGI fills dest_digis below -- presumably the
			 * parameter names are from the peer's point of view;
			 * kept as-is. */
			if (*p == FAC_NATIONAL_DEST_DIGI) {
				if (!fac_national_digis_received) {
					if (l < AX25_ADDR_LEN)
						return -1;
					memcpy(&facilities->source_digis[0], p + 2, AX25_ADDR_LEN);
					facilities->source_ndigis = 1;
				}
			}
			else if (*p == FAC_NATIONAL_SRC_DIGI) {
				if (!fac_national_digis_received) {
					if (l < AX25_ADDR_LEN)
						return -1;
					memcpy(&facilities->dest_digis[0], p + 2, AX25_ADDR_LEN);
					facilities->dest_ndigis = 1;
				}
			}
			else if (*p == FAC_NATIONAL_FAIL_CALL) {
				if (l < AX25_ADDR_LEN)
					return -1;
				memcpy(&facilities->fail_call, p + 2, AX25_ADDR_LEN);
			}
			else if (*p == FAC_NATIONAL_FAIL_ADD) {
				if (l < 1 + ROSE_ADDR_LEN)
					return -1;
				/* Skip one extra byte before the address. */
				memcpy(&facilities->fail_addr, p + 3, ROSE_ADDR_LEN);
			}
			else if (*p == FAC_NATIONAL_DIGIS) {
				/* Combined digi list: payload must be a whole
				 * number of AX.25 addresses; this parameter
				 * overrides any earlier DEST/SRC_DIGI. */
				if (l % AX25_ADDR_LEN)
					return -1;
				fac_national_digis_received = 1;
				facilities->source_ndigis = 0;
				facilities->dest_ndigis   = 0;
				for (pt = p + 2, lg = 0 ; lg < l ; pt += AX25_ADDR_LEN, lg += AX25_ADDR_LEN) {
					if (pt[6] & AX25_HBIT) {
						/* H-bit set: destination-side digi. */
						if (facilities->dest_ndigis >= ROSE_MAX_DIGIS)
							return -1;
						memcpy(&facilities->dest_digis[facilities->dest_ndigis++], pt, AX25_ADDR_LEN);
					} else {
						if (facilities->source_ndigis >= ROSE_MAX_DIGIS)
							return -1;
						memcpy(&facilities->source_digis[facilities->source_ndigis++], pt, AX25_ADDR_LEN);
					}
				}
			}
			p   += l + 2;
			n   += l + 2;
			len -= l + 2;
			break;
		}
	} while (*p != 0x00 && len > 0);

	return n;
}
| 0 |
[
"CWE-20"
] |
linux
|
e0bccd315db0c2f919e7fcf9cb60db21d9986f52
| 245,289,646,097,418,000,000,000,000,000,000,000,000 | 93 |
rose: Add length checks to CALL_REQUEST parsing
Define some constant offsets for CALL_REQUEST based on the description
at <http://www.techfest.com/networking/wan/x25plp.htm> and the
definition of ROSE as using 10-digit (5-byte) addresses. Use them
consistently. Validate all implicit and explicit facilities lengths.
Validate the address length byte rather than either trusting or
assuming its value.
Signed-off-by: Ben Hutchings <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
* postprocessing step, like decryption or authenticity verification.
*/
static inline bool f2fs_post_read_required(struct inode *inode)
{
| 0 |
[
"CWE-476"
] |
linux
|
4969c06a0d83c9c3dc50b8efcdc8eeedfce896f6
| 10,129,997,050,980,459,000,000,000,000,000,000,000 | 4 |
f2fs: support swap file w/ DIO
Signed-off-by: Jaegeuk Kim <[email protected]>
|
/*
 * Set every bit in one activity's item bitmap, i.e. select "ALL" items
 * for that activity.  Exits via get_activity_position() if the activity
 * is not present in act[].
 */
static void select_all_items(struct activity *act[], int activity_id)
{
	int pos = get_activity_position(act, activity_id, EXIT_IF_NOT_FOUND);

	memset(act[pos]->bitmap->b_array, ~0,
	       BITMAP_SIZE(act[pos]->bitmap->b_size));
}

/*
 * Apply default item selection: when the user gave no -P option, select
 * all CPUs; when no -I option, select all interrupts.
 */
void set_bitmaps(struct activity *act[], uint64_t *flags)
{
	if (!USE_OPTION_P(*flags)) {
		/* No -P given: behave as if -P ALL had been used */
		select_all_items(act, A_CPU);
	}

	if (!USE_OPTION_I(*flags)) {
		/* No -I given: behave as if -I ALL had been used */
		select_all_items(act, A_IRQ);
	}
}
| 0 |
[
"CWE-415"
] |
sysstat
|
a5c8abd4a481ee6e27a3acf00e6d9b0f023e20ed
| 157,081,748,609,155,160,000,000,000,000,000,000,000 | 18 |
Fix #242: Double free in check_file_actlst()
Avoid freeing buffer() twice.
Signed-off-by: Sebastien GODARD <[email protected]>
|
/*
 * Decompress an HFS+ compressed attribute whose payload is (normally)
 * zlib-deflated.
 *
 * rawBuf/rawSize  - the raw attribute bytes and their length
 * uncSize         - expected uncompressed size
 * dstBuf/dstSize  - on success, receive the uncompressed data and length
 * dstBufFree      - set to TRUE when *dstBuf was heap-allocated and must
 *                   be freed by the caller, FALSE otherwise
 *
 * If the low nibble of the first raw byte is 0x0F, the data is actually
 * stored uncompressed and is handed to hfs_decompress_noncompressed_attr().
 *
 * Returns 1 on success, 0 on error (error state is set via error_returned/
 * error_detected).
 */
static int hfs_decompress_zlib_attr(char* rawBuf, uint32_t rawSize, uint64_t uncSize, char** dstBuf, uint64_t* dstSize, int* dstBufFree)
{
    // If the lower bits of the initial byte are 0x0F, then the data is
    // not really compressed; delegate to the "noncompressed" handler.
    if ((rawBuf[0] & 0x0F) == 0x0F) {
        return hfs_decompress_noncompressed_attr(
            rawBuf, rawSize, uncSize, dstBuf, dstSize, dstBufFree);
    }
    else {
#ifdef HAVE_LIBZ
        char* uncBuf = NULL;
        uint64_t uLen;
        unsigned long bytesConsumed;
        int infResult;

        if (tsk_verbose)
            tsk_fprintf(stderr,
                        "%s: Uncompressing (inflating) data.", __func__);
        // Uncompress the remainder of the attribute, and load as 128-0
        // Note: cast is OK because uncSize will be quite modest, < 4000.

        uncBuf = (char *) tsk_malloc((size_t) uncSize + 100); // add some extra space
        if (uncBuf == NULL) {
            error_returned
                (" - %s, space for the uncompressed attr", __func__);
            return 0;
        }

        infResult = zlib_inflate(rawBuf, (uint64_t) rawSize,
                                 uncBuf, (uint64_t) (uncSize + 100),
                                 &uLen, &bytesConsumed);
        if (infResult != 0) {
            error_returned
                (" %s, zlib could not uncompress attr", __func__);
            free(uncBuf);
            return 0;
        }

        // Trailing unconsumed bytes indicate corrupt/malformed input.
        if (bytesConsumed != rawSize) {
            error_detected(TSK_ERR_FS_READ,
                " %s, decompressor did not consume the whole compressed data",
                __func__);
            free(uncBuf);
            return 0;
        }

        *dstBuf = uncBuf;
        *dstSize = uncSize;
        *dstBufFree = TRUE;
#else
        // ZLIB compression library is not available, so we will load a
        // zero-length default DATA attribute. Without this, icat may
        // misbehave.
        if (tsk_verbose)
            tsk_fprintf(stderr,
                        "%s: ZLIB not available, so loading an empty default DATA attribute.\n", __func__);

        // Dummy is one byte long, so the ptr is not null, but we set the
        // length to zero bytes, so it is never read.
        static uint8_t dummy[1];

        *dstBuf = dummy;
        *dstSize = 0;
        *dstBufFree = FALSE;
#endif
    }

    return 1;
}
| 0 |
[
"CWE-190",
"CWE-284"
] |
sleuthkit
|
114cd3d0aac8bd1aeaf4b33840feb0163d342d5b
| 254,465,177,809,513,220,000,000,000,000,000,000,000 | 68 |
hfs: fix keylen check in hfs_cat_traverse()
If key->key_len is 65535, calculating "uint16_t keylen' would
cause an overflow:
uint16_t keylen;
...
keylen = 2 + tsk_getu16(hfs->fs_info.endian, key->key_len)
so the code bypasses the sanity check "if (keylen > nodesize)"
which results in crash later:
./toolfs/fstools/fls -b 512 -f hfs <image>
=================================================================
==16==ERROR: AddressSanitizer: SEGV on unknown address 0x6210000256a4 (pc 0x00000054812b bp 0x7ffca548a8f0 sp 0x7ffca548a480 T0)
==16==The signal is caused by a READ memory access.
#0 0x54812a in hfs_dir_open_meta_cb /fuzzing/sleuthkit/tsk/fs/hfs_dent.c:237:20
#1 0x51a96c in hfs_cat_traverse /fuzzing/sleuthkit/tsk/fs/hfs.c:1082:21
#2 0x547785 in hfs_dir_open_meta /fuzzing/sleuthkit/tsk/fs/hfs_dent.c:480:9
#3 0x50f57d in tsk_fs_dir_open_meta /fuzzing/sleuthkit/tsk/fs/fs_dir.c:290:14
#4 0x54af17 in tsk_fs_path2inum /fuzzing/sleuthkit/tsk/fs/ifind_lib.c:237:23
#5 0x522266 in hfs_open /fuzzing/sleuthkit/tsk/fs/hfs.c:6579:9
#6 0x508e89 in main /fuzzing/sleuthkit/tools/fstools/fls.cpp:267:19
#7 0x7f9daf67c2b0 in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x202b0)
#8 0x41d679 in _start (/fuzzing/sleuthkit/tools/fstools/fls+0x41d679)
Make 'keylen' int type to prevent the overflow and fix that.
Now, I get proper error message instead of crash:
./toolfs/fstools/fls -b 512 -f hfs <image>
General file system error (hfs_cat_traverse: length of key 3 in leaf node 1 too large (65537 vs 4096))
|
/* Enable or disable unicast address matching on the GMAC.
 * Note that both register bits are written with the inverse of @en --
 * the hardware bits appear to be active-low here (TODO: confirm against
 * the GMAC register documentation).
 */
static void hns_gmac_set_uc_match(void *mac_drv, u16 en)
{
	struct mac_driver *gmac = (struct mac_driver *)mac_drv;
	u32 inverted = !en;

	dsaf_set_dev_bit(gmac, GMAC_REC_FILT_CONTROL_REG,
			 GMAC_UC_MATCH_EN_B, inverted);
	dsaf_set_dev_bit(gmac, GMAC_STATION_ADDR_HIGH_2_REG,
			 GMAC_ADDR_EN_B, inverted);
}
| 0 |
[
"CWE-119",
"CWE-703"
] |
linux
|
412b65d15a7f8a93794653968308fc100f2aa87c
| 128,405,101,856,034,430,000,000,000,000,000,000,000 | 9 |
net: hns: fix ethtool_get_strings overflow in hns driver
hns_get_sset_count() returns HNS_NET_STATS_CNT and the data space allocated
is not enough for ethtool_get_strings(), which will cause random memory
corruption.
When SLAB and DEBUG_SLAB are both enabled, memory corruptions like the
the following can be observed without this patch:
[ 43.115200] Slab corruption (Not tainted): Acpi-ParseExt start=ffff801fb0b69030, len=80
[ 43.115206] Redzone: 0x9f911029d006462/0x5f78745f31657070.
[ 43.115208] Last user: [<5f7272655f746b70>](0x5f7272655f746b70)
[ 43.115214] 010: 70 70 65 31 5f 74 78 5f 70 6b 74 00 6b 6b 6b 6b ppe1_tx_pkt.kkkk
[ 43.115217] 030: 70 70 65 31 5f 74 78 5f 70 6b 74 5f 6f 6b 00 6b ppe1_tx_pkt_ok.k
[ 43.115218] Next obj: start=ffff801fb0b69098, len=80
[ 43.115220] Redzone: 0x706d655f6f666966/0x9f911029d74e35b.
[ 43.115229] Last user: [<ffff0000084b11b0>](acpi_os_release_object+0x28/0x38)
[ 43.115231] 000: 74 79 00 6b 6b 6b 6b 6b 70 70 65 31 5f 74 78 5f ty.kkkkkppe1_tx_
[ 43.115232] 010: 70 6b 74 5f 65 72 72 5f 63 73 75 6d 5f 66 61 69 pkt_err_csum_fai
Signed-off-by: Timmy Li <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/* Decide whether @vma could be merged with a neighbour that has the
 * given @file, @vm_flags and userfaultfd context.  Returns 1 if all
 * merge preconditions hold, 0 otherwise.
 */
static inline int is_mergeable_vma(struct vm_area_struct *vma,
				   struct file *file, unsigned long vm_flags,
				   struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
{
	/*
	 * VM_SOFTDIRTY should not prevent from VMA merging, if we
	 * match the flags but dirty bit -- the caller should mark
	 * merged VMA as dirty. If dirty bit won't be excluded from
	 * comparison, we increase pressure on the memory system forcing
	 * the kernel to generate new VMAs when old one could be
	 * extended instead.
	 */
	int flags_compatible = !((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY);
	int same_backing_file = (vma->vm_file == file);
	int no_close_hook = !(vma->vm_ops && vma->vm_ops->close);

	return flags_compatible && same_backing_file && no_close_hook &&
	       is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx);
}
| 0 |
[
"CWE-362",
"CWE-703",
"CWE-667"
] |
linux
|
04f5866e41fb70690e28397487d8bd8eea7d712a
| 305,541,758,327,735,000,000,000,000,000,000,000,000 | 22 |
coredump: fix race condition between mmget_not_zero()/get_task_mm() and core dumping
The core dumping code has always run without holding the mmap_sem for
writing, despite that is the only way to ensure that the entire vma
layout will not change from under it. Only using some signal
serialization on the processes belonging to the mm is not nearly enough.
This was pointed out earlier. For example in Hugh's post from Jul 2017:
https://lkml.kernel.org/r/[email protected]
"Not strictly relevant here, but a related note: I was very surprised
to discover, only quite recently, how handle_mm_fault() may be called
without down_read(mmap_sem) - when core dumping. That seems a
misguided optimization to me, which would also be nice to correct"
In particular because the growsdown and growsup can move the
vm_start/vm_end the various loops the core dump does around the vma will
not be consistent if page faults can happen concurrently.
Pretty much all users calling mmget_not_zero()/get_task_mm() and then
taking the mmap_sem had the potential to introduce unexpected side
effects in the core dumping code.
Adding mmap_sem for writing around the ->core_dump invocation is a
viable long term fix, but it requires removing all copy user and page
faults and to replace them with get_dump_page() for all binary formats
which is not suitable as a short term fix.
For the time being this solution manually covers the places that can
confuse the core dump either by altering the vma layout or the vma flags
while it runs. Once ->core_dump runs under mmap_sem for writing the
function mmget_still_valid() can be dropped.
Allowing mmap_sem protected sections to run in parallel with the
coredump provides some minor parallelism advantage to the swapoff code
(which seems to be safe enough by never mangling any vma field and can
keep doing swapins in parallel to the core dumping) and to some other
corner case.
In order to facilitate the backporting I added "Fixes: 86039bd3b4e6"
however the side effect of this same race condition in /proc/pid/mem
should be reproducible since before 2.6.12-rc2 so I couldn't add any
other "Fixes:" because there's no hash beyond the git genesis commit.
Because find_extend_vma() is the only location outside of the process
context that could modify the "mm" structures under mmap_sem for
reading, by adding the mmget_still_valid() check to it, all other cases
that take the mmap_sem for reading don't need the new check after
mmget_not_zero()/get_task_mm(). The expand_stack() in page fault
context also doesn't need the new check, because all tasks under core
dumping are frozen.
Link: http://lkml.kernel.org/r/[email protected]
Fixes: 86039bd3b4e6 ("userfaultfd: add new syscall to provide memory externalization")
Signed-off-by: Andrea Arcangeli <[email protected]>
Reported-by: Jann Horn <[email protected]>
Suggested-by: Oleg Nesterov <[email protected]>
Acked-by: Peter Xu <[email protected]>
Reviewed-by: Mike Rapoport <[email protected]>
Reviewed-by: Oleg Nesterov <[email protected]>
Reviewed-by: Jann Horn <[email protected]>
Acked-by: Jason Gunthorpe <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
/*
 * Heartbeat timer expiry handler (RFC 4960 Section 8.3).
 *
 * @arg carries the struct sctp_transport whose heartbeat timer fired.
 *
 * If the association's error count has exceeded its retransmission limit,
 * queue commands to fail/tear down the association and return
 * SCTP_DISPOSITION_DELETE_TCB.  Otherwise, send a HEARTBEAT on the
 * transport (if heartbeats are enabled for it) and restart the timer.
 */
sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const sctp_subtype_t type,
					void *arg,
					sctp_cmd_seq_t *commands)
{
	struct sctp_transport *transport = (struct sctp_transport *) arg;

	/* Too many unacknowledged errors: give up on the association. */
	if (asoc->overall_error_count > asoc->max_retrans) {
		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
				SCTP_ERROR(ETIMEDOUT));
		/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
				SCTP_PERR(SCTP_ERROR_NO_ERROR));
		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
		SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
		return SCTP_DISPOSITION_DELETE_TCB;
	}

	/* Section 3.3.5.
	 * The Sender-specific Heartbeat Info field should normally include
	 * information about the sender's current time when this HEARTBEAT
	 * chunk is sent and the destination transport address to which this
	 * HEARTBEAT is sent (see Section 8.3).
	 */

	if (transport->param_flags & SPP_HB_ENABLE) {
		if (SCTP_DISPOSITION_NOMEM ==
				sctp_sf_heartbeat(ep, asoc, type, arg,
						  commands))
			return SCTP_DISPOSITION_NOMEM;

		/* Set transport error counter and association error counter
		 * when sending heartbeat.
		 */
		sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_RESET,
				SCTP_TRANSPORT(transport));
	}
	/* Always rearm the heartbeat timer for this transport. */
	sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMER_UPDATE,
			SCTP_TRANSPORT(transport));
	return SCTP_DISPOSITION_CONSUME;
}
| 0 |
[
"CWE-20"
] |
linux-2.6
|
ba0166708ef4da7eeb61dd92bbba4d5a749d6561
| 315,005,561,558,043,330,000,000,000,000,000,000,000 | 42 |
sctp: Fix kernel panic while process protocol violation parameter
Since call to function sctp_sf_abort_violation() need paramter 'arg' with
'struct sctp_chunk' type, it will read the chunk type and chunk length from
the chunk_hdr member of chunk. But call to sctp_sf_violation_paramlen()
always with 'struct sctp_paramhdr' type's parameter, it will be passed to
sctp_sf_abort_violation(). This may cause kernel panic.
sctp_sf_violation_paramlen()
|-- sctp_sf_abort_violation()
|-- sctp_make_abort_violation()
This patch fixed this problem. This patch also fix two place which called
sctp_sf_violation_paramlen() with wrong paramter type.
Signed-off-by: Wei Yongjun <[email protected]>
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/*
 * Register the conntrack sysctl table for net namespace @net.
 *
 * A private copy of nf_ct_sysctl_table is made and its .data pointers are
 * redirected to this namespace's state where per-netns storage exists.
 * Entries that still point at global variables are made read-only in any
 * non-init netns: otherwise a privileged user in a child namespace could
 * write nf_conntrack_max / nf_ct_expect_max / the hashtable size and have
 * the change leak into every other namespace (information leak /
 * cross-namespace interference, CWE-203).
 *
 * Returns 0 on success or -ENOMEM.
 */
static int nf_conntrack_standalone_init_sysctl(struct net *net)
{
	struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
	struct nf_udp_net *un = nf_udp_pernet(net);
	struct ctl_table *table;

	BUILD_BUG_ON(ARRAY_SIZE(nf_ct_sysctl_table) != NF_SYSCTL_CT_LAST_SYSCTL);

	table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
			GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* Point the writable entries at this namespace's own state. */
	table[NF_SYSCTL_CT_COUNT].data = &net->ct.count;
	table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum;
	table[NF_SYSCTL_CT_LOG_INVALID].data = &net->ct.sysctl_log_invalid;
	table[NF_SYSCTL_CT_ACCT].data = &net->ct.sysctl_acct;
	table[NF_SYSCTL_CT_HELPER].data = &net->ct.sysctl_auto_assign_helper;
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events;
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
	table[NF_SYSCTL_CT_TIMESTAMP].data = &net->ct.sysctl_tstamp;
#endif
	table[NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC].data = &nf_generic_pernet(net)->timeout;
	table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP].data = &nf_icmp_pernet(net)->timeout;
	table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6].data = &nf_icmpv6_pernet(net)->timeout;
	table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP].data = &un->timeouts[UDP_CT_UNREPLIED];
	table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM].data = &un->timeouts[UDP_CT_REPLIED];

	nf_conntrack_standalone_init_tcp_sysctl(net, table);
	nf_conntrack_standalone_init_sctp_sysctl(net, table);
	nf_conntrack_standalone_init_dccp_sysctl(net, table);
	nf_conntrack_standalone_init_gre_sysctl(net, table);

	/* Don't allow unprivileged users to alter certain sysctls */
	if (net->user_ns != &init_user_ns) {
		table[NF_SYSCTL_CT_HELPER].mode = 0444;
#ifdef CONFIG_NF_CONNTRACK_EVENTS
		table[NF_SYSCTL_CT_EVENTS].mode = 0444;
#endif
	}

	/* These entries were not redirected to per-netns storage above and
	 * still back global variables.  Make them read-only in every netns
	 * other than init_net, regardless of the owning user namespace, so
	 * writes cannot leak across namespaces.
	 */
	if (!net_eq(&init_net, net)) {
		table[NF_SYSCTL_CT_MAX].mode = 0444;
		table[NF_SYSCTL_CT_EXPECT_MAX].mode = 0444;
		table[NF_SYSCTL_CT_BUCKETS].mode = 0444;
	}

	cnet->sysctl_header = register_net_sysctl(net, "net/netfilter", table);
	if (!cnet->sysctl_header)
		goto out_unregister_netfilter;

	return 0;

out_unregister_netfilter:
	kfree(table);
	return -ENOMEM;
}
| 1 |
[
"CWE-203"
] |
linux
|
2671fa4dc0109d3fb581bc3078fdf17b5d9080f6
| 215,048,753,126,922,700,000,000,000,000,000,000,000 | 58 |
netfilter: conntrack: Make global sysctls readonly in non-init netns
These sysctls point to global variables:
- NF_SYSCTL_CT_MAX (&nf_conntrack_max)
- NF_SYSCTL_CT_EXPECT_MAX (&nf_ct_expect_max)
- NF_SYSCTL_CT_BUCKETS (&nf_conntrack_htable_size_user)
Because their data pointers are not updated to point to per-netns
structures, they must be marked read-only in a non-init_net ns.
Otherwise, changes in any net namespace are reflected in (leaked into)
all other net namespaces. This problem has existed since the
introduction of net namespaces.
The current logic marks them read-only only if the net namespace is
owned by an unprivileged user (other than init_user_ns).
Commit d0febd81ae77 ("netfilter: conntrack: re-visit sysctls in
unprivileged namespaces") "exposes all sysctls even if the namespace is
unpriviliged." Since we need to mark them readonly in any case, we can
forego the unprivileged user check altogether.
Fixes: d0febd81ae77 ("netfilter: conntrack: re-visit sysctls in unprivileged namespaces")
Signed-off-by: Jonathon Reinhart <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static void parse_version(std::string const& full_version_string,
std::string& version, int& extension_level)
{
PointerHolder<char> vp(true, QUtil::copy_string(full_version_string));
char* v = vp.getPointer();
char* p1 = strchr(v, '.');
char* p2 = (p1 ? strchr(1 + p1, '.') : 0);
if (p2 && *(p2 + 1))
{
*p2++ = '\0';
extension_level = QUtil::string_to_int(p2);
}
version = v;
}
| 0 |
[
"CWE-125"
] |
qpdf
|
1868a10f8b06631362618bfc85ca8646da4b4b71
| 103,398,590,887,672,860,000,000,000,000,000,000,000 | 14 |
Replace all atoi calls with QUtil::string_to_int
The latter catches underflow/overflow.
|
/*
 * Tear down the SCO connection attached to @hcon, reporting @err to the
 * bound socket (if any), and free the sco_conn.
 *
 * The socket pointer is sampled under the conn lock, then pinned with
 * sock_hold() so it stays valid while we detach it outside that lock.
 * The delayed timeout work is cancelled synchronously before kfree(conn)
 * so no work item can run against freed memory.
 */
static void sco_conn_del(struct hci_conn *hcon, int err)
{
	struct sco_conn *conn = hcon->sco_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Kill socket */
	sco_conn_lock(conn);
	sk = conn->sk;
	sco_conn_unlock(conn);

	if (sk) {
		sock_hold(sk);
		lock_sock(sk);
		sco_sock_clear_timer(sk);
		sco_chan_del(sk, err);
		release_sock(sk);
		sock_put(sk);

		/* Ensure no more work items will run before freeing conn. */
		cancel_delayed_work_sync(&conn->timeout_work);
	}

	hcon->sco_data = NULL;
	kfree(conn);
}
| 0 |
[
"CWE-416"
] |
linux
|
99c23da0eed4fd20cae8243f2b51e10e66aa0951
| 291,443,116,384,815,320,000,000,000,000,000,000,000 | 30 |
Bluetooth: sco: Fix lock_sock() blockage by memcpy_from_msg()
The sco_send_frame() also takes lock_sock() during memcpy_from_msg()
call that may be endlessly blocked by a task with userfaultd
technique, and this will result in a hung task watchdog trigger.
Just like the similar fix for hci_sock_sendmsg() in commit
92c685dc5de0 ("Bluetooth: reorganize functions..."), this patch moves
the memcpy_from_msg() out of lock_sock() for addressing the hang.
This should be the last piece for fixing CVE-2021-3640 after a few
already queued fixes.
Signed-off-by: Takashi Iwai <[email protected]>
Signed-off-by: Marcel Holtmann <[email protected]>
|
**/
CImg<T>& haar(const bool invert=false, const unsigned int nb_scales=1) {
return get_haar(invert,nb_scales).move_to(*this);
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 177,904,461,311,224,300,000,000,000,000,000,000,000 | 3 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
BDRVQcowState *s = bs->opaque;
unsigned int len, i;
int ret = 0;
QCowHeader header;
QemuOpts *opts;
Error *local_err = NULL;
uint64_t ext_end;
uint64_t l1_vm_state_index;
const char *opt_overlap_check;
int overlap_check_template = 0;
ret = bdrv_pread(bs->file, 0, &header, sizeof(header));
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read qcow2 header");
goto fail;
}
be32_to_cpus(&header.magic);
be32_to_cpus(&header.version);
be64_to_cpus(&header.backing_file_offset);
be32_to_cpus(&header.backing_file_size);
be64_to_cpus(&header.size);
be32_to_cpus(&header.cluster_bits);
be32_to_cpus(&header.crypt_method);
be64_to_cpus(&header.l1_table_offset);
be32_to_cpus(&header.l1_size);
be64_to_cpus(&header.refcount_table_offset);
be32_to_cpus(&header.refcount_table_clusters);
be64_to_cpus(&header.snapshots_offset);
be32_to_cpus(&header.nb_snapshots);
if (header.magic != QCOW_MAGIC) {
error_setg(errp, "Image is not in qcow2 format");
ret = -EINVAL;
goto fail;
}
if (header.version < 2 || header.version > 3) {
report_unsupported(bs, errp, "QCOW version %d", header.version);
ret = -ENOTSUP;
goto fail;
}
s->qcow_version = header.version;
/* Initialise cluster size */
if (header.cluster_bits < MIN_CLUSTER_BITS ||
header.cluster_bits > MAX_CLUSTER_BITS) {
error_setg(errp, "Unsupported cluster size: 2^%i", header.cluster_bits);
ret = -EINVAL;
goto fail;
}
s->cluster_bits = header.cluster_bits;
s->cluster_size = 1 << s->cluster_bits;
s->cluster_sectors = 1 << (s->cluster_bits - 9);
/* Initialise version 3 header fields */
if (header.version == 2) {
header.incompatible_features = 0;
header.compatible_features = 0;
header.autoclear_features = 0;
header.refcount_order = 4;
header.header_length = 72;
} else {
be64_to_cpus(&header.incompatible_features);
be64_to_cpus(&header.compatible_features);
be64_to_cpus(&header.autoclear_features);
be32_to_cpus(&header.refcount_order);
be32_to_cpus(&header.header_length);
if (header.header_length < 104) {
error_setg(errp, "qcow2 header too short");
ret = -EINVAL;
goto fail;
}
}
if (header.header_length > s->cluster_size) {
error_setg(errp, "qcow2 header exceeds cluster size");
ret = -EINVAL;
goto fail;
}
if (header.header_length > sizeof(header)) {
s->unknown_header_fields_size = header.header_length - sizeof(header);
s->unknown_header_fields = g_malloc(s->unknown_header_fields_size);
ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields,
s->unknown_header_fields_size);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read unknown qcow2 header "
"fields");
goto fail;
}
}
if (header.backing_file_offset > s->cluster_size) {
error_setg(errp, "Invalid backing file offset");
ret = -EINVAL;
goto fail;
}
if (header.backing_file_offset) {
ext_end = header.backing_file_offset;
} else {
ext_end = 1 << header.cluster_bits;
}
/* Handle feature bits */
s->incompatible_features = header.incompatible_features;
s->compatible_features = header.compatible_features;
s->autoclear_features = header.autoclear_features;
if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) {
void *feature_table = NULL;
qcow2_read_extensions(bs, header.header_length, ext_end,
&feature_table, NULL);
report_unsupported_feature(bs, errp, feature_table,
s->incompatible_features &
~QCOW2_INCOMPAT_MASK);
ret = -ENOTSUP;
g_free(feature_table);
goto fail;
}
if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) {
/* Corrupt images may not be written to unless they are being repaired
*/
if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) {
error_setg(errp, "qcow2: Image is corrupt; cannot be opened "
"read/write");
ret = -EACCES;
goto fail;
}
}
/* Check support for various header values */
if (header.refcount_order != 4) {
report_unsupported(bs, errp, "%d bit reference counts",
1 << header.refcount_order);
ret = -ENOTSUP;
goto fail;
}
s->refcount_order = header.refcount_order;
if (header.crypt_method > QCOW_CRYPT_AES) {
error_setg(errp, "Unsupported encryption method: %i",
header.crypt_method);
ret = -EINVAL;
goto fail;
}
s->crypt_method_header = header.crypt_method;
if (s->crypt_method_header) {
bs->encrypted = 1;
}
s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */
s->l2_size = 1 << s->l2_bits;
bs->total_sectors = header.size / 512;
s->csize_shift = (62 - (s->cluster_bits - 8));
s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
s->cluster_offset_mask = (1LL << s->csize_shift) - 1;
s->refcount_table_offset = header.refcount_table_offset;
s->refcount_table_size =
header.refcount_table_clusters << (s->cluster_bits - 3);
if (header.refcount_table_clusters > (0x800000 >> s->cluster_bits)) {
/* 8 MB refcount table is enough for 2 PB images at 64k cluster size
* (128 GB for 512 byte clusters, 2 EB for 2 MB clusters) */
error_setg(errp, "Reference count table too large");
ret = -EINVAL;
goto fail;
}
ret = validate_table_offset(bs, s->refcount_table_offset,
s->refcount_table_size, sizeof(uint64_t));
if (ret < 0) {
error_setg(errp, "Invalid reference count table offset");
goto fail;
}
/* Snapshot table offset/length */
if (header.nb_snapshots > QCOW_MAX_SNAPSHOTS) {
error_setg(errp, "Too many snapshots");
ret = -EINVAL;
goto fail;
}
ret = validate_table_offset(bs, header.snapshots_offset,
header.nb_snapshots,
sizeof(QCowSnapshotHeader));
if (ret < 0) {
error_setg(errp, "Invalid snapshot table offset");
goto fail;
}
s->snapshots_offset = header.snapshots_offset;
s->nb_snapshots = header.nb_snapshots;
/* read the level 1 table */
if (header.l1_size > 0x2000000) {
/* 32 MB L1 table is enough for 2 PB images at 64k cluster size
* (128 GB for 512 byte clusters, 2 EB for 2 MB clusters) */
error_setg(errp, "Active L1 table too large");
ret = -EFBIG;
goto fail;
}
s->l1_size = header.l1_size;
l1_vm_state_index = size_to_l1(s, header.size);
if (l1_vm_state_index > INT_MAX) {
error_setg(errp, "Image is too big");
ret = -EFBIG;
goto fail;
}
s->l1_vm_state_index = l1_vm_state_index;
/* the L1 table must contain at least enough entries to put
header.size bytes */
if (s->l1_size < s->l1_vm_state_index) {
error_setg(errp, "L1 table is too small");
ret = -EINVAL;
goto fail;
}
ret = validate_table_offset(bs, header.l1_table_offset,
header.l1_size, sizeof(uint64_t));
if (ret < 0) {
error_setg(errp, "Invalid L1 table offset");
goto fail;
}
s->l1_table_offset = header.l1_table_offset;
if (s->l1_size > 0) {
s->l1_table = g_malloc0(
align_offset(s->l1_size * sizeof(uint64_t), 512));
ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table,
s->l1_size * sizeof(uint64_t));
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read L1 table");
goto fail;
}
for(i = 0;i < s->l1_size; i++) {
be64_to_cpus(&s->l1_table[i]);
}
}
/* alloc L2 table/refcount block cache */
s->l2_table_cache = qcow2_cache_create(bs, L2_CACHE_SIZE);
s->refcount_block_cache = qcow2_cache_create(bs, REFCOUNT_CACHE_SIZE);
s->cluster_cache = g_malloc(s->cluster_size);
/* one more sector for decompressed data alignment */
s->cluster_data = qemu_blockalign(bs, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size
+ 512);
s->cluster_cache_offset = -1;
s->flags = flags;
ret = qcow2_refcount_init(bs);
if (ret != 0) {
error_setg_errno(errp, -ret, "Could not initialize refcount handling");
goto fail;
}
QLIST_INIT(&s->cluster_allocs);
QTAILQ_INIT(&s->discards);
/* read qcow2 extensions */
if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL,
&local_err)) {
error_propagate(errp, local_err);
ret = -EINVAL;
goto fail;
}
/* read the backing file name */
if (header.backing_file_offset != 0) {
len = header.backing_file_size;
if (len > MIN(1023, s->cluster_size - header.backing_file_offset)) {
error_setg(errp, "Backing file name too long");
ret = -EINVAL;
goto fail;
}
ret = bdrv_pread(bs->file, header.backing_file_offset,
bs->backing_file, len);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read backing file name");
goto fail;
}
bs->backing_file[len] = '\0';
}
ret = qcow2_read_snapshots(bs);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read snapshots");
goto fail;
}
/* Clear unknown autoclear feature bits */
if (!bs->read_only && !(flags & BDRV_O_INCOMING) && s->autoclear_features) {
s->autoclear_features = 0;
ret = qcow2_update_header(bs);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not update qcow2 header");
goto fail;
}
}
/* Initialise locks */
qemu_co_mutex_init(&s->lock);
/* Repair image if dirty */
if (!(flags & (BDRV_O_CHECK | BDRV_O_INCOMING)) && !bs->read_only &&
(s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) {
BdrvCheckResult result = {0};
ret = qcow2_check(bs, &result, BDRV_FIX_ERRORS);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not repair dirty image");
goto fail;
}
}
/* Enable lazy_refcounts according to image and command line options */
opts = qemu_opts_create(&qcow2_runtime_opts, NULL, 0, &error_abort);
qemu_opts_absorb_qdict(opts, options, &local_err);
if (local_err) {
error_propagate(errp, local_err);
ret = -EINVAL;
goto fail;
}
s->use_lazy_refcounts = qemu_opt_get_bool(opts, QCOW2_OPT_LAZY_REFCOUNTS,
(s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS));
s->discard_passthrough[QCOW2_DISCARD_NEVER] = false;
s->discard_passthrough[QCOW2_DISCARD_ALWAYS] = true;
s->discard_passthrough[QCOW2_DISCARD_REQUEST] =
qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_REQUEST,
flags & BDRV_O_UNMAP);
s->discard_passthrough[QCOW2_DISCARD_SNAPSHOT] =
qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_SNAPSHOT, true);
s->discard_passthrough[QCOW2_DISCARD_OTHER] =
qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_OTHER, false);
opt_overlap_check = qemu_opt_get(opts, "overlap-check") ?: "cached";
if (!strcmp(opt_overlap_check, "none")) {
overlap_check_template = 0;
} else if (!strcmp(opt_overlap_check, "constant")) {
overlap_check_template = QCOW2_OL_CONSTANT;
} else if (!strcmp(opt_overlap_check, "cached")) {
overlap_check_template = QCOW2_OL_CACHED;
} else if (!strcmp(opt_overlap_check, "all")) {
overlap_check_template = QCOW2_OL_ALL;
} else {
error_setg(errp, "Unsupported value '%s' for qcow2 option "
"'overlap-check'. Allowed are either of the following: "
"none, constant, cached, all", opt_overlap_check);
qemu_opts_del(opts);
ret = -EINVAL;
goto fail;
}
s->overlap_check = 0;
for (i = 0; i < QCOW2_OL_MAX_BITNR; i++) {
/* overlap-check defines a template bitmask, but every flag may be
* overwritten through the associated boolean option */
s->overlap_check |=
qemu_opt_get_bool(opts, overlap_bool_option_names[i],
overlap_check_template & (1 << i)) << i;
}
qemu_opts_del(opts);
if (s->use_lazy_refcounts && s->qcow_version < 3) {
error_setg(errp, "Lazy refcounts require a qcow2 image with at least "
"qemu 1.1 compatibility level");
ret = -EINVAL;
goto fail;
}
#ifdef DEBUG_ALLOC
{
BdrvCheckResult result = {0};
qcow2_check_refcounts(bs, &result, 0);
}
#endif
return ret;
fail:
g_free(s->unknown_header_fields);
cleanup_unknown_header_ext(bs);
qcow2_free_snapshots(bs);
qcow2_refcount_close(bs);
g_free(s->l1_table);
/* else pre-write overlap checks in cache_destroy may crash */
s->l1_table = NULL;
if (s->l2_table_cache) {
qcow2_cache_destroy(bs, s->l2_table_cache);
}
if (s->refcount_block_cache) {
qcow2_cache_destroy(bs, s->refcount_block_cache);
}
g_free(s->cluster_cache);
qemu_vfree(s->cluster_data);
return ret;
}
| 0 |
[
"CWE-190"
] |
qemu
|
b106ad9185f35fc4ad669555ad0e79e276083bd7
| 265,594,576,484,595,040,000,000,000,000,000,000,000 | 410 |
qcow2: Don't rely on free_cluster_index in alloc_refcount_block() (CVE-2014-0147)
free_cluster_index is only correct if update_refcount() was called from
an allocation function, and even there it's brittle because it's used to
protect unfinished allocations which still have a refcount of 0 - if it
moves in the wrong place, the unfinished allocation can be corrupted.
So not using it any more seems to be a good idea. Instead, use the
first requested cluster to do the calculations. Return -EAGAIN if
unfinished allocations could become invalid and let the caller restart
its search for some free clusters.
The context of creating a snapsnot is one situation where
update_refcount() is called outside of a cluster allocation. For this
case, the change fixes a buffer overflow if a cluster is referenced in
an L2 table that cannot be represented by an existing refcount block.
(new_table[refcount_table_index] was out of bounds)
[Bump the qemu-iotests 026 refblock_alloc.write leak count from 10 to
11.
--Stefan]
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]>
|
/*
 * Parse an optional "from[,to]" range at "*str", advancing the pointer past
 * whatever was consumed.  "*num1"/"*num2" receive the parsed numbers; with a
 * single number and no comma, both get the same value.  Returns FAIL when a
 * comma is present but no number was given on either side, otherwise OK.
 */
get_list_range(char_u **str, int *num1, int *num2)
{
    int		nlen;
    int		got_first = FALSE;
    varnumber_T	val;

    // Parse the optional "from" number (may be negative).
    *str = skipwhite(*str);
    if (**str == '-' || vim_isdigit(**str))
    {
	vim_str2nr(*str, NULL, &nlen, 0, &val, NULL, 0, FALSE);
	*str += nlen;
	*num1 = (int)val;
	got_first = TRUE;
    }

    *str = skipwhite(*str);
    if (**str != ',')
    {
	// No comma: a single number (if any) serves as both ends.
	if (got_first)
	    *num2 = *num1;
	return OK;
    }

    // Parse the "to" number after the comma.
    *str = skipwhite(*str + 1);
    vim_str2nr(*str, NULL, &nlen, 0, &val, NULL, 0, FALSE);
    if (nlen > 0)
    {
	*num2 = (int)val;
	*str = skipwhite(*str + nlen);
    }
    else if (!got_first)	// no number on either side of the comma
	return FAIL;
    return OK;
}
| 0 |
[
"CWE-122",
"CWE-787"
] |
vim
|
85b6747abc15a7a81086db31289cf1b8b17e6cb1
| 162,846,942,813,959,070,000,000,000,000,000,000,000 | 31 |
patch 8.2.4214: illegal memory access with large 'tabstop' in Ex mode
Problem: Illegal memory access with large 'tabstop' in Ex mode.
Solution: Allocate enough memory.
|
// Expose the server-wide ThreadLocal::Instance so listener components can
// register thread-local slots; simply forwards to the owning server.
ThreadLocal::Instance& ListenerFactoryContextBaseImpl::threadLocal() {
  return server_.threadLocal();
}
| 0 |
[
"CWE-400"
] |
envoy
|
dfddb529e914d794ac552e906b13d71233609bf7
| 134,404,052,702,831,240,000,000,000,000,000,000,000 | 3 |
listener: Add configurable accepted connection limits (#153)
Add support for per-listener limits on accepted connections.
Signed-off-by: Tony Allen <[email protected]>
|
/*
 * Receive the next RDP packet and return a stream positioned at its payload.
 *
 * On success *type is filled from the parsed TS_SHARECONTROLHEADER and the
 * shared stream is returned; NULL is returned when the security layer has no
 * more data.  Fastpath update PDUs are processed internally and never handed
 * to the caller.
 *
 * NOTE(review): rdp_s is static and g_next_packet is a global read cursor, so
 * this function is not reentrant — confirm single-threaded use by callers.
 */
rdp_recv(uint8 * type)
{
	RD_BOOL is_fastpath;
	static STREAM rdp_s;
	uint16 length;

	while (1)
	{
		/* fill stream with data if needed for parsing a new packet */
		if ((rdp_s == NULL) || (g_next_packet >= rdp_s->end) || (g_next_packet == NULL))
		{
			rdp_s = sec_recv(&is_fastpath);
			if (rdp_s == NULL)
				return NULL;
			if (is_fastpath == True)
			{
				/* process_ts_fp_updates moves g_next_packet */
				process_ts_fp_updates(rdp_s);
				continue;
			}
			g_next_packet = rdp_s->p;
		}
		else
		{
			/* More packets remain in the current stream: resume
			 * parsing at the saved cursor. */
			rdp_s->p = g_next_packet;
		}
		/* parse a TS_SHARECONTROLHEADER; on a bad header, skip to the
		 * next packet instead of failing */
		if (rdp_ts_in_share_control_header(rdp_s, type, &length) == False)
			continue;
		break;
	}

	logger(Protocol, Debug, "rdp_recv(), RDP packet #%d, type 0x%x", ++g_packetno, *type);

	/* Remember where the next packet begins within this stream. */
	g_next_packet += length;
	return rdp_s;
}
| 0 |
[
"CWE-119",
"CWE-125",
"CWE-703",
"CWE-787"
] |
rdesktop
|
4dca546d04321a610c1835010b5dad85163b65e1
| 170,250,044,360,110,300,000,000,000,000,000,000,000 | 41 |
Malicious RDP server security fixes
This commit includes fixes for a set of 21 vulnerabilities in
rdesktop when a malicious RDP server is used.
All vulnerabilities was identified and reported by Eyal Itkin.
* Add rdp_protocol_error function that is used in several fixes
* Refactor of process_bitmap_updates
* Fix possible integer overflow in s_check_rem() on 32bit arch
* Fix memory corruption in process_bitmap_data - CVE-2018-8794
* Fix remote code execution in process_bitmap_data - CVE-2018-8795
* Fix remote code execution in process_plane - CVE-2018-8797
* Fix Denial of Service in mcs_recv_connect_response - CVE-2018-20175
* Fix Denial of Service in mcs_parse_domain_params - CVE-2018-20175
* Fix Denial of Service in sec_parse_crypt_info - CVE-2018-20176
* Fix Denial of Service in sec_recv - CVE-2018-20176
* Fix minor information leak in rdpdr_process - CVE-2018-8791
* Fix Denial of Service in cssp_read_tsrequest - CVE-2018-8792
* Fix remote code execution in cssp_read_tsrequest - CVE-2018-8793
* Fix Denial of Service in process_bitmap_data - CVE-2018-8796
* Fix minor information leak in rdpsnd_process_ping - CVE-2018-8798
* Fix Denial of Service in process_secondary_order - CVE-2018-8799
* Fix remote code execution in in ui_clip_handle_data - CVE-2018-8800
* Fix major information leak in ui_clip_handle_data - CVE-2018-20174
* Fix memory corruption in rdp_in_unistr - CVE-2018-20177
* Fix Denial of Service in process_demand_active - CVE-2018-20178
* Fix remote code execution in lspci_process - CVE-2018-20179
* Fix remote code execution in rdpsnddbg_process - CVE-2018-20180
* Fix remote code execution in seamless_process - CVE-2018-20181
* Fix remote code execution in seamless_process_line - CVE-2018-20182
|
/*
 * Initialize and arm (expires = now) the block-retire timer of the RX
 * ring's kernel block descriptor queue.
 */
static void prb_setup_retire_blk_timer(struct packet_sock *po)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);

	timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired, 0);
	pkc->retire_blk_timer.expires = jiffies;
}
| 0 |
[
"CWE-362"
] |
linux
|
15fe076edea787807a7cdc168df832544b58eba6
| 63,645,835,688,754,320,000,000,000,000,000,000,000 | 9 |
net/packet: fix a race in packet_bind() and packet_notifier()
syzbot reported crashes [1] and provided a C repro easing bug hunting.
When/if packet_do_bind() calls __unregister_prot_hook() and releases
po->bind_lock, another thread can run packet_notifier() and process an
NETDEV_UP event.
This calls register_prot_hook() and hooks again the socket right before
first thread is able to grab again po->bind_lock.
Fixes this issue by temporarily setting po->num to 0, as suggested by
David Miller.
[1]
dev_remove_pack: ffff8801bf16fa80 not found
------------[ cut here ]------------
kernel BUG at net/core/dev.c:7945! ( BUG_ON(!list_empty(&dev->ptype_all)); )
invalid opcode: 0000 [#1] SMP KASAN
Dumping ftrace buffer:
(ftrace buffer empty)
Modules linked in:
device syz0 entered promiscuous mode
CPU: 0 PID: 3161 Comm: syzkaller404108 Not tainted 4.14.0+ #190
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
task: ffff8801cc57a500 task.stack: ffff8801cc588000
RIP: 0010:netdev_run_todo+0x772/0xae0 net/core/dev.c:7945
RSP: 0018:ffff8801cc58f598 EFLAGS: 00010293
RAX: ffff8801cc57a500 RBX: dffffc0000000000 RCX: ffffffff841f75b2
RDX: 0000000000000000 RSI: 1ffff100398b1ede RDI: ffff8801bf1f8810
device syz0 entered promiscuous mode
RBP: ffff8801cc58f898 R08: 0000000000000001 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000000 R12: ffff8801bf1f8cd8
R13: ffff8801cc58f870 R14: ffff8801bf1f8780 R15: ffff8801cc58f7f0
FS: 0000000001716880(0000) GS:ffff8801db400000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 0000000020b13000 CR3: 0000000005e25000 CR4: 00000000001406f0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Call Trace:
rtnl_unlock+0xe/0x10 net/core/rtnetlink.c:106
tun_detach drivers/net/tun.c:670 [inline]
tun_chr_close+0x49/0x60 drivers/net/tun.c:2845
__fput+0x333/0x7f0 fs/file_table.c:210
____fput+0x15/0x20 fs/file_table.c:244
task_work_run+0x199/0x270 kernel/task_work.c:113
exit_task_work include/linux/task_work.h:22 [inline]
do_exit+0x9bb/0x1ae0 kernel/exit.c:865
do_group_exit+0x149/0x400 kernel/exit.c:968
SYSC_exit_group kernel/exit.c:979 [inline]
SyS_exit_group+0x1d/0x20 kernel/exit.c:977
entry_SYSCALL_64_fastpath+0x1f/0x96
RIP: 0033:0x44ad19
Fixes: 30f7ea1c2b5f ("packet: race condition in packet_bind")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: syzbot <[email protected]>
Cc: Francesco Ruggeri <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/* Return nonzero when "file" lies strictly deeper in the tree than "subtree"
 * and (unless implied dirs are missing, in which case depth alone decides)
 * its name carries "subtree" as a prefix. */
static BOOL is_below(struct file_struct *file, struct file_struct *subtree)
{
	if (F_DEPTH(file) <= F_DEPTH(subtree))
		return 0;
	if (implied_dirs_are_missing)
		return 1;
	return f_name_has_prefix(file, subtree) != 0;
}
| 0 |
[
"CWE-59"
] |
rsync
|
e12a6c087ca1eecdb8eae5977be239c24f4dd3d9
| 264,186,765,249,557,740,000,000,000,000,000,000,000 | 5 |
Add parent-dir validation for --no-inc-recurse too.
|
/*
 * Propagate a change of the namespace-wide FORWARDING setting to the
 * "default" config and to every network device's in_device config, emitting
 * a netconf notification for each updated entry.
 *
 * NOTE(review): devconf writes and the netdev walk suggest this runs under
 * RTNL — confirm against callers.
 */
static void inet_forward_change(struct net *net)
{
	struct net_device *dev;
	int on = IPV4_DEVCONF_ALL(net, FORWARDING);

	/* Mirror the inverse into ACCEPT_REDIRECTS for the "all" config. */
	IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on;
	IPV4_DEVCONF_DFLT(net, FORWARDING) = on;

	/* Notify for the "all" and "default" pseudo-entries first. */
	inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
				    NETCONFA_IFINDEX_ALL,
				    net->ipv4.devconf_all);
	inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
				    NETCONFA_IFINDEX_DEFAULT,
				    net->ipv4.devconf_dflt);

	for_each_netdev(net, dev) {
		struct in_device *in_dev;

		/* LRO is turned off on every device when forwarding is
		 * being enabled. */
		if (on)
			dev_disable_lro(dev);

		/* in_dev is RCU-protected; it may be gone for this dev. */
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(dev);
		if (in_dev) {
			IN_DEV_CONF_SET(in_dev, FORWARDING, on);
			inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
						    dev->ifindex, &in_dev->cnf);
		}
		rcu_read_unlock();
	}
}
| 0 |
[
"CWE-399"
] |
net-next
|
fbd40ea0180a2d328c5adc61414dc8bab9335ce2
| 326,197,243,071,594,900,000,000,000,000,000,000,000 | 28 |
ipv4: Don't do expensive useless work during inetdev destroy.
When an inetdev is destroyed, every address assigned to the interface
is removed. And in this scenerio we do two pointless things which can
be very expensive if the number of assigned interfaces is large:
1) Address promotion. We are deleting all addresses, so there is no
point in doing this.
2) A full nf conntrack table purge for every address. We only need to
do this once, as is already caught by the existing
masq_dev_notifier so masq_inet_event() can skip this.
Reported-by: Solar Designer <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
Tested-by: Cyrill Gorcunov <[email protected]>
|
/*
 * Debug-dump a PC_to_RDR_T0APDU CCID message.
 * Prints the common header, then the three T=0 parameter bytes at offsets
 * 7..9, followed by a hex dump of any remaining bytes.  Messages shorter
 * than the 10-byte fixed part are truncated silently after the header.
 */
print_p2r_to0apdu (const unsigned char *msg, size_t msglen)
{
  print_p2r_header ("PC_to_RDR_T0APDU", msg, msglen);
  if (msglen < 10)
    return;
  DEBUGOUT_1 ("  bmChanges .........: 0x%02x\n", msg[7]);
  DEBUGOUT_1 ("  bClassGetResponse .: 0x%02x\n", msg[8]);
  DEBUGOUT_1 ("  bClassEnvelope ....: 0x%02x\n", msg[9]);
  print_pr_data (msg, msglen, 10);
}
| 0 |
[
"CWE-20"
] |
gnupg
|
2183683bd633818dd031b090b5530951de76f392
| 300,726,265,235,992,300,000,000,000,000,000,000,000 | 10 |
Use inline functions to convert buffer data to scalars.
* common/host2net.h (buf16_to_ulong, buf16_to_uint): New.
(buf16_to_ushort, buf16_to_u16): New.
(buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New.
--
Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to
avoid all sign extension on shift problems. Hanno Böck found a case
with an invalid read due to this problem. To fix that once and for
all almost all uses of "<< 24" and "<< 8" are changed by this patch to
use an inline function from host2net.h.
Signed-off-by: Werner Koch <[email protected]>
|
/*
 * GetImageQuantizeError() measures the color-reduction error of a
 * PseudoClass image by comparing each pixel against its colormap entry and
 * stores three statistics in image->error:
 *
 *   mean_error_per_pixel:     average per-channel distance over all pixels.
 *   normalized_mean_error:    mean squared distance, scaled to [0,1].
 *   normalized_maximum_error: largest single-channel distance, in [0,1].
 *
 * DirectClass images have no colormap, so the error stays zeroed and
 * MagickTrue is returned immediately.  Always returns MagickTrue.
 */
MagickExport MagickBooleanType GetImageQuantizeError(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  IndexPacket
    *indexes;

  MagickRealType
    alpha,
    area,
    beta,
    distance,
    gamma,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  ssize_t
    index,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,&image->exception);
  (void) memset(&image->error,0,sizeof(image->error));
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  /* alpha/beta weight pixel and colormap sides by opacity when a matte
     channel is present; otherwise both stay 1.0. */
  alpha=1.0;
  beta=1.0;
  area=3.0*image->columns*image->rows;  /* 3 channels per pixel */
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  exception=(&image->exception);
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=(ssize_t) GetPixelIndex(indexes+x);
      if (image->matte != MagickFalse)
        {
          alpha=(MagickRealType) (QuantumScale*(GetPixelAlpha(p)));
          beta=(MagickRealType) (QuantumScale*(QuantumRange-
            image->colormap[index].opacity));
        }
      /* Accumulate |pixel-colormap| for each channel: running sum, sum of
         squares, and maximum. */
      distance=fabs((double) (alpha*GetPixelRed(p)-beta*
        image->colormap[index].red));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelGreen(p)-beta*
        image->colormap[index].green));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelBlue(p)-beta*
        image->colormap[index].blue));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  /* PerceptibleReciprocal() guards the division against a zero area. */
  gamma=PerceptibleReciprocal(area);
  image->error.mean_error_per_pixel=gamma*mean_error_per_pixel;
  image->error.normalized_mean_error=gamma*QuantumScale*QuantumScale*mean_error;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  return(MagickTrue);
}
| 0 |
[
"CWE-190"
] |
ImageMagick6
|
d5df600d43c8706df513a3273d09aee6f54a9233
| 184,189,503,623,028,200,000,000,000,000,000,000,000 | 90 |
https://github.com/ImageMagick/ImageMagick/issues/1754
|
/*
 * libvterm "movecursor" callback: the terminal cursor moved to "pos".
 *
 * Stores the new position and visibility on the terminal, repositions the
 * window cursor in every window showing this terminal buffer, and, when the
 * terminal buffer is current and not in Terminal-Normal mode, refreshes the
 * displayed cursor too.  Returns 1 to tell libvterm the event was handled.
 */
handle_movecursor(
	VTermPos pos,
	VTermPos oldpos UNUSED,
	int visible,
	void *user)
{
    term_T *term = (term_T *)user;
    win_T *wp;

    term->tl_cursor_pos = pos;
    term->tl_cursor_visible = visible;

    /* Mirror the terminal cursor into every window on this buffer. */
    FOR_ALL_WINDOWS(wp)
    {
	if (wp->w_buffer == term->tl_buffer)
	    position_cursor(wp, &pos);
    }
    if (term->tl_buffer == curbuf && !term->tl_normal_mode)
    {
	may_toggle_cursor(term);
	update_cursor(term, term->tl_cursor_visible);
    }

    return 1;
}
| 0 |
[
"CWE-476"
] |
vim
|
cd929f7ba8cc5b6d6dcf35c8b34124e969fed6b8
| 288,793,491,162,568,900,000,000,000,000,000,000,000 | 25 |
patch 8.1.0633: crash when out of memory while opening a terminal window
Problem: Crash when out of memory while opening a terminal window.
Solution: Handle out-of-memory more gracefully.
|
// libev write-timeout callback for a downstream connection; delegates to the
// shared downstream_timeoutcb with EV_WRITE to flag a write-side timeout.
void downstream_wtimeoutcb(struct ev_loop *loop, ev_timer *w, int revents) {
  downstream_timeoutcb(loop, w, EV_WRITE);
}
| 0 |
[] |
nghttp2
|
319d5ab1c6d916b6b8a0d85b2ae3f01b3ad04f2c
| 113,801,901,553,474,630,000,000,000,000,000,000,000 | 3 |
nghttpx: Fix request stall
Fix request stall if backend connection is reused and buffer is full.
|
/*
 * Reap completed descriptors on an ioat2 DMA channel.
 * Under cleanup_lock, run __cleanup() up to the completion address only when
 * ioat_cleanup_preamble() reports there is new completed work.
 */
static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;

	spin_lock_bh(&chan->cleanup_lock);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);
	spin_unlock_bh(&chan->cleanup_lock);
}
| 0 |
[] |
linux
|
7bced397510ab569d31de4c70b39e13355046387
| 128,705,360,418,380,960,000,000,000,000,000,000,000 | 10 |
net_dma: simple removal
Per commit "77873803363c net_dma: mark broken" net_dma is no longer used
and there is no plan to fix it.
This is the mechanical removal of bits in CONFIG_NET_DMA ifdef guards.
Reverting the remainder of the net_dma induced changes is deferred to
subsequent patches.
Marked for stable due to Roman's report of a memory leak in
dma_pin_iovec_pages():
https://lkml.org/lkml/2014/9/3/177
Cc: Dave Jiang <[email protected]>
Cc: Vinod Koul <[email protected]>
Cc: David Whipple <[email protected]>
Cc: Alexander Duyck <[email protected]>
Cc: <[email protected]>
Reported-by: Roman Gushchin <[email protected]>
Acked-by: David S. Miller <[email protected]>
Signed-off-by: Dan Williams <[email protected]>
|
/* Release a RegexExt and everything it owns; a NULL argument is a no-op. */
free_regex_ext(RegexExt* ext)
{
  if (! IS_NOT_NULL(ext)) return;

  if (IS_NOT_NULL(ext->pattern))
    xfree((void* )ext->pattern);

#ifdef USE_CALLOUT
  if (IS_NOT_NULL(ext->tag_table))
    onig_callout_tag_table_free(ext->tag_table);

  if (IS_NOT_NULL(ext->callout_list))
    onig_free_reg_callout_list(ext->callout_num, ext->callout_list);
#endif

  xfree(ext);
}
| 0 |
[
"CWE-476",
"CWE-125"
] |
oniguruma
|
c509265c5f6ae7264f7b8a8aae1cfa5fc59d108c
| 120,321,576,711,644,800,000,000,000,000,000,000,000 | 17 |
Fix CVE-2019-13225: problem in converting if-then-else pattern to bytecode.
|
/*
 * Check whether a new object may be created as "child" inside "dir".
 * Emits a child-create audit record first, then rejects if the dentry is
 * already instantiated (-EEXIST) or the directory has been removed
 * (-ENOENT), and finally applies the write+exec permission check on the
 * parent directory.
 */
static inline int may_create(struct inode *dir, struct dentry *child)
{
	audit_inode_child(dir, child, AUDIT_TYPE_CHILD_CREATE);
	if (child->d_inode)
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}
| 0 |
[
"CWE-416"
] |
linux
|
f15133df088ecadd141ea1907f2c96df67c729f0
| 44,364,963,757,251,675,000,000,000,000,000,000,000 | 9 |
path_openat(): fix double fput()
path_openat() jumps to the wrong place after do_tmpfile() - it has
already done path_cleanup() (as part of path_lookupat() called by
do_tmpfile()), so doing that again can lead to double fput().
Cc: [email protected] # v3.11+
Signed-off-by: Al Viro <[email protected]>
|
/* Return the SSL_METHOD currently in use by this SSL object. */
SSL_METHOD *SSL_get_ssl_method(SSL *s)
{
    return(s->method);
}
| 0 |
[
"CWE-310"
] |
openssl
|
c6a876473cbff0fd323c8abcaace98ee2d21863d
| 140,993,585,852,991,690,000,000,000,000,000,000,000 | 4 |
Support TLS_FALLBACK_SCSV.
Reviewed-by: Stephen Henson <[email protected]>
|
/*
 * Validate a sockaddr for SCTP use on this socket: the address family and
 * length must resolve to an address-family handler, the address value must
 * be usable, and the protocol family's send verifier must accept it.
 * Returns 0 on success or -EINVAL.
 */
static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
				   int len)
{
	struct sctp_af *af = sctp_sockaddr_af(sctp_sk(sk), addr, len);

	/* Short-circuit keeps the original check order intact. */
	if (!af ||
	    !af->addr_valid(addr, sctp_sk(sk), NULL) ||
	    !sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
		return -EINVAL;

	return 0;
}
| 0 |
[] |
linux-2.6
|
5e739d1752aca4e8f3e794d431503bfca3162df4
| 88,816,118,310,446,520,000,000,000,000,000,000,000 | 19 |
sctp: fix potential panics in the SCTP-AUTH API.
All of the SCTP-AUTH socket options could cause a panic
if the extension is disabled and the API is envoked.
Additionally, there were some additional assumptions that
certain pointers would always be valid which may not
always be the case.
This patch hardens the API and address all of the crash
scenarios.
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/*
 * Attach file descriptor "fd" as the write channel of "s".
 *
 * When the current read BIO already is a socket BIO wrapping the same fd,
 * it is reused for writing as well; otherwise a fresh non-owning
 * (BIO_NOCLOSE) socket BIO is created for fd.  The read BIO is kept either
 * way.  Returns 1 on success, 0 if the new BIO could not be allocated.
 */
int SSL_set_wfd(SSL *s, int fd)
{
    int ret = 0;
    BIO *bio = NULL;

    /* Reuse the read BIO only if it is a socket BIO on this very fd. */
    if ((s->rbio == NULL) || (BIO_method_type(s->rbio) != BIO_TYPE_SOCKET)
        || ((int)BIO_get_fd(s->rbio, NULL) != fd)) {
        bio = BIO_new(BIO_s_socket());

        if (bio == NULL) {
            SSLerr(SSL_F_SSL_SET_WFD, ERR_R_BUF_LIB);
            goto err;
        }
        BIO_set_fd(bio, fd, BIO_NOCLOSE);
        SSL_set_bio(s, SSL_get_rbio(s), bio);
    } else
        SSL_set_bio(s, SSL_get_rbio(s), SSL_get_rbio(s));
    ret = 1;
 err:
    return (ret);
}
| 0 |
[
"CWE-310"
] |
openssl
|
56f1acf5ef8a432992497a04792ff4b3b2c6f286
| 143,710,598,869,378,910,000,000,000,000,000,000,000 | 21 |
Disable SSLv2 default build, default negotiation and weak ciphers.
SSLv2 is by default disabled at build-time. Builds that are not
configured with "enable-ssl2" will not support SSLv2. Even if
"enable-ssl2" is used, users who want to negotiate SSLv2 via the
version-flexible SSLv23_method() will need to explicitly call either
of:
SSL_CTX_clear_options(ctx, SSL_OP_NO_SSLv2);
or
SSL_clear_options(ssl, SSL_OP_NO_SSLv2);
as appropriate. Even if either of those is used, or the application
explicitly uses the version-specific SSLv2_method() or its client
or server variants, SSLv2 ciphers vulnerable to exhaustive search
key recovery have been removed. Specifically, the SSLv2 40-bit
EXPORT ciphers, and SSLv2 56-bit DES are no longer available.
Mitigation for CVE-2016-0800
Reviewed-by: Emilia Käsper <[email protected]>
|
/* Level 2+ currentgstate: when the page device must be saved, route through
 * the PostScript callout; otherwise fall back to the plain operator. */
z2currentgstate(i_ctx_t *i_ctx_p)
{
    if (save_page_device(igs))
        return push_callout(i_ctx_p, "%currentgstatepagedevice");
    return zcurrentgstate(i_ctx_p);
}
| 0 |
[] |
ghostpdl
|
5516c614dc33662a2afdc377159f70218e67bde5
| 18,104,393,694,084,502,000,000,000,000,000,000,000 | 6 |
Improve restore robustness
Prompted by looking at Bug 699654:
There are two variants of the restore operator in Ghostscript: one is Level 1
(restoring VM), the other is Level 2+ (adding page device restoring to the
Level operator).
This was implemented by the Level 2+ version restoring the device in the
graphics state, then calling the Level 1 implementation to handle actually
restoring the VM state.
The problem was that the operand checking, and sanity of the save object was
only done by the Level 1 variant, thus meaning an invalid save object could
leave a (Level 2+) restore partially complete - with the page device part
restored, but not VM, and the page device not configured.
To solve that, this commit splits the operand and sanity checking, and the
core of the restore operation into separate functions, so the relevant
operators can validate the operand *before* taking any further action. That
reduces the chances of an invalid restore leaving the interpreter in an
unknown state.
If an error occurs during the actual VM restore it is essentially fatal, and the
interpreter cannot continue, but as an extra surety for security, in the event
of such an error, we'll explicitly preserve the LockSafetyParams of the device,
rather than rely on the post-restore device configuration (which won't happen
in the event of an error).
|
/* Create the default IPv6 multicast routing table for this namespace.
 * Returns 0 on success, -ENOMEM if the table could not be allocated. */
static int __net_init ip6mr_rules_init(struct net *net)
{
	net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (net->ipv6.mrt6 == NULL)
		return -ENOMEM;
	return 0;
}
| 0 |
[
"CWE-20"
] |
linux
|
99253eb750fda6a644d5188fb26c43bad8d5a745
| 139,828,234,413,470,580,000,000,000,000,000,000,000 | 5 |
ipv6: check sk sk_type and protocol early in ip_mroute_set/getsockopt
Commit 5e1859fbcc3c ("ipv4: ipmr: various fixes and cleanups") fixed
the issue for ipv4 ipmr:
ip_mroute_setsockopt() & ip_mroute_getsockopt() should not
access/set raw_sk(sk)->ipmr_table before making sure the socket
is a raw socket, and protocol is IGMP
The same fix should be done for ipv6 ipmr as well.
This patch can fix the panic caused by overwriting the same offset
as ipmr_table as in raw_sk(sk) when accessing other type's socket
by ip_mroute_setsockopt().
Signed-off-by: Xin Long <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/*
 * Deliver an incoming IEEE 802.15.4 data frame to every bound dgram socket
 * whose filter matches this device's PAN id, short address and extended
 * address.
 *
 * Every match before the last receives a clone of the skb; the final match
 * consumes the original, saving one copy.  Returns NET_RX_SUCCESS when at
 * least one socket matched, otherwise frees the skb and returns NET_RX_DROP.
 */
static int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk, *prev = NULL;
	int ret = NET_RX_SUCCESS;
	__le16 pan_id, short_addr;
	__le64 hw_addr;

	/* Data frame processing */
	BUG_ON(dev->type != ARPHRD_IEEE802154);

	pan_id = dev->ieee802154_ptr->pan_id;
	short_addr = dev->ieee802154_ptr->short_addr;
	hw_addr = dev->ieee802154_ptr->extended_addr;

	read_lock(&dgram_lock);
	sk_for_each(sk, &dgram_head) {
		if (ieee802154_match_sock(hw_addr, pan_id, short_addr,
					  dgram_sk(sk))) {
			if (prev) {
				struct sk_buff *clone;

				/* Earlier matches get clones; a failed clone
				 * is silently skipped. */
				clone = skb_clone(skb, GFP_ATOMIC);
				if (clone)
					dgram_rcv_skb(prev, clone);
			}

			prev = sk;
		}
	}

	if (prev) {
		/* The last matching socket consumes the original skb. */
		dgram_rcv_skb(prev, skb);
	} else {
		kfree_skb(skb);
		ret = NET_RX_DROP;
	}
	read_unlock(&dgram_lock);

	return ret;
}
| 0 |
[
"CWE-276"
] |
linux
|
e69dbd4619e7674c1679cba49afd9dd9ac347eef
| 302,078,781,602,794,040,000,000,000,000,000,000,000 | 40 |
ieee802154: enforce CAP_NET_RAW for raw sockets
When creating a raw AF_IEEE802154 socket, CAP_NET_RAW needs to be
checked first.
Signed-off-by: Ori Nimron <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
Acked-by: Stefan Schmidt <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
zip_read_data_deflate(struct archive_read *a, const void **buff,
size_t *size, int64_t *offset)
{
struct zip *zip;
ssize_t bytes_avail;
const void *compressed_buff, *sp;
int r;
(void)offset; /* UNUSED */
zip = (struct zip *)(a->format->data);
/* If the buffer hasn't been allocated, allocate it now. */
if (zip->uncompressed_buffer == NULL) {
zip->uncompressed_buffer_size = 256 * 1024;
zip->uncompressed_buffer
= (unsigned char *)malloc(zip->uncompressed_buffer_size);
if (zip->uncompressed_buffer == NULL) {
archive_set_error(&a->archive, ENOMEM,
"No memory for ZIP decompression");
return (ARCHIVE_FATAL);
}
}
r = zip_deflate_init(a, zip);
if (r != ARCHIVE_OK)
return (r);
/*
* Note: '1' here is a performance optimization.
* Recall that the decompression layer returns a count of
* available bytes; asking for more than that forces the
* decompressor to combine reads by copying data.
*/
compressed_buff = sp = __archive_read_ahead(a, 1, &bytes_avail);
if (0 == (zip->entry->zip_flags & ZIP_LENGTH_AT_END)
&& bytes_avail > zip->entry_bytes_remaining) {
bytes_avail = (ssize_t)zip->entry_bytes_remaining;
}
if (bytes_avail < 0) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Truncated ZIP file body");
return (ARCHIVE_FATAL);
}
if (zip->tctx_valid || zip->cctx_valid) {
if (zip->decrypted_bytes_remaining < (size_t)bytes_avail) {
size_t buff_remaining =
(zip->decrypted_buffer + zip->decrypted_buffer_size)
- (zip->decrypted_ptr + zip->decrypted_bytes_remaining);
if (buff_remaining > (size_t)bytes_avail)
buff_remaining = (size_t)bytes_avail;
if (0 == (zip->entry->zip_flags & ZIP_LENGTH_AT_END) &&
zip->entry_bytes_remaining > 0) {
if ((int64_t)(zip->decrypted_bytes_remaining
+ buff_remaining)
> zip->entry_bytes_remaining) {
if (zip->entry_bytes_remaining <
(int64_t)zip->decrypted_bytes_remaining)
buff_remaining = 0;
else
buff_remaining =
(size_t)zip->entry_bytes_remaining
- zip->decrypted_bytes_remaining;
}
}
if (buff_remaining > 0) {
if (zip->tctx_valid) {
trad_enc_decrypt_update(&zip->tctx,
compressed_buff, buff_remaining,
zip->decrypted_ptr
+ zip->decrypted_bytes_remaining,
buff_remaining);
} else {
size_t dsize = buff_remaining;
archive_decrypto_aes_ctr_update(
&zip->cctx,
compressed_buff, buff_remaining,
zip->decrypted_ptr
+ zip->decrypted_bytes_remaining,
&dsize);
}
zip->decrypted_bytes_remaining += buff_remaining;
}
}
bytes_avail = zip->decrypted_bytes_remaining;
compressed_buff = (const char *)zip->decrypted_ptr;
}
/*
* A bug in zlib.h: stream.next_in should be marked 'const'
* but isn't (the library never alters data through the
* next_in pointer, only reads it). The result: this ugly
* cast to remove 'const'.
*/
zip->stream.next_in = (Bytef *)(uintptr_t)(const void *)compressed_buff;
zip->stream.avail_in = (uInt)bytes_avail;
zip->stream.total_in = 0;
zip->stream.next_out = zip->uncompressed_buffer;
zip->stream.avail_out = (uInt)zip->uncompressed_buffer_size;
zip->stream.total_out = 0;
r = inflate(&zip->stream, 0);
switch (r) {
case Z_OK:
break;
case Z_STREAM_END:
zip->end_of_entry = 1;
break;
case Z_MEM_ERROR:
archive_set_error(&a->archive, ENOMEM,
"Out of memory for ZIP decompression");
return (ARCHIVE_FATAL);
default:
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"ZIP decompression failed (%d)", r);
return (ARCHIVE_FATAL);
}
/* Consume as much as the compressor actually used. */
bytes_avail = zip->stream.total_in;
if (zip->tctx_valid || zip->cctx_valid) {
zip->decrypted_bytes_remaining -= bytes_avail;
if (zip->decrypted_bytes_remaining == 0)
zip->decrypted_ptr = zip->decrypted_buffer;
else
zip->decrypted_ptr += bytes_avail;
}
/* Calculate compressed data as much as we used.*/
if (zip->hctx_valid)
archive_hmac_sha1_update(&zip->hctx, sp, bytes_avail);
__archive_read_consume(a, bytes_avail);
zip->entry_bytes_remaining -= bytes_avail;
zip->entry_compressed_bytes_read += bytes_avail;
*size = zip->stream.total_out;
zip->entry_uncompressed_bytes_read += zip->stream.total_out;
*buff = zip->uncompressed_buffer;
if (zip->end_of_entry && zip->hctx_valid) {
r = check_authentication_code(a, NULL);
if (r != ARCHIVE_OK)
return (r);
}
r = consume_optional_marker(a, zip);
if (r != ARCHIVE_OK)
return (r);
return (ARCHIVE_OK);
}
| 0 |
[
"CWE-399",
"CWE-401"
] |
libarchive
|
ba641f73f3d758d9032b3f0e5597a9c6e593a505
| 246,608,673,135,142,360,000,000,000,000,000,000,000 | 153 |
Fix typo in preprocessor macro in archive_read_format_zip_cleanup()
Frees lzma_stream on cleanup()
Fixes #1165
|
Mat_Rewind( mat_t *mat )
{
int err = 0;
switch ( mat->version ) {
case MAT_FT_MAT5:
(void)fseek((FILE*)mat->fp,128L,SEEK_SET);
break;
case MAT_FT_MAT73:
mat->next_index = 0;
break;
case MAT_FT_MAT4:
(void)fseek((FILE*)mat->fp,0L,SEEK_SET);
break;
default:
err = -1;
break;
}
return err;
}
| 0 |
[
"CWE-401"
] |
matio
|
a47b7cd3aca70e9a0bddf8146eb4ab0cbd19c2c3
| 183,763,795,893,151,400,000,000,000,000,000,000,000 | 21 |
Fix memory leak
As reported by https://github.com/tbeu/matio/issues/131
|
int rtnl_open_byproto(struct rtnl_handle *rth, unsigned int subscriptions,
int protocol)
{
socklen_t addr_len;
int sndbuf = 32768;
int one = 1;
memset(rth, 0, sizeof(*rth));
rth->proto = protocol;
rth->fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, protocol);
if (rth->fd < 0) {
perror("Cannot open netlink socket");
return -1;
}
if (setsockopt(rth->fd, SOL_SOCKET, SO_SNDBUF,
&sndbuf, sizeof(sndbuf)) < 0) {
perror("SO_SNDBUF");
return -1;
}
if (setsockopt(rth->fd, SOL_SOCKET, SO_RCVBUF,
&rcvbuf, sizeof(rcvbuf)) < 0) {
perror("SO_RCVBUF");
return -1;
}
/* Older kernels may no support extended ACK reporting */
setsockopt(rth->fd, SOL_NETLINK, NETLINK_EXT_ACK,
&one, sizeof(one));
memset(&rth->local, 0, sizeof(rth->local));
rth->local.nl_family = AF_NETLINK;
rth->local.nl_groups = subscriptions;
if (bind(rth->fd, (struct sockaddr *)&rth->local,
sizeof(rth->local)) < 0) {
perror("Cannot bind netlink socket");
return -1;
}
addr_len = sizeof(rth->local);
if (getsockname(rth->fd, (struct sockaddr *)&rth->local,
&addr_len) < 0) {
perror("Cannot getsockname");
return -1;
}
if (addr_len != sizeof(rth->local)) {
fprintf(stderr, "Wrong address length %d\n", addr_len);
return -1;
}
if (rth->local.nl_family != AF_NETLINK) {
fprintf(stderr, "Wrong address family %d\n",
rth->local.nl_family);
return -1;
}
rth->seq = time(NULL);
return 0;
}
| 0 |
[] |
iproute2
|
8c50b728b226f6254251282697ce38a72639a6fc
| 177,846,950,137,574,250,000,000,000,000,000,000,000 | 59 |
libnetlink: fix use-after-free of message buf
In __rtnl_talk_iov() main loop, err is a pointer to memory in dynamically
allocated 'buf' that is used to store netlink messages. If netlink message
is an error message, buf is deallocated before returning with error code.
However, on return err->error code is checked one more time to generate
return value, after memory which err points to has already been
freed. Save error code in temporary variable and use the variable to
generate return value.
Fixes: c60389e4f9ea ("libnetlink: fix leak and using unused memory on error")
Signed-off-by: Vlad Buslov <[email protected]>
Signed-off-by: Stephen Hemminger <[email protected]>
|
nfs3svc_encode_linkres(struct svc_rqst *rqstp, __be32 *p,
struct nfsd3_linkres *resp)
{
p = encode_post_op_attr(rqstp, p, &resp->fh);
p = encode_wcc_data(rqstp, p, &resp->tfh);
return xdr_ressize_check(rqstp, p);
}
| 0 |
[
"CWE-119",
"CWE-703"
] |
linux
|
13bf9fbff0e5e099e2b6f003a0ab8ae145436309
| 62,209,397,251,611,800,000,000,000,000,000,000,000 | 7 |
nfsd: stricter decoding of write-like NFSv2/v3 ops
The NFSv2/v3 code does not systematically check whether we decode past
the end of the buffer. This generally appears to be harmless, but there
are a few places where we do arithmetic on the pointers involved and
don't account for the possibility that a length could be negative. Add
checks to catch these.
Reported-by: Tuomas Haanpää <[email protected]>
Reported-by: Ari Kauppi <[email protected]>
Reviewed-by: NeilBrown <[email protected]>
Cc: [email protected]
Signed-off-by: J. Bruce Fields <[email protected]>
|
PJ_DEF(pj_status_t) pjsip_target_set_add_from_msg( pjsip_target_set *tset,
pj_pool_t *pool,
const pjsip_msg *msg)
{
const pjsip_hdr *hdr;
unsigned added = 0;
PJ_ASSERT_RETURN(tset && pool && msg, PJ_EINVAL);
/* Scan for Contact headers and add the URI */
hdr = msg->hdr.next;
while (hdr != &msg->hdr) {
if (hdr->type == PJSIP_H_CONTACT) {
const pjsip_contact_hdr *cn_hdr = (const pjsip_contact_hdr*)hdr;
if (!cn_hdr->star) {
pj_status_t rc;
rc = pjsip_target_set_add_uri(tset, pool, cn_hdr->uri,
cn_hdr->q1000);
if (rc == PJ_SUCCESS)
++added;
}
}
hdr = hdr->next;
}
return added ? PJ_SUCCESS : PJ_EEXISTS;
}
| 0 |
[
"CWE-297",
"CWE-295"
] |
pjproject
|
67e46c1ac45ad784db5b9080f5ed8b133c122872
| 217,160,805,008,193,380,000,000,000,000,000,000,000 | 28 |
Merge pull request from GHSA-8hcp-hm38-mfph
* Check hostname during TLS transport selection
* revision based on feedback
* remove the code in create_request that has been moved
|
static UINT drdynvc_virtual_channel_event_detached(drdynvcPlugin* drdynvc)
{
int i;
DVCMAN* dvcman;
if (!drdynvc)
return CHANNEL_RC_BAD_CHANNEL_HANDLE;
dvcman = (DVCMAN*) drdynvc->channel_mgr;
if (!dvcman)
return CHANNEL_RC_BAD_CHANNEL_HANDLE;
for (i = 0; i < dvcman->num_plugins; i++)
{
UINT error;
IWTSPlugin* pPlugin = dvcman->plugins[i];
if (pPlugin->Detached)
if ((error = pPlugin->Detached(pPlugin)))
{
WLog_Print(drdynvc->log, WLOG_ERROR, "Detach failed with error %"PRIu32"!", error);
return error;
}
}
return CHANNEL_RC_OK;
}
| 0 |
[
"CWE-125"
] |
FreeRDP
|
baee520e3dd9be6511c45a14c5f5e77784de1471
| 196,690,553,544,949,220,000,000,000,000,000,000,000 | 28 |
Fix for #4866: Added additional length checks
|
static void rds_conn_message_info_retrans(struct socket *sock,
unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens)
{
rds_conn_message_info(sock, len, iter, lens, 0);
}
| 0 |
[
"CWE-703"
] |
linux
|
74e98eb085889b0d2d4908f59f6e00026063014f
| 186,239,165,184,885,240,000,000,000,000,000,000,000 | 7 |
RDS: verify the underlying transport exists before creating a connection
There was no verification that an underlying transport exists when creating
a connection, this would cause dereferencing a NULL ptr.
It might happen on sockets that weren't properly bound before attempting to
send a message, which will cause a NULL ptr deref:
[135546.047719] kasan: GPF could be caused by NULL-ptr deref or user memory accessgeneral protection fault: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC KASAN
[135546.051270] Modules linked in:
[135546.051781] CPU: 4 PID: 15650 Comm: trinity-c4 Not tainted 4.2.0-next-20150902-sasha-00041-gbaa1222-dirty #2527
[135546.053217] task: ffff8800835bc000 ti: ffff8800bc708000 task.ti: ffff8800bc708000
[135546.054291] RIP: __rds_conn_create (net/rds/connection.c:194)
[135546.055666] RSP: 0018:ffff8800bc70fab0 EFLAGS: 00010202
[135546.056457] RAX: dffffc0000000000 RBX: 0000000000000f2c RCX: ffff8800835bc000
[135546.057494] RDX: 0000000000000007 RSI: ffff8800835bccd8 RDI: 0000000000000038
[135546.058530] RBP: ffff8800bc70fb18 R08: 0000000000000001 R09: 0000000000000000
[135546.059556] R10: ffffed014d7a3a23 R11: ffffed014d7a3a21 R12: 0000000000000000
[135546.060614] R13: 0000000000000001 R14: ffff8801ec3d0000 R15: 0000000000000000
[135546.061668] FS: 00007faad4ffb700(0000) GS:ffff880252000000(0000) knlGS:0000000000000000
[135546.062836] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
[135546.063682] CR2: 000000000000846a CR3: 000000009d137000 CR4: 00000000000006a0
[135546.064723] Stack:
[135546.065048] ffffffffafe2055c ffffffffafe23fc1 ffffed00493097bf ffff8801ec3d0008
[135546.066247] 0000000000000000 00000000000000d0 0000000000000000 ac194a24c0586342
[135546.067438] 1ffff100178e1f78 ffff880320581b00 ffff8800bc70fdd0 ffff880320581b00
[135546.068629] Call Trace:
[135546.069028] ? __rds_conn_create (include/linux/rcupdate.h:856 net/rds/connection.c:134)
[135546.069989] ? rds_message_copy_from_user (net/rds/message.c:298)
[135546.071021] rds_conn_create_outgoing (net/rds/connection.c:278)
[135546.071981] rds_sendmsg (net/rds/send.c:1058)
[135546.072858] ? perf_trace_lock (include/trace/events/lock.h:38)
[135546.073744] ? lockdep_init (kernel/locking/lockdep.c:3298)
[135546.074577] ? rds_send_drop_to (net/rds/send.c:976)
[135546.075508] ? __might_fault (./arch/x86/include/asm/current.h:14 mm/memory.c:3795)
[135546.076349] ? __might_fault (mm/memory.c:3795)
[135546.077179] ? rds_send_drop_to (net/rds/send.c:976)
[135546.078114] sock_sendmsg (net/socket.c:611 net/socket.c:620)
[135546.078856] SYSC_sendto (net/socket.c:1657)
[135546.079596] ? SYSC_connect (net/socket.c:1628)
[135546.080510] ? trace_dump_stack (kernel/trace/trace.c:1926)
[135546.081397] ? ring_buffer_unlock_commit (kernel/trace/ring_buffer.c:2479 kernel/trace/ring_buffer.c:2558 kernel/trace/ring_buffer.c:2674)
[135546.082390] ? trace_buffer_unlock_commit (kernel/trace/trace.c:1749)
[135546.083410] ? trace_event_raw_event_sys_enter (include/trace/events/syscalls.h:16)
[135546.084481] ? do_audit_syscall_entry (include/trace/events/syscalls.h:16)
[135546.085438] ? trace_buffer_unlock_commit (kernel/trace/trace.c:1749)
[135546.085515] rds_ib_laddr_check(): addr 36.74.25.172 ret -99 node type -1
Acked-by: Santosh Shilimkar <[email protected]>
Signed-off-by: Sasha Levin <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
{
/*
* Allocate the copy buffer
*/
struct sk_buff *n = alloc_skb(skb->end - skb->head, gfp_mask);
if (!n)
goto out;
/* Set the data pointer */
skb_reserve(n, skb->data - skb->head);
/* Set the tail pointer and length */
skb_put(n, skb_headlen(skb));
/* Copy the bytes */
memcpy(n->data, skb->data, n->len);
n->csum = skb->csum;
n->ip_summed = skb->ip_summed;
n->data_len = skb->data_len;
n->len = skb->len;
if (skb_shinfo(skb)->nr_frags) {
int i;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
get_page(skb_shinfo(n)->frags[i].page);
}
skb_shinfo(n)->nr_frags = i;
}
if (skb_shinfo(skb)->frag_list) {
skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
skb_clone_fraglist(n);
}
copy_skb_header(n, skb);
out:
return n;
}
| 0 |
[] |
linux
|
e89e9cf539a28df7d0eb1d0a545368e9920b34ac
| 97,602,489,823,374,280,000,000,000,000,000,000,000 | 41 |
[IPv4/IPv6]: UFO Scatter-gather approach
Attached is kernel patch for UDP Fragmentation Offload (UFO) feature.
1. This patch incorporate the review comments by Jeff Garzik.
2. Renamed USO as UFO (UDP Fragmentation Offload)
3. udp sendfile support with UFO
This patches uses scatter-gather feature of skb to generate large UDP
datagram. Below is a "how-to" on changes required in network device
driver to use the UFO interface.
UDP Fragmentation Offload (UFO) Interface:
-------------------------------------------
UFO is a feature wherein the Linux kernel network stack will offload the
IP fragmentation functionality of large UDP datagram to hardware. This
will reduce the overhead of stack in fragmenting the large UDP datagram to
MTU sized packets
1) Drivers indicate their capability of UFO using
dev->features |= NETIF_F_UFO | NETIF_F_HW_CSUM | NETIF_F_SG
NETIF_F_HW_CSUM is required for UFO over ipv6.
2) UFO packet will be submitted for transmission using driver xmit routine.
UFO packet will have a non-zero value for
"skb_shinfo(skb)->ufo_size"
skb_shinfo(skb)->ufo_size will indicate the length of data part in each IP
fragment going out of the adapter after IP fragmentation by hardware.
skb->data will contain MAC/IP/UDP header and skb_shinfo(skb)->frags[]
contains the data payload. The skb->ip_summed will be set to CHECKSUM_HW
indicating that hardware has to do checksum calculation. Hardware should
compute the UDP checksum of complete datagram and also ip header checksum of
each fragmented IP packet.
For IPV6 the UFO provides the fragment identification-id in
skb_shinfo(skb)->ip6_frag_id. The adapter should use this ID for generating
IPv6 fragments.
Signed-off-by: Ananda Raju <[email protected]>
Signed-off-by: Rusty Russell <[email protected]> (forwarded)
Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
|
main( int argc,
char* argv[] )
{
grEvent event;
parse_cmdline( &argc, &argv );
#if FREETYPE_MAJOR == 2 && FREETYPE_MINOR == 0 && FREETYPE_PATCH <= 8
if ( status.debug )
{
#ifdef FT_DEBUG_LEVEL_TRACE
FT_SetTraceLevel( trace_any, (FT_Byte)status.trace_level );
#else
status.trace_level = 0;
#endif
}
#elif 0
/* `setenv' and `putenv' is not ANSI and I don't want to mess */
/* with this portability issue right now... */
if ( status.debug )
{
char temp[32];
sprintf( temp, "any=%d", status.trace_level );
setenv( "FT2_DEBUG", temp );
}
#endif
/* Initialize engine */
handle = FTDemo_New( status.encoding );
FT_Library_SetLcdFilter( handle->library, FT_LCD_FILTER_DEFAULT );
if ( status.preload )
FTDemo_Set_Preload( handle, 1 );
for ( ; argc > 0; argc--, argv++ )
FTDemo_Install_Font( handle, argv[0] );
if ( handle->num_fonts == 0 )
Fatal( "could not find/open any font file" );
display = FTDemo_Display_New( gr_pixel_mode_rgb24 );
if ( !display )
Fatal( "could not allocate display surface" );
memset( display->fore_color.chroma, 0, 4 );
memset( display->back_color.chroma, 0xff, 4 );
grSetTitle( display->surface,
"FreeType Glyph Viewer - press F1 for help" );
status.Fail = 0;
event_font_change( 0 );
if ( status.lcd_mode >= 0 )
handle->lcd_mode = status.lcd_mode;
FTDemo_Update_Current_Flags( handle );
for ( ;; )
{
FTDemo_Display_Clear( display );
switch ( status.render_mode )
{
case RENDER_MODE_ALL:
error = Render_All( handle->current_font->num_indices,
status.Num );
break;
case RENDER_MODE_EMBOLDEN:
error = Render_Embolden( handle->current_font->num_indices,
status.Num );
break;
case RENDER_MODE_SLANTED:
error = Render_Slanted( handle->current_font->num_indices,
status.Num );
break;
case RENDER_MODE_STROKE:
error = Render_Stroke( handle->current_font->num_indices,
status.Num );
break;
case RENDER_MODE_TEXT:
error = Render_Text( -1, status.Num );
break;
case RENDER_MODE_WATERFALL:
error = Render_Waterfall( status.ptsize );
break;
}
write_header( error );
#if FREETYPE_MAJOR == 2 && FREETYPE_MINOR < 2
if ( status.dump_cache_stats )
{
/* dump simple cache manager statistics */
fprintf( stderr, "cache manager [ nodes, bytes, average ] = "
" [ %d, %ld, %f ]\n",
handle->cache_manager->num_nodes,
handle->cache_manager->cur_weight,
handle->cache_manager->num_nodes > 0
? handle->cache_manager->cur_weight * 1.0 /
handle->cache_manager->num_nodes
: 0.0 );
}
#endif
status.header = 0;
grListenSurface( display->surface, 0, &event );
if ( Process_Event( &event ) )
break;
}
printf( "Execution completed successfully.\n" );
printf( "Fails = %d\n", status.Fail );
FTDemo_Display_Done( display );
FTDemo_Done( handle );
exit( 0 ); /* for safety reasons */
return 0; /* never reached */
}
| 0 |
[
"CWE-120"
] |
freetype2-demos
|
b995299b73ba4cd259f221f500d4e63095508bec
| 298,006,204,351,881,480,000,000,000,000,000,000,000 | 128 |
Fix Savannah bug #30054.
* src/ftdiff.c, src/ftgrid.c, src/ftmulti.c, src/ftstring.c,
src/ftview.c: Use precision for `%s' where appropriate to avoid
buffer overflows.
|
template<typename T>
CImgDisplay& display(const CImgList<T>& list, const char axis='x', const float align=0) {
if (list._width==1) {
const CImg<T>& img = list[0];
if (img._depth==1 && (img._spectrum==1 || img._spectrum>=3) && _normalization!=1) return display(img);
}
CImgList<typename CImg<T>::ucharT> visu(list._width);
unsigned int dims = 0;
cimglist_for(list,l) {
const CImg<T>& img = list._data[l];
img._get_select(*this,_normalization,(img._width - 1)/2,(img._height - 1)/2,
(img._depth - 1)/2).move_to(visu[l]);
dims = std::max(dims,visu[l]._spectrum);
}
cimglist_for(list,l) if (visu[l]._spectrum<dims) visu[l].resize(-100,-100,-100,dims,1);
visu.get_append(axis,align).display(*this);
return *this;
| 0 |
[
"CWE-119",
"CWE-787"
] |
CImg
|
ac8003393569aba51048c9d67e1491559877b1d1
| 332,279,571,354,825,400,000,000,000,000,000,000,000 | 17 |
.
|
opj_codestream_index_t * OPJ_CALLCONV opj_get_cstr_index(opj_codec_t *p_codec)
{
if (p_codec) {
opj_codec_private_t* l_codec = (opj_codec_private_t*) p_codec;
return l_codec->opj_get_codec_index(l_codec->m_codec);
}
return NULL;
}
| 0 |
[
"CWE-20"
] |
openjpeg
|
4edb8c83374f52cd6a8f2c7c875e8ffacccb5fa5
| 306,296,760,817,978,700,000,000,000,000,000,000,000 | 10 |
Add support for generation of PLT markers in encoder
* -PLT switch added to opj_compress
* Add a opj_encoder_set_extra_options() function that
accepts a PLT=YES option, and could be expanded later
for other uses.
-------
Testing with a Sentinel2 10m band, T36JTT_20160914T074612_B02.jp2,
coming from S2A_MSIL1C_20160914T074612_N0204_R135_T36JTT_20160914T081456.SAFE
Decompress it to TIFF:
```
opj_uncompress -i T36JTT_20160914T074612_B02.jp2 -o T36JTT_20160914T074612_B02.tif
```
Recompress it with similar parameters as original:
```
opj_compress -n 5 -c [256,256],[256,256],[256,256],[256,256],[256,256] -t 1024,1024 -PLT -i T36JTT_20160914T074612_B02.tif -o T36JTT_20160914T074612_B02_PLT.jp2
```
Dump codestream detail with GDAL dump_jp2.py utility (https://github.com/OSGeo/gdal/blob/master/gdal/swig/python/samples/dump_jp2.py)
```
python dump_jp2.py T36JTT_20160914T074612_B02.jp2 > /tmp/dump_sentinel2_ori.txt
python dump_jp2.py T36JTT_20160914T074612_B02_PLT.jp2 > /tmp/dump_sentinel2_openjpeg_plt.txt
```
The diff between both show very similar structure, and identical number of packets in PLT markers
Now testing with Kakadu (KDU803_Demo_Apps_for_Linux-x86-64_200210)
Full file decompression:
```
kdu_expand -i T36JTT_20160914T074612_B02_PLT.jp2 -o tmp.tif
Consumed 121 tile-part(s) from a total of 121 tile(s).
Consumed 80,318,806 codestream bytes (excluding any file format) = 5.329697
bits/pel.
Processed using the multi-threaded environment, with
8 parallel threads of execution
```
Partial decompresson (presumably using PLT markers):
```
kdu_expand -i T36JTT_20160914T074612_B02.jp2 -o tmp.pgm -region "{0.5,0.5},{0.01,0.01}"
kdu_expand -i T36JTT_20160914T074612_B02_PLT.jp2 -o tmp2.pgm -region "{0.5,0.5},{0.01,0.01}"
diff tmp.pgm tmp2.pgm && echo "same !"
```
-------
Funded by ESA for S2-MPC project
|
static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
{
struct compat_ethtool_rxnfc __user *compat_rxnfc;
bool convert_in = false, convert_out = false;
size_t buf_size = ALIGN(sizeof(struct ifreq), 8);
struct ethtool_rxnfc __user *rxnfc;
struct ifreq __user *ifr;
u32 rule_cnt = 0, actual_rule_cnt;
u32 ethcmd;
u32 data;
int ret;
if (get_user(data, &ifr32->ifr_ifru.ifru_data))
return -EFAULT;
compat_rxnfc = compat_ptr(data);
if (get_user(ethcmd, &compat_rxnfc->cmd))
return -EFAULT;
/* Most ethtool structures are defined without padding.
* Unfortunately struct ethtool_rxnfc is an exception.
*/
switch (ethcmd) {
default:
break;
case ETHTOOL_GRXCLSRLALL:
/* Buffer size is variable */
if (get_user(rule_cnt, &compat_rxnfc->rule_cnt))
return -EFAULT;
if (rule_cnt > KMALLOC_MAX_SIZE / sizeof(u32))
return -ENOMEM;
buf_size += rule_cnt * sizeof(u32);
/* fall through */
case ETHTOOL_GRXRINGS:
case ETHTOOL_GRXCLSRLCNT:
case ETHTOOL_GRXCLSRULE:
case ETHTOOL_SRXCLSRLINS:
convert_out = true;
/* fall through */
case ETHTOOL_SRXCLSRLDEL:
buf_size += sizeof(struct ethtool_rxnfc);
convert_in = true;
break;
}
ifr = compat_alloc_user_space(buf_size);
rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
return -EFAULT;
if (put_user(convert_in ? rxnfc : compat_ptr(data),
&ifr->ifr_ifru.ifru_data))
return -EFAULT;
if (convert_in) {
/* We expect there to be holes between fs.m_ext and
* fs.ring_cookie and at the end of fs, but nowhere else.
*/
BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) +
sizeof(compat_rxnfc->fs.m_ext) !=
offsetof(struct ethtool_rxnfc, fs.m_ext) +
sizeof(rxnfc->fs.m_ext));
BUILD_BUG_ON(
offsetof(struct compat_ethtool_rxnfc, fs.location) -
offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) !=
offsetof(struct ethtool_rxnfc, fs.location) -
offsetof(struct ethtool_rxnfc, fs.ring_cookie));
if (copy_in_user(rxnfc, compat_rxnfc,
(void __user *)(&rxnfc->fs.m_ext + 1) -
(void __user *)rxnfc) ||
copy_in_user(&rxnfc->fs.ring_cookie,
&compat_rxnfc->fs.ring_cookie,
(void __user *)(&rxnfc->fs.location + 1) -
(void __user *)&rxnfc->fs.ring_cookie) ||
copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
sizeof(rxnfc->rule_cnt)))
return -EFAULT;
}
ret = dev_ioctl(net, SIOCETHTOOL, ifr);
if (ret)
return ret;
if (convert_out) {
if (copy_in_user(compat_rxnfc, rxnfc,
(const void __user *)(&rxnfc->fs.m_ext + 1) -
(const void __user *)rxnfc) ||
copy_in_user(&compat_rxnfc->fs.ring_cookie,
&rxnfc->fs.ring_cookie,
(const void __user *)(&rxnfc->fs.location + 1) -
(const void __user *)&rxnfc->fs.ring_cookie) ||
copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
sizeof(rxnfc->rule_cnt)))
return -EFAULT;
if (ethcmd == ETHTOOL_GRXCLSRLALL) {
/* As an optimisation, we only copy the actual
* number of rules that the underlying
* function returned. Since Mallory might
* change the rule count in user memory, we
* check that it is less than the rule count
* originally given (as the user buffer size),
* which has been range-checked.
*/
if (get_user(actual_rule_cnt, &rxnfc->rule_cnt))
return -EFAULT;
if (actual_rule_cnt < rule_cnt)
rule_cnt = actual_rule_cnt;
if (copy_in_user(&compat_rxnfc->rule_locs[0],
&rxnfc->rule_locs[0],
rule_cnt * sizeof(u32)))
return -EFAULT;
}
}
return 0;
}
| 0 |
[
"CWE-264"
] |
net
|
4de930efc23b92ddf88ce91c405ee645fe6e27ea
| 197,388,617,294,668,900,000,000,000,000,000,000,000 | 120 |
net: validate the range we feed to iov_iter_init() in sys_sendto/sys_recvfrom
Cc: [email protected] # v3.19
Signed-off-by: Al Viro <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
{
return (struct mdesc_elem *) (mdesc + 1);
}
| 0 |
[
"CWE-476"
] |
sparc
|
80caf43549e7e41a695c6d1e11066286538b336f
| 317,192,478,164,693,470,000,000,000,000,000,000,000 | 4 |
mdesc: fix a missing-check bug in get_vdev_port_node_info()
In get_vdev_port_node_info(), 'node_info->vdev_port.name' is allcoated
by kstrdup_const(), and it returns NULL when fails. So
'node_info->vdev_port.name' should be checked.
Signed-off-by: Gen Zhang <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
bool basic_const_item() const
{ return example && example->basic_const_item(); }
| 0 |
[
"CWE-617"
] |
server
|
2e7891080667c59ac80f788eef4d59d447595772
| 10,171,740,268,490,449,000,000,000,000,000,000,000 | 2 |
MDEV-25635 Assertion failure when pushing from HAVING into WHERE of view
This bug could manifest itself after pushing a where condition over a
mergeable derived table / view / CTE DT into a grouping view / derived
table / CTE V whose item list contained set functions with constant
arguments such as MIN(2), SUM(1) etc. In such cases the field references
used in the condition pushed into the view V that correspond set functions
are wrapped into Item_direct_view_ref wrappers. Due to a wrong implementation
of the virtual method const_item() for the class Item_direct_view_ref the
wrapped set functions with constant arguments could be erroneously taken
for constant items. This could lead to a wrong result set returned by the
main select query in 10.2. In 10.4 where a possibility of pushing condition
from HAVING into WHERE had been added this could cause a crash.
Approved by Sergey Petrunya <[email protected]>
|
ews_trigger_next_request (EEwsConnection *cnc)
{
GSource *source;
g_return_if_fail (cnc != NULL);
if (cnc->priv->soup_session) {
source = g_idle_source_new ();
g_source_set_priority (source, G_PRIORITY_DEFAULT);
g_source_set_callback (source, ews_next_request, cnc, NULL);
g_source_attach (source, cnc->priv->soup_context);
g_source_unref (source);
} else {
ews_next_request (cnc);
}
}
| 0 |
[
"CWE-295"
] |
evolution-ews
|
915226eca9454b8b3e5adb6f2fff9698451778de
| 291,326,047,377,577,560,000,000,000,000,000,000,000 | 16 |
I#27 - SSL Certificates are not validated
This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too.
Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27
|
static int ssl_failed(pn_transport_t *transport, const char *reason)
{
char buf[512] = "Unknown error.";
if (!reason) {
HRESULT status = GetLastError();
FormatMessage(FORMAT_MESSAGE_MAX_WIDTH_MASK | FORMAT_MESSAGE_FROM_SYSTEM,
0, status, 0, buf, sizeof(buf), 0);
reason = buf;
}
pni_ssl_t *ssl = transport->ssl;
ssl->ssl_closed = true;
ssl->app_input_closed = ssl->app_output_closed = PN_EOS;
ssl->state = SSL_CLOSED;
pn_do_error(transport, "amqp:connection:framing-error", "SSL Failure: %s", reason);
return PN_EOS;
}
| 0 |
[] |
qpid-proton
|
4aea0fd8502f5e9af7f22fd60645eeec07bce0b2
| 26,802,615,036,608,476,000,000,000,000,000,000,000 | 17 |
PROTON-2014: [c] Ensure SSL mutual authentication
(cherry picked from commit 97c7733f07712665f3d08091c82c393e4c3adbf7)
|
static void input_seq_print_bitmap(struct seq_file *seq, const char *name,
unsigned long *bitmap, int max)
{
int i;
bool skip_empty = true;
char buf[18];
seq_printf(seq, "B: %s=", name);
for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
if (input_bits_to_string(buf, sizeof(buf),
bitmap[i], skip_empty)) {
skip_empty = false;
seq_printf(seq, "%s%s", buf, i > 0 ? " " : "");
}
}
/*
* If no output was produced print a single 0.
*/
if (skip_empty)
seq_putc(seq, '0');
seq_putc(seq, '\n');
}
| 0 |
[
"CWE-703",
"CWE-787"
] |
linux
|
cb222aed03d798fc074be55e59d9a112338ee784
| 252,139,204,397,986,000,000,000,000,000,000,000,000 | 25 |
Input: add safety guards to input_set_keycode()
If we happen to have a garbage in input device's keycode table with values
too big we'll end up doing clear_bit() with offset way outside of our
bitmaps, damaging other objects within an input device or even outside of
it. Let's add sanity checks to the returned old keycodes.
Reported-by: [email protected]
Reported-by: [email protected]
Link: https://lore.kernel.org/r/20191207212757.GA245964@dtor-ws
Signed-off-by: Dmitry Torokhov <[email protected]>
|
// Re-dispatch the downstream request to the upstream after a retry decision.
// Bumps the attempt counter, re-resolves the cluster (it may have been removed
// by CDS since the original attempt), builds a fresh upstream request, and
// replays any buffered request body and trailers.
void Filter::doRetry() {
  ENVOY_STREAM_LOG(debug, "performing retry", *callbacks_);

  is_retry_ = true;
  attempt_count_++;
  callbacks_->streamInfo().setAttemptCount(attempt_count_);
  ASSERT(pending_retries_ > 0);
  pending_retries_--;

  // Clusters can technically get removed by CDS during a retry. Make sure it still exists.
  const auto cluster = config_.cm_.getThreadLocalCluster(route_entry_->clusterName());
  std::unique_ptr<GenericConnPool> generic_conn_pool;
  if (cluster != nullptr) {
    cluster_ = cluster->info();
    generic_conn_pool = createConnPool(*cluster);
  }

  if (!generic_conn_pool) {
    // No usable cluster/pool: fail the retry the same way as "no healthy upstream".
    sendNoHealthyUpstreamResponse();
    cleanup();
    return;
  }
  UpstreamRequestPtr upstream_request =
      std::make_unique<UpstreamRequest>(*this, std::move(generic_conn_pool));

  if (include_attempt_count_in_request_) {
    downstream_headers_->setEnvoyAttemptCount(attempt_count_);
  }

  if (Runtime::runtimeFeatureEnabled(
          "envoy.reloadable_features.update_expected_rq_timeout_on_retry")) {
    // If not enabled, then it will re-use the previous headers (if any.)

    // The request timeouts only account for time elapsed since the downstream request completed
    // which might not have happened yet (in which case zero time has elapsed.)
    std::chrono::milliseconds elapsed_time = std::chrono::milliseconds::zero();

    if (DateUtil::timePointValid(downstream_request_complete_time_)) {
      Event::Dispatcher& dispatcher = callbacks_->dispatcher();
      elapsed_time = std::chrono::duration_cast<std::chrono::milliseconds>(
          dispatcher.timeSource().monotonicTime() - downstream_request_complete_time_);
    }

    FilterUtility::setTimeoutHeaders(elapsed_time.count(), timeout_, *route_entry_,
                                     *downstream_headers_, !config_.suppress_envoy_headers_,
                                     grpc_request_, hedging_params_.hedge_on_per_try_timeout_);
  }

  // Keep a raw pointer so we can detect, after encodeHeaders() below, whether the
  // request we just queued is still at the front of the list.
  UpstreamRequest* upstream_request_tmp = upstream_request.get();
  LinkedList::moveIntoList(std::move(upstream_request), upstream_requests_);
  upstream_requests_.front()->encodeHeaders(!callbacks_->decodingBuffer() &&
                                            !downstream_trailers_ && downstream_end_stream_);
  // It's possible we got immediately reset which means the upstream request we just
  // added to the front of the list might have been removed, so we need to check to make
  // sure we don't encodeData on the wrong request.
  if (!upstream_requests_.empty() && (upstream_requests_.front().get() == upstream_request_tmp)) {
    if (callbacks_->decodingBuffer()) {
      // If we are doing a retry we need to make a copy.
      Buffer::OwnedImpl copy(*callbacks_->decodingBuffer());
      upstream_requests_.front()->encodeData(copy, !downstream_trailers_ && downstream_end_stream_);
    }

    if (downstream_trailers_) {
      upstream_requests_.front()->encodeTrailers(*downstream_trailers_);
    }
  }
}
| 0 |
[
"CWE-703"
] |
envoy
|
5bf9b0f1e7f247a4eee7180849cb0823926f7fff
| 151,311,654,038,232,960,000,000,000,000,000,000,000 | 67 |
[1.21] CVE-2022-21655
Signed-off-by: Otto van der Schaaf <[email protected]>
|
/*
 * _copyAConst
 *	Deep-copy an A_Const parse node.  The embedded Value is duplicated by
 *	hand here (rather than via _copyValue) because it is stored inline in
 *	the node, so this switch must stay in sync with _copyValue.
 */
_copyAConst(const A_Const *from)
{
	A_Const    *newnode = makeNode(A_Const);

	/* This part must duplicate _copyValue */
	COPY_SCALAR_FIELD(val.type);
	switch (from->val.type)
	{
		case T_Integer:
			COPY_SCALAR_FIELD(val.val.ival); /* integer is stored by value */
			break;
		case T_Float:
		case T_String:
		case T_BitString:
			COPY_STRING_FIELD(val.val.str);	/* textual forms share a string slot */
			break;
		case T_Null:
			/* nothing to do */
			break;
		default:
			elog(ERROR, "unrecognized node type: %d",
				 (int) from->val.type);
			break;
	}

	COPY_LOCATION_FIELD(location);

	return newnode;
}
| 0 |
[
"CWE-362"
] |
postgres
|
5f173040e324f6c2eebb90d86cf1b0cdb5890f0a
| 241,401,860,604,000,200,000,000,000,000,000,000,000 | 29 |
Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062
|
/*
 * zr364xx_release - final teardown callback for the camera's v4l2_device.
 *
 * Unregisters the v4l2 device, releases videobuf mmap state, frees each
 * per-frame system buffer, the control handler, the USB transfer buffer,
 * and finally the camera structure itself.
 */
static void zr364xx_release(struct v4l2_device *v4l2_dev)
{
	struct zr364xx_camera *cam =
		container_of(v4l2_dev, struct zr364xx_camera, v4l2_dev);
	unsigned long i;

	v4l2_device_unregister(&cam->v4l2_dev);

	videobuf_mmap_free(&cam->vb_vidq);

	/* release sys buffers */
	for (i = 0; i < FRAMES; i++) {
		if (cam->buffer.frame[i].lpvbits) {
			DBG("vfree %p\n", cam->buffer.frame[i].lpvbits);
			vfree(cam->buffer.frame[i].lpvbits);
		}
		/* cleared even when already NULL, guarding against stale reuse */
		cam->buffer.frame[i].lpvbits = NULL;
	}

	v4l2_ctrl_handler_free(&cam->ctrl_handler);
	/* release transfer buffer */
	kfree(cam->pipe->transfer_buffer);
	kfree(cam);
}
| 0 |
[
"CWE-476"
] |
linux
|
5d2e73a5f80a5b5aff3caf1ec6d39b5b3f54b26e
| 255,458,912,919,858,500,000,000,000,000,000,000,000 | 24 |
media: usb:zr364xx:Fix KASAN:null-ptr-deref Read in zr364xx_vidioc_querycap
SyzKaller hit the null pointer deref while reading from uninitialized
udev->product in zr364xx_vidioc_querycap().
==================================================================
BUG: KASAN: null-ptr-deref in read_word_at_a_time+0xe/0x20
include/linux/compiler.h:274
Read of size 1 at addr 0000000000000000 by task v4l_id/5287
CPU: 1 PID: 5287 Comm: v4l_id Not tainted 5.1.0-rc3-319004-g43151d6 #6
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS
Google 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0xe8/0x16e lib/dump_stack.c:113
kasan_report.cold+0x5/0x3c mm/kasan/report.c:321
read_word_at_a_time+0xe/0x20 include/linux/compiler.h:274
strscpy+0x8a/0x280 lib/string.c:207
zr364xx_vidioc_querycap+0xb5/0x210 drivers/media/usb/zr364xx/zr364xx.c:706
v4l_querycap+0x12b/0x340 drivers/media/v4l2-core/v4l2-ioctl.c:1062
__video_do_ioctl+0x5bb/0xb40 drivers/media/v4l2-core/v4l2-ioctl.c:2874
video_usercopy+0x44e/0xf00 drivers/media/v4l2-core/v4l2-ioctl.c:3056
v4l2_ioctl+0x14e/0x1a0 drivers/media/v4l2-core/v4l2-dev.c:364
vfs_ioctl fs/ioctl.c:46 [inline]
file_ioctl fs/ioctl.c:509 [inline]
do_vfs_ioctl+0xced/0x12f0 fs/ioctl.c:696
ksys_ioctl+0xa0/0xc0 fs/ioctl.c:713
__do_sys_ioctl fs/ioctl.c:720 [inline]
__se_sys_ioctl fs/ioctl.c:718 [inline]
__x64_sys_ioctl+0x74/0xb0 fs/ioctl.c:718
do_syscall_64+0xcf/0x4f0 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x7f3b56d8b347
Code: 90 90 90 48 8b 05 f1 fa 2a 00 64 c7 00 26 00 00 00 48 c7 c0 ff ff ff
ff c3 90 90 90 90 90 90 90 90 90 90 b8 10 00 00 00 0f 05 <48> 3d 01 f0 ff
ff 73 01 c3 48 8b 0d c1 fa 2a 00 31 d2 48 29 c2 64
RSP: 002b:00007ffe005d5d68 EFLAGS: 00000202 ORIG_RAX: 0000000000000010
RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 00007f3b56d8b347
RDX: 00007ffe005d5d70 RSI: 0000000080685600 RDI: 0000000000000003
RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000202 R12: 0000000000400884
R13: 00007ffe005d5ec0 R14: 0000000000000000 R15: 0000000000000000
==================================================================
For this device udev->product is not initialized and accessing it causes a NULL pointer deref.
The fix is to check for NULL before strscpy() and copy empty string, if
product is NULL
Reported-by: [email protected]
Signed-off-by: Vandana BN <[email protected]>
Signed-off-by: Hans Verkuil <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]>
|
/*
 * dequeue_entity - remove a scheduling entity from the CFS runqueue.
 * @cfs_rq: runqueue the entity is queued on
 * @se:     entity being removed
 * @sleep:  non-zero when the entity is blocking (vs. other dequeue reasons)
 *
 * Runtime is charged up to "now" before removal so vruntime accounting
 * stays exact; on a sleeping dequeue the schedstats sleep/block start
 * timestamps are recorded from the task state.
 */
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

	update_stats_dequeue(cfs_rq, se);
	if (sleep) {
#ifdef CONFIG_SCHEDSTATS
		if (entity_is_task(se)) {
			struct task_struct *tsk = task_of(se);

			/* record when the wait began, keyed on why the task stopped */
			if (tsk->state & TASK_INTERRUPTIBLE)
				se->sleep_start = rq_of(cfs_rq)->clock;
			if (tsk->state & TASK_UNINTERRUPTIBLE)
				se->block_start = rq_of(cfs_rq)->clock;
		}
#endif
	}

	/* 'current' is not kept in the rbtree, so only unlink queued entities */
	if (se != cfs_rq->curr)
		__dequeue_entity(cfs_rq, se);
	account_entity_dequeue(cfs_rq, se);
}
| 0 |
[] |
linux-2.6
|
6a6029b8cefe0ca7e82f27f3904dbedba3de4e06
| 288,579,989,738,836,100,000,000,000,000,000,000,000 | 25 |
sched: simplify sched_slice()
Use the existing calc_delta_mine() calculation for sched_slice(). This
saves a divide and simplifies the code because we share it with the
other /cfs_rq->load users.
It also improves code size:
text data bss dec hex filename
42659 2740 144 45543 b1e7 sched.o.before
42093 2740 144 44977 afb1 sched.o.after
Signed-off-by: Ingo Molnar <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
|
/*
 * start_debug - rotate the old ~/.muttdebugN logs and open a fresh one.
 *
 * Shifts .muttdebug3 -> .muttdebug4 down to .muttdebug0 -> .muttdebug1,
 * then opens ~/.muttdebug0 (unbuffered) and writes a version banner.
 */
static void start_debug (void)
{
  int slot;
  char cur[_POSIX_PATH_MAX];
  char next[_POSIX_PATH_MAX];

  /* rotate the old debug logs, oldest first so nothing is clobbered */
  for (slot = 3; slot >= 0; slot--)
  {
    snprintf (cur, sizeof (cur), "%s/.muttdebug%d", NONULL (Homedir), slot);
    snprintf (next, sizeof (next), "%s/.muttdebug%d", NONULL (Homedir), slot + 1);
    rename (cur, next);
  }

  /* after the loop, cur still names ~/.muttdebug0 -- open that one */
  debugfile = safe_fopen (cur, "w");
  if (debugfile != NULL)
  {
    time_t t = time (0);
    setbuf (debugfile, NULL); /* don't buffer the debugging output! */
    dprint (1, (debugfile, "Mutt/%s (%s) debugging at level %d\n",
                MUTT_VERSION, ReleaseDate, debuglevel));
  }
}
| 0 |
[
"CWE-668"
] |
mutt
|
6d0624411a979e2e1d76af4dd97d03f47679ea4a
| 94,378,064,175,599,850,000,000,000,000,000,000,000 | 22 |
use a 64-bit random value in temporary filenames.
closes #3158
|
/*
 * extr_del - destructor for a GF_ExtraDataBox.
 *
 * Releases the optional FECI child box and the owned payload buffer
 * before freeing the box itself.  A NULL box is a no-op.
 */
void extr_del(GF_Box *s)
{
	GF_ExtraDataBox *ptr = (GF_ExtraDataBox *)s;
	if (!ptr)
		return;
	if (ptr->feci != NULL)
		gf_isom_box_del((GF_Box *)ptr->feci);
	if (ptr->data != NULL)
		gf_free(ptr->data);
	gf_free(ptr);
}
| 0 |
[
"CWE-125"
] |
gpac
|
bceb03fd2be95097a7b409ea59914f332fb6bc86
| 283,157,381,082,090,600,000,000,000,000,000,000,000 | 9 |
fixed 2 possible heap overflows (inc. #1088)
|
Subsets and Splits
CWE 416 & 19
This query filters the dataset down to records tagged with specific CWEs (Common Weakness Enumerations), giving a basic overview of entries carrying those vulnerabilities, without any deeper analysis.
CWE Frequency in Train Set
Counts the occurrences of each CWE (Common Weakness Enumeration) in the dataset, providing a basic distribution but limited insight.