| func (string, lengths 0-484k) | target (int64, 0-1) | cwe (list, lengths 0-4) | project (string, 799 classes) | commit_id (string, length 40) | hash (float64, ~1.22e24-3.40e29) | size (int64, 1-24k) | message (string, lengths 0-13.3k) |
---|---|---|---|---|---|---|---|
static bool FromV8(v8::Isolate* isolate,
v8::Local<v8::Value> val,
printing::MarginType* out) {
std::string type;
if (ConvertFromV8(isolate, val, &type)) {
if (type == "default") {
*out = printing::DEFAULT_MARGINS;
return true;
}
if (type == "none") {
*out = printing::NO_MARGINS;
return true;
}
if (type == "printableArea") {
*out = printing::PRINTABLE_AREA_MARGINS;
return true;
}
if (type == "custom") {
*out = printing::CUSTOM_MARGINS;
return true;
}
}
return false;
} | 0 | [
"CWE-284",
"CWE-693"
]
| electron | 18613925610ba319da7f497b6deed85ad712c59b | 30,164,111,676,874,930,000,000,000,000,000,000,000 | 24 | refactor: wire will-navigate up to a navigation throttle instead of OpenURL (#25108)
* refactor: wire will-navigate up to a navigation throttle instead of OpenURL (#25065)
* refactor: wire will-navigate up to a navigation throttle instead of OpenURL
* spec: add test for x-site _top navigation
* chore: old code be old |
static int ZEND_FASTCALL ZEND_BW_AND_SPEC_CONST_CV_HANDLER(ZEND_OPCODE_HANDLER_ARGS)
{
zend_op *opline = EX(opline);
bitwise_and_function(&EX_T(opline->result.u.var).tmp_var,
&opline->op1.u.constant,
_get_zval_ptr_cv(&opline->op2, EX(Ts), BP_VAR_R TSRMLS_CC) TSRMLS_CC);
ZEND_VM_NEXT_OPCODE();
} | 0 | []
| php-src | ce96fd6b0761d98353761bf78d5bfb55291179fd | 243,662,121,365,783,400,000,000,000,000,000,000,000 | 12 | - fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs). Patch by Rasmus |
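The fix described in this commit message comes down to refusing any path whose declared length covers an embedded NUL byte, since the C-level filesystem calls would silently truncate at that byte. A minimal standalone sketch of that check (not the actual PHP patch, which works inside the engine's path-handling macros; the names here are made up):

```c
#include <stdio.h>
#include <string.h>

/* Reject a path whose declared length covers an embedded NUL byte:
 * the underlying filesystem call would stop at the NUL and open a
 * different file than the one the caller validated. */
static int path_has_embedded_nul(const char *path, size_t len)
{
    return memchr(path, '\0', len) != NULL;
}

int main(void)
{
    const char good[] = "/tmp/safe.txt";
    const char bad[]  = "/tmp/safe.txt\0.jpg";   /* NUL hidden before ".jpg" */

    printf("good: %d\n", path_has_embedded_nul(good, sizeof(good) - 1));  /* 0 */
    printf("bad:  %d\n", path_has_embedded_nul(bad,  sizeof(bad)  - 1));  /* 1 */
    return 0;
}
```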
void* ipc_rcu_alloc(int size)
{
void* out;
/*
* We prepend the allocation with the rcu struct, and
* workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
if (out) {
out += HDRLEN_VMALLOC;
container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
}
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
if (out) {
out += HDRLEN_KMALLOC;
container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
}
}
return out;
} | 1 | [
"CWE-703",
"CWE-189"
]
| linux | 6062a8dc0517bce23e3c2f7d2fea5e22411269a3 | 62,123,922,015,285,410,000,000,000,000,000,000,000 | 25 | ipc,sem: fine grained locking for semtimedop
Introduce finer grained locking for semtimedop, to handle the common case
of a program wanting to manipulate one semaphore from an array with
multiple semaphores.
If the call is a semop manipulating just one semaphore in an array with
multiple semaphores, only take the lock for that semaphore itself.
If the call needs to manipulate multiple semaphores, or another caller is
in a transaction that manipulates multiple semaphores, the sem_array lock
is taken, as well as all the locks for the individual semaphores.
On a 24 CPU system, performance numbers with the semop-multi
test with N threads and N semaphores, look like this:
            vanilla     Davidlohr's   Davidlohr's +     Davidlohr's +
threads                 patches       rwlock patches    v3 patches
10          610652      726325        1783589           2142206
20          341570      365699        1520453           1977878
30          288102      307037        1498167           2037995
40          290714      305955        1612665           2256484
50          288620      312890        1733453           2650292
60          289987      306043        1649360           2388008
70          291298      306347        1723167           2717486
80          290948      305662        1729545           2763582
90          290996      306680        1736021           2757524
100         292243      306700        1773700           3059159
[[email protected]: do not call sem_lock when bogus sma]
[[email protected]: make refcounter atomic]
Signed-off-by: Rik van Riel <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Acked-by: Davidlohr Bueso <[email protected]>
Cc: Chegu Vinod <[email protected]>
Cc: Jason Low <[email protected]>
Reviewed-by: Michel Lespinasse <[email protected]>
Cc: Peter Hurley <[email protected]>
Cc: Stanislav Kinsbursky <[email protected]>
Tested-by: Emmanuel Benisty <[email protected]>
Tested-by: Sedat Dilek <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
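The locking policy the commit message describes can be sketched with ordinary pthread mutexes: a single-semaphore operation takes only that semaphore's lock, while a multi-semaphore operation takes an array-wide lock plus every per-semaphore lock. This is only an illustration of the idea; the real ipc/sem.c code uses spinlocks and extra per-array state to arbitrate between the two paths.

```c
#include <pthread.h>

#define NSEMS 4

struct sem {
    pthread_mutex_t lock;
    int value;
};

struct sem_array {
    pthread_mutex_t global_lock;   /* taken only for complex (multi-sem) ops */
    struct sem sems[NSEMS];
};

static void sem_op_lock(struct sem_array *sma, int nsops, int semnum)
{
    if (nsops == 1) {
        /* common case: one semaphore touched, take only its lock */
        pthread_mutex_lock(&sma->sems[semnum].lock);
    } else {
        /* complex case: serialize against every other caller */
        pthread_mutex_lock(&sma->global_lock);
        for (int i = 0; i < NSEMS; i++)
            pthread_mutex_lock(&sma->sems[i].lock);
    }
}

static void sem_op_unlock(struct sem_array *sma, int nsops, int semnum)
{
    if (nsops == 1) {
        pthread_mutex_unlock(&sma->sems[semnum].lock);
    } else {
        for (int i = NSEMS - 1; i >= 0; i--)
            pthread_mutex_unlock(&sma->sems[i].lock);
        pthread_mutex_unlock(&sma->global_lock);
    }
}

int main(void)
{
    struct sem_array sma;
    pthread_mutex_init(&sma.global_lock, NULL);
    for (int i = 0; i < NSEMS; i++) {
        pthread_mutex_init(&sma.sems[i].lock, NULL);
        sma.sems[i].value = 0;
    }

    sem_op_lock(&sma, 1, 2);       /* simple op: only semaphore 2's lock */
    sma.sems[2].value++;
    sem_op_unlock(&sma, 1, 2);

    sem_op_lock(&sma, 3, 0);       /* complex op: array lock + all sem locks */
    sem_op_unlock(&sma, 3, 0);
    return 0;
}
```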
static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags,
const char *dev_name, void *raw_data)
{
struct super_block *s;
struct ecryptfs_sb_info *sbi;
struct ecryptfs_dentry_info *root_info;
const char *err = "Getting sb failed";
struct inode *inode;
struct path path;
int rc;
sbi = kmem_cache_zalloc(ecryptfs_sb_info_cache, GFP_KERNEL);
if (!sbi) {
rc = -ENOMEM;
goto out;
}
rc = ecryptfs_parse_options(sbi, raw_data);
if (rc) {
err = "Error parsing options";
goto out;
}
s = sget(fs_type, NULL, set_anon_super, NULL);
if (IS_ERR(s)) {
rc = PTR_ERR(s);
goto out;
}
s->s_flags = flags;
rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs", BDI_CAP_MAP_COPY);
if (rc)
goto out1;
ecryptfs_set_superblock_private(s, sbi);
s->s_bdi = &sbi->bdi;
/* ->kill_sb() will take care of sbi after that point */
sbi = NULL;
s->s_op = &ecryptfs_sops;
s->s_d_op = &ecryptfs_dops;
err = "Reading sb failed";
rc = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
if (rc) {
ecryptfs_printk(KERN_WARNING, "kern_path() failed\n");
goto out1;
}
if (path.dentry->d_sb->s_type == &ecryptfs_fs_type) {
rc = -EINVAL;
printk(KERN_ERR "Mount on filesystem of type "
"eCryptfs explicitly disallowed due to "
"known incompatibilities\n");
goto out_free;
}
ecryptfs_set_superblock_lower(s, path.dentry->d_sb);
s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
s->s_blocksize = path.dentry->d_sb->s_blocksize;
s->s_magic = ECRYPTFS_SUPER_MAGIC;
inode = ecryptfs_get_inode(path.dentry->d_inode, s);
rc = PTR_ERR(inode);
if (IS_ERR(inode))
goto out_free;
s->s_root = d_alloc_root(inode);
if (!s->s_root) {
iput(inode);
rc = -ENOMEM;
goto out_free;
}
rc = -ENOMEM;
root_info = kmem_cache_zalloc(ecryptfs_dentry_info_cache, GFP_KERNEL);
if (!root_info)
goto out_free;
/* ->kill_sb() will take care of root_info */
ecryptfs_set_dentry_private(s->s_root, root_info);
ecryptfs_set_dentry_lower(s->s_root, path.dentry);
ecryptfs_set_dentry_lower_mnt(s->s_root, path.mnt);
s->s_flags |= MS_ACTIVE;
return dget(s->s_root);
out_free:
path_put(&path);
out1:
deactivate_locked_super(s);
out:
if (sbi) {
ecryptfs_destroy_mount_crypt_stat(&sbi->mount_crypt_stat);
kmem_cache_free(ecryptfs_sb_info_cache, sbi);
}
printk(KERN_ERR "%s; rc = [%d]\n", err, rc);
return ERR_PTR(rc);
} | 1 | [
"CWE-362",
"CWE-284",
"CWE-264"
]
| linux | 764355487ea220fdc2faf128d577d7f679b91f97 | 336,940,878,620,414,650,000,000,000,000,000,000,000 | 97 | Ecryptfs: Add mount option to check uid of device being mounted = expect uid
Close a TOCTOU race for mounts done via ecryptfs-mount-private. The mount
source (device) can be raced when the ownership test is done in userspace.
Provide Ecryptfs a means to force the uid check at mount time.
Signed-off-by: John Johansen <[email protected]>
Cc: <[email protected]>
Signed-off-by: Tyler Hicks <[email protected]> |
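The TOCTOU pattern being closed here is the classic stat-then-open race: the mount source path can be swapped between the userspace ownership test and the mount itself. A small illustrative sketch of the safe ordering, checking ownership on the descriptor that was actually opened (this is not the eCryptfs code, which performs the equivalent check inside the kernel at mount time):

```c
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

/* Open first, then check ownership with fstat() so the check applies
 * to the object we actually opened, not to whatever the path pointed
 * at a moment earlier. */
static int open_if_owned_by(const char *path, uid_t expected_uid)
{
    int fd = open(path, O_RDONLY);
    if (fd < 0)
        return -1;

    struct stat st;
    if (fstat(fd, &st) < 0 || st.st_uid != expected_uid) {
        close(fd);
        return -1;
    }
    return fd;
}

int main(int argc, char **argv)
{
    const char *path = argc > 1 ? argv[1] : "/etc/hostname";
    int fd = open_if_owned_by(path, getuid());
    printf("%s: %s\n", path, fd >= 0 ? "accepted" : "rejected");
    if (fd >= 0)
        close(fd);
    return 0;
}
```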
void Gfx::opSetFillColorN(Object args[], int numArgs) {
GfxColor color;
GfxPattern *pattern;
int i;
if (state->getFillColorSpace()->getMode() == csPattern) {
if (numArgs > 1) {
if (!((GfxPatternColorSpace *)state->getFillColorSpace())->getUnder() ||
numArgs - 1 != ((GfxPatternColorSpace *)state->getFillColorSpace())
->getUnder()->getNComps()) {
error(getPos(), "Incorrect number of arguments in 'scn' command");
return;
}
for (i = 0; i < numArgs - 1 && i < gfxColorMaxComps; ++i) {
if (args[i].isNum()) {
color.c[i] = dblToCol(args[i].getNum());
} else {
color.c[i] = 0; // TODO Investigate if this is what Adobe does
}
}
state->setFillColor(&color);
out->updateFillColor(state);
}
if (args[numArgs-1].isName() &&
(pattern = res->lookupPattern(args[numArgs-1].getName(), this))) {
state->setFillPattern(pattern);
}
} else {
if (numArgs != state->getFillColorSpace()->getNComps()) {
error(getPos(), "Incorrect number of arguments in 'scn' command");
return;
}
state->setFillPattern(NULL);
for (i = 0; i < numArgs && i < gfxColorMaxComps; ++i) {
if (args[i].isNum()) {
color.c[i] = dblToCol(args[i].getNum());
} else {
color.c[i] = 0; // TODO Investigate if this is what Adobe does
}
}
state->setFillColor(&color);
out->updateFillColor(state);
}
} | 0 | []
| poppler | abf167af8b15e5f3b510275ce619e6fdb42edd40 | 157,535,561,444,034,750,000,000,000,000,000,000,000 | 45 | Implement tiling/patterns in SplashOutputDev
Fixes bug 13518 |
static void add_assoc_image_info(zval *value, int sub_array, image_info_type *image_info, int section_index)
{
char buffer[64], *val, *name, uname[64];
int i, ap, l, b, idx=0, unknown=0;
#ifdef EXIF_DEBUG
int info_tag;
#endif
image_info_value *info_value;
image_info_data *info_data;
zval tmpi, array;
#ifdef EXIF_DEBUG
/* php_error_docref(NULL, E_NOTICE, "Adding %d infos from section %s", image_info->info_list[section_index].count, exif_get_sectionname(section_index));*/
#endif
if (image_info->info_list[section_index].count) {
if (sub_array) {
array_init(&tmpi);
} else {
ZVAL_COPY_VALUE(&tmpi, value);
}
for(i=0; i<image_info->info_list[section_index].count; i++) {
info_data = &image_info->info_list[section_index].list[i];
#ifdef EXIF_DEBUG
info_tag = info_data->tag; /* conversion */
#endif
info_value = &info_data->value;
if (!(name = info_data->name)) {
snprintf(uname, sizeof(uname), "%d", unknown++);
name = uname;
}
#ifdef EXIF_DEBUG
/* php_error_docref(NULL, E_NOTICE, "Adding infos: tag(0x%04X,%12s,L=0x%04X): %s", info_tag, exif_get_tagname_debug(info_tag, exif_get_tag_table(section_index)), info_data->length, info_data->format==TAG_FMT_STRING?(info_value&&info_value->s?info_value->s:"<no data>"):exif_get_tagformat(info_data->format));*/
#endif
if (info_data->length==0) {
add_assoc_null(&tmpi, name);
} else {
switch (info_data->format) {
default:
/* Standard says more types possible but skip them...
* but allow users to handle data if they know how to
* So not return but use type UNDEFINED
* return;
*/
case TAG_FMT_BYTE:
case TAG_FMT_SBYTE:
case TAG_FMT_UNDEFINED:
if (!info_value->s) {
add_assoc_stringl(&tmpi, name, "", 0);
} else {
add_assoc_stringl(&tmpi, name, info_value->s, info_data->length);
}
break;
case TAG_FMT_STRING:
if (!(val = info_value->s)) {
val = "";
}
if (section_index==SECTION_COMMENT) {
add_index_string(&tmpi, idx++, val);
} else {
add_assoc_string(&tmpi, name, val);
}
break;
case TAG_FMT_URATIONAL:
case TAG_FMT_SRATIONAL:
/*case TAG_FMT_BYTE:
case TAG_FMT_SBYTE:*/
case TAG_FMT_USHORT:
case TAG_FMT_SSHORT:
case TAG_FMT_SINGLE:
case TAG_FMT_DOUBLE:
case TAG_FMT_ULONG:
case TAG_FMT_SLONG:
/* now the rest, first see if it becomes an array */
if ((l = info_data->length) > 1) {
array_init(&array);
}
for(ap=0; ap<l; ap++) {
if (l>1) {
info_value = &info_data->value.list[ap];
}
switch (info_data->format) {
case TAG_FMT_BYTE:
if (l>1) {
info_value = &info_data->value;
for (b=0;b<l;b++) {
add_index_long(&array, b, (int)(info_value->s[b]));
}
break;
}
case TAG_FMT_USHORT:
case TAG_FMT_ULONG:
if (l==1) {
add_assoc_long(&tmpi, name, (int)info_value->u);
} else {
add_index_long(&array, ap, (int)info_value->u);
}
break;
case TAG_FMT_URATIONAL:
snprintf(buffer, sizeof(buffer), "%u/%u", info_value->ur.num, info_value->ur.den);
if (l==1) {
add_assoc_string(&tmpi, name, buffer);
} else {
add_index_string(&array, ap, buffer);
}
break;
case TAG_FMT_SBYTE:
if (l>1) {
info_value = &info_data->value;
for (b=0;b<l;b++) {
add_index_long(&array, ap, (int)info_value->s[b]);
}
break;
}
case TAG_FMT_SSHORT:
case TAG_FMT_SLONG:
if (l==1) {
add_assoc_long(&tmpi, name, info_value->i);
} else {
add_index_long(&array, ap, info_value->i);
}
break;
case TAG_FMT_SRATIONAL:
snprintf(buffer, sizeof(buffer), "%i/%i", info_value->sr.num, info_value->sr.den);
if (l==1) {
add_assoc_string(&tmpi, name, buffer);
} else {
add_index_string(&array, ap, buffer);
}
break;
case TAG_FMT_SINGLE:
if (l==1) {
add_assoc_double(&tmpi, name, info_value->f);
} else {
add_index_double(&array, ap, info_value->f);
}
break;
case TAG_FMT_DOUBLE:
if (l==1) {
add_assoc_double(&tmpi, name, info_value->d);
} else {
add_index_double(&array, ap, info_value->d);
}
break;
}
info_value = &info_data->value.list[ap];
}
if (l>1) {
add_assoc_zval(&tmpi, name, &array);
}
break;
}
}
}
if (sub_array) {
add_assoc_zval(value, exif_get_sectionname(section_index), &tmpi);
}
}
} | 0 | [
"CWE-125"
]
| php-src | 0c77b4307df73217283a4aaf9313e1a33a0967ff | 237,842,514,458,585,720,000,000,000,000,000,000,000 | 166 | Fixed bug #79282 |
mptcp_cryptodata_sha1(const guint64 key, guint32 *token, guint64 *idsn)
{
guint8 digest_buf[HASH_SHA1_LENGTH];
guint64 pseudokey = GUINT64_TO_BE(key);
guint32 _token;
guint64 _isdn;
gcry_md_hash_buffer(GCRY_MD_SHA1, digest_buf, (const guint8 *)&pseudokey, 8);
/* memcpy to prevent -Wstrict-aliasing errors with GCC 4 */
memcpy(&_token, digest_buf, sizeof(_token));
*token = GUINT32_FROM_BE(_token);
memcpy(&_isdn, digest_buf + HASH_SHA1_LENGTH - sizeof(_isdn), sizeof(_isdn));
*idsn = GUINT64_FROM_BE(_isdn);
} | 0 | [
"CWE-354"
]
| wireshark | 7f3fe6164a68b76d9988c4253b24d43f498f1753 | 143,802,154,260,981,220,000,000,000,000,000,000,000 | 15 | TCP: do not use an unknown status when the checksum is 0xffff
Otherwise it triggers an assert when adding the column as the field is
defined as BASE_NONE and not BASE_DEC or BASE_HEX. Thus an unknown value
(not in proto_checksum_vals[)array) cannot be represented.
Mark the checksum as bad even if we process the packet.
Closes #16816
Conflicts:
epan/dissectors/packet-tcp.c |
os2_printer_fclose(gx_io_device * iodev, FILE * file)
{
os2_printer_t *pr = (os2_printer_t *)iodev->state;
fclose(file);
pm_spool(pr->memory, pr->filename, pr->queue);
unlink(pr->filename); /* unlink, not gp_unlink */
return 0;
} | 0 | [
"CWE-20"
]
| ghostpdl | a9bd3dec9fde03327a4a2c69dad1036bf9632e20 | 88,889,574,888,208,060,000,000,000,000,000,000,000 | 8 | Bug 704342: Include device specifier strings in access validation
for the "%pipe%", %handle%" and %printer% io devices.
We previously validated only the part after the "%pipe%" Postscript device
specifier, but this proved insufficient.
This rebuilds the original file name string, and validates it complete. The
slight complication for "%pipe%" is it can be reached implicitly using
"|" so we have to check both prefixes.
Addresses CVE-2021-3781 |
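The essence of the fix is validating the complete device-qualified name rather than only the part after the specifier, and remembering that the pipe device can be spelled either "%pipe%cmd" or "|cmd". A rough standalone sketch of that idea (illustrative only; the real Ghostscript code checks the rebuilt string against its permitted-path lists):

```c
#include <stdio.h>
#include <string.h>

/* The pipe device can be reached explicitly ("%pipe%...") or
 * implicitly ("|..."), so both prefixes must be recognized. */
static int is_pipe_spec(const char *name)
{
    return strncmp(name, "%pipe%", 6) == 0 || name[0] == '|';
}

/* Rebuild "<device><rest>" so the access check sees the full string,
 * not just the part after the device specifier. */
static void full_name(char *out, size_t outsz, const char *device, const char *rest)
{
    snprintf(out, outsz, "%s%s", device, rest);
}

int main(void)
{
    char buf[256];

    full_name(buf, sizeof(buf), "%pipe%", "cat /etc/passwd");
    printf("%-28s pipe? %d\n", buf, is_pipe_spec(buf));

    full_name(buf, sizeof(buf), "|", "cat /etc/passwd");
    printf("%-28s pipe? %d\n", buf, is_pipe_spec(buf));

    full_name(buf, sizeof(buf), "", "/tmp/out.pdf");
    printf("%-28s pipe? %d\n", buf, is_pipe_spec(buf));
    return 0;
}
```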
void amqp_set_socket(amqp_connection_state_t state, amqp_socket_t *socket) {
amqp_socket_delete(state->socket);
state->socket = socket;
} | 0 | [
"CWE-20",
"CWE-190",
"CWE-787"
]
| rabbitmq-c | fc85be7123050b91b054e45b91c78d3241a5047a | 270,072,670,395,874,080,000,000,000,000,000,000,000 | 4 | lib: check frame_size is >= INT32_MAX
When parsing a frame header, validate that the frame_size is less than
or equal to INT32_MAX. Given frame_max is limited between 0 and
INT32_MAX in amqp_login and friends, this does not change the API.
This prevents a potential buffer overflow when a malicious client sends
a frame_size that is close to UINT32_MAX, which causes an overflow
when computing state->target_size resulting in a small value there. A
buffer is then allocated with the small amount, then memcopy copies the
frame_size writing to memory beyond the end of the buffer. |
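The overflow the commit message describes comes from adding a fixed frame overhead to an attacker-controlled 32-bit length; capping the length at INT32_MAX first makes the later addition safe. A small sketch of that check (illustrative constants and names, not the librabbitmq implementation):

```c
#include <stdint.h>
#include <stdio.h>

#define FRAME_OVERHEAD 8u   /* illustrative header + frame-end overhead */

/* Reject oversized frame lengths before doing any arithmetic with them. */
static int target_size(uint32_t frame_size, uint32_t *out)
{
    if (frame_size > INT32_MAX)
        return -1;                       /* would wrap when overhead is added */
    *out = frame_size + FRAME_OVERHEAD;  /* cannot overflow a uint32_t now */
    return 0;
}

int main(void)
{
    uint32_t t;
    printf("1024       -> %s\n", target_size(1024u, &t) == 0 ? "ok" : "rejected");
    printf("0xfffffff8 -> %s\n", target_size(0xfffffff8u, &t) == 0 ? "ok" : "rejected");
    return 0;
}
```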
static void addBitsToStreamReversed(size_t* bitpointer, ucvector* bitstream, unsigned value, size_t nbits)
{
size_t i;
for(i = 0; i < nbits; i++) addBitToStream(bitpointer, bitstream, (unsigned char)((value >> (nbits - 1 - i)) & 1));
} | 0 | [
"CWE-401"
]
| FreeRDP | 9fee4ae076b1ec97b97efb79ece08d1dab4df29a | 226,007,959,466,316,050,000,000,000,000,000,000,000 | 5 | Fixed #5645: realloc return handling |
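The "realloc return handling" the commit title refers to is the usual pattern of assigning realloc()'s result to a temporary, so that a failure neither leaks the old block nor leaves a NULL pointer in use. A minimal sketch of that pattern (generic, not the lodepng code itself):

```c
#include <stdlib.h>
#include <string.h>

/* Grow a buffer without losing it on allocation failure. */
static int grow(unsigned char **data, size_t *cap, size_t newcap)
{
    unsigned char *tmp = realloc(*data, newcap);
    if (!tmp)
        return -1;     /* *data is still the old, valid allocation */
    *data = tmp;
    *cap = newcap;
    return 0;
}

int main(void)
{
    size_t cap = 16;
    unsigned char *buf = malloc(cap);
    if (!buf)
        return 1;
    memset(buf, 0, cap);
    if (grow(&buf, &cap, 4096) != 0) {
        free(buf);     /* failure path: release the original block */
        return 1;
    }
    free(buf);
    return 0;
}
```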
static size_t prvWriteMessageToBuffer( StreamBuffer_t * const pxStreamBuffer,
const void * pvTxData,
size_t xDataLengthBytes,
size_t xSpace,
size_t xRequiredSpace )
{
BaseType_t xShouldWrite;
size_t xReturn;
if( xSpace == ( size_t ) 0 )
{
/* Doesn't matter if this is a stream buffer or a message buffer, there
* is no space to write. */
xShouldWrite = pdFALSE;
}
else if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) == ( uint8_t ) 0 )
{
/* This is a stream buffer, as opposed to a message buffer, so writing a
* stream of bytes rather than discrete messages. Write as many bytes as
* possible. */
xShouldWrite = pdTRUE;
xDataLengthBytes = configMIN( xDataLengthBytes, xSpace );
}
else if( xSpace >= xRequiredSpace )
{
/* This is a message buffer, as opposed to a stream buffer, and there
* is enough space to write both the message length and the message itself
* into the buffer. Start by writing the length of the data, the data
* itself will be written later in this function. */
xShouldWrite = pdTRUE;
( void ) prvWriteBytesToBuffer( pxStreamBuffer, ( const uint8_t * ) &( xDataLengthBytes ), sbBYTES_TO_STORE_MESSAGE_LENGTH );
}
else
{
/* There is space available, but not enough space. */
xShouldWrite = pdFALSE;
}
if( xShouldWrite != pdFALSE )
{
/* Writes the data itself. */
xReturn = prvWriteBytesToBuffer( pxStreamBuffer, ( const uint8_t * ) pvTxData, xDataLengthBytes ); /*lint !e9079 Storage buffer is implemented as uint8_t for ease of sizing, alignment and access. */
}
else
{
xReturn = 0;
}
return xReturn;
}
| 0 | [
"CWE-190"
]
| FreeRTOS-Kernel | d05b9c123f2bf9090bce386a244fc934ae44db5b | 332,078,903,378,954,360,000,000,000,000,000,000,000 | 50 | Add addition overflow check for stream buffer (#226) |
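An addition overflow check of the kind the commit title mentions is easiest to express with unsigned wrap-around: if a + b comes out smaller than a, the sum overflowed. A tiny standalone sketch (illustrative, not the FreeRTOS implementation, which applies the check when computing the space required for a message):

```c
#include <stddef.h>
#include <stdio.h>

/* Returns nonzero if a + b would wrap around the size_t range. */
static int add_would_overflow(size_t a, size_t b)
{
    return a + b < a;   /* unsigned wrap-around is well defined in C */
}

int main(void)
{
    size_t msg_len = (size_t)-1 - 2;   /* absurdly large "message length" */
    size_t header  = 4;                /* bytes used to store the length  */

    if (add_would_overflow(msg_len, header))
        puts("rejected: required space would overflow");
    else
        printf("required space: %zu\n", msg_len + header);
    return 0;
}
```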
static noinline int btrfs_ioctl_ino_lookup(struct file *file,
void __user *argp)
{
struct btrfs_ioctl_ino_lookup_args *args;
struct inode *inode;
int ret = 0;
args = memdup_user(argp, sizeof(*args));
if (IS_ERR(args))
return PTR_ERR(args);
inode = file_inode(file);
/*
* Unprivileged query to obtain the containing subvolume root id. The
* path is reset so it's consistent with btrfs_search_path_in_tree.
*/
if (args->treeid == 0)
args->treeid = BTRFS_I(inode)->root->root_key.objectid;
if (args->objectid == BTRFS_FIRST_FREE_OBJECTID) {
args->name[0] = 0;
goto out;
}
if (!capable(CAP_SYS_ADMIN)) {
ret = -EPERM;
goto out;
}
ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info,
args->treeid, args->objectid,
args->name);
out:
if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
ret = -EFAULT;
kfree(args);
return ret;
} | 0 | [
"CWE-200"
]
| linux | 8039d87d9e473aeb740d4fdbd59b9d2f89b2ced9 | 211,979,812,066,761,730,000,000,000,000,000,000,000 | 41 | Btrfs: fix file corruption and data loss after cloning inline extents
Currently the clone ioctl allows to clone an inline extent from one file
to another that already has other (non-inlined) extents. This is a problem
because btrfs is not designed to deal with files having inline and regular
extents, if a file has an inline extent then it must be the only extent
in the file and must start at file offset 0. Having a file with an inline
extent followed by regular extents results in EIO errors when doing reads
or writes against the first 4K of the file.
Also, the clone ioctl allows one to lose data if the source file consists
of a single inline extent, with a size of N bytes, and the destination
file consists of a single inline extent with a size of M bytes, where we
have M > N. In this case the clone operation removes the inline extent
from the destination file and then copies the inline extent from the
source file into the destination file - we lose the M - N bytes from the
destination file, a read operation will get the value 0x00 for any bytes
in the range [N, M] (the destination inode's i_size remained as M,
that's why we can read past N bytes).
So fix this by not allowing such destructive operations to happen and
return errno EOPNOTSUPP to user space.
Currently the fstest btrfs/035 tests the data loss case but it totally
ignores this - i.e. expects the operation to succeed and does not check
the we got data loss.
The following test case for fstests exercises all these cases that result
in file corruption and data loss:
seq=`basename $0`
seqres=$RESULT_DIR/$seq
echo "QA output created by $seq"
tmp=/tmp/$$
status=1 # failure is the default!
trap "_cleanup; exit \$status" 0 1 2 3 15
_cleanup()
{
rm -f $tmp.*
}
# get standard environment, filters and checks
. ./common/rc
. ./common/filter
# real QA test starts here
_need_to_be_root
_supported_fs btrfs
_supported_os Linux
_require_scratch
_require_cloner
_require_btrfs_fs_feature "no_holes"
_require_btrfs_mkfs_feature "no-holes"
rm -f $seqres.full
test_cloning_inline_extents()
{
local mkfs_opts=$1
local mount_opts=$2
_scratch_mkfs $mkfs_opts >>$seqres.full 2>&1
_scratch_mount $mount_opts
# File bar, the source for all the following clone operations, consists
# of a single inline extent (50 bytes).
$XFS_IO_PROG -f -c "pwrite -S 0xbb 0 50" $SCRATCH_MNT/bar \
| _filter_xfs_io
# Test cloning into a file with an extent (non-inlined) where the
# destination offset overlaps that extent. It should not be possible to
# clone the inline extent from file bar into this file.
$XFS_IO_PROG -f -c "pwrite -S 0xaa 0K 16K" $SCRATCH_MNT/foo \
| _filter_xfs_io
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo
# Doing IO against any range in the first 4K of the file should work.
# Due to a past clone ioctl bug which allowed cloning the inline extent,
# these operations resulted in EIO errors.
echo "File foo data after clone operation:"
# All bytes should have the value 0xaa (clone operation failed and did
# not modify our file).
od -t x1 $SCRATCH_MNT/foo
$XFS_IO_PROG -c "pwrite -S 0xcc 0 100" $SCRATCH_MNT/foo | _filter_xfs_io
# Test cloning the inline extent against a file which has a hole in its
# first 4K followed by a non-inlined extent. It should not be possible
# as well to clone the inline extent from file bar into this file.
$XFS_IO_PROG -f -c "pwrite -S 0xdd 4K 12K" $SCRATCH_MNT/foo2 \
| _filter_xfs_io
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo2
# Doing IO against any range in the first 4K of the file should work.
# Due to a past clone ioctl bug which allowed cloning the inline extent,
# these operations resulted in EIO errors.
echo "File foo2 data after clone operation:"
# All bytes should have the value 0x00 (clone operation failed and did
# not modify our file).
od -t x1 $SCRATCH_MNT/foo2
$XFS_IO_PROG -c "pwrite -S 0xee 0 90" $SCRATCH_MNT/foo2 | _filter_xfs_io
# Test cloning the inline extent against a file which has a size of zero
# but has a prealloc extent. It should not be possible as well to clone
# the inline extent from file bar into this file.
$XFS_IO_PROG -f -c "falloc -k 0 1M" $SCRATCH_MNT/foo3 | _filter_xfs_io
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo3
# Doing IO against any range in the first 4K of the file should work.
# Due to a past clone ioctl bug which allowed cloning the inline extent,
# these operations resulted in EIO errors.
echo "First 50 bytes of foo3 after clone operation:"
# Should not be able to read any bytes, file has 0 bytes i_size (the
# clone operation failed and did not modify our file).
od -t x1 $SCRATCH_MNT/foo3
$XFS_IO_PROG -c "pwrite -S 0xff 0 90" $SCRATCH_MNT/foo3 | _filter_xfs_io
# Test cloning the inline extent against a file which consists of a
# single inline extent that has a size not greater than the size of
# bar's inline extent (40 < 50).
# It should be possible to do the extent cloning from bar to this file.
$XFS_IO_PROG -f -c "pwrite -S 0x01 0 40" $SCRATCH_MNT/foo4 \
| _filter_xfs_io
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo4
# Doing IO against any range in the first 4K of the file should work.
echo "File foo4 data after clone operation:"
# Must match file bar's content.
od -t x1 $SCRATCH_MNT/foo4
$XFS_IO_PROG -c "pwrite -S 0x02 0 90" $SCRATCH_MNT/foo4 | _filter_xfs_io
# Test cloning the inline extent against a file which consists of a
# single inline extent that has a size greater than the size of bar's
# inline extent (60 > 50).
# It should not be possible to clone the inline extent from file bar
# into this file.
$XFS_IO_PROG -f -c "pwrite -S 0x03 0 60" $SCRATCH_MNT/foo5 \
| _filter_xfs_io
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo5
# Reading the file should not fail.
echo "File foo5 data after clone operation:"
# Must have a size of 60 bytes, with all bytes having a value of 0x03
# (the clone operation failed and did not modify our file).
od -t x1 $SCRATCH_MNT/foo5
# Test cloning the inline extent against a file which has no extents but
# has a size greater than bar's inline extent (16K > 50).
# It should not be possible to clone the inline extent from file bar
# into this file.
$XFS_IO_PROG -f -c "truncate 16K" $SCRATCH_MNT/foo6 | _filter_xfs_io
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo6
# Reading the file should not fail.
echo "File foo6 data after clone operation:"
# Must have a size of 16K, with all bytes having a value of 0x00 (the
# clone operation failed and did not modify our file).
od -t x1 $SCRATCH_MNT/foo6
# Test cloning the inline extent against a file which has no extents but
# has a size not greater than bar's inline extent (30 < 50).
# It should be possible to clone the inline extent from file bar into
# this file.
$XFS_IO_PROG -f -c "truncate 30" $SCRATCH_MNT/foo7 | _filter_xfs_io
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo7
# Reading the file should not fail.
echo "File foo7 data after clone operation:"
# Must have a size of 50 bytes, with all bytes having a value of 0xbb.
od -t x1 $SCRATCH_MNT/foo7
# Test cloning the inline extent against a file which has a size not
# greater than the size of bar's inline extent (20 < 50) but has
# a prealloc extent that goes beyond the file's size. It should not be
# possible to clone the inline extent from bar into this file.
$XFS_IO_PROG -f -c "falloc -k 0 1M" \
-c "pwrite -S 0x88 0 20" \
$SCRATCH_MNT/foo8 | _filter_xfs_io
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo8
echo "File foo8 data after clone operation:"
# Must have a size of 20 bytes, with all bytes having a value of 0x88
# (the clone operation did not modify our file).
od -t x1 $SCRATCH_MNT/foo8
_scratch_unmount
}
echo -e "\nTesting without compression and without the no-holes feature...\n"
test_cloning_inline_extents
echo -e "\nTesting with compression and without the no-holes feature...\n"
test_cloning_inline_extents "" "-o compress"
echo -e "\nTesting without compression and with the no-holes feature...\n"
test_cloning_inline_extents "-O no-holes" ""
echo -e "\nTesting with compression and with the no-holes feature...\n"
test_cloning_inline_extents "-O no-holes" "-o compress"
status=0
exit
Cc: [email protected]
Signed-off-by: Filipe Manana <[email protected]> |
static void fpm_child_resources_use(struct fpm_child_s *child) /* {{{ */
{
struct fpm_worker_pool_s *wp;
for (wp = fpm_worker_all_pools; wp; wp = wp->next) {
if (wp == child->wp || wp == child->wp->shared) {
continue;
}
fpm_scoreboard_free(wp);
}
fpm_scoreboard_child_use(child, getpid());
fpm_stdio_child_use_pipes(child);
fpm_child_free(child);
} | 0 | [
"CWE-787"
]
| php-src | fadb1f8c1d08ae62b4f0a16917040fde57a3b93b | 100,673,401,318,173,720,000,000,000,000,000,000,000 | 14 | Fix bug #81026 (PHP-FPM oob R/W in root process leading to priv escalation)
The main change is to store scoreboard procs directly to the variable sized
array rather than indirectly through the pointer.
Signed-off-by: Stanislav Malyshev <[email protected]> |
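Storing the per-process slots "directly to the variable sized array" rather than behind a pointer is the C flexible-array-member layout: the slots live inline in the same allocation as the header, so there is no second pointer for a compromised child to corrupt. A sketch of that layout with made-up names (not PHP-FPM's actual scoreboard structures):

```c
#include <stdlib.h>

struct proc_slot {
    int pid;
    int requests;
};

struct scoreboard {
    int nprocs;
    struct proc_slot procs[];   /* flexible array member: slots live inline */
};

/* One allocation covers the header and all slots. */
static struct scoreboard *scoreboard_alloc(int nprocs)
{
    struct scoreboard *sb =
        calloc(1, sizeof(*sb) + (size_t)nprocs * sizeof(struct proc_slot));
    if (sb)
        sb->nprocs = nprocs;
    return sb;
}

int main(void)
{
    struct scoreboard *sb = scoreboard_alloc(8);
    if (!sb)
        return 1;
    sb->procs[3].pid = 1234;    /* direct indexing, no separate pointer */
    free(sb);
    return 0;
}
```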
static ssize_t hpage_pmd_size_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%lu\n", HPAGE_PMD_SIZE);
} | 0 | [
"CWE-362"
]
| linux | a8f97366452ed491d13cf1e44241bc0b5740b1f0 | 312,427,220,687,130,870,000,000,000,000,000,000,000 | 5 | mm, thp: Do not make page table dirty unconditionally in touch_p[mu]d()
Currently, we unconditionally make page table dirty in touch_pmd().
It may result in false-positive can_follow_write_pmd().
We may avoid the situation, if we would only make the page table entry
dirty if caller asks for write access -- FOLL_WRITE.
The patch also changes touch_pud() in the same way.
Signed-off-by: Kirill A. Shutemov <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Hugh Dickins <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static void test_bug11909()
{
MYSQL_STMT *stmt1, *stmt2;
MYSQL_BIND my_bind[7];
int rc;
char firstname[20], midinit[20], lastname[20], workdept[20];
ulong firstname_len, midinit_len, lastname_len, workdept_len;
uint32 empno;
double salary;
float bonus;
const char *stmt_text;
myheader("test_bug11909");
stmt_text= "drop table if exists t1";
rc= mysql_real_query(mysql, stmt_text, strlen(stmt_text));
myquery(rc);
stmt_text= "create table t1 ("
" empno int(11) not null, firstname varchar(20) not null,"
" midinit varchar(20) not null, lastname varchar(20) not null,"
" workdept varchar(6) not null, salary double not null,"
" bonus float not null, primary key (empno)"
") default charset=latin1 collate=latin1_bin";
rc= mysql_real_query(mysql, stmt_text, strlen(stmt_text));
myquery(rc);
stmt_text= "insert into t1 values "
"(10, 'CHRISTINE', 'I', 'HAAS', 'A00', 52750, 1000), "
"(20, 'MICHAEL', 'L', 'THOMPSON', 'B01', 41250, 800),"
"(30, 'SALLY', 'A', 'KWAN', 'C01', 38250, 800),"
"(50, 'JOHN', 'B', 'GEYER', 'E01', 40175, 800), "
"(60, 'IRVING', 'F', 'STERN', 'D11', 32250, 500)";
rc= mysql_real_query(mysql, stmt_text, strlen(stmt_text));
myquery(rc);
/* ****** Begin of trace ****** */
stmt1= open_cursor("SELECT empno, firstname, midinit, lastname,"
"workdept, salary, bonus FROM t1");
memset(my_bind, 0, sizeof(my_bind));
my_bind[0].buffer_type= MYSQL_TYPE_LONG;
my_bind[0].buffer= (void*) &empno;
my_bind[1].buffer_type= MYSQL_TYPE_VAR_STRING;
my_bind[1].buffer= (void*) firstname;
my_bind[1].buffer_length= sizeof(firstname);
my_bind[1].length= &firstname_len;
my_bind[2].buffer_type= MYSQL_TYPE_VAR_STRING;
my_bind[2].buffer= (void*) midinit;
my_bind[2].buffer_length= sizeof(midinit);
my_bind[2].length= &midinit_len;
my_bind[3].buffer_type= MYSQL_TYPE_VAR_STRING;
my_bind[3].buffer= (void*) lastname;
my_bind[3].buffer_length= sizeof(lastname);
my_bind[3].length= &lastname_len;
my_bind[4].buffer_type= MYSQL_TYPE_VAR_STRING;
my_bind[4].buffer= (void*) workdept;
my_bind[4].buffer_length= sizeof(workdept);
my_bind[4].length= &workdept_len;
my_bind[5].buffer_type= MYSQL_TYPE_DOUBLE;
my_bind[5].buffer= (void*) &salary;
my_bind[6].buffer_type= MYSQL_TYPE_FLOAT;
my_bind[6].buffer= (void*) &bonus;
rc= mysql_stmt_bind_result(stmt1, my_bind);
check_execute(stmt1, rc);
rc= mysql_stmt_execute(stmt1);
check_execute(stmt1, rc);
rc= mysql_stmt_fetch(stmt1);
DIE_UNLESS(rc == 0);
DIE_UNLESS(empno == 10);
DIE_UNLESS(strcmp(firstname, "CHRISTINE") == 0);
DIE_UNLESS(strcmp(midinit, "I") == 0);
DIE_UNLESS(strcmp(lastname, "HAAS") == 0);
DIE_UNLESS(strcmp(workdept, "A00") == 0);
DIE_UNLESS(salary == (double) 52750.0);
DIE_UNLESS(bonus == (float) 1000.0);
stmt2= open_cursor("SELECT empno, firstname FROM t1");
rc= mysql_stmt_bind_result(stmt2, my_bind);
check_execute(stmt2, rc);
rc= mysql_stmt_execute(stmt2);
check_execute(stmt2, rc);
rc= mysql_stmt_fetch(stmt2);
DIE_UNLESS(rc == 0);
DIE_UNLESS(empno == 10);
DIE_UNLESS(strcmp(firstname, "CHRISTINE") == 0);
rc= mysql_stmt_reset(stmt2);
check_execute(stmt2, rc);
/* ERROR: next statement should return 0 */
rc= mysql_stmt_fetch(stmt1);
DIE_UNLESS(rc == 0);
mysql_stmt_close(stmt1);
mysql_stmt_close(stmt2);
rc= mysql_rollback(mysql);
myquery(rc);
rc= mysql_query(mysql, "drop table t1");
myquery(rc);
} | 0 | [
"CWE-284",
"CWE-295"
]
| mysql-server | 3bd5589e1a5a93f9c224badf983cd65c45215390 | 273,418,667,667,428,440,000,000,000,000,000,000,000 | 115 | WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to unencrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options |
static int umount_one(struct libmnt_context *cxt, const char *spec)
{
int rc;
if (!spec)
return MOUNT_EX_SOFTWARE;
if (mnt_context_set_target(cxt, spec))
err(MOUNT_EX_SYSERR, _("failed to set umount target"));
rc = mnt_context_umount(cxt);
rc = mk_exit_code(cxt, rc);
if (rc == MOUNT_EX_SUCCESS && mnt_context_is_verbose(cxt))
success_message(cxt);
mnt_reset_context(cxt);
return rc;
} | 0 | [
"CWE-200"
]
| util-linux | cc8cc8f32c863f3ae6a8a88e97b47bcd6a21825f | 208,867,976,541,046,400,000,000,000,000,000,000,000 | 19 | umount: sanitize paths from non-root users
Signed-off-by: Karel Zak <[email protected]> |
void InstanceKlass::methods_do(void f(Method* method)) {
// Methods aren't stable until they are loaded. This can be read outside
// a lock through the ClassLoaderData for profiling
if (!is_loaded()) {
return;
}
int len = methods()->length();
for (int index = 0; index < len; index++) {
Method* m = methods()->at(index);
assert(m->is_method(), "must be method");
f(m);
}
} | 0 | []
| jdk17u | f8eb9abe034f7c6bea4da05a9ea42017b3f80730 | 34,392,886,943,977,590,000,000,000,000,000,000,000 | 14 | 8270386: Better verification of scan methods
Reviewed-by: coleenp
Backport-of: ac329cef45979bd0159ecd1347e36f7129bb2ce4 |
wf_cliprdr_server_format_data_response(CliprdrClientContext* context,
const CLIPRDR_FORMAT_DATA_RESPONSE* formatDataResponse)
{
BYTE* data;
HANDLE hMem;
wfClipboard* clipboard;
if (!context || !formatDataResponse)
return ERROR_INTERNAL_ERROR;
if (formatDataResponse->msgFlags != CB_RESPONSE_OK)
return E_FAIL;
clipboard = (wfClipboard*)context->custom;
if (!clipboard)
return ERROR_INTERNAL_ERROR;
hMem = GlobalAlloc(GMEM_MOVEABLE, formatDataResponse->dataLen);
if (!hMem)
return ERROR_INTERNAL_ERROR;
data = (BYTE*)GlobalLock(hMem);
if (!data)
{
GlobalFree(hMem);
return ERROR_INTERNAL_ERROR;
}
CopyMemory(data, formatDataResponse->requestedFormatData, formatDataResponse->dataLen);
if (!GlobalUnlock(hMem) && GetLastError())
{
GlobalFree(hMem);
return ERROR_INTERNAL_ERROR;
}
clipboard->hmem = hMem;
if (!SetEvent(clipboard->response_data_event))
return ERROR_INTERNAL_ERROR;
return CHANNEL_RC_OK;
} | 0 | [
"CWE-20"
]
| FreeRDP | 0d79670a28c0ab049af08613621aa0c267f977e9 | 165,058,420,314,588,670,000,000,000,000,000,000,000 | 46 | Fixed missing input checks for file contents request
reported by Valentino Ricotta (Thalium) |
conntrack_get_nconns(struct conntrack *ct, uint32_t *nconns)
{
*nconns = atomic_count_get(&ct->n_conn);
return 0;
} | 0 | [
"CWE-400"
]
| ovs | 79349cbab0b2a755140eedb91833ad2760520a83 | 309,737,530,094,924,200,000,000,000,000,000,000,000 | 5 | flow: Support extra padding length.
Although not required, padding can be optionally added until
the packet length is MTU bytes. A packet with extra padding
currently fails sanity checks.
Vulnerability: CVE-2020-35498
Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.")
Reported-by: Joakim Hindersson <[email protected]>
Acked-by: Ilya Maximets <[email protected]>
Signed-off-by: Flavio Leitner <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]> |
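The relaxed sanity check amounts to treating any bytes beyond the IPv4 total-length field as padding: the captured L3 payload must be at least tot_len, not exactly tot_len, and only tot_len bytes are parsed. A small sketch of that rule (illustrative, not the OVS miniflow code):

```c
#include <stdint.h>
#include <stdio.h>

/* Accept a frame whose L3 payload is at least as long as the IPv4
 * total length; anything beyond tot_len is Ethernet padding. */
static int ipv4_len_ok(uint16_t tot_len, size_t l3_bytes_available)
{
    if (tot_len < 20)                  /* must at least cover the base header */
        return 0;
    if (l3_bytes_available < tot_len)  /* truncated packet: reject */
        return 0;
    return 1;
}

int main(void)
{
    /* 46-byte minimum Ethernet payload carrying a 28-byte IPv4 packet */
    printf("padded frame:    %d\n", ipv4_len_ok(28, 46));  /* 1: accept */
    printf("truncated frame: %d\n", ipv4_len_ok(64, 46));  /* 0: reject */
    return 0;
}
```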
static int AppLayerProtoDetectTest02(void)
{
AppLayerProtoDetectUnittestCtxBackup();
AppLayerProtoDetectSetup();
int r = 0;
const char *buf = "HTTP";
AppLayerProtoDetectPMRegisterPatternCS(IPPROTO_TCP, ALPROTO_HTTP, buf, 4, 0, STREAM_TOCLIENT);
buf = "ftp";
AppLayerProtoDetectPMRegisterPatternCS(IPPROTO_TCP, ALPROTO_FTP, buf, 4, 0, STREAM_TOCLIENT);
AppLayerProtoDetectPrepareState();
if (alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[0].max_pat_id != 0) {
printf("alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[0].max_pat_id != 0\n");
goto end;
}
if (alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[1].max_pat_id != 2) {
printf("alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[1].max_pat_id != 2\n");
goto end;
}
if (alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[0].map != NULL) {
printf("alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[0].map != NULL\n");
goto end;
}
if (alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[1].map == NULL) {
printf("alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[1].map != NULL\n");
goto end;
}
if (alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[1].map[0]->alproto != ALPROTO_FTP) {
printf("alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[1].map[0].alproto != ALPROTO_FTP\n");
goto end;
}
if (alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[1].map[1]->alproto != ALPROTO_HTTP) {
printf("alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[1].map[1].alproto != ALPROTO_HTTP\n");
goto end;
}
r = 1;
end:
AppLayerProtoDetectDeSetup();
AppLayerProtoDetectUnittestCtxRestore();
return r;
} | 0 | [
"CWE-20"
]
| suricata | 8357ef3f8ffc7d99ef6571350724160de356158b | 320,128,234,431,460,500,000,000,000,000,000,000,000 | 47 | proto/detect: workaround dns misdetected as dcerpc
The DCERPC UDP detection would misfire on DNS with transaction
ID 0x0400. This would happen as the protocol detection engine
gives preference to pattern based detection over probing parsers for
performance reasons.
This hack/workaround fixes this specific case by still running the
probing parser if DCERPC has been detected on UDP. The probing
parser result will take precedence.
Bug #2736. |
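The workaround's decision logic is: when the pattern matcher claims DCERPC on a UDP flow, still run the probing parser and let a positive probing result win. A toy model of that flow with invented enum names and a stand-in probing parser (nothing here is Suricata's real API):

```c
#include <stdio.h>

enum proto { PROTO_UNKNOWN, PROTO_DCERPC, PROTO_DNS };

/* Stand-in for the real probing parser: recognize nothing but DNS here. */
static enum proto probing_parser(const unsigned char *payload, int len)
{
    (void)payload;
    return len >= 12 ? PROTO_DNS : PROTO_UNKNOWN;   /* DNS header is 12 bytes */
}

static enum proto detect(enum proto pattern_result,
                         const unsigned char *payload, int len, int is_udp)
{
    if (is_udp && pattern_result == PROTO_DCERPC) {
        enum proto probed = probing_parser(payload, len);
        if (probed != PROTO_UNKNOWN)
            return probed;              /* probing result takes precedence */
    }
    return pattern_result;
}

int main(void)
{
    unsigned char dns_query[32] = { 0x04, 0x00 };   /* txid 0x0400 trips the pattern */
    enum proto p = detect(PROTO_DCERPC, dns_query, (int)sizeof(dns_query), 1);
    printf("final verdict: %s\n", p == PROTO_DNS ? "DNS" : "DCERPC");
    return 0;
}
```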
static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user,
unsigned int len)
{
int ret;
if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
return -EPERM;
switch (cmd) {
case ARPT_SO_SET_REPLACE:
ret = compat_do_replace(sock_net(sk), user, len);
break;
case ARPT_SO_SET_ADD_COUNTERS:
ret = do_add_counters(sock_net(sk), user, len, 1);
break;
default:
duprintf("do_arpt_set_ctl: unknown request %i\n", cmd);
ret = -EINVAL;
}
return ret;
} | 0 | [
"CWE-119"
]
| nf-next | d7591f0c41ce3e67600a982bab6989ef0f07b3ce | 184,931,566,841,637,400,000,000,000,000,000,000,000 | 24 | netfilter: x_tables: introduce and use xt_copy_counters_from_user
The three variants use same copy&pasted code, condense this into a
helper and use that.
Make sure info.name is 0-terminated.
Signed-off-by: Florian Westphal <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]> |
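The "make sure info.name is 0-terminated" part of this change is a general rule for fixed-size names copied from untrusted input: force the last byte to NUL before the field is ever treated as a C string. A standalone sketch with an illustrative struct (not the kernel's own counters handling):

```c
#include <stdio.h>
#include <string.h>

#define NAME_MAXLEN 32

struct counters_info {
    char name[NAME_MAXLEN];
    unsigned int num_counters;
};

/* Copy a header supplied by an untrusted caller and force termination. */
static void copy_counters_header(struct counters_info *dst,
                                 const struct counters_info *user_copy)
{
    memcpy(dst, user_copy, sizeof(*dst));
    dst->name[sizeof(dst->name) - 1] = '\0';   /* never trust the terminator */
}

int main(void)
{
    struct counters_info from_user, safe;
    memset(&from_user, 'A', sizeof(from_user));   /* attacker fills every byte */

    copy_counters_header(&safe, &from_user);
    printf("name length after fixup: %zu\n", strlen(safe.name));  /* bounded */
    return 0;
}
```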
mrb_obj_clone(mrb_state *mrb, mrb_value self)
{
struct RObject *p;
mrb_value clone;
if (mrb_immediate_p(self)) {
return self;
}
if (mrb_sclass_p(self)) {
mrb_raise(mrb, E_TYPE_ERROR, "can't clone singleton class");
}
p = (struct RObject*)mrb_obj_alloc(mrb, mrb_type(self), mrb_obj_class(mrb, self));
p->c = mrb_singleton_class_clone(mrb, self);
mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)p->c);
clone = mrb_obj_value(p);
init_copy(mrb, clone, self);
p->flags |= mrb_obj_ptr(self)->flags & MRB_FL_OBJ_IS_FROZEN;
return clone;
} | 0 | [
"CWE-787"
]
| mruby | b1d0296a937fe278239bdfac840a3fd0e93b3ee9 | 199,158,828,345,728,600,000,000,000,000,000,000,000 | 20 | class.c: clear method cache after `remove_method`. |
_nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
{
struct inode *inode = data->state->inode;
struct nfs4_state *state = data->state;
int ret;
if (!data->rpc_done) {
ret = data->rpc_status;
goto err;
}
ret = -ESTALE;
if (!(data->f_attr.valid & NFS_ATTR_FATTR_TYPE) ||
!(data->f_attr.valid & NFS_ATTR_FATTR_FILEID) ||
!(data->f_attr.valid & NFS_ATTR_FATTR_CHANGE))
goto err;
ret = -ENOMEM;
state = nfs4_get_open_state(inode, data->owner);
if (state == NULL)
goto err;
ret = nfs_refresh_inode(inode, &data->f_attr);
if (ret)
goto err;
if (data->o_res.delegation_type != 0)
nfs4_opendata_check_deleg(data, state);
update_open_stateid(state, &data->o_res.stateid, NULL,
data->o_arg.fmode);
return state;
err:
return ERR_PTR(ret);
} | 0 | [
"CWE-119",
"CWE-401"
]
| linux | 7d3e91a89b7adbc2831334def9e494dd9892f9af | 75,279,954,993,606,160,000,000,000,000,000,000,000 | 36 | NFSv4: Check for buffer length in __nfs4_get_acl_uncached
Commit 1f1ea6c "NFSv4: Fix buffer overflow checking in
__nfs4_get_acl_uncached" accidently dropped the checking for too small
result buffer length.
If someone uses getxattr on "system.nfs4_acl" on an NFSv4 mount
supporting ACLs, the ACL has not been cached and the buffer supplied is
too short, we still copy the complete ACL, resulting in kernel and user
space memory corruption.
Signed-off-by: Sven Wegener <[email protected]>
Cc: [email protected]
Signed-off-by: Trond Myklebust <[email protected]> |
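The dropped check boils down to comparing the cached ACL size against the caller's buffer length before copying, and failing (or reporting the required size) instead of writing past the end. A hedged sketch of that logic with invented names, loosely following the getxattr convention that a zero-length buffer means "tell me how much I need":

```c
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/* Copy an ACL blob only if the destination can hold it. */
static ssize_t copy_acl(char *dst, size_t dst_len,
                        const char *acl, size_t acl_len)
{
    if (dst_len == 0)
        return (ssize_t)acl_len;   /* size query: report the required length */
    if (dst_len < acl_len)
        return -ERANGE;            /* caller's buffer is too small */
    memcpy(dst, acl, acl_len);
    return (ssize_t)acl_len;
}

int main(void)
{
    const char acl[] = "fake-nfs4-acl-blob";
    char small[8], big[64];

    printf("small buffer: %zd\n", copy_acl(small, sizeof(small), acl, sizeof(acl)));
    printf("big buffer:   %zd\n", copy_acl(big, sizeof(big), acl, sizeof(acl)));
    return 0;
}
```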
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
struct task_struct *task,
perf_overflow_handler_t overflow_handler,
void *context)
{
struct perf_event_context *ctx;
struct perf_event *event;
int err;
/*
* Get the target context (task or percpu):
*/
event = perf_event_alloc(attr, cpu, task, NULL, NULL,
overflow_handler, context);
if (IS_ERR(event)) {
err = PTR_ERR(event);
goto err;
}
ctx = find_get_context(event->pmu, task, cpu);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto err_free;
}
WARN_ON_ONCE(ctx->parent_ctx);
mutex_lock(&ctx->mutex);
perf_install_in_context(ctx, event, cpu);
++ctx->generation;
perf_unpin_context(ctx);
mutex_unlock(&ctx->mutex);
return event;
err_free:
free_event(event);
err:
return ERR_PTR(err);
} | 0 | [
"CWE-703",
"CWE-189"
]
| linux | 8176cced706b5e5d15887584150764894e94e02f | 166,034,224,004,910,750,000,000,000,000,000,000,000 | 40 | perf: Treat attr.config as u64 in perf_swevent_init()
Trinity discovered that we fail to check all 64 bits of
attr.config passed by user space, resulting to out-of-bounds
access of the perf_swevent_enabled array in
sw_perf_event_destroy().
Introduced in commit b0a873ebb ("perf: Register PMU
implementations").
Signed-off-by: Tommi Rantala <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: [email protected]
Cc: Paul Mackerras <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]> |
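The bug class here is checking only the low 32 bits of a 64-bit, user-supplied value: something like 0x100000000 + small_index passes the truncated check but indexes far outside the array. A short demonstration of the difference (illustrative names, not the perf code):

```c
#include <stdint.h>
#include <stdio.h>

#define MAX_EVENTS 16

/* Truncating check: only the low 32 bits are compared. */
static int check_u32_only(uint64_t config) { return (uint32_t)config < MAX_EVENTS; }

/* Correct check: the full 64-bit value is compared. */
static int check_full_u64(uint64_t config) { return config < MAX_EVENTS; }

int main(void)
{
    uint64_t evil = 0x100000000ULL + 5;   /* low 32 bits look like a valid index */

    printf("32-bit check accepts it: %d  (bad)\n",  check_u32_only(evil));
    printf("64-bit check accepts it: %d  (good)\n", check_full_u64(evil));
    return 0;
}
```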
static unsigned long convert_eip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
unsigned long addr, seg;
addr = regs->eip;
seg = regs->xcs & 0xffff;
if (regs->eflags & VM_MASK) {
addr = (addr & 0xffff) + (seg << 4);
return addr;
}
/*
* We'll assume that the code segments in the GDT
* are all zero-based. That is largely true: the
* TLS segments are used for data, and the PNPBIOS
* and APM bios ones we just ignore here.
*/
if (seg & LDT_SEGMENT) {
u32 *desc;
unsigned long base;
seg &= ~7UL;
down(&child->mm->context.sem);
if (unlikely((seg >> 3) >= child->mm->context.size))
addr = -1L; /* bogus selector, access would fault */
else {
desc = child->mm->context.ldt + seg;
base = ((desc[0] >> 16) |
((desc[1] & 0xff) << 16) |
(desc[1] & 0xff000000));
/* 16-bit code segment? */
if (!((desc[1] >> 22) & 1))
addr &= 0xffff;
addr += base;
}
up(&child->mm->context.sem);
}
return addr;
} | 0 | [
"CWE-20"
]
| linux-2.6 | 29eb51101c02df517ca64ec472d7501127ad1da8 | 144,698,100,908,239,220,000,000,000,000,000,000,000 | 41 | Handle bogus %cs selector in single-step instruction decoding
The code for LDT segment selectors was not robust in the face of a bogus
selector set in %cs via ptrace before the single-step was done.
Signed-off-by: Roland McGrath <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
struct ath6kl_vif *vif)
{
struct wmi_cac_event *reply;
struct ieee80211_tspec_ie *ts;
u16 active_tsids, tsinfo;
u8 tsid, index;
u8 ts_id;
if (len < sizeof(struct wmi_cac_event))
return -EINVAL;
reply = (struct wmi_cac_event *) datap;
if (reply->ac >= WMM_NUM_AC) {
ath6kl_err("invalid AC: %d\n", reply->ac);
return -EINVAL;
}
if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) &&
(reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) {
ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
tsinfo = le16_to_cpu(ts->tsinfo);
tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
IEEE80211_WMM_IE_TSPEC_TID_MASK;
ath6kl_wmi_delete_pstream_cmd(wmi, vif->fw_vif_idx,
reply->ac, tsid);
} else if (reply->cac_indication == CAC_INDICATION_NO_RESP) {
/*
* Following assumes that there is only one outstanding
* ADDTS request when this event is received
*/
spin_lock_bh(&wmi->lock);
active_tsids = wmi->stream_exist_for_ac[reply->ac];
spin_unlock_bh(&wmi->lock);
for (index = 0; index < sizeof(active_tsids) * 8; index++) {
if ((active_tsids >> index) & 1)
break;
}
if (index < (sizeof(active_tsids) * 8))
ath6kl_wmi_delete_pstream_cmd(wmi, vif->fw_vif_idx,
reply->ac, index);
}
/*
* Clear active tsids and Add missing handling
* for delete qos stream from AP
*/
else if (reply->cac_indication == CAC_INDICATION_DELETE) {
ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
tsinfo = le16_to_cpu(ts->tsinfo);
ts_id = ((tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
IEEE80211_WMM_IE_TSPEC_TID_MASK);
spin_lock_bh(&wmi->lock);
wmi->stream_exist_for_ac[reply->ac] &= ~(1 << ts_id);
active_tsids = wmi->stream_exist_for_ac[reply->ac];
spin_unlock_bh(&wmi->lock);
/* Indicate stream inactivity to driver layer only if all tsids
* within this AC are deleted.
*/
if (!active_tsids) {
ath6kl_indicate_tx_activity(wmi->parent_dev, reply->ac,
false);
wmi->fat_pipe_exist &= ~(1 << reply->ac);
}
}
return 0;
} | 0 | [
"CWE-125"
]
| linux | 5d6751eaff672ea77642e74e92e6c0ac7f9709ab | 59,331,335,071,738,745,000,000,000,000,000,000,000 | 72 | ath6kl: add some bounds checking
The "ev->traffic_class" and "reply->ac" variables come from the network
and they're used as an offset into the wmi->stream_exist_for_ac[] array.
Those variables are u8 so they can be 0-255 but the stream_exist_for_ac[]
array only has WMM_NUM_AC (4) elements. We need to add a couple bounds
checks to prevent array overflows.
I also modified one existing check from "if (traffic_class > 3) {" to
"if (traffic_class >= WMM_NUM_AC) {" just to make them all consistent.
Fixes: bdcd81707973 (" Add ath6kl cleaned up driver")
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Kalle Valo <[email protected]> |
static void mark_ptr_or_null_reg(struct bpf_func_state *state,
struct bpf_reg_state *reg, u32 id,
bool is_null)
{
if (reg_type_may_be_null(reg->type) && reg->id == id &&
!WARN_ON_ONCE(!reg->id)) {
/* Old offset (both fixed and variable parts) should
* have been known-zero, because we don't allow pointer
* arithmetic on pointers that might be NULL.
*/
if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
!tnum_equals_const(reg->var_off, 0) ||
reg->off)) {
__mark_reg_known_zero(reg);
reg->off = 0;
}
if (is_null) {
reg->type = SCALAR_VALUE;
} else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
const struct bpf_map *map = reg->map_ptr;
if (map->inner_map_meta) {
reg->type = CONST_PTR_TO_MAP;
reg->map_ptr = map->inner_map_meta;
} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
reg->type = PTR_TO_XDP_SOCK;
} else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
map->map_type == BPF_MAP_TYPE_SOCKHASH) {
reg->type = PTR_TO_SOCKET;
} else {
reg->type = PTR_TO_MAP_VALUE;
}
} else if (reg->type == PTR_TO_SOCKET_OR_NULL) {
reg->type = PTR_TO_SOCKET;
} else if (reg->type == PTR_TO_SOCK_COMMON_OR_NULL) {
reg->type = PTR_TO_SOCK_COMMON;
} else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) {
reg->type = PTR_TO_TCP_SOCK;
} else if (reg->type == PTR_TO_BTF_ID_OR_NULL) {
reg->type = PTR_TO_BTF_ID;
} else if (reg->type == PTR_TO_MEM_OR_NULL) {
reg->type = PTR_TO_MEM;
} else if (reg->type == PTR_TO_RDONLY_BUF_OR_NULL) {
reg->type = PTR_TO_RDONLY_BUF;
} else if (reg->type == PTR_TO_RDWR_BUF_OR_NULL) {
reg->type = PTR_TO_RDWR_BUF;
}
if (is_null) {
/* We don't need id and ref_obj_id from this point
* onwards anymore, thus we should better reset it,
* so that state pruning has chances to take effect.
*/
reg->id = 0;
reg->ref_obj_id = 0;
} else if (!reg_may_point_to_spin_lock(reg)) {
/* For not-NULL ptr, reg->ref_obj_id will be reset
* in release_reg_references().
*
* reg->id is still used by spin_lock ptr. Other
* than spin_lock ptr type, reg->id can be reset.
*/
reg->id = 0;
}
}
} | 0 | []
| linux | 9b00f1b78809309163dda2d044d9e94a3c0248a3 | 328,621,586,278,739,540,000,000,000,000,000,000,000 | 65 | bpf: Fix truncation handling for mod32 dst reg wrt zero
Recently noticed that when mod32 with a known src reg of 0 is performed,
then the dst register is 32-bit truncated in verifier:
0: R1=ctx(id=0,off=0,imm=0) R10=fp0
0: (b7) r0 = 0
1: R0_w=inv0 R1=ctx(id=0,off=0,imm=0) R10=fp0
1: (b7) r1 = -1
2: R0_w=inv0 R1_w=inv-1 R10=fp0
2: (b4) w2 = -1
3: R0_w=inv0 R1_w=inv-1 R2_w=inv4294967295 R10=fp0
3: (9c) w1 %= w0
4: R0_w=inv0 R1_w=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2_w=inv4294967295 R10=fp0
4: (b7) r0 = 1
5: R0_w=inv1 R1_w=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2_w=inv4294967295 R10=fp0
5: (1d) if r1 == r2 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2_w=inv4294967295 R10=fp0
6: R0_w=inv1 R1_w=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2_w=inv4294967295 R10=fp0
6: (b7) r0 = 2
7: R0_w=inv2 R1_w=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2_w=inv4294967295 R10=fp0
7: (95) exit
7: R0=inv1 R1=inv(id=0,umin_value=4294967295,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2=inv4294967295 R10=fp0
7: (95) exit
However, as a runtime result, we get 2 instead of 1, meaning the dst
register does not contain (u32)-1 in this case. The reason is fairly
straight forward given the 0 test leaves the dst register as-is:
# ./bpftool p d x i 23
0: (b7) r0 = 0
1: (b7) r1 = -1
2: (b4) w2 = -1
3: (16) if w0 == 0x0 goto pc+1
4: (9c) w1 %= w0
5: (b7) r0 = 1
6: (1d) if r1 == r2 goto pc+1
7: (b7) r0 = 2
8: (95) exit
This was originally not an issue given the dst register was marked as
completely unknown (aka 64 bit unknown). However, after 468f6eafa6c4
("bpf: fix 32-bit ALU op verification") the verifier casts the register
output to 32 bit, and hence it becomes 32 bit unknown. Note that for
the case where the src register is unknown, the dst register is marked
64 bit unknown. After the fix, the register is truncated by the runtime
and the test passes:
# ./bpftool p d x i 23
0: (b7) r0 = 0
1: (b7) r1 = -1
2: (b4) w2 = -1
3: (16) if w0 == 0x0 goto pc+2
4: (9c) w1 %= w0
5: (05) goto pc+1
6: (bc) w1 = w1
7: (b7) r0 = 1
8: (1d) if r1 == r2 goto pc+1
9: (b7) r0 = 2
10: (95) exit
Semantics also match with {R,W}x mod{64,32} 0 -> {R,W}x. Invalid div
has always been {R,W}x div{64,32} 0 -> 0. Rewrites are as follows:
mod32:
(16) if w0 == 0x0 goto pc+2
(9c) w1 %= w0
(05) goto pc+1
(bc) w1 = w1
mod64:
(15) if r0 == 0x0 goto pc+1
(9f) r1 %= r0
Fixes: 468f6eafa6c4 ("bpf: fix 32-bit ALU op verification")
Signed-off-by: Daniel Borkmann <[email protected]>
Reviewed-by: John Fastabend <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]> |
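The semantics spelled out at the end of the message ({R,W}x mod{64,32} 0 -> {R,W}x, div by 0 -> 0, with the 32-bit forms truncating the destination) can be modelled in a few lines of plain C; this is just the arithmetic rule, not eBPF itself:

```c
#include <stdint.h>
#include <stdio.h>

/* mod by zero leaves the destination unchanged (64-bit form). */
static uint64_t mod64(uint64_t dst, uint64_t src) { return src ? dst % src : dst; }

/* 32-bit form: the destination is always truncated to 32 bits,
 * even when the divisor is zero. */
static uint64_t mod32(uint64_t dst, uint64_t src)
{
    uint32_t d = (uint32_t)dst, s = (uint32_t)src;
    return s ? (d % s) : d;
}

int main(void)
{
    uint64_t r1 = UINT64_MAX;   /* r1 = -1, so w1 = 0xffffffff */

    printf("r1 mod64 0 = %#llx\n", (unsigned long long)mod64(r1, 0));
    printf("w1 mod32 0 = %#llx\n", (unsigned long long)mod32(r1, 0));
    return 0;
}
```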
R_API void r_core_anal_esil(RCore *core, const char *str, const char *target) {
bool cfg_anal_strings = r_config_get_i (core->config, "anal.strings");
bool emu_lazy = r_config_get_i (core->config, "emu.lazy");
bool gp_fixed = r_config_get_i (core->config, "anal.gpfixed");
RAnalEsil *ESIL = core->anal->esil;
ut64 refptr = 0LL;
char *pcname = NULL;
RAnalOp op = R_EMPTY;
ut8 *buf = NULL;
bool end_address_set = false;
int iend;
int minopsize = 4; // XXX this depends on asm->mininstrsize
bool archIsArm = false;
ut64 addr = core->offset;
ut64 start = addr;
ut64 end = 0LL;
ut64 cur;
if (esil_anal_stop || r_cons_is_breaked ()) {
// faster ^C
return;
}
mycore = core;
if (!strcmp (str, "?")) {
eprintf ("Usage: aae[f] [len] [addr] - analyze refs in function, section or len bytes with esil\n");
eprintf (" aae $SS @ $S - analyze the whole section\n");
eprintf (" aae $SS str.Hello @ $S - find references for str.Hellow\n");
eprintf (" aaef - analyze functions discovered with esil\n");
return;
}
#define CHECKREF(x) ((refptr && (x) == refptr) || !refptr)
if (target) {
const char *expr = r_str_trim_head_ro (target);
if (*expr) {
refptr = ntarget = r_num_math (core->num, expr);
if (!refptr) {
ntarget = refptr = addr;
}
} else {
ntarget = UT64_MAX;
refptr = 0LL;
}
} else {
ntarget = UT64_MAX;
refptr = 0LL;
}
RAnalFunction *fcn = NULL;
if (!strcmp (str, "f")) {
fcn = r_anal_get_fcn_in (core->anal, core->offset, 0);
if (fcn) {
start = r_anal_function_min_addr (fcn);
addr = fcn->addr;
end = r_anal_function_max_addr (fcn);
end_address_set = true;
}
}
if (!end_address_set) {
if (str[0] == ' ') {
end = addr + r_num_math (core->num, str + 1);
} else {
RIOMap *map = r_io_map_get_at (core->io, addr);
if (map) {
end = r_io_map_end (map);
} else {
end = addr + core->blocksize;
}
}
}
iend = end - start;
if (iend < 0) {
return;
}
if (iend > MAX_SCAN_SIZE) {
eprintf ("Warning: Not going to analyze 0x%08"PFMT64x" bytes.\n", (ut64)iend);
return;
}
buf = malloc ((size_t)iend + 2);
if (!buf) {
perror ("malloc");
return;
}
esilbreak_last_read = UT64_MAX;
r_io_read_at (core->io, start, buf, iend + 1);
if (!ESIL) {
r_core_cmd0 (core, "aei");
ESIL = core->anal->esil;
if (!ESIL) {
eprintf ("ESIL not initialized\n");
return;
}
r_core_cmd0 (core, "aeim");
ESIL = core->anal->esil;
}
const char *kspname = r_reg_get_name (core->anal->reg, R_REG_NAME_SP);
if (R_STR_ISEMPTY (kspname)) {
eprintf ("Error: No =SP defined in the reg profile.\n");
return;
}
char *spname = strdup (kspname);
EsilBreakCtx ctx = {
&op,
fcn,
spname,
r_reg_getv (core->anal->reg, spname)
};
ESIL->cb.hook_reg_write = &esilbreak_reg_write;
//this is necessary for the hook to read the id of analop
ESIL->user = &ctx;
ESIL->cb.hook_mem_read = &esilbreak_mem_read;
ESIL->cb.hook_mem_write = &esilbreak_mem_write;
if (fcn && fcn->reg_save_area) {
r_reg_setv (core->anal->reg, ctx.spname, ctx.initial_sp - fcn->reg_save_area);
}
//eprintf ("Analyzing ESIL refs from 0x%"PFMT64x" - 0x%"PFMT64x"\n", addr, end);
// TODO: backup/restore register state before/after analysis
const char *kpcname = r_reg_get_name (core->anal->reg, R_REG_NAME_PC);
if (!kpcname || !*kpcname) {
eprintf ("Cannot find program counter register in the current profile.\n");
return;
}
pcname = strdup (kpcname);
esil_anal_stop = false;
r_cons_break_push (cccb, core);
int arch = -1;
if (!strcmp (core->anal->cur->arch, "arm")) {
switch (core->anal->cur->bits) {
case 64: arch = R2_ARCH_ARM64; break;
case 32: arch = R2_ARCH_ARM32; break;
case 16: arch = R2_ARCH_THUMB; break;
}
archIsArm = true;
}
ut64 gp = r_config_get_i (core->config, "anal.gp");
const char *gp_reg = NULL;
if (!strcmp (core->anal->cur->arch, "mips")) {
gp_reg = "gp";
arch = R2_ARCH_MIPS;
}
const char *sn = r_reg_get_name (core->anal->reg, R_REG_NAME_SN);
if (!sn) {
eprintf ("Warning: No SN reg alias for current architecture.\n");
}
r_reg_arena_push (core->anal->reg);
IterCtx ictx = { start, end, fcn, NULL };
size_t i = addr - start;
size_t i_old = 0;
do {
if (esil_anal_stop || r_cons_is_breaked ()) {
break;
}
cur = start + i;
if (!r_io_is_valid_offset (core->io, cur, 0)) {
break;
}
#if 0
// disabled because it causes some tests to fail
{
RPVector *list = r_meta_get_all_in (core->anal, cur, R_META_TYPE_ANY);
void **it;
r_pvector_foreach (list, it) {
RIntervalNode *node = *it;
RAnalMetaItem *meta = node->data;
switch (meta->type) {
case R_META_TYPE_DATA:
case R_META_TYPE_STRING:
case R_META_TYPE_FORMAT:
#if 0
{
int msz = r_meta_get_size (core->anal, meta->type);
i += (msz > 0)? msz: minopsize;
}
r_pvector_free (list);
goto loopback;
#elif 0
{
int msz = r_meta_get_size (core->anal, meta->type);
i += (msz > 0)? msz: minopsize;
i--;
}
#else
i += 4;
goto repeat;
#endif
default:
break;
}
}
r_pvector_free (list);
}
#endif
/* realign address if needed */
r_core_seek_arch_bits (core, cur);
int opalign = core->anal->pcalign;
if (opalign > 0) {
cur -= (cur % opalign);
}
r_anal_op_fini (&op);
r_asm_set_pc (core->rasm, cur);
i_old = i;
if (i > iend) {
goto repeat;
}
if (!r_anal_op (core->anal, &op, cur, buf + i, iend - i, R_ANAL_OP_MASK_ESIL | R_ANAL_OP_MASK_VAL | R_ANAL_OP_MASK_HINT)) {
i += minopsize - 1; // XXX dupe in op.size below
}
if (op.type == R_ANAL_OP_TYPE_ILL || op.type == R_ANAL_OP_TYPE_UNK) {
// i += 2
r_anal_op_fini (&op);
goto repeat;
}
//we need to check i again because buf+i may go beyond its boundaries
//because of i+= minopsize - 1
if (op.size < 1) {
i += minopsize - 1;
goto repeat;
}
if (emu_lazy) {
if (op.type & R_ANAL_OP_TYPE_REP) {
i += op.size - 1;
goto repeat;
}
switch (op.type & R_ANAL_OP_TYPE_MASK) {
case R_ANAL_OP_TYPE_JMP:
case R_ANAL_OP_TYPE_CJMP:
case R_ANAL_OP_TYPE_CALL:
case R_ANAL_OP_TYPE_RET:
case R_ANAL_OP_TYPE_ILL:
case R_ANAL_OP_TYPE_NOP:
case R_ANAL_OP_TYPE_UJMP:
case R_ANAL_OP_TYPE_IO:
case R_ANAL_OP_TYPE_LEAVE:
case R_ANAL_OP_TYPE_CRYPTO:
case R_ANAL_OP_TYPE_CPL:
case R_ANAL_OP_TYPE_SYNC:
case R_ANAL_OP_TYPE_SWI:
case R_ANAL_OP_TYPE_CMP:
case R_ANAL_OP_TYPE_ACMP:
case R_ANAL_OP_TYPE_NULL:
case R_ANAL_OP_TYPE_CSWI:
case R_ANAL_OP_TYPE_TRAP:
i += op.size - 1;
goto repeat;
// those require write support
case R_ANAL_OP_TYPE_PUSH:
case R_ANAL_OP_TYPE_POP:
i += op.size - 1;
goto repeat;
}
}
if (sn && op.type == R_ANAL_OP_TYPE_SWI) {
r_strf_buffer (64);
r_flag_space_set (core->flags, R_FLAGS_FS_SYSCALLS);
int snv = (arch == R2_ARCH_THUMB)? op.val: (int)r_reg_getv (core->anal->reg, sn);
RSyscallItem *si = r_syscall_get (core->anal->syscall, snv, -1);
if (si) {
// eprintf ("0x%08"PFMT64x" SYSCALL %-4d %s\n", cur, snv, si->name);
r_flag_set_next (core->flags, r_strf ("syscall.%s", si->name), cur, 1);
} else {
//todo: we're doing less filtering up top because we can't match against 80 on all platforms
// might get too many of this path now..
// eprintf ("0x%08"PFMT64x" SYSCALL %d\n", cur, snv);
r_flag_set_next (core->flags, r_strf ("syscall.%d", snv), cur, 1);
}
r_flag_space_set (core->flags, NULL);
r_syscall_item_free (si);
}
const char *esilstr = R_STRBUF_SAFEGET (&op.esil);
i += op.size - 1;
if (R_STR_ISEMPTY (esilstr)) {
goto repeat;
}
r_anal_esil_set_pc (ESIL, cur);
r_reg_setv (core->anal->reg, pcname, cur + op.size);
if (gp_fixed && gp_reg) {
r_reg_setv (core->anal->reg, gp_reg, gp);
}
(void)r_anal_esil_parse (ESIL, esilstr);
// looks like ^C is handled by esil_parse !!!!
//r_anal_esil_dumpstack (ESIL);
//r_anal_esil_stack_free (ESIL);
switch (op.type) {
case R_ANAL_OP_TYPE_LEA:
// arm64
if (core->anal->cur && arch == R2_ARCH_ARM64) {
if (CHECKREF (ESIL->cur)) {
r_anal_xrefs_set (core->anal, cur, ESIL->cur, R_ANAL_REF_TYPE_STRING);
}
} else if ((target && op.ptr == ntarget) || !target) {
if (CHECKREF (ESIL->cur)) {
if (op.ptr && r_io_is_valid_offset (core->io, op.ptr, !core->anal->opt.noncode)) {
r_anal_xrefs_set (core->anal, cur, op.ptr, R_ANAL_REF_TYPE_STRING);
} else {
r_anal_xrefs_set (core->anal, cur, ESIL->cur, R_ANAL_REF_TYPE_STRING);
}
}
}
if (cfg_anal_strings) {
add_string_ref (core, op.addr, op.ptr);
}
break;
case R_ANAL_OP_TYPE_ADD:
/* TODO: test if this is valid for other archs too */
if (core->anal->cur && archIsArm) {
/* This code is known to work on Thumb, ARM and ARM64 */
ut64 dst = ESIL->cur;
if ((target && dst == ntarget) || !target) {
if (CHECKREF (dst)) {
int type = core_type_by_addr (core, dst); // R_ANAL_REF_TYPE_DATA;
r_anal_xrefs_set (core->anal, cur, dst, type);
}
}
if (cfg_anal_strings) {
add_string_ref (core, op.addr, dst);
}
} else if ((core->anal->bits == 32 && core->anal->cur && arch == R2_ARCH_MIPS)) {
ut64 dst = ESIL->cur;
if (!op.src[0] || !op.src[0]->reg || !op.src[0]->reg->name) {
break;
}
if (!strcmp (op.src[0]->reg->name, "sp")) {
break;
}
if (!strcmp (op.src[0]->reg->name, "zero")) {
break;
}
if ((target && dst == ntarget) || !target) {
if (dst > 0xffff && op.src[1] && (dst & 0xffff) == (op.src[1]->imm & 0xffff) && myvalid (mycore->io, dst)) {
RFlagItem *f;
char *str;
if (CHECKREF (dst) || CHECKREF (cur)) {
r_anal_xrefs_set (core->anal, cur, dst, R_ANAL_REF_TYPE_DATA);
if (cfg_anal_strings) {
add_string_ref (core, op.addr, dst);
}
if ((f = r_core_flag_get_by_spaces (core->flags, dst))) {
r_meta_set_string (core->anal, R_META_TYPE_COMMENT, cur, f->name);
} else if ((str = is_string_at (mycore, dst, NULL))) {
char *str2 = r_str_newf ("esilref: '%s'", str);
// HACK avoid format string inside string used later as format
// string crashes disasm inside agf under some conditions.
// https://github.com/radareorg/radare2/issues/6937
r_str_replace_char (str2, '%', '&');
r_meta_set_string (core->anal, R_META_TYPE_COMMENT, cur, str2);
free (str2);
free (str);
}
}
}
}
}
break;
case R_ANAL_OP_TYPE_LOAD:
{
ut64 dst = esilbreak_last_read;
if (dst != UT64_MAX && CHECKREF (dst)) {
if (myvalid (mycore->io, dst)) {
r_anal_xrefs_set (core->anal, cur, dst, R_ANAL_REF_TYPE_DATA);
if (cfg_anal_strings) {
add_string_ref (core, op.addr, dst);
}
}
}
dst = esilbreak_last_data;
if (dst != UT64_MAX && CHECKREF (dst)) {
if (myvalid (mycore->io, dst)) {
r_anal_xrefs_set (core->anal, cur, dst, R_ANAL_REF_TYPE_DATA);
if (cfg_anal_strings) {
add_string_ref (core, op.addr, dst);
}
}
}
}
break;
case R_ANAL_OP_TYPE_JMP:
{
ut64 dst = op.jump;
if (CHECKREF (dst)) {
if (myvalid (core->io, dst)) {
r_anal_xrefs_set (core->anal, cur, dst, R_ANAL_REF_TYPE_CODE);
}
}
}
break;
case R_ANAL_OP_TYPE_CALL:
{
ut64 dst = op.jump;
if (CHECKREF (dst)) {
if (myvalid (core->io, dst)) {
r_anal_xrefs_set (core->anal, cur, dst, R_ANAL_REF_TYPE_CALL);
}
ESIL->old = cur + op.size;
getpcfromstack (core, ESIL);
}
}
break;
case R_ANAL_OP_TYPE_UJMP:
case R_ANAL_OP_TYPE_UCALL:
case R_ANAL_OP_TYPE_ICALL:
case R_ANAL_OP_TYPE_RCALL:
case R_ANAL_OP_TYPE_IRCALL:
case R_ANAL_OP_TYPE_MJMP:
{
ut64 dst = core->anal->esil->jump_target;
if (dst == 0 || dst == UT64_MAX) {
dst = r_reg_getv (core->anal->reg, pcname);
}
if (CHECKREF (dst)) {
if (myvalid (core->io, dst)) {
RAnalRefType ref =
(op.type & R_ANAL_OP_TYPE_MASK) == R_ANAL_OP_TYPE_UCALL
? R_ANAL_REF_TYPE_CALL
: R_ANAL_REF_TYPE_CODE;
r_anal_xrefs_set (core->anal, cur, dst, ref);
r_core_anal_fcn (core, dst, UT64_MAX, R_ANAL_REF_TYPE_NULL, 1);
// analyze function here
#if 0
if (op.type == R_ANAL_OP_TYPE_UCALL || op.type == R_ANAL_OP_TYPE_RCALL) {
eprintf ("0x%08"PFMT64x" RCALL TO %llx\n", cur, dst);
}
#endif
}
}
}
break;
default:
break;
}
r_anal_esil_stack_free (ESIL);
repeat:
if (!r_anal_get_block_at (core->anal, cur)) {
size_t fcn_i;
for (fcn_i = i_old + 1; fcn_i <= i; fcn_i++) {
if (r_anal_get_function_at (core->anal, start + fcn_i)) {
i = fcn_i - 1;
break;
}
}
}
if (i >= iend) {
break;
}
} while (get_next_i (&ictx, &i));
free (pcname);
free (spname);
r_list_free (ictx.bbl);
r_list_free (ictx.path);
r_list_free (ictx.switch_path);
free (buf);
ESIL->cb.hook_mem_read = NULL;
ESIL->cb.hook_mem_write = NULL;
ESIL->cb.hook_reg_write = NULL;
ESIL->user = NULL;
r_anal_op_fini (&op);
r_cons_break_pop ();
// restore register
r_reg_arena_pop (core->anal->reg);
} | 0 | [
"CWE-416"
]
| radare2 | 10517e3ff0e609697eb8cde60ec8dc999ee5ea24 | 214,766,261,348,486,150,000,000,000,000,000,000,000 | 466 | aaef on arm/thumb switches causes uaf ##crash
* Reported by peacock-doris via huntr.dev
* Reproducer: poc_uaf_r_reg_get |
TEST_F(QueryPlannerTest, IndexBoundsAndWithNestedOr) {
addIndex(BSON("a" << 1));
runQuery(fromjson("{$and: [{a: 1, $or: [{a: 2}, {a: 3}]}]}"));
// Given that the index over 'a' isn't multikey, we ideally won't generate any solutions
// since we know the query describes an empty set if 'a' isn't multikey. Any solutions
// below are "this is how it currently works" instead of "this is how it should work."
// It's kind of iffy to look for indexed solutions so we don't...
size_t matches = 0;
matches += numSolutionMatches(
"{cscan: {dir: 1, filter: "
"{$or: [{a: 2, a:1}, {a: 3, a:1}]}}}");
matches += numSolutionMatches(
"{cscan: {dir: 1, filter: "
"{$and: [{$or: [{a: 2}, {a: 3}]}, {a: 1}]}}}");
ASSERT_GREATER_THAN_OR_EQUALS(matches, 1U);
} | 0 | []
| mongo | ee97c0699fd55b498310996ee002328e533681a3 | 159,002,609,811,706,020,000,000,000,000,000,000,000 | 18 | SERVER-36993 Fix crash due to incorrect $or pushdown for indexed $expr. |
static struct fib6_node *node_alloc(struct net *net)
{
struct fib6_node *fn;
fn = kmem_cache_zalloc(fib6_node_kmem, GFP_ATOMIC);
if (fn)
net->ipv6.rt6_stats->fib_nodes++;
return fn;
} | 0 | [
"CWE-755"
]
| linux | 7b09c2d052db4b4ad0b27b97918b46a7746966fa | 202,214,943,992,920,260,000,000,000,000,000,000,000 | 10 | ipv6: fix a typo in fib6_rule_lookup()
Yi Ren reported an issue discovered by syzkaller, and bisected
to the cited commit.
Many thanks to Yi, this trivial patch does not reflect the patient
work that has been done.
Fixes: d64a1f574a29 ("ipv6: honor RT6_LOOKUP_F_DST_NOREF in rule lookup logic")
Signed-off-by: Eric Dumazet <[email protected]>
Acked-by: Wei Wang <[email protected]>
Bisected-and-reported-by: Yi Ren <[email protected]>
Signed-off-by: Jakub Kicinski <[email protected]> |
R_API RList *r_bin_java_get_bin_obj_list_thru_obj(RBinJavaObj *bin_obj) {
RList *the_list;
Sdb *sdb;
if (!bin_obj) {
return NULL;
}
sdb = bin_obj->AllJavaBinObjs;
if (!sdb) {
return NULL;
}
the_list = r_list_new ();
if (!the_list) {
return NULL;
}
sdb_foreach (sdb, sdb_iterate_build_list, (void *) the_list);
return the_list;
} | 0 | [
"CWE-119",
"CWE-788"
]
| radare2 | 6c4428f018d385fc80a33ecddcb37becea685dd5 | 287,724,326,087,792,400,000,000,000,000,000,000,000 | 17 | Improve boundary checks to fix oobread segfaults ##crash
* Reported by Cen Zhang via huntr.dev
* Reproducer: bins/fuzzed/javaoob-havoc.class |
static int ql_create_send_free_list(struct ql3_adapter *qdev)
{
struct ql_tx_buf_cb *tx_cb;
int i;
struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;
/* Create free list of transmit buffers */
for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
tx_cb = &qdev->tx_buf[i];
tx_cb->skb = NULL;
tx_cb->queue_entry = req_q_curr;
req_q_curr++;
tx_cb->oal = kmalloc(512, GFP_KERNEL);
if (tx_cb->oal == NULL)
return -ENOMEM;
}
return 0; | 0 | [
"CWE-401"
]
| linux | 1acb8f2a7a9f10543868ddd737e37424d5c36cf4 | 249,427,911,120,946,670,000,000,000,000,000,000,000 | 19 | net: qlogic: Fix memory leak in ql_alloc_large_buffers
In ql_alloc_large_buffers, a new skb is allocated via netdev_alloc_skb.
This skb should be released if pci_dma_mapping_error fails.
Fixes: 0f8ab89e825f ("qla3xxx: Check return code from pci_map_single() in ql_release_to_lrg_buf_free_list(), ql_populate_free_queue(), ql_alloc_large_buffers(), and ql3xxx_send()")
Signed-off-by: Navid Emamdoost <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
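A minimal, self-contained sketch of the error-path pattern the qlogic message above describes: release a freshly allocated buffer when the follow-up setup step fails. alloc_buffer and map_buffer are illustrative stand-ins, not the driver's actual netdev_alloc_skb/pci_map_single calls.

#include <stdlib.h>

/* Stand-ins for the allocate-then-map pair mentioned in the commit message. */
static void *alloc_buffer(size_t len) { return malloc(len); }
static int map_buffer(const void *buf, size_t len) { return (buf && len) ? 0 : -1; }

static int setup_rx_buffer(size_t len)
{
    void *buf = alloc_buffer(len);
    if (!buf)
        return -1;
    if (map_buffer(buf, len) != 0) {
        free(buf);              /* release the allocation when the next step fails */
        return -1;
    }
    /* ... hand the mapped buffer to the device ... */
    free(buf);                  /* teardown for this self-contained example */
    return 0;
}

int main(void)
{
    return setup_rx_buffer(1500) == 0 ? 0 : 1;
}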
call_partial(
typval_T *tv,
int argcount_arg,
ectx_T *ectx)
{
int argcount = argcount_arg;
char_u *name = NULL;
int called_emsg_before = called_emsg;
int res = FAIL;
dict_T *selfdict = NULL;
if (tv->v_type == VAR_PARTIAL)
{
partial_T *pt = tv->vval.v_partial;
int i;
if (pt->pt_argc > 0)
{
// Make space for arguments from the partial, shift the "argcount"
// arguments up.
if (GA_GROW_FAILS(&ectx->ec_stack, pt->pt_argc))
return FAIL;
for (i = 1; i <= argcount; ++i)
*STACK_TV_BOT(-i + pt->pt_argc) = *STACK_TV_BOT(-i);
ectx->ec_stack.ga_len += pt->pt_argc;
argcount += pt->pt_argc;
// copy the arguments from the partial onto the stack
for (i = 0; i < pt->pt_argc; ++i)
copy_tv(&pt->pt_argv[i], STACK_TV_BOT(-argcount + i));
}
selfdict = pt->pt_dict;
if (pt->pt_func != NULL)
return call_ufunc(pt->pt_func, pt, argcount, ectx, NULL, selfdict);
name = pt->pt_name;
}
else if (tv->v_type == VAR_FUNC)
name = tv->vval.v_string;
if (name != NULL)
{
char_u fname_buf[FLEN_FIXED + 1];
char_u *tofree = NULL;
int error = FCERR_NONE;
char_u *fname;
// May need to translate <SNR>123_ to K_SNR.
fname = fname_trans_sid(name, fname_buf, &tofree, &error);
if (error != FCERR_NONE)
res = FAIL;
else
res = call_by_name(fname, argcount, ectx, NULL, selfdict);
vim_free(tofree);
}
if (res == FAIL)
{
if (called_emsg == called_emsg_before)
semsg(_(e_unknown_function_str),
name == NULL ? (char_u *)"[unknown]" : name);
return FAIL;
}
return OK;
} | 0 | [
"CWE-416"
]
| vim | 9c23f9bb5fe435b28245ba8ac65aa0ca6b902c04 | 179,803,794,467,342,350,000,000,000,000,000,000,000 | 65 | patch 8.2.3902: Vim9: double free with nested :def function
Problem: Vim9: double free with nested :def function.
Solution: Pass "line_to_free" from compile_def_function() and make sure
cmdlinep is valid. |
xmlWrapOpenUtf8(const char *path,int mode)
{
FILE *fd = NULL;
wchar_t *wPath;
wPath = __xmlIOWin32UTF8ToWChar(path);
if(wPath)
{
fd = _wfopen(wPath, mode ? L"wb" : L"rb");
xmlFree(wPath);
}
/* maybe path in native encoding */
if(fd == NULL)
fd = fopen(path, mode ? "wb" : "rb");
return fd;
} | 0 | [
"CWE-134"
]
| libxml2 | 4472c3a5a5b516aaf59b89be602fbce52756c3e9 | 128,963,386,627,112,520,000,000,000,000,000,000,000 | 17 | Fix some format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
Decorate every method in libxml2 with the appropriate
LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups
following the reports. |
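A small illustration of the compile-time format checking the libxml2 message above refers to. ATTR_FORMAT is a local macro assumed to mirror what LIBXML_ATTR_FORMAT expands to on GCC/Clang; log_msg is an invented example function.

#include <stdarg.h>
#include <stdio.h>

#if defined(__GNUC__)
#define ATTR_FORMAT(fmt, args) __attribute__((format(printf, fmt, args)))
#else
#define ATTR_FORMAT(fmt, args)
#endif

/* The attribute lets the compiler warn when callers pass a mismatched
 * or non-literal format string. */
static void log_msg(const char *fmt, ...) ATTR_FORMAT(1, 2);

static void log_msg(const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}

int main(void)
{
    log_msg("loaded %d entities from %s\n", 3, "test.xml");
    return 0;
}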
static lu_mem propagatemark (global_State *g) {
GCObject *o = g->gray;
gray2black(o);
g->gray = *getgclist(o); /* remove from 'gray' list */
switch (o->tt) {
case LUA_VTABLE: return traversetable(g, gco2t(o));
case LUA_VUSERDATA: return traverseudata(g, gco2u(o));
case LUA_VLCL: return traverseLclosure(g, gco2lcl(o));
case LUA_VCCL: return traverseCclosure(g, gco2ccl(o));
case LUA_VPROTO: return traverseproto(g, gco2p(o));
case LUA_VTHREAD: {
lua_State *th = gco2th(o);
linkgclist(th, g->grayagain); /* insert into 'grayagain' list */
black2gray(o);
return traversethread(g, th);
}
default: lua_assert(0); return 0;
}
} | 0 | [
"CWE-125"
]
| lua | 127e7a6c8942b362aa3c6627f44d660a4fb75312 | 214,489,897,559,174,800,000,000,000,000,000,000,000 | 19 | Fixed bug of old finalized objects in the GC
When an object aged OLD1 is finalized, it is moved from the list
'finobj' to the *beginning* of the list 'allgc'. So, this part of the
list (and not only the survival list) must be visited by 'markold'. |
f_resolve(typval_T *argvars, typval_T *rettv)
{
char_u *p;
#ifdef HAVE_READLINK
char_u *buf = NULL;
#endif
p = tv_get_string(&argvars[0]);
#ifdef FEAT_SHORTCUT
{
char_u *v = NULL;
v = mch_resolve_shortcut(p);
if (v != NULL)
rettv->vval.v_string = v;
else
rettv->vval.v_string = vim_strsave(p);
}
#else
# ifdef HAVE_READLINK
{
char_u *cpy;
int len;
char_u *remain = NULL;
char_u *q;
int is_relative_to_current = FALSE;
int has_trailing_pathsep = FALSE;
int limit = 100;
p = vim_strsave(p);
if (p[0] == '.' && (vim_ispathsep(p[1])
|| (p[1] == '.' && (vim_ispathsep(p[2])))))
is_relative_to_current = TRUE;
len = STRLEN(p);
if (len > 0 && after_pathsep(p, p + len))
{
has_trailing_pathsep = TRUE;
p[len - 1] = NUL; /* the trailing slash breaks readlink() */
}
q = getnextcomp(p);
if (*q != NUL)
{
/* Separate the first path component in "p", and keep the
* remainder (beginning with the path separator). */
remain = vim_strsave(q - 1);
q[-1] = NUL;
}
buf = alloc(MAXPATHL + 1);
if (buf == NULL)
goto fail;
for (;;)
{
for (;;)
{
len = readlink((char *)p, (char *)buf, MAXPATHL);
if (len <= 0)
break;
buf[len] = NUL;
if (limit-- == 0)
{
vim_free(p);
vim_free(remain);
emsg(_("E655: Too many symbolic links (cycle?)"));
rettv->vval.v_string = NULL;
goto fail;
}
/* Ensure that the result will have a trailing path separator
* if the argument has one. */
if (remain == NULL && has_trailing_pathsep)
add_pathsep(buf);
/* Separate the first path component in the link value and
* concatenate the remainders. */
q = getnextcomp(vim_ispathsep(*buf) ? buf + 1 : buf);
if (*q != NUL)
{
if (remain == NULL)
remain = vim_strsave(q - 1);
else
{
cpy = concat_str(q - 1, remain);
if (cpy != NULL)
{
vim_free(remain);
remain = cpy;
}
}
q[-1] = NUL;
}
q = gettail(p);
if (q > p && *q == NUL)
{
/* Ignore trailing path separator. */
q[-1] = NUL;
q = gettail(p);
}
if (q > p && !mch_isFullName(buf))
{
/* symlink is relative to directory of argument */
cpy = alloc((unsigned)(STRLEN(p) + STRLEN(buf) + 1));
if (cpy != NULL)
{
STRCPY(cpy, p);
STRCPY(gettail(cpy), buf);
vim_free(p);
p = cpy;
}
}
else
{
vim_free(p);
p = vim_strsave(buf);
}
}
if (remain == NULL)
break;
/* Append the first path component of "remain" to "p". */
q = getnextcomp(remain + 1);
len = q - remain - (*q != NUL);
cpy = vim_strnsave(p, STRLEN(p) + len);
if (cpy != NULL)
{
STRNCAT(cpy, remain, len);
vim_free(p);
p = cpy;
}
/* Shorten "remain". */
if (*q != NUL)
STRMOVE(remain, q - 1);
else
VIM_CLEAR(remain);
}
/* If the result is a relative path name, make it explicitly relative to
* the current directory if and only if the argument had this form. */
if (!vim_ispathsep(*p))
{
if (is_relative_to_current
&& *p != NUL
&& !(p[0] == '.'
&& (p[1] == NUL
|| vim_ispathsep(p[1])
|| (p[1] == '.'
&& (p[2] == NUL
|| vim_ispathsep(p[2]))))))
{
/* Prepend "./". */
cpy = concat_str((char_u *)"./", p);
if (cpy != NULL)
{
vim_free(p);
p = cpy;
}
}
else if (!is_relative_to_current)
{
/* Strip leading "./". */
q = p;
while (q[0] == '.' && vim_ispathsep(q[1]))
q += 2;
if (q > p)
STRMOVE(p, p + 2);
}
}
/* Ensure that the result will have no trailing path separator
* if the argument had none. But keep "/" or "//". */
if (!has_trailing_pathsep)
{
q = p + STRLEN(p);
if (after_pathsep(p, q))
*gettail_sep(p) = NUL;
}
rettv->vval.v_string = p;
}
# else
rettv->vval.v_string = vim_strsave(p);
# endif
#endif
simplify_filename(rettv->vval.v_string);
#ifdef HAVE_READLINK
fail:
vim_free(buf);
#endif
rettv->v_type = VAR_STRING;
} | 0 | [
"CWE-78"
]
| vim | 8c62a08faf89663e5633dc5036cd8695c80f1075 | 332,520,519,045,599,370,000,000,000,000,000,000,000 | 199 | patch 8.1.0881: can execute shell commands in rvim through interfaces
Problem: Can execute shell commands in rvim through interfaces.
Solution: Disable using interfaces in restricted mode. Allow for writing
file with writefile(), histadd() and a few others. |
static BOOL clear_format_map(wfClipboard* clipboard)
{
size_t i;
formatMapping* map;
if (!clipboard)
return FALSE;
if (clipboard->format_mappings)
{
for (i = 0; i < clipboard->map_capacity; i++)
{
map = &clipboard->format_mappings[i];
map->remote_format_id = 0;
map->local_format_id = 0;
free(map->name);
map->name = NULL;
}
}
clipboard->map_size = 0;
return TRUE;
} | 0 | [
"CWE-20"
]
| FreeRDP | 0d79670a28c0ab049af08613621aa0c267f977e9 | 308,459,647,956,114,760,000,000,000,000,000,000,000 | 23 | Fixed missing input checks for file contents request
reported by Valentino Ricotta (Thalium) |
static int handle_monitor_trap(struct kvm_vcpu *vcpu)
{
return 1;
} | 0 | [
"CWE-284"
]
| linux | 727ba748e110b4de50d142edca9d6a9b7e6111d8 | 131,784,959,079,122,420,000,000,000,000,000,000,000 | 4 | kvm: nVMX: Enforce cpl=0 for VMX instructions
VMX instructions executed inside a L1 VM will always trigger a VM exit
even when executed with cpl 3. This means we must perform the
privilege check in software.
Fixes: 70f3aac964ae("kvm: nVMX: Remove superfluous VMX instruction fault checks")
Cc: [email protected]
Signed-off-by: Felix Wilhelm <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
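A toy sketch of the software privilege check the KVM message above calls for: refuse an emulated privileged operation unless the guest's CPL is 0. current_cpl and inject_gp are placeholders, not the actual KVM helpers.

#include <stdio.h>

/* Placeholder for reading the guest's current privilege level. */
static int current_cpl(void) { return 3; }

static int inject_gp(void) { fprintf(stderr, "inject #GP\n"); return 1; }

/* Emulated handler for a privileged instruction: bail out unless CPL == 0. */
static int handle_privileged_op(void)
{
    if (current_cpl() != 0)
        return inject_gp();   /* deny: instruction came from user mode */
    /* ... perform the privileged emulation ... */
    return 1;
}

int main(void) { return handle_privileged_op() ? 0 : 1; }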
process_pa_data_to_md(krb5_context context,
const krb5_creds *creds,
const AS_REQ *a,
krb5_get_init_creds_ctx *ctx,
METHOD_DATA *in_md,
METHOD_DATA **out_md,
krb5_prompter_fct prompter,
void *prompter_data)
{
krb5_error_code ret;
ALLOC(*out_md, 1);
if (*out_md == NULL)
return krb5_enomem(context);
(*out_md)->len = 0;
(*out_md)->val = NULL;
if (_krb5_have_debug(context, 5)) {
unsigned i;
_krb5_debug(context, 5, "KDC send %d patypes", in_md->len);
for (i = 0; i < in_md->len; i++)
_krb5_debug(context, 5, "KDC send PA-DATA type: %d", in_md->val[i].padata_type);
}
/*
* Make sure we don't send both ENC-TS and PK-INIT pa data, no
* need to expose our password protecting our PKCS12 key.
*/
if (ctx->pk_init_ctx) {
_krb5_debug(context, 5, "krb5_get_init_creds: "
"prepareing PKINIT padata (%s)",
(ctx->used_pa_types & USED_PKINIT_W2K) ? "win2k" : "ietf");
if (ctx->used_pa_types & USED_PKINIT_W2K) {
krb5_set_error_message(context, KRB5_GET_IN_TKT_LOOP,
"Already tried pkinit, looping");
return KRB5_GET_IN_TKT_LOOP;
}
ret = pa_data_to_md_pkinit(context, a, creds->client,
(ctx->used_pa_types & USED_PKINIT),
ctx, *out_md);
if (ret)
return ret;
if (ctx->used_pa_types & USED_PKINIT)
ctx->used_pa_types |= USED_PKINIT_W2K;
else
ctx->used_pa_types |= USED_PKINIT;
} else if (in_md->len != 0) {
struct pa_info_data *paid, *ppaid;
unsigned flag;
paid = calloc(1, sizeof(*paid));
if (paid == NULL)
return krb5_enomem(context);
paid->etype = KRB5_ENCTYPE_NULL;
ppaid = process_pa_info(context, creds->client, a, paid, in_md);
if (ppaid)
flag = USED_ENC_TS_INFO;
else
flag = USED_ENC_TS_GUESS;
if (ctx->used_pa_types & flag) {
if (ppaid)
free_paid(context, ppaid);
free(paid);
krb5_set_error_message(context, KRB5_GET_IN_TKT_LOOP,
"Already tried ENC-TS-%s, looping",
flag == USED_ENC_TS_INFO ? "info" : "guess");
return KRB5_GET_IN_TKT_LOOP;
}
pa_data_to_md_ts_enc(context, a, creds->client, ctx, ppaid, *out_md);
ctx->used_pa_types |= flag;
if (ppaid) {
if (ctx->ppaid) {
free_paid(context, ctx->ppaid);
free(ctx->ppaid);
}
ctx->ppaid = ppaid;
} else
free(paid);
}
pa_data_add_pac_request(context, ctx, *out_md);
if ((ctx->fast_state.flags & KRB5_FAST_DISABLED) == 0) {
ret = krb5_padata_add(context, *out_md, KRB5_PADATA_REQ_ENC_PA_REP, NULL, 0);
if (ret)
return ret;
}
if ((*out_md)->len == 0) {
free(*out_md);
*out_md = NULL;
}
return 0;
} | 0 | [
"CWE-320"
]
| heimdal | 2f7f3d9960aa6ea21358bdf3687cee5149aa35cf | 45,535,660,851,987,900,000,000,000,000,000,000,000 | 108 | CVE-2019-12098: krb5: always confirm PA-PKINIT-KX for anon PKINIT
RFC8062 Section 7 requires verification of the PA-PKINIT-KX key exchange
when anonymous PKINIT is used. Failure to do so can permit an active
attacker to become a man-in-the-middle.
Introduced by a1ef548600c5bb51cf52a9a9ea12676506ede19f. First tagged
release Heimdal 1.4.0.
CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N (4.8)
Change-Id: I6cc1c0c24985936468af08693839ac6c3edda133
Signed-off-by: Jeffrey Altman <[email protected]>
Approved-by: Jeffrey Altman <[email protected]>
(cherry picked from commit 38c797e1ae9b9c8f99ae4aa2e73957679031fd2b) |
static noinline int mmc_ioctl_cdrom_volume(struct cdrom_device_info *cdi,
void __user *arg,
struct packet_command *cgc,
unsigned int cmd)
{
struct cdrom_volctrl volctrl;
unsigned char buffer[32];
char mask[sizeof(buffer)];
unsigned short offset;
int ret;
cd_dbg(CD_DO_IOCTL, "entering CDROMVOLUME\n");
if (copy_from_user(&volctrl, (struct cdrom_volctrl __user *)arg,
sizeof(volctrl)))
return -EFAULT;
cgc->buffer = buffer;
cgc->buflen = 24;
ret = cdrom_mode_sense(cdi, cgc, GPMODE_AUDIO_CTL_PAGE, 0);
if (ret)
return ret;
/* originally the code depended on buffer[1] to determine
how much data is available for transfer. buffer[1] is
unfortunately ambiguous and the only reliable way seems
to be to simply skip over the block descriptor... */
offset = 8 + be16_to_cpu(*(__be16 *)(buffer + 6));
if (offset + 16 > sizeof(buffer))
return -E2BIG;
if (offset + 16 > cgc->buflen) {
cgc->buflen = offset + 16;
ret = cdrom_mode_sense(cdi, cgc,
GPMODE_AUDIO_CTL_PAGE, 0);
if (ret)
return ret;
}
/* sanity check */
if ((buffer[offset] & 0x3f) != GPMODE_AUDIO_CTL_PAGE ||
buffer[offset + 1] < 14)
return -EINVAL;
/* now we have the current volume settings. if it was only
a CDROMVOLREAD, return these values */
if (cmd == CDROMVOLREAD) {
volctrl.channel0 = buffer[offset+9];
volctrl.channel1 = buffer[offset+11];
volctrl.channel2 = buffer[offset+13];
volctrl.channel3 = buffer[offset+15];
if (copy_to_user((struct cdrom_volctrl __user *)arg, &volctrl,
sizeof(volctrl)))
return -EFAULT;
return 0;
}
/* get the volume mask */
cgc->buffer = mask;
ret = cdrom_mode_sense(cdi, cgc, GPMODE_AUDIO_CTL_PAGE, 1);
if (ret)
return ret;
buffer[offset + 9] = volctrl.channel0 & mask[offset + 9];
buffer[offset + 11] = volctrl.channel1 & mask[offset + 11];
buffer[offset + 13] = volctrl.channel2 & mask[offset + 13];
buffer[offset + 15] = volctrl.channel3 & mask[offset + 15];
/* set volume */
cgc->buffer = buffer + offset - 8;
memset(cgc->buffer, 0, 8);
return cdrom_mode_select(cdi, cgc);
} | 0 | [
"CWE-119",
"CWE-787"
]
| linux | 9de4ee40547fd315d4a0ed1dd15a2fa3559ad707 | 289,411,178,079,489,730,000,000,000,000,000,000,000 | 74 | cdrom: information leak in cdrom_ioctl_media_changed()
This cast is wrong. "cdi->capacity" is an int and "arg" is an unsigned
long. The way the check is written now, if one of the high 32 bits is
set then we could read outside the info->slots[] array.
This bug is pretty old and it predates git.
Reviewed-by: Christoph Hellwig <[email protected]>
Cc: [email protected]
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
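A minimal model of the comparison problem described in the cdrom message above: an untrusted 64-bit value compared against an int after a cast can lose its high bits, so the bound has to be checked in the wider unsigned type before indexing. media_changed, NUM_SLOTS and slot_state are invented for the example.

#include <stdio.h>

#define NUM_SLOTS 8

static int slot_state[NUM_SLOTS];

/* 'arg' is untrusted and arrives as unsigned long; 'capacity' is a plain int,
 * so the comparison must happen in the wider unsigned type, not after a cast. */
static int media_changed(unsigned long arg, int capacity)
{
    if (capacity <= 0 || arg >= (unsigned long)capacity || arg >= NUM_SLOTS)
        return -1;
    return slot_state[arg];
}

int main(void)
{
    printf("%d\n", media_changed(0xffffffffUL, NUM_SLOTS));  /* rejected */
    printf("%d\n", media_changed(2UL, NUM_SLOTS));           /* valid index */
    return 0;
}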
ServerItem::ServerItem(const PublicInfo &pi) : QTreeWidgetItem(QTreeWidgetItem::UserType) {
siParent = NULL;
bParent = false;
itType = PublicType;
qsName = pi.qsName;
qsHostname = pi.qsIp;
usPort = pi.usPort;
qsUrl = pi.quUrl.toString();
qsCountry = pi.qsCountry;
qsCountryCode = pi.qsCountryCode;
qsContinentCode = pi.qsContinentCode;
bCA = pi.bCA;
init();
} | 0 | [
"CWE-59",
"CWE-61"
]
| mumble | e59ee87abe249f345908c7d568f6879d16bfd648 | 164,329,992,698,863,900,000,000,000,000,000,000,000 | 15 | FIX(client): Only allow "http"/"https" for URLs in ConnectDialog
Our public server list registration script doesn't have an URL scheme
whitelist for the website field.
Turns out a malicious server can register itself with a dangerous URL in
an attempt to attack a user's machine.
User interaction is required, as the URL has to be opened by
right-clicking on the server entry and clicking on "Open Webpage".
This commit introduces a client-side whitelist, which only allows "http"
and "https" schemes. We will also implement it in our public list.
In future we should probably add a warning QMessageBox informing the
user that there's no guarantee the URL is safe (regardless of the
scheme).
Thanks a lot to https://positive.security for reporting the RCE
vulnerability to us privately. |
_ppdCacheGetBin(
_ppd_cache_t *pc, /* I - PPD cache and mapping data */
const char *output_bin) /* I - PPD OutputBin string */
{
int i; /* Looping var */
/*
* Range check input...
*/
if (!pc || !output_bin)
return (NULL);
/*
* Look up the OutputBin string...
*/
for (i = 0; i < pc->num_bins; i ++)
if (!_cups_strcasecmp(output_bin, pc->bins[i].ppd))
return (pc->bins[i].pwg);
return (NULL);
} | 0 | [
"CWE-93"
]
| cups | 07428f6a640ff93aa0b4cc69ca372e2cf8490e41 | 81,717,356,577,914,920,000,000,000,000,000,000,000 | 25 | Only list supported PDLs (Issue #4923) |
static inline enum fbq_type fbq_classify_rq(struct rq *rq)
{
return regular;
} | 0 | [
"CWE-400",
"CWE-703",
"CWE-835"
]
| linux | c40f7d74c741a907cfaeb73a7697081881c497d0 | 319,163,261,713,369,970,000,000,000,000,000,000,000 | 4 | sched/fair: Fix infinite loop in update_blocked_averages() by reverting a9e7f6544b9c
Zhipeng Xie, Xie XiuQi and Sargun Dhillon reported lockups in the
scheduler under high loads, starting at around the v4.18 time frame,
and Zhipeng Xie tracked it down to bugs in the rq->leaf_cfs_rq_list
manipulation.
Do a (manual) revert of:
a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
It turns out that the list_del_leaf_cfs_rq() introduced by this commit
is a surprising property that was not considered in followup commits
such as:
9c2791f936ef ("sched/fair: Fix hierarchical order in rq->leaf_cfs_rq_list")
As Vincent Guittot explains:
"I think that there is a bigger problem with commit a9e7f6544b9c and
cfs_rq throttling:
Let's take the example of the following topology TG2 --> TG1 --> root:
1) The 1st time a task is enqueued, we will add TG2 cfs_rq then TG1
cfs_rq to leaf_cfs_rq_list and we are sure to do the whole branch in
one path because it has never been used and can't be throttled so
tmp_alone_branch will point to leaf_cfs_rq_list at the end.
2) Then TG1 is throttled
3) and we add TG3 as a new child of TG1.
4) The 1st enqueue of a task on TG3 will add TG3 cfs_rq just before TG1
cfs_rq and tmp_alone_branch will stay on rq->leaf_cfs_rq_list.
With commit a9e7f6544b9c, we can del a cfs_rq from rq->leaf_cfs_rq_list.
So if the load of TG1 cfs_rq becomes NULL before step 2) above, TG1
cfs_rq is removed from the list.
Then at step 4), TG3 cfs_rq is added at the beginning of rq->leaf_cfs_rq_list
but tmp_alone_branch still points to TG3 cfs_rq because its throttled
parent can't be enqueued when the lock is released.
tmp_alone_branch doesn't point to rq->leaf_cfs_rq_list whereas it should.
So if TG3 cfs_rq is removed or destroyed before tmp_alone_branch
points on another TG cfs_rq, the next TG cfs_rq that will be added,
will be linked outside rq->leaf_cfs_rq_list - which is bad.
In addition, we can break the ordering of the cfs_rq in
rq->leaf_cfs_rq_list but this ordering is used to update and
propagate the update from leaf down to root."
Instead of trying to work through all these cases and trying to reproduce
the very high loads that produced the lockup to begin with, simplify
the code temporarily by reverting a9e7f6544b9c - which change was clearly
not thought through completely.
This (hopefully) gives us a kernel that doesn't lock up so people
can continue to enjoy their holidays without worrying about regressions. ;-)
[ mingo: Wrote changelog, fixed weird spelling in code comment while at it. ]
Analyzed-by: Xie XiuQi <[email protected]>
Analyzed-by: Vincent Guittot <[email protected]>
Reported-by: Zhipeng Xie <[email protected]>
Reported-by: Sargun Dhillon <[email protected]>
Reported-by: Xie XiuQi <[email protected]>
Tested-by: Zhipeng Xie <[email protected]>
Tested-by: Sargun Dhillon <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Acked-by: Vincent Guittot <[email protected]>
Cc: <[email protected]> # v4.13+
Cc: Bin Li <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Fixes: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]> |
static void phar_spl_foreign_clone(spl_filesystem_object *src, spl_filesystem_object *dst) /* {{{ */
{
phar_archive_data *phar_data = (phar_archive_data *) dst->oth;
if (!phar_data->is_persistent) {
++(phar_data->refcount);
}
} | 0 | [
"CWE-20"
]
| php-src | 1e9b175204e3286d64dfd6c9f09151c31b5e099a | 51,359,494,917,019,550,000,000,000,000,000,000,000 | 8 | Fix bug #71860: Require valid paths for phar filenames |
static inline void inc_slabs_node(struct kmem_cache *s, int node,
int objects) {} | 0 | [
"CWE-189"
]
| linux | f8bd2258e2d520dff28c855658bd24bdafb5102d | 79,275,815,031,060,980,000,000,000,000,000,000,000 | 2 | remove div_long_long_rem
x86 is the only arch right now, which provides an optimized for
div_long_long_rem and it has the downside that one has to be very careful that
the divide doesn't overflow.
The API is a little awkward, as the arguments for the unsigned divide are
signed. The signed version also doesn't handle a negative divisor and
produces worse code on 64bit archs.
There is little incentive to keep this API alive, so this converts the few
users to the new API.
Signed-off-by: Roman Zippel <[email protected]>
Cc: Ralf Baechle <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: john stultz <[email protected]>
Cc: Christoph Lameter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static bool checkreturn pb_readbyte(pb_istream_t *stream, uint8_t *buf)
{
if (stream->bytes_left == 0)
PB_RETURN_ERROR(stream, "end-of-stream");
#ifndef PB_BUFFER_ONLY
if (!stream->callback(stream, buf, 1))
PB_RETURN_ERROR(stream, "io error");
#else
*buf = *(uint8_t*)stream->state;
stream->state = (uint8_t*)stream->state + 1;
#endif
stream->bytes_left--;
return true;
} | 0 | [
"CWE-125"
]
| nanopb | 7b396821ddd06df8e39143f16e1dc0a4645b89a3 | 208,759,917,778,108,640,000,000,000,000,000,000,000 | 17 | Fix invalid free() after failed realloc() (GHSA-gcx3-7m76-287p) |
GF_Err Media_UpdateSample(GF_MediaBox *mdia, u32 sampleNumber, GF_ISOSample *sample, Bool data_only)
{
GF_Err e;
u32 drefIndex, chunkNum, descIndex;
u64 newOffset, DTS;
GF_DataEntryURLBox *Dentry;
GF_SampleTableBox *stbl;
if (!mdia || !sample || !sampleNumber || !mdia->mediaTrack->moov->mov->editFileMap)
return GF_BAD_PARAM;
stbl = mdia->information->sampleTable;
if (!data_only) {
//check we have the sampe dts
e = stbl_GetSampleDTS(stbl->TimeToSample, sampleNumber, &DTS);
if (e) return e;
if (DTS != sample->DTS) return GF_BAD_PARAM;
}
//get our infos
stbl_GetSampleInfos(stbl, sampleNumber, &newOffset, &chunkNum, &descIndex, NULL);
//then check the data ref
e = Media_GetSampleDesc(mdia, descIndex, NULL, &drefIndex);
if (e) return e;
Dentry = (GF_DataEntryURLBox*)gf_list_get(mdia->information->dataInformation->dref->child_boxes, drefIndex - 1);
if (!Dentry) return GF_ISOM_INVALID_FILE;
if (Dentry->flags != 1) return GF_BAD_PARAM;
//MEDIA DATA EDIT: write this new sample to the edit temp file
newOffset = gf_isom_datamap_get_offset(mdia->mediaTrack->moov->mov->editFileMap);
if (sample->dataLength) {
e = gf_isom_datamap_add_data(mdia->mediaTrack->moov->mov->editFileMap, sample->data, sample->dataLength);
if (e) return e;
}
if (data_only) {
stbl_SetSampleSize(stbl->SampleSize, sampleNumber, sample->dataLength);
return stbl_SetChunkOffset(mdia, sampleNumber, newOffset);
}
return UpdateSample(mdia, sampleNumber, sample->dataLength, sample->CTS_Offset, newOffset, sample->IsRAP);
} | 0 | [
"CWE-787"
]
| gpac | 328def7d3b93847d64ecb6e9e0399684e57c3eca | 180,369,596,394,530,700,000,000,000,000,000,000,000 | 44 | fixed #1766 (fuzz) |
static st_plugin_dl *plugin_dl_insert_or_reuse(struct st_plugin_dl *plugin_dl)
{
uint i;
struct st_plugin_dl *tmp;
DBUG_ENTER("plugin_dl_insert_or_reuse");
for (i= 0; i < plugin_dl_array.elements; i++)
{
tmp= *dynamic_element(&plugin_dl_array, i, struct st_plugin_dl **);
if (! tmp->ref_count)
{
memcpy(tmp, plugin_dl, sizeof(struct st_plugin_dl));
DBUG_RETURN(tmp);
}
}
if (insert_dynamic(&plugin_dl_array, (uchar*)&plugin_dl))
DBUG_RETURN(0);
tmp= *dynamic_element(&plugin_dl_array, plugin_dl_array.elements - 1,
struct st_plugin_dl **)=
(struct st_plugin_dl *) memdup_root(&plugin_mem_root, (uchar*)plugin_dl,
sizeof(struct st_plugin_dl));
DBUG_RETURN(tmp);
} | 0 | [
"CWE-416"
]
| server | c05fd700970ad45735caed3a6f9930d4ce19a3bd | 158,845,668,306,540,780,000,000,000,000,000,000,000 | 22 | MDEV-26323 use-after-poison issue of MariaDB server |
int ha_myisam::check(THD* thd, HA_CHECK_OPT* check_opt)
{
if (!file) return HA_ADMIN_INTERNAL_ERROR;
int error;
MI_CHECK param;
MYISAM_SHARE* share = file->s;
const char *old_proc_info=thd->proc_info;
thd_proc_info(thd, "Checking table");
myisamchk_init(¶m);
param.thd = thd;
param.op_name = "check";
param.db_name= table->s->db.str;
param.table_name= table->alias;
param.testflag = check_opt->flags | T_CHECK | T_SILENT;
param.stats_method= (enum_mi_stats_method)THDVAR(thd, stats_method);
if (!(table->db_stat & HA_READ_ONLY))
param.testflag|= T_STATISTICS;
param.using_global_keycache = 1;
if (!mi_is_crashed(file) &&
(((param.testflag & T_CHECK_ONLY_CHANGED) &&
!(share->state.changed & (STATE_CHANGED | STATE_CRASHED |
STATE_CRASHED_ON_REPAIR)) &&
share->state.open_count == 0) ||
((param.testflag & T_FAST) && (share->state.open_count ==
(uint) (share->global_changed ? 1 : 0)))))
return HA_ADMIN_ALREADY_DONE;
error = chk_status(¶m, file); // Not fatal
error = chk_size(¶m, file);
if (!error)
error |= chk_del(¶m, file, param.testflag);
if (!error)
error = chk_key(¶m, file);
if (!error)
{
if ((!(param.testflag & T_QUICK) &&
((share->options &
(HA_OPTION_PACK_RECORD | HA_OPTION_COMPRESS_RECORD)) ||
(param.testflag & (T_EXTEND | T_MEDIUM)))) ||
mi_is_crashed(file))
{
uint old_testflag=param.testflag;
param.testflag|=T_MEDIUM;
if (!(error= init_io_cache(¶m.read_cache, file->dfile,
my_default_record_cache_size, READ_CACHE,
share->pack.header_length, 1, MYF(MY_WME))))
{
error= chk_data_link(¶m, file, param.testflag & T_EXTEND);
end_io_cache(&(param.read_cache));
}
param.testflag= old_testflag;
}
}
if (!error)
{
if ((share->state.changed & (STATE_CHANGED |
STATE_CRASHED_ON_REPAIR |
STATE_CRASHED | STATE_NOT_ANALYZED)) ||
(param.testflag & T_STATISTICS) ||
mi_is_crashed(file))
{
file->update|=HA_STATE_CHANGED | HA_STATE_ROW_CHANGED;
mysql_mutex_lock(&share->intern_lock);
share->state.changed&= ~(STATE_CHANGED | STATE_CRASHED |
STATE_CRASHED_ON_REPAIR);
if (!(table->db_stat & HA_READ_ONLY))
error=update_state_info(¶m,file,UPDATE_TIME | UPDATE_OPEN_COUNT |
UPDATE_STAT);
mysql_mutex_unlock(&share->intern_lock);
info(HA_STATUS_NO_LOCK | HA_STATUS_TIME | HA_STATUS_VARIABLE |
HA_STATUS_CONST);
}
}
else if (!mi_is_crashed(file) && !thd->killed)
{
mi_mark_crashed(file);
file->update |= HA_STATE_CHANGED | HA_STATE_ROW_CHANGED;
}
thd_proc_info(thd, old_proc_info);
return error ? HA_ADMIN_CORRUPT : HA_ADMIN_OK;
} | 0 | [
"CWE-362"
]
| mysql-server | 4e5473862e6852b0f3802b0cd0c6fa10b5253291 | 53,413,662,457,065,590,000,000,000,000,000,000,000 | 85 | Bug#24388746: PRIVILEGE ESCALATION AND RACE CONDITION USING CREATE TABLE
During REPAIR TABLE of a MyISAM table, a temporary data file (.TMD)
is created. When repair finishes, this file is renamed to the original
.MYD file. The problem was that during this rename, we copied the
stats from the old file to the new file with chmod/chown. If a user
managed to replace the temporary file before chmod/chown was executed,
it was possible to get an arbitrary file with the privileges of the
mysql user.
This patch fixes the problem by not copying stats from the old
file to the new file. This is not needed as the new file was
created with the correct stats. This fix only changes server
behavior - external utilities such as myisamchk still do
chmod/chown.
No test case provided since the problem involves synchronization
with file system operations. |
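A rough sketch of one generic way to avoid the path-based chmod race the MyISAM message above describes: operate on an already-open file descriptor (fchmod) instead of the path, so a swapped-in file cannot inherit the privileged operation. This is a general mitigation under stated assumptions, not the actual server fix, which simply stops copying the stats.

#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

/* Apply the mode to the file we actually created, not to whatever the
 * path happens to name by the time the mode change runs. */
static int create_with_mode(const char *path, mode_t mode)
{
    int fd = open(path, O_WRONLY | O_CREAT | O_EXCL, 0600);
    if (fd < 0)
        return -1;
    if (fchmod(fd, mode) != 0) {   /* fd-based: immune to path swaps */
        close(fd);
        unlink(path);
        return -1;
    }
    close(fd);
    return 0;
}

int main(void)
{
    return create_with_mode("demo.tmp", 0640) == 0 ? 0 : 1;
}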
TEST(LtOp, MatchesDotNotationNull) {
BSONObj operand = BSON("$lt" << BSONNULL);
LTMatchExpression lt;
ASSERT(lt.init("a.b", operand["$lt"]).isOK());
ASSERT(!lt.matchesBSON(BSONObj(), NULL));
ASSERT(!lt.matchesBSON(BSON("a" << BSONNULL), NULL));
ASSERT(!lt.matchesBSON(BSON("a" << 4), NULL));
ASSERT(!lt.matchesBSON(BSON("a" << BSONObj()), NULL));
ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << BSONNULL))), NULL));
ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(BSON("a" << 4) << BSON("b" << 4))), NULL));
ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(4)), NULL));
ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 4))), NULL));
} | 0 | []
| mongo | b0ef26c639112b50648a02d969298650fbd402a4 | 266,390,907,680,847,600,000,000,000,000,000,000,000 | 13 | SERVER-51083 Reject invalid UTF-8 from $regex match expressions |
void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
int depth = path->p_depth;
int i;
for (i = 0; i <= depth; i++, path++)
if (path->p_bh) {
brelse(path->p_bh);
path->p_bh = NULL;
}
} | 0 | [
"CWE-362"
]
| linux-2.6 | dee1f973ca341c266229faa5a1a5bb268bed3531 | 43,892,952,864,940,260,000,000,000,000,000,000,000 | 11 | ext4: race-condition protection for ext4_convert_unwritten_extents_endio
We assumed that at the time we call ext4_convert_unwritten_extents_endio()
extent in question is fully inside [map.m_lblk, map->m_len] because
it was already split during submission. But this may not be true due to
a race between writeback and fallocate.
If the extent in question is larger than requested we will split it again.
Special precautions should be taken if zeroout is required because
[map.m_lblk, map->m_len] already contains valid data.
Signed-off-by: Dmitry Monakhov <[email protected]>
Signed-off-by: "Theodore Ts'o" <[email protected]>
Cc: [email protected] |
webSocketsGenMd5(char * target, char *key1, char *key2, char *key3)
{
unsigned int i, spaces1 = 0, spaces2 = 0;
unsigned long num1 = 0, num2 = 0;
unsigned char buf[17];
struct iovec iov[1];
for (i=0; i < strlen(key1); i++) {
if (key1[i] == ' ') {
spaces1 += 1;
}
if ((key1[i] >= 48) && (key1[i] <= 57)) {
num1 = num1 * 10 + (key1[i] - 48);
}
}
num1 = num1 / spaces1;
for (i=0; i < strlen(key2); i++) {
if (key2[i] == ' ') {
spaces2 += 1;
}
if ((key2[i] >= 48) && (key2[i] <= 57)) {
num2 = num2 * 10 + (key2[i] - 48);
}
}
num2 = num2 / spaces2;
/* Pack it big-endian */
buf[0] = (num1 & 0xff000000) >> 24;
buf[1] = (num1 & 0xff0000) >> 16;
buf[2] = (num1 & 0xff00) >> 8;
buf[3] = num1 & 0xff;
buf[4] = (num2 & 0xff000000) >> 24;
buf[5] = (num2 & 0xff0000) >> 16;
buf[6] = (num2 & 0xff00) >> 8;
buf[7] = num2 & 0xff;
strncpy((char *)buf+8, key3, 8);
buf[16] = '\0';
iov[0].iov_base = buf;
iov[0].iov_len = 16;
digestmd5(iov, 1, target);
target[16] = '\0';
return;
} | 0 | [
"CWE-787"
]
| libvncserver | aac95a9dcf4bbba87b76c72706c3221a842ca433 | 67,919,639,578,332,980,000,000,000,000,000,000,000 | 48 | fix overflow and refactor websockets decode (Hybi)
fix critical heap-based buffer overflow which allowed easy modification
of a return address via an overwritten function pointer
fix bug causing connections to fail due to a "one websocket frame = one
ws_read" assumption, which failed with LibVNCServer-0.9.11
refactor websocket Hybi decode to use a simple state machine for
decoding of websocket frames |
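A small, generic illustration of the bounds discipline the websocket fix above is about: never let an attacker-influenced length drive a copy into a fixed-size buffer. handle_frame and the 16-byte limit are assumptions for the example, not the actual Hybi decoder.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DIGEST_INPUT_LEN 16

/* Copy at most DIGEST_INPUT_LEN payload bytes, rejecting anything larger. */
static int handle_frame(const uint8_t *payload, size_t len)
{
    uint8_t buf[DIGEST_INPUT_LEN];

    if (len > sizeof(buf))
        return -1;                 /* refuse oversized input instead of overflowing */
    memcpy(buf, payload, len);
    /* ... feed buf to the digest / frame state machine ... */
    return 0;
}

int main(void)
{
    uint8_t p[8] = {0};
    printf("%d\n", handle_frame(p, sizeof(p)));
    return 0;
}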
int __must_check media_device_register(struct media_device *mdev)
{
int ret;
if (WARN_ON(mdev->dev == NULL || mdev->model[0] == 0))
return -EINVAL;
mdev->entity_id = 1;
INIT_LIST_HEAD(&mdev->entities);
spin_lock_init(&mdev->lock);
mutex_init(&mdev->graph_mutex);
/* Register the device node. */
mdev->devnode.fops = &media_device_fops;
mdev->devnode.parent = mdev->dev;
mdev->devnode.release = media_device_release;
ret = media_devnode_register(&mdev->devnode);
if (ret < 0)
return ret;
ret = device_create_file(&mdev->devnode.dev, &dev_attr_model);
if (ret < 0) {
media_devnode_unregister(&mdev->devnode);
return ret;
}
return 0;
} | 0 | [
"CWE-200"
]
| linux | e6a623460e5fc960ac3ee9f946d3106233fd28d8 | 267,652,676,899,040,800,000,000,000,000,000,000,000 | 28 | [media] media-device: fix infoleak in ioctl media_enum_entities()
This fixes CVE-2014-1739.
Signed-off-by: Salva Peiró <[email protected]>
Acked-by: Laurent Pinchart <[email protected]>
Cc: [email protected]
Signed-off-by: Mauro Carvalho Chehab <[email protected]> |
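The media_enum_entities infoleak mentioned above is the familiar "copy a partially initialized struct out" bug. A user-space-style sketch of the fix pattern — zero the whole struct before filling selected fields — with invented names; the kernel equivalent would end in copy_to_user rather than printf.

#include <stdio.h>
#include <string.h>

struct entity_info {
    unsigned int id;
    char name[32];
    unsigned int reserved[4];   /* padding and spare fields leak if left untouched */
};

static void fill_entity_info(struct entity_info *out, unsigned int id, const char *name)
{
    memset(out, 0, sizeof(*out));            /* no stale stack bytes escape */
    out->id = id;
    snprintf(out->name, sizeof(out->name), "%s", name);
}

int main(void)
{
    struct entity_info info;
    fill_entity_info(&info, 1, "sensor");
    printf("%u %s\n", info.id, info.name);
    return 0;
}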
static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
{
struct hci_dev *hdev = hcon->hdev;
struct sco_conn *conn = hcon->sco_data;
if (conn)
return conn;
conn = kzalloc(sizeof(struct sco_conn), GFP_KERNEL);
if (!conn)
return NULL;
spin_lock_init(&conn->lock);
INIT_DELAYED_WORK(&conn->timeout_work, sco_sock_timeout);
hcon->sco_data = conn;
conn->hcon = hcon;
if (hdev->sco_mtu > 0)
conn->mtu = hdev->sco_mtu;
else
conn->mtu = 60;
BT_DBG("hcon %p conn %p", hcon, conn);
return conn;
} | 0 | [
"CWE-416"
]
| linux | 0771cbb3b97d3c1d68eecd7f00055f599954c34e | 272,025,704,591,117,240,000,000,000,000,000,000,000 | 27 | Bluetooth: SCO: Replace use of memcpy_from_msg with bt_skb_sendmsg
This makes use of bt_skb_sendmsg instead of allocating a different
buffer to be used with memcpy_from_msg which cause one extra copy.
Signed-off-by: Luiz Augusto von Dentz <[email protected]>
Signed-off-by: Marcel Holtmann <[email protected]> |
CImg<T>& load_rgb(std::FILE *const file, const unsigned int dimw, const unsigned int dimh=1) {
return _load_rgb(file,0,dimw,dimh);
} | 0 | [
"CWE-770"
]
| cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 249,313,661,004,187,060,000,000,000,000,000,000,000 | 3 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file do not exceed file size. |
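A sketch of the sanity check the CImg message above describes: reject a header whose declared dimensions imply more pixel data than the file actually contains. The 3-bytes-per-pixel assumption matches an RGB layout but is otherwise arbitrary, and check_dimensions is an invented helper.

#include <stdint.h>
#include <stdio.h>

/* Return 0 if width*height*3 bytes of pixel data fit in what is left of the
 * file, doing the multiplication in 64 bits so it cannot wrap. */
static int check_dimensions(uint32_t width, uint32_t height,
                            uint64_t bytes_remaining)
{
    uint64_t needed = (uint64_t)width * (uint64_t)height * 3u;
    if (width == 0 || height == 0 || needed > bytes_remaining)
        return -1;
    return 0;
}

int main(void)
{
    printf("%d\n", check_dimensions(640, 480, 1024));          /* rejected */
    printf("%d\n", check_dimensions(640, 480, 640u*480u*3u));  /* ok */
    return 0;
}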
brcmf_notify_roaming_status(struct brcmf_if *ifp,
const struct brcmf_event_msg *e, void *data)
{
struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
u32 event = e->event_code;
u32 status = e->status;
if (event == BRCMF_E_ROAM && status == BRCMF_E_STATUS_SUCCESS) {
if (test_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state))
brcmf_bss_roaming_done(cfg, ifp->ndev, e);
else
brcmf_bss_connect_done(cfg, ifp->ndev, e, true);
}
return 0;
} | 0 | [
"CWE-119",
"CWE-703"
]
| linux | ded89912156b1a47d940a0c954c43afbabd0c42c | 195,560,385,704,595,900,000,000,000,000,000,000,000 | 16 | brcmfmac: avoid potential stack overflow in brcmf_cfg80211_start_ap()
User-space can choose to omit NL80211_ATTR_SSID and only provide raw
IE TLV data. When doing so it can provide SSID IE with length exceeding
the allowed size. The driver further processes this IE, copying it
into a local variable without checking the length. Hence the stack can be
corrupted and used as an exploit.
Cc: [email protected] # v4.7
Reported-by: Daxing Guo <[email protected]>
Reviewed-by: Hante Meuleman <[email protected]>
Reviewed-by: Pieter-Paul Giesberts <[email protected]>
Reviewed-by: Franky Lin <[email protected]>
Signed-off-by: Arend van Spriel <[email protected]>
Signed-off-by: Kalle Valo <[email protected]> |
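A minimal model of the check missing in the brcmfmac bug above: a TLV element's length byte has to be clamped against both the destination buffer and the remaining input before copying. The 32-byte SSID limit mirrors 802.11; copy_ssid and everything else here is invented for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_SSID_LEN 32

/* ie points at a TLV: ie[0]=id, ie[1]=len, ie[2..]=value. */
static int copy_ssid(const uint8_t *ie, size_t ie_buf_len,
                     uint8_t *ssid, size_t *ssid_len)
{
    if (ie_buf_len < 2)
        return -1;
    size_t len = ie[1];
    if (len > MAX_SSID_LEN || len + 2 > ie_buf_len)
        return -1;                 /* oversized or truncated element */
    memcpy(ssid, ie + 2, len);
    *ssid_len = len;
    return 0;
}

int main(void)
{
    uint8_t ie[] = {0x00, 0x04, 'd', 'e', 'm', 'o'};
    uint8_t ssid[MAX_SSID_LEN];
    size_t n = 0;
    if (copy_ssid(ie, sizeof(ie), ssid, &n) == 0)
        printf("%.*s\n", (int)n, ssid);
    return 0;
}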
bool IsString() const { return (type_ == STRING_TYPE); } | 0 | [
"CWE-20"
]
| tinygltf | 52ff00a38447f06a17eab1caa2cf0730a119c751 | 194,982,116,482,238,770,000,000,000,000,000,000,000 | 1 | Do not expand file path since it's not necessary for glTF asset path (URI) and for security reasons (`wordexp`).
static int callback_static_file_uncompressed (const struct _u_request * request, struct _u_response * response, void * user_data) {
size_t length;
FILE * f;
char * file_requested, * file_path, * url_dup_save, * real_path = NULL;
const char * content_type;
int ret = U_CALLBACK_CONTINUE;
if (user_data != NULL && ((struct _u_compressed_inmemory_website_config *)user_data)->files_path != NULL) {
file_requested = o_strdup(request->http_url);
url_dup_save = file_requested;
file_requested += o_strlen(((struct _u_compressed_inmemory_website_config *)user_data)->url_prefix);
while (file_requested[0] == '/') {
file_requested++;
}
if (strchr(file_requested, '#') != NULL) {
*strchr(file_requested, '#') = '\0';
}
if (strchr(file_requested, '?') != NULL) {
*strchr(file_requested, '?') = '\0';
}
if (file_requested == NULL || o_strnullempty(file_requested) || 0 == o_strcmp("/", file_requested)) {
o_free(url_dup_save);
url_dup_save = file_requested = o_strdup("index.html");
}
file_path = msprintf("%s/%s", ((struct _u_compressed_inmemory_website_config *)user_data)->files_path, file_requested);
real_path = realpath(file_path, NULL);
if (0 == o_strncmp(((struct _u_compressed_inmemory_website_config *)user_data)->files_path, real_path, o_strlen(((struct _u_compressed_inmemory_website_config *)user_data)->files_path))) {
f = fopen (file_path, "rb");
if (f) {
fseek (f, 0, SEEK_END);
length = ftell (f);
fseek (f, 0, SEEK_SET);
content_type = u_map_get_case(&((struct _u_compressed_inmemory_website_config *)user_data)->mime_types, get_filename_ext(file_requested));
if (content_type == NULL) {
content_type = u_map_get(&((struct _u_compressed_inmemory_website_config *)user_data)->mime_types, "*");
y_log_message(Y_LOG_LEVEL_WARNING, "Static File Server - Unknown mime type for extension %s", get_filename_ext(file_requested));
}
u_map_put(response->map_header, "Content-Type", content_type);
u_map_copy_into(response->map_header, &((struct _u_compressed_inmemory_website_config *)user_data)->map_header);
if (ulfius_set_stream_response(response, 200, callback_static_file_uncompressed_stream, callback_static_file_uncompressed_stream_free, length, CHUNK, f) != U_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "Static File Server - Error ulfius_set_stream_response");
}
} else {
if (((struct _u_compressed_inmemory_website_config *)user_data)->redirect_on_404 == NULL) {
ret = U_CALLBACK_IGNORE;
} else {
ulfius_add_header_to_response(response, "Location", ((struct _u_compressed_inmemory_website_config *)user_data)->redirect_on_404);
response->status = 302;
}
}
o_free(url_dup_save);
} else {
response->status = 403;
}
o_free(file_path);
free(real_path); // realpath uses malloc
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "Static File Server - Error, user_data is NULL or inconsistent");
ret = U_CALLBACK_ERROR;
}
return ret;
} | 0 | [
"CWE-269",
"CWE-22"
]
| glewlwyd | e3f7245c33897bf9b3a75acfcdb8b7b93974bf11 | 269,980,508,615,108,100,000,000,000,000,000,000,000 | 70 | Fix file access check for directory traversal, and fix call for callback_static_file_uncompressed if header not set |
static void print_tiles(void) {
/* hack for viewing tile diffs on the screen. */
static char *prev = NULL;
int n, x, y, ms = 1500;
ms = 1;
if (! prev) {
prev = (char *) malloc((size_t) ntiles);
for (n=0; n < ntiles; n++) {
prev[n] = 0;
}
}
fprintf(stderr, " ");
for (x=0; x < ntiles_x; x++) {
fprintf(stderr, "%1d", x % 10);
}
fprintf(stderr, "\n");
n = 0;
for (y=0; y < ntiles_y; y++) {
fprintf(stderr, "%2d ", y);
for (x=0; x < ntiles_x; x++) {
if (tile_has_diff[n]) {
fprintf(stderr, "X");
} else if (prev[n]) {
fprintf(stderr, "o");
} else {
fprintf(stderr, ".");
}
n++;
}
fprintf(stderr, "\n");
}
for (n=0; n < ntiles; n++) {
prev[n] = tile_has_diff[n];
}
usleep(ms * 1000);
} | 0 | [
"CWE-862",
"CWE-284",
"CWE-732"
]
| x11vnc | 69eeb9f7baa14ca03b16c9de821f9876def7a36a | 270,393,181,376,092,330,000,000,000,000,000,000,000 | 38 | scan: limit access to shared memory segments to current user |
recoveryStopsBefore(XLogRecord *record)
{
bool stopsHere = false;
uint8 record_info;
bool isCommit;
TimestampTz recordXtime = 0;
/* Check if we should stop as soon as reaching consistency */
if (recoveryTarget == RECOVERY_TARGET_IMMEDIATE && reachedConsistency)
{
ereport(LOG,
(errmsg("recovery stopping after reaching consistency")));
recoveryStopAfter = false;
recoveryStopXid = InvalidTransactionId;
recoveryStopTime = 0;
recoveryStopName[0] = '\0';
return true;
}
/* Otherwise we only consider stopping before COMMIT or ABORT records. */
if (record->xl_rmid != RM_XACT_ID)
return false;
record_info = record->xl_info & ~XLR_INFO_MASK;
if (record_info == XLOG_XACT_COMMIT_COMPACT || record_info == XLOG_XACT_COMMIT)
isCommit = true;
else if (record_info == XLOG_XACT_ABORT)
isCommit = false;
else
return false;
if (recoveryTarget == RECOVERY_TARGET_XID && !recoveryTargetInclusive)
{
/*
* There can be only one transaction end record with this exact
* transactionid
*
* when testing for an xid, we MUST test for equality only, since
* transactions are numbered in the order they start, not the order
* they complete. A higher numbered xid will complete before you
* about 50% of the time...
*/
stopsHere = (record->xl_xid == recoveryTargetXid);
}
if (recoveryTarget == RECOVERY_TARGET_TIME &&
getRecordTimestamp(record, &recordXtime))
{
/*
* There can be many transactions that share the same commit time, so
* we stop after the last one, if we are inclusive, or stop at the
* first one if we are exclusive
*/
if (recoveryTargetInclusive)
stopsHere = (recordXtime > recoveryTargetTime);
else
stopsHere = (recordXtime >= recoveryTargetTime);
}
if (stopsHere)
{
recoveryStopAfter = false;
recoveryStopXid = record->xl_xid;
recoveryStopTime = recordXtime;
recoveryStopName[0] = '\0';
if (isCommit)
{
ereport(LOG,
(errmsg("recovery stopping before commit of transaction %u, time %s",
recoveryStopXid,
timestamptz_to_str(recoveryStopTime))));
}
else
{
ereport(LOG,
(errmsg("recovery stopping before abort of transaction %u, time %s",
recoveryStopXid,
timestamptz_to_str(recoveryStopTime))));
}
}
return stopsHere;
} | 0 | [
"CWE-119"
]
| postgres | 01824385aead50e557ca1af28640460fa9877d51 | 283,651,307,732,682,800,000,000,000,000,000,000,000 | 84 | Prevent potential overruns of fixed-size buffers.
Coverity identified a number of places in which it couldn't prove that a
string being copied into a fixed-size buffer would fit. We believe that
most, perhaps all of these are in fact safe, or are copying data that is
coming from a trusted source so that any overrun is not really a security
issue. Nonetheless it seems prudent to forestall any risk by using
strlcpy() and similar functions.
Fixes by Peter Eisentraut and Jozef Mlich based on Coverity reports.
In addition, fix a potential null-pointer-dereference crash in
contrib/chkpass. The crypt(3) function is defined to return NULL on
failure, but chkpass.c didn't check for that before using the result.
The main practical case in which this could be an issue is if libc is
configured to refuse to execute unapproved hashing algorithms (e.g.,
"FIPS mode"). This ideally should've been a separate commit, but
since it touches code adjacent to one of the buffer overrun changes,
I included it in this commit to avoid last-minute merge issues.
This issue was reported by Honza Horak.
Security: CVE-2014-0065 for buffer overruns, CVE-2014-0066 for crypt() |
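One of the patterns named in the PostgreSQL message above, sketched in isolation: a bounded string copy into a fixed buffer that always NUL-terminates and lets the caller detect truncation. my_strlcpy is a local stand-in so the example does not depend on a platform strlcpy; the crypt(3) half of the fix is simply "check the result for NULL before using it" and is not reproduced here.

#include <stdio.h>
#include <string.h>

/* Minimal strlcpy-style helper: never overruns dst, always NUL-terminates. */
static size_t my_strlcpy(char *dst, const char *src, size_t dstsize)
{
    size_t srclen = strlen(src);
    if (dstsize > 0) {
        size_t n = (srclen >= dstsize) ? dstsize - 1 : srclen;
        memcpy(dst, src, n);
        dst[n] = '\0';
    }
    return srclen;          /* callers detect truncation when return >= dstsize */
}

int main(void)
{
    char name[8];
    size_t full = my_strlcpy(name, "recovery_target_name_that_is_long", sizeof(name));
    printf("stored \"%s\" (%s)\n", name,
           full >= sizeof(name) ? "truncated" : "complete");
    return 0;
}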
static double mp_vector_map_vv(_cimg_math_parser& mp) { // Operator(vector,vector)
unsigned int
siz = (unsigned int)mp.opcode[2],
ptrs1 = (unsigned int)mp.opcode[4] + 1,
ptrs2 = (unsigned int)mp.opcode[5] + 1;
double *ptrd = &_mp_arg(1) + 1;
mp_func op = (mp_func)mp.opcode[3];
CImg<ulongT> l_opcode(1,4);
l_opcode.swap(mp.opcode);
ulongT &argument1 = mp.opcode[2], &argument2 = mp.opcode[3];
while (siz-->0) { argument1 = ptrs1++; argument2 = ptrs2++; *(ptrd++) = (*op)(mp); }
l_opcode.swap(mp.opcode);
return cimg::type<double>::nan(); | 0 | [
"CWE-125"
]
| CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 48,867,544,131,683,790,000,000,000,000,000,000,000 | 14 | Fix other issues in 'CImg<T>::load_bmp()'. |
getparm(int parm, int n)
/* push n copies of param on the terminfo stack if not already there */
{
int nn;
if (seenr) {
if (parm == 1)
parm = 2;
else if (parm == 2)
parm = 1;
}
for (nn = 0; nn < n; ++nn) {
dp = save_string(dp, "%p");
dp = save_char(dp, '0' + parm);
}
if (onstack == parm) {
if (n > 1) {
_nc_warning("string may not be optimal");
dp = save_string(dp, "%Pa");
while (n-- > 0) {
dp = save_string(dp, "%ga");
}
}
return;
}
if (onstack != 0)
push();
onstack = parm;
if (seenn && parm < 3) {
dp = save_string(dp, "%{96}%^");
}
if (seenm && parm < 3) {
dp = save_string(dp, "%{127}%^");
}
} | 0 | []
| ncurses | 790a85dbd4a81d5f5d8dd02a44d84f01512ef443 | 146,012,526,746,544,740,000,000,000,000,000,000,000 | 40 | ncurses 6.2 - patch 20200531
+ correct configure version-check/warnng for g++ to allow for 10.x
+ re-enable "bel" in konsole-base (report by Nia Huang)
+ add linux-s entry (patch by Alexandre Montaron).
+ drop long-obsolete convert_configure.pl
+ add test/test_parm.c, for checking tparm changes.
+ improve parameter-checking for tparm, adding function _nc_tiparm() to
handle the most-used case, which accepts only numeric parameters
(report/testcase by "puppet-meteor").
+ use a more conservative estimate of the buffer-size in lib_tparm.c's
save_text() and save_number(), in case the sprintf() function
passes-through unexpected characters from a format specifier
(report/testcase by "puppet-meteor").
+ add a check for end-of-string in cvtchar to handle a malformed
string in infotocap (report/testcase by "puppet-meteor"). |
escapes(cp, tp)
const char *cp;
char *tp;
{
while (*cp) {
int cval = 0, meta = 0;
if (*cp == '\\' && cp[1] && index("mM", cp[1]) && cp[2]) {
meta = 1;
cp += 2;
}
if (*cp == '\\' && cp[1] && index("0123456789xXoO", cp[1]) && cp[2]) {
NEARDATA const char hex[] = "00112233445566778899aAbBcCdDeEfF";
const char *dp;
int dcount = 0;
cp++;
if (*cp == 'x' || *cp == 'X')
for (++cp; *cp && (dp = index(hex, *cp)) && (dcount++ < 2); cp++)
cval = (cval * 16) + ((int)(dp - hex) / 2);
else if (*cp == 'o' || *cp == 'O')
for (++cp; *cp && (index("01234567",*cp)) && (dcount++ < 3); cp++)
cval = (cval * 8) + (*cp - '0');
else
for (; *cp && (index("0123456789",*cp)) && (dcount++ < 3); cp++)
cval = (cval * 10) + (*cp - '0');
} else if (*cp == '\\' && cp[1]) { /* C-style character escapes */
switch (*++cp) {
case '\\': cval = '\\'; break;
case 'n': cval = '\n'; break;
case 't': cval = '\t'; break;
case 'b': cval = '\b'; break;
case 'r': cval = '\r'; break;
default: cval = *cp;
}
cp++;
} else if (*cp == '^' && cp[1]) { /* expand control-character syntax */
cval = (*++cp & 0x1f);
cp++;
} else
cval = *cp++;
if (meta)
cval |= 0x80;
*tp++ = cval;
}
*tp = '\0';
} | 1 | [
"CWE-269"
]
| NetHack | 612755bfb5c412079795c68ba392df5d93874ed8 | 331,882,839,961,033,300,000,000,000,000,000,000,000 | 48 | escapes() revamp
Partial rewrite of escapes(), mostly changing its if-then-else
logic so that end-of-string can be checked once instead of for each case.
The previous version had a bug if the input string ended with backslash
and one decimal digit (due to being lumped together with the handling
for trailing \X or \O). |
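A compact illustration of the hazard the NetHack note above describes: chained look-aheads such as cp[1] and cp[2] must never be evaluated past the terminating NUL. parse_escape below is an invented miniature decoder, not the game's escapes().

#include <stdio.h>

/* Decode "\d", "\dd", "\ddd" decimal escapes; stop safely at end of string. */
static const char *parse_escape(const char *cp, int *out)
{
    int val = 0, digits = 0;

    if (*cp == '\\') {
        cp++;                                   /* never read past the NUL */
        while (digits < 3 && *cp >= '0' && *cp <= '9') {
            val = val * 10 + (*cp - '0');
            cp++;
            digits++;
        }
    }
    *out = digits ? val : -1;
    return cp;
}

int main(void)
{
    int v;
    parse_escape("\\6", &v);   /* trailing backslash plus one digit: still safe */
    printf("%d\n", v);
    return 0;
}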
static int load_refcount_block(BlockDriverState *bs,
int64_t refcount_block_offset,
void **refcount_block)
{
BDRVQcowState *s = bs->opaque;
int ret;
BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_LOAD);
ret = qcow2_cache_get(bs, s->refcount_block_cache, refcount_block_offset,
refcount_block);
return ret;
} | 0 | [
"CWE-190"
]
| qemu | b106ad9185f35fc4ad669555ad0e79e276083bd7 | 147,484,905,367,794,010,000,000,000,000,000,000,000 | 13 | qcow2: Don't rely on free_cluster_index in alloc_refcount_block() (CVE-2014-0147)
free_cluster_index is only correct if update_refcount() was called from
an allocation function, and even there it's brittle because it's used to
protect unfinished allocations which still have a refcount of 0 - if it
moves in the wrong place, the unfinished allocation can be corrupted.
So not using it any more seems to be a good idea. Instead, use the
first requested cluster to do the calculations. Return -EAGAIN if
unfinished allocations could become invalid and let the caller restart
its search for some free clusters.
The context of creating a snapsnot is one situation where
update_refcount() is called outside of a cluster allocation. For this
case, the change fixes a buffer overflow if a cluster is referenced in
an L2 table that cannot be represented by an existing refcount block.
(new_table[refcount_table_index] was out of bounds)
[Bump the qemu-iotests 026 refblock_alloc.write leak count from 10 to
11.
--Stefan]
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]> |
nfsd4_encode_bitmap(struct xdr_stream *xdr, u32 bmval0, u32 bmval1, u32 bmval2)
{
__be32 *p;
if (bmval2) {
p = xdr_reserve_space(xdr, 16);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(3);
*p++ = cpu_to_be32(bmval0);
*p++ = cpu_to_be32(bmval1);
*p++ = cpu_to_be32(bmval2);
} else if (bmval1) {
p = xdr_reserve_space(xdr, 12);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(2);
*p++ = cpu_to_be32(bmval0);
*p++ = cpu_to_be32(bmval1);
} else {
p = xdr_reserve_space(xdr, 8);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
*p++ = cpu_to_be32(bmval0);
}
return 0;
out_resource:
return nfserr_resource;
} | 0 | [
"CWE-20",
"CWE-129"
]
| linux | f961e3f2acae94b727380c0b74e2d3954d0edf79 | 63,960,591,319,662,500,000,000,000,000,000,000,000 | 31 | nfsd: encoders mustn't use unitialized values in error cases
In error cases, lgp->lg_layout_type may be out of bounds; so we
shouldn't be using it until after the check of nfserr.
This was seen to crash nfsd threads when the server receives a LAYOUTGET
request with a large layout type.
GETDEVICEINFO has the same problem.
Reported-by: Ari Kauppi <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Cc: [email protected]
Signed-off-by: J. Bruce Fields <[email protected]> |
static int decode_text_chunk(PNGDecContext *s, uint32_t length, int compressed,
AVDictionary **dict)
{
int ret, method;
const uint8_t *data = s->gb.buffer;
const uint8_t *data_end = data + length;
const uint8_t *keyword = data;
const uint8_t *keyword_end = memchr(keyword, 0, data_end - keyword);
uint8_t *kw_utf8 = NULL, *text, *txt_utf8 = NULL;
unsigned text_len;
AVBPrint bp;
if (!keyword_end)
return AVERROR_INVALIDDATA;
data = keyword_end + 1;
if (compressed) {
if (data == data_end)
return AVERROR_INVALIDDATA;
method = *(data++);
if (method)
return AVERROR_INVALIDDATA;
if ((ret = decode_zbuf(&bp, data, data_end)) < 0)
return ret;
text_len = bp.len;
av_bprint_finalize(&bp, (char **)&text);
if (!text)
return AVERROR(ENOMEM);
} else {
text = (uint8_t *)data;
text_len = data_end - text;
}
kw_utf8 = iso88591_to_utf8(keyword, keyword_end - keyword);
txt_utf8 = iso88591_to_utf8(text, text_len);
if (text != data)
av_free(text);
if (!(kw_utf8 && txt_utf8)) {
av_free(kw_utf8);
av_free(txt_utf8);
return AVERROR(ENOMEM);
}
av_dict_set(dict, kw_utf8, txt_utf8,
AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
return 0;
} | 0 | [
"CWE-787"
]
| FFmpeg | e371f031b942d73e02c090170975561fabd5c264 | 191,625,078,196,006,440,000,000,000,000,000,000,000 | 47 | avcodec/pngdec: Fix off by 1 size in decode_zbuf()
Fixes out of array access
Fixes: 444/fuzz-2-ffmpeg_VIDEO_AV_CODEC_ID_PNG_fuzzer
Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/targets/ffmpeg
Signed-off-by: Michael Niedermayer <[email protected]> |
mac_init (digest_hd_st* td, gnutls_mac_algorithm_t mac, opaque * secret, int secret_size,
int ver)
{
int ret = 0;
if (mac == GNUTLS_MAC_NULL)
{
gnutls_assert();
return GNUTLS_E_HASH_FAILED;
}
if (ver == GNUTLS_SSL3)
{ /* SSL 3.0 */
ret = _gnutls_mac_init_ssl3 (td, mac, secret, secret_size);
}
else
{ /* TLS 1.x */
ret = _gnutls_hmac_init (td, mac, secret, secret_size);
}
return ret;
} | 0 | [
"CWE-189"
]
| gnutls | bc8102405fda11ea00ca3b42acc4f4bce9d6e97b | 72,321,256,663,876,720,000,000,000,000,000,000,000 | 22 | Fix GNUTLS-SA-2008-1 security vulnerabilities.
See http://www.gnu.org/software/gnutls/security.html for updates. |
find_name_end(
char_u *arg,
char_u **expr_start,
char_u **expr_end,
int flags)
{
int mb_nest = 0;
int br_nest = 0;
char_u *p;
int len;
int vim9script = in_vim9script();
if (expr_start != NULL)
{
*expr_start = NULL;
*expr_end = NULL;
}
// Quick check for valid starting character.
if ((flags & FNE_CHECK_START) && !eval_isnamec1(*arg)
&& (*arg != '{' || vim9script))
return arg;
for (p = arg; *p != NUL
&& (eval_isnamec(*p)
|| (*p == '{' && !vim9script)
|| ((flags & FNE_INCL_BR) && (*p == '['
|| (*p == '.' && eval_isdictc(p[1]))))
|| mb_nest != 0
|| br_nest != 0); MB_PTR_ADV(p))
{
if (*p == '\'')
{
// skip over 'string' to avoid counting [ and ] inside it.
for (p = p + 1; *p != NUL && *p != '\''; MB_PTR_ADV(p))
;
if (*p == NUL)
break;
}
else if (*p == '"')
{
// skip over "str\"ing" to avoid counting [ and ] inside it.
for (p = p + 1; *p != NUL && *p != '"'; MB_PTR_ADV(p))
if (*p == '\\' && p[1] != NUL)
++p;
if (*p == NUL)
break;
}
else if (br_nest == 0 && mb_nest == 0 && *p == ':')
{
// "s:" is start of "s:var", but "n:" is not and can be used in
// slice "[n:]". Also "xx:" is not a namespace. But {ns}: is.
len = (int)(p - arg);
if ((len == 1 && vim_strchr(NAMESPACE_CHAR, *arg) == NULL)
|| (len > 1 && p[-1] != '}'))
break;
}
if (mb_nest == 0)
{
if (*p == '[')
++br_nest;
else if (*p == ']')
--br_nest;
}
if (br_nest == 0 && !vim9script)
{
if (*p == '{')
{
mb_nest++;
if (expr_start != NULL && *expr_start == NULL)
*expr_start = p;
}
else if (*p == '}')
{
mb_nest--;
if (expr_start != NULL && mb_nest == 0 && *expr_end == NULL)
*expr_end = p;
}
}
}
return p;
} | 0 | [
"CWE-122",
"CWE-787"
]
| vim | 605ec91e5a7330d61be313637e495fa02a6dc264 | 201,398,337,700,369,140,000,000,000,000,000,000,000 | 85 | patch 8.2.3847: illegal memory access when using a lambda with an error
Problem: Illegal memory access when using a lambda with an error.
Solution: Avoid skipping over the NUL after a string. |
stuff_inserted(
int c, // Command character to be inserted
long count, // Repeat this many times
int no_esc) // Don't add an ESC at the end
{
char_u *esc_ptr;
char_u *ptr;
char_u *last_ptr;
char_u last = NUL;
ptr = get_last_insert();
if (ptr == NULL)
{
emsg(_(e_no_inserted_text_yet));
return FAIL;
}
// may want to stuff the command character, to start Insert mode
if (c != NUL)
stuffcharReadbuff(c);
if ((esc_ptr = vim_strrchr(ptr, ESC)) != NULL)
*esc_ptr = NUL; // remove the ESC
// when the last char is either "0" or "^" it will be quoted if no ESC
// comes after it OR if it will inserted more than once and "ptr"
// starts with ^D. -- Acevedo
last_ptr = (esc_ptr ? esc_ptr : ptr + STRLEN(ptr)) - 1;
if (last_ptr >= ptr && (*last_ptr == '0' || *last_ptr == '^')
&& (no_esc || (*ptr == Ctrl_D && count > 1)))
{
last = *last_ptr;
*last_ptr = NUL;
}
do
{
stuffReadbuff(ptr);
// a trailing "0" is inserted as "<C-V>048", "^" as "<C-V>^"
if (last)
stuffReadbuff(
(char_u *)(last == '0' ? "\026\060\064\070" : "\026^"));
}
while (--count > 0);
if (last)
*last_ptr = last;
if (esc_ptr != NULL)
*esc_ptr = ESC; // put the ESC back
// may want to stuff a trailing ESC, to get out of Insert mode
if (!no_esc)
stuffcharReadbuff(ESC);
return OK;
} | 0 | [
"CWE-120"
]
| vim | 7ce5b2b590256ce53d6af28c1d203fb3bc1d2d97 | 288,301,941,940,575,300,000,000,000,000,000,000,000 | 56 | patch 8.2.4969: changing text in Visual mode may cause invalid memory access
Problem: Changing text in Visual mode may cause invalid memory access.
Solution: Check the Visual position after making a change. |
static void warn_dirty_buffer(struct buffer_head *bh)
{
printk(KERN_WARNING
"JBD2: Spotted dirty metadata buffer (dev = %pg, blocknr = %llu). "
"There's a risk of filesystem corruption in case of system "
"crash.\n",
bh->b_bdev, (unsigned long long)bh->b_blocknr);
} | 0 | [
"CWE-787"
]
| linux | e09463f220ca9a1a1ecfda84fcda658f99a1f12a | 121,075,978,190,658,490,000,000,000,000,000,000,000 | 8 | jbd2: don't mark block as modified if the handle is out of credits
Do not set the b_modified flag in a block's journal head until after
we're sure that jbd2_journal_dirty_metadata() will not
abort with an error due to there not being enough space reserved in
the jbd2 handle.
Otherwise, future attempts to modify the buffer may lead a large
number of spurious errors and warnings.
This addresses CVE-2018-10883.
https://bugzilla.kernel.org/show_bug.cgi?id=200071
Signed-off-by: Theodore Ts'o <[email protected]>
Cc: [email protected] |
TEST_P(DownstreamProtocolIntegrationTest, MultipleContentLengthsAllowed) {
config_helper_.addConfigModifier(
[](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
hcm) -> void {
hcm.mutable_http2_protocol_options()->set_stream_error_on_invalid_http_messaging(true);
});
initialize();
codec_client_ = makeHttpConnection(lookupPort("http"));
auto encoder_decoder =
codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{":method", "POST"},
{":path", "/test/long/url"},
{":authority", "host"},
{"content-length", "3,2"}});
auto response = std::move(encoder_decoder.second);
if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {
ASSERT_TRUE(codec_client_->waitForDisconnect());
} else {
response->waitForReset();
codec_client_->close();
}
if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {
ASSERT_TRUE(response->complete());
EXPECT_EQ("400", response->headers().getStatusValue());
} else {
ASSERT_TRUE(response->reset());
EXPECT_EQ(Http::StreamResetReason::RemoteReset, response->reset_reason());
}
} | 0 | [
"CWE-770"
]
| envoy | 7ca28ff7d46454ae930e193d97b7d08156b1ba59 | 93,748,315,825,449,840,000,000,000,000,000,000,000 | 31 | [http1] Include request URL in request header size computation, and reject partial headers that exceed configured limits (#145)
Signed-off-by: antonio <[email protected]> |
maybe_get_metakey (FlatpakDir *dir,
FlatpakDir *shadowing_dir,
FlatpakDecomposed *ref,
GHashTable *metadata_injection,
GKeyFile **out_metakey,
gboolean *out_ref_is_shadowed)
{
if (shadowing_dir &&
dir_get_metadata (shadowing_dir, ref, out_metakey))
{
*out_ref_is_shadowed = TRUE;
return TRUE;
}
if (metadata_injection != NULL)
{
GKeyFile *injected_metakey = g_hash_table_lookup (metadata_injection, flatpak_decomposed_get_ref (ref));
if (injected_metakey != NULL)
{
*out_ref_is_shadowed = FALSE;
*out_metakey = g_key_file_ref (injected_metakey);
return TRUE;
}
}
if (dir_get_metadata (dir, ref, out_metakey))
{
*out_ref_is_shadowed = FALSE;
return TRUE;
}
return FALSE;
} | 0 | [
"CWE-74"
]
| flatpak | fb473cad801c6b61706353256cab32330557374a | 116,445,288,703,094,800,000,000,000,000,000,000,000 | 33 | dir: Pass environment via bwrap --setenv when running apply_extra
This means we can systematically pass the environment variables
through bwrap(1), even if it is setuid and thus is filtering out
security-sensitive environment variables. bwrap ends up being
run with an empty environment instead.
As with the previous commit, this regressed while fixing CVE-2021-21261.
Fixes: 6d1773d2 "run: Convert all environment variables into bwrap arguments"
Signed-off-by: Simon McVittie <[email protected]> |
static apr_byte_t oidc_handle_flows(request_rec *r, oidc_cfg *c,
oidc_proto_state_t *proto_state, oidc_provider_t *provider,
apr_table_t *params, const char *response_mode, oidc_jwt_t **jwt) {
apr_byte_t rc = FALSE;
const char *requested_response_type = oidc_proto_state_get_response_type(
proto_state);
/* handle the requested response type/mode */
if (oidc_util_spaced_string_equals(r->pool, requested_response_type,
OIDC_PROTO_RESPONSE_TYPE_CODE_IDTOKEN_TOKEN)) {
rc = oidc_proto_authorization_response_code_idtoken_token(r, c,
proto_state, provider, params, response_mode, jwt);
} else if (oidc_util_spaced_string_equals(r->pool, requested_response_type,
OIDC_PROTO_RESPONSE_TYPE_CODE_IDTOKEN)) {
rc = oidc_proto_authorization_response_code_idtoken(r, c, proto_state,
provider, params, response_mode, jwt);
} else if (oidc_util_spaced_string_equals(r->pool, requested_response_type,
OIDC_PROTO_RESPONSE_TYPE_CODE_TOKEN)) {
rc = oidc_proto_handle_authorization_response_code_token(r, c,
proto_state, provider, params, response_mode, jwt);
} else if (oidc_util_spaced_string_equals(r->pool, requested_response_type,
OIDC_PROTO_RESPONSE_TYPE_CODE)) {
rc = oidc_proto_handle_authorization_response_code(r, c, proto_state,
provider, params, response_mode, jwt);
} else if (oidc_util_spaced_string_equals(r->pool, requested_response_type,
OIDC_PROTO_RESPONSE_TYPE_IDTOKEN_TOKEN)) {
rc = oidc_proto_handle_authorization_response_idtoken_token(r, c,
proto_state, provider, params, response_mode, jwt);
} else if (oidc_util_spaced_string_equals(r->pool, requested_response_type,
OIDC_PROTO_RESPONSE_TYPE_IDTOKEN)) {
rc = oidc_proto_handle_authorization_response_idtoken(r, c, proto_state,
provider, params, response_mode, jwt);
} else {
oidc_error(r, "unsupported response type: \"%s\"",
requested_response_type);
}
if ((rc == FALSE) && (*jwt != NULL)) {
oidc_jwt_destroy(*jwt);
*jwt = NULL;
}
return rc;
} | 0 | [
"CWE-601"
]
| mod_auth_openidc | 5c15dfb08106c2451c2c44ce7ace6813c216ba75 | 286,878,390,085,018,920,000,000,000,000,000,000,000 | 46 | improve validation of the post-logout URL; closes #449
- to avoid an open redirect; thanks AIMOTO Norihito
- release 2.4.0.1
Signed-off-by: Hans Zandbelt <[email protected]> |
start_vrrp(data_t *old_global_data)
{
/* Clear the flags used for optimising performance */
clear_summary_flags();
/* Initialize sub-system */
if (!__test_bit(CONFIG_TEST_BIT, &debug))
kernel_netlink_init();
if (reload)
global_data = alloc_global_data();
else if (global_data->default_ifname) {
/* We need to set the default_ifp here on startup, since
* the parent process doesn't know about the interfaces */
global_data->default_ifp = if_get_by_ifname(global_data->default_ifname, IF_CREATE_IF_DYNAMIC);
if (!global_data->default_ifp)
log_message(LOG_INFO, "WARNING - default interface %s doesn't exist", global_data->default_ifname);
}
/* Parse configuration file */
vrrp_data = alloc_vrrp_data();
if (!vrrp_data) {
stop_vrrp(KEEPALIVED_EXIT_FATAL);
return;
}
init_data(conf_file, vrrp_init_keywords);
if (non_existent_interface_specified) {
report_config_error(CONFIG_BAD_IF, "Non-existent interface specified in configuration");
stop_vrrp(KEEPALIVED_EXIT_CONFIG);
return;
}
if (reload)
init_global_data(global_data, old_global_data);
/* Set our copy of time */
set_time_now();
if (!__test_bit(CONFIG_TEST_BIT, &debug)) {
#if defined _WITH_SNMP_RFC || defined _WITH_SNMP_VRRP_
if (!reload && (
#ifdef _WITH_SNMP_VRRP_
global_data->enable_snmp_vrrp ||
#endif
#ifdef _WITH_SNMP_RFCV2_
global_data->enable_snmp_rfcv2 ||
#endif
#ifdef _WITH_SNMP_RFCV3_
global_data->enable_snmp_rfcv3 ||
#endif
false)) {
vrrp_snmp_agent_init(global_data->snmp_socket);
#ifdef _WITH_SNMP_RFC_
vrrp_start_time = time_now;
#endif
}
#endif
#ifdef _WITH_LVS_
if (vrrp_ipvs_needed()) {
/* Initialize ipvs related */
if (ipvs_start() != IPVS_SUCCESS) {
stop_vrrp(KEEPALIVED_EXIT_FATAL);
return;
}
/* Set LVS timeouts */
if (global_data->lvs_tcp_timeout ||
global_data->lvs_tcpfin_timeout ||
global_data->lvs_udp_timeout)
ipvs_set_timeouts(global_data->lvs_tcp_timeout, global_data->lvs_tcpfin_timeout, global_data->lvs_udp_timeout);
/* If we are managing the sync daemon, then stop any
* instances of it that may have been running if
* we terminated abnormally */
ipvs_syncd_cmd(IPVS_STOPDAEMON, NULL, IPVS_MASTER, true, true);
ipvs_syncd_cmd(IPVS_STOPDAEMON, NULL, IPVS_BACKUP, true, true);
}
#endif
if (reload) {
kernel_netlink_set_recv_bufs();
clear_diff_saddresses();
#ifdef _HAVE_FIB_ROUTING_
clear_diff_srules();
clear_diff_sroutes();
#endif
clear_diff_script();
#ifdef _WITH_BFD_
clear_diff_bfd();
#endif
}
else {
/* Clear leftover static entries */
netlink_iplist(vrrp_data->static_addresses, IPADDRESS_DEL, false);
#ifdef _HAVE_FIB_ROUTING_
netlink_rtlist(vrrp_data->static_routes, IPROUTE_DEL);
netlink_error_ignore = ENOENT;
netlink_rulelist(vrrp_data->static_rules, IPRULE_DEL, true);
netlink_error_ignore = 0;
#endif
}
#ifdef _WITH_DBUS_
if (!reload && global_data->enable_dbus)
if (!dbus_start())
global_data->enable_dbus = false;
#endif
}
/* Complete VRRP initialization */
if (!vrrp_complete_init()) {
stop_vrrp(KEEPALIVED_EXIT_CONFIG);
return;
}
/* If we are just testing the configuration, then we terminate now */
if (__test_bit(CONFIG_TEST_BIT, &debug))
return;
/* Start or stop gratuitous arp/ndisc as appropriate */
if (have_ipv4_instance)
gratuitous_arp_init();
else
gratuitous_arp_close();
if (have_ipv6_instance)
ndisc_init();
else
ndisc_close();
/* We need to delay the init of iptables to after vrrp_complete_init()
* has been called so we know whether we want IPv4 and/or IPv6 */
iptables_init();
/* Make sure we don't have any old iptables/ipsets settings left around */
#ifdef _HAVE_LIBIPTC_
if (!reload)
iptables_cleanup();
iptables_startup(reload);
#endif
if (!reload)
vrrp_restore_interfaces_startup();
/* clear_diff_vrrp must be called after vrrp_complete_init, since the latter
* sets ifp on the addresses, which is used for the address comparison */
if (reload)
clear_diff_vrrp();
#ifdef _WITH_DBUS_
if (reload && global_data->enable_dbus)
dbus_reload(old_vrrp_data->vrrp, vrrp_data->vrrp);
#endif
/* Post initializations */
#ifdef _MEM_CHECK_
log_message(LOG_INFO, "Configuration is using : %zu Bytes", mem_allocated);
#endif
/* Set static entries */
netlink_iplist(vrrp_data->static_addresses, IPADDRESS_ADD, false);
#ifdef _HAVE_FIB_ROUTING_
netlink_rtlist(vrrp_data->static_routes, IPROUTE_ADD);
netlink_rulelist(vrrp_data->static_rules, IPRULE_ADD, false);
#endif
/* Dump configuration */
if (__test_bit(DUMP_CONF_BIT, &debug))
dump_data_vrrp(NULL);
/* Init & start the VRRP packet dispatcher */
thread_add_event(master, vrrp_dispatcher_init, NULL,
VRRP_DISPATCHER);
/* Set the process priority and non swappable if configured */
set_process_priorities(
#ifdef _HAVE_SCHED_RT_
global_data->vrrp_realtime_priority,
#if HAVE_DECL_RLIMIT_RTTIME == 1
global_data->vrrp_rlimit_rt,
#endif
#endif
global_data->vrrp_process_priority, global_data->vrrp_no_swap ? 4096 : 0);
/* Ensure we can open sufficient file descriptors */
set_vrrp_max_fds();
} | 0 | [
"CWE-200"
]
| keepalived | 26c8d6374db33bcfcdcd758b1282f12ceef4b94f | 328,425,141,300,641,620,000,000,000,000,000,000,000 | 191 | Disable fopen_safe() append mode by default
If a non privileged user creates /tmp/keepalived.log and has it open
for read (e.g. tail -f), then even though keepalived will change the
owner to root and remove all read/write permissions from non owners,
the application which already has the file open will be able to read
the added log entries.
Accordingly, opening a file in append mode is disabled by default, and
only enabled if --enable-smtp-alert-debug or --enable-log-file (which
are debugging options and unset by default) are enabled.
This should further alleviate security concerns related to CVE-2018-19046.
Signed-off-by: Quentin Armitage <[email protected]> |
void CertDecoder::DecodeToKey()
{
ReadHeader();
signatureOID_ = GetAlgoId();
GetName(ISSUER);
GetValidity();
GetName(SUBJECT);
GetKey();
} | 0 | [
"CWE-254"
]
| mysql-server | e7061f7e5a96c66cb2e0bf46bec7f6ff35801a69 | 319,736,298,784,079,700,000,000,000,000,000,000,000 | 9 | Bug #22738607: YASSL FUNCTION X509_NAME_GET_INDEX_BY_NID IS NOT WORKING AS EXPECTED. |
int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
{
struct sock *sk = skb->sk;
struct inet_sock *inet = inet_sk(sk);
struct ip_options *opt = inet->opt;
struct rtable *rt;
struct iphdr *iph;
/* Skip all of this if the packet is already routed,
* f.e. by something like SCTP.
*/
rt = (struct rtable *) skb->dst;
if (rt != NULL)
goto packet_routed;
/* Make sure we can route this packet. */
rt = (struct rtable *)__sk_dst_check(sk, 0);
if (rt == NULL) {
u32 daddr;
/* Use correct destination address if we have options. */
daddr = inet->daddr;
if(opt && opt->srr)
daddr = opt->faddr;
{
struct flowi fl = { .oif = sk->sk_bound_dev_if,
.nl_u = { .ip4_u =
{ .daddr = daddr,
.saddr = inet->saddr,
.tos = RT_CONN_FLAGS(sk) } },
.proto = sk->sk_protocol,
.uli_u = { .ports =
{ .sport = inet->sport,
.dport = inet->dport } } };
/* If this fails, retransmit mechanism of transport layer will
* keep trying until route appears or the connection times
* itself out.
*/
if (ip_route_output_flow(&rt, &fl, sk, 0))
goto no_route;
}
sk_setup_caps(sk, &rt->u.dst);
}
skb->dst = dst_clone(&rt->u.dst);
packet_routed:
if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
goto no_route;
/* OK, we know where to send it, allocate and build IP header. */
iph = (struct iphdr *) skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
*((__u16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
iph->tot_len = htons(skb->len);
if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
iph->frag_off = htons(IP_DF);
else
iph->frag_off = 0;
iph->ttl = ip_select_ttl(inet, &rt->u.dst);
iph->protocol = sk->sk_protocol;
iph->saddr = rt->rt_src;
iph->daddr = rt->rt_dst;
skb->nh.iph = iph;
/* Transport layer set skb->h.foo itself. */
if (opt && opt->optlen) {
iph->ihl += opt->optlen >> 2;
ip_options_build(skb, opt, inet->daddr, rt, 0);
}
ip_select_ident_more(iph, &rt->u.dst, sk, skb_shinfo(skb)->tso_segs);
/* Add an IP checksum. */
ip_send_check(iph);
skb->priority = sk->sk_priority;
return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
dst_output);
no_route:
IP_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
kfree_skb(skb);
return -EHOSTUNREACH;
} | 0 | []
| linux | e89e9cf539a28df7d0eb1d0a545368e9920b34ac | 42,528,037,884,088,716,000,000,000,000,000,000,000 | 86 | [IPv4/IPv6]: UFO Scatter-gather approach
Attached is kernel patch for UDP Fragmentation Offload (UFO) feature.
1. This patch incorporates the review comments by Jeff Garzik.
2. Renamed USO as UFO (UDP Fragmentation Offload)
3. udp sendfile support with UFO
This patches uses scatter-gather feature of skb to generate large UDP
datagram. Below is a "how-to" on changes required in network device
driver to use the UFO interface.
UDP Fragmentation Offload (UFO) Interface:
-------------------------------------------
UFO is a feature wherein the Linux kernel network stack will offload the
IP fragmentation functionality of large UDP datagram to hardware. This
will reduce the overhead of stack in fragmenting the large UDP datagram to
MTU sized packets
1) Drivers indicate their capability of UFO using
dev->features |= NETIF_F_UFO | NETIF_F_HW_CSUM | NETIF_F_SG
NETIF_F_HW_CSUM is required for UFO over ipv6.
2) UFO packet will be submitted for transmission using driver xmit routine.
UFO packet will have a non-zero value for
"skb_shinfo(skb)->ufo_size"
skb_shinfo(skb)->ufo_size will indicate the length of data part in each IP
fragment going out of the adapter after IP fragmentation by hardware.
skb->data will contain MAC/IP/UDP header and skb_shinfo(skb)->frags[]
contains the data payload. The skb->ip_summed will be set to CHECKSUM_HW
indicating that hardware has to do checksum calculation. Hardware should
compute the UDP checksum of complete datagram and also ip header checksum of
each fragmented IP packet.
For IPV6 the UFO provides the fragment identification-id in
skb_shinfo(skb)->ip6_frag_id. The adapter should use this ID for generating
IPv6 fragments.
Signed-off-by: Ananda Raju <[email protected]>
Signed-off-by: Rusty Russell <[email protected]> (forwarded)
Signed-off-by: Arnaldo Carvalho de Melo <[email protected]> |
ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
{
struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
dprintk("--> %s\n", __func__);
if (lseg->pls_range.iomode == IOMODE_RW) {
struct nfs4_flexfile_layout *ffl;
struct inode *inode;
ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
inode = ffl->generic_hdr.plh_inode;
spin_lock(&inode->i_lock);
pnfs_generic_ds_cinfo_release_lseg(&ffl->commit_info, lseg);
spin_unlock(&inode->i_lock);
}
_ff_layout_free_lseg(fls);
} | 0 | [
"CWE-787"
]
| linux | ed34695e15aba74f45247f1ee2cf7e09d449f925 | 314,106,947,097,758,900,000,000,000,000,000,000,000 | 18 | pNFS/flexfiles: fix incorrect size check in decode_nfs_fh()
We (adam zabrocki, alexander matrosov, alexander tereshkin, maksym
bazalii) observed the check:
if (fh->size > sizeof(struct nfs_fh))
should not use the size of the nfs_fh struct which includes an extra two
bytes from the size field.
struct nfs_fh {
unsigned short size;
unsigned char data[NFS_MAXFHSIZE];
}
but should determine the size from data[NFS_MAXFHSIZE] so the memcpy
will not write 2 bytes beyond destination. The proposed fix is to
compare against the NFS_MAXFHSIZE directly, as is done elsewhere in fs
code base.
Fixes: d67ae825a59d ("pnfs/flexfiles: Add the FlexFile Layout Driver")
Signed-off-by: Nikola Livic <[email protected]>
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]> |
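The fix described above reduces to validating an attacker-influenced length against the destination array's capacity before copying into it. A hedged, generic sketch of that check — the struct, constant, and function names are hypothetical, not the pNFS code:
#include <stddef.h>
#include <string.h>
#define FH_MAX_DATA 128                   /* illustrative capacity of data[] */
struct fh_buf {
    unsigned short size;
    unsigned char  data[FH_MAX_DATA];
};
/* Returns 0 on success, -1 if len exceeds the data array (compare against
 * data[], not sizeof(struct fh_buf), which also counts the size field). */
static int fh_copy(struct fh_buf *dst, const unsigned char *src, size_t len)
{
    if (len > sizeof(dst->data))
        return -1;
    memcpy(dst->data, src, len);
    dst->size = (unsigned short)len;
    return 0;
}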
static int decode_exchange_id(struct xdr_stream *xdr,
struct nfs41_exchange_id_res *res)
{
__be32 *p;
uint32_t dummy;
char *dummy_str;
int status;
struct nfs_client *clp = res->client;
status = decode_op_hdr(xdr, OP_EXCHANGE_ID);
if (status)
return status;
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
xdr_decode_hyper(p, &clp->cl_clientid);
p = xdr_inline_decode(xdr, 12);
if (unlikely(!p))
goto out_overflow;
clp->cl_seqid = be32_to_cpup(p++);
clp->cl_exchange_flags = be32_to_cpup(p++);
/* We ask for SP4_NONE */
dummy = be32_to_cpup(p);
if (dummy != SP4_NONE)
return -EIO;
/* Throw away minor_id */
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
/* Throw away Major id */
status = decode_opaque_inline(xdr, &dummy, &dummy_str);
if (unlikely(status))
return status;
/* Save server_scope */
status = decode_opaque_inline(xdr, &dummy, &dummy_str);
if (unlikely(status))
return status;
if (unlikely(dummy > NFS4_OPAQUE_LIMIT))
return -EIO;
memcpy(res->server_scope->server_scope, dummy_str, dummy);
res->server_scope->server_scope_sz = dummy;
/* Throw away Implementation id array */
status = decode_opaque_inline(xdr, &dummy, &dummy_str);
if (unlikely(status))
return status;
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
} | 0 | [
"CWE-703",
"CWE-189"
]
| linux | bf118a342f10dafe44b14451a1392c3254629a1f | 169,347,605,250,854,430,000,000,000,000,000,000,000 | 59 | NFSv4: include bitmap in nfsv4 get acl data
The NFSv4 bitmap size is unbounded: a server can return an arbitrary
sized bitmap in an FATTR4_WORD0_ACL request. Replace using the
nfs4_fattr_bitmap_maxsz as a guess to the maximum bitmask returned by a server
with the inclusion of the bitmap (xdr length plus bitmasks) and the acl data
xdr length to the (cached) acl page data.
This is a general solution to commit e5012d1f "NFSv4.1: update
nfs4_fattr_bitmap_maxsz" and fixes hitting a BUG_ON in xdr_shrink_bufhead
when getting ACLs.
Fix a bug in decode_getacl that returned -EINVAL on ACLs > page when getxattr
was called with a NULL buffer, preventing ACL > PAGE_SIZE from being retrieved.
Cc: [email protected]
Signed-off-by: Andy Adamson <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]> |
void TFE_DeleteContextCapsule(PyObject* context) {
TFE_Context* ctx =
reinterpret_cast<TFE_Context*>(PyCapsule_GetPointer(context, nullptr));
auto op = ReleaseThreadLocalOp(ctx);
op.reset();
TFE_DeleteContext(ctx);
} | 0 | [
"CWE-476",
"CWE-908"
]
| tensorflow | 237822b59fc504dda2c564787f5d3ad9c4aa62d9 | 179,177,297,490,259,000,000,000,000,000,000,000,000 | 7 | Fix tf.compat.v1.placeholder_with_default vulnerability with quantized types.
When iterating through the tensor to extract shape values, an underlying missing kernel
(`StridedSlice` for quantized types) causes an error, which then results in a `nullptr`
being passed to `ParseDimensionValue()`, causing a segfault.
The `nullptr` check allows the missing kernel error to propagate.
Adding the missing kernel registrations allows the shape values
to be extracted successfully.
PiperOrigin-RevId: 445045957 |
void t_go_generator::generate_struct(t_struct* tstruct) {
generate_go_struct(tstruct, false);
} | 0 | [
"CWE-77"
]
| thrift | 2007783e874d524a46b818598a45078448ecc53e | 306,307,600,413,413,640,000,000,000,000,000,000,000 | 3 | THRIFT-3893 Command injection in format_go_output
Client: Go
Patch: Jens Geyer |
int jas_stream_write(jas_stream_t *stream, const void *buf, int cnt)
{
int n;
const char *bufptr;
if (cnt < 0) {
jas_deprecated("negative count for jas_stream_write");
}
bufptr = buf;
n = 0;
while (n < cnt) {
if (jas_stream_putc(stream, *bufptr) == EOF) {
return n;
}
++bufptr;
++n;
}
return n;
} | 0 | [
"CWE-415",
"CWE-190",
"CWE-369"
]
| jasper | 634ce8e8a5accc0fa05dd2c20d42b4749d4b2735 | 165,086,391,637,189,820,000,000,000,000,000,000,000 | 22 | Made some changes to the I/O stream library for memory streams.
There were a number of potential problems due to the possibility
of integer overflow.
Changed some integral types to the larger types size_t or ssize_t.
For example, the function mem_resize now takes the buffer size parameter
as a size_t.
Added a new function jas_stream_memopen2, which takes a
buffer size specified as a size_t instead of an int.
This can be used in jas_image_cmpt_create to avoid potential
overflow problems.
Added a new function jas_deprecated to warn about reliance on
deprecated library behavior. |
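A small sketch of the overflow-aware size handling the message describes: do the arithmetic in size_t and reject requests whose addition would wrap before trusting the result. Function and parameter names are hypothetical, not the JasPer API:
#include <stdint.h>
#include <stdlib.h>
/* Grow a buffer to needed + headroom bytes, refusing requests whose
 * arithmetic would wrap around SIZE_MAX. Returns NULL on failure. */
static void *grow_buffer(void *buf, size_t needed, size_t headroom)
{
    if (needed > SIZE_MAX - headroom)   /* addition would overflow */
        return NULL;
    return realloc(buf, needed + headroom);
}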
static void test_json_append_escaped(void)
{
string_t *str = t_str_new(32);
test_begin("json_append_escaped()");
json_append_escaped(str, "\b\f\r\n\t\"\\\001\002-\xC3\xA4\xf0\x90\x90\xb7\xff");
test_assert(strcmp(str_c(str), "\\b\\f\\r\\n\\t\\\"\\\\\\u0001\\u0002-\\u00e4\\ud801\\udc37" UNICODE_REPLACEMENT_CHAR_UTF8) == 0);
test_end();
} | 0 | []
| core | 973769d74433de3c56c4ffdf4f343cb35d98e4f7 | 265,529,581,624,819,600,000,000,000,000,000,000,000 | 9 | lib: json - Escape invalid UTF-8 as unicode bytes
This prevents dovecot from crashing if invalid UTF-8 input
is given. |
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path)
{
struct ext4_extent_header *eh;
int depth = ext_depth(inode);
struct ext4_extent *ex;
__le32 border;
int k, err = 0;
eh = path[depth].p_hdr;
ex = path[depth].p_ext;
BUG_ON(ex == NULL);
BUG_ON(eh == NULL);
if (depth == 0) {
/* there is no tree at all */
return 0;
}
if (ex != EXT_FIRST_EXTENT(eh)) {
/* we correct tree if first leaf got modified only */
return 0;
}
/*
* TODO: we need correction if border is smaller than current one
*/
k = depth - 1;
border = path[depth].p_ext->ee_block;
err = ext4_ext_get_access(handle, inode, path + k);
if (err)
return err;
path[k].p_idx->ei_block = border;
err = ext4_ext_dirty(handle, inode, path + k);
if (err)
return err;
while (k--) {
/* change all left-side indexes */
if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
break;
err = ext4_ext_get_access(handle, inode, path + k);
if (err)
break;
path[k].p_idx->ei_block = border;
err = ext4_ext_dirty(handle, inode, path + k);
if (err)
break;
}
return err;
} | 0 | [
"CWE-703"
]
| linux | 744692dc059845b2a3022119871846e74d4f6e11 | 61,224,645,015,965,490,000,000,000,000,000,000,000 | 52 | ext4: use ext4_get_block_write in buffer write
Allocate uninitialized extent before ext4 buffer write and
convert the extent to initialized after io completes.
The purpose is to make sure an extent can only be marked
initialized after it has been written with new data so
we can safely drop the i_mutex lock in ext4 DIO read without
exposing stale data. This helps to improve multi-thread DIO
read performance on high-speed disks.
Skip the nobh and data=journal mount cases to make things simple for now.
Signed-off-by: Jiaying Zhang <[email protected]>
Signed-off-by: "Theodore Ts'o" <[email protected]> |
void FillDiagHelper(const TfLiteTensor* input, const TfLiteTensor* diag,
TfLiteTensor* output) {
const int num_output_dims = output->dims->size;
int batch_size = 1;
for (int i = 0; i < num_output_dims - 2; ++i) {
batch_size *= output->dims->data[i];
}
const int row_size = output->dims->data[num_output_dims - 2];
const int col_size = output->dims->data[num_output_dims - 1];
switch (output->type) {
case kTfLiteInt64: {
return FillDiag<int64_t>(input, diag, output, batch_size, row_size,
col_size);
}
case kTfLiteInt32: {
return FillDiag<int32_t>(input, diag, output, batch_size, row_size,
col_size);
}
case kTfLiteInt16: {
return FillDiag<int16_t>(input, diag, output, batch_size, row_size,
col_size);
}
case kTfLiteInt8: {
return FillDiag<int8_t>(input, diag, output, batch_size, row_size,
col_size);
}
case kTfLiteUInt8: {
return FillDiag<uint8_t>(input, diag, output, batch_size, row_size,
col_size);
}
default:
return FillDiag<float>(input, diag, output, batch_size, row_size,
col_size);
}
} | 0 | [
"CWE-125",
"CWE-787"
]
| tensorflow | 1970c2158b1ffa416d159d03c3370b9a462aee35 | 76,396,986,985,570,950,000,000,000,000,000,000,000 | 36 | [tflite]: Insert `nullptr` checks when obtaining tensors.
As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages.
We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`).
PiperOrigin-RevId: 332521299
Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56 |
static void cmd_authinfo_pass(char *pass)
{
int failedloginpause;
/* Conceal password in telemetry log */
if (nntp_logfd != -1 && pass) {
int r; /* avoid warnings */
r = ftruncate(nntp_logfd,
lseek(nntp_logfd, -2, SEEK_CUR) - strlen(pass));
r = write(nntp_logfd, "...\r\n", 5);
}
if (nntp_authstate) {
prot_printf(nntp_out, "502 Already authenticated\r\n");
return;
}
if (!nntp_userid) {
prot_printf(nntp_out, "482 Must give AUTHINFO USER command first\r\n");
return;
}
if (!strcmp(nntp_userid, "anonymous")) {
if (allowanonymous) {
pass = beautify_string(pass);
if (strlen(pass) > 500) pass[500] = '\0';
syslog(LOG_NOTICE, "login: %s anonymous %s",
nntp_clienthost, pass);
}
else {
syslog(LOG_NOTICE, "badlogin: %s anonymous login refused",
nntp_clienthost);
prot_printf(nntp_out, "481 Invalid login\r\n");
return;
}
}
else if (sasl_checkpass(nntp_saslconn,
nntp_userid,
strlen(nntp_userid),
pass,
strlen(pass))!=SASL_OK) {
syslog(LOG_NOTICE, "badlogin: %s plaintext %s %s",
nntp_clienthost, nntp_userid, sasl_errdetail(nntp_saslconn));
failedloginpause = config_getint(IMAPOPT_FAILEDLOGINPAUSE);
if (failedloginpause != 0) {
sleep(failedloginpause);
}
prot_printf(nntp_out, "481 Invalid login\r\n");
free(nntp_userid);
nntp_userid = 0;
return;
}
else {
syslog(LOG_NOTICE, "login: %s %s plaintext%s %s", nntp_clienthost,
nntp_userid, nntp_starttls_done ? "+TLS" : "",
"User logged in");
prot_printf(nntp_out, "281 User logged in\r\n");
nntp_authstate = auth_newstate(nntp_userid);
/* Close IP-based telemetry log and create new log based on userid */
if (nntp_logfd != -1) close(nntp_logfd);
nntp_logfd = telemetry_log(nntp_userid, nntp_in, nntp_out, 0);
}
} | 0 | [
"CWE-287"
]
| cyrus-imapd | 77903669e04c9788460561dd0560b9c916519594 | 40,648,330,663,248,290,000,000,000,000,000,000,000 | 66 | Secunia SA46093 - make sure nntp authentication completes
Discovered by Stefan Cornelius, Secunia Research
The vulnerability is caused due to the access restriction for certain
commands only checking whether or not variable "nntp_userid" is non-NULL,
without performing additional checks to verify that a complete, successful
authentication actually took place. The variable "nntp_userid" can be set to
point to a string holding the username (changing it to a non-NULL, thus
allowing attackers to bypass the checks) by sending an "AUTHINFO USER"
command. The variable is not reset to NULL until e.g. a wrong "AUTHINFO
PASS" command is received. This can be exploited to bypass the
authentication mechanism and allows access to e.g. the "NEWNEWS" or the
"LIST NEWSGROUPS" commands by sending an "AUTHINFO USER" command without a
following "AUTHINFO PASS" command. |
roles_is_member_of(Oid roleid)
{
List *roles_list;
ListCell *l;
List *new_cached_membership_roles;
MemoryContext oldctx;
/* If cache is already valid, just return the list */
if (OidIsValid(cached_member_role) && cached_member_role == roleid)
return cached_membership_roles;
/*
* Find all the roles that roleid is a member of, including multi-level
* recursion. The role itself will always be the first element of the
* resulting list.
*
* Each element of the list is scanned to see if it adds any indirect
* memberships. We can use a single list as both the record of
* already-found memberships and the agenda of roles yet to be scanned.
* This is a bit tricky but works because the foreach() macro doesn't
* fetch the next list element until the bottom of the loop.
*/
roles_list = list_make1_oid(roleid);
foreach(l, roles_list)
{
Oid memberid = lfirst_oid(l);
CatCList *memlist;
int i;
/* Find roles that memberid is directly a member of */
memlist = SearchSysCacheList1(AUTHMEMMEMROLE,
ObjectIdGetDatum(memberid));
for (i = 0; i < memlist->n_members; i++)
{
HeapTuple tup = &memlist->members[i]->tuple;
Oid otherid = ((Form_pg_auth_members) GETSTRUCT(tup))->roleid;
/*
* Even though there shouldn't be any loops in the membership
* graph, we must test for having already seen this role. It is
* legal for instance to have both A->B and A->C->B.
*/
roles_list = list_append_unique_oid(roles_list, otherid);
}
ReleaseSysCacheList(memlist);
}
/*
* Copy the completed list into TopMemoryContext so it will persist.
*/
oldctx = MemoryContextSwitchTo(TopMemoryContext);
new_cached_membership_roles = list_copy(roles_list);
MemoryContextSwitchTo(oldctx);
list_free(roles_list);
/*
* Now safe to assign to state variable
*/
cached_member_role = InvalidOid; /* just paranoia */
list_free(cached_membership_roles);
cached_membership_roles = new_cached_membership_roles;
cached_member_role = roleid;
/* And now we can return the answer */
return cached_membership_roles;
} | 0 | [
"CWE-264"
]
| postgres | fea164a72a7bfd50d77ba5fb418d357f8f2bb7d0 | 84,770,288,750,806,740,000,000,000,000,000,000,000 | 67 | Shore up ADMIN OPTION restrictions.
Granting a role without ADMIN OPTION is supposed to prevent the grantee
from adding or removing members from the granted role. Issuing SET ROLE
before the GRANT bypassed that, because the role itself had an implicit
right to add or remove members. Plug that hole by recognizing that
implicit right only when the session user matches the current role.
Additionally, do not recognize it during a security-restricted operation
or during execution of a SECURITY DEFINER function. The restriction on
SECURITY DEFINER is not security-critical. However, it seems best for a
user testing his own SECURITY DEFINER function to see the same behavior
others will see. Back-patch to 8.4 (all supported versions).
The SQL standards do not conflate roles and users as PostgreSQL does;
only SQL roles have members, and only SQL users initiate sessions. An
application using PostgreSQL users and roles as SQL users and roles will
never attempt to grant membership in the role that is the session user,
so the implicit right to add or remove members will never arise.
The security impact was mostly that a role member could revoke access
from others, contrary to the wishes of his own grantor. Unapproved role
member additions are less notable, because the member can still largely
achieve that by creating a view or a SECURITY DEFINER function.
Reviewed by Andres Freund and Tom Lane. Reported, independently, by
Jonas Sundman and Noah Misch.
Security: CVE-2014-0060 |
void *nedgetvalue(nedpool **p, void *mem) THROWSPEC
{
nedpool *np=0;
mchunkptr mcp=mem2chunk(mem);
mstate fm;
if(!(is_aligned(chunk2mem(mcp))) && mcp->head != FENCEPOST_HEAD) return 0;
if(!cinuse(mcp)) return 0;
if(!next_pinuse(mcp)) return 0;
if(!is_mmapped(mcp) && !pinuse(mcp))
{
if(next_chunk(prev_chunk(mcp))!=mcp) return 0;
}
fm=get_mstate_for(mcp);
if(!ok_magic(fm)) return 0;
if(!ok_address(fm, mcp)) return 0;
if(!fm->extp) return 0;
np=(nedpool *) fm->extp;
if(p) *p=np;
return np->uservalue;
} | 0 | [
"CWE-119",
"CWE-787"
]
| git | 34fa79a6cde56d6d428ab0d3160cb094ebad3305 | 270,761,872,454,886,860,000,000,000,000,000,000,000 | 20 | prefer memcpy to strcpy
When we already know the length of a string (e.g., because
we just malloc'd to fit it), it's nicer to use memcpy than
strcpy, as it makes it more obvious that we are not going to
overflow the buffer (because the size we pass matches the
size in the allocation).
This also eliminates calls to strcpy, which make auditing
the code base harder.
Signed-off-by: Jeff King <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]> |
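The rationale above is easy to show in isolation: when the length is already known from the allocation, copying exactly that many bytes makes the absence of overflow obvious. A generic sketch, not the actual git change:
#include <stdlib.h>
#include <string.h>
/* Duplicate a string whose length has just been computed. */
static char *dup_with_known_len(const char *src)
{
    size_t len = strlen(src);
    char *dst = malloc(len + 1);
    if (!dst)
        return NULL;
    memcpy(dst, src, len + 1);   /* copy size matches the allocation exactly */
    return dst;
}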
static CImg<floatT> isoline3d(CImgList<tf>& primitives, const char *const expression, const float isovalue,
const float x0, const float y0, const float x1, const float y1,
const int size_x=256, const int size_y=256) {
const _functor2d_expr func(expression);
return isoline3d(primitives,func,isovalue,x0,y0,x1,y1,size_x,size_y);
} | 0 | [
"CWE-770"
]
| cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 210,649,825,535,968,020,000,000,000,000,000,000,000 | 6 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
bool smtp_server_connection_is_ssl_secured(struct smtp_server_connection *conn)
{
return conn->ssl_secured;
} | 0 | [
"CWE-77"
]
| core | 321c339756f9b2b98fb7326359d1333adebb5295 | 197,583,005,091,822,720,000,000,000,000,000,000,000 | 4 | lib-smtp: smtp-server-connection - Fix STARTTLS command injection vulnerability.
The input handler kept reading more commands even though the input was locked by
the STARTTLS command, thereby causing it to read the command pipelined beyond
STARTTLS. This causes a STARTTLS command injection vulnerability.
void TestContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->HasObservableSideEffects());
HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
instr->SetSuccessorAt(0, empty_true);
instr->SetSuccessorAt(1, empty_false);
owner()->current_block()->Finish(instr);
empty_true->Goto(if_true(), owner()->function_state());
empty_false->Goto(if_false(), owner()->function_state());
owner()->set_current_block(NULL);
} | 0 | []
| node | fd80a31e0697d6317ce8c2d289575399f4e06d21 | 158,516,502,841,414,580,000,000,000,000,000,000,000 | 11 | deps: backport 5f836c from v8 upstream
Original commit message:
Fix Hydrogen bounds check elimination
When combining bounds checks, they must all be moved before the first load/store
that they are guarding.
BUG=chromium:344186
LOG=y
[email protected]
Review URL: https://codereview.chromium.org/172093002
git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@19475 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
fix #8070 |
static int opl3_detect(int ioaddr)
{
/*
* This function returns 1 if the FM chip is present at the given I/O port
* The detection algorithm plays with the timer built in the FM chip and
* looks for a change in the status register.
*
* Note! The timers of the FM chip are not connected to AdLib (and compatible)
* boards.
*
* Note2! The chip is initialized if detected.
*/
unsigned char stat1, signature;
int i;
if (devc != NULL)
{
printk(KERN_ERR "opl3: Only one OPL3 supported.\n");
return 0;
}
devc = kzalloc(sizeof(*devc), GFP_KERNEL);
if (devc == NULL)
{
printk(KERN_ERR "opl3: Can't allocate memory for the device control "
"structure \n ");
return 0;
}
strcpy(devc->fm_info.name, "OPL2");
if (!request_region(ioaddr, 4, devc->fm_info.name)) {
printk(KERN_WARNING "opl3: I/O port 0x%x already in use\n", ioaddr);
goto cleanup_devc;
}
devc->base = ioaddr;
/* Reset timers 1 and 2 */
opl3_command(ioaddr, TIMER_CONTROL_REGISTER, TIMER1_MASK | TIMER2_MASK);
/* Reset the IRQ of the FM chip */
opl3_command(ioaddr, TIMER_CONTROL_REGISTER, IRQ_RESET);
signature = stat1 = inb(ioaddr); /* Status register */
if (signature != 0x00 && signature != 0x06 && signature != 0x02 &&
signature != 0x0f)
{
MDB(printk(KERN_INFO "OPL3 not detected %x\n", signature));
goto cleanup_region;
}
if (signature == 0x06) /* OPL2 */
{
detected_model = 2;
}
else if (signature == 0x00 || signature == 0x0f) /* OPL3 or OPL4 */
{
unsigned char tmp;
detected_model = 3;
/*
* Detect availability of OPL4 (_experimental_). Works probably
* only after a cold boot. In addition the OPL4 port
* of the chip may not be connected to the PC bus at all.
*/
opl3_command(ioaddr + 2, OPL3_MODE_REGISTER, 0x00);
opl3_command(ioaddr + 2, OPL3_MODE_REGISTER, OPL3_ENABLE | OPL4_ENABLE);
if ((tmp = inb(ioaddr)) == 0x02) /* Have a OPL4 */
{
detected_model = 4;
}
if (request_region(ioaddr - 8, 2, "OPL4")) /* OPL4 port was free */
{
int tmp;
outb((0x02), ioaddr - 8); /* Select OPL4 ID register */
udelay(10);
tmp = inb(ioaddr - 7); /* Read it */
udelay(10);
if (tmp == 0x20) /* OPL4 should return 0x20 here */
{
detected_model = 4;
outb((0xF8), ioaddr - 8); /* Select OPL4 FM mixer control */
udelay(10);
outb((0x1B), ioaddr - 7); /* Write value */
udelay(10);
}
else
{ /* release OPL4 port */
release_region(ioaddr - 8, 2);
detected_model = 3;
}
}
opl3_command(ioaddr + 2, OPL3_MODE_REGISTER, 0);
}
for (i = 0; i < 9; i++)
opl3_command(ioaddr, KEYON_BLOCK + i, 0); /*
* Note off
*/
opl3_command(ioaddr, TEST_REGISTER, ENABLE_WAVE_SELECT);
opl3_command(ioaddr, PERCOSSION_REGISTER, 0x00); /*
* Melodic mode.
*/
return 1;
cleanup_region:
release_region(ioaddr, 4);
cleanup_devc:
kfree(devc);
devc = NULL;
return 0;
} | 0 | [
"CWE-119",
"CWE-264",
"CWE-284"
]
| linux | 4d00135a680727f6c3be78f8befaac009030e4df | 45,207,577,348,387,820,000,000,000,000,000,000,000 | 121 | sound/oss/opl3: validate voice and channel indexes
User-controllable indexes for voice and channel values may cause reading
and writing beyond the bounds of their respective arrays, leading to
potentially exploitable memory corruption. Validate these indexes.
Signed-off-by: Dan Rosenberg <[email protected]>
Cc: [email protected]
Signed-off-by: Takashi Iwai <[email protected]> |
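A hedged sketch of the index validation the message calls for: reject out-of-range, caller-supplied values before they are used to address an array. The array name and bound are illustrative, not taken from the OSS driver:
#define MAX_VOICES 18                    /* illustrative bound */
static int voice_state[MAX_VOICES];
/* Returns 0 on success, -1 if the caller-supplied index is out of range. */
static int set_voice_state(int voice, int value)
{
    if (voice < 0 || voice >= MAX_VOICES)
        return -1;                       /* validate before indexing */
    voice_state[voice] = value;
    return 0;
}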
static COMMANDS *find_command(char *name,char cmd_char)
{
uint len;
char *end;
DBUG_ENTER("find_command");
DBUG_PRINT("enter",("name: '%s' char: %d", name ? name : "NULL", cmd_char));
if (!name)
{
len=0;
end=0;
}
else
{
while (my_isspace(charset_info,*name))
name++;
/*
If there is an \\g in the row or if the row has a delimiter but
this is not a delimiter command, let add_line() take care of
parsing the row and calling find_command()
*/
if (strstr(name, "\\g") || (strstr(name, delimiter) &&
!(strlen(name) >= 9 &&
!my_strnncoll(&my_charset_latin1,
(uchar*) name, 9,
(const uchar*) "delimiter",
9))))
DBUG_RETURN((COMMANDS *) 0);
if ((end=strcont(name," \t")))
{
len=(uint) (end - name);
while (my_isspace(charset_info,*end))
end++;
if (!*end)
end=0; // no arguments to function
}
else
len=(uint) strlen(name);
}
for (uint i= 0; commands[i].name; i++)
{
if (commands[i].func &&
((name &&
!my_strnncoll(&my_charset_latin1, (uchar*)name, len,
(uchar*)commands[i].name,len) &&
!commands[i].name[len] &&
(!end || (end && commands[i].takes_params))) ||
(!name && commands[i].cmd_char == cmd_char)))
{
DBUG_PRINT("exit",("found command: %s", commands[i].name));
DBUG_RETURN(&commands[i]);
}
}
DBUG_RETURN((COMMANDS *) 0);
} | 0 | [
"CWE-295"
]
| mysql-server | b3e9211e48a3fb586e88b0270a175d2348935424 | 315,111,432,318,889,730,000,000,000,000,000,000,000 | 56 | WL#9072: Backport WL#8785 to 5.5 |
int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
struct inet_sock *inet = inet_sk(sk);
struct dccp_sock *dp = dccp_sk(sk);
__be16 orig_sport, orig_dport;
__be32 daddr, nexthop;
struct flowi4 fl4;
struct rtable *rt;
int err;
dp->dccps_role = DCCP_ROLE_CLIENT;
if (addr_len < sizeof(struct sockaddr_in))
return -EINVAL;
if (usin->sin_family != AF_INET)
return -EAFNOSUPPORT;
nexthop = daddr = usin->sin_addr.s_addr;
if (inet->opt != NULL && inet->opt->srr) {
if (daddr == 0)
return -EINVAL;
nexthop = inet->opt->faddr;
}
orig_sport = inet->inet_sport;
orig_dport = usin->sin_port;
rt = ip_route_connect(&fl4, nexthop, inet->inet_saddr,
RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
IPPROTO_DCCP,
orig_sport, orig_dport, sk, true);
if (IS_ERR(rt))
return PTR_ERR(rt);
if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
ip_rt_put(rt);
return -ENETUNREACH;
}
if (inet->opt == NULL || !inet->opt->srr)
daddr = rt->rt_dst;
if (inet->inet_saddr == 0)
inet->inet_saddr = rt->rt_src;
inet->inet_rcv_saddr = inet->inet_saddr;
inet->inet_dport = usin->sin_port;
inet->inet_daddr = daddr;
inet_csk(sk)->icsk_ext_hdr_len = 0;
if (inet->opt != NULL)
inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;
/*
* Socket identity is still unknown (sport may be zero).
* However we set state to DCCP_REQUESTING and not releasing socket
* lock select source port, enter ourselves into the hash tables and
* complete initialization after this.
*/
dccp_set_state(sk, DCCP_REQUESTING);
err = inet_hash_connect(&dccp_death_row, sk);
if (err != 0)
goto failure;
rt = ip_route_newports(&fl4, rt, orig_sport, orig_dport,
inet->inet_sport, inet->inet_dport, sk);
if (IS_ERR(rt)) {
rt = NULL;
goto failure;
}
/* OK, now commit destination to socket. */
sk_setup_caps(sk, &rt->dst);
dp->dccps_iss = secure_dccp_sequence_number(inet->inet_saddr,
inet->inet_daddr,
inet->inet_sport,
inet->inet_dport);
inet->inet_id = dp->dccps_iss ^ jiffies;
err = dccp_connect(sk);
rt = NULL;
if (err != 0)
goto failure;
out:
return err;
failure:
/*
* This unhashes the socket and releases the local port, if necessary.
*/
dccp_set_state(sk, DCCP_CLOSED);
ip_rt_put(rt);
sk->sk_route_caps = 0;
inet->inet_dport = 0;
goto out;
} | 1 | [
"CWE-362"
]
| linux-2.6 | f6d8bd051c391c1c0458a30b2a7abcd939329259 | 239,950,913,463,832,720,000,000,000,000,000,000,000 | 95 | inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We cant insert an rcu_head in struct ip_options since its included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
ProcPanoramiXShmPutImage(register ClientPtr client)
{
int j, result = 0, orig_x, orig_y;
PanoramiXRes *draw, *gc;
Bool sendEvent, isRoot;
REQUEST(xShmPutImageReq);
REQUEST_SIZE_MATCH(xShmPutImageReq);
if(!(draw = (PanoramiXRes *)SecurityLookupIDByClass(
client, stuff->drawable, XRC_DRAWABLE, DixWriteAccess)))
return BadDrawable;
if(!(gc = (PanoramiXRes *)SecurityLookupIDByType(
client, stuff->gc, XRT_GC, DixReadAccess)))
return BadGC;
isRoot = (draw->type == XRT_WINDOW) && draw->u.win.root;
orig_x = stuff->dstX;
orig_y = stuff->dstY;
sendEvent = stuff->sendEvent;
stuff->sendEvent = 0;
FOR_NSCREENS(j) {
if(!j) stuff->sendEvent = sendEvent;
stuff->drawable = draw->info[j].id;
stuff->gc = gc->info[j].id;
if (isRoot) {
stuff->dstX = orig_x - panoramiXdataPtr[j].x;
stuff->dstY = orig_y - panoramiXdataPtr[j].y;
}
result = ProcShmPutImage(client);
if(result != client->noClientException) break;
}
return(result);
} | 0 | [
"CWE-189"
]
| xserver | be6c17fcf9efebc0bbcc3d9a25f8c5a2450c2161 | 44,231,323,677,553,960,000,000,000,000,000,000,000 | 36 | CVE-2007-6429: Always test for size+offset wrapping. |
static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr)
{
unsigned long secend;
/*
* Check for both overflow and offset/size being
* too large.
*/
secend = shdr->sh_offset + shdr->sh_size;
if (secend < shdr->sh_offset || secend > info->len)
return -ENOEXEC;
return 0;
} | 0 | [
"CWE-362",
"CWE-347"
]
| linux | 0c18f29aae7ce3dadd26d8ee3505d07cc982df75 | 140,024,351,780,502,900,000,000,000,000,000,000,000 | 14 | module: limit enabling module.sig_enforce
Irrespective as to whether CONFIG_MODULE_SIG is configured, specifying
"module.sig_enforce=1" on the boot command line sets "sig_enforce".
Only allow "sig_enforce" to be set when CONFIG_MODULE_SIG is configured.
This patch makes the presence of /sys/module/module/parameters/sig_enforce
dependent on CONFIG_MODULE_SIG=y.
Fixes: fda784e50aac ("module: export module signature enforcement status")
Reported-by: Nayna Jain <[email protected]>
Tested-by: Mimi Zohar <[email protected]>
Tested-by: Jessica Yu <[email protected]>
Signed-off-by: Mimi Zohar <[email protected]>
Signed-off-by: Jessica Yu <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
Item *Item_field::derived_field_transformer_for_where(THD *thd, uchar *arg)
{
st_select_lex *sel= (st_select_lex *)arg;
Item *producing_item= find_producing_item(this, sel);
if (producing_item)
{
Item *producing_clone= producing_item->build_clone(thd, thd->mem_root);
if (producing_clone)
producing_clone->marker|= SUBSTITUTION_FL;
return producing_clone;
}
return this;
} | 0 | [
"CWE-89"
]
| server | b5e16a6e0381b28b598da80b414168ce9a5016e5 | 4,630,370,908,858,356,000,000,000,000,000,000,000 | 13 | MDEV-26061 MariaDB server crash at Field::set_default
* Item_default_value::fix_fields creates a copy of its argument's field.
* Field::default_value is changed when its expression is prepared in
unpack_vcol_info_from_frm()
This means we must unpack any vcol expression that includes DEFAULT(x)
strictly after unpacking x->default_value.
To avoid building and solving this dependency graph on every table open,
we update Item_default_value::field->default_value after all vcols
are unpacked and fixed. |
bool CConnectionTransportUDP::CreateLoopbackPair( CConnectionTransportUDP *pTransport[2] )
{
IBoundUDPSocket *sock[2];
SteamNetworkingErrMsg errMsg;
if ( !CreateBoundSocketPair(
CRecvPacketCallback( PacketReceived, pTransport[0] ),
CRecvPacketCallback( PacketReceived, pTransport[1] ), sock, errMsg ) )
{
// Assert, this really should only fail if we have some sort of bug
AssertMsg1( false, "Failed to create UDP socket pair. %s", errMsg );
return false;
}
pTransport[0]->m_pSocket = sock[0];
pTransport[1]->m_pSocket = sock[1];
return true;
} | 0 | [
"CWE-703"
]
| GameNetworkingSockets | d944a10808891d202bb1d5e1998de6e0423af678 | 245,646,353,798,233,800,000,000,000,000,000,000,000 | 18 | Tweak pointer math to avoid possible integer overflow |
static void cmd_authinfo_user(char *user)
{
char *p;
if (nntp_authstate) {
prot_printf(nntp_out, "502 Already authenticated\r\n");
return;
}
/* possibly disallow USER */
if (!(nntp_starttls_done || (extprops_ssf > 1) ||
config_getswitch(IMAPOPT_ALLOWPLAINTEXT))) {
prot_printf(nntp_out,
"483 AUTHINFO USER command only available under a layer\r\n");
return;
}
if (nntp_userid) {
free(nntp_userid);
nntp_userid = NULL;
}
if (!(p = canonify_userid(user, NULL, NULL))) {
prot_printf(nntp_out, "481 Invalid user\r\n");
syslog(LOG_NOTICE,
"badlogin: %s plaintext %s invalid user",
nntp_clienthost, beautify_string(user));
}
else {
nntp_userid = xstrdup(p);
prot_printf(nntp_out, "381 Give AUTHINFO PASS command\r\n");
}
} | 0 | [
"CWE-287"
]
| cyrus-imapd | 77903669e04c9788460561dd0560b9c916519594 | 280,999,699,688,412,700,000,000,000,000,000,000,000 | 33 | Secunia SA46093 - make sure nntp authentication completes
Discovered by Stefan Cornelius, Secunia Research
The vulnerability is caused due to the access restriction for certain
commands only checking whether or not variable "nntp_userid" is non-NULL,
without performing additional checks to verify that a complete, successful
authentication actually took place. The variable "nntp_userid" can be set to
point to a string holding the username (changing it to a non-NULL, thus
allowing attackers to bypass the checks) by sending an "AUTHINFO USER"
command. The variable is not reset to NULL until e.g. a wrong "AUTHINFO
PASS" command is received. This can be exploited to bypass the
authentication mechanism and allows access to e.g. the "NEWNEWS" or the
"LIST NEWSGROUPS" commands by sending an "AUTHINFO USER" command without a
following "AUTHINFO PASS" command. |
static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
{
if (!list_empty_careful(&vcpu->async_pf.done))
return true;
if (kvm_apic_has_events(vcpu))
return true;
if (vcpu->arch.pv.pv_unhalted)
return true;
if (vcpu->arch.exception.pending)
return true;
if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
(vcpu->arch.nmi_pending &&
static_call(kvm_x86_nmi_allowed)(vcpu, false)))
return true;
if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
(vcpu->arch.smi_pending &&
static_call(kvm_x86_smi_allowed)(vcpu, false)))
return true;
if (kvm_arch_interrupt_allowed(vcpu) &&
(kvm_cpu_has_interrupt(vcpu) ||
kvm_guest_apic_has_interrupt(vcpu)))
return true;
if (kvm_hv_has_stimer_pending(vcpu))
return true;
if (is_guest_mode(vcpu) &&
kvm_x86_ops.nested_ops->hv_timer_pending &&
kvm_x86_ops.nested_ops->hv_timer_pending(vcpu))
return true;
return false;
} | 0 | [
"CWE-476"
]
| linux | 55749769fe608fa3f4a075e42e89d237c8e37637 | 331,012,324,529,318,520,000,000,000,000,000,000,000 | 39 | KVM: x86: Fix wall clock writes in Xen shared_info not to mark page dirty
When dirty ring logging is enabled, any dirty logging without an active
vCPU context will cause a kernel oops. But we've already declared that
the shared_info page doesn't get dirty tracking anyway, since it would
be kind of insane to mark it dirty every time we deliver an event channel
interrupt. Userspace is supposed to just assume it's always dirty any
time a vCPU can run or event channels are routed.
So stop using the generic kvm_write_wall_clock() and just write directly
through the gfn_to_pfn_cache that we already have set up.
We can make kvm_write_wall_clock() static in x86.c again now, but let's
not remove the 'sec_hi_ofs' argument even though it's not used yet. At
some point we *will* want to use that for KVM guests too.
Fixes: 629b5348841a ("KVM: x86/xen: update wallclock region")
Reported-by: butt3rflyh4ck <[email protected]>
Signed-off-by: David Woodhouse <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
static inline bool tsc_page_update_unsafe(struct kvm_hv *hv)
{
return (hv->hv_tsc_page_status != HV_TSC_PAGE_GUEST_CHANGED) &&
hv->hv_tsc_emulation_control;
} | 0 | [
"CWE-476"
]
| linux | 7ec37d1cbe17d8189d9562178d8b29167fe1c31a | 31,096,162,046,605,540,000,000,000,000,000,000,000 | 5 | KVM: x86: Check lapic_in_kernel() before attempting to set a SynIC irq
When KVM_CAP_HYPERV_SYNIC{,2} is activated, KVM already checks for
irqchip_in_kernel() so normally SynIC irqs should never be set. It is,
however, possible for a misbehaving VMM to write to SYNIC/STIMER MSRs
causing erroneous behavior.
The immediate issue being fixed is that kvm_irq_delivery_to_apic()
(kvm_irq_delivery_to_apic_fast()) crashes when called with
'irq.shorthand = APIC_DEST_SELF' and 'src == NULL'.
Signed-off-by: Vitaly Kuznetsov <[email protected]>
Message-Id: <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]> |