func
stringlengths 0
484k
| target
int64 0
1
| cwe
listlengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
// Extracts and validates the parameters of a 3D pooling op.  On any
// validation failure an error Status is recorded on `context` via
// OP_REQUIRES and construction stops early.
Pool3dParameters::Pool3dParameters(OpKernelContext* context,
                                   const std::vector<int32>& ksize,
                                   const std::vector<int32>& stride,
                                   Padding padding, TensorFormat data_format,
                                   const TensorShape& tensor_in_shape) {
  // For 3d pooling, tensor_in must have 5 dimensions (batch, three spatial
  // dims, channels).  The original message claimed "4-dimensional", which
  // contradicted the dims() == 5 check below.
  OP_REQUIRES(context, tensor_in_shape.dims() == 5,
              errors::InvalidArgument("tensor_in must be 5-dimensional"));
  this->data_format = data_format;
  depth = GetTensorDim(tensor_in_shape, data_format, 'C');
  tensor_in_planes = GetTensorDim(tensor_in_shape, data_format, '0');
  tensor_in_rows = GetTensorDim(tensor_in_shape, data_format, '1');
  tensor_in_cols = GetTensorDim(tensor_in_shape, data_format, '2');
  tensor_in_batch = GetTensorDim(tensor_in_shape, data_format, 'N');
  window_planes = GetTensorDim(ksize, data_format, '0');
  window_rows = GetTensorDim(ksize, data_format, '1');
  window_cols = GetTensorDim(ksize, data_format, '2');
  depth_window = GetTensorDim(ksize, data_format, 'C');
  plane_stride = GetTensorDim(stride, data_format, '0');
  row_stride = GetTensorDim(stride, data_format, '1');
  col_stride = GetTensorDim(stride, data_format, '2');
  depth_stride = GetTensorDim(stride, data_format, 'C');
  // Reject non-positive window and stride sizes up front: a zero or negative
  // stride would later cause a division by zero (CWE-369) and bogus window
  // sizes can produce out-of-bounds output computations (CWE-787).
  OP_REQUIRES(context,
              window_planes > 0 && window_rows > 0 && window_cols > 0,
              errors::InvalidArgument("window dimensions must be positive"));
  OP_REQUIRES(context,
              plane_stride > 0 && row_stride > 0 && col_stride > 0,
              errors::InvalidArgument("strides must be positive"));
  // We only support 3D pooling across plane/width/height. Depthwise
  // pooling is not supported.
  OP_REQUIRES(
      context, depth_window == 1 && depth_stride == 1,
      errors::Unimplemented(
          "Pooling3d only supports pooling across plane/width/height."));
  OP_REQUIRES_OK(context, GetWindowedOutputSize(tensor_in_planes, window_planes,
                                                plane_stride, padding,
                                                &out_plane, &pad_planes));
  OP_REQUIRES_OK(context,
                 GetWindowedOutputSize(tensor_in_rows, window_rows, row_stride,
                                       padding, &out_height, &pad_rows));
  OP_REQUIRES_OK(context,
                 GetWindowedOutputSize(tensor_in_cols, window_cols, col_stride,
                                       padding, &out_width, &pad_cols));
} | 0 | [
"CWE-369",
"CWE-787"
]
| tensorflow | 63c6a29d0f2d692b247f7bf81f8732d6442fad09 | 329,294,425,063,341,560,000,000,000,000,000,000,000 | 41 | Add missing validation, prevent heap OOB
PiperOrigin-RevId: 372246723
Change-Id: I1a454a643810e77d7d14821b342098c56a09fbbf |
/*
 * Look up the route for an outgoing ICMPv6 packet, applying XFRM (IPsec)
 * policy.  Returns a held dst_entry on success or an ERR_PTR on failure.
 * If the forward XFRM lookup is denied with -EPERM, a second ("relookup")
 * attempt is made using the flow decoded in reverse from the triggering skb.
 */
static struct dst_entry *icmpv6_route_lookup(struct net *net,
struct sk_buff *skb,
struct sock *sk,
struct flowi6 *fl6)
{
struct dst_entry *dst, *dst2;
struct flowi6 fl2;
int err;
err = ip6_dst_lookup(net, sk, &dst, fl6);
if (err)
return ERR_PTR(err);
/*
* We won't send icmp if the destination is known
* anycast.
*/
if (ipv6_anycast_destination(dst, &fl6->daddr)) {
net_dbg_ratelimited("icmp6_send: acast source\n");
dst_release(dst);
return ERR_PTR(-EINVAL);
}
/* No need to clone since we're just using its address. */
dst2 = dst;
dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), sk, 0);
if (!IS_ERR(dst)) {
/* xfrm_lookup returned a (possibly transformed) route; if it is
* unchanged we fall through to try the reverse-decoded flow. */
if (dst != dst2)
return dst;
} else {
/* -EPERM means policy blocked this flow: clear dst and attempt
* the relookup below; any other error is final. */
if (PTR_ERR(dst) == -EPERM)
dst = NULL;
else
return dst;
}
/* fl2 is populated here from the packet that triggered the ICMP error.
* NOTE(review): fl2 is not zeroed first — presumably
* xfrm_decode_session_reverse() fills every field that is later read. */
err = xfrm_decode_session_reverse(skb, flowi6_to_flowi(&fl2), AF_INET6);
if (err)
goto relookup_failed;
err = ip6_dst_lookup(net, sk, &dst2, &fl2);
if (err)
goto relookup_failed;
dst2 = xfrm_lookup(net, dst2, flowi6_to_flowi(&fl2), sk, XFRM_LOOKUP_ICMP);
if (!IS_ERR(dst2)) {
/* Relookup succeeded: drop the old reference (may be NULL-safe
* only because dst is non-NULL on this path) and use the new one. */
dst_release(dst);
dst = dst2;
} else {
err = PTR_ERR(dst2);
if (err == -EPERM) {
dst_release(dst);
return dst2;
} else
goto relookup_failed;
}
relookup_failed:
/* On relookup failure, fall back to the original dst if we still hold
* one; otherwise propagate the error. */
if (dst)
return dst;
return ERR_PTR(err);
} | 0 | [
"CWE-20",
"CWE-200"
]
| linux | 79dc7e3f1cd323be4c81aa1a94faa1b3ed987fb2 | 326,584,873,030,795,880,000,000,000,000,000,000,000 | 63 | net: handle no dst on skb in icmp6_send
Andrey reported the following while fuzzing the kernel with syzkaller:
kasan: CONFIG_KASAN_INLINE enabled
kasan: GPF could be caused by NULL-ptr deref or user memory access
general protection fault: 0000 [#1] SMP KASAN
Modules linked in:
CPU: 0 PID: 3859 Comm: a.out Not tainted 4.9.0-rc6+ #429
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
task: ffff8800666d4200 task.stack: ffff880067348000
RIP: 0010:[<ffffffff833617ec>] [<ffffffff833617ec>]
icmp6_send+0x5fc/0x1e30 net/ipv6/icmp.c:451
RSP: 0018:ffff88006734f2c0 EFLAGS: 00010206
RAX: ffff8800666d4200 RBX: 0000000000000000 RCX: 0000000000000000
RDX: 0000000000000000 RSI: dffffc0000000000 RDI: 0000000000000018
RBP: ffff88006734f630 R08: ffff880064138418 R09: 0000000000000003
R10: dffffc0000000000 R11: 0000000000000005 R12: 0000000000000000
R13: ffffffff84e7e200 R14: ffff880064138484 R15: ffff8800641383c0
FS: 00007fb3887a07c0(0000) GS:ffff88006cc00000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 0000000020000000 CR3: 000000006b040000 CR4: 00000000000006f0
Stack:
ffff8800666d4200 ffff8800666d49f8 ffff8800666d4200 ffffffff84c02460
ffff8800666d4a1a 1ffff1000ccdaa2f ffff88006734f498 0000000000000046
ffff88006734f440 ffffffff832f4269 ffff880064ba7456 0000000000000000
Call Trace:
[<ffffffff83364ddc>] icmpv6_param_prob+0x2c/0x40 net/ipv6/icmp.c:557
[< inline >] ip6_tlvopt_unknown net/ipv6/exthdrs.c:88
[<ffffffff83394405>] ip6_parse_tlv+0x555/0x670 net/ipv6/exthdrs.c:157
[<ffffffff8339a759>] ipv6_parse_hopopts+0x199/0x460 net/ipv6/exthdrs.c:663
[<ffffffff832ee773>] ipv6_rcv+0xfa3/0x1dc0 net/ipv6/ip6_input.c:191
...
icmp6_send / icmpv6_send is invoked for both rx and tx paths. In both
cases the dst->dev should be preferred for determining the L3 domain
if the dst has been set on the skb. Fallback to the skb->dev if it has
not. This covers the case reported here where icmp6_send is invoked on
Rx before the route lookup.
Fixes: 5d41ce29e ("net: icmp6_send should use dst dev to determine L3 domain")
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: David Ahern <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
// Callback invoked once the store lookup for the urlres URL completes.
// Either attaches to the existing cached entry or creates a fresh one and
// kicks off a forwarding request to fetch it, then schedules a copy of the
// response data into reqbuf via urnHandleReply.
UrnState::created(StoreEntry *newEntry)
{
urlres_e = newEntry;
if (urlres_e->isNull()) {
// Cache miss: create a new entry, register ourselves as a store
// client, and start fetching the urlres resource.
urlres_e = storeCreateEntry(urlres, urlres, RequestFlags(), Http::METHOD_GET);
sc = storeClientListAdd(urlres_e, this);
FwdState::fwdStart(Comm::ConnectionPointer(), urlres_e, urlres_r.getRaw());
} else {
// Cache hit: take a reference on the entry before attaching as a
// client so it cannot be released underneath us.
urlres_e->lock("UrnState::created");
sc = storeClientListAdd(urlres_e, this);
}
reqofs = 0;
// Ask the store to copy up to URN_REQBUF_SZ bytes starting at offset 0
// into our request buffer; urnHandleReply parses the result.
StoreIOBuffer tempBuffer;
tempBuffer.offset = reqofs;
tempBuffer.length = URN_REQBUF_SZ;
tempBuffer.data = reqbuf;
storeClientCopy(sc, urlres_e,
tempBuffer,
urnHandleReply,
this);
} | 0 | [
"CWE-401"
]
| squid | a975fd5aedc866629214aaaccb38376855351899 | 4,258,337,370,419,868,500,000,000,000,000,000,000 | 23 | Bug 5104: Memory leak in RFC 2169 response parsing (#778)
A temporary parsing buffer was not being released when
parsing completed. |
/* Returns true iff the skb reports no device checksum (CHECKSUM_NONE) while
 * its csum_valid flag is set, i.e. the checksum has been verified by other
 * means and the skb is a candidate for checksum conversion. */
static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
{
return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid); | 0 | [
"CWE-20"
]
| linux | 2b16f048729bf35e6c28a40cbfad07239f9dcd90 | 254,276,628,621,739,500,000,000,000,000,000,000,000 | 4 | net: create skb_gso_validate_mac_len()
If you take a GSO skb, and split it into packets, will the MAC
length (L2 + L3 + L4 headers + payload) of those packets be small
enough to fit within a given length?
Move skb_gso_mac_seglen() to skbuff.h with other related functions
like skb_gso_network_seglen() so we can use it, and then create
skb_gso_validate_mac_len to do the full calculation.
Signed-off-by: Daniel Axtens <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
/* Encode a SubjectKeyIdentifier extension: wrap the raw key-id octets in
 * the PKIX1 ASN.1 SubjectKeyIdentifier structure and DER-encode the result
 * into @ext (allocated for the caller).
 *
 * Returns 0 on success or a negative gnutls error code. */
int gnutls_x509_ext_export_subject_key_id(const gnutls_datum_t * id,
					  gnutls_datum_t * ext)
{
	ASN1_TYPE ski = ASN1_TYPE_EMPTY;
	int asn_rc;
	int ret;

	asn_rc = asn1_create_element(_gnutls_get_pkix(),
				     "PKIX1.SubjectKeyIdentifier", &ski);
	if (asn_rc != ASN1_SUCCESS) {
		gnutls_assert();
		return _gnutls_asn2err(asn_rc);
	}

	/* The empty name ("") addresses the structure's root node. */
	asn_rc = asn1_write_value(ski, "", id->data, id->size);
	if (asn_rc != ASN1_SUCCESS) {
		gnutls_assert();
		ret = _gnutls_asn2err(asn_rc);
		goto cleanup;
	}

	ret = _gnutls_x509_der_encode(ski, "", ext, 0);
	if (ret < 0) {
		gnutls_assert();
		goto cleanup;
	}

	ret = 0;

 cleanup:
	/* Always release the ASN.1 structure, on success and failure alike. */
	asn1_delete_structure(&ski);
	return ret;
} | 0 | []
| gnutls | d6972be33264ecc49a86cd0958209cd7363af1e9 | 145,389,955,617,201,490,000,000,000,000,000,000,000 | 32 | eliminated double-free in the parsing of dist points
Reported by Robert Święcki. |
// Pack the audio accumulated in each stream's sample buffer into one WavPack
// block (plus an optional "correction" block when two-file hybrid mode is
// enabled) and hand the finished blocks to the registered blockout callbacks.
// Returns TRUE on success, FALSE on allocation failure, output-buffer
// overflow, or a failed write (error text is left in wpc->error_message).
//
// Fix over the original: the malloc() results were used without a NULL
// check, so an allocation failure dereferenced NULL (and computed
// NULL + max_blocksize, which is undefined behavior).  Both allocations are
// now verified before any pointer arithmetic or use.
static int pack_streams (WavpackContext *wpc, uint32_t block_samples)
{
    uint32_t max_blocksize, max_chans = 1, bcount;
    unsigned char *outbuff, *outend, *out2buff, *out2end;
    int result = TRUE, i;

    // for calculating output (block) buffer size, first see if any streams are stereo

    for (i = 0; i < wpc->num_streams; i++)
        if (!(wpc->streams [i]->wphdr.flags & MONO_FLAG)) {
            max_chans = 2;
            break;
        }

    // then calculate maximum size based on bytes / sample

    max_blocksize = block_samples * max_chans * ((wpc->streams [0]->wphdr.flags & BYTES_STORED) + 1);

    // add margin based on how much "negative" compression is possible with pathological audio

    if ((wpc->config.flags & CONFIG_FLOAT_DATA) && !(wpc->config.flags & CONFIG_SKIP_WVX))
        max_blocksize += max_blocksize;         // 100% margin for lossless float data
    else
        max_blocksize += max_blocksize >> 2;    // otherwise 25% margin for everything else

    max_blocksize += wpc->metabytes + 1024;     // finally, add metadata & another 1K margin

    // allocate the output buffer(s), bailing out cleanly if either fails

    out2buff = (wpc->wvc_flag) ? malloc (max_blocksize) : NULL;
    outbuff = malloc (max_blocksize);

    if (!outbuff || (wpc->wvc_flag && !out2buff)) {
        free (outbuff);
        free (out2buff);
        strcpy (wpc->error_message, "can't allocate WavPack block buffer!");
        return FALSE;
    }

    out2end = out2buff ? out2buff + max_blocksize : NULL;
    outend = outbuff + max_blocksize;

    for (wpc->current_stream = 0; wpc->current_stream < wpc->num_streams; wpc->current_stream++) {
        WavpackStream *wps = wpc->streams [wpc->current_stream];
        uint32_t flags = wps->wphdr.flags;

        // derive the header magnitude bits from the stored byte count

        flags &= ~MAG_MASK;
        flags += (1 << MAG_LSB) * ((flags & BYTES_STORED) * 8 + 7);

        SET_BLOCK_INDEX (wps->wphdr, wps->sample_index);
        wps->wphdr.block_samples = block_samples;
        wps->wphdr.flags = flags;
        wps->block2buff = out2buff;
        wps->block2end = out2end;
        wps->blockbuff = outbuff;
        wps->blockend = outend;

#ifdef ENABLE_DSD
        if (flags & DSD_FLAG)
            result = pack_dsd_block (wpc, wps->sample_buffer);
        else
#endif
            result = pack_block (wpc, wps->sample_buffer);

        if (result) {
            result = block_add_checksum (outbuff, outend, (flags & HYBRID_FLAG) ? 2 : 4);

            if (result && out2buff)
                result = block_add_checksum (out2buff, out2end, 2);
        }

        wps->blockbuff = wps->block2buff = NULL;

        // the packer may shrink the block; pick up the updated sample count

        if (wps->wphdr.block_samples != block_samples)
            block_samples = wps->wphdr.block_samples;

        if (!result) {
            strcpy (wpc->error_message, "output buffer overflowed!");
            break;
        }

        bcount = ((WavpackHeader *) outbuff)->ckSize + 8;
        WavpackNativeToLittleEndian ((WavpackHeader *) outbuff, WavpackHeaderFormat);
        result = wpc->blockout (wpc->wv_out, outbuff, bcount);

        if (!result) {
            strcpy (wpc->error_message, "can't write WavPack data, disk probably full!");
            break;
        }

        wpc->filelen += bcount;

        if (out2buff) {
            bcount = ((WavpackHeader *) out2buff)->ckSize + 8;
            WavpackNativeToLittleEndian ((WavpackHeader *) out2buff, WavpackHeaderFormat);
            result = wpc->blockout (wpc->wvc_out, out2buff, bcount);

            if (!result) {
                strcpy (wpc->error_message, "can't write WavPack data, disk probably full!");
                break;
            }

            wpc->file2len += bcount;
        }

        // slide any samples not consumed this pass to the front of the buffer

        if (wpc->acc_samples != block_samples)
            memmove (wps->sample_buffer, wps->sample_buffer + block_samples * (flags & MONO_FLAG ? 1 : 2),
                (wpc->acc_samples - block_samples) * sizeof (int32_t) * (flags & MONO_FLAG ? 1 : 2));
    }

    wpc->current_stream = 0;
    wpc->ave_block_samples = (wpc->ave_block_samples * 0x7 + block_samples + 0x4) >> 3;
    wpc->acc_samples -= block_samples;
    free (outbuff);

    if (out2buff)
        free (out2buff);

    return result;
} | 0 | [
"CWE-703",
"CWE-835"
]
| WavPack | 070ef6f138956d9ea9612e69586152339dbefe51 | 94,604,967,555,012,370,000,000,000,000,000,000,000 | 110 | issue #53: error out on zero sample rate |
/* Parse a swaps-format file (by default the system swaps path) into @tb.
 *
 * Returns 0 on success, -EINVAL for a missing table or when no swaps path
 * can be determined, or whatever mnt_table_parse_file() returns. */
int mnt_table_parse_swaps(struct libmnt_table *tb, const char *filename)
{
	const char *path = filename;

	if (!tb)
		return -EINVAL;

	/* Fall back to the default swaps location when none was given. */
	if (!path)
		path = mnt_get_swaps_path();
	if (!path)
		return -EINVAL;

	tb->fmt = MNT_FMT_SWAPS;
	return mnt_table_parse_file(tb, path);
} | 0 | [
"CWE-552",
"CWE-703"
]
| util-linux | 166e87368ae88bf31112a30e078cceae637f4cdb | 266,865,559,596,869,620,000,000,000,000,000,000,000 | 14 | libmount: remove support for deleted mount table entries
The "(deleted)" suffix has been originally used by kernel for deleted
mountpoints. Since kernel commit 9d4d65748a5ca26ea8650e50ba521295549bf4e3
(Dec 2014) kernel does not use this suffix for mount stuff in /proc at
all. Let's remove this support from libmount too.
Signed-off-by: Karel Zak <[email protected]> |
/*
 * Return a dentry for the parent of @child by looking up the ".." entry in
 * the child directory.  Used by the NFS export code.  Returns an ERR_PTR
 * (-ENOENT if ".." is missing, -EIO if it names an invalid inode number).
 */
struct dentry *ext4_get_parent(struct dentry *child)
{
__u32 ino;
static const struct qstr dotdot = QSTR_INIT("..", 2);
struct ext4_dir_entry_2 * de;
struct buffer_head *bh;
bh = ext4_find_entry(child->d_inode, &dotdot, &de);
if (!bh)
return ERR_PTR(-ENOENT);
/* Copy the inode number out before dropping the buffer reference. */
ino = le32_to_cpu(de->inode);
brelse(bh);
/* An on-disk ".." pointing at an out-of-range inode means corruption. */
if (!ext4_valid_inum(child->d_inode->i_sb, ino)) {
EXT4_ERROR_INODE(child->d_inode,
"bad parent inode number: %u", ino);
return ERR_PTR(-EIO);
}
return d_obtain_alias(ext4_iget(child->d_inode->i_sb, ino));
} | 0 | [
"CWE-20"
]
| linux | c9b92530a723ac5ef8e352885a1862b18f31b2f5 | 186,667,755,521,553,120,000,000,000,000,000,000,000 | 21 | ext4: make orphan functions be no-op in no-journal mode
Instead of checking whether the handle is valid, we check if journal
is enabled. This avoids taking the s_orphan_lock mutex in all cases
when there is no journal in use, including the error paths where
ext4_orphan_del() is called with a handle set to NULL.
Signed-off-by: Anatol Pomozov <[email protected]>
Signed-off-by: "Theodore Ts'o" <[email protected]> |
Http::FilterHeadersStatus Context::onResponseHeaders() {
  // The in-VM context is normally created while handling the request.  When
  // the request was invalid, onRequestHeaders()/onCreate() never ran and we
  // can arrive here via sendLocalReply(), so lazily create the VM-side
  // Context before calling into it.
  if (!in_vm_context_created_) {
    onCreate(root_context_id_);
    in_vm_context_created_ = true;
  }
  // No handler, or a handler result of 0, means "keep iterating"; any other
  // result pauses filter iteration.  Short-circuit evaluation guarantees the
  // handler is only invoked when it exists, exactly as before.
  if (!wasm_->onResponseHeaders_ || wasm_->onResponseHeaders_(this, id_).u64_ == 0) {
    return Http::FilterHeadersStatus::Continue;
  }
  return Http::FilterHeadersStatus::StopIteration;
} | 0 | [
"CWE-476"
]
| envoy | 8788a3cf255b647fd14e6b5e2585abaaedb28153 | 19,732,344,615,085,574,000,000,000,000,000,000,000 | 17 | 1.4 - Do not call into the VM unless the VM Context has been created. (#24)
* Ensure that the in VM Context is created before onDone is called.
Signed-off-by: John Plevyak <[email protected]>
* Update as per offline discussion.
Signed-off-by: John Plevyak <[email protected]>
* Set in_vm_context_created_ in onNetworkNewConnection.
Signed-off-by: John Plevyak <[email protected]>
* Add guards to other network calls.
Signed-off-by: John Plevyak <[email protected]>
* Fix common/wasm tests.
Signed-off-by: John Plevyak <[email protected]>
* Patch tests.
Signed-off-by: John Plevyak <[email protected]>
* Remove unecessary file from cherry-pick.
Signed-off-by: John Plevyak <[email protected]> |
/*
 * Read a standalone JNG image file.  Validates the 8-byte JNG signature,
 * allocates an MngInfo context, and delegates the actual decoding to
 * ReadOneJNGImage().  Returns the decoded Image or NULL on failure (with an
 * exception recorded by the Throw* macros).
 */
static Image *ReadJNGImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
logging,
status;
MngInfo
*mng_info;
char
magic_number[MagickPathExtent];
size_t
count;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
logging=LogMagickEvent(CoderEvent,GetMagickModule(),"Enter ReadJNGImage()");
image=AcquireImage(image_info,exception);
mng_info=(MngInfo *) NULL;
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
return((Image *) NULL);
if (LocaleCompare(image_info->magick,"JNG") != 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/* Verify JNG signature. */
count=(size_t) ReadBlob(image,8,(unsigned char *) magic_number);
if (count < 8 || memcmp(magic_number,"\213JNG\r\n\032\n",8) != 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/* Allocate a MngInfo structure. */
mng_info=(MngInfo *) AcquireMagickMemory(sizeof(*mng_info));
if (mng_info == (MngInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
/* Initialize members of the MngInfo structure. */
(void) ResetMagickMemory(mng_info,0,sizeof(MngInfo));
mng_info->image=image;
/* ReadOneJNGImage owns the decode; it may return NULL on failure. */
image=ReadOneJNGImage(mng_info,image_info,exception);
mng_info=MngInfoFreeStruct(mng_info);
if (image == (Image *) NULL)
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"exit ReadJNGImage() with error");
return((Image *) NULL);
}
(void) CloseBlob(image);
/* A decode that produced an empty geometry is treated as corruption. */
if (image->columns == 0 || image->rows == 0)
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"exit ReadJNGImage() with error");
ThrowReaderException(CorruptImageError,"CorruptImage");
}
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"exit ReadJNGImage()");
return(image);
} | 0 | [
"CWE-772",
"CWE-787"
]
| ImageMagick | 8ca35831e91c3db8c6d281d09b605001003bec08 | 75,718,167,756,862,010,000,000,000,000,000,000,000 | 85 | coders/png.c: Stop a memory leak in read_user_chunk_callback() (reference
https://github.com/ImageMagick/ImageMagick/issues/517). |
/*
 * Append one HTTP parameter (key plus zvalue) to @buf using the configured
 * separators: @pss/@psl = param separator, @ass/@asl = argument separator,
 * @vss/@vsl = value separator.  Arrays/objects are rendered as dimensions,
 * RFC 5987 extended values, or argument lists depending on @flags and
 * @rfc5987; scalars are rendered as key/value (RFC 5988 style when flagged).
 */
static void shift_param(php_http_buffer_t *buf, char *key_str, size_t key_len, zval *zvalue, const char *pss, size_t psl, const char *ass, size_t asl, const char *vss, size_t vsl, unsigned flags, zend_bool rfc5987 TSRMLS_DC)
{
if (Z_TYPE_P(zvalue) == IS_ARRAY || Z_TYPE_P(zvalue) == IS_OBJECT) {
/* treat as arguments, unless we care for dimensions or rfc5987 */
if (flags & PHP_HTTP_PARAMS_DIMENSION) {
/* Render the array as nested dimensions (key[a][b]=...). */
php_http_buffer_t *keybuf = php_http_buffer_from_string(key_str, key_len);
prepare_dimension(buf, keybuf, zvalue, pss, psl, vss, vsl, flags TSRMLS_CC);
php_http_buffer_free(&keybuf);
} else if (rfc5987) {
/* RFC 5987: key, then charset/language-tagged extended value. */
shift_key(buf, key_str, key_len, pss, psl, flags TSRMLS_CC);
shift_rfc5987(buf, zvalue, vss, vsl, flags TSRMLS_CC);
} else {
shift_arg(buf, key_str, key_len, zvalue, ass, asl, vss, vsl, flags TSRMLS_CC);
}
} else {
/* Scalar value: emit the key (RFC 5988 link-style when requested),
* then the value itself. */
if (flags & PHP_HTTP_PARAMS_RFC5988) {
shift_rfc5988(buf, key_str, key_len, pss, psl, flags TSRMLS_CC);
} else {
shift_key(buf, key_str, key_len, pss, psl, flags TSRMLS_CC);
}
shift_val(buf, zvalue, vss, vsl, flags TSRMLS_CC);
}
} | 0 | [
"CWE-399",
"CWE-704"
]
| ext-http | 17137d4ab1ce81a2cee0fae842340a344ef3da83 | 273,876,986,086,899,080,000,000,000,000,000,000,000 | 23 | fix bug #73055 |
static int io_symlinkat_prep(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
struct io_symlink *sl = &req->symlink;
const char __user *oldpath, *newpath;
if (sqe->len || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
return -EINVAL;
if (unlikely(req->flags & REQ_F_FIXED_FILE))
return -EBADF;
sl->new_dfd = READ_ONCE(sqe->fd);
oldpath = u64_to_user_ptr(READ_ONCE(sqe->addr));
newpath = u64_to_user_ptr(READ_ONCE(sqe->addr2));
sl->oldpath = getname(oldpath);
if (IS_ERR(sl->oldpath))
return PTR_ERR(sl->oldpath);
sl->newpath = getname(newpath);
if (IS_ERR(sl->newpath)) {
putname(sl->oldpath);
return PTR_ERR(sl->newpath);
}
req->flags |= REQ_F_NEED_CLEANUP;
return 0;
} | 0 | [
"CWE-416"
]
| linux | 9cae36a094e7e9d6e5fe8b6dcd4642138b3eb0c7 | 306,459,623,822,364,600,000,000,000,000,000,000,000 | 28 | io_uring: reinstate the inflight tracking
After some debugging, it was realized that we really do still need the
old inflight tracking for any file type that has io_uring_fops assigned.
If we don't, then trivial circular references will mean that we never get
the ctx cleaned up and hence it'll leak.
Just bring back the inflight tracking, which then also means we can
eliminate the conditional dropping of the file when task_work is queued.
Fixes: d5361233e9ab ("io_uring: drop the old style inflight file tracking")
Signed-off-by: Jens Axboe <[email protected]> |
/* Release global ffprobe state before exit; the exit code argument is
 * accepted but unused here. */
static void ffprobe_cleanup(int ret)
{
    int idx;

    /* Free each section's dictionary of entries selected for display. */
    for (idx = 0; idx < FF_ARRAY_ELEMS(sections); idx++)
        av_dict_free(&sections[idx].entries_to_show);

#if HAVE_THREADS
    pthread_mutex_destroy(&log_mutex);
#endif
} | 0 | [
"CWE-476"
]
| FFmpeg | 837cb4325b712ff1aab531bf41668933f61d75d2 | 173,944,408,091,948,650,000,000,000,000,000,000,000 | 10 | ffprobe: Fix null pointer dereference with color primaries
Found-by: AD-lab of venustech
Signed-off-by: Michael Niedermayer <[email protected]> |
/* Decode an OpenFlow async-configuration message into @ac.
 *
 * OF1.3-era messages (and the Nicira extension) carry three fixed legacy
 * mask pairs and fully replace the configuration, starting from
 * OFPUTIL_ASYNC_CFG_INIT.  OF1.4-era messages are TLV property lists that
 * update @basis incrementally.  @loose controls whether unknown properties
 * are tolerated or rejected.  Returns 0 or an OFPERR_* value. */
ofputil_decode_set_async_config(const struct ofp_header *oh, bool loose,
const struct ofputil_async_cfg *basis,
struct ofputil_async_cfg *ac)
{
struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length));
enum ofpraw raw = ofpraw_pull_assert(&b);
if (raw == OFPRAW_OFPT13_SET_ASYNC ||
raw == OFPRAW_NXT_SET_ASYNC_CONFIG ||
raw == OFPRAW_OFPT13_GET_ASYNC_REPLY) {
/* Legacy fixed-layout form: three (master, slave) mask pairs. */
const struct nx_async_config *msg = ofpmsg_body(oh);
*ac = OFPUTIL_ASYNC_CFG_INIT;
decode_legacy_async_masks(msg->packet_in_mask, OAM_PACKET_IN,
oh->version, ac);
decode_legacy_async_masks(msg->port_status_mask, OAM_PORT_STATUS,
oh->version, ac);
decode_legacy_async_masks(msg->flow_removed_mask, OAM_FLOW_REMOVED,
oh->version, ac);
} else if (raw == OFPRAW_OFPT14_SET_ASYNC ||
raw == OFPRAW_OFPT14_GET_ASYNC_REPLY ||
raw == OFPRAW_NXT_SET_ASYNC_CONFIG2) {
/* OF1.4+ property-list form: apply each TLV on top of @basis. */
*ac = *basis;
while (b.size > 0) {
struct ofpbuf property;
enum ofperr error;
uint64_t type;
error = ofpprop_pull__(&b, &property, 8, 0xfffe, &type);
if (error) {
return error;
}
const struct ofp14_async_prop *ap
= get_ofp14_async_config_prop_by_prop_type(type);
error = (ap
? parse_async_tlv(&property, ap, ac, oh->version, loose)
: OFPPROP_UNKNOWN(loose, "async config", type));
if (error) {
/* Most messages use OFPBPC_BAD_TYPE but async has its own (who
* knows why, it's OpenFlow. */
if (error == OFPERR_OFPBPC_BAD_TYPE) {
error = OFPERR_OFPACFC_UNSUPPORTED;
}
return error;
}
}
} else {
return OFPERR_OFPBRC_BAD_VERSION;
}
return 0;
} | 0 | [
"CWE-772"
]
| ovs | 77ad4225d125030420d897c873e4734ac708c66b | 161,746,486,782,072,180,000,000,000,000,000,000,000 | 52 | ofp-util: Fix memory leaks on error cases in ofputil_decode_group_mod().
Found by libFuzzer.
Reported-by: Bhargava Shastry <[email protected]>
Signed-off-by: Ben Pfaff <[email protected]>
Acked-by: Justin Pettit <[email protected]> |
/*
 * Move @s exactly @off UTF-8 characters forward (off > 0) or backward
 * (off < 0), without crossing @lim.  Returns the new position, or NULL if
 * the limit is reached before all |off| characters are consumed ("maybe"
 * hop).  Croaks on a malformed backward sequence: a run of continuation
 * bytes must be preceded by a UTF-8 start byte.
 */
STATIC U8 *
S_reghopmaybe3(U8* s, SSize_t off, const U8* lim)
{
PERL_ARGS_ASSERT_REGHOPMAYBE3;
if (off >= 0) {
while (off-- && s < lim) {
/* XXX could check well-formedness here */
s += UTF8SKIP(s);
}
/* Ran into lim with hops remaining: signal failure. */
if (off >= 0)
return NULL;
}
else {
while (off++ && s > lim) {
s--;
if (UTF8_IS_CONTINUED(*s)) {
/* Back up over the continuation bytes to the start byte. */
while (s > lim && UTF8_IS_CONTINUATION(*s))
s--;
/* A continuation run not anchored by a start byte is a
* malformation; croak rather than loop forever. */
if (! UTF8_IS_START(*s)) {
dTHX;
Perl_croak(aTHX_ "Malformed UTF-8 character (fatal)");
}
}
/* XXX could check well-formedness here */
}
if (off <= 0)
return NULL;
}
return s; | 0 | [
"CWE-416"
]
| perl5 | 22b433eff9a1ffa2454e18405a56650f07b385b5 | 237,411,776,477,449,400,000,000,000,000,000,000,000 | 30 | PATCH [perl #123562] Regexp-matching "hangs"
The regex engine got into an infinite loop because of the malformation.
It is trying to back-up over a sequence of UTF-8 continuation bytes.
But the character just before the sequence should be a start byte. If
not, there is a malformation. I added a test to croak if that isn't the
case so that it doesn't just infinitely loop. I did this also in the
similar areas of regexec.c.
Comments long ago added to the code suggested that we check for
malformations in the vicinity of the new tests. But that was never
done. These new tests should be good enough to prevent looping, anyway. |
/*
 * Deep-copy an ldb message: the DN, every element name, and every value are
 * duplicated into talloc memory owned by the copy, so the result is fully
 * independent of @msg.  Returns NULL on allocation failure (partial copies
 * are freed).
 */
struct ldb_message *ldb_msg_copy(TALLOC_CTX *mem_ctx,
const struct ldb_message *msg)
{
struct ldb_message *msg2;
unsigned int i, j;
/* Start from a shallow copy, then replace each shared pointer. */
msg2 = ldb_msg_copy_shallow_impl(mem_ctx, msg);
if (msg2 == NULL) return NULL;
if (msg2->dn != NULL) {
msg2->dn = ldb_dn_copy(msg2, msg2->dn);
if (msg2->dn == NULL) goto failed;
}
for (i=0;i<msg2->num_elements;i++) {
struct ldb_message_element *el = &msg2->elements[i];
struct ldb_val *values = el->values;
el->name = talloc_strdup(msg2->elements, el->name);
if (el->name == NULL) goto failed;
el->values = talloc_array(msg2->elements, struct ldb_val, el->num_values);
if (el->values == NULL) goto failed;
for (j=0;j<el->num_values;j++) {
el->values[j] = ldb_val_dup(el->values, &values[j]);
/* ldb_val_dup of a zero-length value may legitimately return a
* NULL data pointer, so only treat NULL as failure when the
* source value was non-empty. */
if (el->values[j].data == NULL && values[j].length != 0) {
goto failed;
}
}
/*
* Since we copied this element's values, we can mark them as
* not shared.
*/
el->flags &= ~LDB_FLAG_INTERNAL_SHARED_VALUES;
}
return msg2;
failed:
/* Freeing msg2 releases everything talloc'd beneath it. */
talloc_free(msg2);
return NULL;
} | 0 | [
"CWE-200"
]
| samba | 7efe8182c165fbf17d2f88c173527a7a554e214b | 294,315,324,850,493,980,000,000,000,000,000,000,000 | 41 | CVE-2022-32746 ldb: Add flag to mark message element values as shared
When making a shallow copy of an ldb message, mark the message elements
of the copy as sharing their values with the message elements in the
original message.
This flag value will be heeded in the next commit.
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <[email protected]> |
/*
 * Produce the Authorization header value for the current step of the NTLM
 * handshake on this connection.  The per-connection state machine advances
 * NEW -> SENT_REQUEST -> RECEIVED_CHALLENGE -> SENT_RESPONSE; when built
 * with USE_NTLM_AUTH, single sign-on via the external helper is attempted
 * first and fallbacks to the in-process implementation are handled here.
 * Returns a newly allocated header string, or NULL if no header applies.
 */
soup_auth_ntlm_get_connection_authorization (SoupConnectionAuth *auth,
SoupMessage *msg,
gpointer state)
{
SoupAuthNTLM *auth_ntlm = SOUP_AUTH_NTLM (auth);
SoupAuthNTLMPrivate *priv = soup_auth_ntlm_get_instance_private (auth_ntlm);
SoupNTLMConnectionState *conn = state;
char *header = NULL;
switch (conn->state) {
case SOUP_NTLM_NEW:
#ifdef USE_NTLM_AUTH
/* Try single sign-on first: "YR" asks the helper for the initial
* (Type 1) message.  A "PW" reply means the helper wants a
* password, so SSO is unusable and we fall through. */
if (sso_ntlm_initiate (priv)) {
header = sso_ntlm_response (priv, "YR\n", conn->state);
if (header) {
if (g_ascii_strcasecmp (header, "PW") != 0) {
conn->state = SOUP_NTLM_SENT_REQUEST;
break;
} else {
g_free (header);
header = NULL;
priv->sso_available = FALSE;
}
} else {
g_debug ("NTLM single-sign-on using %s failed", NTLM_AUTH);
}
}
/* If NTLM single-sign-on fails, go back to original
* request handling process.
*/
#endif
header = soup_ntlm_request ();
conn->state = SOUP_NTLM_SENT_REQUEST;
break;
case SOUP_NTLM_RECEIVED_CHALLENGE:
/* Either a response prepared earlier (e.g. by the SSO helper) or
* one computed here from the stored credentials and the nonce. */
if (conn->response_header) {
header = conn->response_header;
conn->response_header = NULL;
} else {
header = soup_ntlm_response (conn->nonce,
priv->username,
priv->nt_hash,
priv->lm_hash,
NULL,
priv->domain,
conn->ntlmv2_session);
}
/* The nonce is single-use; drop it once a response is built. */
g_clear_pointer (&conn->nonce, g_free);
conn->state = SOUP_NTLM_SENT_RESPONSE;
if (priv->password_state != SOUP_NTLM_PASSWORD_ACCEPTED) {
/* We need to know if this worked */
g_signal_connect (msg, "got-headers",
G_CALLBACK (got_final_auth_result),
auth);
}
break;
#ifdef USE_NTLM_AUTH
case SOUP_NTLM_SSO_FAILED:
/* Restart request without SSO */
g_debug ("NTLM single-sign-on by using %s failed", NTLM_AUTH);
priv->sso_available = FALSE;
header = soup_ntlm_request ();
conn->state = SOUP_NTLM_SENT_REQUEST;
break;
#endif
default:
break;
}
return header;
} | 1 | [
"CWE-125"
]
| libsoup | 0e7b2c1466434a992b6a387497432e1c97b6125c | 42,471,001,866,026,713,000,000,000,000,000,000,000 | 72 | NTLMv2 responses support |
/* Return a new, empty AS path, built by parsing a zero-length buffer.
 * The final argument selects 4-byte (32-bit) ASN encoding. */
aspath_empty (void)
{
return aspath_parse (NULL, 0, 1); /* 32Bit ;-) */
} | 0 | [
"CWE-20"
]
| quagga | 7a42b78be9a4108d98833069a88e6fddb9285008 | 284,939,196,677,547,000,000,000,000,000,000,000,000 | 4 | bgpd: Fix AS_PATH size calculation for long paths
If you have an AS_PATH with more entries than
what can be written into a single AS_SEGMENT_MAX
it needs to be broken up. The code that noticed
that the AS_PATH needs to be broken up was not
correctly calculating the size of the resulting
message. This patch addresses this issue. |
/*
 * Propagate the mount of @source_mnt at @dest_mp on @dest_mnt to every
 * member of dest_mnt's propagation group: first all of its peers, then
 * every slave group reachable from it.  New child mounts are collected on
 * @tree_list.  Returns 0 on success or the first error from
 * propagate_one(); in either case the temporary peer-group marks set
 * during propagation are cleared before returning.
 */
int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
struct mount *source_mnt, struct hlist_head *tree_list)
{
struct mount *m, *n;
int ret = 0;
/*
* we don't want to bother passing tons of arguments to
* propagate_one(); everything is serialized by namespace_sem,
* so globals will do just fine.
*/
user_ns = current->nsproxy->mnt_ns->user_ns;
last_dest = dest_mnt;
first_source = source_mnt;
last_source = source_mnt;
mp = dest_mp;
list = tree_list;
dest_master = dest_mnt->mnt_master;
/* all peers of dest_mnt, except dest_mnt itself */
for (n = next_peer(dest_mnt); n != dest_mnt; n = next_peer(n)) {
ret = propagate_one(n);
if (ret)
goto out;
}
/* all slave groups */
for (m = next_group(dest_mnt, dest_mnt); m;
m = next_group(m, dest_mnt)) {
/* everything in that slave group */
n = m;
do {
ret = propagate_one(n);
if (ret)
goto out;
n = next_peer(n);
} while (n != m);
}
out:
/* Clear the MNT_MARK hints left on master mounts by propagate_one();
* done under the mount_lock seqlock since we walk tree_list. */
read_seqlock_excl(&mount_lock);
hlist_for_each_entry(n, tree_list, mnt_hash) {
m = n->mnt_parent;
if (m->mnt_master != dest_mnt->mnt_master)
CLEAR_MNT_MARK(m->mnt_master);
}
read_sequnlock_excl(&mount_lock);
return ret;
} | 0 | [
"CWE-703"
]
| linux | 5ec0811d30378ae104f250bfc9b3640242d81e3f | 77,849,636,815,690,690,000,000,000,000,000,000,000 | 48 | propogate_mnt: Handle the first propogated copy being a slave
When the first propgated copy was a slave the following oops would result:
> BUG: unable to handle kernel NULL pointer dereference at 0000000000000010
> IP: [<ffffffff811fba4e>] propagate_one+0xbe/0x1c0
> PGD bacd4067 PUD bac66067 PMD 0
> Oops: 0000 [#1] SMP
> Modules linked in:
> CPU: 1 PID: 824 Comm: mount Not tainted 4.6.0-rc5userns+ #1523
> Hardware name: Bochs Bochs, BIOS Bochs 01/01/2007
> task: ffff8800bb0a8000 ti: ffff8800bac3c000 task.ti: ffff8800bac3c000
> RIP: 0010:[<ffffffff811fba4e>] [<ffffffff811fba4e>] propagate_one+0xbe/0x1c0
> RSP: 0018:ffff8800bac3fd38 EFLAGS: 00010283
> RAX: 0000000000000000 RBX: ffff8800bb77ec00 RCX: 0000000000000010
> RDX: 0000000000000000 RSI: ffff8800bb58c000 RDI: ffff8800bb58c480
> RBP: ffff8800bac3fd48 R08: 0000000000000001 R09: 0000000000000000
> R10: 0000000000001ca1 R11: 0000000000001c9d R12: 0000000000000000
> R13: ffff8800ba713800 R14: ffff8800bac3fda0 R15: ffff8800bb77ec00
> FS: 00007f3c0cd9b7e0(0000) GS:ffff8800bfb00000(0000) knlGS:0000000000000000
> CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> CR2: 0000000000000010 CR3: 00000000bb79d000 CR4: 00000000000006e0
> Stack:
> ffff8800bb77ec00 0000000000000000 ffff8800bac3fd88 ffffffff811fbf85
> ffff8800bac3fd98 ffff8800bb77f080 ffff8800ba713800 ffff8800bb262b40
> 0000000000000000 0000000000000000 ffff8800bac3fdd8 ffffffff811f1da0
> Call Trace:
> [<ffffffff811fbf85>] propagate_mnt+0x105/0x140
> [<ffffffff811f1da0>] attach_recursive_mnt+0x120/0x1e0
> [<ffffffff811f1ec3>] graft_tree+0x63/0x70
> [<ffffffff811f1f6b>] do_add_mount+0x9b/0x100
> [<ffffffff811f2c1a>] do_mount+0x2aa/0xdf0
> [<ffffffff8117efbe>] ? strndup_user+0x4e/0x70
> [<ffffffff811f3a45>] SyS_mount+0x75/0xc0
> [<ffffffff8100242b>] do_syscall_64+0x4b/0xa0
> [<ffffffff81988f3c>] entry_SYSCALL64_slow_path+0x25/0x25
> Code: 00 00 75 ec 48 89 0d 02 22 22 01 8b 89 10 01 00 00 48 89 05 fd 21 22 01 39 8e 10 01 00 00 0f 84 e0 00 00 00 48 8b 80 d8 00 00 00 <48> 8b 50 10 48 89 05 df 21 22 01 48 89 15 d0 21 22 01 8b 53 30
> RIP [<ffffffff811fba4e>] propagate_one+0xbe/0x1c0
> RSP <ffff8800bac3fd38>
> CR2: 0000000000000010
> ---[ end trace 2725ecd95164f217 ]---
This oops happens with the namespace_sem held and can be triggered by
non-root users. An all around not pleasant experience.
To avoid this scenario when finding the appropriate source mount to
copy stop the walk up the mnt_master chain when the first source mount
is encountered.
Further rewrite the walk up the last_source mnt_master chain so that
it is clear what is going on.
The reason why the first source mount is special is that it it's
mnt_parent is not a mount in the dest_mnt propagation tree, and as
such termination conditions based up on the dest_mnt mount propgation
tree do not make sense.
To avoid other kinds of confusion last_dest is not changed when
computing last_source. last_dest is only used once in propagate_one
and that is above the point of the code being modified, so changing
the global variable is meaningless and confusing.
Cc: [email protected]
fixes: f2ebb3a921c1ca1e2ddd9242e95a1989a50c4c68 ("smarter propagate_mnt()")
Reported-by: Tycho Andersen <[email protected]>
Reviewed-by: Seth Forshee <[email protected]>
Tested-by: Seth Forshee <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]> |
/*
 * Create an ALG expectation for a data connection related to @master_conn
 * (e.g. an FTP data channel) and insert it into ct->alg_expectations,
 * keyed on the addresses appropriate for the given @mode.  If an
 * expectation with the same key already exists the new node is discarded.
 * Takes ct->resources_lock for writing around the lookup-and-insert.
 */
expectation_create(struct conntrack *ct,
ovs_be16 dst_port,
const long long now,
enum ct_alg_mode mode,
const struct conn *master_conn)
{
struct ct_addr src_addr;
struct ct_addr dst_addr;
struct ct_addr alg_nat_repl_addr;
/* Select the expected flow's endpoints and the NAT replacement address
* based on whether the data connection is initiated by the server
* (active FTP / TFTP) or the client (passive FTP). */
switch (mode) {
case CT_FTP_MODE_ACTIVE:
case CT_TFTP_MODE:
src_addr = master_conn->rev_key.src.addr;
dst_addr = master_conn->rev_key.dst.addr;
alg_nat_repl_addr = master_conn->key.src.addr;
break;
case CT_FTP_MODE_PASSIVE:
src_addr = master_conn->key.src.addr;
dst_addr = master_conn->key.dst.addr;
alg_nat_repl_addr = master_conn->rev_key.dst.addr;
break;
default:
OVS_NOT_REACHED();
}
struct alg_exp_node *alg_exp_node =
xzalloc(sizeof *alg_exp_node);
alg_exp_node->key.dl_type = master_conn->key.dl_type;
alg_exp_node->key.nw_proto = master_conn->key.nw_proto;
alg_exp_node->key.zone = master_conn->key.zone;
alg_exp_node->key.src.addr = src_addr;
alg_exp_node->key.dst.addr = dst_addr;
/* Source port is a wildcard; only the destination port is pinned. */
alg_exp_node->key.src.port = ALG_WC_SRC_PORT;
alg_exp_node->key.dst.port = dst_port;
alg_exp_node->master_mark = master_conn->mark;
alg_exp_node->master_label = master_conn->label;
alg_exp_node->master_key = master_conn->key;
alg_exp_node->passive_mode = mode == CT_FTP_MODE_PASSIVE;
/* Take the write lock here because it is almost 100%
* likely that the lookup will fail and
* expectation_create() will be called below. */
ct_rwlock_wrlock(&ct->resources_lock);
struct alg_exp_node *alg_exp = expectation_lookup(
&ct->alg_expectations, &alg_exp_node->key, ct->hash_basis);
if (alg_exp) {
/* Duplicate expectation: keep the existing one. */
free(alg_exp_node);
ct_rwlock_unlock(&ct->resources_lock);
return;
}
alg_exp_node->alg_nat_repl_addr = alg_nat_repl_addr;
uint32_t alg_exp_conn_key_hash =
conn_key_hash(&alg_exp_node->key,
ct->hash_basis);
hmap_insert(&ct->alg_expectations,
&alg_exp_node->node,
alg_exp_conn_key_hash);
alg_exp_init_expiration(ct, alg_exp_node, now);
ct_rwlock_unlock(&ct->resources_lock);
} | 0 | [
"CWE-400"
]
| ovs | 35c280072c1c3ed58202745b7d27fbbd0736999b | 54,618,750,596,635,640,000,000,000,000,000,000,000 | 62 | flow: Support extra padding length.
Although not required, padding can be optionally added until
the packet length is MTU bytes. A packet with extra padding
currently fails sanity checks.
Vulnerability: CVE-2020-35498
Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.")
Reported-by: Joakim Hindersson <[email protected]>
Acked-by: Ilya Maximets <[email protected]>
Signed-off-by: Flavio Leitner <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]> |
static void __dvb_frontend_free(struct dvb_frontend *fe)
{
struct dvb_frontend_private *fepriv = fe->frontend_priv;
if (fepriv)
dvb_free_device(fepriv->dvbdev);
dvb_frontend_invoke_release(fe, fe->ops.release);
if (!fepriv)
return;
kfree(fepriv);
fe->frontend_priv = NULL;
} | 1 | [
"CWE-416"
]
| linux | b1cb7372fa822af6c06c8045963571d13ad6348b | 10,222,320,032,761,007,000,000,000,000,000,000,000 | 15 | dvb_frontend: don't use-after-free the frontend struct
dvb_frontend_invoke_release() may free the frontend struct.
So, the free logic can't update it anymore after calling it.
That's OK, as __dvb_frontend_free() is called only when the
krefs are zeroed, so nobody is using it anymore.
That should fix the following KASAN error:
The KASAN report looks like this (running on kernel 3e0cc09a3a2c40ec1ffb6b4e12da86e98feccb11 (4.14-rc5+)):
==================================================================
BUG: KASAN: use-after-free in __dvb_frontend_free+0x113/0x120
Write of size 8 at addr ffff880067d45a00 by task kworker/0:1/24
CPU: 0 PID: 24 Comm: kworker/0:1 Not tainted 4.14.0-rc5-43687-g06ab8a23e0e6 #545
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
Workqueue: usb_hub_wq hub_event
Call Trace:
__dump_stack lib/dump_stack.c:16
dump_stack+0x292/0x395 lib/dump_stack.c:52
print_address_description+0x78/0x280 mm/kasan/report.c:252
kasan_report_error mm/kasan/report.c:351
kasan_report+0x23d/0x350 mm/kasan/report.c:409
__asan_report_store8_noabort+0x1c/0x20 mm/kasan/report.c:435
__dvb_frontend_free+0x113/0x120 drivers/media/dvb-core/dvb_frontend.c:156
dvb_frontend_put+0x59/0x70 drivers/media/dvb-core/dvb_frontend.c:176
dvb_frontend_detach+0x120/0x150 drivers/media/dvb-core/dvb_frontend.c:2803
dvb_usb_adapter_frontend_exit+0xd6/0x160 drivers/media/usb/dvb-usb/dvb-usb-dvb.c:340
dvb_usb_adapter_exit drivers/media/usb/dvb-usb/dvb-usb-init.c:116
dvb_usb_exit+0x9b/0x200 drivers/media/usb/dvb-usb/dvb-usb-init.c:132
dvb_usb_device_exit+0xa5/0xf0 drivers/media/usb/dvb-usb/dvb-usb-init.c:295
usb_unbind_interface+0x21c/0xa90 drivers/usb/core/driver.c:423
__device_release_driver drivers/base/dd.c:861
device_release_driver_internal+0x4f1/0x5c0 drivers/base/dd.c:893
device_release_driver+0x1e/0x30 drivers/base/dd.c:918
bus_remove_device+0x2f4/0x4b0 drivers/base/bus.c:565
device_del+0x5c4/0xab0 drivers/base/core.c:1985
usb_disable_device+0x1e9/0x680 drivers/usb/core/message.c:1170
usb_disconnect+0x260/0x7a0 drivers/usb/core/hub.c:2124
hub_port_connect drivers/usb/core/hub.c:4754
hub_port_connect_change drivers/usb/core/hub.c:5009
port_event drivers/usb/core/hub.c:5115
hub_event+0x1318/0x3740 drivers/usb/core/hub.c:5195
process_one_work+0xc73/0x1d90 kernel/workqueue.c:2119
worker_thread+0x221/0x1850 kernel/workqueue.c:2253
kthread+0x363/0x440 kernel/kthread.c:231
ret_from_fork+0x2a/0x40 arch/x86/entry/entry_64.S:431
Allocated by task 24:
save_stack_trace+0x1b/0x20 arch/x86/kernel/stacktrace.c:59
save_stack+0x43/0xd0 mm/kasan/kasan.c:447
set_track mm/kasan/kasan.c:459
kasan_kmalloc+0xad/0xe0 mm/kasan/kasan.c:551
kmem_cache_alloc_trace+0x11e/0x2d0 mm/slub.c:2772
kmalloc ./include/linux/slab.h:493
kzalloc ./include/linux/slab.h:666
dtt200u_fe_attach+0x4c/0x110 drivers/media/usb/dvb-usb/dtt200u-fe.c:212
dtt200u_frontend_attach+0x35/0x80 drivers/media/usb/dvb-usb/dtt200u.c:136
dvb_usb_adapter_frontend_init+0x32b/0x660 drivers/media/usb/dvb-usb/dvb-usb-dvb.c:286
dvb_usb_adapter_init drivers/media/usb/dvb-usb/dvb-usb-init.c:86
dvb_usb_init drivers/media/usb/dvb-usb/dvb-usb-init.c:162
dvb_usb_device_init+0xf73/0x17f0 drivers/media/usb/dvb-usb/dvb-usb-init.c:277
dtt200u_usb_probe+0xa1/0xe0 drivers/media/usb/dvb-usb/dtt200u.c:155
usb_probe_interface+0x35d/0x8e0 drivers/usb/core/driver.c:361
really_probe drivers/base/dd.c:413
driver_probe_device+0x610/0xa00 drivers/base/dd.c:557
__device_attach_driver+0x230/0x290 drivers/base/dd.c:653
bus_for_each_drv+0x161/0x210 drivers/base/bus.c:463
__device_attach+0x26b/0x3c0 drivers/base/dd.c:710
device_initial_probe+0x1f/0x30 drivers/base/dd.c:757
bus_probe_device+0x1eb/0x290 drivers/base/bus.c:523
device_add+0xd0b/0x1660 drivers/base/core.c:1835
usb_set_configuration+0x104e/0x1870 drivers/usb/core/message.c:1932
generic_probe+0x73/0xe0 drivers/usb/core/generic.c:174
usb_probe_device+0xaf/0xe0 drivers/usb/core/driver.c:266
really_probe drivers/base/dd.c:413
driver_probe_device+0x610/0xa00 drivers/base/dd.c:557
__device_attach_driver+0x230/0x290 drivers/base/dd.c:653
bus_for_each_drv+0x161/0x210 drivers/base/bus.c:463
__device_attach+0x26b/0x3c0 drivers/base/dd.c:710
device_initial_probe+0x1f/0x30 drivers/base/dd.c:757
bus_probe_device+0x1eb/0x290 drivers/base/bus.c:523
device_add+0xd0b/0x1660 drivers/base/core.c:1835
usb_new_device+0x7b8/0x1020 drivers/usb/core/hub.c:2457
hub_port_connect drivers/usb/core/hub.c:4903
hub_port_connect_change drivers/usb/core/hub.c:5009
port_event drivers/usb/core/hub.c:5115
hub_event+0x194d/0x3740 drivers/usb/core/hub.c:5195
process_one_work+0xc73/0x1d90 kernel/workqueue.c:2119
worker_thread+0x221/0x1850 kernel/workqueue.c:2253
kthread+0x363/0x440 kernel/kthread.c:231
ret_from_fork+0x2a/0x40 arch/x86/entry/entry_64.S:431
Freed by task 24:
save_stack_trace+0x1b/0x20 arch/x86/kernel/stacktrace.c:59
save_stack+0x43/0xd0 mm/kasan/kasan.c:447
set_track mm/kasan/kasan.c:459
kasan_slab_free+0x72/0xc0 mm/kasan/kasan.c:524
slab_free_hook mm/slub.c:1390
slab_free_freelist_hook mm/slub.c:1412
slab_free mm/slub.c:2988
kfree+0xf6/0x2f0 mm/slub.c:3919
dtt200u_fe_release+0x3c/0x50 drivers/media/usb/dvb-usb/dtt200u-fe.c:202
dvb_frontend_invoke_release.part.13+0x1c/0x30 drivers/media/dvb-core/dvb_frontend.c:2790
dvb_frontend_invoke_release drivers/media/dvb-core/dvb_frontend.c:2789
__dvb_frontend_free+0xad/0x120 drivers/media/dvb-core/dvb_frontend.c:153
dvb_frontend_put+0x59/0x70 drivers/media/dvb-core/dvb_frontend.c:176
dvb_frontend_detach+0x120/0x150 drivers/media/dvb-core/dvb_frontend.c:2803
dvb_usb_adapter_frontend_exit+0xd6/0x160 drivers/media/usb/dvb-usb/dvb-usb-dvb.c:340
dvb_usb_adapter_exit drivers/media/usb/dvb-usb/dvb-usb-init.c:116
dvb_usb_exit+0x9b/0x200 drivers/media/usb/dvb-usb/dvb-usb-init.c:132
dvb_usb_device_exit+0xa5/0xf0 drivers/media/usb/dvb-usb/dvb-usb-init.c:295
usb_unbind_interface+0x21c/0xa90 drivers/usb/core/driver.c:423
__device_release_driver drivers/base/dd.c:861
device_release_driver_internal+0x4f1/0x5c0 drivers/base/dd.c:893
device_release_driver+0x1e/0x30 drivers/base/dd.c:918
bus_remove_device+0x2f4/0x4b0 drivers/base/bus.c:565
device_del+0x5c4/0xab0 drivers/base/core.c:1985
usb_disable_device+0x1e9/0x680 drivers/usb/core/message.c:1170
usb_disconnect+0x260/0x7a0 drivers/usb/core/hub.c:2124
hub_port_connect drivers/usb/core/hub.c:4754
hub_port_connect_change drivers/usb/core/hub.c:5009
port_event drivers/usb/core/hub.c:5115
hub_event+0x1318/0x3740 drivers/usb/core/hub.c:5195
process_one_work+0xc73/0x1d90 kernel/workqueue.c:2119
worker_thread+0x221/0x1850 kernel/workqueue.c:2253
kthread+0x363/0x440 kernel/kthread.c:231
ret_from_fork+0x2a/0x40 arch/x86/entry/entry_64.S:431
The buggy address belongs to the object at ffff880067d45500
which belongs to the cache kmalloc-2048 of size 2048
The buggy address is located 1280 bytes inside of
2048-byte region [ffff880067d45500, ffff880067d45d00)
The buggy address belongs to the page:
page:ffffea00019f5000 count:1 mapcount:0 mapping: (null)
index:0x0 compound_mapcount: 0
flags: 0x100000000008100(slab|head)
raw: 0100000000008100 0000000000000000 0000000000000000 00000001000f000f
raw: dead000000000100 dead000000000200 ffff88006c002d80 0000000000000000
page dumped because: kasan: bad access detected
Memory state around the buggy address:
ffff880067d45900: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff880067d45980: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff880067d45a00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
^
ffff880067d45a80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff880067d45b00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
==================================================================
Fixes: ead666000a5f ("media: dvb_frontend: only use kref after initialized")
Reported-by: Andrey Konovalov <[email protected]>
Suggested-by: Matthias Schwarzott <[email protected]>
Tested-by: Andrey Konovalov <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]> |
static int nfq_id_after(unsigned int id, unsigned int max)
{
return (int)(id - max) > 0;
} | 0 | [
"CWE-416"
]
| net | 36d5fe6a000790f56039afe26834265db0a3ad4c | 284,679,748,576,333,440,000,000,000,000,000,000,000 | 4 | core, nfqueue, openvswitch: Orphan frags in skb_zerocopy and handle errors
skb_zerocopy can copy elements of the frags array between skbs, but it doesn't
orphan them. Also, it doesn't handle errors, so this patch takes care of that
as well, and modify the callers accordingly. skb_tx_error() is also added to
the callers so they will signal the failed delivery towards the creator of the
skb.
Signed-off-by: Zoltan Kiss <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
ecma_op_create_internal_buffer (void)
{
ecma_collection_t *collection_p = ecma_new_collection ();
ecma_collection_push_back (collection_p, (ecma_value_t) 0);
return collection_p;
} /* ecma_op_create_internal_buffer */ | 0 | [
"CWE-119",
"CWE-125",
"CWE-703"
]
| jerryscript | c2b662170245a16f46ce02eae68815c325d99821 | 92,647,169,555,272,340,000,000,000,000,000,000,000 | 7 | Fix adding entries to the internal buffer of a Map object (#3805)
When appending the key/value pair separately, garbage collection could be
triggered before the value is added, which could cause problems during
marking. This patch changes insertion to add both values at the same
time, which prevents partial entries from being present in the internal
buffer.
Fixes #3804.
JerryScript-DCO-1.0-Signed-off-by: Dániel Bátyai [email protected] |
relpTcpSetUsrPtr(relpTcp_t *pThis, void *pUsr)
{
ENTER_RELPFUNC;
RELPOBJ_assert(pThis, Tcp);
pThis->pUsr = pUsr;
LEAVE_RELPFUNC;
} | 0 | [
"CWE-787"
]
| librelp | 2cfe657672636aa5d7d2a14cfcb0a6ab9d1f00cf | 36,302,725,922,826,990,000,000,000,000,000,000,000 | 7 | unify error message generation |
static SymbolsHeader parseHeader(RBuffer *buf) {
ut8 b[64];
SymbolsHeader sh = { 0 };
(void)r_buf_read_at (buf, 0, b, sizeof (b));
sh.magic = r_read_le32 (b);
sh.version = r_read_le32 (b + 4);
sh.valid = sh.magic == 0xff01ff02;
int i;
for (i = 0; i < 16; i++) {
sh.uuid[i] = b[24 + i];
}
sh.unk0 = r_read_le16 (b + 0x28);
sh.unk1 = r_read_le16 (b + 0x2c); // is slotsize + 1 :?
sh.slotsize = r_read_le16 (b + 0x2e);
sh.size = 0x40;
return sh;
} | 0 | [
"CWE-476"
]
| radare2 | 515e592b9bea0612bc63d8e93239ff35bcf645c7 | 283,735,551,390,545,600,000,000,000,000,000,000,000 | 17 | Fix null deref in bin.symbols ##crash
* Reported by cnitlrt via huntr.dev |
static struct service_auth *find_authorization(guint id)
{
GSList *l;
GList *l2;
for (l = adapters; l != NULL; l = g_slist_next(l)) {
struct btd_adapter *adapter = l->data;
for (l2 = adapter->auths->head; l2 != NULL; l2 = l2->next) {
struct service_auth *auth = l2->data;
if (auth->id == id)
return auth;
}
}
return NULL;
} | 0 | [
"CWE-862",
"CWE-863"
]
| bluez | b497b5942a8beb8f89ca1c359c54ad67ec843055 | 267,017,179,019,458,680,000,000,000,000,000,000,000 | 18 | adapter: Fix storing discoverable setting
discoverable setting shall only be store when changed via Discoverable
property and not when discovery client set it as that be considered
temporary just for the lifetime of the discovery. |
void krok_box_del(GF_Box *s)
{
GF_TextKaraokeBox*ptr = (GF_TextKaraokeBox*)s;
if (ptr->records) gf_free(ptr->records);
gf_free(ptr);
} | 0 | [
"CWE-476"
]
| gpac | d527325a9b72218612455a534a508f9e1753f76e | 170,643,630,726,744,800,000,000,000,000,000,000,000 | 6 | fixed #1768 |
void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
struct dmae_command *dmae,
u8 src_type, u8 dst_type)
{
memset(dmae, 0, sizeof(struct dmae_command));
/* set the opcode */
dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
true, DMAE_COMP_PCI);
/* fill in the completion parameters */
dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
dmae->comp_val = DMAE_COMP_VAL;
} | 0 | [
"CWE-20"
]
| linux | 8914a595110a6eca69a5e275b323f5d09e18f4f9 | 273,690,863,887,758,850,000,000,000,000,000,000,000 | 15 | bnx2x: disable GSO where gso_size is too big for hardware
If a bnx2x card is passed a GSO packet with a gso_size larger than
~9700 bytes, it will cause a firmware error that will bring the card
down:
bnx2x: [bnx2x_attn_int_deasserted3:4323(enP24p1s0f0)]MC assert!
bnx2x: [bnx2x_mc_assert:720(enP24p1s0f0)]XSTORM_ASSERT_LIST_INDEX 0x2
bnx2x: [bnx2x_mc_assert:736(enP24p1s0f0)]XSTORM_ASSERT_INDEX 0x0 = 0x00000000 0x25e43e47 0x00463e01 0x00010052
bnx2x: [bnx2x_mc_assert:750(enP24p1s0f0)]Chip Revision: everest3, FW Version: 7_13_1
... (dump of values continues) ...
Detect when the mac length of a GSO packet is greater than the maximum
packet size (9700 bytes) and disable GSO.
Signed-off-by: Daniel Axtens <[email protected]>
Reviewed-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
gs_manager_set_lock_enabled (GSManager *manager,
gboolean lock_enabled)
{
g_return_if_fail (GS_IS_MANAGER (manager));
if (manager->priv->lock_enabled != lock_enabled) {
manager->priv->lock_enabled = lock_enabled;
}
} | 0 | []
| gnome-screensaver | 2f597ea9f1f363277fd4dfc109fa41bbc6225aca | 108,054,001,911,392,690,000,000,000,000,000,000,000 | 9 | Fix adding monitors
Make sure to show windows that are added. And fix an off by one bug. |
void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
struct fsnotify_group *group)
{
struct inotify_inode_mark *i_mark;
struct fsnotify_event *ignored_event, *notify_event;
struct inotify_event_private_data *event_priv;
struct fsnotify_event_private_data *fsn_event_priv;
int ret;
ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
FSNOTIFY_EVENT_NONE, NULL, 0,
GFP_NOFS);
if (!ignored_event)
return;
i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
if (unlikely(!event_priv))
goto skip_send_ignore;
fsn_event_priv = &event_priv->fsnotify_event_priv_data;
fsn_event_priv->group = group;
event_priv->wd = i_mark->wd;
notify_event = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv, NULL);
if (notify_event) {
if (IS_ERR(notify_event))
ret = PTR_ERR(notify_event);
else
fsnotify_put_event(notify_event);
inotify_free_event_priv(fsn_event_priv);
}
skip_send_ignore:
/* matches the reference taken when the event was created */
fsnotify_put_event(ignored_event);
/* remove this mark from the idr */
inotify_remove_from_idr(group, i_mark);
atomic_dec(&group->inotify_data.user->inotify_watches);
} | 0 | [
"CWE-399"
]
| linux | d0de4dc584ec6aa3b26fffea320a8457827768fc | 174,170,236,072,914,830,000,000,000,000,000,000,000 | 45 | inotify: fix double free/corruption of stuct user
On an error path in inotify_init1 a normal user can trigger a double
free of struct user. This is a regression introduced by a2ae4cc9a16e
("inotify: stop kernel memory leak on file creation failure").
We fix this by making sure that if a group exists the user reference is
dropped when the group is cleaned up. We should not explictly drop the
reference on error and also drop the reference when the group is cleaned
up.
The new lifetime rules are that an inotify group lives from
inotify_new_group to the last fsnotify_put_group. Since the struct user
and inotify_devs are directly tied to this lifetime they are only
changed/updated in those two locations. We get rid of all special
casing of struct user or user->inotify_devs.
Signed-off-by: Eric Paris <[email protected]>
Cc: [email protected] (2.6.37 and up)
Signed-off-by: Linus Torvalds <[email protected]> |
struct dce_aux *dcn20_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
if (!aux_engine)
return NULL;
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst]);
return &aux_engine->base;
} | 0 | [
"CWE-400",
"CWE-703",
"CWE-401"
]
| linux | 055e547478a11a6360c7ce05e2afc3e366968a12 | 236,379,203,495,561,730,000,000,000,000,000,000,000 | 16 | drm/amd/display: memory leak
In dcn*_clock_source_create when dcn20_clk_src_construct fails allocated
clk_src needs release.
Signed-off-by: Navid Emamdoost <[email protected]>
Signed-off-by: Alex Deucher <[email protected]> |
void HttpConnectionManagerConfig::processDynamicFilterConfig(
const std::string& name, const envoy::config::core::v3::ExtensionConfigSource& config_discovery,
FilterFactoriesList& filter_factories, const std::string& filter_chain_type,
bool last_filter_in_current_config) {
ENVOY_LOG(debug, " dynamic filter name: {}", name);
if (config_discovery.apply_default_config_without_warming() &&
!config_discovery.has_default_config()) {
throw EnvoyException(fmt::format(
"Error: filter config {} applied without warming but has no default config.", name));
}
for (const auto& type_url : config_discovery.type_urls()) {
auto factory_type_url = TypeUtil::typeUrlToDescriptorFullName(type_url);
auto* factory = Registry::FactoryRegistry<
Server::Configuration::NamedHttpFilterConfigFactory>::getFactoryByType(factory_type_url);
if (factory == nullptr) {
throw EnvoyException(
fmt::format("Error: no factory found for a required type URL {}.", factory_type_url));
}
}
auto filter_config_provider = filter_config_provider_manager_.createDynamicFilterConfigProvider(
config_discovery, name, context_, stats_prefix_, last_filter_in_current_config,
filter_chain_type);
filter_factories.push_back(std::move(filter_config_provider));
} | 0 | [
"CWE-22"
]
| envoy | 5333b928d8bcffa26ab19bf018369a835f697585 | 45,335,758,783,758,480,000,000,000,000,000,000,000 | 25 | Implement handling of escaped slash characters in URL path
Fixes: CVE-2021-29492
Signed-off-by: Yan Avlasov <[email protected]> |
ssize_t vnc_client_io_error(VncState *vs, ssize_t ret, Error **errp)
{
if (ret <= 0) {
if (ret == 0) {
VNC_DEBUG("Closing down client sock: EOF\n");
vnc_disconnect_start(vs);
} else if (ret != QIO_CHANNEL_ERR_BLOCK) {
VNC_DEBUG("Closing down client sock: ret %zd (%s)\n",
ret, errp ? error_get_pretty(*errp) : "Unknown");
vnc_disconnect_start(vs);
}
if (errp) {
error_free(*errp);
*errp = NULL;
}
return 0;
}
return ret;
} | 0 | [
"CWE-772"
]
| qemu | d3b0db6dfea6b3a9ee0d96aceb796bdcafa84314 | 161,546,336,873,469,430,000,000,000,000,000,000,000 | 20 | vnc: Set default kbd delay to 10ms
The current VNC default keyboard delay is 1ms. With that we're constantly
typing faster than the guest receives keyboard events from an XHCI attached
USB HID device.
The default keyboard delay time in the input layer however is 10ms. I don't know
how that number came to be, but empirical tests on some OpenQA driven ARM
systems show that 10ms really is a reasonable default number for the delay.
This patch moves the VNC delay also to 10ms. That way our default is much
safer (good!) and also consistent with the input layer default (also good!).
Signed-off-by: Alexander Graf <[email protected]>
Reviewed-by: Daniel P. Berrange <[email protected]>
Message-id: [email protected]
Signed-off-by: Gerd Hoffmann <[email protected]> |
data_writeable(ftpbuf_t *ftp, php_socket_t s)
{
int n;
n = php_pollfd_for_ms(s, POLLOUT, 1000);
if (n < 1) {
#ifndef PHP_WIN32
if (n == 0) {
errno = ETIMEDOUT;
}
#endif
return 0;
}
return 1;
} | 0 | [
"CWE-189"
]
| php-src | ac2832935435556dc593784cd0087b5e576bbe4d | 268,300,964,314,449,520,000,000,000,000,000,000,000 | 16 | Fix bug #69545 - avoid overflow when reading list |
GF_Err rely_dump(GF_Box *a, FILE * trace)
{
GF_RelyHintBox *p;
p = (GF_RelyHintBox *)a;
gf_isom_box_dump_start(a, "RelyTransmissionBox", trace);
fprintf(trace, "Prefered=\"%d\" required=\"%d\">\n", p->prefered, p->required);
gf_isom_box_dump_done("RelyTransmissionBox", a, trace);
return GF_OK;
} | 0 | [
"CWE-125"
]
| gpac | bceb03fd2be95097a7b409ea59914f332fb6bc86 | 204,713,394,937,330,900,000,000,000,000,000,000,000 | 9 | fixed 2 possible heap overflows (inc. #1088) |
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
union context *host_ctx, *guest_ctx;
int r, idx;
idx = srcu_read_lock(&vcpu->kvm->srcu);
again:
if (signal_pending(current)) {
r = -EINTR;
kvm_run->exit_reason = KVM_EXIT_INTR;
goto out;
}
preempt_disable();
local_irq_disable();
/*Get host and guest context with guest address space.*/
host_ctx = kvm_get_host_context(vcpu);
guest_ctx = kvm_get_guest_context(vcpu);
clear_bit(KVM_REQ_KICK, &vcpu->requests);
r = kvm_vcpu_pre_transition(vcpu);
if (r < 0)
goto vcpu_run_fail;
srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu->mode = IN_GUEST_MODE;
kvm_guest_enter();
/*
* Transition to the guest
*/
kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
kvm_vcpu_post_transition(vcpu);
vcpu->arch.launched = 1;
set_bit(KVM_REQ_KICK, &vcpu->requests);
local_irq_enable();
/*
* We must have an instruction between local_irq_enable() and
* kvm_guest_exit(), so the timer interrupt isn't delayed by
* the interrupt shadow. The stat.exits increment will do nicely.
* But we need to prevent reordering, hence this barrier():
*/
barrier();
kvm_guest_exit();
vcpu->mode = OUTSIDE_GUEST_MODE;
preempt_enable();
idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvm_handle_exit(kvm_run, vcpu);
if (r > 0) {
if (!need_resched())
goto again;
}
out:
srcu_read_unlock(&vcpu->kvm->srcu, idx);
if (r > 0) {
kvm_resched(vcpu);
idx = srcu_read_lock(&vcpu->kvm->srcu);
goto again;
}
return r;
vcpu_run_fail:
local_irq_enable();
preempt_enable();
kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
goto out;
} | 0 | [
"CWE-399"
]
| kvm | 5b40572ed5f0344b9dbee486a17c589ce1abe1a3 | 169,071,100,280,275,330,000,000,000,000,000,000,000 | 78 | KVM: Ensure all vcpus are consistent with in-kernel irqchip settings
If some vcpus are created before KVM_CREATE_IRQCHIP, then
irqchip_in_kernel() and vcpu->arch.apic will be inconsistent, leading
to potential NULL pointer dereferences.
Fix by:
- ensuring that no vcpus are installed when KVM_CREATE_IRQCHIP is called
- ensuring that a vcpu has an apic if it is installed after KVM_CREATE_IRQCHIP
This is somewhat long winded because vcpu->arch.apic is created without
kvm->lock held.
Based on earlier patch by Michael Ellerman.
Signed-off-by: Michael Ellerman <[email protected]>
Signed-off-by: Avi Kivity <[email protected]> |
ExceptHandler(expr_ty type, identifier name, asdl_seq * body, int lineno, int
col_offset, int end_lineno, int end_col_offset, PyArena *arena)
{
excepthandler_ty p;
p = (excepthandler_ty)PyArena_Malloc(arena, sizeof(*p));
if (!p)
return NULL;
p->kind = ExceptHandler_kind;
p->v.ExceptHandler.type = type;
p->v.ExceptHandler.name = name;
p->v.ExceptHandler.body = body;
p->lineno = lineno;
p->col_offset = col_offset;
p->end_lineno = end_lineno;
p->end_col_offset = end_col_offset;
return p;
} | 0 | [
"CWE-125"
]
| cpython | dcfcd146f8e6fc5c2fc16a4c192a0c5f5ca8c53c | 334,981,739,043,804,330,000,000,000,000,000,000,000 | 17 | bpo-35766: Merge typed_ast back into CPython (GH-11645) |
static void ip6gre_fb_tunnel_init(struct net_device *dev)
{
struct ip6_tnl *tunnel = netdev_priv(dev);
tunnel->dev = dev;
tunnel->net = dev_net(dev);
strcpy(tunnel->parms.name, dev->name);
tunnel->hlen = sizeof(struct ipv6hdr) + 4;
dev_hold(dev);
} | 0 | [
"CWE-125"
]
| net | 7892032cfe67f4bde6fc2ee967e45a8fbaf33756 | 56,923,356,822,893,120,000,000,000,000,000,000,000 | 12 | ip6_gre: fix ip6gre_err() invalid reads
Andrey Konovalov reported out of bound accesses in ip6gre_err()
If GRE flags contains GRE_KEY, the following expression
*(((__be32 *)p) + (grehlen / 4) - 1)
accesses data ~40 bytes after the expected point, since
grehlen includes the size of IPv6 headers.
Let's use a "struct gre_base_hdr *greh" pointer to make this
code more readable.
p[1] becomes greh->protocol.
grhlen is the GRE header length.
Fixes: c12b395a4664 ("gre: Support GRE over IPv6")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport, int dif)
{
struct sock *sk;
sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
dif, &udp_table, NULL);
if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
sk = NULL;
return sk;
} | 0 | []
| linux | a612769774a30e4fc143c4cb6395c12573415660 | 25,437,201,666,601,145,000,000,000,000,000,000,000 | 11 | udp: prevent bugcheck if filter truncates packet too much
If socket filter truncates an udp packet below the length of UDP header
in udpv6_queue_rcv_skb() or udp_queue_rcv_skb(), it will trigger a
BUG_ON in skb_pull_rcsum(). This BUG_ON (and therefore a system crash if
kernel is configured that way) can be easily enforced by an unprivileged
user which was reported as CVE-2016-6162. For a reproducer, see
http://seclists.org/oss-sec/2016/q3/8
Fixes: e6afc8ace6dd ("udp: remove headers from UDP packets before queueing")
Reported-by: Marco Grassi <[email protected]>
Signed-off-by: Michal Kubecek <[email protected]>
Acked-by: Eric Dumazet <[email protected]>
Acked-by: Willem de Bruijn <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static int wait_task_stopped(int ptrace, struct task_struct *p,
int options, struct siginfo __user *infop,
int __user *stat_addr, struct rusage __user *ru)
{
int retval, exit_code, why;
uid_t uid = 0; /* unneeded, required by compiler */
pid_t pid;
if (!(options & WUNTRACED))
return 0;
exit_code = 0;
spin_lock_irq(&p->sighand->siglock);
if (unlikely(!task_is_stopped_or_traced(p)))
goto unlock_sig;
if (!ptrace && p->signal->group_stop_count > 0)
/*
* A group stop is in progress and this is the group leader.
* We won't report until all threads have stopped.
*/
goto unlock_sig;
exit_code = p->exit_code;
if (!exit_code)
goto unlock_sig;
if (!unlikely(options & WNOWAIT))
p->exit_code = 0;
uid = p->uid;
unlock_sig:
spin_unlock_irq(&p->sighand->siglock);
if (!exit_code)
return 0;
/*
* Now we are pretty sure this task is interesting.
* Make sure it doesn't get reaped out from under us while we
* give up the lock and then examine it below. We don't want to
* keep holding onto the tasklist_lock while we call getrusage and
* possibly take page faults for user memory.
*/
get_task_struct(p);
pid = task_pid_vnr(p);
why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
read_unlock(&tasklist_lock);
if (unlikely(options & WNOWAIT))
return wait_noreap_copyout(p, pid, uid,
why, exit_code,
infop, ru);
retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
if (!retval && stat_addr)
retval = put_user((exit_code << 8) | 0x7f, stat_addr);
if (!retval && infop)
retval = put_user(SIGCHLD, &infop->si_signo);
if (!retval && infop)
retval = put_user(0, &infop->si_errno);
if (!retval && infop)
retval = put_user((short)why, &infop->si_code);
if (!retval && infop)
retval = put_user(exit_code, &infop->si_status);
if (!retval && infop)
retval = put_user(pid, &infop->si_pid);
if (!retval && infop)
retval = put_user(uid, &infop->si_uid);
if (!retval)
retval = pid;
put_task_struct(p);
BUG_ON(!retval);
return retval;
} | 0 | [
"CWE-284",
"CWE-264"
]
| linux | 8141c7f3e7aee618312fa1c15109e1219de784a7 | 189,049,727,324,214,200,000,000,000,000,000,000,000 | 76 | Move "exit_robust_list" into mm_release()
We don't want to get rid of the futexes just at exit() time, we want to
drop them when doing an execve() too, since that gets rid of the
previous VM image too.
Doing it at mm_release() time means that we automatically always do it
when we disassociate a VM map from the task.
Reported-by: [email protected]
Cc: Andrew Morton <[email protected]>
Cc: Nick Piggin <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Brad Spengler <[email protected]>
Cc: Alex Efros <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static u16 truncate_or_zero_opcode(struct rbd_obj_request *obj_req)
{
return rbd_obj_is_tail(obj_req) ? CEPH_OSD_OP_TRUNCATE :
CEPH_OSD_OP_ZERO;
} | 0 | [
"CWE-863"
]
| linux | f44d04e696feaf13d192d942c4f14ad2e117065a | 283,336,279,982,187,600,000,000,000,000,000,000,000 | 5 | rbd: require global CAP_SYS_ADMIN for mapping and unmapping
It turns out that currently we rely only on sysfs attribute
permissions:
$ ll /sys/bus/rbd/{add*,remove*}
--w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/add
--w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/add_single_major
--w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/remove
--w------- 1 root root 4096 Sep 3 20:38 /sys/bus/rbd/remove_single_major
This means that images can be mapped and unmapped (i.e. block devices
can be created and deleted) by a UID 0 process even after it drops all
privileges or by any process with CAP_DAC_OVERRIDE in its user namespace
as long as UID 0 is mapped into that user namespace.
Be consistent with other virtual block devices (loop, nbd, dm, md, etc)
and require CAP_SYS_ADMIN in the initial user namespace for mapping and
unmapping, and also for dumping the configuration string and refreshing
the image header.
Cc: [email protected]
Signed-off-by: Ilya Dryomov <[email protected]>
Reviewed-by: Jeff Layton <[email protected]> |
/*
 * Define the content of a palette color for the given screen.
 *
 * sp      - screen whose color table is updated
 * color   - palette index to (re)define
 * r, g, b - color components as supplied by the application
 *
 * Returns OK on success, ERR when the screen is invalid, the terminal is
 * direct-color, colors are not enabled, or any argument is out of range.
 */
_nc_init_color(SCREEN *sp, int color, int r, int g, int b)
{
int result = ERR;
int maxcolors;
T((T_CALLED("init_color(%p,%d,%d,%d,%d)"),
(void *) sp,
color,
r, g, b));
/* direct-color terminals have no modifiable palette */
if (sp == 0 || sp->_direct_color.value)
returnCode(result);
/* NOTE(review): maxcolors is assigned but never read below -- confirm */
maxcolors = MaxColors;
if (InitColor
&& sp->_coloron
&& (color >= 0 && OkColorHi(color))
&& (okRGB(r) && okRGB(g) && okRGB(b))) {
/* remember the requested components so the palette can be restored */
sp->_color_table[color].init = 1;
sp->_color_table[color].r = r;
sp->_color_table[color].g = g;
sp->_color_table[color].b = b;
if (UseHlsPalette) {
/* this terminal expects hue/lightness/saturation rather than RGB */
rgb2hls(r, g, b,
&sp->_color_table[color].red,
&sp->_color_table[color].green,
&sp->_color_table[color].blue);
} else {
sp->_color_table[color].red = r;
sp->_color_table[color].green = g;
sp->_color_table[color].blue = b;
}
#ifdef USE_TERM_DRIVER
CallDriver_4(sp, td_initcolor, color, r, g, b);
#else
/* emit the terminal's initialize_color control string */
NCURSES_PUTP2("initialize_color",
TIPARM_4(initialize_color, color, r, g, b));
#endif
/* track the highest palette slot redefined so far */
sp->_color_defs = max(color + 1, sp->_color_defs);
result = OK;
}
returnCode(result);
} | 0 | []
| ncurses | 790a85dbd4a81d5f5d8dd02a44d84f01512ef443 | 266,802,680,291,362,240,000,000,000,000,000,000,000 | 48 | ncurses 6.2 - patch 20200531
+ correct configure version-check/warning for g++ to allow for 10.x
+ re-enable "bel" in konsole-base (report by Nia Huang)
+ add linux-s entry (patch by Alexandre Montaron).
+ drop long-obsolete convert_configure.pl
+ add test/test_parm.c, for checking tparm changes.
+ improve parameter-checking for tparm, adding function _nc_tiparm() to
handle the most-used case, which accepts only numeric parameters
(report/testcase by "puppet-meteor").
+ use a more conservative estimate of the buffer-size in lib_tparm.c's
save_text() and save_number(), in case the sprintf() function
passes-through unexpected characters from a format specifier
(report/testcase by "puppet-meteor").
+ add a check for end-of-string in cvtchar to handle a malformed
string in infotocap (report/testcase by "puppet-meteor"). |
/*
 * Completion callback for an asynchronous SMB WRITE.  Translates the
 * server response (or the mid-queue state, if no response arrived) into
 * wdata->result, then queues the writedata work item and releases the
 * mid entry and its in-flight slot.
 */
cifs_writev_callback(struct mid_q_entry *mid)
{
struct cifs_writedata *wdata = mid->callback_data;
struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
unsigned int written;
WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf;
switch (mid->midState) {
case MID_RESPONSE_RECEIVED:
wdata->result = cifs_check_receive(mid, tcon->ses->server, 0);
if (wdata->result != 0)
break;
/* byte count is split across two little-endian 16-bit fields */
written = le16_to_cpu(smb->CountHigh);
written <<= 16;
written += le16_to_cpu(smb->Count);
/*
 * Mask off high 16 bits when bytes written as returned
 * by the server is greater than bytes requested by the
 * client. OS/2 servers are known to set incorrect
 * CountHigh values.
 */
if (written > wdata->bytes)
written &= 0xFFFF;
/* a short write is reported to the caller as -ENOSPC */
if (written < wdata->bytes)
wdata->result = -ENOSPC;
else
wdata->bytes = written;
break;
case MID_REQUEST_SUBMITTED:
case MID_RETRY_NEEDED:
/* no response yet (or a reconnect happened): the caller retries */
wdata->result = -EAGAIN;
break;
default:
wdata->result = -EIO;
break;
}
/* finish the write on the workqueue, then free the mid and release the
 * in-flight slot so waiters may issue new requests */
queue_work(system_nrt_wq, &wdata->work);
DeleteMidQEntry(mid);
atomic_dec(&tcon->ses->server->inFlight);
wake_up(&tcon->ses->server->request_q);
} | 0 | [
"CWE-362",
"CWE-119",
"CWE-189"
]
| linux | 9438fabb73eb48055b58b89fc51e0bc4db22fabd | 204,738,273,307,698,600,000,000,000,000,000,000,000 | 44 | cifs: fix possible memory corruption in CIFSFindNext
The name_len variable in CIFSFindNext is a signed int that gets set to
the resume_name_len in the cifs_search_info. The resume_name_len however
is unsigned and for some infolevels is populated directly from a 32 bit
value sent by the server.
If the server sends a very large value for this, then that value could
look negative when converted to a signed int. That would make that
value pass the PATH_MAX check later in CIFSFindNext. The name_len would
then be used as a length value for a memcpy. It would then be treated
as unsigned again, and the memcpy scribbles over a ton of memory.
Fix this by making the name_len an unsigned value in CIFSFindNext.
Cc: <[email protected]>
Reported-by: Darren Lavender <[email protected]>
Signed-off-by: Jeff Layton <[email protected]>
Signed-off-by: Steve French <[email protected]> |
/* Attach the Philips FMD1216ME MK3 hybrid tuner to the adapter's first
 * frontend via the device's I2C bus at address 0x61.  Always returns 0;
 * attach failure is not treated as fatal here. */
static int cxusb_fmd1216me_tuner_attach(struct dvb_usb_adapter *adap)
{
dvb_attach(simple_tuner_attach, adap->fe_adap[0].fe,
&adap->dev->i2c_adap, 0x61,
TUNER_PHILIPS_FMD1216ME_MK3);
return 0;
} | 0 | [
"CWE-119",
"CWE-787"
]
| linux | 3f190e3aec212fc8c61e202c51400afa7384d4bc | 21,601,891,176,951,434,000,000,000,000,000,000,000 | 7 | [media] cxusb: Use a dma capable buffer also for reading
Commit 17ce039b4e54 ("[media] cxusb: don't do DMA on stack")
added a kmalloc'ed bounce buffer for writes, but missed to do the same
for reads. As the read only happens after the write is finished, we can
reuse the same buffer.
As dvb_usb_generic_rw handles a read length of 0 by itself, avoid calling
it using the dvb_usb_generic_read wrapper function.
Signed-off-by: Stefan Brüns <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]> |
/*
 * Validate the "cnf" confirmation claim's token binding hash ("tbh")
 * against the Provided Token Binding ID exposed by the web server in the
 * request environment.
 *
 * The provided binding ID is base64url-decoded and hashed with SHA-256;
 * that hash is compared in constant time against the base64url-decoded
 * "tbh" value.
 *
 * Returns TRUE when the hashes match.  On any failure the configured
 * token binding policy decides the outcome:
 *   - OPTIONAL: accept anyway
 *   - ENFORCED: reject
 *   - REQUIRED: accept only when no Provided Token Binding ID was present
 */
apr_byte_t oidc_util_json_validate_cnf_tbh(request_rec *r,
		int token_binding_policy, const char *tbh_str) {
	const char *tbp_str = NULL;
	char *tbp = NULL;
	int tbp_len = -1;
	unsigned char *tbp_hash = NULL;
	unsigned int tbp_hash_len = 0;
	char *tbh = NULL;
	int tbh_len = -1;
	unsigned char diff = 0;
	int i = 0;

	tbp_str = oidc_util_get_provided_token_binding_id(r);
	if (tbp_str == NULL) {
		oidc_debug(r,
				"no Provided Token Binding ID environment variable found");
		goto out_err;
	}

	tbp_len = oidc_base64url_decode(r->pool, &tbp, tbp_str);
	if (tbp_len <= 0) {
		oidc_warn(r,
				"Provided Token Binding ID environment variable could not be decoded");
		goto out_err;
	}

	if (oidc_jose_hash_bytes(r->pool, OIDC_JOSE_ALG_SHA256,
			(const unsigned char*) tbp, tbp_len, &tbp_hash, &tbp_hash_len,
			NULL) == FALSE) {
		oidc_warn(r,
				"hashing Provided Token Binding ID environment variable failed");
		goto out_err;
	}

	tbh_len = oidc_base64url_decode(r->pool, &tbh, tbh_str);
	if (tbh_len <= 0) {
		oidc_warn(r, "cnf[\"tbh\"] provided but it could not be decoded");
		goto out_err;
	}

	/* tbh_len is known to be positive here, so the cast is safe and avoids
	 * a signed/unsigned comparison */
	if (tbp_hash_len != (unsigned int) tbh_len) {
		oidc_warn(r,
				"hash length of provided token binding ID environment variable: %d does not match length of cnf[\"tbh\"]: %d",
				tbp_hash_len, tbh_len);
		goto out_err;
	}

	/*
	 * Compare in constant time: memcmp returns at the first differing byte,
	 * which would leak how many leading bytes of the expected hash matched.
	 */
	for (i = 0; i < tbh_len; i++)
		diff |= (unsigned char) (tbp_hash[i] ^ (unsigned char) tbh[i]);
	if (diff != 0) {
		oidc_warn(r,
				"hash of provided token binding ID environment variable does not match cnf[\"tbh\"]");
		goto out_err;
	}

	oidc_debug(r,
			"hash of provided token binding ID environment variable matches cnf[\"tbh\"]");
	return TRUE;

out_err:
	if (token_binding_policy == OIDC_TOKEN_BINDING_POLICY_OPTIONAL)
		return TRUE;
	if (token_binding_policy == OIDC_TOKEN_BINDING_POLICY_ENFORCED)
		return FALSE;
	/* token_binding_policy == OIDC_TOKEN_BINDING_POLICY_REQUIRED: only
	 * tolerate the failure when no binding ID was provided at all */
	return (tbp_str == NULL);
} | 0 | [
"CWE-79"
]
| mod_auth_openidc | 55ea0a085290cd2c8cdfdd960a230cbc38ba8b56 | 41,134,891,496,906,840,000,000,000,000,000,000,000 | 66 | Add a function to escape Javascript characters |
/*
 * Asynchronously insert @entry on the server at @upload_uri, authorized
 * against @domain (which may be NULL).  The upload runs in a worker
 * thread; @callback is invoked on completion and should finish the
 * operation with the matching _finish() call.
 */
gdata_service_insert_entry_async (GDataService *self, GDataAuthorizationDomain *domain, const gchar *upload_uri, GDataEntry *entry,
GCancellable *cancellable, GAsyncReadyCallback callback, gpointer user_data)
{
GSimpleAsyncResult *result;
InsertEntryAsyncData *data;
g_return_if_fail (GDATA_IS_SERVICE (self));
g_return_if_fail (domain == NULL || GDATA_IS_AUTHORIZATION_DOMAIN (domain));
g_return_if_fail (upload_uri != NULL);
g_return_if_fail (GDATA_IS_ENTRY (entry));
g_return_if_fail (cancellable == NULL || G_IS_CANCELLABLE (cancellable));
/* bundle the arguments; released by insert_entry_async_data_free */
data = g_slice_new (InsertEntryAsyncData);
data->domain = (domain != NULL) ? g_object_ref (domain) : NULL;
data->upload_uri = g_strdup (upload_uri);
data->entry = g_object_ref (entry);
result = g_simple_async_result_new (G_OBJECT (self), callback, user_data, gdata_service_insert_entry_async);
g_simple_async_result_set_op_res_gpointer (result, data, (GDestroyNotify) insert_entry_async_data_free);
g_simple_async_result_run_in_thread (result, (GSimpleAsyncThreadFunc) insert_entry_thread, G_PRIORITY_DEFAULT, cancellable);
g_object_unref (result);
} | 0 | [
"CWE-20"
]
| libgdata | 6799f2c525a584dc998821a6ce897e463dad7840 | 329,877,292,341,048,800,000,000,000,000,000,000,000 | 22 | core: Validate SSL certificates for all connections
This prevents MitM attacks which use spoofed SSL certificates.
Note that this bumps our libsoup requirement to 2.37.91.
Closes: https://bugzilla.gnome.org/show_bug.cgi?id=671535 |
/* Accessor: the master tcon link established for this superblock at
 * mount time. */
cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
{
return cifs_sb->master_tlink;
} | 0 | [
"CWE-20"
]
| linux | 70945643722ffeac779d2529a348f99567fa5c33 | 310,053,132,503,487,160,000,000,000,000,000,000,000 | 4 | cifs: always do is_path_accessible check in cifs_mount
Currently, we skip doing the is_path_accessible check in cifs_mount if
there is no prefixpath. I have a report of at least one server however
that allows a TREE_CONNECT to a share that has a DFS referral at its
root. The reporter in this case was using a UNC that had no prefixpath,
so the is_path_accessible check was not triggered and the box later hit
a BUG() because we were chasing a DFS referral on the root dentry for
the mount.
This patch fixes this by removing the check for a zero-length
prefixpath. That should make the is_path_accessible check be done in
this situation and should allow the client to chase the DFS referral at
mount time instead.
Cc: [email protected]
Reported-and-Tested-by: Yogesh Sharma <[email protected]>
Signed-off-by: Jeff Layton <[email protected]>
Signed-off-by: Steve French <[email protected]> |
/* Structured-reply chunk: NBD_REPLY_TYPE_OFFSET_HOLE.  Only valid in
 * response to NBD_CMD_READ, and its payload must be exactly the
 * fixed-size offset/length descriptor.  Prepare the receive buffer and
 * move to the state that processes the descriptor. */
else if (type == NBD_REPLY_TYPE_OFFSET_HOLE) {
if (cmd->type != NBD_CMD_READ) {
SET_NEXT_STATE (%.DEAD);
set_error (0, "invalid command for receiving offset-hole chunk, "
"cmd->type=%" PRIu16 ", "
"this is likely to be a bug in the server",
cmd->type);
return 0;
}
if (length != sizeof h->sbuf.sr.payload.offset_hole) {
SET_NEXT_STATE (%.DEAD);
set_error (0, "invalid length in NBD_REPLY_TYPE_OFFSET_HOLE");
return 0;
}
/* read the descriptor into sbuf, then handle it in RECV_OFFSET_HOLE */
h->rbuf = &h->sbuf.sr.payload.offset_hole;
h->rlen = sizeof h->sbuf.sr.payload.offset_hole;
SET_NEXT_STATE (%RECV_OFFSET_HOLE);
return 0;
} | 0 | []
| libnbd | 2c1987fc23d6d0f537edc6d4701e95a2387f7917 | 322,700,190,053,320,080,000,000,000,000,000,000,000 | 19 | lib: Fix stack corruption with structured reply containing negative offset.
Because of improper bounds checking, when receiving a structured reply
some offset/lengths sent by the server could cause libnbd to execute
arbitrary code under control of a malicious server.
A structured reply segment containing (for example):
offset = 18446744073709551615 (== (uint64_t) -1,
or similar negative offsets)
length = 100 (any small positive number < request count)
In both original bounds tests the error case would not be reached:
if (offset < cmd->offset) { // very large < 0
// error case
}
if (offset + length > cmd->count) { // 99 > 512
// error case
}
The result of the negative offset is that data under control of the
server is written to memory before the read buffer supplied by the
client. If the read buffer is located on the stack then this allows
the stack return address from nbd_pread() to be trivially modified,
allowing arbitrary code execution under the control of the server. If
the buffer is located on the heap then other memory objects before the
buffer can be overwritten, which again would usually lead to arbitrary
code execution.
This commit adds a central function to handle bounds checking for all
cases, and the corrected bounds check is written once in this function.
This bug was found by fuzzing libnbd with American Fuzzy Lop as
described here:
https://groups.google.com/forum/#!topic/afl-users/WZzAnfItxM4
(cherry picked from commit f75f602a6361c0c5f42debfeea6980f698ce7f09) |
/* Resume a Xen hypercall after userspace handled the exit: if the vCPU is
 * still at the hypercall instruction, store the result userspace supplied;
 * otherwise just mark the hypercall complete. */
static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
if (likely(kvm_is_linear_rip(vcpu, vcpu->arch.xen.hypercall_rip)))
return kvm_xen_hypercall_set_result(vcpu,
vcpu->run->xen.u.hcall.result);
return 1;
} | 0 | [
"CWE-476"
]
| linux | 55749769fe608fa3f4a075e42e89d237c8e37637 | 153,944,944,133,298,360,000,000,000,000,000,000,000 | 9 | KVM: x86: Fix wall clock writes in Xen shared_info not to mark page dirty
When dirty ring logging is enabled, any dirty logging without an active
vCPU context will cause a kernel oops. But we've already declared that
the shared_info page doesn't get dirty tracking anyway, since it would
be kind of insane to mark it dirty every time we deliver an event channel
interrupt. Userspace is supposed to just assume it's always dirty any
time a vCPU can run or event channels are routed.
So stop using the generic kvm_write_wall_clock() and just write directly
through the gfn_to_pfn_cache that we already have set up.
We can make kvm_write_wall_clock() static in x86.c again now, but let's
not remove the 'sec_hi_ofs' argument even though it's not used yet. At
some point we *will* want to use that for KVM guests too.
Fixes: 629b5348841a ("KVM: x86/xen: update wallclock region")
Reported-by: butt3rflyh4ck <[email protected]>
Signed-off-by: David Woodhouse <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
/*
 * Apply the closure to every non-static field of this class, superclass
 * fields first, with each class's own fields visited in ascending offset
 * order (DebugInfo consumers expect nonstatic fields sorted by offset).
 */
void InstanceKlass::do_nonstatic_fields(FieldClosure* cl) {
InstanceKlass* super = superklass();
if (super != NULL) {
super->do_nonstatic_fields(cl);
}
fieldDescriptor fd;
int length = java_fields_count();
// In DebugInfo nonstatic fields are sorted by offset.
// Scratch array of (offset, field-index) pairs; 2*(length+1) ints covers
// the worst case of all fields being nonstatic.
int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1), mtClass);
int j = 0;
for (int i = 0; i < length; i += 1) {
fd.reinitialize(this, i);
if (!fd.is_static()) {
fields_sorted[j + 0] = fd.offset();
fields_sorted[j + 1] = i;
j += 2;
}
}
if (j > 0) {
length = j;
// _sort_Fn is defined in growableArray.hpp.
qsort(fields_sorted, length/2, 2*sizeof(int), (_sort_Fn)compare_fields_by_offset);
for (int i = 0; i < length; i += 2) {
fd.reinitialize(this, fields_sorted[i + 1]);
assert(!fd.is_static() && fd.offset() == fields_sorted[i], "only nonstatic fields");
cl->do_field(&fd);
}
}
FREE_C_HEAP_ARRAY(int, fields_sorted);
} | 0 | []
| jdk17u | f8eb9abe034f7c6bea4da05a9ea42017b3f80730 | 208,537,336,215,744,800,000,000,000,000,000,000,000 | 30 | 8270386: Better verification of scan methods
Reviewed-by: coleenp
Backport-of: ac329cef45979bd0159ecd1347e36f7129bb2ce4 |
gf_list_add(res, desc);
}
}
gf_bs_del(bs_r);
return res;
}
static void dasher_get_mime_and_ext(GF_DasherCtx *ctx, GF_DashStream *ds, const char **out_subtype, const char **out_ext)
{
const char *subtype = NULL;
const char *mux_ext = NULL;
const char *cstr;
if (ctx->muxtype!=DASHER_MUX_AUTO) {
switch (ctx->muxtype) {
case DASHER_MUX_ISOM: subtype = "mp4"; mux_ext = "mp4"; break;
case DASHER_MUX_TS: subtype = "mp2t"; mux_ext = "ts"; break;
case DASHER_MUX_MKV: subtype = "x-matroska"; mux_ext = "mkv"; break;
case DASHER_MUX_WEBM: subtype = "webm"; mux_ext = "webm"; break;
case DASHER_MUX_OGG: subtype = "ogg"; mux_ext = "ogg"; break;
case DASHER_MUX_RAW:
cstr = gf_codecid_mime(ds->codec_id);
if (cstr) {
subtype = strchr(cstr, '/');
if (subtype) subtype++;
else subtype = "raw";
}
if (out_ext) {
cstr = gf_codecid_file_ext(ds->codec_id);
if (cstr) *out_ext = cstr;
}
break;
}
} else if (ctx->initext) {
mux_ext = ctx->initext;
if (!strcmp(ctx->initext, "ts") || !strcmp(ctx->initext, "m2ts")) {
subtype = "mp2t";
ctx->muxtype = DASHER_MUX_TS;
} else if (!strcmp(ctx->initext, "mkv") || !strcmp(ctx->initext, "mka") || !strcmp(ctx->initext, "mks") || !strcmp(ctx->initext, "mk3d")) {
subtype = "x-matroska";
ctx->muxtype = DASHER_MUX_MKV;
} else if (!strcmp(ctx->initext, "webm") || !strcmp(ctx->initext, "weba")) {
subtype = "webm";
ctx->muxtype = DASHER_MUX_WEBM;
} else if (!strcmp(ctx->initext, "ogg") || !strcmp(ctx->initext, "oga") || !strcmp(ctx->initext, "ogv") || !strcmp(ctx->initext, "spx") || !strcmp(ctx->initext, "oggm") || !strcmp(ctx->initext, "opus")) {
subtype = "ogg";
ctx->muxtype = DASHER_MUX_OGG;
}
else if (!strcmp(ctx->initext, "null")) {
mux_ext = "mp4";
ctx->muxtype = DASHER_MUX_ISOM; | 0 | [
"CWE-787"
]
| gpac | ea1eca00fd92fa17f0e25ac25652622924a9a6a0 | 62,147,681,321,889,480,000,000,000,000,000,000,000 | 51 | fixed #2138 |
/* Release the named-group table attached to a compiled regex.
 * Clears the table contents first, then frees the table itself and
 * detaches it from the regex.  Returns 0 on success or the error code
 * from names_clear(). */
onig_names_free(regex_t* reg)
{
int rc = names_clear(reg);
if (rc != 0)
return rc;

NameTable* table = (NameTable* )reg->name_table;
if (IS_NOT_NULL(table))
onig_st_free_table(table);

reg->name_table = (void* )NULL;
return 0;
} | 0 | [
"CWE-400",
"CWE-399",
"CWE-674"
]
| oniguruma | 4097828d7cc87589864fecf452f2cd46c5f37180 | 118,648,134,659,272,700,000,000,000,000,000,000,000 | 13 | fix #147: Stack Exhaustion Problem caused by some parsing functions in regcomp.c making recursive calls to themselves. |
/*
 * Move the parsed attributes of node np into tree node tp, first
 * releasing any partially-built data already attached to tp.  Ownership
 * of every heap-allocated field is transferred: the node's pointer is
 * NULLed so np can later be freed without disturbing the tree.
 */
tree_from_node(struct tree *tp, struct node *np)
{
free_partial_tree(tp, FALSE);
/* transfer ownership of the heap-allocated fields */
tp->label = np->label;
np->label = NULL;
tp->enums = np->enums;
np->enums = NULL;
tp->ranges = np->ranges;
np->ranges = NULL;
tp->indexes = np->indexes;
np->indexes = NULL;
tp->augments = np->augments;
np->augments = NULL;
tp->varbinds = np->varbinds;
np->varbinds = NULL;
tp->hint = np->hint;
np->hint = NULL;
tp->units = np->units;
np->units = NULL;
tp->description = np->description;
np->description = NULL;
tp->reference = np->reference;
np->reference = NULL;
tp->defaultValue = np->defaultValue;
np->defaultValue = NULL;
/* scalar attributes are simply copied */
tp->subid = np->subid;
tp->tc_index = np->tc_index;
/* map the parser's node type onto the tree's type enumeration */
tp->type = translation_table[np->type];
tp->access = np->access;
tp->status = np->status;
set_function(tp);
} | 0 | [
"CWE-59",
"CWE-61"
]
| net-snmp | 4fd9a450444a434a993bc72f7c3486ccce41f602 | 318,438,446,942,214,680,000,000,000,000,000,000,000 | 34 | CHANGES: snmpd: Stop reading and writing the mib_indexes/* files
Caching directory contents is something the operating system should do
and is not something Net-SNMP should do. Instead of storing a copy of
the directory contents in ${tmp_dir}/mib_indexes/${n}, always scan a
MIB directory. |
/*
 * Parse a single minidump directory entry: record its on-disk layout as
 * format strings in the sdb key/value store (for later inspection) and,
 * for stream types the plugin uses, read the stream contents into the
 * corresponding obj->streams lists.
 *
 * Returns false only when the stream data lies outside the file; a
 * malformed individual stream is skipped (each case breaks out early on
 * a short read).
 */
static bool r_bin_mdmp_init_directory_entry(struct r_bin_mdmp_obj *obj, struct minidump_directory *entry) {
r_strf_buffer (128);
struct minidump_handle_operation_list handle_operation_list;
struct minidump_memory_list memory_list;
struct minidump_memory64_list memory64_list;
struct minidump_memory_info_list memory_info_list;
struct minidump_module_list module_list;
struct minidump_thread_list thread_list;
struct minidump_thread_ex_list thread_ex_list;
struct minidump_thread_info_list thread_info_list;
struct minidump_token_info_list token_info_list;
struct minidump_unloaded_module_list unloaded_module_list;
ut64 offset;
int i, r;
/* We could confirm data sizes but a malicious MDMP will always get around
** this! But we can ensure that the data is not outside of the file */
if ((ut64)entry->location.rva + entry->location.data_size > r_buf_size (obj->b)) {
eprintf ("[ERROR] Size Mismatch - Stream data is larger than file size!\n");
return false;
}
/* Each case records format metadata in obj->kv and, where useful, parses
 * the stream into the obj->streams structures. */
switch (entry->stream_type) {
case THREAD_LIST_STREAM:
r = r_buf_read_at (obj->b, entry->location.rva, (ut8 *)&thread_list, sizeof (thread_list));
if (r != sizeof (thread_list)) {
break;
}
sdb_set (obj->kv, "mdmp_thread.format", "ddddq?? "
"ThreadId SuspendCount PriorityClass Priority "
"Teb (mdmp_memory_descriptor)Stack "
"(mdmp_location_descriptor)ThreadContext", 0);
sdb_num_set (obj->kv, "mdmp_thread_list.offset",
entry->location.rva, 0);
sdb_set (obj->kv, "mdmp_thread_list.format",
r_strf ("d[%d]? "
"NumberOfThreads (mdmp_thread)Threads",
thread_list.number_of_threads),
0);
/* TODO: Not yet fully parsed or utilised */
break;
case MODULE_LIST_STREAM:
module_list.number_of_modules = r_buf_read_le32_at (obj->b, entry->location.rva);
sdb_set (obj->kv, "mdmp_module.format", "qddtd???qq "
"BaseOfImage SizeOfImage CheckSum "
"TimeDateStamp ModuleNameRVA "
"(mdmp_vs_fixedfileinfo)VersionInfo "
"(mdmp_location_descriptor)CvRecord "
"(mdmp_location_descriptor)MiscRecord "
"Reserved0 Reserved1", 0);
sdb_num_set (obj->kv, "mdmp_module_list.offset",
entry->location.rva, 0);
sdb_set (obj->kv, "mdmp_module_list.format",
r_strf ("d[%d]? "
"NumberOfModule (mdmp_module)Modules",
module_list.number_of_modules),
0);
/* the loop is bounded by the file size, not just the declared count */
offset = entry->location.rva + sizeof (module_list);
for (i = 0; i < module_list.number_of_modules && offset < obj->size; i++) {
struct minidump_module *module = read_module (obj->b, offset);
if (!module) {
break;
}
r_list_append (obj->streams.modules, module);
offset += sizeof (*module);
}
break;
case MEMORY_LIST_STREAM:
r = r_buf_read_at (obj->b, entry->location.rva, (ut8 *)&memory_list, sizeof (memory_list));
if (r != sizeof (memory_list)) {
break;
}
sdb_num_set (obj->kv, "mdmp_memory_list.offset",
entry->location.rva, 0);
sdb_set (obj->kv, "mdmp_memory_list.format",
r_strf ("d[%d]? "
"NumberOfMemoryRanges "
"(mdmp_memory_descriptor)MemoryRanges ",
memory_list.number_of_memory_ranges),
0);
offset = entry->location.rva + sizeof (memory_list);
for (i = 0; i < memory_list.number_of_memory_ranges && offset < obj->size; i++) {
struct minidump_memory_descriptor *desc = R_NEW (struct minidump_memory_descriptor);
if (!desc) {
break;
}
r = r_buf_read_at (obj->b, offset, (ut8 *)desc, sizeof (*desc));
if (r != sizeof (*desc)) {
break;
}
r_list_append (obj->streams.memories, desc);
offset += sizeof (*desc);
}
break;
case EXCEPTION_STREAM:
/* TODO: Not yet fully parsed or utilised */
obj->streams.exception = R_NEW (struct minidump_exception_stream);
if (!obj->streams.exception) {
break;
}
r = r_buf_read_at (obj->b, entry->location.rva, (ut8 *)obj->streams.exception, sizeof (*obj->streams.exception));
if (r != sizeof (*obj->streams.exception)) {
break;
}
sdb_set (obj->kv, "mdmp_exception.format", "[4]E[4]Eqqdd[15]q "
"(mdmp_exception_code)ExceptionCode "
"(mdmp_exception_flags)ExceptionFlags "
"ExceptionRecord ExceptionAddress "
"NumberParameters __UnusedAlignment "
"ExceptionInformation",
0);
sdb_num_set (obj->kv, "mdmp_exception_stream.offset",
entry->location.rva, 0);
sdb_set (obj->kv, "mdmp_exception_stream.format", "dd?? "
"ThreadId __Alignment "
"(mdmp_exception)ExceptionRecord "
"(mdmp_location_descriptor)ThreadContext",
0);
break;
case SYSTEM_INFO_STREAM:
obj->streams.system_info = R_NEW (struct minidump_system_info);
if (!obj->streams.system_info) {
break;
}
r = r_buf_read_at (obj->b, entry->location.rva, (ut8 *)obj->streams.system_info, sizeof (*obj->streams.system_info));
if (r != sizeof (*obj->streams.system_info)) {
break;
}
sdb_num_set (obj->kv, "mdmp_system_info.offset",
entry->location.rva, 0);
/* TODO: We need E as a byte! */
sdb_set (obj->kv, "mdmp_system_info.format", "[2]EwwbBddd[4]Ed[2]Ew[2]q "
"(mdmp_processor_architecture)ProcessorArchitecture "
"ProcessorLevel ProcessorRevision NumberOfProcessors "
"(mdmp_product_type)ProductType "
"MajorVersion MinorVersion BuildNumber (mdmp_platform_id)PlatformId "
"CsdVersionRva (mdmp_suite_mask)SuiteMask Reserved2 ProcessorFeatures", 0);
break;
case THREAD_EX_LIST_STREAM:
/* TODO: Not yet fully parsed or utilised */
r = r_buf_read_at (obj->b, entry->location.rva, (ut8 *)&thread_ex_list, sizeof (thread_ex_list));
if (r != sizeof (thread_ex_list)) {
break;
}
sdb_set (obj->kv, "mdmp_thread_ex.format", "ddddq??? "
"ThreadId SuspendCount PriorityClass Priority "
"Teb (mdmp_memory_descriptor)Stack "
"(mdmp_location_descriptor)ThreadContext "
"(mdmp_memory_descriptor)BackingStore", 0);
sdb_num_set (obj->kv, "mdmp_thread_ex_list.offset",
entry->location.rva, 0);
sdb_set (obj->kv, "mdmp_thread_ex_list.format",
r_strf ("d[%d]? NumberOfThreads "
"(mdmp_thread_ex)Threads",
thread_ex_list.number_of_threads),
0);
offset = entry->location.rva + sizeof (thread_ex_list);
for (i = 0; i < thread_ex_list.number_of_threads && offset < obj->size; i++) {
struct minidump_thread_ex *thread = R_NEW (struct minidump_thread_ex);
if (!thread) {
break;
}
r = r_buf_read_at (obj->b, offset, (ut8 *)thread, sizeof (*thread));
if (r != sizeof (*thread)) {
break;
}
r_list_append (obj->streams.ex_threads, thread);
offset += sizeof (*thread);
}
break;
case MEMORY_64_LIST_STREAM:
/* NOTE(review): the return value of read_memory64_list is not checked
 * here -- confirm a short read is tolerated by the consumers */
read_memory64_list (obj->b, entry->location.rva, &memory64_list);
sdb_num_set (obj->kv, "mdmp_memory64_list.offset",
entry->location.rva, 0);
sdb_set (obj->kv, "mdmp_memory64_list.format",
r_strf ("qq[%"PFMT64d"]? NumberOfMemoryRanges "
"BaseRva "
"(mdmp_memory_descriptor64)MemoryRanges",
memory64_list.number_of_memory_ranges),
0);
/* all 64-bit ranges share a single base RVA; keep it for later reads */
obj->streams.memories64.base_rva = memory64_list.base_rva;
offset = entry->location.rva + sizeof (memory64_list);
for (i = 0; i < memory64_list.number_of_memory_ranges && offset < obj->size; i++) {
struct minidump_memory_descriptor64 *desc = R_NEW (struct minidump_memory_descriptor64);
if (!desc) {
break;
}
read_desc (obj->b, offset, desc);
r_list_append (obj->streams.memories64.memories, desc);
offset += sizeof (*desc);
}
break;
case COMMENT_STREAM_A:
/* TODO: Not yet fully parsed or utilised */
obj->streams.comments_a = R_NEWS (ut8, COMMENTS_SIZE);
if (!obj->streams.comments_a) {
break;
}
r = r_buf_read_at (obj->b, entry->location.rva, obj->streams.comments_a, COMMENTS_SIZE);
if (r != COMMENTS_SIZE) {
break;
}
sdb_num_set (obj->kv, "mdmp_comment_stream_a.offset",
entry->location.rva, 0);
sdb_set (obj->kv, "mdmp_comment_stream_a.format",
"s CommentA", 0);
break;
case COMMENT_STREAM_W:
/* TODO: Not yet fully parsed or utilised */
obj->streams.comments_w = R_NEWS (ut8, COMMENTS_SIZE);
if (!obj->streams.comments_w) {
break;
}
r = r_buf_read_at (obj->b, entry->location.rva, obj->streams.comments_w, COMMENTS_SIZE);
if (r != COMMENTS_SIZE) {
break;
}
sdb_num_set (obj->kv, "mdmp_comment_stream_w.offset",
entry->location.rva, 0);
sdb_set (obj->kv, "mdmp_comment_stream_w.format",
"s CommentW", 0);
break;
case HANDLE_DATA_STREAM:
/* TODO: Not yet fully parsed or utilised */
obj->streams.handle_data = R_NEW (struct minidump_handle_data_stream);
if (!obj->streams.handle_data) {
break;
}
r = r_buf_read_at (obj->b, entry->location.rva, (ut8 *)obj->streams.handle_data, sizeof (*obj->streams.handle_data));
if (r != sizeof (*obj->streams.handle_data)) {
break;
}
sdb_num_set (obj->kv, "mdmp_handle_data_stream.offset",
entry->location.rva, 0);
sdb_set (obj->kv, "mdmp_handle_data_stream.format", "dddd "
"SizeOfHeader SizeOfDescriptor "
"NumberOfDescriptors Reserved", 0);
break;
case FUNCTION_TABLE_STREAM:
/* TODO: Not yet fully parsed or utilised */
obj->streams.function_table = R_NEW (struct minidump_function_table_stream);
if (!obj->streams.function_table) {
break;
}
r = r_buf_read_at (obj->b, entry->location.rva, (ut8 *)obj->streams.function_table, sizeof (*obj->streams.function_table));
if (r != sizeof (*obj->streams.function_table)) {
break;
}
sdb_num_set (obj->kv, "mdmp_function_table_stream.offset",
entry->location.rva, 0);
sdb_set (obj->kv, "mdmp_function_table_stream.format", "dddddd "
"SizeOfHeader SizeOfDescriptor SizeOfNativeDescriptor "
"SizeOfFunctionEntry NumberOfDescriptors SizeOfAlignPad",
0);
break;
case UNLOADED_MODULE_LIST_STREAM:
/* TODO: Not yet fully parsed or utilised */
r = r_buf_read_at (obj->b, entry->location.rva, (ut8 *)&unloaded_module_list, sizeof (unloaded_module_list));
if (r != sizeof (unloaded_module_list)) {
break;
}
sdb_set (obj->kv, "mdmp_unloaded_module.format", "qddtd "
"BaseOfImage SizeOfImage CheckSum TimeDateStamp "
"ModuleNameRva", 0);
sdb_num_set (obj->kv, "mdmp_unloaded_module_list.offset",
entry->location.rva, 0);
sdb_set (obj->kv, "mdmp_unloaded_module_list.format", "ddd "
"SizeOfHeader SizeOfEntry NumberOfEntries", 0);
offset = entry->location.rva + sizeof (unloaded_module_list);
for (i = 0; i < unloaded_module_list.number_of_entries && offset < obj->size; i++) {
struct minidump_unloaded_module *module = R_NEW (struct minidump_unloaded_module);
if (!module) {
break;
}
r = r_buf_read_at (obj->b, offset, (ut8 *)module, sizeof (*module));
if (r != sizeof (*module)) {
break;
}
r_list_append (obj->streams.unloaded_modules, module);
offset += sizeof (*module);
}
break;
case MISC_INFO_STREAM:
/* TODO: Not yet fully parsed or utilised */
obj->streams.misc_info.misc_info_1 = R_NEW (struct minidump_misc_info);
if (!obj->streams.misc_info.misc_info_1) {
break;
}
r = r_buf_read_at (obj->b, entry->location.rva, (ut8 *)obj->streams.misc_info.misc_info_1, sizeof (*obj->streams.misc_info.misc_info_1));
if (r != sizeof (*obj->streams.misc_info.misc_info_1)) {
break;
}
/* TODO: Handle different sizes */
sdb_num_set (obj->kv, "mdmp_misc_info.offset",
entry->location.rva, 0);
sdb_set (obj->kv, "mdmp_misc_info.format", "d[4]Bdtttddddd "
"SizeOfInfo (mdmp_misc1_flags)Flags1 ProcessId "
"ProcessCreateTime ProcessUserTime ProcessKernelTime "
"ProcessorMaxMhz ProcessorCurrentMhz "
"ProcessorMhzLimit ProcessorMaxIdleState "
"ProcessorCurrentIdleState", 0);
break;
case MEMORY_INFO_LIST_STREAM:
r = r_buf_read_at (obj->b, entry->location.rva, (ut8 *)&memory_info_list, sizeof (memory_info_list));
if (r != sizeof (memory_info_list)) {
break;
}
sdb_set (obj->kv, "mdmp_memory_info.format",
"qq[4]Edq[4]E[4]E[4]Ed BaseAddress AllocationBase "
"(mdmp_page_protect)AllocationProtect __Alignment1 RegionSize "
"(mdmp_mem_state)State (mdmp_page_protect)Protect "
"(mdmp_mem_type)Type __Alignment2", 0);
sdb_num_set (obj->kv, "mdmp_memory_info_list.offset",
entry->location.rva, 0);
sdb_set (obj->kv, "mdmp_memory_info_list.format",
r_strf ("ddq[%"PFMT64d"]? SizeOfHeader SizeOfEntry "
"NumberOfEntries (mdmp_memory_info)MemoryInfo",
memory_info_list.number_of_entries),
0);
offset = entry->location.rva + sizeof (memory_info_list);
for (i = 0; i < memory_info_list.number_of_entries && offset < obj->size; i++) {
struct minidump_memory_info *info = R_NEW (struct minidump_memory_info);
if (!info) {
break;
}
r = r_buf_read_at (obj->b, offset, (ut8 *)info, sizeof (*info));
if (r != sizeof (*info)) {
break;
}
r_list_append (obj->streams.memory_infos, info);
offset += sizeof (*info);
}
break;
case THREAD_INFO_LIST_STREAM:
/* TODO: Not yet fully parsed or utilised */
r = r_buf_read_at (obj->b, entry->location.rva, (ut8 *)&thread_info_list, sizeof (thread_info_list));
if (r != sizeof (thread_info_list)) {
break;
}
sdb_set (obj->kv, "mdmp_thread_info.format", "ddddttttqq "
"ThreadId DumpFlags DumpError ExitStatus CreateTime "
"ExitTime KernelTime UserTime StartAddress Affinity",
0);
sdb_num_set (obj->kv, "mdmp_thread_info_list.offset",
entry->location.rva, 0);
sdb_set (obj->kv, "mdmp_thread_info_list.format", "ddd "
"SizeOfHeader SizeOfEntry NumberOfEntries", 0);
offset = entry->location.rva + sizeof (thread_info_list);
for (i = 0; i < thread_info_list.number_of_entries && offset < obj->size; i++) {
struct minidump_thread_info *info = R_NEW (struct minidump_thread_info);
if (!info) {
break;
}
r = r_buf_read_at (obj->b, offset, (ut8 *)info, sizeof (*info));
if (r != sizeof (*info)) {
break;
}
r_list_append (obj->streams.thread_infos, info);
offset += sizeof (*info);
}
break;
case HANDLE_OPERATION_LIST_STREAM:
/* TODO: Not yet fully parsed or utilised */
r = r_buf_read_at (obj->b, entry->location.rva, (ut8 *)&handle_operation_list, sizeof (handle_operation_list));
if (r != sizeof (handle_operation_list)) {
break;
}
sdb_num_set (obj->kv, "mdmp_handle_operation_list.offset",
entry->location.rva, 0);
sdb_set (obj->kv, "mdmp_handle_operation_list.format", "dddd "
"SizeOfHeader SizeOfEntry NumberOfEntries Reserved", 0);
offset = entry->location.rva + sizeof (handle_operation_list);
for (i = 0; i < handle_operation_list.number_of_entries && offset < obj->size; i++) {
struct avrf_handle_operation *op = R_NEW (struct avrf_handle_operation);
if (!op) {
break;
}
r = r_buf_read_at (obj->b, offset, (ut8 *)op, sizeof (*op));
if (r != sizeof (*op)) {
break;
}
r_list_append (obj->streams.operations, op);
offset += sizeof (*op);
}
break;
case TOKEN_STREAM:
/* TODO: Not fully parsed or utilised */
r = r_buf_read_at (obj->b, entry->location.rva, (ut8 *)&token_info_list, sizeof (token_info_list));
if (r != sizeof (token_info_list)) {
break;
}
sdb_set (obj->kv, "mdmp_token_info.format", "ddq "
"TokenSize TokenId TokenHandle", 0);
sdb_num_set (obj->kv, "mdmp_token_info_list.offset",
entry->location.rva, 0);
sdb_set (obj->kv, "mdmp_token_info_list.format", "dddd "
"TokenListSize TokenListEntries ListHeaderSize ElementHeaderSize", 0);
offset = entry->location.rva + sizeof (token_info_list);
for (i = 0; i < token_info_list.number_of_entries && offset < obj->size; i++) {
struct minidump_token_info *info = R_NEW (struct minidump_token_info);
if (!info) {
break;
}
r = r_buf_read_at (obj->b, offset, (ut8 *)info, sizeof (*info));
if (r != sizeof (*info)) {
break;
}
r_list_append (obj->streams.token_infos, info);
offset += sizeof (*info);
}
break;
case LAST_RESERVED_STREAM:
/* TODO: Not yet fully parsed or utilised */
break;
case UNUSED_STREAM:
case RESERVED_STREAM_0:
case RESERVED_STREAM_1:
/* Silently ignore reserved streams */
break;
default:
eprintf ("[WARN] Invalid or unsupported enumeration encountered %d\n", entry->stream_type);
break;
}
return true;
} | 0 | [
"CWE-400",
"CWE-703"
]
| radare2 | 27fe8031782d3a06c3998eaa94354867864f9f1b | 298,973,677,659,746,740,000,000,000,000,000,000,000 | 461 | Fix DoS in the minidump parser ##crash
* Reported by lazymio via huntr.dev
* Reproducer: mdmp-dos |
impl_GetLocale (DBusConnection * bus, DBusMessage * message, void *user_data)
{
return NULL;
} | 0 | []
| at-spi2-atk | e4f3eee2e137cd34cd427875365f458c65458164 | 257,687,756,535,382,950,000,000,000,000,000,000,000 | 4 | Use XDG_RUNTIME_DIR to hold sockets, and do not make a world-writable dir
If we use XDG_RUNTIME_DIR, then the directory should be owned by the
appropriate user, so it should not need to be world-writable. Hopefully this
won't break accessibility for administrative apps on some distro.
https://bugzilla.gnome.org/show_bug.cgi?id=678348 |
*/
YY_BUFFER_STATE re_yy_create_buffer (FILE * file, int size , yyscan_t yyscanner)
{
YY_BUFFER_STATE b;
b = (YY_BUFFER_STATE) re_yyalloc(sizeof( struct yy_buffer_state ) ,yyscanner );
if ( ! b )
YY_FATAL_ERROR( "out of dynamic memory in re_yy_create_buffer()" );
b->yy_buf_size = (yy_size_t)size;
/* yy_ch_buf has to be 2 characters longer than the size given because
* we need to put in 2 end-of-buffer characters.
*/
b->yy_ch_buf = (char *) re_yyalloc(b->yy_buf_size + 2 ,yyscanner );
if ( ! b->yy_ch_buf )
YY_FATAL_ERROR( "out of dynamic memory in re_yy_create_buffer()" );
b->yy_is_our_buffer = 1;
re_yy_init_buffer(b,file ,yyscanner);
return b; | 0 | [
"CWE-476",
"CWE-703",
"CWE-125"
]
| yara | 3119b232c9c453c98d8fa8b6ae4e37ba18117cd4 | 53,027,550,928,290,760,000,000,000,000,000,000,000 | 23 | re_lexer: Make reading escape sequences more robust (#586)
* Add test for issue #503
* re_lexer: Make reading escape sequences more robust
This commit fixes parsing incomplete escape sequences at the end of a
regular expression and parsing things like \xxy (invalid hex digits)
which before were silently turned into (char)255.
Close #503
* Update re_lexer.c |
void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct sock *sk;
if (v == SEQ_START_TOKEN)
sk = raw_get_first(seq);
else
sk = raw_get_next(seq, v);
++*pos;
return sk;
} | 0 | [
"CWE-362"
]
| linux-2.6 | f6d8bd051c391c1c0458a30b2a7abcd939329259 | 92,700,373,612,878,290,000,000,000,000,000,000,000 | 11 | inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We cant insert an rcu_head in struct ip_options since its included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
int ha_myisam::update_row(const uchar *old_data, uchar *new_data)
{
ha_statistic_increment(&SSV::ha_update_count);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
table->timestamp_field->set_time();
return mi_update(file,old_data,new_data);
} | 0 | [
"CWE-362"
]
| mysql-server | 4e5473862e6852b0f3802b0cd0c6fa10b5253291 | 209,926,365,428,699,800,000,000,000,000,000,000,000 | 7 | Bug#24388746: PRIVILEGE ESCALATION AND RACE CONDITION USING CREATE TABLE
During REPAIR TABLE of a MyISAM table, a temporary data file (.TMD)
is created. When repair finishes, this file is renamed to the original
.MYD file. The problem was that during this rename, we copied the
stats from the old file to the new file with chmod/chown. If a user
managed to replace the temporary file before chmod/chown was executed,
it was possible to get an arbitrary file with the privileges of the
mysql user.
This patch fixes the problem by not copying stats from the old
file to the new file. This is not needed as the new file was
created with the correct stats. This fix only changes server
behavior - external utilities such as myisamchk still does
chmod/chown.
No test case provided since the problem involves synchronization
with file system operations. |
void Context::removeHeaderMapValue(HeaderMapType type, absl::string_view key) {
auto map = getMap(type);
if (!map) {
return;
}
const Http::LowerCaseString lower_key(std::move(std::string(key)));
map->remove(lower_key);
} | 0 | [
"CWE-476"
]
| envoy | 8788a3cf255b647fd14e6b5e2585abaaedb28153 | 239,398,509,314,973,860,000,000,000,000,000,000,000 | 8 | 1.4 - Do not call into the VM unless the VM Context has been created. (#24)
* Ensure that the in VM Context is created before onDone is called.
Signed-off-by: John Plevyak <[email protected]>
* Update as per offline discussion.
Signed-off-by: John Plevyak <[email protected]>
* Set in_vm_context_created_ in onNetworkNewConnection.
Signed-off-by: John Plevyak <[email protected]>
* Add guards to other network calls.
Signed-off-by: John Plevyak <[email protected]>
* Fix common/wasm tests.
Signed-off-by: John Plevyak <[email protected]>
* Patch tests.
Signed-off-by: John Plevyak <[email protected]>
* Remove unecessary file from cherry-pick.
Signed-off-by: John Plevyak <[email protected]> |
register_ck_session (GdmSessionWorker *worker)
{
const char *session_cookie;
gboolean res;
session_cookie = NULL;
res = open_ck_session (worker);
if (res) {
session_cookie = ck_connector_get_cookie (worker->priv->ckc);
}
if (session_cookie != NULL) {
gdm_session_worker_set_environment_variable (worker,
"XDG_SESSION_COOKIE",
session_cookie);
}
} | 0 | []
| gdm | c25ef9245be4e0be2126ef3d075df4401949b570 | 200,992,594,220,246,830,000,000,000,000,000,000,000 | 16 | Store the face and dmrc files in a cache. Refer to bug #565151. |
SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
int, maxevents, int, timeout)
{
return do_epoll_wait(epfd, events, maxevents, timeout);
} | 0 | [
"CWE-416"
]
| linux | a9ed4a6560b8562b7e2e2bed9527e88001f7b682 | 132,791,951,768,325,180,000,000,000,000,000,000,000 | 5 | epoll: Keep a reference on files added to the check list
When adding a new fd to an epoll, and that this new fd is an
epoll fd itself, we recursively scan the fds attached to it
to detect cycles, and add non-epool files to a "check list"
that gets subsequently parsed.
However, this check list isn't completely safe when deletions
can happen concurrently. To sidestep the issue, make sure that
a struct file placed on the check list sees its f_count increased,
ensuring that a concurrent deletion won't result in the file
disapearing from under our feet.
Cc: [email protected]
Signed-off-by: Marc Zyngier <[email protected]>
Signed-off-by: Al Viro <[email protected]> |
static int decode_explicit_rdpcm_dir(thread_context* tctx,int cIdx)
{
context_model* model = &tctx->ctx_model[CONTEXT_MODEL_RDPCM_DIR];
int value = decode_CABAC_bit(&tctx->cabac_decoder, &model[cIdx ? 1 : 0]);
return value;
} | 0 | []
| libde265 | e83f3798dd904aa579425c53020c67e03735138d | 172,373,341,673,473,330,000,000,000,000,000,000,000 | 6 | fix check for valid PPS idx (#298) |
static void bnx2x_init_eq_ring(struct bnx2x *bp)
{
int i;
for (i = 1; i <= NUM_EQ_PAGES; i++) {
union event_ring_elem *elem =
&bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
elem->next_page.addr.hi =
cpu_to_le32(U64_HI(bp->eq_mapping +
BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
elem->next_page.addr.lo =
cpu_to_le32(U64_LO(bp->eq_mapping +
BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
}
bp->eq_cons = 0;
bp->eq_prod = NUM_EQ_DESC;
bp->eq_cons_sb = BNX2X_EQ_INDEX;
/* we want a warning message before it gets wrought... */
atomic_set(&bp->eq_spq_left,
min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
} | 0 | [
"CWE-20"
]
| linux | 8914a595110a6eca69a5e275b323f5d09e18f4f9 | 273,308,066,463,794,900,000,000,000,000,000,000,000 | 21 | bnx2x: disable GSO where gso_size is too big for hardware
If a bnx2x card is passed a GSO packet with a gso_size larger than
~9700 bytes, it will cause a firmware error that will bring the card
down:
bnx2x: [bnx2x_attn_int_deasserted3:4323(enP24p1s0f0)]MC assert!
bnx2x: [bnx2x_mc_assert:720(enP24p1s0f0)]XSTORM_ASSERT_LIST_INDEX 0x2
bnx2x: [bnx2x_mc_assert:736(enP24p1s0f0)]XSTORM_ASSERT_INDEX 0x0 = 0x00000000 0x25e43e47 0x00463e01 0x00010052
bnx2x: [bnx2x_mc_assert:750(enP24p1s0f0)]Chip Revision: everest3, FW Version: 7_13_1
... (dump of values continues) ...
Detect when the mac length of a GSO packet is greater than the maximum
packet size (9700 bytes) and disable GSO.
Signed-off-by: Daniel Axtens <[email protected]>
Reviewed-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
iobuf_close (iobuf_t a)
{
iobuf_t a2;
size_t dummy_len = 0;
int rc = 0;
if (a && a->directfp)
{
fclose (a->directfp);
xfree (a->real_fname);
if (DBG_IOBUF)
log_debug ("iobuf_close -> %p\n", a->directfp);
return 0;
}
for (; a && !rc; a = a2)
{
a2 = a->chain;
if (a->use == 2 && (rc = iobuf_flush (a)))
log_error ("iobuf_flush failed on close: %s\n", gpg_strerror (rc));
if (DBG_IOBUF)
log_debug ("iobuf-%d.%d: close '%s'\n", a->no, a->subno,
a->desc?a->desc:"?");
if (a->filter && (rc = a->filter (a->filter_ov, IOBUFCTRL_FREE,
a->chain, NULL, &dummy_len)))
log_error ("IOBUFCTRL_FREE failed on close: %s\n", gpg_strerror (rc));
xfree (a->real_fname);
if (a->d.buf)
{
memset (a->d.buf, 0, a->d.size); /* erase the buffer */
xfree (a->d.buf);
}
xfree (a);
}
return rc;
} | 0 | [
"CWE-20"
]
| gnupg | 2183683bd633818dd031b090b5530951de76f392 | 224,468,862,749,182,240,000,000,000,000,000,000,000 | 37 | Use inline functions to convert buffer data to scalars.
* common/host2net.h (buf16_to_ulong, buf16_to_uint): New.
(buf16_to_ushort, buf16_to_u16): New.
(buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New.
--
Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to
avoid all sign extension on shift problems. Hanno Böck found a case
with an invalid read due to this problem. To fix that once and for
all almost all uses of "<< 24" and "<< 8" are changed by this patch to
use an inline function from host2net.h.
Signed-off-by: Werner Koch <[email protected]> |
snd_seq_oss_open(struct file *file, int level)
{
int i, rc;
struct seq_oss_devinfo *dp;
dp = kzalloc(sizeof(*dp), GFP_KERNEL);
if (!dp) {
snd_printk(KERN_ERR "can't malloc device info\n");
return -ENOMEM;
}
debug_printk(("oss_open: dp = %p\n", dp));
dp->cseq = system_client;
dp->port = -1;
dp->queue = -1;
for (i = 0; i < SNDRV_SEQ_OSS_MAX_CLIENTS; i++) {
if (client_table[i] == NULL)
break;
}
dp->index = i;
if (i >= SNDRV_SEQ_OSS_MAX_CLIENTS) {
snd_printk(KERN_ERR "too many applications\n");
rc = -ENOMEM;
goto _error;
}
/* look up synth and midi devices */
snd_seq_oss_synth_setup(dp);
snd_seq_oss_midi_setup(dp);
if (dp->synth_opened == 0 && dp->max_mididev == 0) {
/* snd_printk(KERN_ERR "no device found\n"); */
rc = -ENODEV;
goto _error;
}
/* create port */
debug_printk(("create new port\n"));
rc = create_port(dp);
if (rc < 0) {
snd_printk(KERN_ERR "can't create port\n");
goto _error;
}
/* allocate queue */
debug_printk(("allocate queue\n"));
rc = alloc_seq_queue(dp);
if (rc < 0)
goto _error;
/* set address */
dp->addr.client = dp->cseq;
dp->addr.port = dp->port;
/*dp->addr.queue = dp->queue;*/
/*dp->addr.channel = 0;*/
dp->seq_mode = level;
/* set up file mode */
dp->file_mode = translate_mode(file);
/* initialize read queue */
debug_printk(("initialize read queue\n"));
if (is_read_mode(dp->file_mode)) {
dp->readq = snd_seq_oss_readq_new(dp, maxqlen);
if (!dp->readq) {
rc = -ENOMEM;
goto _error;
}
}
/* initialize write queue */
debug_printk(("initialize write queue\n"));
if (is_write_mode(dp->file_mode)) {
dp->writeq = snd_seq_oss_writeq_new(dp, maxqlen);
if (!dp->writeq) {
rc = -ENOMEM;
goto _error;
}
}
/* initialize timer */
debug_printk(("initialize timer\n"));
dp->timer = snd_seq_oss_timer_new(dp);
if (!dp->timer) {
snd_printk(KERN_ERR "can't alloc timer\n");
rc = -ENOMEM;
goto _error;
}
debug_printk(("timer initialized\n"));
/* set private data pointer */
file->private_data = dp;
/* set up for mode2 */
if (level == SNDRV_SEQ_OSS_MODE_MUSIC)
snd_seq_oss_synth_setup_midi(dp);
else if (is_read_mode(dp->file_mode))
snd_seq_oss_midi_open_all(dp, SNDRV_SEQ_OSS_FILE_READ);
client_table[dp->index] = dp;
num_clients++;
debug_printk(("open done\n"));
return 0;
_error:
snd_seq_oss_synth_cleanup(dp);
snd_seq_oss_midi_cleanup(dp);
delete_seq_queue(dp->queue);
delete_port(dp);
return rc;
} | 0 | [
"CWE-415"
]
| linux-2.6 | 27f7ad53829f79e799a253285318bff79ece15bd | 160,838,817,151,122,050,000,000,000,000,000,000,000 | 116 | ALSA: seq/oss - Fix double-free at error path of snd_seq_oss_open()
The error handling in snd_seq_oss_open() has several bad codes that
do dereferecing released pointers and double-free of kmalloc'ed data.
The object dp is release in free_devinfo() that is called via
private_free callback. The rest shouldn't touch this object any more.
The patch changes delete_port() to call kfree() in any case, and gets
rid of unnecessary calls of destructors in snd_seq_oss_open().
Fixes CVE-2010-3080.
Reported-and-tested-by: Tavis Ormandy <[email protected]>
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]> |
struct ccid *ccid_new(const u8 id, struct sock *sk, bool rx)
{
struct ccid_operations *ccid_ops = ccid_by_number(id);
struct ccid *ccid = NULL;
if (ccid_ops == NULL)
goto out;
ccid = kmem_cache_alloc(rx ? ccid_ops->ccid_hc_rx_slab :
ccid_ops->ccid_hc_tx_slab, gfp_any());
if (ccid == NULL)
goto out;
ccid->ccid_ops = ccid_ops;
if (rx) {
memset(ccid + 1, 0, ccid_ops->ccid_hc_rx_obj_size);
if (ccid->ccid_ops->ccid_hc_rx_init != NULL &&
ccid->ccid_ops->ccid_hc_rx_init(ccid, sk) != 0)
goto out_free_ccid;
} else {
memset(ccid + 1, 0, ccid_ops->ccid_hc_tx_obj_size);
if (ccid->ccid_ops->ccid_hc_tx_init != NULL &&
ccid->ccid_ops->ccid_hc_tx_init(ccid, sk) != 0)
goto out_free_ccid;
}
out:
return ccid;
out_free_ccid:
kmem_cache_free(rx ? ccid_ops->ccid_hc_rx_slab :
ccid_ops->ccid_hc_tx_slab, ccid);
ccid = NULL;
goto out;
} | 0 | [
"CWE-476"
]
| linux-2.6 | 8ed030dd0aa400d18c63861c2c6deb7c38f4edde | 128,536,235,533,666,860,000,000,000,000,000,000,000 | 32 | dccp: fix bug in cache allocation
This fixes a bug introduced in commit de4ef86cfce60d2250111f34f8a084e769f23b16
("dccp: fix dccp rmmod when kernel configured to use slub", 17 Jan): the
vsnprintf used sizeof(slab_name_fmt), which became truncated to 4 bytes, since
slab_name_fmt is now a 4-byte pointer and no longer a 32-character array.
This lead to error messages such as
FATAL: Error inserting dccp: No buffer space available
>> kernel: [ 1456.341501] kmem_cache_create: duplicate cache cci
generated due to the truncation after the 3rd character.
Fixed for the moment by introducing a symbolic constant. Tested to fix the bug.
Signed-off-by: Gerrit Renker <[email protected]>
Acked-by: Neil Horman <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
ComputeEffectiveMask(struct xkb_keymap *keymap, struct xkb_mods *mods)
{
mods->mask = mod_mask_get_effective(keymap, mods->mods);
} | 0 | [
"CWE-476"
]
| libxkbcommon | 917636b1d0d70205a13f89062b95e3a0fc31d4ff | 165,698,932,607,382,930,000,000,000,000,000,000,000 | 4 | xkbcomp: fix crash when parsing an xkb_geometry section
xkb_geometry sections are ignored; previously the had done so by
returning NULL for the section's XkbFile, however some sections of the
code do not expect this. Instead, create an XkbFile for it, it will
never be processes and discarded later.
Caught with the afl fuzzer.
Signed-off-by: Ran Benita <[email protected]> |
static void ahci_reg_init(AHCIState *s)
{
int i;
s->control_regs.cap = (s->ports - 1) |
(AHCI_NUM_COMMAND_SLOTS << 8) |
(AHCI_SUPPORTED_SPEED_GEN1 << AHCI_SUPPORTED_SPEED) |
HOST_CAP_NCQ | HOST_CAP_AHCI | HOST_CAP_64;
s->control_regs.impl = (1 << s->ports) - 1;
s->control_regs.version = AHCI_VERSION_1_0;
for (i = 0; i < s->ports; i++) {
s->dev[i].port_state = STATE_RUN;
}
} | 0 | [
"CWE-772",
"CWE-401"
]
| qemu | d68f0f778e7f4fbd674627274267f269e40f0b04 | 279,536,154,557,404,400,000,000,000,000,000,000,000 | 17 | ide: ahci: call cleanup function in ahci unit
This can avoid memory leak when hotunplug the ahci device.
Signed-off-by: Li Qiang <[email protected]>
Message-id: [email protected]
Signed-off-by: John Snow <[email protected]> |
pfm_mask_monitoring(struct task_struct *task)
{
pfm_context_t *ctx = PFM_GET_CTX(task);
unsigned long mask, val, ovfl_mask;
int i;
DPRINT_ovfl(("masking monitoring for [%d]\n", task->pid));
ovfl_mask = pmu_conf->ovfl_val;
/*
* monitoring can only be masked as a result of a valid
* counter overflow. In UP, it means that the PMU still
* has an owner. Note that the owner can be different
* from the current task. However the PMU state belongs
* to the owner.
* In SMP, a valid overflow only happens when task is
* current. Therefore if we come here, we know that
* the PMU state belongs to the current task, therefore
* we can access the live registers.
*
* So in both cases, the live register contains the owner's
* state. We can ONLY touch the PMU registers and NOT the PSR.
*
* As a consequence to this call, the ctx->th_pmds[] array
* contains stale information which must be ignored
* when context is reloaded AND monitoring is active (see
* pfm_restart).
*/
mask = ctx->ctx_used_pmds[0];
for (i = 0; mask; i++, mask>>=1) {
/* skip non used pmds */
if ((mask & 0x1) == 0) continue;
val = ia64_get_pmd(i);
if (PMD_IS_COUNTING(i)) {
/*
* we rebuild the full 64 bit value of the counter
*/
ctx->ctx_pmds[i].val += (val & ovfl_mask);
} else {
ctx->ctx_pmds[i].val = val;
}
DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
i,
ctx->ctx_pmds[i].val,
val & ovfl_mask));
}
/*
* mask monitoring by setting the privilege level to 0
* we cannot use psr.pp/psr.up for this, it is controlled by
* the user
*
* if task is current, modify actual registers, otherwise modify
* thread save state, i.e., what will be restored in pfm_load_regs()
*/
mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
if ((mask & 0x1) == 0UL) continue;
ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL);
ctx->th_pmcs[i] &= ~0xfUL;
DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
}
/*
* make all of this visible
*/
ia64_srlz_d();
} | 0 | []
| linux-2.6 | 41d5e5d73ecef4ef56b7b4cde962929a712689b4 | 58,522,208,225,978,680,000,000,000,000,000,000,000 | 67 | [IA64] permon use-after-free fix
Perfmon associates vmalloc()ed memory with a file descriptor, and installs
a vma mapping that memory. Unfortunately, the vm_file field is not filled
in, so processes with mappings to that memory do not prevent the file from
being closed and the memory freed. This results in use-after-free bugs and
multiple freeing of pages, etc.
I saw this bug on an Altix on SLES9. Haven't reproduced upstream but it
looks like the same issue is there.
Signed-off-by: Nick Piggin <[email protected]>
Cc: Stephane Eranian <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Tony Luck <[email protected]> |
static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
{
struct kvm_cpuid_entry2 *cpuid = NULL;
if (eax && ecx)
cpuid = kvm_find_cpuid_entry(emul_to_vcpu(ctxt),
*eax, *ecx);
if (cpuid) {
*eax = cpuid->eax;
*ecx = cpuid->ecx;
if (ebx)
*ebx = cpuid->ebx;
if (edx)
*edx = cpuid->edx;
return true;
}
return false;
} | 0 | []
| kvm | 0769c5de24621141c953fbe1f943582d37cb4244 | 191,149,080,642,872,930,000,000,000,000,000,000,000 | 21 | KVM: x86: extend "struct x86_emulate_ops" with "get_cpuid"
In order to be able to proceed checks on CPU-specific properties
within the emulator, function "get_cpuid" is introduced.
With "get_cpuid" it is possible to virtually call the guests
"cpuid"-opcode without changing the VM's context.
[mtosatti: cleanup/beautify code]
Signed-off-by: Stephan Baerwolf <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]> |
static ssize_t ucma_query(struct ucma_file *file,
const char __user *inbuf,
int in_len, int out_len)
{
struct rdma_ucm_query cmd;
struct ucma_context *ctx;
void __user *response;
int ret;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
response = u64_to_user_ptr(cmd.response);
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
switch (cmd.option) {
case RDMA_USER_CM_QUERY_ADDR:
ret = ucma_query_addr(ctx, response, out_len);
break;
case RDMA_USER_CM_QUERY_PATH:
ret = ucma_query_path(ctx, response, out_len);
break;
case RDMA_USER_CM_QUERY_GID:
ret = ucma_query_gid(ctx, response, out_len);
break;
default:
ret = -ENOSYS;
break;
}
ucma_put_ctx(ctx);
return ret;
} | 0 | [
"CWE-416",
"CWE-703"
]
| linux | cb2595c1393b4a5211534e6f0a0fbad369e21ad8 | 134,235,213,137,627,890,000,000,000,000,000,000,000 | 35 | infiniband: fix a possible use-after-free bug
ucma_process_join() will free the new allocated "mc" struct,
if there is any error after that, especially the copy_to_user().
But in parallel, ucma_leave_multicast() could find this "mc"
through idr_find() before ucma_process_join() frees it, since it
is already published.
So "mc" could be used in ucma_leave_multicast() after it is been
allocated and freed in ucma_process_join(), since we don't refcnt
it.
Fix this by separating "publish" from ID allocation, so that we
can get an ID first and publish it later after copy_to_user().
Fixes: c8f6a362bf3e ("RDMA/cma: Add multicast communication support")
Reported-by: Noam Rathaus <[email protected]>
Signed-off-by: Cong Wang <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]> |
static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
int read_only)
{
struct ext3_sb_info *sbi = EXT3_SB(sb);
int res = 0;
if (le32_to_cpu(es->s_rev_level) > EXT3_MAX_SUPP_REV) {
ext3_msg(sb, KERN_ERR,
"error: revision level too high, "
"forcing read-only mode");
res = MS_RDONLY;
}
if (read_only)
return res;
if (!(sbi->s_mount_state & EXT3_VALID_FS))
ext3_msg(sb, KERN_WARNING,
"warning: mounting unchecked fs, "
"running e2fsck is recommended");
else if ((sbi->s_mount_state & EXT3_ERROR_FS))
ext3_msg(sb, KERN_WARNING,
"warning: mounting fs with errors, "
"running e2fsck is recommended");
else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
le16_to_cpu(es->s_mnt_count) >=
le16_to_cpu(es->s_max_mnt_count))
ext3_msg(sb, KERN_WARNING,
"warning: maximal mount count reached, "
"running e2fsck is recommended");
else if (le32_to_cpu(es->s_checkinterval) &&
(le32_to_cpu(es->s_lastcheck) +
le32_to_cpu(es->s_checkinterval) <= get_seconds()))
ext3_msg(sb, KERN_WARNING,
"warning: checktime reached, "
"running e2fsck is recommended");
#if 0
/* @@@ We _will_ want to clear the valid bit if we find
inconsistencies, to force a fsck at reboot. But for
a plain journaled filesystem we can keep it set as
valid forever! :) */
es->s_state &= cpu_to_le16(~EXT3_VALID_FS);
#endif
if (!le16_to_cpu(es->s_max_mnt_count))
es->s_max_mnt_count = cpu_to_le16(EXT3_DFL_MAX_MNT_COUNT);
le16_add_cpu(&es->s_mnt_count, 1);
es->s_mtime = cpu_to_le32(get_seconds());
ext3_update_dynamic_rev(sb);
EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
ext3_commit_super(sb, es, 1);
if (test_opt(sb, DEBUG))
ext3_msg(sb, KERN_INFO, "[bs=%lu, gc=%lu, "
"bpg=%lu, ipg=%lu, mo=%04lx]",
sb->s_blocksize,
sbi->s_groups_count,
EXT3_BLOCKS_PER_GROUP(sb),
EXT3_INODES_PER_GROUP(sb),
sbi->s_mount_opt);
if (EXT3_SB(sb)->s_journal->j_inode == NULL) {
char b[BDEVNAME_SIZE];
ext3_msg(sb, KERN_INFO, "using external journal on %s",
bdevname(EXT3_SB(sb)->s_journal->j_dev, b));
} else {
ext3_msg(sb, KERN_INFO, "using internal journal");
}
cleancache_init_fs(sb);
return res;
} | 0 | [
"CWE-20"
]
| linux | 8d0c2d10dd72c5292eda7a06231056a4c972e4cc | 113,663,716,587,157,650,000,000,000,000,000,000,000 | 68 | ext3: Fix format string issues
ext3_msg() takes the printk prefix as the second parameter and the
format string as the third parameter. Two callers of ext3_msg omit the
prefix and pass the format string as the second parameter and the first
parameter to the format string as the third parameter. In both cases
this string comes from an arbitrary source. Which means the string may
contain format string characters, which will
lead to undefined and potentially harmful behavior.
The issue was introduced in commit 4cf46b67eb("ext3: Unify log messages
in ext3") and is fixed by this patch.
CC: [email protected]
Signed-off-by: Lars-Peter Clausen <[email protected]>
Signed-off-by: Jan Kara <[email protected]> |
GF_Err abst_box_dump(GF_Box *a, FILE * trace)
{
u32 i;
GF_AdobeBootstrapInfoBox *p = (GF_AdobeBootstrapInfoBox*)a;
gf_isom_box_dump_start(a, "AdobeBootstrapBox", trace);
gf_fprintf(trace, "BootstrapinfoVersion=\"%u\" Profile=\"%u\" Live=\"%u\" Update=\"%u\" TimeScale=\"%u\" CurrentMediaTime=\""LLU"\" SmpteTimeCodeOffset=\""LLU"\" ",
p->bootstrapinfo_version, p->profile, p->live, p->update, p->time_scale, p->current_media_time, p->smpte_time_code_offset);
if (p->movie_identifier)
gf_fprintf(trace, "MovieIdentifier=\"%s\" ", p->movie_identifier);
if (p->drm_data)
gf_fprintf(trace, "DrmData=\"%s\" ", p->drm_data);
if (p->meta_data)
gf_fprintf(trace, "MetaData=\"%s\" ", p->meta_data);
gf_fprintf(trace, ">\n");
for (i=0; i<p->server_entry_count; i++) {
char *str = (char*)gf_list_get(p->server_entry_table, i);
gf_fprintf(trace, "<ServerEntry>%s</ServerEntry>\n", str);
}
for (i=0; i<p->quality_entry_count; i++) {
char *str = (char*)gf_list_get(p->quality_entry_table, i);
gf_fprintf(trace, "<QualityEntry>%s</QualityEntry>\n", str);
}
for (i=0; i<p->segment_run_table_count; i++)
gf_isom_box_dump(gf_list_get(p->segment_run_table_entries, i), trace);
for (i=0; i<p->fragment_run_table_count; i++)
gf_isom_box_dump(gf_list_get(p->fragment_run_table_entries, i), trace);
gf_isom_box_dump_done("AdobeBootstrapBox", a, trace);
return GF_OK;
} | 0 | [
"CWE-787"
]
| gpac | ea1eca00fd92fa17f0e25ac25652622924a9a6a0 | 244,754,973,049,898,560,000,000,000,000,000,000,000 | 35 | fixed #2138 |
multi_init(struct multi_context *m, struct context *t, bool tcp_mode, int thread_mode)
{
int dev = DEV_TYPE_UNDEF;
msg(D_MULTI_LOW, "MULTI: multi_init called, r=%d v=%d",
t->options.real_hash_size,
t->options.virtual_hash_size);
/*
* Get tun/tap/null device type
*/
dev = dev_type_enum(t->options.dev, t->options.dev_type);
/*
* Init our multi_context object.
*/
CLEAR(*m);
m->thread_mode = thread_mode;
/*
* Real address hash table (source port number is
* considered to be part of the address). Used
* to determine which client sent an incoming packet
* which is seen on the TCP/UDP socket.
*/
m->hash = hash_init(t->options.real_hash_size,
get_random(),
mroute_addr_hash_function,
mroute_addr_compare_function);
/*
* Virtual address hash table. Used to determine
* which client to route a packet to.
*/
m->vhash = hash_init(t->options.virtual_hash_size,
get_random(),
mroute_addr_hash_function,
mroute_addr_compare_function);
/*
* This hash table is a clone of m->hash but with a
* bucket size of one so that it can be used
* for fast iteration through the list.
*/
m->iter = hash_init(1,
get_random(),
mroute_addr_hash_function,
mroute_addr_compare_function);
#ifdef MANAGEMENT_DEF_AUTH
m->cid_hash = hash_init(t->options.real_hash_size,
0,
cid_hash_function,
cid_compare_function);
#endif
#ifdef ENABLE_ASYNC_PUSH
/*
* Mapping between inotify watch descriptors and
* multi_instances.
*/
m->inotify_watchers = hash_init(t->options.real_hash_size,
get_random(),
int_hash_function,
int_compare_function);
#endif
/*
* This is our scheduler, for time-based wakeup
* events.
*/
m->schedule = schedule_init();
/*
* Limit frequency of incoming connections to control
* DoS.
*/
m->new_connection_limiter = frequency_limit_init(t->options.cf_max,
t->options.cf_per);
/*
* Allocate broadcast/multicast buffer list
*/
m->mbuf = mbuf_init(t->options.n_bcast_buf);
/*
* Different status file format options are available
*/
m->status_file_version = t->options.status_file_version;
/*
* Possibly allocate an ifconfig pool, do it
* differently based on whether a tun or tap style
* tunnel.
*/
if (t->options.ifconfig_pool_defined)
{
int pool_type = IFCONFIG_POOL_INDIV;
if (dev == DEV_TYPE_TUN && t->options.topology == TOP_NET30)
{
pool_type = IFCONFIG_POOL_30NET;
}
m->ifconfig_pool = ifconfig_pool_init(pool_type,
t->options.ifconfig_pool_start,
t->options.ifconfig_pool_end,
t->options.duplicate_cn,
t->options.ifconfig_ipv6_pool_defined,
t->options.ifconfig_ipv6_pool_base,
t->options.ifconfig_ipv6_pool_netbits );
/* reload pool data from file */
if (t->c1.ifconfig_pool_persist)
{
ifconfig_pool_read(t->c1.ifconfig_pool_persist, m->ifconfig_pool);
}
}
/*
* Help us keep track of routing table.
*/
m->route_helper = mroute_helper_init(MULTI_CACHE_ROUTE_TTL);
/*
* Initialize route and instance reaper.
*/
m->reaper = multi_reap_new(reap_buckets_per_pass(t->options.virtual_hash_size));
/*
* Get local ifconfig address
*/
CLEAR(m->local);
ASSERT(t->c1.tuntap);
mroute_extract_in_addr_t(&m->local, t->c1.tuntap->local);
/*
* Per-client limits
*/
m->max_clients = t->options.max_clients;
m->instances = calloc(m->max_clients, sizeof(struct multi_instance *));
/*
* Initialize multi-socket TCP I/O wait object
*/
if (tcp_mode)
{
m->mtcp = multi_tcp_init(t->options.max_clients, &m->max_clients);
}
m->tcp_queue_limit = t->options.tcp_queue_limit;
/*
* Allow client <-> client communication, without going through
* tun/tap interface and network stack?
*/
m->enable_c2c = t->options.enable_c2c;
/* initialize stale routes check timer */
if (t->options.stale_routes_check_interval > 0)
{
msg(M_INFO, "Initializing stale route check timer to run every %i seconds and to removing routes with activity timeout older than %i seconds",
t->options.stale_routes_check_interval, t->options.stale_routes_ageing_time);
event_timeout_init(&m->stale_routes_check_et, t->options.stale_routes_check_interval, 0);
}
m->deferred_shutdown_signal.signal_received = 0;
} | 0 | [
"CWE-362",
"CWE-476"
]
| openvpn | 37bc691e7d26ea4eb61a8a434ebd7a9ae76225ab | 57,550,237,048,405,660,000,000,000,000,000,000,000 | 169 | Fix illegal client float (CVE-2020-11810)
There is a time frame between allocating peer-id and initializing data
channel key (which is performed on receiving push request or on async
push-reply) in which the existing peer-id float checks do not work right.
If a "rogue" data channel packet arrives during that time frame from
another address and with same peer-id, this would cause client to float
to that new address. This is because:
- tls_pre_decrypt() sets packet length to zero if
data channel key has not been initialized, which leads to
- openvpn_decrypt() returns true if packet length is zero,
which leads to
- process_incoming_link_part1() returns true, which
calls multi_process_float(), which commits float
Note that problem doesn't happen when data channel key is initialized,
since in this case openvpn_decrypt() returns false.
The net effect of this behaviour is that the VPN session for the
"victim client" is broken. Since the "attacker client" does not have
suitable keys, it can not inject or steal VPN traffic from the other
session. The time window is small and it can not be used to attack
a specific client's session, unless some other way is found to make it
disconnect and reconnect first.
CVE-2020-11810 has been assigned to acknowledge this risk.
Fix illegal float by adding buffer length check ("is this packet still
considered valid") before calling multi_process_float().
Trac: #1272
CVE: 2020-11810
Signed-off-by: Lev Stipakov <[email protected]>
Acked-by: Arne Schwabe <[email protected]>
Acked-by: Antonio Quartulli <[email protected]>
Acked-by: Gert Doering <[email protected]>
Message-Id: <[email protected]>
URL: https://www.mail-archive.com/[email protected]/msg19720.html
Signed-off-by: Gert Doering <[email protected]> |
static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
int nskips, __be16 stream)
{
int i;
for (i = 0; i < nskips; i++) {
if (skiplist[i].stream == stream)
return i;
}
return i;
} | 0 | []
| linux | 196d67593439b03088913227093e374235596e33 | 187,454,559,943,037,270,000,000,000,000,000,000,000 | 11 | sctp: Add support to per-association statistics via a new SCTP_GET_ASSOC_STATS call
The current SCTP stack is lacking a mechanism to have per association
statistics. This is an implementation modeled after OpenSolaris'
SCTP_GET_ASSOC_STATS.
Userspace part will follow on lksctp if/when there is a general ACK on
this.
V4:
- Move ipackets++ before q->immediate.func() for consistency reasons
- Move sctp_max_rto() at the end of sctp_transport_update_rto() to avoid
returning bogus RTO values
- return asoc->rto_min when max_obs_rto value has not changed
V3:
- Increase ictrlchunks in sctp_assoc_bh_rcv() as well
- Move ipackets++ to sctp_inq_push()
- return 0 when no rto updates took place since the last call
V2:
- Implement partial retrieval of stat struct to cope for future expansion
- Kill the rtxpackets counter as it cannot be precise anyway
- Rename outseqtsns to outofseqtsns to make it clearer that these are out
of sequence unexpected TSNs
- Move asoc->ipackets++ under a lock to avoid potential miscounts
- Fold asoc->opackets++ into the already existing asoc check
- Kill unneeded (q->asoc) test when increasing rtxchunks
- Do not count octrlchunks if sending failed (SCTP_XMIT_OK != 0)
- Don't count SHUTDOWNs as SACKs
- Move SCTP_GET_ASSOC_STATS to the private space API
- Adjust the len check in sctp_getsockopt_assoc_stats() to allow for
future struct growth
- Move association statistics in their own struct
- Update idupchunks when we send a SACK with dup TSNs
- return min_rto in max_rto when RTO has not changed. Also return the
transport when max_rto last changed.
Signed-off: Michele Baldessari <[email protected]>
Acked-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
ConnStateData::expectRequestBody(int64_t size)
{
bodyPipe = new BodyPipe(this);
if (size >= 0)
bodyPipe->setBodySize(size);
else
startDechunkingRequest();
return bodyPipe;
} | 0 | [
"CWE-444"
]
| squid | fd68382860633aca92065e6c343cfd1b12b126e7 | 317,643,492,152,086,580,000,000,000,000,000,000,000 | 9 | Improve Transfer-Encoding handling (#702)
Reject messages containing Transfer-Encoding header with coding other
than chunked or identity. Squid does not support other codings.
For simplicity and security sake, also reject messages where
Transfer-Encoding contains unnecessary complex values that are
technically equivalent to "chunked" or "identity" (e.g., ",,chunked" or
"identity, chunked").
RFC 7230 formally deprecated and removed identity coding, but it is
still used by some agents. |
static void dasher_check_outpath(GF_DasherCtx *ctx)
{
if (!ctx->out_path) {
ctx->out_path = gf_filter_pid_get_destination(ctx->opid);
if (!ctx->out_path) return;
if (ctx->mname) {
char *sep = strstr(ctx->out_path, "://");
if (sep) {
char *opath = gf_url_concatenate(ctx->out_path, ctx->mname);
if (opath) {
gf_free(ctx->out_path);
ctx->out_path = opath;
}
}
}
//check if we have a route/atsc output, in which we will case assign hls ref prop
if (!strncmp(ctx->out_path, "route://", 8) || !strncmp(ctx->out_path, "atsc://", 7))
ctx->is_route = GF_TRUE;
}
//for routeout
if (ctx->opid)
gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_URL, &PROP_STRING(ctx->out_path) );
if (ctx->opid_alt)
gf_filter_pid_set_property(ctx->opid_alt, GF_PROP_PID_URL, &PROP_STRING(ctx->out_path) );
} | 0 | [
"CWE-787"
]
| gpac | ea1eca00fd92fa17f0e25ac25652622924a9a6a0 | 187,166,117,112,825,400,000,000,000,000,000,000,000 | 26 | fixed #2138 |
static double filter_box(double x) {
if (x < - DEFAULT_FILTER_BOX)
return 0.0f;
if (x < DEFAULT_FILTER_BOX)
return 1.0f;
return 0.0f;
} | 0 | [
"CWE-119"
]
| php-src | 4bb422343f29f06b7081323844d9b52e1a71e4a5 | 231,801,190,373,483,500,000,000,000,000,000,000,000 | 7 | Fix bug #70976: fix boundary check on gdImageRotateInterpolated |
add_exported_type (MonoReflectionAssemblyBuilder *assemblyb, MonoDynamicImage *assembly, MonoClass *klass, guint32 parent_index)
{
MonoDynamicTable *table;
guint32 *values;
guint32 scope, scope_idx, impl, current_idx;
gboolean forwarder = TRUE;
gpointer iter = NULL;
MonoClass *nested;
if (klass->nested_in) {
impl = (parent_index << MONO_IMPLEMENTATION_BITS) + MONO_IMPLEMENTATION_EXP_TYPE;
forwarder = FALSE;
} else {
scope = resolution_scope_from_image (assembly, klass->image);
g_assert ((scope & MONO_RESOLTION_SCOPE_MASK) == MONO_RESOLTION_SCOPE_ASSEMBLYREF);
scope_idx = scope >> MONO_RESOLTION_SCOPE_BITS;
impl = (scope_idx << MONO_IMPLEMENTATION_BITS) + MONO_IMPLEMENTATION_ASSEMBLYREF;
}
table = &assembly->tables [MONO_TABLE_EXPORTEDTYPE];
table->rows++;
alloc_table (table, table->rows);
current_idx = table->next_idx;
values = table->values + current_idx * MONO_EXP_TYPE_SIZE;
values [MONO_EXP_TYPE_FLAGS] = forwarder ? TYPE_ATTRIBUTE_FORWARDER : 0;
values [MONO_EXP_TYPE_TYPEDEF] = 0;
values [MONO_EXP_TYPE_IMPLEMENTATION] = impl;
values [MONO_EXP_TYPE_NAME] = string_heap_insert (&assembly->sheap, klass->name);
values [MONO_EXP_TYPE_NAMESPACE] = string_heap_insert (&assembly->sheap, klass->name_space);
table->next_idx++;
while ((nested = mono_class_get_nested_types (klass, &iter)))
add_exported_type (assemblyb, assembly, nested, current_idx);
} | 0 | [
"CWE-20"
]
| mono | 65292a69c837b8a5f7a392d34db63de592153358 | 259,942,793,706,160,770,000,000,000,000,000,000,000 | 37 | Handle invalid instantiation of generic methods.
* verify.c: Add new function to internal verifier API to check
method instantiations.
* reflection.c (mono_reflection_bind_generic_method_parameters):
Check the instantiation before returning it.
Fixes #655847 |
ModuleExport void UnregisterWEBPImage(void)
{
(void) UnregisterMagickInfo("WEBP");
} | 0 | [
"CWE-400"
]
| ImageMagick | cb63560ba25e4a6c51ab282538c24877fff7d471 | 286,277,424,280,837,500,000,000,000,000,000,000,000 | 4 | https://github.com/ImageMagick/ImageMagick/issues/641 |
int ssl3_get_next_proto(SSL *s)
{
int ok;
int proto_len, padding_len;
long n;
const unsigned char *p;
/* Clients cannot send a NextProtocol message if we didn't see the
* extension in their ClientHello */
if (!s->s3->next_proto_neg_seen)
{
SSLerr(SSL_F_SSL3_GET_NEXT_PROTO,SSL_R_GOT_NEXT_PROTO_WITHOUT_EXTENSION);
return -1;
}
n=s->method->ssl_get_message(s,
SSL3_ST_SR_NEXT_PROTO_A,
SSL3_ST_SR_NEXT_PROTO_B,
SSL3_MT_NEXT_PROTO,
514, /* See the payload format below */
&ok);
if (!ok)
return((int)n);
/* s->state doesn't reflect whether ChangeCipherSpec has been received
* in this handshake, but s->s3->change_cipher_spec does (will be reset
* by ssl3_get_finished). */
if (!s->s3->change_cipher_spec)
{
SSLerr(SSL_F_SSL3_GET_NEXT_PROTO,SSL_R_GOT_NEXT_PROTO_BEFORE_A_CCS);
return -1;
}
if (n < 2)
return 0; /* The body must be > 1 bytes long */
p=(unsigned char *)s->init_msg;
/*-
* The payload looks like:
* uint8 proto_len;
* uint8 proto[proto_len];
* uint8 padding_len;
* uint8 padding[padding_len];
*/
proto_len = p[0];
if (proto_len + 2 > s->init_num)
return 0;
padding_len = p[proto_len + 1];
if (proto_len + padding_len + 2 != s->init_num)
return 0;
s->next_proto_negotiated = OPENSSL_malloc(proto_len);
if (!s->next_proto_negotiated)
{
SSLerr(SSL_F_SSL3_GET_NEXT_PROTO,ERR_R_MALLOC_FAILURE);
return 0;
}
memcpy(s->next_proto_negotiated, p + 1, proto_len);
s->next_proto_negotiated_len = proto_len;
return 1;
} | 0 | [
"CWE-310"
]
| openssl | ce325c60c74b0fa784f5872404b722e120e5cab0 | 120,101,512,576,730,960,000,000,000,000,000,000,000 | 64 | Only allow ephemeral RSA keys in export ciphersuites.
OpenSSL clients would tolerate temporary RSA keys in non-export
ciphersuites. It also had an option SSL_OP_EPHEMERAL_RSA which
enabled this server side. Remove both options as they are a
protocol violation.
Thanks to Karthikeyan Bhargavan for reporting this issue.
(CVE-2015-0204)
Reviewed-by: Matt Caswell <[email protected]> |
InitializeLDAPConnection(Port *port, LDAP **ldap)
{
int ldapversion = LDAP_VERSION3;
int r;
*ldap = ldap_init(port->hba->ldapserver, port->hba->ldapport);
if (!*ldap)
{
#ifndef WIN32
ereport(LOG,
(errmsg("could not initialize LDAP: %m")));
#else
ereport(LOG,
(errmsg("could not initialize LDAP: error code %d",
(int) LdapGetLastError())));
#endif
return STATUS_ERROR;
}
if ((r = ldap_set_option(*ldap, LDAP_OPT_PROTOCOL_VERSION, &ldapversion)) != LDAP_SUCCESS)
{
ldap_unbind(*ldap);
ereport(LOG,
(errmsg("could not set LDAP protocol version: %s", ldap_err2string(r))));
return STATUS_ERROR;
}
if (port->hba->ldaptls)
{
#ifndef WIN32
if ((r = ldap_start_tls_s(*ldap, NULL, NULL)) != LDAP_SUCCESS)
#else
static __ldap_start_tls_sA _ldap_start_tls_sA = NULL;
if (_ldap_start_tls_sA == NULL)
{
/*
* Need to load this function dynamically because it does not
* exist on Windows 2000, and causes a load error for the whole
* exe if referenced.
*/
HANDLE ldaphandle;
ldaphandle = LoadLibrary("WLDAP32.DLL");
if (ldaphandle == NULL)
{
/*
* should never happen since we import other files from
* wldap32, but check anyway
*/
ldap_unbind(*ldap);
ereport(LOG,
(errmsg("could not load wldap32.dll")));
return STATUS_ERROR;
}
_ldap_start_tls_sA = (__ldap_start_tls_sA) GetProcAddress(ldaphandle, "ldap_start_tls_sA");
if (_ldap_start_tls_sA == NULL)
{
ldap_unbind(*ldap);
ereport(LOG,
(errmsg("could not load function _ldap_start_tls_sA in wldap32.dll"),
errdetail("LDAP over SSL is not supported on this platform.")));
return STATUS_ERROR;
}
/*
* Leak LDAP handle on purpose, because we need the library to
* stay open. This is ok because it will only ever be leaked once
* per process and is automatically cleaned up on process exit.
*/
}
if ((r = _ldap_start_tls_sA(*ldap, NULL, NULL, NULL, NULL)) != LDAP_SUCCESS)
#endif
{
ldap_unbind(*ldap);
ereport(LOG,
(errmsg("could not start LDAP TLS session: %s", ldap_err2string(r))));
return STATUS_ERROR;
}
}
return STATUS_OK;
} | 0 | [
"CWE-89"
]
| postgres | 2b3a8b20c2da9f39ffecae25ab7c66974fbc0d3b | 7,152,778,322,518,996,000,000,000,000,000,000,000 | 83 | Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244 |
void lpushxCommand(client *c) {
pushxGenericCommand(c,LIST_HEAD);
} | 0 | [
"CWE-190"
]
| redis | f6a40570fa63d5afdd596c78083d754081d80ae3 | 120,034,937,367,298,740,000,000,000,000,000,000,000 | 3 | Fix ziplist and listpack overflows and truncations (CVE-2021-32627, CVE-2021-32628)
- fix possible heap corruption in ziplist and listpack resulting by trying to
allocate more than the maximum size of 4GB.
- prevent ziplist (hash and zset) from reaching size of above 1GB, will be
converted to HT encoding, that's not a useful size.
- prevent listpack (stream) from reaching size of above 1GB.
- XADD will start a new listpack if the new record may cause the previous
listpack to grow over 1GB.
- XADD will respond with an error if a single stream record is over 1GB
- List type (ziplist in quicklist) was truncating strings that were over 4GB,
now it'll respond with an error. |
xmlGzfileOpenW (const char *filename, int compression) {
const char *path = NULL;
char mode[15];
gzFile fd;
snprintf(mode, sizeof(mode), "wb%d", compression);
if (!strcmp(filename, "-")) {
int duped_fd = dup(fileno(stdout));
fd = gzdopen(duped_fd, "rb");
if (fd == Z_NULL && duped_fd >= 0) {
close(duped_fd); /* gzdOpen() does not close on failure */
}
return((void *) fd);
}
if (!xmlStrncasecmp(BAD_CAST filename, BAD_CAST "file://localhost/", 17))
#if defined (_WIN32) || defined (__DJGPP__) && !defined(__CYGWIN__)
path = &filename[17];
#else
path = &filename[16];
#endif
else if (!xmlStrncasecmp(BAD_CAST filename, BAD_CAST "file:///", 8)) {
#if defined (_WIN32) || defined (__DJGPP__) && !defined(__CYGWIN__)
path = &filename[8];
#else
path = &filename[7];
#endif
} else
path = filename;
if (path == NULL)
return(NULL);
#if defined(_WIN32) || defined (__DJGPP__) && !defined (__CYGWIN__)
fd = xmlWrapGzOpen(path, mode);
#else
fd = gzopen(path, mode);
#endif
return((void *) fd);
} | 0 | [
"CWE-134"
]
| libxml2 | 4472c3a5a5b516aaf59b89be602fbce52756c3e9 | 103,845,086,048,092,280,000,000,000,000,000,000,000 | 41 | Fix some format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
Decorate every method in libxml2 with the appropriate
LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups
following the reports. |
long do_mkdirat(int dfd, const char __user *pathname, umode_t mode)
{
struct dentry *dentry;
struct path path;
int error;
unsigned int lookup_flags = LOOKUP_DIRECTORY;
retry:
dentry = user_path_create(dfd, pathname, &path, lookup_flags);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
error = security_path_mkdir(&path, dentry, mode);
if (!error)
error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
return error;
} | 0 | [
"CWE-416",
"CWE-284"
]
| linux | d0cb50185ae942b03c4327be322055d622dc79f6 | 322,459,053,089,432,070,000,000,000,000,000,000,000 | 24 | do_last(): fetch directory ->i_mode and ->i_uid before it's too late
may_create_in_sticky() call is done when we already have dropped the
reference to dir.
Fixes: 30aba6656f61e (namei: allow restricted O_CREAT of FIFOs and regular files)
Signed-off-by: Al Viro <[email protected]> |
x509_error_string( void )
{
return _globus_error_message;
} | 0 | [
"CWE-20"
]
| htcondor | 2f3c393feb819cf6c6d06fb0a2e9c4e171f3c26d | 186,636,195,003,229,700,000,000,000,000,000,000,000 | 4 | (#6455) Fix issue validating VOMS proxies |
void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu,
u32 msr, int type, bool value)
{
if (value)
vmx_enable_intercept_for_msr(vcpu, msr, type);
else
vmx_disable_intercept_for_msr(vcpu, msr, type);
} | 0 | [
"CWE-787"
]
| linux | 04c4f2ee3f68c9a4bf1653d15f1a9a435ae33f7a | 24,726,640,567,188,110,000,000,000,000,000,000,000 | 8 | KVM: VMX: Don't use vcpu->run->internal.ndata as an array index
__vmx_handle_exit() uses vcpu->run->internal.ndata as an index for
an array access. Since vcpu->run is (can be) mapped to a user address
space with a writer permission, the 'ndata' could be updated by the
user process at anytime (the user process can set it to outside the
bounds of the array).
So, it is not safe that __vmx_handle_exit() uses the 'ndata' that way.
Fixes: 1aa561b1a4c0 ("kvm: x86: Add "last CPU" to some KVM_EXIT information")
Signed-off-by: Reiji Watanabe <[email protected]>
Reviewed-by: Jim Mattson <[email protected]>
Message-Id: <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]> |
static inline int object_custom(UNSERIALIZE_PARAMETER, zend_class_entry *ce)
{
long datalen;
datalen = parse_iv2((*p) + 2, p);
(*p) += 2;
if (datalen < 0 || (*p) + datalen >= max) {
zend_error(E_WARNING, "Insufficient data for unserializing - %ld required, %ld present", datalen, (long)(max - (*p)));
return 0;
}
if (ce->unserialize == NULL) {
zend_error(E_WARNING, "Class %s has no unserializer", ce->name);
object_init_ex(*rval, ce);
} else if (ce->unserialize(rval, ce, (const unsigned char*)*p, datalen, (zend_unserialize_data *)var_hash TSRMLS_CC) != SUCCESS) {
return 0;
}
(*p) += datalen;
return finish_nested_data(UNSERIALIZE_PASSTHRU);
} | 1 | [
"CWE-189"
]
| php-src | 56754a7f9eba0e4f559b6ca081d9f2a447b3f159 | 207,401,612,749,960,530,000,000,000,000,000,000,000 | 24 | Fixed bug #68044: Integer overflow in unserialize() (32-bits only) |
_utf8_to_unicode(uint32_t *pwc, const char *s, size_t n)
{
static const char utf8_count[256] = {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,/* 00 - 0F */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,/* 10 - 1F */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,/* 20 - 2F */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,/* 30 - 3F */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,/* 40 - 4F */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,/* 50 - 5F */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,/* 60 - 6F */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,/* 70 - 7F */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,/* 80 - 8F */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,/* 90 - 9F */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,/* A0 - AF */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,/* B0 - BF */
0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,/* C0 - CF */
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,/* D0 - DF */
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,/* E0 - EF */
4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* F0 - FF */
};
int ch, i;
int cnt;
uint32_t wc;
/* Sanity check. */
if (n == 0)
return (0);
/*
* Decode 1-4 bytes depending on the value of the first byte.
*/
ch = (unsigned char)*s;
if (ch == 0)
return (0); /* Standard: return 0 for end-of-string. */
cnt = utf8_count[ch];
/* Invalid sequence or there are not plenty bytes. */
if ((int)n < cnt) {
cnt = (int)n;
for (i = 1; i < cnt; i++) {
if ((s[i] & 0xc0) != 0x80) {
cnt = i;
break;
}
}
goto invalid_sequence;
}
/* Make a Unicode code point from a single UTF-8 sequence. */
switch (cnt) {
case 1: /* 1 byte sequence. */
*pwc = ch & 0x7f;
return (cnt);
case 2: /* 2 bytes sequence. */
if ((s[1] & 0xc0) != 0x80) {
cnt = 1;
goto invalid_sequence;
}
*pwc = ((ch & 0x1f) << 6) | (s[1] & 0x3f);
return (cnt);
case 3: /* 3 bytes sequence. */
if ((s[1] & 0xc0) != 0x80) {
cnt = 1;
goto invalid_sequence;
}
if ((s[2] & 0xc0) != 0x80) {
cnt = 2;
goto invalid_sequence;
}
wc = ((ch & 0x0f) << 12)
| ((s[1] & 0x3f) << 6)
| (s[2] & 0x3f);
if (wc < 0x800)
goto invalid_sequence;/* Overlong sequence. */
break;
case 4: /* 4 bytes sequence. */
if ((s[1] & 0xc0) != 0x80) {
cnt = 1;
goto invalid_sequence;
}
if ((s[2] & 0xc0) != 0x80) {
cnt = 2;
goto invalid_sequence;
}
if ((s[3] & 0xc0) != 0x80) {
cnt = 3;
goto invalid_sequence;
}
wc = ((ch & 0x07) << 18)
| ((s[1] & 0x3f) << 12)
| ((s[2] & 0x3f) << 6)
| (s[3] & 0x3f);
if (wc < 0x10000)
goto invalid_sequence;/* Overlong sequence. */
break;
default: /* Others are all invalid sequence. */
if (ch == 0xc0 || ch == 0xc1)
cnt = 2;
else if (ch >= 0xf5 && ch <= 0xf7)
cnt = 4;
else if (ch >= 0xf8 && ch <= 0xfb)
cnt = 5;
else if (ch == 0xfc || ch == 0xfd)
cnt = 6;
else
cnt = 1;
if ((int)n < cnt)
cnt = (int)n;
for (i = 1; i < cnt; i++) {
if ((s[i] & 0xc0) != 0x80) {
cnt = i;
break;
}
}
goto invalid_sequence;
}
/* The code point larger than 0x10FFFF is not legal
* Unicode values. */
if (wc > UNICODE_MAX)
goto invalid_sequence;
/* Correctly gets a Unicode, returns used bytes. */
*pwc = wc;
return (cnt);
invalid_sequence:
*pwc = UNICODE_R_CHAR;/* set the Replacement Character instead. */
return (cnt * -1);
} | 0 | [
"CWE-476"
]
| libarchive | 42a3408ac7df1e69bea9ea12b72e14f59f7400c0 | 73,041,327,661,883,760,000,000,000,000,000,000,000 | 127 | archive_strncat_l(): allocate and do not convert if length == 0
This ensures e.g. that archive_mstring_copy_mbs_len_l() does not set
aes_set = AES_SET_MBS with aes_mbs.s == NULL.
Resolves possible null-pointer dereference reported by OSS-Fuzz.
Reported-By: OSS-Fuzz issue 286 |
static int decode_bit_string(const u8 * inbuf, size_t inlen, void *outbuf,
size_t outlen, int invert)
{
const u8 *in = inbuf;
u8 *out = (u8 *) outbuf;
int i, count = 0;
int zero_bits;
size_t octets_left;
if (outlen < octets_left)
return SC_ERROR_BUFFER_TOO_SMALL;
if (inlen < 1)
return SC_ERROR_INVALID_ASN1_OBJECT;
zero_bits = *in & 0x07;
octets_left = inlen - 1;
in++;
memset(outbuf, 0, outlen);
while (octets_left) {
/* 1st octet of input: ABCDEFGH, where A is the MSB */
/* 1st octet of output: HGFEDCBA, where A is the LSB */
/* first bit in bit string is the LSB in first resulting octet */
int bits_to_go;
*out = 0;
if (octets_left == 1)
bits_to_go = 8 - zero_bits;
else
bits_to_go = 8;
if (invert)
for (i = 0; i < bits_to_go; i++) {
*out |= ((*in >> (7 - i)) & 1) << i;
}
else {
*out = *in;
}
out++;
in++;
octets_left--;
count++;
}
return (count * 8) - zero_bits;
} | 0 | [
"CWE-119",
"CWE-787"
]
| OpenSC | 412a6142c27a5973c61ba540e33cdc22d5608e68 | 102,143,066,898,472,570,000,000,000,000,000,000,000 | 44 | fixed out of bounds access of ASN.1 Bitstring
Credit to OSS-Fuzz |
void MainWindow::closeEvent(QCloseEvent* event)
{
if (continueJobsRunning() && continueModified()) {
if (!m_htmlEditor || m_htmlEditor->close()) {
LOG_DEBUG() << "begin";
JOBS.cleanup();
writeSettings();
if (m_exitCode == EXIT_SUCCESS) {
MLT.stop();
} else {
if (multitrack())
m_timelineDock->model()->close();
if (playlist())
m_playlistDock->model()->close();
else
onMultitrackClosed();
}
QThreadPool::globalInstance()->clear();
AudioLevelsTask::closeAll();
event->accept();
emit aboutToShutDown();
if (m_exitCode == EXIT_SUCCESS) {
QApplication::quit();
LOG_DEBUG() << "end";
::_Exit(0);
} else {
QApplication::exit(m_exitCode);
LOG_DEBUG() << "end";
}
return;
}
}
event->ignore();
} | 0 | [
"CWE-89",
"CWE-327",
"CWE-295"
]
| shotcut | f008adc039642307f6ee3378d378cdb842e52c1d | 160,781,232,185,278,930,000,000,000,000,000,000,000 | 34 | fix upgrade check is not using TLS correctly |
bool ad_unconvert(TALLOC_CTX *mem_ctx,
struct vfs_handle_struct *handle,
const char *catia_mappings,
struct smb_filename *smb_fname,
bool *converted)
{
static struct char_mappings **cmaps = NULL;
TALLOC_CTX *frame = talloc_stackframe();
struct ad_collect_state state;
struct stream_struct *streams = NULL;
struct smb_filename *adpath = NULL;
struct adouble *ad = NULL;
unsigned int num_streams = 0;
size_t to_convert = 0;
bool have_rsrc = false;
files_struct *fsp = NULL;
size_t i;
NTSTATUS status;
int ret;
bool ok;
*converted = false;
if (cmaps == NULL) {
const char **mappings = NULL;
mappings = str_list_make_v3_const(
frame, catia_mappings, NULL);
if (mappings == NULL) {
ok = false;
goto out;
}
cmaps = string_replace_init_map(mem_ctx, mappings);
TALLOC_FREE(mappings);
}
ok = ad_unconvert_get_streams(handle,
smb_fname,
frame,
&num_streams,
&streams);
if (!ok) {
goto out;
}
for (i = 0; i < num_streams; i++) {
if (strcasecmp_m(streams[i].name, "::$DATA") == 0) {
continue;
}
to_convert++;
if (is_afpresource_stream(streams[i].name)) {
have_rsrc = true;
}
}
if (to_convert == 0) {
ok = true;
goto out;
}
state = (struct ad_collect_state) {
.adx_data_off = 0,
};
ret = adouble_path(frame, smb_fname, &adpath);
if (ret != 0) {
ok = false;
goto out;
}
ret = SMB_VFS_STAT(handle->conn, adpath);
if (ret == 0) {
state.have_adfile = true;
} else {
if (errno != ENOENT) {
ok = false;
goto out;
}
state.have_adfile = false;
}
if (to_convert == 1 && have_rsrc && state.have_adfile) {
/*
* So we have just a single stream, the resource fork stream
* from an AppleDouble file. Fine, that means there's nothing to
* convert.
*/
ok = true;
goto out;
}
ad = ad_init(frame, ADOUBLE_RSRC);
if (ad == NULL) {
ok = false;
goto out;
}
for (i = 0; i < num_streams; i++) {
ok = ad_collect_one_stream(handle,
cmaps,
smb_fname,
&streams[i],
ad,
&state);
if (!ok) {
goto out;
}
}
ok = ad_unconvert_open_ad(frame, handle, smb_fname, adpath, &fsp);
if (!ok) {
DBG_ERR("Failed to open adfile [%s]\n",
smb_fname_str_dbg(smb_fname));
goto out;
}
ret = ad_fset(handle, ad, fsp);
if (ret != 0) {
ok = false;
goto out;
}
*converted = true;
ok = true;
out:
if (fsp != NULL) {
status = close_file(NULL, fsp, NORMAL_CLOSE);
if (!NT_STATUS_IS_OK(status)) {
DBG_ERR("close_file [%s] failed: %s\n",
smb_fname_str_dbg(smb_fname),
nt_errstr(status));
ok = false;
}
}
TALLOC_FREE(frame);
return ok;
} | 0 | [
"CWE-787"
]
| samba | 0e2b3fb982d1f53d111e10d9197ed2ec2e13712c | 45,151,601,984,180,960,000,000,000,000,000,000,000 | 138 | CVE-2021-44142: libadouble: harden parsing code
BUG: https://bugzilla.samba.org/show_bug.cgi?id=14914
Signed-off-by: Ralph Boehme <[email protected]>
Reviewed-by: Jeremy Allison <[email protected]> |
void init_smb_request(struct smb_request *req,
const uint8 *inbuf,
size_t unread_bytes,
bool encrypted)
{
struct smbd_server_connection *sconn = smbd_server_conn;
size_t req_size = smb_len(inbuf) + 4;
/* Ensure we have at least smb_size bytes. */
if (req_size < smb_size) {
DEBUG(0,("init_smb_request: invalid request size %u\n",
(unsigned int)req_size ));
exit_server_cleanly("Invalid SMB request");
}
req->cmd = CVAL(inbuf, smb_com);
req->flags2 = SVAL(inbuf, smb_flg2);
req->smbpid = SVAL(inbuf, smb_pid);
req->mid = SVAL(inbuf, smb_mid);
req->seqnum = 0;
req->vuid = SVAL(inbuf, smb_uid);
req->tid = SVAL(inbuf, smb_tid);
req->wct = CVAL(inbuf, smb_wct);
req->vwv = (uint16_t *)(inbuf+smb_vwv);
req->buflen = smb_buflen(inbuf);
req->buf = (const uint8_t *)smb_buf(inbuf);
req->unread_bytes = unread_bytes;
req->encrypted = encrypted;
req->conn = conn_find(sconn,req->tid);
req->chain_fsp = NULL;
req->chain_outbuf = NULL;
smb_init_perfcount_data(&req->pcd);
/* Ensure we have at least wct words and 2 bytes of bcc. */
if (smb_size + req->wct*2 > req_size) {
DEBUG(0,("init_smb_request: invalid wct number %u (size %u)\n",
(unsigned int)req->wct,
(unsigned int)req_size));
exit_server_cleanly("Invalid SMB request");
}
/* Ensure bcc is correct. */
if (((uint8 *)smb_buf(inbuf)) + req->buflen > inbuf + req_size) {
DEBUG(0,("init_smb_request: invalid bcc number %u "
"(wct = %u, size %u)\n",
(unsigned int)req->buflen,
(unsigned int)req->wct,
(unsigned int)req_size));
exit_server_cleanly("Invalid SMB request");
}
req->outbuf = NULL;
} | 0 | []
| samba | c116652a3050a8549b722ae8ab5f9a2bf9a33b9f | 67,936,520,685,378,250,000,000,000,000,000,000,000 | 50 | In chain_reply, copy the subrequests' error to the main request |
int main(int argc, char **argv)
{
int error;
my_bool first_argument_uses_wildcards=0;
char *wild;
MYSQL mysql;
MY_INIT(argv[0]);
my_getopt_use_args_separator= TRUE;
if (load_defaults("my",load_default_groups,&argc,&argv))
exit(1);
my_getopt_use_args_separator= FALSE;
get_options(&argc,&argv);
wild=0;
if (argc)
{
char *pos= argv[argc-1], *to;
for (to= pos ; *pos ; pos++, to++)
{
switch (*pos) {
case '*':
*pos= '%';
first_argument_uses_wildcards= 1;
break;
case '?':
*pos= '_';
first_argument_uses_wildcards= 1;
break;
case '%':
case '_':
first_argument_uses_wildcards= 1;
break;
case '\\':
pos++;
default: break;
}
*to= *pos;
}
*to= *pos; /* just to copy a '\0' if '\\' was used */
}
if (first_argument_uses_wildcards)
wild= argv[--argc];
else if (argc == 3) /* We only want one field */
wild= argv[--argc];
if (argc > 2)
{
fprintf(stderr,"%s: Too many arguments\n",my_progname);
exit(1);
}
mysql_init(&mysql);
if (opt_compress)
mysql_options(&mysql,MYSQL_OPT_COMPRESS,NullS);
#ifdef HAVE_OPENSSL
if (opt_use_ssl)
{
mysql_ssl_set(&mysql, opt_ssl_key, opt_ssl_cert, opt_ssl_ca,
opt_ssl_capath, opt_ssl_cipher);
mysql_options(&mysql, MYSQL_OPT_SSL_CRL, opt_ssl_crl);
mysql_options(&mysql, MYSQL_OPT_SSL_CRLPATH, opt_ssl_crlpath);
}
mysql_options(&mysql,MYSQL_OPT_SSL_VERIFY_SERVER_CERT,
(char*)&opt_ssl_verify_server_cert);
#endif
if (opt_protocol)
mysql_options(&mysql,MYSQL_OPT_PROTOCOL,(char*)&opt_protocol);
if (opt_bind_addr)
mysql_options(&mysql,MYSQL_OPT_BIND,opt_bind_addr);
if (!opt_secure_auth)
mysql_options(&mysql, MYSQL_SECURE_AUTH,(char*)&opt_secure_auth);
#ifdef HAVE_SMEM
if (shared_memory_base_name)
mysql_options(&mysql,MYSQL_SHARED_MEMORY_BASE_NAME,shared_memory_base_name);
#endif
mysql_options(&mysql, MYSQL_SET_CHARSET_NAME, default_charset);
if (opt_plugin_dir && *opt_plugin_dir)
mysql_options(&mysql, MYSQL_PLUGIN_DIR, opt_plugin_dir);
if (opt_default_auth && *opt_default_auth)
mysql_options(&mysql, MYSQL_DEFAULT_AUTH, opt_default_auth);
if (using_opt_enable_cleartext_plugin)
mysql_options(&mysql, MYSQL_ENABLE_CLEARTEXT_PLUGIN,
(char*)&opt_enable_cleartext_plugin);
mysql_options(&mysql, MYSQL_OPT_CONNECT_ATTR_RESET, 0);
mysql_options4(&mysql, MYSQL_OPT_CONNECT_ATTR_ADD,
"program_name", "mysqlshow");
if (!(mysql_connect_ssl_check(&mysql, host, user, opt_password,
(first_argument_uses_wildcards) ? "" :
argv[0], opt_mysql_port, opt_mysql_unix_port,
0, opt_ssl_required)))
{
fprintf(stderr,"%s: %s\n",my_progname,mysql_error(&mysql));
exit(1);
}
mysql.reconnect= 1;
switch (argc) {
case 0: error=list_dbs(&mysql,wild); break;
case 1:
if (opt_status)
error=list_table_status(&mysql,argv[0],wild);
else
error=list_tables(&mysql,argv[0],wild);
break;
default:
if (opt_status && ! wild)
error=list_table_status(&mysql,argv[0],argv[1]);
else
error=list_fields(&mysql,argv[0],argv[1],wild);
break;
}
mysql_close(&mysql); /* Close & free connection */
my_free(opt_password);
#ifdef HAVE_SMEM
my_free(shared_memory_base_name);
#endif
my_end(my_end_arg);
exit(error ? 1 : 0);
return 0; /* No compiler warnings */
} | 1 | [
"CWE-319"
]
| mysql-server | 0002e1380d5f8c113b6bce91f2cf3f75136fd7c7 | 86,880,178,494,893,580,000,000,000,000,000,000,000 | 125 | BUG#25575605: SETTING --SSL-MODE=REQUIRED SENDS CREDENTIALS BEFORE VERIFYING SSL CONNECTION
MYSQL_OPT_SSL_MODE option introduced.
It is set in case of --ssl-mode=REQUIRED and permits only SSL connection.
(cherry picked from commit f91b941842d240b8a62645e507f5554e8be76aec) |
bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
{
bool result;
unsigned seq;
if (new_dentry == old_dentry)
return true;
do {
/* for restarting inner loop in case of seq retry */
seq = read_seqbegin(&rename_lock);
/*
* Need rcu_readlock to protect against the d_parent trashing
* due to d_move
*/
rcu_read_lock();
if (d_ancestor(old_dentry, new_dentry))
result = true;
else
result = false;
rcu_read_unlock();
} while (read_seqretry(&rename_lock, seq));
return result;
} | 0 | [
"CWE-362",
"CWE-399"
]
| linux | 49d31c2f389acfe83417083e1208422b4091cd9e | 185,204,834,248,913,100,000,000,000,000,000,000,000 | 25 | dentry name snapshots
take_dentry_name_snapshot() takes a safe snapshot of dentry name;
if the name is a short one, it gets copied into caller-supplied
structure, otherwise an extra reference to external name is grabbed
(those are never modified). In either case the pointer to stable
string is stored into the same structure.
dentry must be held by the caller of take_dentry_name_snapshot(),
but may be freely dropped afterwards - the snapshot will stay
until destroyed by release_dentry_name_snapshot().
Intended use:
struct name_snapshot s;
take_dentry_name_snapshot(&s, dentry);
...
access s.name
...
release_dentry_name_snapshot(&s);
Replaces fsnotify_oldname_...(), gets used in fsnotify to obtain the name
to pass down with event.
Signed-off-by: Al Viro <[email protected]> |
ready_callback_key_compare_only_active (gconstpointer a, gconstpointer b)
{
const ReadyCallback *callback_a;
callback_a = a;
/* Non active callbacks never match */
if (!callback_a->active) {
return -1;
}
return ready_callback_key_compare (a, b);
} | 0 | []
| nautilus | 7632a3e13874a2c5e8988428ca913620a25df983 | 319,410,233,872,516,240,000,000,000,000,000,000,000 | 13 | Check for trusted desktop file launchers.
2009-02-24 Alexander Larsson <[email protected]>
* libnautilus-private/nautilus-directory-async.c:
Check for trusted desktop file launchers.
* libnautilus-private/nautilus-file-private.h:
* libnautilus-private/nautilus-file.c:
* libnautilus-private/nautilus-file.h:
Add nautilus_file_is_trusted_link.
Allow unsetting of custom display name.
* libnautilus-private/nautilus-mime-actions.c:
Display dialog when trying to launch a non-trusted desktop file.
svn path=/trunk/; revision=15003 |
TEST_F(RenameCollectionTest, RenameSameDatabaseStayTempFalse) {
_testRenameCollectionStayTemp(_opCtx.get(), _sourceNss, _targetNss, false, true);
} | 0 | [
"CWE-20"
]
| mongo | 35c1b1f588f04926a958ad2fe4d9c59d79f81e8b | 141,030,075,832,455,970,000,000,000,000,000,000,000 | 3 | SERVER-35636 renameCollectionForApplyOps checks for complete namespace |
static int select_idle_smt(struct task_struct *p, int target)
{
int cpu, si_cpu = -1;
if (!static_branch_likely(&sched_smt_present))
return -1;
for_each_cpu(cpu, cpu_smt_mask(target)) {
if (!cpumask_test_cpu(cpu, p->cpus_ptr))
continue;
if (available_idle_cpu(cpu))
return cpu;
if (si_cpu == -1 && sched_idle_cpu(cpu))
si_cpu = cpu;
}
return si_cpu;
} | 0 | [
"CWE-400",
"CWE-703"
]
| linux | de53fd7aedb100f03e5d2231cfce0e4993282425 | 57,222,302,018,526,720,000,000,000,000,000,000,000 | 18 | sched/fair: Fix low cpu usage with high throttling by removing expiration of cpu-local slices
It has been observed, that highly-threaded, non-cpu-bound applications
running under cpu.cfs_quota_us constraints can hit a high percentage of
periods throttled while simultaneously not consuming the allocated
amount of quota. This use case is typical of user-interactive non-cpu
bound applications, such as those running in kubernetes or mesos when
run on multiple cpu cores.
This has been root caused to cpu-local run queue being allocated per cpu
bandwidth slices, and then not fully using that slice within the period.
At which point the slice and quota expires. This expiration of unused
slice results in applications not being able to utilize the quota for
which they are allocated.
The non-expiration of per-cpu slices was recently fixed by
'commit 512ac999d275 ("sched/fair: Fix bandwidth timer clock drift
condition")'. Prior to that it appears that this had been broken since
at least 'commit 51f2176d74ac ("sched/fair: Fix unlocked reads of some
cfs_b->quota/period")' which was introduced in v3.16-rc1 in 2014. That
added the following conditional which resulted in slices never being
expired.
if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
/* extend local deadline, drift is bounded above by 2 ticks */
cfs_rq->runtime_expires += TICK_NSEC;
Because this was broken for nearly 5 years, and has recently been fixed
and is now being noticed by many users running kubernetes
(https://github.com/kubernetes/kubernetes/issues/67577) it is my opinion
that the mechanisms around expiring runtime should be removed
altogether.
This allows quota already allocated to per-cpu run-queues to live longer
than the period boundary. This allows threads on runqueues that do not
use much CPU to continue to use their remaining slice over a longer
period of time than cpu.cfs_period_us. However, this helps prevent the
above condition of hitting throttling while also not fully utilizing
your cpu quota.
This theoretically allows a machine to use slightly more than its
allotted quota in some periods. This overflow would be bounded by the
remaining quota left on each per-cpu runqueueu. This is typically no
more than min_cfs_rq_runtime=1ms per cpu. For CPU bound tasks this will
change nothing, as they should theoretically fully utilize all of their
quota in each period. For user-interactive tasks as described above this
provides a much better user/application experience as their cpu
utilization will more closely match the amount they requested when they
hit throttling. This means that cpu limits no longer strictly apply per
period for non-cpu bound applications, but that they are still accurate
over longer timeframes.
This greatly improves performance of high-thread-count, non-cpu bound
applications with low cfs_quota_us allocation on high-core-count
machines. In the case of an artificial testcase (10ms/100ms of quota on
80 CPU machine), this commit resulted in almost 30x performance
improvement, while still maintaining correct cpu quota restrictions.
That testcase is available at https://github.com/indeedeng/fibtest.
Fixes: 512ac999d275 ("sched/fair: Fix bandwidth timer clock drift condition")
Signed-off-by: Dave Chiluk <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Phil Auld <[email protected]>
Reviewed-by: Ben Segall <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: John Hammond <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Kyle Anderson <[email protected]>
Cc: Gabriel Munos <[email protected]>
Cc: Peter Oskolkov <[email protected]>
Cc: Cong Wang <[email protected]>
Cc: Brendan Gregg <[email protected]>
Link: https://lkml.kernel.org/r/[email protected] |
virtual int init_processing() {
op_ret = init_quota();
if (op_ret < 0)
return op_ret;
return 0;
} | 0 | [
"CWE-770"
]
| ceph | ab29bed2fc9f961fe895de1086a8208e21ddaddc | 158,859,926,025,523,140,000,000,000,000,000,000,000 | 7 | rgw: fix issues with 'enforce bounds' patch
The patch to enforce bounds on max-keys/max-uploads/max-parts had a few
issues that would prevent us from compiling it. Instead of changing the
code provided by the submitter, we're addressing them in a separate
commit to maintain the DCO.
Signed-off-by: Joao Eduardo Luis <[email protected]>
Signed-off-by: Abhishek Lekshmanan <[email protected]>
(cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a)
mimic specific fixes:
As the largeish change from master g_conf() isn't in mimic yet, use the g_conf
global structure, also make rgw_op use the value from req_info ceph context as
we do for all the requests |
m4_placeholder (struct obstack *obs, int argc, token_data **argv)
{
M4ERROR ((warning_status, 0, "\
builtin `%s' requested by frozen file is not supported", ARG (0)));
} | 0 | []
| m4 | 5345bb49077bfda9fabd048e563f9e7077fe335d | 187,541,890,035,946,900,000,000,000,000,000,000,000 | 5 | Minor security fix: Quote output of mkstemp.
* src/builtin.c (mkstemp_helper): Produce quoted output.
* doc/m4.texinfo (Mkstemp): Update the documentation and tests.
* NEWS: Document this change.
Signed-off-by: Eric Blake <[email protected]>
(cherry picked from commit bd9900d65eb9cd5add0f107e94b513fa267495ba) |
Subsets and Splits