func
stringlengths 0
484k
| target
int64 0
1
| cwe
listlengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
/*
 * Write the ClientHello ecjpake_kkpp extension (EC J-PAKE round-one
 * public values) into 'buf'.
 *
 * ssl  - active SSL context; round-one data is cached in ssl->handshake
 * buf  - output position inside ssl->out_msg where the extension starts
 * olen - out: total bytes written (0 if the extension was skipped or an
 *        error occurred; errors are only logged, not propagated)
 */
static void ssl_write_ecjpake_kkpp_ext( mbedtls_ssl_context *ssl,
unsigned char *buf,
size_t *olen )
{
int ret;
unsigned char *p = buf;
/* End of the output record buffer; all bounds checks are against this. */
const unsigned char *end = ssl->out_msg + MBEDTLS_SSL_MAX_CONTENT_LEN;
size_t kkpp_len;
*olen = 0;
/* Skip costly extension if we can't use EC J-PAKE anyway */
if( mbedtls_ecjpake_check( &ssl->handshake->ecjpake_ctx ) != 0 )
return;
MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello, adding ecjpake_kkpp extension" ) );
/* Need 2 bytes for the extension type + 2 for the extension length. */
if( end - p < 4 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "buffer too small" ) );
return;
}
/* Extension type, big-endian. */
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_ECJPAKE_KKPP >> 8 ) & 0xFF );
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_ECJPAKE_KKPP ) & 0xFF );
/*
 * We may need to send ClientHello multiple times for Hello verification.
 * We don't want to compute fresh values every time (both for performance
 * and consistency reasons), so cache the extension content.
 */
if( ssl->handshake->ecjpake_cache == NULL ||
ssl->handshake->ecjpake_cache_len == 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "generating new ecjpake parameters" ) );
/* Write the round-one payload directly after the 2-byte length field;
 * the callee is given the exact remaining space for bounds checking. */
ret = mbedtls_ecjpake_write_round_one( &ssl->handshake->ecjpake_ctx,
p + 2, end - p - 2, &kkpp_len,
ssl->conf->f_rng, ssl->conf->p_rng );
if( ret != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1 , "mbedtls_ecjpake_write_round_one", ret );
return;
}
/* Cache the generated payload for a possible ClientHello retransmit. */
ssl->handshake->ecjpake_cache = mbedtls_calloc( 1, kkpp_len );
if( ssl->handshake->ecjpake_cache == NULL )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "allocation failed" ) );
return;
}
memcpy( ssl->handshake->ecjpake_cache, p + 2, kkpp_len );
ssl->handshake->ecjpake_cache_len = kkpp_len;
}
else
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "re-using cached ecjpake parameters" ) );
kkpp_len = ssl->handshake->ecjpake_cache_len;
/* Bounds check before copying the cached payload into the record. */
if( (size_t)( end - p - 2 ) < kkpp_len )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "buffer too small" ) );
return;
}
memcpy( p + 2, ssl->handshake->ecjpake_cache, kkpp_len );
}
/* Extension length field, big-endian (payload is already in place). */
*p++ = (unsigned char)( ( kkpp_len >> 8 ) & 0xFF );
*p++ = (unsigned char)( ( kkpp_len ) & 0xFF );
/* 4 header bytes (type + length) plus the payload. */
*olen = kkpp_len + 4;
}
| 0 |
[
"CWE-119",
"CWE-125",
"CWE-295"
] |
mbedtls
|
a1098f81c252b317ad34ea978aea2bc47760b215
| 200,265,351,393,996,860,000,000,000,000,000,000,000 | 75 |
Add bounds check before signature length read
|
/*
 * Generated Zend VM handler helper: pre-increment/decrement of an object
 * property, specialized for a VAR object operand and a CV property operand.
 * 'incdec_op' is the primitive (++ or --) applied to the property zval.
 * The resulting value is also stored in the opline's result slot unless
 * the result is marked unused.
 */
static int ZEND_FASTCALL zend_pre_incdec_property_helper_SPEC_VAR_CV(incdec_t incdec_op, ZEND_OPCODE_HANDLER_ARGS)
{
zend_op *opline = EX(opline);
zend_free_op free_op1;
zval **object_ptr = _get_zval_ptr_ptr_var(&opline->op1, EX(Ts), &free_op1 TSRMLS_CC);
zval *object;
zval *property = _get_zval_ptr_cv(&opline->op2, EX(Ts), BP_VAR_R TSRMLS_CC);
zval **retval = &EX_T(opline->result.u.var).var.ptr;
int have_get_ptr = 0;
/* A NULL object_ptr means op1 was an overloaded object / string offset,
 * which cannot be incremented in place. */
if (IS_VAR == IS_VAR && !object_ptr) {
zend_error_noreturn(E_ERROR, "Cannot increment/decrement overloaded objects nor string offsets");
}
make_real_object(object_ptr TSRMLS_CC); /* this should modify object only if it's empty */
object = *object_ptr;
if (Z_TYPE_P(object) != IS_OBJECT) {
/* Non-object: warn, make the result the uninitialized zval, and bail. */
zend_error(E_WARNING, "Attempt to increment/decrement property of non-object");
if (!RETURN_VALUE_UNUSED(&opline->result)) {
*retval = EG(uninitialized_zval_ptr);
PZVAL_LOCK(*retval);
}
if (free_op1.var) {zval_ptr_dtor(&free_op1.var);};
ZEND_VM_NEXT_OPCODE();
}
/* here we are sure we are dealing with an object */
/* 'if (0)' blocks below are compiled-out specialization branches: a CV
 * property operand never needs a real zval copy or an extra dtor. */
if (0) {
MAKE_REAL_ZVAL_PTR(property);
}
/* Fast path: operate directly on the property's storage slot if the
 * object handler can hand out a pointer to it. */
if (Z_OBJ_HT_P(object)->get_property_ptr_ptr) {
zval **zptr = Z_OBJ_HT_P(object)->get_property_ptr_ptr(object, property TSRMLS_CC);
if (zptr != NULL) { /* NULL means no success in getting PTR */
SEPARATE_ZVAL_IF_NOT_REF(zptr);
have_get_ptr = 1;
incdec_op(*zptr);
if (!RETURN_VALUE_UNUSED(&opline->result)) {
*retval = *zptr;
PZVAL_LOCK(*retval);
}
}
}
/* Slow path: read the property, apply the op on a separated copy, and
 * write it back through the object's write_property handler. */
if (!have_get_ptr) {
if (Z_OBJ_HT_P(object)->read_property && Z_OBJ_HT_P(object)->write_property) {
zval *z = Z_OBJ_HT_P(object)->read_property(object, property, BP_VAR_R TSRMLS_CC);
/* Unwrap proxy objects that expose their value via a get() handler,
 * freeing the proxy if it was only referenced here. */
if (Z_TYPE_P(z) == IS_OBJECT && Z_OBJ_HT_P(z)->get) {
zval *value = Z_OBJ_HT_P(z)->get(z TSRMLS_CC);
if (Z_REFCOUNT_P(z) == 0) {
GC_REMOVE_ZVAL_FROM_BUFFER(z);
zval_dtor(z);
FREE_ZVAL(z);
}
z = value;
}
Z_ADDREF_P(z);
SEPARATE_ZVAL_IF_NOT_REF(&z);
incdec_op(z);
*retval = z;
Z_OBJ_HT_P(object)->write_property(object, property, z TSRMLS_CC);
SELECTIVE_PZVAL_LOCK(*retval, &opline->result);
zval_ptr_dtor(&z);
} else {
zend_error(E_WARNING, "Attempt to increment/decrement property of non-object");
if (!RETURN_VALUE_UNUSED(&opline->result)) {
*retval = EG(uninitialized_zval_ptr);
PZVAL_LOCK(*retval);
}
}
}
if (0) {
zval_ptr_dtor(&property);
} else {
}
if (free_op1.var) {zval_ptr_dtor(&free_op1.var);};
ZEND_VM_NEXT_OPCODE();
}
| 0 |
[] |
php-src
|
ce96fd6b0761d98353761bf78d5bfb55291179fd
| 255,827,738,962,390,400,000,000,000,000,000,000,000 | 86 |
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
|
/*
 * Read verifier for xfs_da (dir/attr) btree node buffers. Dispatches on
 * the block's on-disk magic number: CRC-format (DA3) node blocks are
 * checksum-verified before the structural check; leaf-block magics are
 * re-dispatched to the corresponding attr/dir leaf buffer ops. Any other
 * magic, or a failed check, marks the buffer corrupt (EFSCORRUPTED).
 */
xfs_da3_node_read_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_da_blkinfo *info = bp->b_addr;
switch (be16_to_cpu(info->magic)) {
case XFS_DA3_NODE_MAGIC:
/* CRC-format node: verify the checksum first; on failure fall out
 * to the corruption handling below. */
if (!xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
XFS_DA3_NODE_CRC_OFF))
break;
/* fall through */
case XFS_DA_NODE_MAGIC:
if (!xfs_da3_node_verify(bp))
break;
return;
case XFS_ATTR_LEAF_MAGIC:
case XFS_ATTR3_LEAF_MAGIC:
/* Leaf block found where a node was expected: switch the buffer's
 * ops and let the leaf verifier take over. */
bp->b_ops = &xfs_attr3_leaf_buf_ops;
bp->b_ops->verify_read(bp);
return;
case XFS_DIR2_LEAFN_MAGIC:
case XFS_DIR3_LEAFN_MAGIC:
bp->b_ops = &xfs_dir3_leafn_buf_ops;
bp->b_ops->verify_read(bp);
return;
default:
break;
}
/* corrupt block */
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
xfs_buf_ioerror(bp, EFSCORRUPTED);
}
| 0 |
[
"CWE-399"
] |
linux
|
c88547a8119e3b581318ab65e9b72f27f23e641d
| 66,786,888,204,441,460,000,000,000,000,000,000,000 | 34 |
xfs: fix directory hash ordering bug
Commit f5ea1100 ("xfs: add CRCs to dir2/da node blocks") introduced
in 3.10 incorrectly converted the btree hash index array pointer in
xfs_da3_fixhashpath(). It resulted in the the current hash always
being compared against the first entry in the btree rather than the
current block index into the btree block's hash entry array. As a
result, it was comparing the wrong hashes, and so could misorder the
entries in the btree.
For most cases, this doesn't cause any problems as it requires hash
collisions to expose the ordering problem. However, when there are
hash collisions within a directory there is a very good probability
that the entries will be ordered incorrectly and that actually
matters when duplicate hashes are placed into or removed from the
btree block hash entry array.
This bug results in an on-disk directory corruption and that results
in directory verifier functions throwing corruption warnings into
the logs. While no data or directory entries are lost, access to
them may be compromised, and attempts to remove entries from a
directory that has suffered from this corruption may result in a
filesystem shutdown. xfs_repair will fix the directory hash
ordering without data loss occuring.
[dchinner: wrote useful a commit message]
cc: <[email protected]>
Reported-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: Mark Tinguely <[email protected]>
Reviewed-by: Ben Myers <[email protected]>
Signed-off-by: Dave Chinner <[email protected]>
|
/**
 * Join a list of test errors into a single "first - second" string,
 * separated by "; ". Returns an empty string for an empty vector.
 *
 * Fixes: the original copied each TestErrors element by value
 * (`auto error = errors[i]`) and rebuilt the accumulator with
 * `result = result + ...`, allocating a fresh temporary per iteration.
 * Both are replaced with a const reference and in-place appends.
 */
std::string collectErrorStr(const std::vector<JsonTestCase::TestErrors>& errors) {
    std::string result;
    for (size_t i = 0; i < errors.size(); ++i) {
        const auto& error = errors[i];  // no per-element copy
        result += error.first;
        result += " - ";
        result += error.second;
        // Append the separator after every element except the last.
        if (i + 1 != errors.size()) {
            result += "; ";
        }
    }
    return result;
}
| 0 |
[
"CWE-755"
] |
mongo
|
75f7184eafa78006a698cda4c4adfb57f1290047
| 339,572,986,476,647,520,000,000,000,000,000,000,000 | 11 |
SERVER-50170 fix max staleness read preference parameter for server selection
|
static void usbredir_free_streams(USBDevice *udev, USBEndpoint **eps,
                                  int nr_eps)
{
#if USBREDIR_VERSION >= 0x000700
    /* Ask the usbredir peer to free bulk streams on the given endpoints. */
    USBRedirDevice *dev = USB_REDIRECT(udev);
    struct usb_redir_free_bulk_streams_header free_streams;
    int ep_idx;

    /* Nothing to do unless the peer advertises bulk-stream support. */
    if (!usbredirparser_peer_has_cap(dev->parser,
                                     usb_redir_cap_bulk_streams)) {
        return;
    }

    /* Build a bitmask with one bit per affected endpoint. */
    free_streams.endpoints = 0;
    for (ep_idx = 0; ep_idx < nr_eps; ep_idx++) {
        free_streams.endpoints |= 1 << USBEP2I(eps[ep_idx]);
    }

    usbredirparser_send_free_bulk_streams(dev->parser, 0, &free_streams);
    usbredirparser_do_write(dev->parser);
#endif
}
| 0 |
[
"CWE-770"
] |
qemu
|
7ec54f9eb62b5d177e30eb8b1cad795a5f8d8986
| 307,463,017,172,382,740,000,000,000,000,000,000,000 | 21 |
usb/redir: avoid dynamic stack allocation (CVE-2021-3527)
Use autofree heap allocation instead.
Fixes: 4f4321c11ff ("usb: use iovecs in USBPacket")
Reviewed-by: Philippe Mathieu-Daudé <[email protected]>
Signed-off-by: Gerd Hoffmann <[email protected]>
Tested-by: Philippe Mathieu-Daudé <[email protected]>
Message-Id: <[email protected]>
|
static XMLRPC_OPTIONS XMLRPC_GetDefaultOptions() {
    /* Process-wide default options shared by all callers:
     * exact, case-sensitive matching. */
    static STRUCT_XMLRPC_OPTIONS default_options = {
        xmlrpc_case_exact,
        xmlrpc_case_sensitive
    };

    return &default_options;
}
| 0 |
[
"CWE-119"
] |
php-src
|
88412772d295ebf7dd34409534507dc9bcac726e
| 111,127,260,373,214,900,000,000,000,000,000,000,000 | 7 |
Fix bug #68027 - fix date parsing in XMLRPC lib
|
/*
 * MMU-notifier hook: the host is about to invalidate the HVA range
 * [start, end). Unmap any guest mappings backed by that range and flush
 * the TLB before the host frees the pages. Runs under kvm->srcu and
 * kvm->mmu_lock; mmu_notifier_count is raised under the lock so spte
 * creation paths can see an invalidation is in progress.
 */
static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
int need_tlb_flush = 0, idx;
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
/*
 * The count increase must become visible at unlock time as no
 * spte can be established without taking the mmu_lock and
 * count is also read inside the mmu_lock critical section.
 */
kvm->mmu_notifier_count++;
need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
/* Also flush if other paths left dirty TLB entries pending. */
need_tlb_flush |= kvm->tlbs_dirty;
/* we've to flush the tlb before the pages can be freed */
if (need_tlb_flush)
kvm_flush_remote_tlbs(kvm);
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
}
| 0 |
[
"CWE-399"
] |
linux
|
12d6e7538e2d418c08f082b1b44ffa5fb7270ed8
| 79,860,640,916,366,780,000,000,000,000,000,000,000 | 25 |
KVM: perform an invalid memslot step for gpa base change
PPC must flush all translations before the new memory slot
is visible.
Signed-off-by: Marcelo Tosatti <[email protected]>
Signed-off-by: Avi Kivity <[email protected]>
|
/*
 * Start the previously downloaded firmware: wait for the download
 * checksum report, flip the MCU control bits and reset the 8051 so the
 * firmware begins executing, then wait for it to signal readiness.
 * Returns 0 on success, -EAGAIN if either poll times out.
 */
static int rtl8xxxu_start_firmware(struct rtl8xxxu_priv *priv)
{
struct device *dev = &priv->udev->dev;
int ret = 0, i;
u32 val32;
/* Poll checksum report */
/* NOTE(review): unlike the ready-poll below, this loop has no delay
 * between register reads - confirm the busy-poll is intentional. */
for (i = 0; i < RTL8XXXU_FIRMWARE_POLL_MAX; i++) {
val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL);
if (val32 & MCU_FW_DL_CSUM_REPORT)
break;
}
if (i == RTL8XXXU_FIRMWARE_POLL_MAX) {
dev_warn(dev, "Firmware checksum poll timed out\n");
ret = -EAGAIN;
goto exit;
}
/* Mark the download complete and clear the init-ready flag before
 * restarting the MCU. */
val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL);
val32 |= MCU_FW_DL_READY;
val32 &= ~MCU_WINT_INIT_READY;
rtl8xxxu_write32(priv, REG_MCU_FW_DL, val32);
/*
 * Reset the 8051 in order for the firmware to start running,
 * otherwise it won't come up on the 8192eu
 */
priv->fops->reset_8051(priv);
/* Wait for firmware to become ready */
for (i = 0; i < RTL8XXXU_FIRMWARE_POLL_MAX; i++) {
val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL);
if (val32 & MCU_WINT_INIT_READY)
break;
udelay(100);
}
if (i == RTL8XXXU_FIRMWARE_POLL_MAX) {
dev_warn(dev, "Firmware failed to start\n");
ret = -EAGAIN;
goto exit;
}
/*
 * Init H2C command
 */
if (priv->rtl_chip == RTL8723B)
rtl8xxxu_write8(priv, REG_HMTFR, 0x0f);
exit:
return ret;
}
| 0 |
[
"CWE-400",
"CWE-401"
] |
linux
|
a2cdd07488e666aa93a49a3fc9c9b1299e27ef3c
| 227,911,653,179,997,500,000,000,000,000,000,000,000 | 53 |
rtl8xxxu: prevent leaking urb
In rtl8xxxu_submit_int_urb if usb_submit_urb fails the allocated urb
should be released.
Signed-off-by: Navid Emamdoost <[email protected]>
Reviewed-by: Chris Chiu <[email protected]>
Signed-off-by: Kalle Valo <[email protected]>
|
XML_FreeContentModel(XML_Parser parser, XML_Content *model) {
  /* Release a content model through the parser's allocator.
     A NULL parser is a no-op, since FREE() needs the parser. */
  if (parser == NULL)
    return;
  FREE(parser, model);
}
| 0 |
[
"CWE-611",
"CWE-776",
"CWE-415",
"CWE-125"
] |
libexpat
|
c20b758c332d9a13afbbb276d30db1d183a85d43
| 83,573,273,833,204,180,000,000,000,000,000,000,000 | 4 |
xmlparse.c: Deny internal entities closing the doctype
|
static int vmci_transport_notify_send_init(
    struct vsock_sock *vsk,
    struct vsock_transport_send_notify_data *data)
{
    /* Delegate to the transport's notify ops, narrowing the generic
     * notify-data blob to the VMCI-specific layout. */
    struct vmci_transport_send_notify_data *vmci_data =
        (struct vmci_transport_send_notify_data *)data;

    return vmci_trans(vsk)->notify_ops->send_init(&vsk->sk, vmci_data);
}
| 0 |
[
"CWE-20",
"CWE-269"
] |
linux
|
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
| 275,958,680,401,589,770,000,000,000,000,000,000,000 | 8 |
net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/*
 * Receive one packet from the given socket into a freshly allocated
 * buffer and dispatch it. The buffer is sized for the larger of a
 * maximum payload (max MTU + protocol overhead) or a handshake packet.
 * On receive errors, and (with PKTINFO) when no local-address control
 * data arrived, the buffer is freed and the packet dropped; otherwise
 * buffer ownership passes to handle_socket_receive().
 */
void fastd_receive(fastd_socket_t *sock) {
size_t max_len = max_size_t(fastd_max_payload(ctx.max_mtu) + conf.overhead, MAX_HANDSHAKE_SIZE);
fastd_buffer_t *buffer = fastd_buffer_alloc(max_len, conf.decrypt_headroom);
fastd_peer_address_t local_addr;
fastd_peer_address_t recvaddr;
struct iovec buffer_vec = { .iov_base = buffer->data, .iov_len = buffer->len };
/* Ancillary-data space for recvmsg control messages (aligned for CMSG). */
uint8_t cbuf[1024] __attribute__((aligned(8)));
struct msghdr message = {
.msg_name = &recvaddr,
.msg_namelen = sizeof(recvaddr),
.msg_iov = &buffer_vec,
.msg_iovlen = 1,
.msg_control = cbuf,
.msg_controllen = sizeof(cbuf),
};
ssize_t len = recvmsg(sock->fd.fd, &message, 0);
if (len <= 0) {
if (len < 0)
pr_warn_errno("recvmsg");
/* Error or zero-length read: release the buffer, drop the packet. */
fastd_buffer_free(buffer);
return;
}
/* Shrink the buffer's logical length to the bytes actually received. */
buffer->len = len;
/* Extract the local (destination) address from the control messages. */
handle_socket_control(&message, sock, &local_addr);
#ifdef USE_PKTINFO
if (!local_addr.sa.sa_family) {
pr_error("received packet without packet info");
fastd_buffer_free(buffer);
return;
}
#endif
fastd_peer_address_simplify(&local_addr);
fastd_peer_address_simplify(&recvaddr);
handle_socket_receive(sock, &local_addr, &recvaddr, buffer);
}
| 0 |
[
"CWE-617",
"CWE-119",
"CWE-284"
] |
fastd
|
737925113363b6130879729cdff9ccc46c33eaea
| 45,380,823,722,832,440,000,000,000,000,000,000,000 | 43 |
receive: fix buffer leak when receiving invalid packets
For fastd versions before v20, this was just a memory leak (which could
still be used for DoS, as it's remotely triggerable). With the new
buffer management of fastd v20, this will trigger an assertion failure
instead as soon as the buffer pool is empty.
|
static UINT32 _GetLastErrorToIoStatus(SERIAL_DEVICE* serial)
{
	/* Map the thread's Win32 last-error code to an NTSTATUS-style I/O status
	 * value for serial device control requests, see:
	 * http://msdn.microsoft.com/en-us/library/ff547466%28v=vs.85%29.aspx#generic_status_values_for_serial_device_control_requests
	 *
	 * Fix: read GetLastError() exactly once. The original called it twice
	 * (once for the switch, once when logging); any API call slipped in
	 * between could clobber the thread's last-error value and make the log
	 * report a different code than the one that was actually mapped.
	 */
	const DWORD lastError = GetLastError();

	switch (lastError)
	{
		case ERROR_BAD_DEVICE:
			return STATUS_INVALID_DEVICE_REQUEST;
		case ERROR_CALL_NOT_IMPLEMENTED:
			return STATUS_NOT_IMPLEMENTED;
		case ERROR_CANCELLED:
			return STATUS_CANCELLED;
		case ERROR_INSUFFICIENT_BUFFER:
			return STATUS_BUFFER_TOO_SMALL; /* NB: STATUS_BUFFER_SIZE_TOO_SMALL not defined */
		case ERROR_INVALID_DEVICE_OBJECT_PARAMETER: /* eg: SerCx2.sys' _purge() */
			return STATUS_INVALID_DEVICE_STATE;
		case ERROR_INVALID_HANDLE:
			return STATUS_INVALID_DEVICE_REQUEST;
		case ERROR_INVALID_PARAMETER:
			return STATUS_INVALID_PARAMETER;
		case ERROR_IO_DEVICE:
			return STATUS_IO_DEVICE_ERROR;
		case ERROR_IO_PENDING:
			return STATUS_PENDING;
		case ERROR_NOT_SUPPORTED:
			return STATUS_NOT_SUPPORTED;
		case ERROR_TIMEOUT:
			return STATUS_TIMEOUT;
		/* no default */
	}

	/* Cast keeps the argument width matched to the PRIX32 specifier. */
	WLog_Print(serial->log, WLOG_DEBUG, "unexpected last-error: 0x%08" PRIX32 "",
	           (UINT32)lastError);
	return STATUS_UNSUCCESSFUL;
}
| 0 |
[
"CWE-125"
] |
FreeRDP
|
6b485b146a1b9d6ce72dfd7b5f36456c166e7a16
| 92,430,765,027,121,800,000,000,000,000,000,000,000 | 44 |
Fixed oob read in irp_write and similar
|
/* Read up to 'count' bytes from the stream into 'buf'. Encrypted streams
 * go through the DNS-over-TLS layer; plain streams use read(), with a
 * negative errno returned on failure. */
static ssize_t dns_stream_read(DnsStream *s, void *buf, size_t count) {
#if ENABLE_DNS_OVER_TLS
        if (s->encrypted)
                return dnstls_stream_read(s, buf, count);
#endif

        ssize_t n = read(s->fd, buf, count);
        if (n < 0)
                return -errno;

        return n;
}
| 0 |
[
"CWE-416",
"CWE-703"
] |
systemd
|
d973d94dec349fb676fdd844f6fe2ada3538f27c
| 335,584,946,964,854,280,000,000,000,000,000,000,000 | 16 |
resolved: pin stream while calling callbacks for it
These callbacks might unref the stream, but we still have to access it,
let's hence ref it explicitly.
Maybe fixes: #10725
|
/*
 * Apply a deferred path-MTU update to the association, then clear the
 * pending flag. The sync must happen before the flag is cleared so the
 * pending state is not lost if the update is observed concurrently.
 */
static inline void sctp_assoc_pending_pmtu(struct sock *sk, struct sctp_association *asoc)
{
sctp_assoc_sync_pmtu(sk, asoc);
asoc->pmtu_pending = 0;
}
| 0 |
[] |
linux
|
196d67593439b03088913227093e374235596e33
| 18,744,143,673,277,084,000,000,000,000,000,000,000 | 6 |
sctp: Add support to per-association statistics via a new SCTP_GET_ASSOC_STATS call
The current SCTP stack is lacking a mechanism to have per association
statistics. This is an implementation modeled after OpenSolaris'
SCTP_GET_ASSOC_STATS.
Userspace part will follow on lksctp if/when there is a general ACK on
this.
V4:
- Move ipackets++ before q->immediate.func() for consistency reasons
- Move sctp_max_rto() at the end of sctp_transport_update_rto() to avoid
returning bogus RTO values
- return asoc->rto_min when max_obs_rto value has not changed
V3:
- Increase ictrlchunks in sctp_assoc_bh_rcv() as well
- Move ipackets++ to sctp_inq_push()
- return 0 when no rto updates took place since the last call
V2:
- Implement partial retrieval of stat struct to cope for future expansion
- Kill the rtxpackets counter as it cannot be precise anyway
- Rename outseqtsns to outofseqtsns to make it clearer that these are out
of sequence unexpected TSNs
- Move asoc->ipackets++ under a lock to avoid potential miscounts
- Fold asoc->opackets++ into the already existing asoc check
- Kill unneeded (q->asoc) test when increasing rtxchunks
- Do not count octrlchunks if sending failed (SCTP_XMIT_OK != 0)
- Don't count SHUTDOWNs as SACKs
- Move SCTP_GET_ASSOC_STATS to the private space API
- Adjust the len check in sctp_getsockopt_assoc_stats() to allow for
future struct growth
- Move association statistics in their own struct
- Update idupchunks when we send a SACK with dup TSNs
- return min_rto in max_rto when RTO has not changed. Also return the
transport when max_rto last changed.
Signed-off: Michele Baldessari <[email protected]>
Acked-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/*
 * Copy 'bytes' bytes from the iov_iter 'i' into the kernel buffer 'addr',
 * advancing the iterator. Pipe-backed iterators are rejected (WARN, 0).
 * copyin() handles user-space segments (hence might_fault() for iovecs),
 * memcpy() handles kernel segments.
 * NOTE(review): the returned 'bytes' is presumably clamped by the
 * iterate_and_advance() macro to what the iterator could supply - the
 * macro is defined elsewhere, confirm before relying on this.
 */
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
if (unlikely(iov_iter_is_pipe(i))) {
WARN_ON(1);
return 0;
}
if (iter_is_iovec(i))
might_fault();
iterate_and_advance(i, bytes, base, len, off,
copyin(addr + off, base, len),
memcpy(addr + off, base, len)
)
return bytes;
}
| 0 |
[
"CWE-665",
"CWE-284"
] |
linux
|
9d2231c5d74e13b2a0546fee6737ee4446017903
| 25,317,001,219,990,650,000,000,000,000,000,000,000 | 15 |
lib/iov_iter: initialize "flags" in new pipe_buffer
The functions copy_page_to_iter_pipe() and push_pipe() can both
allocate a new pipe_buffer, but the "flags" member initializer is
missing.
Fixes: 241699cd72a8 ("new iov_iter flavour: pipe-backed")
To: Alexander Viro <[email protected]>
To: [email protected]
To: [email protected]
Cc: [email protected]
Signed-off-by: Max Kellermann <[email protected]>
Signed-off-by: Al Viro <[email protected]>
|
/*
 * Decode a QUIC-compressed image into 'buf'. 'type' is the caller's
 * desired output format and must be compatible with the encoded format
 * (encoder->type); the only cross-format conversion supported is
 * RGB16 -> RGB32. 'stride' is the output row pitch in bytes (may be
 * negative for bottom-up images; its magnitude is validated against the
 * image width). Returns QUIC_OK on success, QUIC_ERROR on an
 * unsupported format combination.
 */
int quic_decode(QuicContext *quic, QuicImageType type, uint8_t *buf, int stride)
{
Encoder *encoder = (Encoder *)quic;
unsigned int row;
uint8_t *prev;
spice_assert(buf);
switch (encoder->type) {
case QUIC_IMAGE_TYPE_RGB32:
case QUIC_IMAGE_TYPE_RGB24:
if (type == QUIC_IMAGE_TYPE_RGB32) {
spice_assert(ABS(stride) >= (int)encoder->width * 4);
QUIC_UNCOMPRESS_RGB(32, rgb32_pixel_t);
break;
} else if (type == QUIC_IMAGE_TYPE_RGB24) {
spice_assert(ABS(stride) >= (int)encoder->width * 3);
QUIC_UNCOMPRESS_RGB(24, rgb24_pixel_t);
break;
}
encoder->usr->warn(encoder->usr, "unsupported output format\n");
return QUIC_ERROR;
case QUIC_IMAGE_TYPE_RGB16:
if (type == QUIC_IMAGE_TYPE_RGB16) {
spice_assert(ABS(stride) >= (int)encoder->width * 2);
QUIC_UNCOMPRESS_RGB(16, rgb16_pixel_t);
} else if (type == QUIC_IMAGE_TYPE_RGB32) {
/* Only widening conversion supported: decode 16bpp into 32bpp. */
spice_assert(ABS(stride) >= (int)encoder->width * 4);
QUIC_UNCOMPRESS_RGB(16_to_32, rgb32_pixel_t);
} else {
encoder->usr->warn(encoder->usr, "unsupported output format\n");
return QUIC_ERROR;
}
break;
case QUIC_IMAGE_TYPE_RGBA:
/* Alpha images must be decoded as-is; no format conversion. */
if (type != QUIC_IMAGE_TYPE_RGBA) {
encoder->usr->warn(encoder->usr, "unsupported output format\n");
return QUIC_ERROR;
}
spice_assert(ABS(stride) >= (int)encoder->width * 4);
uncompress_rgba(encoder, buf, stride);
break;
case QUIC_IMAGE_TYPE_GRAY:
if (type != QUIC_IMAGE_TYPE_GRAY) {
encoder->usr->warn(encoder->usr, "unsupported output format\n");
return QUIC_ERROR;
}
spice_assert(ABS(stride) >= (int)encoder->width);
uncompress_gray(encoder, buf, stride);
break;
case QUIC_IMAGE_TYPE_INVALID:
default:
/* NOTE(review): usr->error is presumably fatal (does not return);
 * if it can return, this falls through to 'return QUIC_OK' for a
 * bad image type - confirm against the usr callback contract. */
encoder->usr->error(encoder->usr, "bad image type\n");
}
return QUIC_OK;
}
| 0 |
[] |
spice-common
|
762e0abae36033ccde658fd52d3235887b60862d
| 216,434,611,734,101,040,000,000,000,000,000,000,000 | 59 |
quic: Check we have some data to start decoding quic image
All paths already pass some data to quic_decode_begin but for the
test check it, it's not that expensive test.
Checking for not 0 is enough, all other words will potentially be
read calling more_io_words but we need one to avoid a potential
initial buffer overflow or deferencing an invalid pointer.
Signed-off-by: Frediano Ziglio <[email protected]>
Acked-by: Uri Lublin <[email protected]>
|
ReturnCode_t DataWriterImpl::get_offered_incompatible_qos_status(
        OfferedIncompatibleQosStatus& status)
{
    // Without an underlying RTPS writer the entity is not enabled yet.
    if (nullptr == writer_)
    {
        return ReturnCode_t::RETCODE_NOT_ENABLED;
    }

    // Snapshot the status under the writer's mutex, then reset the
    // delta counter so subsequent reads report only new increments.
    std::unique_lock<RecursiveTimedMutex> guard(writer_->getMutex());
    status = offered_incompatible_qos_status_;
    offered_incompatible_qos_status_.total_count_change = 0u;

    return ReturnCode_t::RETCODE_OK;
}
| 0 |
[
"CWE-284"
] |
Fast-DDS
|
d2aeab37eb4fad4376b68ea4dfbbf285a2926384
| 243,131,514,501,440,700,000,000,000,000,000,000,000 | 14 |
check remote permissions (#1387)
* Refs 5346. Blackbox test
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. one-way string compare
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. Do not add partition separator on last partition
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. Uncrustify
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. Uncrustify
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Access control unit testing
It only covers Partition and Topic permissions
Signed-off-by: Iker Luengo <[email protected]>
* Refs #3680. Fix partition check on Permissions plugin.
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Uncrustify
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Fix tests on mac
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Fix windows tests
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Avoid memory leak on test
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Proxy data mocks should not return temporary objects
Signed-off-by: Iker Luengo <[email protected]>
* refs 3680. uncrustify
Signed-off-by: Iker Luengo <[email protected]>
Co-authored-by: Miguel Company <[email protected]>
|
/*
 * Emulate a guest write to CR3. Returns 0 on success, 1 if the new value
 * violates the reserved-bit rules for the vcpu's current paging mode (the
 * caller injects the fault).
 */
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
/* Fast path: same CR3 and unchanged PDPTRs - just resync the shadow
 * roots and flush the TLB, no root switch needed. */
if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
kvm_mmu_sync_roots(vcpu);
kvm_mmu_flush_tlb(vcpu);
return 0;
}
if (is_long_mode(vcpu)) {
/* Long mode: the reserved-bit mask depends on whether PCID is on. */
if (kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) {
if (cr3 & CR3_PCID_ENABLED_RESERVED_BITS)
return 1;
} else
if (cr3 & CR3_L_MODE_RESERVED_BITS)
return 1;
} else {
if (is_pae(vcpu)) {
if (cr3 & CR3_PAE_RESERVED_BITS)
return 1;
/* PAE paging: the four PDPTEs must be loadable and valid. */
if (is_paging(vcpu) &&
!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
return 1;
}
/*
 * We don't check reserved bits in nonpae mode, because
 * this isn't enforced, and VMware depends on this.
 */
}
/* Commit the new CR3 and switch the MMU to the new root. */
vcpu->arch.cr3 = cr3;
__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
kvm_mmu_new_cr3(vcpu);
return 0;
}
| 0 |
[
"CWE-119",
"CWE-703",
"CWE-120"
] |
linux
|
a08d3b3b99efd509133946056531cdf8f3a0c09b
| 247,620,177,483,676,600,000,000,000,000,000,000,000 | 34 |
kvm: x86: fix emulator buffer overflow (CVE-2014-0049)
The problem occurs when the guest performs a pusha with the stack
address pointing to an mmio address (or an invalid guest physical
address) to start with, but then extending into an ordinary guest
physical address. When doing repeated emulated pushes
emulator_read_write sets mmio_needed to 1 on the first one. On a
later push when the stack points to regular memory,
mmio_nr_fragments is set to 0, but mmio_is_needed is not set to 0.
As a result, KVM exits to userspace, and then returns to
complete_emulated_mmio. In complete_emulated_mmio
vcpu->mmio_cur_fragment is incremented. The termination condition of
vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments is never achieved.
The code bounces back and fourth to userspace incrementing
mmio_cur_fragment past it's buffer. If the guest does nothing else it
eventually leads to a a crash on a memcpy from invalid memory address.
However if a guest code can cause the vm to be destroyed in another
vcpu with excellent timing, then kvm_clear_async_pf_completion_queue
can be used by the guest to control the data that's pointed to by the
call to cancel_work_item, which can be used to gain execution.
Fixes: f78146b0f9230765c6315b2e14f56112513389ad
Signed-off-by: Andrew Honig <[email protected]>
Cc: [email protected] (3.5+)
Signed-off-by: Paolo Bonzini <[email protected]>
|
/* Translate one 16-bit MuscleCard ACL word into sc_file ACL entries for
 * the given operation. 0xFFFF means "never allowed"; otherwise each set
 * bit grants access to the CHV (PIN/key) with that bit's index. */
static void muscle_load_single_acl(sc_file_t* file, int operation, unsigned short acl)
{
	unsigned short remaining;
	int key_ref;

	/* Everybody by default.... */
	sc_file_add_acl_entry(file, operation, SC_AC_NONE, 0);

	if (acl == 0xFFFF) {
		sc_file_add_acl_entry(file, operation, SC_AC_NEVER, 0);
		return;
	}

	/* Walk the set bits from LSB to MSB, adding one CHV entry per bit. */
	for (key_ref = 0, remaining = acl; remaining != 0; key_ref++, remaining >>= 1) {
		if (remaining & 1)
			sc_file_add_acl_entry(file, operation, SC_AC_CHV, key_ref);
	}
}
| 0 |
[
"CWE-415",
"CWE-119"
] |
OpenSC
|
360e95d45ac4123255a4c796db96337f332160ad
| 184,585,344,262,728,700,000,000,000,000,000,000,000 | 15 |
fixed out of bounds writes
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting the problems.
|
/*
 * ClearArea - clear a region of the current display.
 *
 * The region runs from (x1,y1) to (x2,y2); xs/xe are the left/right
 * margins used for the intermediate full lines (-1 means "use x1/x2").
 * 'bce' is the background color to erase with; if 'uselayfn' is set,
 * lines covered by a canvas/viewport are cleared through the layer's
 * LayClearLine() instead of directly. Tries the terminal's clear
 * capabilities (D_CL/D_CD/D_CCD/D_CB/D_CE) where applicable before
 * falling back to per-line ClearLine() output.
 */
ClearArea(x1, y1, xs, xe, x2, y2, bce, uselayfn)
int x1, y1, xs, xe, x2, y2, bce, uselayfn;
{
int y, xxe;
struct canvas *cv;
struct viewport *vp;
debug2("Clear %d,%d", x1, y1);
debug2(" %d-%d", xs, xe);
debug2(" %d,%d", x2, y2);
debug2(" uselayfn=%d bce=%d\n", uselayfn, bce);
ASSERT(display);
/* Clamp columns at the right edge; resolve -1 margin defaults. */
if (x1 == D_width)
x1--;
if (x2 == D_width)
x2--;
if (xs == -1)
xs = x1;
if (xe == -1)
xe = x2;
if (D_UT) /* Safe to erase ? */
SetRendition(&mchar_null);
#ifdef COLOR
if (D_BE)
SetBackColor(bce);
#endif
/* NOTE(review): presumably the deferred last-position character no
 * longer needs redrawing once the cleared area covers it - confirm
 * against the D_lp_missing handling elsewhere. */
if (D_lp_missing && y1 <= D_bot && xe >= D_width - 1)
{
if (y2 > D_bot || (y2 == D_bot && x2 >= D_width - 1))
D_lp_missing = 0;
}
/* Region extends to the bottom-right corner: try whole-screen (CL) or
 * clear-to-end-of-display (CD) capabilities. */
if (x2 == D_width - 1 && (xs == 0 || y1 == y2) && xe == D_width - 1 && y2 == D_height - 1 && (!bce || D_BE))
{
#ifdef AUTO_NUKE
if (x1 == 0 && y1 == 0 && D_auto_nuke)
NukePending();
#endif
if (x1 == 0 && y1 == 0 && D_CL)
{
AddCStr(D_CL);
D_y = D_x = 0;
return;
}
/*
* Workaround a hp700/22 terminal bug. Do not use CD where CE
* is also appropriate.
*/
if (D_CD && (y1 < y2 || !D_CE))
{
GotoPos(x1, y1);
AddCStr(D_CD);
return;
}
}
/* Region starts at the top-left: try clear-to-start-of-display (CCD). */
if (x1 == 0 && xs == 0 && (xe == D_width - 1 || y1 == y2) && y1 == 0 && D_CCD && (!bce || D_BE))
{
GotoPos(x1, y1);
AddCStr(D_CCD);
return;
}
/* Fallback: clear line by line, using CB (clear to beginning of line)
 * or CE (clear to end of line) where the geometry allows. */
xxe = xe;
for (y = y1; y <= y2; y++, x1 = xs)
{
if (y == y2)
xxe = x2;
if (x1 == 0 && D_CB && (xxe != D_width - 1 || (D_x == xxe && D_y == y)) && (!bce || D_BE))
{
GotoPos(xxe, y);
AddCStr(D_CB);
continue;
}
if (xxe == D_width - 1 && D_CE && (!bce || D_BE))
{
GotoPos(x1, y);
AddCStr(D_CE);
continue;
}
if (uselayfn)
{
/* Find the canvas/viewport containing this line segment. */
vp = 0;
for (cv = D_cvlist; cv; cv = cv->c_next)
{
if (y < cv->c_ys || y > cv->c_ye || xxe < cv->c_xs || x1 > cv->c_xe)
continue;
for (vp = cv->c_vplist; vp; vp = vp->v_next)
if (y >= vp->v_ys && y <= vp->v_ye && xxe >= vp->v_xs && x1 <= vp->v_xe)
break;
if (vp)
break;
}
if (cv && cv->c_layer && x1 >= vp->v_xs && xxe <= vp->v_xe &&
y - vp->v_yoff >= 0 && y - vp->v_yoff < cv->c_layer->l_height &&
xxe - vp->v_xoff >= 0 && x1 - vp->v_xoff < cv->c_layer->l_width)
{
/* Temporarily restrict the layer's canvas list to this canvas
 * so LayClearLine() only affects the one being cleared, then
 * restore the previous state. */
struct layer *oldflayer = flayer;
struct canvas *cvlist, *cvlnext;
flayer = cv->c_layer;
cvlist = flayer->l_cvlist;
cvlnext = cv->c_lnext;
flayer->l_cvlist = cv;
cv->c_lnext = 0;
LayClearLine(y - vp->v_yoff, x1 - vp->v_xoff, xxe - vp->v_xoff, bce);
flayer->l_cvlist = cvlist;
cv->c_lnext = cvlnext;
flayer = oldflayer;
continue;
}
}
ClearLine((struct mline *)0, y, x1, xxe, bce);
}
}
| 0 |
[] |
screen
|
c5db181b6e017cfccb8d7842ce140e59294d9f62
| 107,004,622,843,955,320,000,000,000,000,000,000,000 | 111 |
ansi: add support for xterm OSC 11
It allows for getting and setting the background color. Notably, Vim uses
OSC 11 to learn whether it's running on a light or dark colored terminal
and choose a color scheme accordingly.
Tested with gnome-terminal and xterm. When called with "?" argument the
current background color is returned:
$ echo -ne "\e]11;?\e\\"
$ 11;rgb:2323/2727/2929
Signed-off-by: Lubomir Rintel <[email protected]>
(cherry picked from commit 7059bff20a28778f9d3acf81cad07b1388d02309)
Signed-off-by: Amadeusz Sławiński <[email protected]
|
do_mount (GVfsBackend *backend,
GVfsJobMount *job,
GMountSpec *mount_spec,
GMountSource *mount_source,
gboolean is_automount)
{
GVfsBackendDav *dav_backend = G_VFS_BACKEND_DAV (backend);
MountAuthData *data;
SoupSession *session;
SoupMessage *msg_opts;
SoupMessage *msg_stat;
SoupURI *mount_base;
gulong signal_id;
guint status;
gboolean is_success;
gboolean is_webdav;
gboolean is_collection;
gboolean auth_interactive;
gboolean res;
char *last_good_path;
const char *host;
const char *type;
g_debug ("+ mount\n");
host = g_mount_spec_get (mount_spec, "host");
type = g_mount_spec_get (mount_spec, "type");
#ifdef HAVE_AVAHI
/* resolve DNS-SD style URIs */
if ((strcmp (type, "dav+sd") == 0 || strcmp (type, "davs+sd") == 0) && host != NULL)
{
GError *error;
dav_backend->resolver = g_vfs_dns_sd_resolver_new_for_encoded_triple (host, "u");
error = NULL;
if (!g_vfs_dns_sd_resolver_resolve_sync (dav_backend->resolver,
NULL,
&error))
{
g_vfs_job_failed_from_error (G_VFS_JOB (job), error);
g_error_free (error);
return;
}
g_signal_connect (dav_backend->resolver,
"changed",
(GCallback) dns_sd_resolver_changed,
dav_backend);
mount_base = dav_uri_from_dns_sd_resolver (dav_backend);
}
else
#endif
{
mount_base = g_mount_spec_to_dav_uri (mount_spec);
}
if (mount_base == NULL)
{
g_vfs_job_failed (G_VFS_JOB (job), G_IO_ERROR,
G_IO_ERROR_INVALID_ARGUMENT,
_("Invalid mount spec"));
return;
}
session = G_VFS_BACKEND_HTTP (backend)->session;
G_VFS_BACKEND_HTTP (backend)->mount_base = mount_base;
data = &(G_VFS_BACKEND_DAV (backend)->auth_info);
data->mount_source = g_object_ref (mount_source);
data->server_auth.username = g_strdup (mount_base->user);
data->server_auth.pw_save = G_PASSWORD_SAVE_NEVER;
data->proxy_auth.pw_save = G_PASSWORD_SAVE_NEVER;
signal_id = g_signal_connect (session, "authenticate",
G_CALLBACK (soup_authenticate_interactive),
data);
auth_interactive = TRUE;
last_good_path = NULL;
msg_opts = soup_message_new_from_uri (SOUP_METHOD_OPTIONS, mount_base);
msg_stat = stat_location_begin (mount_base, FALSE);
do {
GFileType file_type;
SoupURI *cur_uri;
status = g_vfs_backend_dav_send_message (backend, msg_opts);
is_success = SOUP_STATUS_IS_SUCCESSFUL (status);
is_webdav = is_success && sm_has_header (msg_opts, "DAV");
soup_message_headers_clear (msg_opts->response_headers);
soup_message_body_truncate (msg_opts->response_body);
if (is_webdav == FALSE)
break;
cur_uri = soup_message_get_uri (msg_opts);
soup_message_set_uri (msg_stat, cur_uri);
g_vfs_backend_dav_send_message (backend, msg_stat);
res = stat_location_finish (msg_stat, &file_type, NULL);
is_collection = res && file_type == G_FILE_TYPE_DIRECTORY;
g_debug (" [%s] webdav: %d, collection %d [res: %d]\n",
mount_base->path, is_webdav, is_collection, res);
if (is_collection == FALSE)
break;
/* we have found a new good root, try the parent ... */
g_free (last_good_path);
last_good_path = mount_base->path;
mount_base->path = path_get_parent_dir (mount_base->path);
soup_message_set_uri (msg_opts, mount_base);
if (auth_interactive)
{
/* if we have found a root that is good then we assume
that we also have obtained to correct credentials
and we switch the auth handler. This will prevent us
from asking for *different* credentials *again* if the
server should response with 401 for some of the parent
collections. See also bug #677753 */
g_signal_handler_disconnect (session, signal_id);
g_signal_connect (session, "authenticate",
G_CALLBACK (soup_authenticate_from_data),
data);
auth_interactive = FALSE;
}
soup_message_headers_clear (msg_stat->response_headers);
soup_message_body_truncate (msg_stat->response_body);
} while (mount_base->path != NULL);
/* we either encountered an error or we have
reached the end of paths we are allowed to
chdir up to (or couldn't chdir up at all) */
/* check if we at all have a good path */
if (last_good_path == NULL)
{
/* TODO: set correct error in case of cancellation */
if (!is_success)
g_vfs_job_failed (G_VFS_JOB (job),
G_IO_ERROR, G_IO_ERROR_FAILED,
_("HTTP Error: %s"), msg_opts->reason_phrase);
else if (!is_webdav)
g_vfs_job_failed (G_VFS_JOB (job),
G_IO_ERROR, G_IO_ERROR_FAILED,
_("Not a WebDAV enabled share"));
else
g_vfs_job_failed (G_VFS_JOB (job),
G_IO_ERROR, G_IO_ERROR_FAILED,
_("Not a WebDAV enabled share"));
/* TODO: We leak a bunch of stuff here :-( */
/* TODO: STRING CHANGE: change to: Could not find an enclosing directory */
return;
}
/* Success! We are mounted */
/* Save the auth info in the keyring */
keyring_save_authinfo (&(data->server_auth), mount_base, FALSE);
/* TODO: save proxy auth */
/* Set the working path in mount path */
g_free (mount_base->path);
mount_base->path = last_good_path;
/* dup the mountspec, but only copy known fields */
mount_spec = g_mount_spec_from_dav_uri (dav_backend, mount_base);
g_vfs_backend_set_mount_spec (backend, mount_spec);
g_vfs_backend_set_icon_name (backend, "folder-remote");
g_vfs_backend_set_symbolic_icon_name (backend, "folder-remote-symbolic");
g_vfs_backend_dav_setup_display_name (backend);
/* cleanup */
g_mount_spec_unref (mount_spec);
g_object_unref (msg_opts);
g_object_unref (msg_stat);
/* also auth the async session */
g_signal_connect (G_VFS_BACKEND_HTTP (backend)->session_async, "authenticate",
G_CALLBACK (soup_authenticate_from_data),
data);
g_vfs_job_succeeded (G_VFS_JOB (job));
g_debug ("- mount\n");
}
| 0 |
[] |
gvfs
|
0abdd97989d5274d84017490aff3bf07a71fd672
| 257,377,158,091,254,200,000,000,000,000,000,000,000 | 199 |
dav: don't unescape the uri twice
path_equal tries to unescape path before comparing. Unfortunately
this function is used also for already unescaped paths. Therefore
unescaping can fail. This commit reverts changes which was done in
commit 50af53d and unescape just uris, which aren't unescaped yet.
https://bugzilla.gnome.org/show_bug.cgi?id=743298
|
static void compute_ideal_colors_and_weights_2_comp(
const block_size_descriptor& bsd,
const image_block& blk,
const error_weight_block& ewb,
const partition_info& pi,
endpoints_and_weights& ei,
int component1,
int component2
) {
int partition_count = pi.partition_count;
ei.ep.partition_count = partition_count;
promise(partition_count > 0);
int texel_count = bsd.texel_count;
promise(texel_count > 0);
partition_metrics pms[BLOCK_MAX_PARTITIONS];
const float *error_weights;
const float* data_vr = nullptr;
const float* data_vg = nullptr;
if (component1 == 0 && component2 == 1)
{
error_weights = ewb.texel_weight_rg;
data_vr = blk.data_r;
data_vg = blk.data_g;
}
else if (component1 == 0 && component2 == 2)
{
error_weights = ewb.texel_weight_rb;
data_vr = blk.data_r;
data_vg = blk.data_b;
}
else // (component1 == 1 && component2 == 2)
{
error_weights = ewb.texel_weight_gb;
data_vr = blk.data_g;
data_vg = blk.data_b;
}
float lowparam[BLOCK_MAX_PARTITIONS] { 1e10f, 1e10f, 1e10f, 1e10f };
float highparam[BLOCK_MAX_PARTITIONS] { -1e10f, -1e10f, -1e10f, -1e10f };
line2 lines[BLOCK_MAX_PARTITIONS];
float scale[BLOCK_MAX_PARTITIONS];
float length_squared[BLOCK_MAX_PARTITIONS];
compute_avgs_and_dirs_2_comp(pi, blk, ewb, component1, component2, pms);
for (int i = 0; i < partition_count; i++)
{
vfloat4 dir = pms[i].dir.swz<0, 1>();
if (hadd_s(dir) < 0.0f)
{
dir = vfloat4::zero() - dir;
}
lines[i].a = pms[i].avg.swz<0, 1>();
lines[i].b = normalize_safe(dir, unit2());
}
for (int i = 0; i < texel_count; i++)
{
if (error_weights[i] > 1e-10f)
{
int partition = pi.partition_of_texel[i];
vfloat4 point = vfloat2(data_vr[i], data_vg[i]) * pms[partition].color_scale.swz<0, 1>();
line2 l = lines[partition];
float param = dot_s(point - l.a, l.b);
ei.weights[i] = param;
lowparam[partition] = astc::min(param, lowparam[partition]);
highparam[partition] = astc::max(param, highparam[partition]);
}
else
{
ei.weights[i] = -1e38f;
}
}
vfloat4 lowvalues[BLOCK_MAX_PARTITIONS];
vfloat4 highvalues[BLOCK_MAX_PARTITIONS];
for (int i = 0; i < partition_count; i++)
{
float length = highparam[i] - lowparam[i];
if (length < 0.0f) // Case for when none of the texels had any weight
{
lowparam[i] = 0.0f;
highparam[i] = 1e-7f;
}
// It is possible for a uniform-color partition to produce length=0; this causes NaN issues
// so set to a small value to avoid this problem.
length = astc::max(length, 1e-7f);
length_squared[i] = length * length;
scale[i] = 1.0f / length;
vfloat4 ep0 = lines[i].a + lines[i].b * lowparam[i];
vfloat4 ep1 = lines[i].a + lines[i].b * highparam[i];
ep0 = ep0.swz<0, 1>() / pms[i].color_scale;
ep1 = ep1.swz<0, 1>() / pms[i].color_scale;
lowvalues[i] = ep0;
highvalues[i] = ep1;
}
vmask4 comp1_mask = vint4::lane_id() == vint4(component1);
vmask4 comp2_mask = vint4::lane_id() == vint4(component2);
for (int i = 0; i < partition_count; i++)
{
vfloat4 ep0 = select(blk.data_min, vfloat4(lowvalues[i].lane<0>()), comp1_mask);
vfloat4 ep1 = select(blk.data_max, vfloat4(highvalues[i].lane<0>()), comp1_mask);
ei.ep.endpt0[i] = select(ep0, vfloat4(lowvalues[i].lane<1>()), comp2_mask);
ei.ep.endpt1[i] = select(ep1, vfloat4(highvalues[i].lane<1>()), comp2_mask);
}
bool is_constant_wes = true;
float constant_wes = length_squared[pi.partition_of_texel[0]] * error_weights[0];
for (int i = 0; i < texel_count; i++)
{
int partition = pi.partition_of_texel[i];
float idx = (ei.weights[i] - lowparam[partition]) * scale[partition];
idx = astc::clamp1f(idx);
ei.weights[i] = idx;
ei.weight_error_scale[i] = length_squared[partition] * error_weights[i];
assert(!astc::isnan(ei.weight_error_scale[i]));
is_constant_wes = is_constant_wes && ei.weight_error_scale[i] == constant_wes;
}
// Zero initialize any SIMD over-fetch
int texel_count_simd = round_up_to_simd_multiple_vla(texel_count);
for (int i = texel_count; i < texel_count_simd; i++)
{
ei.weights[i] = 0.0f;
ei.weight_error_scale[i] = 0.0f;
}
ei.is_constant_weight_error_scale = is_constant_wes;
}
| 0 |
[
"CWE-787"
] |
astc-encoder
|
bdd385fe19bf2737bead4b5664acdfdeca7aab15
| 192,599,537,259,590,400,000,000,000,000,000,000,000 | 146 |
Only load based on texel index if undecimated
|
bool __fastcall TOwnConsole::CommandLineOnly()
{
return false;
}
| 0 |
[
"CWE-787"
] |
winscp
|
faa96e8144e6925a380f94a97aa382c9427f688d
| 151,927,587,332,340,530,000,000,000,000,000,000,000 | 4 |
Bug 1943: Prevent loading session settings that can lead to remote code execution from handled URLs
https://winscp.net/tracker/1943
(cherry picked from commit ec584f5189a856cd79509f754722a6898045c5e0)
Source commit: 0f4be408b3f01132b00682da72d925d6c4ee649b
|
static void tls_log_func(int level, const char *str)
{
fprintf(stderr, "|<%d>| %s", level, str);
}
| 0 |
[
"CWE-290"
] |
curl
|
b09c8ee15771c614c4bf3ddac893cdb12187c844
| 105,430,278,188,300,210,000,000,000,000,000,000,000 | 4 |
vtls: add 'isproxy' argument to Curl_ssl_get/addsessionid()
To make sure we set and extract the correct session.
Reported-by: Mingtao Yang
Bug: https://curl.se/docs/CVE-2021-22890.html
CVE-2021-22890
|
ptaChangeRefcount(PTA *pta,
l_int32 delta)
{
PROCNAME("ptaChangeRefcount");
if (!pta)
return ERROR_INT("pta not defined", procName, 1);
pta->refcount += delta;
return 0;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
leptonica
|
ee301cb2029db8a6289c5295daa42bba7715e99a
| 20,750,127,841,545,048,000,000,000,000,000,000,000 | 10 |
Security fixes: expect final changes for release 1.75.3.
* Fixed a debian security issue with fscanf() reading a string with
possible buffer overflow.
* There were also a few similar situations with sscanf().
|
link_to_maj_min_ino (char *file_name, int st_dev_maj, int st_dev_min,
ino_t st_ino)
{
int link_res;
char *link_name;
link_res = -1;
/* Is the file a link to a previously copied file? */
link_name = find_inode_file (st_ino,
st_dev_maj,
st_dev_min);
if (link_name == NULL)
add_inode (st_ino, file_name,
st_dev_maj,
st_dev_min);
else
link_res = link_to_name (file_name, link_name);
return link_res;
}
| 0 |
[
"CWE-190"
] |
cpio
|
dd96882877721703e19272fe25034560b794061b
| 56,848,415,916,229,300,000,000,000,000,000,000,000 | 18 |
Rewrite dynamic string support.
* src/dstring.c (ds_init): Take a single argument.
(ds_free): New function.
(ds_resize): Take a single argument. Use x2nrealloc to expand
the storage.
(ds_reset,ds_append,ds_concat,ds_endswith): New function.
(ds_fgetstr): Rewrite. In particular, this fixes integer overflow.
* src/dstring.h (dynamic_string): Keep both the allocated length
(ds_size) and index of the next free byte in the string (ds_idx).
(ds_init,ds_resize): Change signature.
(ds_len): New macro.
(ds_free,ds_reset,ds_append,ds_concat,ds_endswith): New protos.
* src/copyin.c: Use new ds_ functions.
* src/copyout.c: Likewise.
* src/copypass.c: Likewise.
* src/util.c: Likewise.
|
GetEmptyMatrixMaxBufSize(const char *name, int rank, size_t *size)
{
int err = 0;
size_t nBytes = 0, len, rank_size;
size_t tag_size = 8, array_flags_size = 8;
/* Add the Array Flags tag and space to the number of bytes */
nBytes += tag_size + array_flags_size;
/* Get size of variable name, pad it to an 8 byte block, and add it to nBytes */
if ( NULL != name )
len = strlen(name);
else
len = 4;
if ( len <= 4 ) {
nBytes += tag_size;
} else {
nBytes += tag_size;
if ( len % 8 ) {
err = Add(&len, len, 8 - len % 8);
if ( err )
return err;
}
err = Add(&nBytes, nBytes, len);
if ( err )
return err;
}
/* Add rank and dimensions, padded to an 8 byte block */
err = Mul(&rank_size, rank, 4);
if ( err )
return err;
if ( rank % 2 )
err = Add(&nBytes, nBytes, tag_size + 4);
else
err = Add(&nBytes, nBytes, tag_size);
if ( err )
return err;
err = Add(&nBytes, nBytes, rank_size);
if ( err )
return err;
/* Data tag */
err = Add(&nBytes, nBytes, tag_size);
if ( err )
return err;
*size = nBytes;
return MATIO_E_NO_ERROR;
}
| 0 |
[
"CWE-200",
"CWE-401"
] |
matio
|
b53b62b756920f4c1509f4ee06427f66c3b5c9c4
| 172,870,072,930,612,700,000,000,000,000,000,000,000 | 52 |
Fix memory leak
As reported by https://github.com/tbeu/matio/issues/186
|
static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
unsigned int hmask = net->xfrm.policy_idx_hmask;
if ((hmask + 1) < xfrm_policy_hashmax &&
total > hmask)
return 1;
return 0;
}
| 0 |
[
"CWE-125"
] |
ipsec
|
7bab09631c2a303f87a7eb7e3d69e888673b9b7e
| 48,877,846,841,560,890,000,000,000,000,000,000,000 | 10 |
xfrm: policy: check policy direction value
The 'dir' parameter in xfrm_migrate() is a user-controlled byte which is used
as an array index. This can lead to an out-of-bound access, kernel lockup and
DoS. Add a check for the 'dir' value.
This fixes CVE-2017-11600.
References: https://bugzilla.redhat.com/show_bug.cgi?id=1474928
Fixes: 80c9abaabf42 ("[XFRM]: Extension for dynamic update of endpoint address(es)")
Cc: <[email protected]> # v2.6.21-rc1
Reported-by: "bo Zhang" <[email protected]>
Signed-off-by: Vladis Dronov <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]>
|
grammar_current_rule_symbol_append (symbol *sym, location loc,
named_ref *name)
{
if (current_rule->action_props.code)
grammar_midrule_action ();
symbol_list *p = grammar_symbol_append (sym, loc);
if (name)
assign_named_ref (p, name);
if (sym->content->status == undeclared || sym->content->status == used)
sym->content->status = needed;
}
| 0 |
[] |
bison
|
b7aab2dbad43aaf14eebe78d54aafa245a000988
| 149,497,276,857,693,550,000,000,000,000,000,000,000 | 11 |
fix: crash when redefining the EOF token
Reported by Agency for Defense Development.
https://lists.gnu.org/r/bug-bison/2020-08/msg00008.html
On an empty such as
%token FOO
BAR
FOO 0
%%
input: %empty
we crash because when we find FOO 0, we decrement ntokens (since FOO
was discovered to be EOF, which is already known to be a token, so we
increment ntokens for it, and need to cancel this). This "works well"
when EOF is properly defined in one go, but here it is first defined
and later only assign token code 0. In the meanwhile BAR was given
the token number that we just decremented.
To fix this, assign symbol numbers after parsing, not during parsing,
so that we also saw all the explicit token codes. To maintain the
current numbers (I'd like to keep no difference in the output, not
just equivalence), we need to make sure the symbols are numbered in
the same order: that of appearance in the source file. So we need the
locations to be correct, which was almost the case, except for nterms
that appeared several times as LHS (i.e., several times as "foo:
..."). Fixing the use of location_of_lhs sufficed (it appears it was
intended for this use, but its implementation was unfinished: it was
always set to "false" only).
* src/symtab.c (symbol_location_as_lhs_set): Update location_of_lhs.
(symbol_code_set): Remove broken hack that decremented ntokens.
(symbol_class_set, dummy_symbol_get): Don't set number, ntokens and
nnterms.
(symbol_check_defined): Do it.
(symbols): Don't count nsyms here.
Actually, don't count nsyms at all: let it be done in...
* src/reader.c (check_and_convert_grammar): here. Define nsyms from
ntokens and nnterms after parsing.
* tests/input.at (EOF redeclared): New.
* examples/c/bistromathic/bistromathic.test: Adjust the traces: in
"%nterm <double> exp %% input: ...", exp used to be numbered before
input.
|
static unsigned int check_time(gnutls_x509_crt_t crt, time_t now)
{
int status = 0;
time_t t;
t = gnutls_x509_crt_get_activation_time (crt);
if (t == (time_t) - 1 || now < t)
{
status |= GNUTLS_CERT_NOT_ACTIVATED;
status |= GNUTLS_CERT_INVALID;
return status;
}
t = gnutls_x509_crt_get_expiration_time (crt);
if (t == (time_t) - 1 || now > t)
{
status |= GNUTLS_CERT_EXPIRED;
status |= GNUTLS_CERT_INVALID;
return status;
}
return 0;
}
| 0 |
[
"CWE-17"
] |
gnutls
|
897cbce62c0263a498088ac3e465aa5f05f8719c
| 157,717,028,947,425,250,000,000,000,000,000,000,000 | 23 |
Extended time verification to trusted certificate list as well. Introduced
the flag GNUTLS_VERIFY_DISABLE_TRUSTED_TIME_CHECKS that will prevent the
trusted certificate list verification.
|
static int atalk_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
int rc = -ENOIOCTLCMD;
struct sock *sk = sock->sk;
void __user *argp = (void __user *)arg;
switch (cmd) {
/* Protocol layer */
case TIOCOUTQ: {
long amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
if (amount < 0)
amount = 0;
rc = put_user(amount, (int __user *)argp);
break;
}
case TIOCINQ: {
/*
* These two are safe on a single CPU system as only
* user tasks fiddle here
*/
struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
long amount = 0;
if (skb)
amount = skb->len - sizeof(struct ddpehdr);
rc = put_user(amount, (int __user *)argp);
break;
}
/* Routing */
case SIOCADDRT:
case SIOCDELRT:
rc = -EPERM;
if (capable(CAP_NET_ADMIN))
rc = atrtr_ioctl(cmd, argp);
break;
/* Interface */
case SIOCGIFADDR:
case SIOCSIFADDR:
case SIOCGIFBRDADDR:
case SIOCATALKDIFADDR:
case SIOCDIFADDR:
case SIOCSARP: /* proxy AARP */
case SIOCDARP: /* proxy AARP */
rtnl_lock();
rc = atif_ioctl(cmd, argp);
rtnl_unlock();
break;
}
return rc;
}
| 0 |
[
"CWE-276"
] |
linux
|
6cc03e8aa36c51f3b26a0d21a3c4ce2809c842ac
| 51,707,178,283,166,860,000,000,000,000,000,000,000 | 52 |
appletalk: enforce CAP_NET_RAW for raw sockets
When creating a raw AF_APPLETALK socket, CAP_NET_RAW needs to be checked
first.
Signed-off-by: Ori Nimron <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static noinline void key_gc_unused_keys(struct list_head *keys)
{
while (!list_empty(keys)) {
struct key *key =
list_entry(keys->next, struct key, graveyard_link);
list_del(&key->graveyard_link);
kdebug("- %u", key->serial);
key_check(key);
/* Throw away the key data */
if (key->type->destroy)
key->type->destroy(key);
security_key_free(key);
/* deal with the user's key tracking and quota */
if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
spin_lock(&key->user->lock);
key->user->qnkeys--;
key->user->qnbytes -= key->quotalen;
spin_unlock(&key->user->lock);
}
atomic_dec(&key->user->nkeys);
if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
atomic_dec(&key->user->nikeys);
key_user_put(key->user);
kfree(key->description);
#ifdef KEY_DEBUGGING
key->magic = KEY_DEBUG_MAGIC_X;
#endif
kmem_cache_free(key_jar, key);
}
}
| 1 |
[
"CWE-20",
"CWE-190"
] |
linux
|
f05819df10d7b09f6d1eb6f8534a8f68e5a4fe61
| 179,454,672,219,116,500,000,000,000,000,000,000,000 | 38 |
KEYS: Fix crash when attempt to garbage collect an uninstantiated keyring
The following sequence of commands:
i=`keyctl add user a a @s`
keyctl request2 keyring foo bar @t
keyctl unlink $i @s
tries to invoke an upcall to instantiate a keyring if one doesn't already
exist by that name within the user's keyring set. However, if the upcall
fails, the code sets keyring->type_data.reject_error to -ENOKEY or some
other error code. When the key is garbage collected, the key destroy
function is called unconditionally and keyring_destroy() uses list_empty()
on keyring->type_data.link - which is in a union with reject_error.
Subsequently, the kernel tries to unlink the keyring from the keyring names
list - which oopses like this:
BUG: unable to handle kernel paging request at 00000000ffffff8a
IP: [<ffffffff8126e051>] keyring_destroy+0x3d/0x88
...
Workqueue: events key_garbage_collector
...
RIP: 0010:[<ffffffff8126e051>] keyring_destroy+0x3d/0x88
RSP: 0018:ffff88003e2f3d30 EFLAGS: 00010203
RAX: 00000000ffffff82 RBX: ffff88003bf1a900 RCX: 0000000000000000
RDX: 0000000000000000 RSI: 000000003bfc6901 RDI: ffffffff81a73a40
RBP: ffff88003e2f3d38 R08: 0000000000000152 R09: 0000000000000000
R10: ffff88003e2f3c18 R11: 000000000000865b R12: ffff88003bf1a900
R13: 0000000000000000 R14: ffff88003bf1a908 R15: ffff88003e2f4000
...
CR2: 00000000ffffff8a CR3: 000000003e3ec000 CR4: 00000000000006f0
...
Call Trace:
[<ffffffff8126c756>] key_gc_unused_keys.constprop.1+0x5d/0x10f
[<ffffffff8126ca71>] key_garbage_collector+0x1fa/0x351
[<ffffffff8105ec9b>] process_one_work+0x28e/0x547
[<ffffffff8105fd17>] worker_thread+0x26e/0x361
[<ffffffff8105faa9>] ? rescuer_thread+0x2a8/0x2a8
[<ffffffff810648ad>] kthread+0xf3/0xfb
[<ffffffff810647ba>] ? kthread_create_on_node+0x1c2/0x1c2
[<ffffffff815f2ccf>] ret_from_fork+0x3f/0x70
[<ffffffff810647ba>] ? kthread_create_on_node+0x1c2/0x1c2
Note the value in RAX. This is a 32-bit representation of -ENOKEY.
The solution is to only call ->destroy() if the key was successfully
instantiated.
Reported-by: Dmitry Vyukov <[email protected]>
Signed-off-by: David Howells <[email protected]>
Tested-by: Dmitry Vyukov <[email protected]>
|
static void process_command(conn *c, char *command) {
token_t tokens[MAX_TOKENS];
size_t ntokens;
int comm;
assert(c != NULL);
MEMCACHED_PROCESS_COMMAND_START(c->sfd, c->rcurr, c->rbytes);
if (settings.verbose > 1)
fprintf(stderr, "<%d %s\n", c->sfd, command);
/*
* for commands set/add/replace, we build an item and read the data
* directly into it, then continue in nread_complete().
*/
c->msgcurr = 0;
c->msgused = 0;
c->iovused = 0;
if (add_msghdr(c) != 0) {
out_string(c, "SERVER_ERROR out of memory preparing response");
return;
}
ntokens = tokenize_command(command, tokens, MAX_TOKENS);
if (ntokens >= 3 &&
((strcmp(tokens[COMMAND_TOKEN].value, "get") == 0) ||
(strcmp(tokens[COMMAND_TOKEN].value, "bget") == 0))) {
process_get_command(c, tokens, ntokens, false);
} else if ((ntokens == 6 || ntokens == 7) &&
((strcmp(tokens[COMMAND_TOKEN].value, "add") == 0 && (comm = NREAD_ADD)) ||
(strcmp(tokens[COMMAND_TOKEN].value, "set") == 0 && (comm = NREAD_SET)) ||
(strcmp(tokens[COMMAND_TOKEN].value, "replace") == 0 && (comm = NREAD_REPLACE)) ||
(strcmp(tokens[COMMAND_TOKEN].value, "prepend") == 0 && (comm = NREAD_PREPEND)) ||
(strcmp(tokens[COMMAND_TOKEN].value, "append") == 0 && (comm = NREAD_APPEND)) )) {
process_update_command(c, tokens, ntokens, comm, false);
} else if ((ntokens == 7 || ntokens == 8) && (strcmp(tokens[COMMAND_TOKEN].value, "cas") == 0 && (comm = NREAD_CAS))) {
process_update_command(c, tokens, ntokens, comm, true);
} else if ((ntokens == 4 || ntokens == 5) && (strcmp(tokens[COMMAND_TOKEN].value, "incr") == 0)) {
process_arithmetic_command(c, tokens, ntokens, 1);
} else if (ntokens >= 3 && (strcmp(tokens[COMMAND_TOKEN].value, "gets") == 0)) {
process_get_command(c, tokens, ntokens, true);
} else if ((ntokens == 4 || ntokens == 5) && (strcmp(tokens[COMMAND_TOKEN].value, "decr") == 0)) {
process_arithmetic_command(c, tokens, ntokens, 0);
} else if (ntokens >= 3 && ntokens <= 5 && (strcmp(tokens[COMMAND_TOKEN].value, "delete") == 0)) {
process_delete_command(c, tokens, ntokens);
} else if ((ntokens == 4 || ntokens == 5) && (strcmp(tokens[COMMAND_TOKEN].value, "touch") == 0)) {
process_touch_command(c, tokens, ntokens);
} else if (ntokens >= 2 && (strcmp(tokens[COMMAND_TOKEN].value, "stats") == 0)) {
process_stat(c, tokens, ntokens);
} else if (ntokens >= 2 && ntokens <= 4 && (strcmp(tokens[COMMAND_TOKEN].value, "flush_all") == 0)) {
time_t exptime = 0;
set_noreply_maybe(c, tokens, ntokens);
pthread_mutex_lock(&c->thread->stats.mutex);
c->thread->stats.flush_cmds++;
pthread_mutex_unlock(&c->thread->stats.mutex);
if(ntokens == (c->noreply ? 3 : 2)) {
settings.oldest_live = current_time - 1;
item_flush_expired();
out_string(c, "OK");
return;
}
exptime = strtol(tokens[1].value, NULL, 10);
if(errno == ERANGE) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
/*
If exptime is zero realtime() would return zero too, and
realtime(exptime) - 1 would overflow to the max unsigned
value. So we process exptime == 0 the same way we do when
no delay is given at all.
*/
if (exptime > 0)
settings.oldest_live = realtime(exptime) - 1;
else /* exptime == 0 */
settings.oldest_live = current_time - 1;
item_flush_expired();
out_string(c, "OK");
return;
} else if (ntokens == 2 && (strcmp(tokens[COMMAND_TOKEN].value, "version") == 0)) {
out_string(c, "VERSION " VERSION);
} else if (ntokens == 2 && (strcmp(tokens[COMMAND_TOKEN].value, "quit") == 0)) {
conn_set_state(c, conn_closing);
} else if (ntokens == 2 && (strcmp(tokens[COMMAND_TOKEN].value, "shutdown") == 0)) {
if (settings.shutdown_command) {
conn_set_state(c, conn_closing);
raise(SIGINT);
} else {
out_string(c, "ERROR: shutdown not enabled");
}
} else if (ntokens > 1 && strcmp(tokens[COMMAND_TOKEN].value, "slabs") == 0) {
if (ntokens == 5 && strcmp(tokens[COMMAND_TOKEN + 1].value, "reassign") == 0) {
int src, dst, rv;
if (settings.slab_reassign == false) {
out_string(c, "CLIENT_ERROR slab reassignment disabled");
return;
}
src = strtol(tokens[2].value, NULL, 10);
dst = strtol(tokens[3].value, NULL, 10);
if (errno == ERANGE) {
out_string(c, "CLIENT_ERROR bad command line format");
return;
}
rv = slabs_reassign(src, dst);
switch (rv) {
case REASSIGN_OK:
out_string(c, "OK");
break;
case REASSIGN_RUNNING:
out_string(c, "BUSY currently processing reassign request");
break;
case REASSIGN_BADCLASS:
out_string(c, "BADCLASS invalid src or dst class id");
break;
case REASSIGN_NOSPARE:
out_string(c, "NOSPARE source class has no spare pages");
break;
case REASSIGN_SRC_DST_SAME:
out_string(c, "SAME src and dst class are identical");
break;
}
return;
} else if (ntokens == 4 &&
(strcmp(tokens[COMMAND_TOKEN + 1].value, "automove") == 0)) {
process_slabs_automove_command(c, tokens, ntokens);
} else {
out_string(c, "ERROR");
}
} else if ((ntokens == 3 || ntokens == 4) && (strcmp(tokens[COMMAND_TOKEN].value, "verbosity") == 0)) {
process_verbosity_command(c, tokens, ntokens);
} else {
out_string(c, "ERROR");
}
return;
}
| 0 |
[
"CWE-189"
] |
memcached
|
6695ccbc525c36d693aaa3e8337b36aa0c784424
| 162,868,267,498,970,720,000,000,000,000,000,000,000 | 172 |
Fix segfault on specially crafted packet.
|
static int aesni_cbc_hmac_sha1_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg,
void *ptr)
{
EVP_AES_HMAC_SHA1 *key = data(ctx);
switch (type) {
case EVP_CTRL_AEAD_SET_MAC_KEY:
{
unsigned int i;
unsigned char hmac_key[64];
memset(hmac_key, 0, sizeof(hmac_key));
if (arg > (int)sizeof(hmac_key)) {
SHA1_Init(&key->head);
SHA1_Update(&key->head, ptr, arg);
SHA1_Final(hmac_key, &key->head);
} else {
memcpy(hmac_key, ptr, arg);
}
for (i = 0; i < sizeof(hmac_key); i++)
hmac_key[i] ^= 0x36; /* ipad */
SHA1_Init(&key->head);
SHA1_Update(&key->head, hmac_key, sizeof(hmac_key));
for (i = 0; i < sizeof(hmac_key); i++)
hmac_key[i] ^= 0x36 ^ 0x5c; /* opad */
SHA1_Init(&key->tail);
SHA1_Update(&key->tail, hmac_key, sizeof(hmac_key));
OPENSSL_cleanse(hmac_key, sizeof(hmac_key));
return 1;
}
case EVP_CTRL_AEAD_TLS1_AAD:
{
unsigned char *p = ptr;
unsigned int len = p[arg - 2] << 8 | p[arg - 1];
if (ctx->encrypt) {
key->payload_length = len;
if ((key->aux.tls_ver =
p[arg - 4] << 8 | p[arg - 3]) >= TLS1_1_VERSION) {
len -= AES_BLOCK_SIZE;
p[arg - 2] = len >> 8;
p[arg - 1] = len;
}
key->md = key->head;
SHA1_Update(&key->md, p, arg);
return (int)(((len + SHA_DIGEST_LENGTH +
AES_BLOCK_SIZE) & -AES_BLOCK_SIZE)
- len);
} else {
if (arg > 13)
arg = 13;
memcpy(key->aux.tls_aad, ptr, arg);
key->payload_length = arg;
return SHA_DIGEST_LENGTH;
}
}
# if !defined(OPENSSL_NO_MULTIBLOCK) && EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK
case EVP_CTRL_TLS1_1_MULTIBLOCK_MAX_BUFSIZE:
return (int)(5 + 16 + ((arg + 20 + 16) & -16));
case EVP_CTRL_TLS1_1_MULTIBLOCK_AAD:
{
EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *param =
(EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *) ptr;
unsigned int n4x = 1, x4;
unsigned int frag, last, packlen, inp_len;
if (arg < (int)sizeof(EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM))
return -1;
inp_len = param->inp[11] << 8 | param->inp[12];
if (ctx->encrypt) {
if ((param->inp[9] << 8 | param->inp[10]) < TLS1_1_VERSION)
return -1;
if (inp_len) {
if (inp_len < 4096)
return 0; /* too short */
if (inp_len >= 8192 && OPENSSL_ia32cap_P[2] & (1 << 5))
n4x = 2; /* AVX2 */
} else if ((n4x = param->interleave / 4) && n4x <= 2)
inp_len = param->len;
else
return -1;
key->md = key->head;
SHA1_Update(&key->md, param->inp, 13);
x4 = 4 * n4x;
n4x += 1;
frag = inp_len >> n4x;
last = inp_len + frag - (frag << n4x);
if (last > frag && ((last + 13 + 9) % 64 < (x4 - 1))) {
frag++;
last -= x4 - 1;
}
packlen = 5 + 16 + ((frag + 20 + 16) & -16);
packlen = (packlen << n4x) - packlen;
packlen += 5 + 16 + ((last + 20 + 16) & -16);
param->interleave = x4;
return (int)packlen;
} else
return -1; /* not yet */
}
case EVP_CTRL_TLS1_1_MULTIBLOCK_ENCRYPT:
{
EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *param =
(EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *) ptr;
return (int)tls1_1_multi_block_encrypt(key, param->out,
param->inp, param->len,
param->interleave / 4);
}
case EVP_CTRL_TLS1_1_MULTIBLOCK_DECRYPT:
# endif
default:
return -1;
}
}
| 1 |
[] |
openssl
|
1a3701f4fe0530a40ec073cd78d02cfcc26c0f8e
| 87,837,004,564,116,270,000,000,000,000,000,000,000 | 131 |
Sanity check EVP_CTRL_AEAD_TLS_AAD
The various implementations of EVP_CTRL_AEAD_TLS_AAD expect a buffer of at
least 13 bytes long. Add sanity checks to ensure that the length is at
least that. Also add a new constant (EVP_AEAD_TLS1_AAD_LEN) to evp.h to
represent this length. Thanks to Kevin Wojtysiak (Int3 Solutions) and
Paramjot Oberoi (Int3 Solutions) for reporting this issue.
Reviewed-by: Andy Polyakov <[email protected]>
(cherry picked from commit c8269881093324b881b81472be037055571f73f3)
Conflicts:
ssl/record/ssl3_record.c
|
test_gui_tabmenu_event(dict_T *args UNUSED)
{
# ifdef FEAT_GUI_TABLINE
int tabnr;
int item;
if (!dict_has_key(args, "tabnr")
|| !dict_has_key(args, "item"))
return FALSE;
tabnr = (int)dict_get_number(args, "tabnr");
item = (int)dict_get_number(args, "item");
send_tabline_menu_event(tabnr, item);
# endif
return TRUE;
}
| 0 |
[
"CWE-416"
] |
vim
|
249e1b903a9c0460d618f6dcc59aeb8c03b24b20
| 57,825,039,383,933,410,000,000,000,000,000,000,000 | 17 |
patch 9.0.0213: using freed memory with error in assert argument
Problem: Using freed memory with error in assert argument.
Solution: Make a copy of the error.
|
static void convergeephemerons (global_State *g) {
int changed;
int dir = 0;
do {
GCObject *w;
GCObject *next = g->ephemeron; /* get ephemeron list */
g->ephemeron = NULL; /* tables may return to this list when traversed */
changed = 0;
while ((w = next) != NULL) { /* for each ephemeron table */
next = gco2t(w)->gclist; /* list is rebuilt during loop */
if (traverseephemeron(g, gco2t(w), dir)) { /* marked some value? */
propagateall(g); /* propagate changes */
changed = 1; /* will have to revisit all ephemeron tables */
}
}
dir = !dir; /* invert direction next time */
} while (changed); /* repeat until no more changes */
}
| 0 |
[
"CWE-125"
] |
lua
|
127e7a6c8942b362aa3c6627f44d660a4fb75312
| 170,418,948,230,590,770,000,000,000,000,000,000,000 | 18 |
Fixed bug of old finalized objects in the GC
When an object aged OLD1 is finalized, it is moved from the list
'finobj' to the *beginning* of the list 'allgc'. So, this part of the
list (and not only the survival list) must be visited by 'markold'.
|
inline static int php_openssl_safe_mode_chk(char *filename TSRMLS_DC)
{
if (PG(safe_mode) && (!php_checkuid(filename, NULL, CHECKUID_CHECK_FILE_AND_DIR))) {
return -1;
}
if (php_check_open_basedir(filename TSRMLS_CC)) {
return -1;
}
return 0;
}
| 0 |
[
"CWE-200"
] |
php-src
|
270a406ac94b5fc5cc9ef59fc61e3b4b95648a3e
| 146,126,537,554,895,190,000,000,000,000,000,000,000 | 11 |
Fix bug #61413 ext\openssl\tests\openssl_encrypt_crash.phpt fails 5.3 only
|
/*
 * Publish @inode on @dentry and install the new type bits in d_flags.
 *
 * Ordering is deliberate: d_inode is stored first, and smp_wmb()
 * ensures that store is visible before the updated flags are, so a
 * lockless reader that observes the new type flags also sees the inode.
 * NOTE(review): the pairing read-side barrier is not visible here --
 * confirm the matching smp_rmb()/ordering at the d_flags readers.
 * The flags word is read once and written once (READ_ONCE/WRITE_ONCE)
 * so the read-modify-write is not torn or re-fetched by the compiler.
 */
static inline void __d_set_inode_and_type(struct dentry *dentry,
					  struct inode *inode,
					  unsigned type_flags)
{
	unsigned flags;
	dentry->d_inode = inode;
	smp_wmb();
	flags = READ_ONCE(dentry->d_flags);
	/* Clear the old entry-type and fallthrough bits, keep the rest. */
	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	flags |= type_flags;
	WRITE_ONCE(dentry->d_flags, flags);
}
| 0 |
[
"CWE-401",
"CWE-254"
] |
linux
|
cde93be45a8a90d8c264c776fab63487b5038a65
| 141,167,040,813,998,960,000,000,000,000,000,000,000 | 13 |
dcache: Handle escaped paths in prepend_path
A rename can result in a dentry that by walking up d_parent
will never reach its mnt_root. For lack of a better term
I call this an escaped path.
prepend_path is called by four different functions __d_path,
d_absolute_path, d_path, and getcwd.
__d_path only wants to see paths are connected to the root it passes
in. So __d_path needs prepend_path to return an error.
d_absolute_path similarly wants to see paths that are connected to
some root. Escaped paths are not connected to any mnt_root so
d_absolute_path needs prepend_path to return an error greater
than 1. So escaped paths will be treated like paths on lazily
unmounted mounts.
getcwd needs to prepend "(unreachable)" so getcwd also needs
prepend_path to return an error.
d_path is the interesting hold out. d_path just wants to print
something, and does not care about the weird cases. Which raises
the question what should be printed?
Given that <escaped_path>/<anything> should result in -ENOENT I
believe it is desirable for escaped paths to be printed as empty
paths. As there are not really any meaninful path components when
considered from the perspective of a mount tree.
So tweak prepend_path to return an empty path with an new error
code of 3 when it encounters an escaped path.
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: Al Viro <[email protected]>
|
/* Resolve and cache the GL uniform location of "<stage-prefix>const0"
 * for shader stage 'id'.  When the stage declares no constants the
 * cached location is set to -1 (the GL "not found" sentinel). */
static void bind_const_locs(struct vrend_linked_shader_program *sprog,
                            int id)
{
   int location = -1;

   if (sprog->ss[id]->sel->sinfo.num_consts) {
      char uniform_name[32];

      snprintf(uniform_name, sizeof(uniform_name), "%sconst0",
               pipe_shader_to_prefix(id));
      location = glGetUniformLocation(sprog->id, uniform_name);
   }
   sprog->const_location[id] = location;
}
| 0 |
[
"CWE-787"
] |
virglrenderer
|
cbc8d8b75be360236cada63784046688aeb6d921
| 100,802,007,599,191,040,000,000,000,000,000,000,000 | 10 |
vrend: check transfer bounds for negative values too and report error
Closes #138
Signed-off-by: Gert Wollny <[email protected]>
Reviewed-by: Emil Velikov <[email protected]>
|
/* Return TRUE when the first dwDataLen bytes at pbData are all zero,
 * FALSE as soon as any non-zero byte is found.  A zero-length buffer
 * is considered all-zero. */
BOOL IsAllZeroes (unsigned char* pbData, DWORD dwDataLen)
{
	DWORD i;

	for (i = 0; i < dwDataLen; i++)
	{
		if (pbData[i] != 0)
			return FALSE;
	}
	return TRUE;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
VeraCrypt
|
f30f9339c9a0b9bbcc6f5ad38804af39db1f479e
| 118,205,345,952,014,070,000,000,000,000,000,000,000 | 10 |
Windows: fix low severity vulnerability in driver that allowed reading 3 bytes of kernel stack memory (with a rare possibility of 25 additional bytes). Reported by Tim Harrison.
|
/*
  Write 'image' (or, for the JPS magick, the appended image list) to the
  output blob as a JFIF/JPEG stream using the IJG JPEG library.

  Honored options/artifacts: quality, sampling-factor, interlace,
  jpeg:dct-method, jpeg:optimize-coding, jpeg:extent (binary search for
  a quality that fits a byte budget), jpeg:q-table (custom quantization
  tables), jpeg:colorspace, jpeg:sampling-factor, and (libjpeg >= 7) a
  "quality" geometry splitting luminance/chrominance scaling.

  Error handling: libjpeg errors longjmp back into the two setjmp()
  blocks below, which destroy the compressor, release the pixel buffer
  (second block only) and close the blob before returning MagickFalse.
  Returns MagickTrue on success.
*/
static MagickBooleanType WriteJPEGImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  const char
    *option,
    *sampling_factor,
    *value;

  ErrorManager
    error_manager;

  Image
    *volatile volatile_image;

  int
    colorspace,
    quality;

  JSAMPLE
    *volatile jpeg_pixels;

  JSAMPROW
    scanline[1];

  MagickBooleanType
    status;

  MemoryInfo
    *memory_info;

  register JSAMPLE
    *q;

  register ssize_t
    i;

  ssize_t
    y;

  struct jpeg_compress_struct
    jpeg_info;

  struct jpeg_error_mgr
    jpeg_error;

  unsigned short
    scale;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((LocaleCompare(image_info->magick,"JPS") == 0) &&
      (image->next != (Image *) NULL))
    image=AppendImages(image,MagickFalse,exception);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  /*
    Initialize JPEG parameters.
  */
  (void) ResetMagickMemory(&error_manager,0,sizeof(error_manager));
  (void) ResetMagickMemory(&jpeg_info,0,sizeof(jpeg_info));
  (void) ResetMagickMemory(&jpeg_error,0,sizeof(jpeg_error));
  volatile_image=image;
  jpeg_info.client_data=(void *) volatile_image;
  jpeg_info.err=jpeg_std_error(&jpeg_error);
  jpeg_info.err->emit_message=(void (*)(j_common_ptr,int)) JPEGWarningHandler;
  jpeg_info.err->error_exit=(void (*)(j_common_ptr)) JPEGErrorHandler;
  error_manager.exception=exception;
  error_manager.image=volatile_image;
  memory_info=(MemoryInfo *) NULL;
  /* Recovery point for libjpeg errors raised before the pixel buffer
     exists; JPEGErrorHandler longjmp()s back to here. */
  if (setjmp(error_manager.error_recovery) != 0)
    {
      jpeg_destroy_compress(&jpeg_info);
      (void) CloseBlob(volatile_image);
      return(MagickFalse);
    }
  jpeg_info.client_data=(void *) &error_manager;
  jpeg_create_compress(&jpeg_info);
  JPEGDestinationManager(&jpeg_info,image);
  /* JPEG dimensions are 16-bit in practice and unsigned int in the API;
     refuse images whose geometry does not round-trip. */
  if ((image->columns != (unsigned int) image->columns) ||
      (image->rows != (unsigned int) image->rows))
    ThrowWriterException(ImageError,"WidthOrHeightExceedsLimit");
  jpeg_info.image_width=(unsigned int) image->columns;
  jpeg_info.image_height=(unsigned int) image->rows;
  jpeg_info.input_components=3;
  jpeg_info.data_precision=8;
  jpeg_info.in_color_space=JCS_RGB;
  /* Map the image colorspace onto a libjpeg input colorspace. */
  switch (image->colorspace)
  {
    case CMYKColorspace:
    {
      jpeg_info.input_components=4;
      jpeg_info.in_color_space=JCS_CMYK;
      break;
    }
    case YCbCrColorspace:
    case Rec601YCbCrColorspace:
    case Rec709YCbCrColorspace:
    {
      jpeg_info.in_color_space=JCS_YCbCr;
      break;
    }
    case GRAYColorspace:
    {
      if (image_info->type == TrueColorType)
        break;
      jpeg_info.input_components=1;
      jpeg_info.in_color_space=JCS_GRAYSCALE;
      break;
    }
    default:
    {
      /* Anything else is converted to sRGB; collapse to grayscale when
         the pixels are actually gray (unless TrueColor was forced). */
      (void) TransformImageColorspace(image,sRGBColorspace,exception);
      if (image_info->type == TrueColorType)
        break;
      if (SetImageGray(image,exception) != MagickFalse)
        {
          jpeg_info.input_components=1;
          jpeg_info.in_color_space=JCS_GRAYSCALE;
        }
      break;
    }
  }
  jpeg_set_defaults(&jpeg_info);
  if (jpeg_info.in_color_space == JCS_CMYK)
    jpeg_set_colorspace(&jpeg_info,JCS_YCCK);
  /* Pick 8-bit output unless the library default is 12-bit and the
     image depth requires more than 8 bits. */
  if ((jpeg_info.data_precision != 12) && (image->depth <= 8))
    jpeg_info.data_precision=8;
  else
    jpeg_info.data_precision=BITS_IN_JSAMPLE;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "Image resolution: %.20g,%.20g",image->resolution.x,image->resolution.y);
  if ((image->resolution.x != 0.0) && (image->resolution.y != 0.0))
    {
      /*
        Set image resolution.
      */
      jpeg_info.write_JFIF_header=TRUE;
      jpeg_info.X_density=(UINT16) image->resolution.x;
      jpeg_info.Y_density=(UINT16) image->resolution.y;
      /*
        Set image resolution units.
      */
      if (image->units == PixelsPerInchResolution)
        jpeg_info.density_unit=(UINT8) 1;
      if (image->units == PixelsPerCentimeterResolution)
        jpeg_info.density_unit=(UINT8) 2;
    }
  /* DCT method: float by default, overridable via jpeg:dct-method. */
  jpeg_info.dct_method=JDCT_FLOAT;
  option=GetImageOption(image_info,"jpeg:dct-method");
  if (option != (const char *) NULL)
    switch (*option)
    {
      case 'D':
      case 'd':
      {
        if (LocaleCompare(option,"default") == 0)
          jpeg_info.dct_method=JDCT_DEFAULT;
        break;
      }
      case 'F':
      case 'f':
      {
        if (LocaleCompare(option,"fastest") == 0)
          jpeg_info.dct_method=JDCT_FASTEST;
        if (LocaleCompare(option,"float") == 0)
          jpeg_info.dct_method=JDCT_FLOAT;
        break;
      }
      case 'I':
      case 'i':
      {
        if (LocaleCompare(option,"ifast") == 0)
          jpeg_info.dct_method=JDCT_IFAST;
        if (LocaleCompare(option,"islow") == 0)
          jpeg_info.dct_method=JDCT_ISLOW;
        break;
      }
    }
  option=GetImageOption(image_info,"jpeg:optimize-coding");
  if (option != (const char *) NULL)
    jpeg_info.optimize_coding=IsStringTrue(option) != MagickFalse ? TRUE :
      FALSE;
  else
    {
      MagickSizeType
        length;

      length=(MagickSizeType) jpeg_info.input_components*image->columns*
        image->rows*sizeof(JSAMPLE);
      if (length == (MagickSizeType) ((size_t) length))
        {
          /*
            Perform optimization only if available memory resources permit it.
          */
          status=AcquireMagickResource(MemoryResource,length);
          RelinquishMagickResource(MemoryResource,length);
          jpeg_info.optimize_coding=status == MagickFalse ? FALSE : TRUE;
        }
    }
#if (JPEG_LIB_VERSION >= 61) && defined(C_PROGRESSIVE_SUPPORTED)
  if ((LocaleCompare(image_info->magick,"PJPEG") == 0) ||
      (image_info->interlace != NoInterlace))
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "Interlace: progressive");
      jpeg_simple_progression(&jpeg_info);
    }
  else
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        "Interlace: non-progressive");
#else
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "Interlace: nonprogressive");
#endif
  /* Default quality is 92 unless the image carries its own value. */
  quality=92;
  if ((image_info->compression != LosslessJPEGCompression) &&
      (image->quality <= 100))
    {
      if (image->quality != UndefinedCompressionQuality)
        quality=(int) image->quality;
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Quality: %.20g",
          (double) image->quality);
    }
  else
    {
#if !defined(C_LOSSLESS_SUPPORTED)
      quality=100;
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Quality: 100");
#else
      if (image->quality < 100)
        (void) ThrowMagickException(exception,GetMagickModule(),CoderWarning,
          "LosslessToLossyJPEGConversion","`%s'",image->filename);
      else
        {
          int
            point_transform,
            predictor;

          predictor=image->quality/100;  /* range 1-7 */
          point_transform=image->quality % 20;  /* range 0-15 */
          jpeg_simple_lossless(&jpeg_info,predictor,point_transform);
          if (image->debug != MagickFalse)
            {
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                "Compression: lossless");
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                "Predictor: %d",predictor);
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                "Point Transform: %d",point_transform);
            }
        }
#endif
    }
  option=GetImageOption(image_info,"jpeg:extent");
  if (option != (const char *) NULL)
    {
      Image
        *jpeg_image;

      ImageInfo
        *extent_info;

      extent_info=CloneImageInfo(image_info);
      extent_info->blob=NULL;
      jpeg_image=CloneImage(image,0,0,MagickTrue,exception);
      if (jpeg_image != (Image *) NULL)
        {
          MagickSizeType
            extent;

          size_t
            maximum,
            minimum;

          /*
            Search for compression quality that does not exceed image extent.
          */
          extent_info->quality=0;
          extent=(MagickSizeType) SiPrefixToDoubleInterval(option,100.0);
          (void) DeleteImageOption(extent_info,"jpeg:extent");
          (void) DeleteImageArtifact(jpeg_image,"jpeg:extent");
          maximum=image_info->quality;
          if (maximum < 2)
            maximum=101;
          /* Binary search on quality: write a trial file per probe and
             compare its size against the requested extent. */
          for (minimum=2; minimum < maximum; )
          {
            (void) AcquireUniqueFilename(jpeg_image->filename);
            jpeg_image->quality=minimum+(maximum-minimum+1)/2;
            status=WriteJPEGImage(extent_info,jpeg_image,exception);
            if (GetBlobSize(jpeg_image) <= extent)
              minimum=jpeg_image->quality+1;
            else
              maximum=jpeg_image->quality-1;
            (void) RelinquishUniqueFileResource(jpeg_image->filename);
          }
          quality=(int) minimum-1;
          jpeg_image=DestroyImage(jpeg_image);
        }
      extent_info=DestroyImageInfo(extent_info);
    }
  jpeg_set_quality(&jpeg_info,quality,TRUE);
#if (JPEG_LIB_VERSION >= 70)
  option=GetImageOption(image_info,"quality");
  if (option != (const char *) NULL)
    {
      GeometryInfo
        geometry_info;

      int
        flags;

      /*
        Set quality scaling for luminance and chrominance separately.
      */
      flags=ParseGeometry(option,&geometry_info);
      if (((flags & RhoValue) != 0) && ((flags & SigmaValue) != 0))
        {
          jpeg_info.q_scale_factor[0]=jpeg_quality_scaling((int)
            (geometry_info.rho+0.5));
          jpeg_info.q_scale_factor[1]=jpeg_quality_scaling((int)
            (geometry_info.sigma+0.5));
          jpeg_default_qtables(&jpeg_info,TRUE);
        }
    }
#endif
  /* An explicit jpeg:colorspace option/property may override the
     colorspace; sampling-factor hints only apply when it matches. */
  colorspace=jpeg_info.in_color_space;
  value=GetImageOption(image_info,"jpeg:colorspace");
  if (value == (char *) NULL)
    value=GetImageProperty(image,"jpeg:colorspace",exception);
  if (value != (char *) NULL)
    colorspace=StringToInteger(value);
  sampling_factor=(const char *) NULL;
  if (colorspace == jpeg_info.in_color_space)
    {
      value=GetImageOption(image_info,"jpeg:sampling-factor");
      if (value == (char *) NULL)
        value=GetImageProperty(image,"jpeg:sampling-factor",exception);
      if (value != (char *) NULL)
        {
          sampling_factor=value;
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              "  Input sampling-factors=%s",sampling_factor);
        }
    }
  value=GetImageOption(image_info,"jpeg:sampling-factor");
  if (image_info->sampling_factor != (char *) NULL)
    sampling_factor=image_info->sampling_factor;
  if (sampling_factor == (const char *) NULL)
    {
      /* No explicit factors: disable chroma subsampling at high
         quality (>= 90) by forcing 1x1 for every component. */
      if (quality >= 90)
        for (i=0; i < MAX_COMPONENTS; i++)
        {
          jpeg_info.comp_info[i].h_samp_factor=1;
          jpeg_info.comp_info[i].v_samp_factor=1;
        }
    }
  else
    {
      char
        **factors;

      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      /*
        Set sampling factor.
      */
      i=0;
      factors=SamplingFactorToList(sampling_factor);
      if (factors != (char **) NULL)
        {
          for (i=0; i < MAX_COMPONENTS; i++)
          {
            if (factors[i] == (char *) NULL)
              break;
            flags=ParseGeometry(factors[i],&geometry_info);
            if ((flags & SigmaValue) == 0)
              geometry_info.sigma=geometry_info.rho;
            jpeg_info.comp_info[i].h_samp_factor=(int) geometry_info.rho;
            jpeg_info.comp_info[i].v_samp_factor=(int) geometry_info.sigma;
            factors[i]=(char *) RelinquishMagickMemory(factors[i]);
          }
          factors=(char **) RelinquishMagickMemory(factors);
        }
      /* Components without an explicit factor default to 1x1. */
      for ( ; i < MAX_COMPONENTS; i++)
      {
        jpeg_info.comp_info[i].h_samp_factor=1;
        jpeg_info.comp_info[i].v_samp_factor=1;
      }
    }
  option=GetImageOption(image_info,"jpeg:q-table");
  if (option != (const char *) NULL)
    {
      QuantizationTable
        *table;

      /*
        Custom quantization tables.
      */
      table=GetQuantizationTable(option,"0",exception);
      if (table != (QuantizationTable *) NULL)
        {
          for (i=0; i < MAX_COMPONENTS; i++)
            jpeg_info.comp_info[i].quant_tbl_no=0;
          jpeg_add_quant_table(&jpeg_info,0,table->levels,
            jpeg_quality_scaling(quality),0);
          table=DestroyQuantizationTable(table);
        }
      table=GetQuantizationTable(option,"1",exception);
      if (table != (QuantizationTable *) NULL)
        {
          for (i=1; i < MAX_COMPONENTS; i++)
            jpeg_info.comp_info[i].quant_tbl_no=1;
          jpeg_add_quant_table(&jpeg_info,1,table->levels,
            jpeg_quality_scaling(quality),0);
          table=DestroyQuantizationTable(table);
        }
      table=GetQuantizationTable(option,"2",exception);
      if (table != (QuantizationTable *) NULL)
        {
          for (i=2; i < MAX_COMPONENTS; i++)
            jpeg_info.comp_info[i].quant_tbl_no=2;
          jpeg_add_quant_table(&jpeg_info,2,table->levels,
            jpeg_quality_scaling(quality),0);
          table=DestroyQuantizationTable(table);
        }
      table=GetQuantizationTable(option,"3",exception);
      if (table != (QuantizationTable *) NULL)
        {
          for (i=3; i < MAX_COMPONENTS; i++)
            jpeg_info.comp_info[i].quant_tbl_no=3;
          jpeg_add_quant_table(&jpeg_info,3,table->levels,
            jpeg_quality_scaling(quality),0);
          table=DestroyQuantizationTable(table);
        }
    }
  jpeg_start_compress(&jpeg_info,TRUE);
  if (image->debug != MagickFalse)
    {
      if (image->storage_class == PseudoClass)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "Storage class: PseudoClass");
      else
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "Storage class: DirectClass");
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Depth: %.20g",
        (double) image->depth);
      if (image->colors != 0)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "Number of colors: %.20g",(double) image->colors);
      else
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "Number of colors: unspecified");
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        "JPEG data precision: %d",(int) jpeg_info.data_precision);
      switch (image->colorspace)
      {
        case CMYKColorspace:
        {
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "Storage class: DirectClass");
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "Colorspace: CMYK");
          break;
        }
        case YCbCrColorspace:
        case Rec601YCbCrColorspace:
        case Rec709YCbCrColorspace:
        {
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "Colorspace: YCbCr");
          break;
        }
        default:
          break;
      }
      switch (image->colorspace)
      {
        case CMYKColorspace:
        {
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "Colorspace: CMYK");
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "Sampling factors: %dx%d,%dx%d,%dx%d,%dx%d",
            jpeg_info.comp_info[0].h_samp_factor,
            jpeg_info.comp_info[0].v_samp_factor,
            jpeg_info.comp_info[1].h_samp_factor,
            jpeg_info.comp_info[1].v_samp_factor,
            jpeg_info.comp_info[2].h_samp_factor,
            jpeg_info.comp_info[2].v_samp_factor,
            jpeg_info.comp_info[3].h_samp_factor,
            jpeg_info.comp_info[3].v_samp_factor);
          break;
        }
        case GRAYColorspace:
        {
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "Colorspace: GRAY");
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "Sampling factors: %dx%d",jpeg_info.comp_info[0].h_samp_factor,
            jpeg_info.comp_info[0].v_samp_factor);
          break;
        }
        case sRGBColorspace:
        case RGBColorspace:
        {
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "Image colorspace is RGB");
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "Sampling factors: %dx%d,%dx%d,%dx%d",
            jpeg_info.comp_info[0].h_samp_factor,
            jpeg_info.comp_info[0].v_samp_factor,
            jpeg_info.comp_info[1].h_samp_factor,
            jpeg_info.comp_info[1].v_samp_factor,
            jpeg_info.comp_info[2].h_samp_factor,
            jpeg_info.comp_info[2].v_samp_factor);
          break;
        }
        case YCbCrColorspace:
        case Rec601YCbCrColorspace:
        case Rec709YCbCrColorspace:
        {
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "Colorspace: YCbCr");
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "Sampling factors: %dx%d,%dx%d,%dx%d",
            jpeg_info.comp_info[0].h_samp_factor,
            jpeg_info.comp_info[0].v_samp_factor,
            jpeg_info.comp_info[1].h_samp_factor,
            jpeg_info.comp_info[1].v_samp_factor,
            jpeg_info.comp_info[2].h_samp_factor,
            jpeg_info.comp_info[2].v_samp_factor);
          break;
        }
        default:
        {
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: %d",
            image->colorspace);
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "Sampling factors: %dx%d,%dx%d,%dx%d,%dx%d",
            jpeg_info.comp_info[0].h_samp_factor,
            jpeg_info.comp_info[0].v_samp_factor,
            jpeg_info.comp_info[1].h_samp_factor,
            jpeg_info.comp_info[1].v_samp_factor,
            jpeg_info.comp_info[2].h_samp_factor,
            jpeg_info.comp_info[2].v_samp_factor,
            jpeg_info.comp_info[3].h_samp_factor,
            jpeg_info.comp_info[3].v_samp_factor);
          break;
        }
      }
    }
  /*
    Write JPEG profiles.
  */
  value=GetImageProperty(image,"comment",exception);
  if (value != (char *) NULL)
    for (i=0; i < (ssize_t) strlen(value); i+=65533L)
      jpeg_write_marker(&jpeg_info,JPEG_COM,(unsigned char *) value+i,
        (unsigned int) MagickMin((size_t) strlen(value+i),65533L));
  if (image->profiles != (void *) NULL)
    WriteProfile(&jpeg_info,image,exception);
  /*
    Convert MIFF to JPEG raster pixels.
  */
  memory_info=AcquireVirtualMemory((size_t) image->columns,
    jpeg_info.input_components*sizeof(*jpeg_pixels));
  if (memory_info == (MemoryInfo *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  jpeg_pixels=(JSAMPLE *) GetVirtualMemoryBlob(memory_info);
  /* Second recovery point: from here on the pixel buffer must also be
     released when libjpeg aborts. */
  if (setjmp(error_manager.error_recovery) != 0)
    {
      jpeg_destroy_compress(&jpeg_info);
      if (memory_info != (MemoryInfo *) NULL)
        memory_info=RelinquishVirtualMemory(memory_info);
      (void) CloseBlob(image);
      return(MagickFalse);
    }
  scanline[0]=(JSAMPROW) jpeg_pixels;
  /* Scale quantum samples down to the JPEG data precision. */
  scale=65535/(unsigned short) GetQuantumRange((size_t)
    jpeg_info.data_precision);
  if (scale == 0)
    scale=1;
  if (jpeg_info.data_precision <= 8)
    {
      if ((jpeg_info.in_color_space == JCS_RGB) ||
          (jpeg_info.in_color_space == JCS_YCbCr))
        for (y=0; y < (ssize_t) image->rows; y++)
        {
          register const Quantum
            *p;

          register ssize_t
            x;

          p=GetVirtualPixels(image,0,y,image->columns,1,exception);
          if (p == (const Quantum *) NULL)
            break;
          q=jpeg_pixels;
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            *q++=(JSAMPLE) ScaleQuantumToChar(GetPixelRed(image,p));
            *q++=(JSAMPLE) ScaleQuantumToChar(GetPixelGreen(image,p));
            *q++=(JSAMPLE) ScaleQuantumToChar(GetPixelBlue(image,p));
            p+=GetPixelChannels(image);
          }
          (void) jpeg_write_scanlines(&jpeg_info,scanline,1);
          status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
            image->rows);
          if (status == MagickFalse)
            break;
        }
      else
        if (jpeg_info.in_color_space == JCS_GRAYSCALE)
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register const Quantum
              *p;

            register ssize_t
              x;

            p=GetVirtualPixels(image,0,y,image->columns,1,exception);
            if (p == (const Quantum *) NULL)
              break;
            q=jpeg_pixels;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              *q++=(JSAMPLE) ScaleQuantumToChar(ClampToQuantum(GetPixelLuma(
                image,p)));
              p+=GetPixelChannels(image);
            }
            (void) jpeg_write_scanlines(&jpeg_info,scanline,1);
            status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
              image->rows);
            if (status == MagickFalse)
              break;
          }
        else
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register const Quantum
              *p;

            register ssize_t
              x;

            p=GetVirtualPixels(image,0,y,image->columns,1,exception);
            if (p == (const Quantum *) NULL)
              break;
            q=jpeg_pixels;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              /*
                Convert DirectClass packets to contiguous CMYK scanlines.
              */
              *q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange-
                GetPixelCyan(image,p))));
              *q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange-
                GetPixelMagenta(image,p))));
              *q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange-
                GetPixelYellow(image,p))));
              *q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange-
                GetPixelBlack(image,p))));
              p+=GetPixelChannels(image);
            }
            (void) jpeg_write_scanlines(&jpeg_info,scanline,1);
            status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
              image->rows);
            if (status == MagickFalse)
              break;
          }
    }
  else
    if (jpeg_info.in_color_space == JCS_GRAYSCALE)
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register const Quantum
          *p;

        register ssize_t
          x;

        p=GetVirtualPixels(image,0,y,image->columns,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        q=jpeg_pixels;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          *q++=(JSAMPLE) (ScaleQuantumToShort(ClampToQuantum(GetPixelLuma(image,
            p)))/scale);
          p+=GetPixelChannels(image);
        }
        (void) jpeg_write_scanlines(&jpeg_info,scanline,1);
        status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
          image->rows);
        if (status == MagickFalse)
          break;
      }
    else
      if ((jpeg_info.in_color_space == JCS_RGB) ||
          (jpeg_info.in_color_space == JCS_YCbCr))
        for (y=0; y < (ssize_t) image->rows; y++)
        {
          register const Quantum
            *p;

          register ssize_t
            x;

          p=GetVirtualPixels(image,0,y,image->columns,1,exception);
          if (p == (const Quantum *) NULL)
            break;
          q=jpeg_pixels;
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            *q++=(JSAMPLE) (ScaleQuantumToShort(GetPixelRed(image,p))/scale);
            *q++=(JSAMPLE) (ScaleQuantumToShort(GetPixelGreen(image,p))/scale);
            *q++=(JSAMPLE) (ScaleQuantumToShort(GetPixelBlue(image,p))/scale);
            p+=GetPixelChannels(image);
          }
          (void) jpeg_write_scanlines(&jpeg_info,scanline,1);
          status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
            image->rows);
          if (status == MagickFalse)
            break;
        }
      else
        for (y=0; y < (ssize_t) image->rows; y++)
        {
          register const Quantum
            *p;

          register ssize_t
            x;

          p=GetVirtualPixels(image,0,y,image->columns,1,exception);
          if (p == (const Quantum *) NULL)
            break;
          q=jpeg_pixels;
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            /*
              Convert DirectClass packets to contiguous CMYK scanlines.
            */
            *q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelRed(
              image,p))/scale);
            *q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelGreen(
              image,p))/scale);
            *q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelBlue(
              image,p))/scale);
            *q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelBlack(
              image,p))/scale);
            p+=GetPixelChannels(image);
          }
          (void) jpeg_write_scanlines(&jpeg_info,scanline,1);
          status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
            image->rows);
          if (status == MagickFalse)
            break;
        }
  /* Only finish (emit EOI) when every row was written. */
  if (y == (ssize_t) image->rows)
    jpeg_finish_compress(&jpeg_info);
  /*
    Relinquish resources.
  */
  jpeg_destroy_compress(&jpeg_info);
  memory_info=RelinquishVirtualMemory(memory_info);
  (void) CloseBlob(image);
  return(MagickTrue);
}
| 0 |
[
"CWE-200",
"CWE-703"
] |
ImageMagick
|
f6463ca9588579633bbaed9460899d892aa3c64a
| 71,607,698,861,672,940,000,000,000,000,000,000,000 | 791 |
Zero pixel buffer
|
/* Return the string list cached in this binary file's luac bin-info
 * object, or NULL when the file or its info object is absent.  The
 * returned list stays owned by the bin-info object. */
static RzList *strings(RzBinFile *bf) {
	LuacBinInfo *info;

	if (bf == NULL) {
		return NULL;
	}
	info = GET_INTERNAL_BIN_INFO_OBJ(bf);
	return info != NULL ? info->string_list : NULL;
}
| 1 |
[
"CWE-200",
"CWE-787"
] |
rizin
|
05bbd147caccc60162d6fba9baaaf24befa281cd
| 221,187,456,620,693,900,000,000,000,000,000,000,000 | 11 |
Fix oob read on _luac_build_info and luac memleaks
|
// Verify buffering of 1-RTT data that arrives before the handshake
// completes: only up to transportSettings.maxPacketsToBuffer packets are
// queued in pendingOneRttData; each packet beyond the limit is dropped
// (observed via the onPacketDropped callback).  No streams are created
// while the data is still pending, and after clearing the queue a
// zero-length delivery leaves it empty.
TEST_F(QuicUnencryptedServerTransportTest, TestPendingOneRttData) {
  recvClientHello();
  auto data = IOBuf::copyBuffer("bad data");
  size_t expectedPendingLen =
      server->getConn().transportSettings.maxPacketsToBuffer;
  // Deliver 10 more 1-RTT stream packets than the buffer can hold.
  for (size_t i = 0; i < expectedPendingLen + 10; ++i) {
    StreamId streamId = static_cast<StreamId>(i);
    auto packetData = packetToBuf(createStreamPacket(
        *clientConnectionId,
        *server->getConn().serverConnectionId,
        clientNextAppDataPacketNum++,
        streamId,
        *data,
        0 /* cipherOverhead */,
        0 /* largestAcked */));
    // Every packet past the limit must be reported as dropped.
    EXPECT_CALL(*transportInfoCb_, onPacketDropped(_));
    deliverData(std::move(packetData));
  }
  EXPECT_EQ(server->getConn().streamManager->streamCount(), 0);
  EXPECT_EQ(server->getConn().pendingOneRttData->size(), expectedPendingLen);
  server->getNonConstConn().pendingOneRttData->clear();
  deliverData(IOBuf::create(0));
  EXPECT_TRUE(server->getConn().pendingOneRttData->empty());
}
| 0 |
[
"CWE-617",
"CWE-703"
] |
mvfst
|
a67083ff4b8dcbb7ee2839da6338032030d712b0
| 57,011,298,777,276,090,000,000,000,000,000,000,000 | 25 |
Close connection if we derive an extra 1-rtt write cipher
Summary: Fixes CVE-2021-24029
Reviewed By: mjoras, lnicco
Differential Revision: D26613890
fbshipit-source-id: 19bb2be2c731808144e1a074ece313fba11f1945
|
/*
 * Ensure @handle has at least @needed journal credits, extending or
 * restarting the transaction as required.
 *
 * Returns 0 when nothing needed to be done (handle invalid, or enough
 * credits already available), a negative error on failure, and -EAGAIN
 * after a successful transaction restart so the caller knows any
 * journal-dependent state it cached must be revalidated.
 */
static int ext4_ext_truncate_extend_restart(handle_t *handle,
					    struct inode *inode,
					    int needed)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;
	if (handle->h_buffer_credits >= needed)
		return 0;
	/*
	 * If we need to extend the journal get a few extra blocks
	 * while we're at it for efficiency's sake.
	 */
	needed += 3;
	err = ext4_journal_extend(handle, needed - handle->h_buffer_credits);
	if (err <= 0)
		return err;	/* 0: extended in place; <0: error */
	err = ext4_truncate_restart_trans(handle, inode, needed);
	if (err == 0)
		err = -EAGAIN;	/* restarted: caller must revalidate */
	return err;
}
| 0 |
[
"CWE-125"
] |
linux
|
bc890a60247171294acc0bd67d211fa4b88d40ba
| 269,888,726,380,693,600,000,000,000,000,000,000,000 | 24 |
ext4: verify the depth of extent tree in ext4_find_extent()
If there is a corrupted file system where the claimed depth of the
extent tree is -1, this can cause a massive buffer overrun leading to
sadness.
This addresses CVE-2018-10877.
https://bugzilla.kernel.org/show_bug.cgi?id=199417
Signed-off-by: Theodore Ts'o <[email protected]>
Cc: [email protected]
|
/* Dump a HintTrackInfoBox as XML into 'trace'.  Always returns GF_OK. */
GF_Err hnti_box_dump(GF_Box *a, FILE * trace)
{
	const char *box_name = "HintTrackInfoBox";

	gf_isom_box_dump_start(a, box_name, trace);
	gf_fprintf(trace, ">\n");
	gf_isom_box_dump_done(box_name, a, trace);
	return GF_OK;
}
| 0 |
[
"CWE-787"
] |
gpac
|
ea1eca00fd92fa17f0e25ac25652622924a9a6a0
| 248,001,373,641,045,170,000,000,000,000,000,000,000 | 7 |
fixed #2138
|
/*
 * <proc1> <proc2> .eqproc <bool>
 *
 * Compare two procedures for structural equality, descending at most
 * MAX_DEPTH levels into nested arrays.  Pops one operand and replaces
 * the other with the boolean result.
 *
 * Fix (bug 697799): validate that both operands really are array-like
 * before treating them as arrays.  Previously arbitrary operands were
 * wrapped with make_array() unchecked, so non-array objects were read
 * as arrays (type confusion).  Non-array operands now simply compare
 * unequal and yield 'false'.
 * NOTE(review): operand-count (stack underflow) checking still relies
 * on the interpreter's generic operand handling -- confirm the operator
 * table declares .eqproc with two operands; upstream's fix may instead
 * raise a typecheck error for non-array operands.
 */
zeqproc(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    ref2_t stack[MAX_DEPTH + 1];
    ref2_t *top = stack;

    /* Parameter check: both operands must be arrays/procedures. */
    if (!r_is_array(op - 1) || !r_is_array(op)) {
        make_false(op - 1);
        pop(1);
        return 0;
    }
    make_array(&stack[0].proc1, 0, 1, op - 1);
    make_array(&stack[0].proc2, 0, 1, op);
    for (;;) {
        long i;

        if (r_size(&top->proc1) == 0) {
            /* Finished these arrays, go up to next level. */
            if (top == stack) {
                /* We're done matching: it succeeded. */
                make_true(op - 1);
                pop(1);
                return 0;
            }
            --top;
            continue;
        }
        /* Look at the next elements of the arrays. */
        i = r_size(&top->proc1) - 1;
        array_get(imemory, &top->proc1, i, &top[1].proc1);
        array_get(imemory, &top->proc2, i, &top[1].proc2);
        r_dec_size(&top->proc1, 1);
        ++top;
        /*
         * Amazingly enough, the objects' executable attributes are not
         * required to match. This means { x load } will match { /x load },
         * even though this is clearly wrong.
         */
#if 0
        if (r_has_attr(&top->proc1, a_executable) !=
            r_has_attr(&top->proc2, a_executable)
            )
            break;
#endif
        if (obj_eq(imemory, &top->proc1, &top->proc2)) {
            /* Names don't match strings. */
            if (r_type(&top->proc1) != r_type(&top->proc2) &&
                (r_type(&top->proc1) == t_name ||
                 r_type(&top->proc2) == t_name)
                )
                break;
            --top;              /* no recursion */
            continue;
        }
        if (r_is_array(&top->proc1) && r_is_array(&top->proc2) &&
            r_size(&top->proc1) == r_size(&top->proc2) &&
            top < stack + (MAX_DEPTH - 1)
            ) {
            /* Descend into the arrays. */
            continue;
        }
        break;
    }
    /* An exit from the loop indicates that matching failed. */
    make_false(op - 1);
    pop(1);
    return 0;
}
| 1 |
[] |
ghostpdl
|
4f83478c88c2e05d6e8d79ca4557eb039354d2f3
| 296,662,451,619,257,640,000,000,000,000,000,000,000 | 63 |
Bug 697799: have .eqproc check its parameters
The Ghostscript custom operator .eqproc was not check the number or type of
the parameters it was given.
|
/*
 * Shrink the uthash table *rcvrs from 'size' entries down to at most
 * 'max' by deleting and freeing the first (size - max) receivers in
 * hash-iteration order.
 *
 * Returns the original table head unchanged when size < max, otherwise
 * the first surviving receiver; returns NULL only if iteration ends
 * before 'count' deletions complete.
 * NOTE(review): when size == max the loop deletes nothing and returns
 * the first element (not *rcvrs) -- confirm callers expect that.
 */
static struct receiver *cutBackTo(struct receiver **rcvrs, u_int32_t size, u_int32_t max) {
  struct receiver *r, *tmp;
  int i=0;
  int count;

  if(size < max) //return the original table
    return *rcvrs;

  count = size - max;

  HASH_ITER(hh, *rcvrs, r, tmp) {
    if(i++ == count)
      return r;             /* first kept entry */
    HASH_DEL(*rcvrs, r);    /* unlink from the table, then free */
    free(r);
  }

  return(NULL);
}
| 0 |
[
"CWE-125"
] |
nDPI
|
b7e666e465f138ae48ab81976726e67deed12701
| 248,993,185,648,948,930,000,000,000,000,000,000,000 | 20 |
Added fix to avoid potential heap buffer overflow in H.323 dissector
Modified HTTP report information to make it closer to the HTTP field names
|
/*
 - pluscount - compute the deepest nesting of OPLUS_/O_PLUS pairs in
   the compiled strip of g.
 * Returns 0 immediately when a parse error was already recorded (the
 * strip may then lack a terminating OEND).  An unbalanced pairing at
 * the end marks the guts as BAD.
 */
pluscount(p, g)
struct parse *p;
register struct re_guts *g;
{
	register sop *pc;
	register sop opcode;
	register sopno depth = 0;
	register sopno deepest = 0;

	if (p->error != 0)
		return(0);	/* there may not be an OEND */

	pc = g->strip + 1;
	do {
		opcode = *pc++;
		switch (OP(opcode)) {
		case OPLUS_:
			depth++;
			break;
		case O_PLUS:
			if (depth > deepest)
				deepest = depth;
			depth--;
			break;
		}
	} while (OP(opcode) != OEND);
	if (depth != 0)
		g->iflags |= BAD;	/* unbalanced +-nesting */
	return(deepest);
}
| 0 |
[] |
php-src
|
124fb22a13fafa3648e4e15b4f207c7096d8155e
| 18,474,783,793,941,650,000,000,000,000,000,000,000 | 30 |
Fixed bug #68739 #68740 #68741
|
/* Invoke the sched-out preempt notifiers for @curr (being switched out
 * in favor of @next), but only when the static key indicates at least
 * one notifier is registered; otherwise this is a patched-out no-op. */
fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
{
	if (!static_key_false(&preempt_notifier_key))
		return;

	__fire_sched_out_preempt_notifiers(curr, next);
}
| 0 |
[
"CWE-119"
] |
linux
|
29d6455178a09e1dc340380c582b13356227e8df
| 259,260,862,699,492,140,000,000,000,000,000,000,000 | 6 |
sched: panic on corrupted stack end
Until now, hitting this BUG_ON caused a recursive oops (because oops
handling involves do_exit(), which calls into the scheduler, which in
turn raises an oops), which caused stuff below the stack to be
overwritten until a panic happened (e.g. via an oops in interrupt
context, caused by the overwritten CPU index in the thread_info).
Just panic directly.
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
  // RAII guard teardown: restore the THD's sql_mode to the value saved
  // at construction time (old_mode).
  ~Sql_mode_save() { thd->variables.sql_mode = old_mode; }
| 0 |
[
"CWE-416"
] |
server
|
4681b6f2d8c82b4ec5cf115e83698251963d80d5
| 7,248,530,576,348,869,000,000,000,000,000,000,000 | 1 |
MDEV-26281 ASAN use-after-poison when complex conversion is involved in blob
the bug was that in_vector array in Item_func_in was allocated in the
statement arena, not in the table->expr_arena.
revert part of the 5acd391e8b2d. Instead, change the arena correctly
in fix_all_session_vcol_exprs().
Remove TABLE_ARENA, that was introduced in 5acd391e8b2d to force
item tree changes to be rolled back (because they were allocated in the
wrong arena and didn't persist. now they do)
|
  /// Return a raw \c const \c char* into this ASCII string's storage,
  /// offset by \c startIndex_.  Must only be called on ASCII strings
  /// (asserted).  Non-handle strings read \c nonManagedStringPtr_
  /// directly; handle-backed strings derive the pointer from the
  /// managed StringPrimitive.  The pointer's lifetime is tied to the
  /// underlying string storage.
  const char *castToCharPtr() const {
    assert(isASCII() && "Cannot cast char16_t pointer to char pointer");
    if (!isHandle_) {
      return static_cast<const char *>(nonManagedStringPtr_) + startIndex_;
    }
    assert(isHandle_ && "StringView does not contain a valid string");
    return (*strPrim())->castToASCIIPointer() + startIndex_;
  }
| 0 |
[
"CWE-416",
"CWE-703"
] |
hermes
|
d86e185e485b6330216dee8e854455c694e3a36e
| 58,793,079,353,706,880,000,000,000,000,000,000,000 | 8 |
Fix a bug in transient object property assignment and getUTF16Ref
Summary:
The returned `UTF16Ref` from `StringView::getUTF16Ref` can be invalidated by
appending more contents to the same allocator.
This case was encountered in `transientObjectPutErrorMessage`, resulting in
using free'd memory.
Reviewed By: tmikov
Differential Revision: D23034855
fbshipit-source-id: 4c25a5369934bf3bdfc5582385503f4b87de3792
|
template<> struct type<unsigned int> {
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 87,287,759,961,777,130,000,000,000,000,000,000,000 | 1 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
static void MP4_FreeBox_stss( MP4_Box_t *p_box )
{
FREENULL( p_box->data.p_stss->i_sample_number );
}
| 0 |
[
"CWE-120",
"CWE-191",
"CWE-787"
] |
vlc
|
2e7c7091a61aa5d07e7997b393d821e91f593c39
| 58,082,576,904,324,800,000,000,000,000,000,000,000 | 4 |
demux: mp4: fix buffer overflow in parsing of string boxes.
We ensure that pbox->i_size is never smaller than 8 to avoid an
integer underflow in the third argument of the subsequent call to
memcpy. We also make sure no truncation occurs when passing values
derived from the 64 bit integer p_box->i_size to arguments of malloc
and memcpy that may be 32 bit integers on 32 bit platforms.
Signed-off-by: Jean-Baptiste Kempf <[email protected]>
|
bgp_get_channel(struct bgp_proto *p, u32 afi)
{
uint i;
for (i = 0; i < p->channel_count; i++)
if (p->afi_map[i] == afi)
return p->channel_map[i];
return NULL;
}
| 0 |
[
"CWE-787"
] |
bird
|
8388f5a7e14108a1458fea35bfbb5a453e2c563c
| 110,062,380,878,145,780,000,000,000,000,000,000,000 | 10 |
BGP: Fix bugs in handling of shutdown messages
There is an improper check for valid message size, which may lead to
stack overflow and buffer leaks to log when a large message is received.
Thanks to Daniel McCarney for bugreport and analysis.
|
static void init_repository_format(struct repository_format *format)
{
const struct repository_format fresh = REPOSITORY_FORMAT_INIT;
memcpy(format, &fresh, sizeof(fresh));
}
| 0 |
[
"CWE-22"
] |
git
|
3b0bf2704980b1ed6018622bdf5377ec22289688
| 4,561,810,783,104,843,000,000,000,000,000,000,000 | 6 |
setup: tighten ownership checks post CVE-2022-24765
8959555cee7 (setup_git_directory(): add an owner check for the top-level
directory, 2022-03-02), adds a function to check for ownership of
repositories using a directory that is representative of it, and ways to
add exempt a specific repository from said check if needed, but that
check didn't account for owership of the gitdir, or (when used) the
gitfile that points to that gitdir.
An attacker could create a git repository in a directory that they can
write into but that is owned by the victim to work around the fix that
was introduced with CVE-2022-24765 to potentially run code as the
victim.
An example that could result in privilege escalation to root in *NIX would
be to set a repository in a shared tmp directory by doing (for example):
$ git -C /tmp init
To avoid that, extend the ensure_valid_ownership function to be able to
check for all three paths.
This will have the side effect of tripling the number of stat() calls
when a repository is detected, but the effect is expected to be likely
minimal, as it is done only once during the directory walk in which Git
looks for a repository.
Additionally make sure to resolve the gitfile (if one was used) to find
the relevant gitdir for checking.
While at it change the message printed on failure so it is clear we are
referring to the repository by its worktree (or gitdir if it is bare) and
not to a specific directory.
Helped-by: Junio C Hamano <[email protected]>
Helped-by: Johannes Schindelin <[email protected]>
Signed-off-by: Carlo Marcelo Arenas Belón <[email protected]>
|
cql_server::response::placeholder<int32_t> cql_server::response::write_int_placeholder() {
return placeholder<int32_t>(_body.write_place_holder(sizeof(int32_t)));
}
| 0 |
[] |
scylladb
|
1c2eef384da439b0457b6d71c7e37d7268e471cb
| 239,617,745,577,524,600,000,000,000,000,000,000,000 | 3 |
transport/server.cc: Return correct size of decompressed lz4 buffer
An incorrect size is returned from the function, which could lead to
crashes or undefined behavior. Fix by erroring out in these cases.
Fixes #11476
|
static int sctp_listen_start(struct sock *sk, int backlog)
{
struct sctp_sock *sp = sctp_sk(sk);
struct sctp_endpoint *ep = sp->ep;
struct crypto_shash *tfm = NULL;
char alg[32];
/* Allocate HMAC for generating cookie. */
if (!sp->hmac && sp->sctp_hmac_alg) {
sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg);
tfm = crypto_alloc_shash(alg, 0, 0);
if (IS_ERR(tfm)) {
net_info_ratelimited("failed to load transform for %s: %ld\n",
sp->sctp_hmac_alg, PTR_ERR(tfm));
return -ENOSYS;
}
sctp_sk(sk)->hmac = tfm;
}
/*
* If a bind() or sctp_bindx() is not called prior to a listen()
* call that allows new associations to be accepted, the system
* picks an ephemeral port and will choose an address set equivalent
* to binding with a wildcard address.
*
* This is not currently spelled out in the SCTP sockets
* extensions draft, but follows the practice as seen in TCP
* sockets.
*
*/
sk->sk_state = SCTP_SS_LISTENING;
if (!ep->base.bind_addr.port) {
if (sctp_autobind(sk))
return -EAGAIN;
} else {
if (sctp_get_port(sk, inet_sk(sk)->inet_num)) {
sk->sk_state = SCTP_SS_CLOSED;
return -EADDRINUSE;
}
}
sk->sk_max_ack_backlog = backlog;
sctp_hash_endpoint(ep);
return 0;
}
| 0 |
[
"CWE-617",
"CWE-362"
] |
linux
|
2dcab598484185dea7ec22219c76dcdd59e3cb90
| 330,887,683,879,596,500,000,000,000,000,000,000,000 | 45 |
sctp: avoid BUG_ON on sctp_wait_for_sndbuf
Alexander Popov reported that an application may trigger a BUG_ON in
sctp_wait_for_sndbuf if the socket tx buffer is full, a thread is
waiting on it to queue more data and meanwhile another thread peels off
the association being used by the first thread.
This patch replaces the BUG_ON call with a proper error handling. It
will return -EPIPE to the original sendmsg call, similarly to what would
have been done if the association wasn't found in the first place.
Acked-by: Alexander Popov <[email protected]>
Signed-off-by: Marcelo Ricardo Leitner <[email protected]>
Reviewed-by: Xin Long <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
keepalived_main(int argc, char **argv)
{
bool report_stopped = true;
struct utsname uname_buf;
char *end;
/* Ensure time_now is set. We then don't have to check anywhere
* else if it is set. */
set_time_now();
/* Save command line options in case need to log them later */
save_cmd_line_options(argc, argv);
/* Init debugging level */
debug = 0;
/* We are the parent process */
#ifndef _DEBUG_
prog_type = PROG_TYPE_PARENT;
#endif
/* Initialise daemon_mode */
#ifdef _WITH_VRRP_
__set_bit(DAEMON_VRRP, &daemon_mode);
#endif
#ifdef _WITH_LVS_
__set_bit(DAEMON_CHECKERS, &daemon_mode);
#endif
#ifdef _WITH_BFD_
__set_bit(DAEMON_BFD, &daemon_mode);
#endif
/* Set default file creation mask */
umask(022);
/* Open log with default settings so we can log initially */
openlog(PACKAGE_NAME, LOG_PID, log_facility);
#ifdef _MEM_CHECK_
mem_log_init(PACKAGE_NAME, "Parent process");
#endif
/* Some functionality depends on kernel version, so get the version here */
if (uname(&uname_buf))
log_message(LOG_INFO, "Unable to get uname() information - error %d", errno);
else {
os_major = (unsigned)strtoul(uname_buf.release, &end, 10);
if (*end != '.')
os_major = 0;
else {
os_minor = (unsigned)strtoul(end + 1, &end, 10);
if (*end != '.')
os_major = 0;
else {
if (!isdigit(end[1]))
os_major = 0;
else
os_release = (unsigned)strtoul(end + 1, &end, 10);
}
}
if (!os_major)
log_message(LOG_INFO, "Unable to parse kernel version %s", uname_buf.release);
/* config_id defaults to hostname */
if (!config_id) {
end = strchrnul(uname_buf.nodename, '.');
config_id = MALLOC((size_t)(end - uname_buf.nodename) + 1);
strncpy(config_id, uname_buf.nodename, (size_t)(end - uname_buf.nodename));
config_id[end - uname_buf.nodename] = '\0';
}
}
/*
* Parse command line and set debug level.
* bits 0..7 reserved by main.c
*/
if (parse_cmdline(argc, argv)) {
closelog();
if (!__test_bit(NO_SYSLOG_BIT, &debug))
openlog(PACKAGE_NAME, LOG_PID | ((__test_bit(LOG_CONSOLE_BIT, &debug)) ? LOG_CONS : 0) , log_facility);
}
if (__test_bit(LOG_CONSOLE_BIT, &debug))
enable_console_log();
#ifdef GIT_COMMIT
log_message(LOG_INFO, "Starting %s, git commit %s", version_string, GIT_COMMIT);
#else
log_message(LOG_INFO, "Starting %s", version_string);
#endif
/* Handle any core file requirements */
core_dump_init();
if (os_major) {
if (KERNEL_VERSION(os_major, os_minor, os_release) < LINUX_VERSION_CODE) {
/* keepalived was build for a later kernel version */
log_message(LOG_INFO, "WARNING - keepalived was build for newer Linux %d.%d.%d, running on %s %s %s",
(LINUX_VERSION_CODE >> 16) & 0xff,
(LINUX_VERSION_CODE >> 8) & 0xff,
(LINUX_VERSION_CODE ) & 0xff,
uname_buf.sysname, uname_buf.release, uname_buf.version);
} else {
/* keepalived was build for a later kernel version */
log_message(LOG_INFO, "Running on %s %s %s (built for Linux %d.%d.%d)",
uname_buf.sysname, uname_buf.release, uname_buf.version,
(LINUX_VERSION_CODE >> 16) & 0xff,
(LINUX_VERSION_CODE >> 8) & 0xff,
(LINUX_VERSION_CODE ) & 0xff);
}
}
#ifndef _DEBUG_
log_command_line(0);
#endif
/* Check we can read the configuration file(s).
NOTE: the working directory will be / if we
forked, but will be the current working directory
when keepalived was run if we haven't forked.
This means that if any config file names are not
absolute file names, the behaviour will be different
depending on whether we forked or not. */
if (!check_conf_file(conf_file)) {
if (__test_bit(CONFIG_TEST_BIT, &debug))
config_test_exit();
goto end;
}
global_data = alloc_global_data();
global_data->umask = umask_val;
read_config_file();
init_global_data(global_data, NULL);
#if HAVE_DECL_CLONE_NEWNET
if (override_namespace) {
if (global_data->network_namespace) {
log_message(LOG_INFO, "Overriding config net_namespace '%s' with command line namespace '%s'", global_data->network_namespace, override_namespace);
FREE(global_data->network_namespace);
}
global_data->network_namespace = override_namespace;
override_namespace = NULL;
}
#endif
if (!__test_bit(CONFIG_TEST_BIT, &debug) &&
(global_data->instance_name
#if HAVE_DECL_CLONE_NEWNET
|| global_data->network_namespace
#endif
)) {
if ((syslog_ident = make_syslog_ident(PACKAGE_NAME))) {
log_message(LOG_INFO, "Changing syslog ident to %s", syslog_ident);
closelog();
openlog(syslog_ident, LOG_PID | ((__test_bit(LOG_CONSOLE_BIT, &debug)) ? LOG_CONS : 0), log_facility);
}
else
log_message(LOG_INFO, "Unable to change syslog ident");
use_pid_dir = true;
open_log_file(log_file_name,
NULL,
#if HAVE_DECL_CLONE_NEWNET
global_data->network_namespace,
#else
NULL,
#endif
global_data->instance_name);
}
/* Initialise pointer to child finding function */
set_child_finder_name(find_keepalived_child_name);
if (!__test_bit(CONFIG_TEST_BIT, &debug)) {
if (use_pid_dir) {
/* Create the directory for pid files */
create_pid_dir();
}
}
#if HAVE_DECL_CLONE_NEWNET
if (global_data->network_namespace) {
if (global_data->network_namespace && !set_namespaces(global_data->network_namespace)) {
log_message(LOG_ERR, "Unable to set network namespace %s - exiting", global_data->network_namespace);
goto end;
}
}
#endif
if (!__test_bit(CONFIG_TEST_BIT, &debug)) {
if (global_data->instance_name) {
if (!main_pidfile && (main_pidfile = make_pidfile_name(KEEPALIVED_PID_DIR KEEPALIVED_PID_FILE, global_data->instance_name, PID_EXTENSION)))
free_main_pidfile = true;
#ifdef _WITH_LVS_
if (!checkers_pidfile && (checkers_pidfile = make_pidfile_name(KEEPALIVED_PID_DIR CHECKERS_PID_FILE, global_data->instance_name, PID_EXTENSION)))
free_checkers_pidfile = true;
#endif
#ifdef _WITH_VRRP_
if (!vrrp_pidfile && (vrrp_pidfile = make_pidfile_name(KEEPALIVED_PID_DIR VRRP_PID_FILE, global_data->instance_name, PID_EXTENSION)))
free_vrrp_pidfile = true;
#endif
#ifdef _WITH_BFD_
if (!bfd_pidfile && (bfd_pidfile = make_pidfile_name(KEEPALIVED_PID_DIR VRRP_PID_FILE, global_data->instance_name, PID_EXTENSION)))
free_bfd_pidfile = true;
#endif
}
if (use_pid_dir) {
if (!main_pidfile)
main_pidfile = KEEPALIVED_PID_DIR KEEPALIVED_PID_FILE PID_EXTENSION;
#ifdef _WITH_LVS_
if (!checkers_pidfile)
checkers_pidfile = KEEPALIVED_PID_DIR CHECKERS_PID_FILE PID_EXTENSION;
#endif
#ifdef _WITH_VRRP_
if (!vrrp_pidfile)
vrrp_pidfile = KEEPALIVED_PID_DIR VRRP_PID_FILE PID_EXTENSION;
#endif
#ifdef _WITH_BFD_
if (!bfd_pidfile)
bfd_pidfile = KEEPALIVED_PID_DIR BFD_PID_FILE PID_EXTENSION;
#endif
}
else
{
if (!main_pidfile)
main_pidfile = PID_DIR KEEPALIVED_PID_FILE PID_EXTENSION;
#ifdef _WITH_LVS_
if (!checkers_pidfile)
checkers_pidfile = PID_DIR CHECKERS_PID_FILE PID_EXTENSION;
#endif
#ifdef _WITH_VRRP_
if (!vrrp_pidfile)
vrrp_pidfile = PID_DIR VRRP_PID_FILE PID_EXTENSION;
#endif
#ifdef _WITH_BFD_
if (!bfd_pidfile)
bfd_pidfile = PID_DIR BFD_PID_FILE PID_EXTENSION;
#endif
}
/* Check if keepalived is already running */
if (keepalived_running(daemon_mode)) {
log_message(LOG_INFO, "daemon is already running");
report_stopped = false;
goto end;
}
}
/* daemonize process */
if (!__test_bit(DONT_FORK_BIT, &debug) &&
xdaemon(false, false, true) > 0) {
closelog();
FREE_PTR(config_id);
FREE_PTR(orig_core_dump_pattern);
close_std_fd();
exit(0);
}
#ifdef _MEM_CHECK_
enable_mem_log_termination();
#endif
if (__test_bit(CONFIG_TEST_BIT, &debug)) {
validate_config();
config_test_exit();
}
/* write the father's pidfile */
if (!pidfile_write(main_pidfile, getpid()))
goto end;
/* Create the master thread */
master = thread_make_master();
/* Signal handling initialization */
signal_init();
/* Init daemon */
if (!start_keepalived())
log_message(LOG_INFO, "Warning - keepalived has no configuration to run");
initialise_debug_options();
#ifdef THREAD_DUMP
register_parent_thread_addresses();
#endif
/* Launch the scheduling I/O multiplexer */
launch_thread_scheduler(master);
/* Finish daemon process */
stop_keepalived();
#ifdef THREAD_DUMP
deregister_thread_addresses();
#endif
/*
* Reached when terminate signal catched.
* finally return from system
*/
end:
if (report_stopped) {
#ifdef GIT_COMMIT
log_message(LOG_INFO, "Stopped %s, git commit %s", version_string, GIT_COMMIT);
#else
log_message(LOG_INFO, "Stopped %s", version_string);
#endif
}
#if HAVE_DECL_CLONE_NEWNET
if (global_data && global_data->network_namespace)
clear_namespaces();
#endif
if (use_pid_dir)
remove_pid_dir();
/* Restore original core_pattern if necessary */
if (orig_core_dump_pattern)
update_core_dump_pattern(orig_core_dump_pattern);
free_parent_mallocs_startup(false);
free_parent_mallocs_exit();
free_global_data(global_data);
closelog();
#ifndef _MEM_CHECK_LOG_
FREE_PTR(syslog_ident);
#else
if (syslog_ident)
free(syslog_ident);
#endif
close_std_fd();
exit(KEEPALIVED_EXIT_OK);
}
| 0 |
[
"CWE-59",
"CWE-61"
] |
keepalived
|
04f2d32871bb3b11d7dc024039952f2fe2750306
| 78,708,647,750,567,010,000,000,000,000,000,000,000 | 344 |
When opening files for write, ensure they aren't symbolic links
Issue #1048 identified that if, for example, a non privileged user
created a symbolic link from /etc/keepalvied.data to /etc/passwd,
writing to /etc/keepalived.data (which could be invoked via DBus)
would cause /etc/passwd to be overwritten.
This commit stops keepalived writing to pathnames where the ultimate
component is a symbolic link, by setting O_NOFOLLOW whenever opening
a file for writing.
This might break some setups, where, for example, /etc/keepalived.data
was a symbolic link to /home/fred/keepalived.data. If this was the case,
instead create a symbolic link from /home/fred/keepalived.data to
/tmp/keepalived.data, so that the file is still accessible via
/home/fred/keepalived.data.
There doesn't appear to be a way around this backward incompatibility,
since even checking if the pathname is a symbolic link prior to opening
for writing would create a race condition.
Signed-off-by: Quentin Armitage <[email protected]>
|
static int ati_bpp_from_datatype(ATIVGAState *s)
{
switch (s->regs.dp_datatype & 0xf) {
case 2:
return 8;
case 3:
case 4:
return 16;
case 5:
return 24;
case 6:
return 32;
default:
qemu_log_mask(LOG_UNIMP, "Unknown dst datatype %d\n",
s->regs.dp_datatype & 0xf);
return 0;
}
}
| 0 |
[
"CWE-190"
] |
qemu
|
ac2071c3791b67fc7af78b8ceb320c01ca1b5df7
| 308,959,096,820,305,550,000,000,000,000,000,000,000 | 18 |
ati-vga: Fix checks in ati_2d_blt() to avoid crash
In some corner cases (that never happen during normal operation but a
malicious guest could program wrong values) pixman functions were
called with parameters that result in a crash. Fix this and add more
checks to disallow such cases.
Reported-by: Ziming Zhang <[email protected]>
Signed-off-by: BALATON Zoltan <[email protected]>
Message-id: [email protected]
Signed-off-by: Gerd Hoffmann <[email protected]>
|
char *
ruby_hdtoa(double d, const char *xdigs, int ndigits, int *decpt, int *sign,
char **rve)
{
U u;
char *s, *s0;
int bufsize;
uint32_t manh, manl;
u.d = d;
if (word0(u) & Sign_bit) {
/* set sign for everything, including 0's and NaNs */
*sign = 1;
word0(u) &= ~Sign_bit; /* clear sign bit */
}
else
*sign = 0;
if (isinf(d)) { /* FP_INFINITE */
*decpt = INT_MAX;
return rv_strdup(INFSTR, rve);
}
else if (isnan(d)) { /* FP_NAN */
*decpt = INT_MAX;
return rv_strdup(NANSTR, rve);
}
else if (d == 0.0) { /* FP_ZERO */
*decpt = 1;
return rv_strdup(ZEROSTR, rve);
}
else if (dexp_get(u)) { /* FP_NORMAL */
*decpt = dexp_get(u) - DBL_ADJ;
}
else { /* FP_SUBNORMAL */
u.d *= 5.363123171977039e+154 /* 0x1p514 */;
*decpt = dexp_get(u) - (514 + DBL_ADJ);
}
if (ndigits == 0) /* dtoa() compatibility */
ndigits = 1;
/*
* If ndigits < 0, we are expected to auto-size, so we allocate
* enough space for all the digits.
*/
bufsize = (ndigits > 0) ? ndigits : SIGFIGS;
s0 = rv_alloc(bufsize+1);
/* Round to the desired number of digits. */
if (SIGFIGS > ndigits && ndigits > 0) {
float redux = 1.0f;
volatile double d;
int offset = 4 * ndigits + DBL_MAX_EXP - 4 - DBL_MANT_DIG;
dexp_set(u, offset);
d = u.d;
d += redux;
d -= redux;
u.d = d;
*decpt += dexp_get(u) - offset;
}
manh = dmanh_get(u);
manl = dmanl_get(u);
*s0 = '1';
for (s = s0 + 1; s < s0 + bufsize; s++) {
*s = xdigs[(manh >> (DBL_MANH_SIZE - 4)) & 0xf];
manh = (manh << 4) | (manl >> (DBL_MANL_SIZE - 4));
manl <<= 4;
}
/* If ndigits < 0, we are expected to auto-size the precision. */
if (ndigits < 0) {
for (ndigits = SIGFIGS; s0[ndigits - 1] == '0'; ndigits--)
;
}
s = s0 + ndigits;
*s = '\0';
if (rve != NULL)
*rve = s;
return (s0);
| 0 |
[
"CWE-119"
] |
ruby
|
60c29bbbf6574e0e947c56e71c3c3ca11620ee15
| 164,932,451,107,649,530,000,000,000,000,000,000,000 | 81 |
merge revision(s) 43775:
* util.c (ruby_strtod): ignore too long fraction part, which does not
affect the result.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/branches/ruby_1_9_3@43776 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
|
int inet_sk_rebuild_header(struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
__be32 daddr;
int err;
/* Route is OK, nothing to do. */
if (rt)
return 0;
/* Reroute. */
daddr = inet->inet_daddr;
if (inet->opt && inet->opt->srr)
daddr = inet->opt->faddr;
rt = ip_route_output_ports(sock_net(sk), sk, daddr, inet->inet_saddr,
inet->inet_dport, inet->inet_sport,
sk->sk_protocol, RT_CONN_FLAGS(sk),
sk->sk_bound_dev_if);
if (!IS_ERR(rt)) {
err = 0;
sk_setup_caps(sk, &rt->dst);
} else {
err = PTR_ERR(rt);
/* Routing failed... */
sk->sk_route_caps = 0;
/*
* Other protocols have to map its equivalent state to TCP_SYN_SENT.
* DCCP maps its DCCP_REQUESTING state to TCP_SYN_SENT. -acme
*/
if (!sysctl_ip_dynaddr ||
sk->sk_state != TCP_SYN_SENT ||
(sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
(err = inet_sk_reselect_saddr(sk)) != 0)
sk->sk_err_soft = -err;
}
return err;
}
| 1 |
[
"CWE-362"
] |
linux-2.6
|
f6d8bd051c391c1c0458a30b2a7abcd939329259
| 106,828,541,222,360,000,000,000,000,000,000,000,000 | 40 |
inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We cant insert an rcu_head in struct ip_options since its included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
ofpact_put_reg_load(struct ofpbuf *ofpacts, const struct mf_field *field,
const void *value, const void *mask)
{
struct ofpact_set_field *sf = ofpact_put_set_field(ofpacts, field, value,
mask);
sf->ofpact.raw = NXAST_RAW_REG_LOAD;
return sf;
}
| 0 |
[
"CWE-125"
] |
ovs
|
9237a63c47bd314b807cda0bd2216264e82edbe8
| 296,901,430,957,495,440,000,000,000,000,000,000,000 | 9 |
ofp-actions: Avoid buffer overread in BUNDLE action decoding.
Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9052
Signed-off-by: Ben Pfaff <[email protected]>
Acked-by: Justin Pettit <[email protected]>
|
e_mail_parser_get_parsers (EMailParser *parser,
const gchar *mime_type)
{
EMailExtensionRegistry *reg;
EMailParserClass *parser_class;
gchar *as_mime_type;
GQueue *parsers;
g_return_val_if_fail (E_IS_MAIL_PARSER (parser), NULL);
parser_class = E_MAIL_PARSER_GET_CLASS (parser);
g_return_val_if_fail (parser_class != NULL, NULL);
if (mime_type)
as_mime_type = g_ascii_strdown (mime_type, -1);
else
as_mime_type = NULL;
reg = E_MAIL_EXTENSION_REGISTRY (parser_class->extension_registry);
parsers = e_mail_extension_registry_get_for_mime_type (reg, as_mime_type);
if (!parsers)
parsers = e_mail_extension_registry_get_fallback (reg, as_mime_type);
g_free (as_mime_type);
return parsers;
}
| 0 |
[
"CWE-347"
] |
evolution
|
9c55a311325f5905d8b8403b96607e46cf343f21
| 267,814,109,784,573,100,000,000,000,000,000,000,000 | 28 |
I#120 - Show security bar above message headers
Closes https://gitlab.gnome.org/GNOME/evolution/issues/120
|
R_API void r_core_anal_type_init(RCore *core) {
Sdb *types = NULL;
const char *anal_arch = NULL, *os = NULL;
char *dbpath;
if (!core || !core->anal) {
return;
}
const char *dir_prefix = r_config_get (core->config, "dir.prefix");
int bits = core->assembler->bits;
types = core->anal->sdb_types;
// make sure they are empty this is initializing
sdb_reset (types);
anal_arch = r_config_get (core->config, "anal.arch");
os = r_config_get (core->config, "asm.os");
// spaguetti ahead
dbpath = sdb_fmt ("%s/"DBSPATH"/types.sdb", dir_prefix);
if (r_file_exists (dbpath)) {
sdb_concat_by_path (types, dbpath);
}
dbpath = sdb_fmt ("%s/"DBSPATH"/types-%s.sdb", dir_prefix, anal_arch);
if (r_file_exists (dbpath)) {
sdb_concat_by_path (types, dbpath);
}
dbpath = sdb_fmt ("%s/"DBSPATH"/types-%s.sdb", dir_prefix, os);
if (r_file_exists (dbpath)) {
sdb_concat_by_path (types, dbpath);
}
dbpath = sdb_fmt ("%s/"DBSPATH"/types-%d.sdb", dir_prefix, bits);
if (r_file_exists (dbpath)) {
sdb_concat_by_path (types, dbpath);
}
dbpath = sdb_fmt ("%s/"DBSPATH"/types-%s-%d.sdb", dir_prefix, os, bits);
if (r_file_exists (dbpath)) {
sdb_concat_by_path (types, dbpath);
}
dbpath = sdb_fmt ("%s/"DBSPATH"/types-%s-%d.sdb", dir_prefix, anal_arch, bits);
if (r_file_exists (dbpath)) {
sdb_concat_by_path (types, dbpath);
}
dbpath = sdb_fmt ("%s/"DBSPATH"/types-%s-%s.sdb", dir_prefix, anal_arch, os);
if (r_file_exists (dbpath)) {
sdb_concat_by_path (types, dbpath);
}
dbpath = sdb_fmt ("%s/"DBSPATH"/types-%s-%s-%d.sdb", dir_prefix, anal_arch, os, bits);
if (r_file_exists (dbpath)) {
sdb_concat_by_path (types, dbpath);
}
}
| 0 |
[
"CWE-125"
] |
radare2
|
1f37c04f2a762500222dda2459e6a04646feeedf
| 38,628,909,052,093,790,000,000,000,000,000,000,000 | 48 |
Fix #9904 - crash in r2_hoobr_r_read_le32 (over 9000 entrypoints) and read_le oobread (#9923)
|
static HashTable *date_object_get_gc(zval *object, zval ***table, int *n TSRMLS_DC)
{
*table = NULL;
*n = 0;
return zend_std_get_properties(object TSRMLS_CC);
| 0 |
[] |
php-src
|
c377f1a715476934133f3254d1e0d4bf3743e2d2
| 2,738,895,749,305,506,500,000,000,000,000,000,000 | 6 |
Fix bug #68942 (Use after free vulnerability in unserialize() with DateTimeZone)
|
static void __net_exit default_device_exit(struct net *net)
{
struct net_device *dev, *aux;
/*
* Push all migratable network devices back to the
* initial network namespace
*/
rtnl_lock();
for_each_netdev_safe(net, dev, aux) {
int err;
char fb_name[IFNAMSIZ];
/* Ignore unmoveable devices (i.e. loopback) */
if (dev->features & NETIF_F_NETNS_LOCAL)
continue;
/* Leave virtual devices for the generic cleanup */
if (dev->rtnl_link_ops)
continue;
/* Push remaing network devices to init_net */
snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
err = dev_change_net_namespace(dev, &init_net, fb_name);
if (err) {
printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
__func__, dev->name, err);
BUG();
}
}
rtnl_unlock();
}
| 0 |
[
"CWE-399"
] |
linux
|
6ec82562ffc6f297d0de36d65776cff8e5704867
| 110,579,933,554,560,200,000,000,000,000,000,000,000 | 31 |
veth: Dont kfree_skb() after dev_forward_skb()
In case of congestion, netif_rx() frees the skb, so we must assume
dev_forward_skb() also consume skb.
Bug introduced by commit 445409602c092
(veth: move loopback logic to common location)
We must change dev_forward_skb() to always consume skb, and veth to not
double free it.
Bug report : http://marc.info/?l=linux-netdev&m=127310770900442&w=3
Reported-by: Martín Ferrari <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
DEFUN(ldHist, HISTORY, "Show browsing history")
{
cmd_loadBuffer(historyBuffer(URLHist), BP_NO_URL, LB_NOLINK);
}
| 0 |
[
"CWE-59",
"CWE-241"
] |
w3m
|
18dcbadf2771cdb0c18509b14e4e73505b242753
| 309,609,481,546,644,730,000,000,000,000,000,000,000 | 4 |
Make temporary directory safely when ~/.w3m is unwritable
|
GF_Err ssix_box_read(GF_Box *s, GF_BitStream *bs)
{
u32 i,j;
GF_SubsegmentIndexBox *ptr = (GF_SubsegmentIndexBox*)s;
ISOM_DECREASE_SIZE(ptr, 4)
ptr->subsegment_count = gf_bs_read_u32(bs);
//each subseg has at least one range_count (4 bytes), abort if not enough bytes (broken box)
if (ptr->size < ptr->subsegment_count*4)
return GF_ISOM_INVALID_FILE;
ptr->subsegment_alloc = ptr->subsegment_count;
GF_SAFE_ALLOC_N(ptr->subsegments, ptr->subsegment_count, GF_SubsegmentInfo);
if (!ptr->subsegments)
return GF_OUT_OF_MEM;
for (i = 0; i < ptr->subsegment_count; i++) {
GF_SubsegmentInfo *subseg = &ptr->subsegments[i];
ISOM_DECREASE_SIZE(ptr, 4)
subseg->range_count = gf_bs_read_u32(bs);
//each range is 4 bytes, abort if not enough bytes
if (ptr->size < subseg->range_count*4)
return GF_ISOM_INVALID_FILE;
subseg->ranges = (GF_SubsegmentRangeInfo*) gf_malloc(sizeof(GF_SubsegmentRangeInfo) * subseg->range_count);
if (!subseg->ranges) return GF_OUT_OF_MEM;
for (j = 0; j < subseg->range_count; j++) {
ISOM_DECREASE_SIZE(ptr, 4)
subseg->ranges[j].level = gf_bs_read_u8(bs);
subseg->ranges[j].range_size = gf_bs_read_u24(bs);
}
}
return GF_OK;
| 0 |
[
"CWE-476",
"CWE-787"
] |
gpac
|
b8f8b202d4fc23eb0ab4ce71ae96536ca6f5d3f8
| 107,855,333,087,846,770,000,000,000,000,000,000,000 | 32 |
fixed #1757
|
static void disable_file(OPERATION op, const char *filename) {
assert(filename);
assert(op <OPERATION_MAX);
EUID_ASSERT();
// Resolve all symlinks
char* fname = realpath(filename, NULL);
if (fname == NULL && errno != EACCES) {
return;
}
if (fname == NULL && errno == EACCES) {
// realpath and stat functions will fail on FUSE filesystems
// they don't seem to like a uid of 0
// force mounting
int fd = open(filename, O_PATH|O_CLOEXEC);
if (fd < 0) {
if (arg_debug)
printf("Warning (blacklisting): cannot open %s: %s\n", filename, strerror(errno));
return;
}
EUID_ROOT();
int err = bind_mount_path_to_fd(RUN_RO_DIR, fd);
if (err != 0)
err = bind_mount_path_to_fd(RUN_RO_FILE, fd);
EUID_USER();
close(fd);
if (err == 0) {
if (arg_debug)
printf("Disable %s\n", filename);
if (op == BLACKLIST_FILE)
fs_logger2("blacklist", filename);
else
fs_logger2("blacklist-nolog", filename);
}
else if (arg_debug)
printf("Warning (blacklisting): cannot mount on %s\n", filename);
return;
}
assert(fname);
// check for firejail executable
// we might have a file found in ${PATH} pointing to /usr/bin/firejail
// blacklisting it here will end up breaking situations like user clicks on a link in Thunderbird
// and expects Firefox to open in the same sandbox
if (strcmp(BINDIR "/firejail", fname) == 0) {
free(fname);
return;
}
// if the file is not present, do nothing
int fd = open(fname, O_PATH|O_CLOEXEC);
if (fd < 0) {
if (arg_debug)
printf("Warning (blacklisting): cannot open %s: %s\n", fname, strerror(errno));
free(fname);
return;
}
struct stat s;
if (fstat(fd, &s) < 0) {
if (arg_debug)
printf("Warning (blacklisting): cannot stat %s: %s\n", fname, strerror(errno));
free(fname);
close(fd);
return;
}
// modify the file
if (op == BLACKLIST_FILE || op == BLACKLIST_NOLOG) {
// some distros put all executables under /usr/bin and make /bin a symbolic link
if ((strcmp(fname, "/bin") == 0 || strcmp(fname, "/usr/bin") == 0) &&
is_link(filename) &&
S_ISDIR(s.st_mode)) {
fwarning("%s directory link was not blacklisted\n", filename);
}
else {
if (arg_debug) {
if (strcmp(filename, fname))
printf("Disable %s (requested %s)\n", fname, filename);
else
printf("Disable %s\n", fname);
}
else if (arg_debug_blacklists) {
printf("Disable %s", fname);
if (op == BLACKLIST_FILE)
printf("\n");
else
printf(" - no logging\n");
}
EUID_ROOT();
if (S_ISDIR(s.st_mode)) {
if (bind_mount_path_to_fd(RUN_RO_DIR, fd) < 0)
errExit("disable file");
}
else {
if (bind_mount_path_to_fd(RUN_RO_FILE, fd) < 0)
errExit("disable file");
}
EUID_USER();
if (op == BLACKLIST_FILE)
fs_logger2("blacklist", fname);
else
fs_logger2("blacklist-nolog", fname);
// files in /etc will be reprocessed during /etc rebuild
if (strncmp(fname, "/etc/", 5) == 0) {
ProfileEntry *prf = malloc(sizeof(ProfileEntry));
if (!prf)
errExit("malloc");
memset(prf, 0, sizeof(ProfileEntry));
prf->data = strdup(fname);
if (!prf->data)
errExit("strdup");
prf->next = cfg.profile_rebuild_etc;
cfg.profile_rebuild_etc = prf;
}
}
}
else if (op == MOUNT_READONLY || op == MOUNT_RDWR || op == MOUNT_NOEXEC) {
fs_remount_rec(fname, op);
}
else if (op == MOUNT_TMPFS) {
if (!S_ISDIR(s.st_mode)) {
fwarning("%s is not a directory; cannot mount a tmpfs on top of it.\n", fname);
goto out;
}
uid_t uid = getuid();
if (uid != 0) {
// only user owned directories in user home
if (s.st_uid != uid ||
strncmp(cfg.homedir, fname, strlen(cfg.homedir)) != 0 ||
fname[strlen(cfg.homedir)] != '/') {
fwarning("you are not allowed to mount a tmpfs on %s\n", fname);
goto out;
}
}
fs_tmpfs(fname, uid);
selinux_relabel_path(fname, fname);
}
else
assert(0);
out:
close(fd);
free(fname);
}
| 0 |
[
"CWE-269",
"CWE-94"
] |
firejail
|
27cde3d7d1e4e16d4190932347c7151dc2a84c50
| 25,276,672,037,262,600,000,000,000,000,000,000,000 | 153 |
fixing CVE-2022-31214
|
ofputil_encode_ofp11_group_mod(enum ofp_version ofp_version,
const struct ofputil_group_mod *gm)
{
struct ofpbuf *b;
struct ofp11_group_mod *ogm;
size_t start_ogm;
struct ofputil_bucket *bucket;
b = ofpraw_alloc(OFPRAW_OFPT11_GROUP_MOD, ofp_version, 0);
start_ogm = b->size;
ofpbuf_put_zeros(b, sizeof *ogm);
LIST_FOR_EACH (bucket, list_node, &gm->buckets) {
ofputil_put_ofp11_bucket(bucket, b, ofp_version);
}
ogm = ofpbuf_at_assert(b, start_ogm, sizeof *ogm);
ogm->command = htons(gm->command);
ogm->type = gm->type;
ogm->group_id = htonl(gm->group_id);
return b;
}
| 0 |
[
"CWE-772"
] |
ovs
|
77ad4225d125030420d897c873e4734ac708c66b
| 260,703,877,221,628,220,000,000,000,000,000,000,000 | 22 |
ofp-util: Fix memory leaks on error cases in ofputil_decode_group_mod().
Found by libFuzzer.
Reported-by: Bhargava Shastry <[email protected]>
Signed-off-by: Ben Pfaff <[email protected]>
Acked-by: Justin Pettit <[email protected]>
|
/*
 * Read and parse the MIME request header fields from 'bb' into
 * r->headers_in, stopping at the empty line that terminates the header
 * section.  Enforces LimitRequestFieldSize and LimitRequestFields, and
 * handles RFC 7230 obs-fold continuation lines by concatenating them onto
 * the preceding field with a single SP.  On any protocol violation,
 * r->status is set (typically HTTP_BAD_REQUEST) and the function returns
 * early; on success all fields are stored and same-named fields merged.
 *
 * Parsing strictness follows core_server_config->http_conformance:
 * 'Unsafe' uses the legacy lenient parser, otherwise strict RFC 7230
 * token/field-content validation is applied.
 */
AP_DECLARE(void) ap_get_mime_headers_core(request_rec *r, apr_bucket_brigade *bb)
{
    char *last_field = NULL;    /* accumulated field (with obs-folds) not yet stored */
    apr_size_t last_len = 0;
    apr_size_t alloc_len = 0;   /* capacity of the power-of-two fold buffer */
    char *field;
    char *value;
    apr_size_t len;
    int fields_read = 0;
    char *tmp_field;
    core_server_config *conf = ap_get_core_module_config(r->server->module_config);
    int strict = (conf->http_conformance != AP_HTTP_CONFORMANCE_UNSAFE);

    /*
     * Read header lines until we get the empty separator line, a read error,
     * the connection closes (EOF), reach the server limit, or we timeout.
     */
    while(1) {
        apr_status_t rv;

        field = NULL;
        /* +2 leaves room for CRLF beyond the configured field-size limit */
        rv = ap_rgetline(&field, r->server->limit_req_fieldsize + 2,
                         &len, r, strict ? AP_GETLINE_CRLF : 0, bb);
        if (rv != APR_SUCCESS) {
            if (APR_STATUS_IS_TIMEUP(rv)) {
                r->status = HTTP_REQUEST_TIME_OUT;
            }
            else {
                ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r,
                              "Failed to read request header line %s", field);
                r->status = HTTP_BAD_REQUEST;
            }

            /* ap_rgetline returns APR_ENOSPC if it fills up the buffer before
             * finding the end-of-line.  This is only going to happen if it
             * exceeds the configured limit for a field size.
             */
            if (rv == APR_ENOSPC) {
                apr_table_setn(r->notes, "error-notes",
                               "Size of a request header field "
                               "exceeds server limit.");
                ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00561)
                              "Request header exceeds LimitRequestFieldSize%s"
                              "%.*s",
                              (field && *field) ? ": " : "",
                              (field) ? field_name_len(field) : 0,
                              (field) ? field : "");
            }
            return;
        }

        /* For all header values, and all obs-fold lines, the presence of
         * additional whitespace is a no-op, so collapse trailing whitespace
         * to save buffer allocation and optimize copy operations.
         * Do not remove the last single whitespace under any condition.
         */
        while (len > 1 && (field[len-1] == '\t' || field[len-1] == ' ')) {
            field[--len] = '\0';
        }

        if (*field == '\t' || *field == ' ') {

            /* Append any newly-read obs-fold line onto the preceding
             * last_field line we are processing
             */
            apr_size_t fold_len;

            if (last_field == NULL) {
                r->status = HTTP_BAD_REQUEST;
                ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03442)
                              "Line folding encountered before first"
                              " header line");
                return;
            }

            if (field[1] == '\0') {
                r->status = HTTP_BAD_REQUEST;
                ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03443)
                              "Empty folded line encountered");
                return;
            }

            /* Leading whitespace on an obs-fold line can be
             * similarly discarded */
            while (field[1] == '\t' || field[1] == ' ') {
                ++field; --len;
            }

            /* This line is a continuation of the preceding line(s),
             * so append it to the line that we've set aside.
             * Note: this uses a power-of-two allocator to avoid
             * doing O(n) allocs and using O(n^2) space for
             * continuations that span many many lines.
             */
            fold_len = last_len + len + 1; /* trailing null */

            if (fold_len >= (apr_size_t)(r->server->limit_req_fieldsize)) {
                r->status = HTTP_BAD_REQUEST;
                /* report what we have accumulated so far before the
                 * overflow (last_field) as the field with the problem
                 */
                apr_table_setn(r->notes, "error-notes",
                               "Size of a request header field "
                               "exceeds server limit.");
                ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00562)
                              "Request header exceeds LimitRequestFieldSize "
                              "after folding: %.*s",
                              field_name_len(last_field), last_field);
                return;
            }

            if (fold_len > alloc_len) {
                char *fold_buf;
                /* double the buffer; bump to fold_len if still too small */
                alloc_len += alloc_len;
                if (fold_len > alloc_len) {
                    alloc_len = fold_len;
                }
                fold_buf = (char *)apr_palloc(r->pool, alloc_len);
                memcpy(fold_buf, last_field, last_len);
                last_field = fold_buf;
            }
            memcpy(last_field + last_len, field, len +1); /* +1 for nul */
            /* Replace obs-fold w/ SP per RFC 7230 3.2.4 */
            last_field[last_len] = ' ';
            last_len += len;

            /* We've appended this obs-fold line to last_len, proceed to
             * read the next input line
             */
            continue;
        }
        else if (last_field != NULL) {

            /* Process the previous last_field header line with all obs-folded
             * segments already concatenated (this is not operating on the
             * most recently read input line).
             */

            if (r->server->limit_req_fields
                    && (++fields_read > r->server->limit_req_fields)) {
                r->status = HTTP_BAD_REQUEST;
                apr_table_setn(r->notes, "error-notes",
                               "The number of request header fields "
                               "exceeds this server's limit.");
                ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00563)
                              "Number of request headers exceeds "
                              "LimitRequestFields");
                return;
            }

            if (!strict)
            {
                /* Not Strict ('Unsafe' mode), using the legacy parser */

                if (!(value = strchr(last_field, ':'))) { /* Find ':' or */
                    r->status = HTTP_BAD_REQUEST;   /* abort bad request */
                    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00564)
                                  "Request header field is missing ':' "
                                  "separator: %.*s", (int)LOG_NAME_MAX_LEN,
                                  last_field);
                    return;
                }

                if (value == last_field) {
                    r->status = HTTP_BAD_REQUEST;
                    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03453)
                                  "Request header field name was empty");
                    return;
                }

                *value++ = '\0'; /* NUL-terminate at colon */

                if (strpbrk(last_field, "\t\n\v\f\r ")) {
                    r->status = HTTP_BAD_REQUEST;
                    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03452)
                                  "Request header field name presented"
                                  " invalid whitespace");
                    return;
                }

                while (*value == ' ' || *value == '\t') {
                     ++value;            /* Skip to start of value   */
                }

                if (strpbrk(value, "\n\v\f\r")) {
                    r->status = HTTP_BAD_REQUEST;
                    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03451)
                                  "Request header field value presented"
                                  " bad whitespace");
                    return;
                }
            }
            else /* Using strict RFC7230 parsing */
            {
                /* Ensure valid token chars before ':' per RFC 7230 3.2.4 */
                value = (char *)ap_scan_http_token(last_field);
                if ((value == last_field) || *value != ':') {
                    r->status = HTTP_BAD_REQUEST;
                    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02426)
                                  "Request header field name is malformed: "
                                  "%.*s", (int)LOG_NAME_MAX_LEN, last_field);
                    return;
                }

                *value++ = '\0'; /* NUL-terminate last_field name at ':' */

                while (*value == ' ' || *value == '\t') {
                    ++value;             /* Skip LWS of value */
                }

                /* Find invalid, non-HT ctrl char, or the trailing NULL */
                tmp_field = (char *)ap_scan_http_field_content(value);

                /* Reject value for all garbage input (CTRLs excluding HT)
                 * e.g. only VCHAR / SP / HT / obs-text are allowed per
                 * RFC7230 3.2.6 - leave all more explicit rule enforcement
                 * for specific header handler logic later in the cycle
                 */
                if (*tmp_field != '\0') {
                    r->status = HTTP_BAD_REQUEST;
                    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02427)
                                  "Request header value is malformed: "
                                  "%.*s", (int)LOG_NAME_MAX_LEN, value);
                    return;
                }
            }

            apr_table_addn(r->headers_in, last_field, value);

            /* This last_field header is now stored in headers_in,
             * resume processing of the current input line.
             */
        }

        /* Found the terminating empty end-of-headers line, stop. */
        if (len == 0) {
            break;
        }

        /* Keep track of this new header line so that we can extend it across
         * any obs-fold or parse it on the next loop iteration.  We referenced
         * our previously allocated buffer in r->headers_in,
         * so allocate a fresh buffer if required.
         */
        alloc_len = 0;
        last_field = field;
        last_len = len;
    }

    /* Combine multiple message-header fields with the same
     * field-name, following RFC 2616, 4.2.
     */
    apr_table_compress(r->headers_in, APR_OVERLAP_TABLES_MERGE);

    /* enforce LimitRequestFieldSize for merged headers */
    apr_table_do(table_do_fn_check_lengths, r, r->headers_in, NULL);
}
| 0 |
[] |
httpd
|
ecebcc035ccd8d0e2984fe41420d9e944f456b3c
| 200,103,169,594,159,200,000,000,000,000,000,000,000 | 258 |
Merged r1734009,r1734231,r1734281,r1838055,r1838079,r1840229,r1876664,r1876674,r1876784,r1879078,r1881620,r1887311,r1888871 from trunk:
*) core: Split ap_create_request() from ap_read_request(). [Graham Leggett]
*) core, h2: common ap_parse_request_line() and ap_check_request_header()
code. [Yann Ylavic]
*) core: Add StrictHostCheck to allow unconfigured hostnames to be
rejected. [Eric Covener]
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1890245 13f79535-47bb-0310-9956-ffa450edef68
|
/*!
  \brief Remove the child element at position \a Index from ElementList.

  Out-of-range indexes are silently ignored.  Only the pointer is removed
  from the list; the EbmlElement itself is NOT deleted here (ownership
  presumably stays with the caller -- TODO confirm against class contract).
*/
void EbmlMaster::Remove(size_t Index)
{
  if (Index < ElementList.size()) {
    // std::vector iterators are random-access: jump straight to the slot
    // instead of advancing one element at a time.
    ElementList.erase(ElementList.begin() + Index);
  }
}
| 0 |
[
"CWE-703"
] |
libebml
|
88409e2a94dd3b40ff81d08bf6d92f486d036b24
| 337,111,580,902,062,980,000,000,000,000,000,000,000 | 11 |
EbmlMaster: propagate upper level element after infinite sized one correctly
When the parser encountered a deeply nested element with an infinite
size then a following element of an upper level was not propagated
correctly. Instead the element with the infinite size was added into the
EBML element tree a second time resulting in memory access after freeing
it and multiple attempts to free the same memory address during
destruction.
Fixes the issue reported as Cisco TALOS-CAN-0037.
|
/*
 * Give up 'tsk's VSX register state.
 *
 * NOTE(review): VSX overlaps the FP and AltiVec register files, which
 * presumably is why both are flushed before __giveup_vsx() -- confirm
 * against the powerpc register-layout docs.
 */
void giveup_vsx(struct task_struct *tsk)
{
	giveup_fpu_maybe_transactional(tsk);
	giveup_altivec_maybe_transactional(tsk);
	__giveup_vsx(tsk);
}
| 0 |
[
"CWE-20"
] |
linux
|
621b5060e823301d0cba4cb52a7ee3491922d291
| 86,057,455,839,680,800,000,000,000,000,000,000,000 | 6 |
powerpc/tm: Fix crash when forking inside a transaction
When we fork/clone we currently don't copy any of the TM state to the new
thread. This results in a TM bad thing (program check) when the new process is
switched in as the kernel does a tmrechkpt with TEXASR FS not set. Also, since
R1 is from userspace, we trigger the bad kernel stack pointer detection. So we
end up with something like this:
Bad kernel stack pointer 0 at c0000000000404fc
cpu 0x2: Vector: 700 (Program Check) at [c00000003ffefd40]
pc: c0000000000404fc: restore_gprs+0xc0/0x148
lr: 0000000000000000
sp: 0
msr: 9000000100201030
current = 0xc000001dd1417c30
paca = 0xc00000000fe00800 softe: 0 irq_happened: 0x01
pid = 0, comm = swapper/2
WARNING: exception is not recoverable, can't continue
The below fixes this by flushing the TM state before we copy the task_struct to
the clone. To do this we go through the tmreclaim patch, which removes the
checkpointed registers from the CPU and transitions the CPU out of TM suspend
mode. Hence we need to call tmrechkpt after to restore the checkpointed state
and the TM mode for the current task.
To make this fail from userspace is simply:
tbegin
li r0, 2
sc
<boom>
Kudos to Adhemerval Zanella Neto for finding this.
Signed-off-by: Michael Neuling <[email protected]>
cc: Adhemerval Zanella Neto <[email protected]>
cc: [email protected]
Signed-off-by: Benjamin Herrenschmidt <[email protected]>
|
/*
 * DMA one firmware chunk of 'byte_cnt' bytes from host memory ('phy_addr')
 * to device address 'dst_addr', then block until the device signals
 * completion via ucode_write_waitq.
 *
 * Returns 0 on success, -EIO if NIC access cannot be grabbed, or
 * -ETIMEDOUT if the device does not ack the write within 5 seconds.
 */
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
					u32 dst_addr, dma_addr_t phy_addr,
					u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
	int ret;

	/* Must be cleared before the transfer is kicked off, so the ISR's
	 * completion cannot be missed by the wait below. */
	trans_pcie->ucode_write_complete = false;

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return -EIO;

	iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
					byte_cnt);
	iwl_trans_release_nic_access(trans, &flags);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		/* Dump registers to aid debugging the stuck transfer. */
		iwl_trans_pcie_dump_regs(trans);
		return -ETIMEDOUT;
	}

	return 0;
}
| 0 |
[
"CWE-476"
] |
linux
|
8188a18ee2e48c9a7461139838048363bfce3fef
| 6,252,524,686,823,483,000,000,000,000,000,000,000 | 27 |
iwlwifi: pcie: fix rb_allocator workqueue allocation
We don't handle failures in the rb_allocator workqueue allocation
correctly. To fix that, move the code earlier so the cleanup is
easier and we don't have to undo all the interrupt allocations in
this case.
Signed-off-by: Johannes Berg <[email protected]>
Signed-off-by: Luca Coelho <[email protected]>
|
/* State machine action AA-5: stop the ARTIM timer and move the
 * association into 'nextState'.  Always succeeds. */
AA_5_StopARTIMtimer(PRIVATE_NETWORKKEY ** /*network*/,
    PRIVATE_ASSOCIATIONKEY ** association, int nextState, void * /*params*/)
{
    PRIVATE_ASSOCIATIONKEY *assoc = *association;

    assoc->protocolState = nextState;
    // A zero start timestamp marks the ARTIM timer as not running.
    assoc->timerStart = 0;
    return EC_Normal;
}
| 0 |
[
"CWE-415",
"CWE-703",
"CWE-401"
] |
dcmtk
|
a9697dfeb672b0b9412c00c7d36d801e27ec85cb
| 288,162,879,907,814,670,000,000,000,000,000,000,000 | 7 |
Fixed poss. NULL pointer dereference/double free.
Thanks to Jinsheng Ba <[email protected]> for the report and some patches.
|
completeopt_was_set(void)
{
    // Re-derive the cached 'completeopt' sub-flags from the option string.
    compl_no_select = (strstr((char *)p_cot, "noselect") != NULL)
							      ? TRUE : FALSE;
    compl_no_insert = (strstr((char *)p_cot, "noinsert") != NULL)
							      ? TRUE : FALSE;
}
| 0 |
[
"CWE-125"
] |
vim
|
f12129f1714f7d2301935bb21d896609bdac221c
| 95,229,964,224,455,520,000,000,000,000,000,000,000 | 9 |
patch 9.0.0020: with some completion reading past end of string
Problem: With some completion reading past end of string.
Solution: Check the length of the string.
|
/*
 * Demosaic a Fuji X-Trans 6x6 CFA image in 'passes' refinement passes
 * (Frank Markesteijn's algorithm).  Works on the global 'image' buffer in
 * overlapping TS x TS tiles: interpolates green along 4 (or 8 when
 * passes > 1) directions, derives red/blue, scores each direction by a
 * CIELab-derivative homogeneity map, and averages the most homogeneous
 * candidates into the final pixel values.
 */
void CLASS xtrans_interpolate (int passes)
{
  int c, d, f, g, h, i, v, ng, row, col, top, left, mrow, mcol;
  int val, ndir, pass, hm[8], avg[4], color[3][8];
  static const short orth[12] = { 1,0,0,1,-1,0,0,-1,1,0,0,1 },
	patt[2][16] = { { 0,1,0,-1,2,0,-1,0,1,1,1,-1,0,0,0,0 },
			{ 0,1,0,-2,1,0,-2,0,1,1,-2,-2,1,-1,-1,1 } },
	dir[4] = { 1,TS,TS+1,TS-1 };
  short allhex[3][3][2][8], *hex;
  ushort min, max, sgrow, sgcol;
  ushort (*rgb)[TS][TS][3], (*rix)[3], (*pix)[4];
   short (*lab)    [TS][3], (*lix)[3];
   float (*drv)[TS][TS], diff[6], tr;
   char (*homo)[TS][TS], *buffer;

#ifdef DCRAW_VERBOSE
  if (verbose)
    fprintf (stderr,_("%d-pass X-Trans interpolation...\n"), passes);
#endif

  cielab (0,0);
  border_interpolate(6);
  ndir = 4 << (passes > 1);
  /* One workspace carved into rgb / lab / drv / homo sub-arrays below. */
  buffer = (char *) malloc (TS*TS*(ndir*11+6));
  merror (buffer, "xtrans_interpolate()");
  rgb  = (ushort(*)[TS][TS][3]) buffer;
  lab  = (short (*)    [TS][3])(buffer + TS*TS*(ndir*6));
  drv  = (float (*)[TS][TS])   (buffer + TS*TS*(ndir*6+6));
  homo = (char  (*)[TS][TS])   (buffer + TS*TS*(ndir*10+6));

/* Map a green hexagon around each non-green pixel and vice versa:	*/
  for (row=0; row < 3; row++)
    for (col=0; col < 3; col++)
      for (ng=d=0; d < 10; d+=2) {
	g = fcol(row,col) == 1;
	if (fcol(row+orth[d],col+orth[d+2]) == 1) ng=0; else ng++;
	/* A run of four non-green steps identifies the solitary-green
	   anchor cell (sgrow,sgcol) of the 3x3 pattern. */
	if (ng == 4) { sgrow = row; sgcol = col; }
	if (ng == g+1) FORC(8) {
	  v = orth[d  ]*patt[g][c*2] + orth[d+1]*patt[g][c*2+1];
	  h = orth[d+2]*patt[g][c*2] + orth[d+3]*patt[g][c*2+1];
	  allhex[row][col][0][c^(g*2 & d)] = h + v*width;
	  allhex[row][col][1][c^(g*2 & d)] = h + v*TS;
	}
      }

/* Set green1 and green3 to the minimum and maximum allowed values:	*/
  for (row=2; row < height-2; row++)
    /* NOTE: "min=~(max=0)" resets min/max as a side effect of the
       loop/condition expressions; it always evaluates nonzero. */
    for (min=~(max=0), col=2; col < width-2; col++) {
      if (fcol(row,col) == 1 && (min=~(max=0))) continue;
      pix = image + row*width + col;
      hex = allhex[row % 3][col % 3][0];
      if (!max) FORC(6) {
	val = pix[hex[c]][1];
	if (min > val) min = val;
	if (max < val) max = val;
      }
      pix[0][1] = min;
      pix[0][3] = max;
      switch ((row-sgrow) % 3) {
	case 1: if (row < height-3) { row++; col--; } break;
	case 2: if ((min=~(max=0)) && (col+=2) < width-3 && row > 2) row--;
      }
    }

  /* Process the image in overlapping tiles (16-pixel overlap). */
  for (top=3; top < height-19; top += TS-16)
    for (left=3; left < width-19; left += TS-16) {
      mrow = MIN (top+TS, height-3);
      mcol = MIN (left+TS, width-3);
      for (row=top; row < mrow; row++)
	for (col=left; col < mcol; col++)
	  memcpy (rgb[0][row-top][col-left], image[row*width+col], 6);
      FORC3 memcpy (rgb[c+1], rgb[0], sizeof *rgb);

/* Interpolate green horizontally, vertically, and along both diagonals: */
      for (row=top; row < mrow; row++)
	for (col=left; col < mcol; col++) {
	  if ((f = fcol(row,col)) == 1) continue;
	  pix = image + row*width + col;
	  hex = allhex[row % 3][col % 3][0];
	  color[1][0] = 174 * (pix[  hex[1]][1] + pix[  hex[0]][1]) -
			 46 * (pix[2*hex[1]][1] + pix[2*hex[0]][1]);
	  color[1][1] = 223 *  pix[  hex[3]][1] + pix[  hex[2]][1] * 33 +
			 92 * (pix[      0 ][f] - pix[ -hex[2]][f]);
	  FORC(2) color[1][2+c] =
		164 * pix[hex[4+c]][1] + 92 * pix[-2*hex[4+c]][1] + 33 *
		(2*pix[0][f] - pix[3*hex[4+c]][f] - pix[-3*hex[4+c]][f]);
	  FORC4 rgb[c^!((row-sgrow) % 3)][row-top][col-left][1] =
		LIM(color[1][c] >> 8,pix[0][1],pix[0][3]);
	}

      for (pass=0; pass < passes; pass++) {
	if (pass == 1)
	  memcpy (rgb+=4, buffer, 4*sizeof *rgb);

/* Recalculate green from interpolated values of closer pixels:	*/
	if (pass) {
	  for (row=top+2; row < mrow-2; row++)
	    for (col=left+2; col < mcol-2; col++) {
	      if ((f = fcol(row,col)) == 1) continue;
	      pix = image + row*width + col;
	      hex = allhex[row % 3][col % 3][1];
	      for (d=3; d < 6; d++) {
		rix = &rgb[(d-2)^!((row-sgrow) % 3)][row-top][col-left];
		val = rix[-2*hex[d]][1] + 2*rix[hex[d]][1]
		    - rix[-2*hex[d]][f] - 2*rix[hex[d]][f] + 3*rix[0][f];
		rix[0][1] = LIM(val/3,pix[0][1],pix[0][3]);
	      }
	    }
	}

/* Interpolate red and blue values for solitary green pixels:	*/
	for (row=(top-sgrow+4)/3*3+sgrow; row < mrow-2; row+=3)
	  for (col=(left-sgcol+4)/3*3+sgcol; col < mcol-2; col+=3) {
	    rix = &rgb[0][row-top][col-left];
	    h = fcol(row,col+1);
	    memset (diff, 0, sizeof diff);
	    for (i=1, d=0; d < 6; d++, i^=TS^1, h^=2) {
	      for (c=0; c < 2; c++, h^=2) {
		g = 2*rix[0][1] - rix[i<<c][1] - rix[-i<<c][1];
		color[h][d] = g + rix[i<<c][h] + rix[-i<<c][h];
		if (d > 1)
		  diff[d] += SQR (rix[i<<c][1] - rix[-i<<c][1]
				- rix[i<<c][h] + rix[-i<<c][h]) + SQR(g);
	      }
	      if (d > 1 && (d & 1))
		if (diff[d-1] < diff[d])
		  FORC(2) color[c*2][d] = color[c*2][d-1];
	      if (d < 2 || (d & 1)) {
		FORC(2) rix[0][c*2] = CLIP(color[c*2][d]/2);
		rix += TS*TS;
	      }
	    }
	  }

/* Interpolate red for blue pixels and vice versa:		*/
	for (row=top+1; row < mrow-1; row++)
	  for (col=left+1; col < mcol-1; col++) {
	    if ((f = 2-fcol(row,col)) == 1) continue;
	    rix = &rgb[0][row-top][col-left];
	    i = (row-sgrow) % 3 ? TS:1;
	    for (d=0; d < 4; d++, rix += TS*TS)
	      rix[0][f] = CLIP((rix[i][f] + rix[-i][f] +
		  2*rix[0][1] - rix[i][1] - rix[-i][1])/2);
	  }

/* Fill in red and blue for 2x2 blocks of green:		*/
	for (row=top+2; row < mrow-2; row++) if ((row-sgrow) % 3)
	  for (col=left+2; col < mcol-2; col++) if ((col-sgcol) % 3) {
	    rix = &rgb[0][row-top][col-left];
	    hex = allhex[row % 3][col % 3][1];
	    for (d=0; d < ndir; d+=2, rix += TS*TS)
	      if (hex[d] + hex[d+1]) {
		g = 3*rix[0][1] - 2*rix[hex[d]][1] - rix[hex[d+1]][1];
		for (c=0; c < 4; c+=2) rix[0][c] =
			CLIP((g + 2*rix[hex[d]][c] + rix[hex[d+1]][c])/3);
	      } else {
		g = 2*rix[0][1] - rix[hex[d]][1] - rix[hex[d+1]][1];
		for (c=0; c < 4; c+=2) rix[0][c] =
			CLIP((g + rix[hex[d]][c] + rix[hex[d+1]][c])/2);
	      }
	  }
      }
      rgb = (ushort(*)[TS][TS][3]) buffer;
      mrow -= top;
      mcol -= left;

/* Convert to CIELab and differentiate in all directions:	*/
      for (d=0; d < ndir; d++) {
	for (row=2; row < mrow-2; row++)
	  for (col=2; col < mcol-2; col++)
	    cielab (rgb[d][row][col], lab[row][col]);
	for (f=dir[d & 3],row=3; row < mrow-3; row++)
	  for (col=3; col < mcol-3; col++) {
	    lix = &lab[row][col];
	    g = 2*lix[0][0] - lix[f][0] - lix[-f][0];
	    drv[d][row][col] = SQR(g)
	      + SQR((2*lix[0][1] - lix[f][1] - lix[-f][1] + g*500/232))
	      + SQR((2*lix[0][2] - lix[f][2] - lix[-f][2] - g*500/580));
	  }
      }

/* Build homogeneity maps from the derivatives:			*/
      memset(homo, 0, ndir*TS*TS);
      for (row=4; row < mrow-4; row++)
	for (col=4; col < mcol-4; col++) {
	  for (tr=FLT_MAX, d=0; d < ndir; d++)
	    if (tr > drv[d][row][col])
		tr = drv[d][row][col];
	  tr *= 8;
	  for (d=0; d < ndir; d++)
	    for (v=-1; v <= 1; v++)
	      for (h=-1; h <= 1; h++)
		if (drv[d][row+v][col+h] <= tr)
		  homo[d][row][col]++;
	}

/* Average the most homogenous pixels for the final result:	*/
      if (height-top < TS+4) mrow = height-top+2;
      if (width-left < TS+4) mcol = width-left+2;
      for (row = MIN(top,8); row < mrow-8; row++)
	for (col = MIN(left,8); col < mcol-8; col++) {
	  for (d=0; d < ndir; d++)
	    for (hm[d]=0, v=-2; v <= 2; v++)
	      for (h=-2; h <= 2; h++)
		hm[d] += homo[d][row+v][col+h];
	  for (d=0; d < ndir-4; d++)
	    if (hm[d] < hm[d+4]) hm[d  ] = 0; else
	    if (hm[d] > hm[d+4]) hm[d+4] = 0;
	  for (max=hm[0],d=1; d < ndir; d++)
	    if (max < hm[d]) max = hm[d];
	  max -= max >> 3;
	  memset (avg, 0, sizeof avg);
	  for (d=0; d < ndir; d++)
	    if (hm[d] >= max) {
	      FORC3 avg[c] += rgb[d][row][col][c];
	      avg[3]++;
	    }
	  FORC3 image[(row+top)*width+col+left][c] = avg[c]/avg[3];
	}
    }
  free(buffer);
}
| 0 |
[] |
LibRaw
|
9ae25d8c3a6bfb40c582538193264f74c9b93bc0
| 164,175,359,938,132,550,000,000,000,000,000,000,000 | 222 |
backported 0.15.4 datachecks
|
/* Lua binding: delete the Redis key named by string argument #1.
 * Returns CONST_LUA_OK on success, CONST_LUA_PARAM_ERROR on bad args. */
static int ntop_delete_redis_key(lua_State* vm) {
  ntop->getTrace()->traceEvent(TRACE_DEBUG, "%s() called", __FUNCTION__);

  if(ntop_lua_check(vm, __FUNCTION__, 1, LUA_TSTRING))
    return(CONST_LUA_PARAM_ERROR);

  char *key = (char*)lua_tostring(vm, 1);
  if(key == NULL)
    return(CONST_LUA_PARAM_ERROR);

  ntop->getRedis()->delKey(key);
  return(CONST_LUA_OK);
}
| 0 |
[
"CWE-284",
"CWE-352"
] |
ntopng
|
f91fbe3d94c8346884271838ae3406ae633f6f15
| 210,228,030,417,374,600,000,000,000,000,000,000,000 | 10 |
Check for presence of crsf in admin scripts
|
/*
 * Mark an in-lookup dentry as done: remove it from the in-lookup hash,
 * clear DCACHE_PAR_LOOKUP, and wake anyone waiting on d_wait.
 *
 * NOTE(review): d_u is a union -- d_in_lookup_hash is only valid while
 * DCACHE_PAR_LOOKUP is set, so d_alias is reinitialized afterwards for
 * the union's normal use.  The wakeup happens under the hash-bucket lock,
 * presumably so waiters cannot miss it -- confirm against dcache docs.
 */
void __d_lookup_done(struct dentry *dentry)
{
	struct hlist_bl_head *b = in_lookup_hash(dentry->d_parent,
						 dentry->d_name.hash);
	hlist_bl_lock(b);
	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
	__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
	wake_up_all(dentry->d_wait);
	dentry->d_wait = NULL;
	hlist_bl_unlock(b);
	INIT_HLIST_NODE(&dentry->d_u.d_alias);
	INIT_LIST_HEAD(&dentry->d_lru);
}
| 0 |
[
"CWE-362",
"CWE-399"
] |
linux
|
49d31c2f389acfe83417083e1208422b4091cd9e
| 289,462,674,501,000,000,000,000,000,000,000,000,000 | 13 |
dentry name snapshots
take_dentry_name_snapshot() takes a safe snapshot of dentry name;
if the name is a short one, it gets copied into caller-supplied
structure, otherwise an extra reference to external name is grabbed
(those are never modified). In either case the pointer to stable
string is stored into the same structure.
dentry must be held by the caller of take_dentry_name_snapshot(),
but may be freely dropped afterwards - the snapshot will stay
until destroyed by release_dentry_name_snapshot().
Intended use:
struct name_snapshot s;
take_dentry_name_snapshot(&s, dentry);
...
access s.name
...
release_dentry_name_snapshot(&s);
Replaces fsnotify_oldname_...(), gets used in fsnotify to obtain the name
to pass down with event.
Signed-off-by: Al Viro <[email protected]>
|
/* Return the package enclosing the current stored routine, or NULL when
 * there is no routine head (and hence no package) in this LEX. */
sp_package *LEX::get_sp_package() const
{
  if (sphead == NULL)
    return NULL;
  return sphead->get_package();
}
| 0 |
[
"CWE-703"
] |
server
|
39feab3cd31b5414aa9b428eaba915c251ac34a2
| 32,253,569,916,247,244,000,000,000,000,000,000,000 | 4 |
MDEV-26412 Server crash in Item_field::fix_outer_field for INSERT SELECT
IF an INSERT/REPLACE SELECT statement contained an ON expression in the top
level select and this expression used a subquery with a column reference
that could not be resolved then an attempt to resolve this reference as
an outer reference caused a crash of the server. This happened because the
outer context field in the Name_resolution_context structure was not set
to NULL for such references. Rather it pointed to the first element in
the select_stack.
Note that starting from 10.4 we cannot use the SELECT_LEX::outer_select()
method when parsing a SELECT construct.
Approved by Oleksandr Byelkin <[email protected]>
|
/*
 * PMD-sized page fault handler for XFS DAX files.
 *
 * Falls back to regular (PTE) faults for non-DAX inodes.  Takes the
 * MMAPLOCK shared around the DAX fault to serialize against truncate
 * and similar operations, inside the sb pagefault freeze protection.
 * Returns a VM_FAULT_* code.
 */
xfs_filemap_pmd_fault(
	struct vm_area_struct	*vma,
	unsigned long		addr,
	pmd_t			*pmd,
	unsigned int		flags)
{
	struct inode		*inode = file_inode(vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	int			ret;

	if (!IS_DAX(inode))
		return VM_FAULT_FALLBACK;

	trace_xfs_filemap_pmd_fault(ip);

	sb_start_pagefault(inode->i_sb);
	/* NOTE(review): timestamp update is done unconditionally here,
	 * even for read faults -- presumed intentional, confirm. */
	file_update_time(vma->vm_file);
	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	ret = __dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_direct,
				    xfs_end_io_dax_write);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	sb_end_pagefault(inode->i_sb);

	return ret;
}
| 0 |
[
"CWE-19"
] |
linux
|
fc0561cefc04e7803c0f6501ca4f310a502f65b8
| 255,004,557,508,184,940,000,000,000,000,000,000,000 | 25 |
xfs: optimise away log forces on timestamp updates for fdatasync
xfs: timestamp updates cause excessive fdatasync log traffic
Sage Weil reported that a ceph test workload was writing to the
log on every fdatasync during an overwrite workload. Event tracing
showed that the only metadata modification being made was the
timestamp updates during the write(2) syscall, but fdatasync(2)
is supposed to ignore them. The key observation was that the
transactions in the log all looked like this:
INODE: #regs: 4 ino: 0x8b flags: 0x45 dsize: 32
And contained a flags field of 0x45 or 0x85, and had data and
attribute forks following the inode core. This means that the
timestamp updates were triggering dirty relogging of previously
logged parts of the inode that hadn't yet been flushed back to
disk.
There are two parts to this problem. The first is that XFS relogs
dirty regions in subsequent transactions, so it carries around the
fields that have been dirtied since the last time the inode was
written back to disk, not since the last time the inode was forced
into the log.
The second part is that on v5 filesystems, the inode change count
update during inode dirtying also sets the XFS_ILOG_CORE flag, so
on v5 filesystems this makes a timestamp update dirty the entire
inode.
As a result when fdatasync is run, it looks at the dirty fields in
the inode, and sees more than just the timestamp flag, even though
the only metadata change since the last fdatasync was just the
timestamps. Hence we force the log on every subsequent fdatasync
even though it is not needed.
To fix this, add a new field to the inode log item that tracks
changes since the last time fsync/fdatasync forced the log to flush
the changes to the journal. This flag is updated when we dirty the
inode, but we do it before updating the change count so it does not
carry the "core dirty" flag from timestamp updates. The fields are
zeroed when the inode is marked clean (due to writeback/freeing) or
when an fsync/datasync forces the log. Hence if we only dirty the
timestamps on the inode between fsync/fdatasync calls, the fdatasync
will not trigger another log force.
Over 100 runs of the test program:
Ext4 baseline:
runtime: 1.63s +/- 0.24s
avg lat: 1.59ms +/- 0.24ms
iops: ~2000
XFS, vanilla kernel:
runtime: 2.45s +/- 0.18s
avg lat: 2.39ms +/- 0.18ms
log forces: ~400/s
iops: ~1000
XFS, patched kernel:
runtime: 1.49s +/- 0.26s
avg lat: 1.46ms +/- 0.25ms
log forces: ~30/s
iops: ~1500
Reported-by: Sage Weil <[email protected]>
Signed-off-by: Dave Chinner <[email protected]>
Reviewed-by: Brian Foster <[email protected]>
Signed-off-by: Dave Chinner <[email protected]>
|
/*
 * Ask gpg-agent to generate a new key described by the S-expression text
 * KEYPARMS.  If NO_PROTECTION is set the key is stored unprotected;
 * otherwise, if PASSPHRASE is non-NULL it is supplied via inquiry.
 * CACHE_NONCE_ADDR may carry a passphrase-cache nonce in and out.
 * On success the public key is returned as a gcry_sexp_t in R_PUBKEY
 * (caller releases); on error an gpg error code is returned and
 * *R_PUBKEY is NULL.
 */
agent_genkey (ctrl_t ctrl, char **cache_nonce_addr,
              const char *keyparms, int no_protection,
              const char *passphrase, gcry_sexp_t *r_pubkey)
{
  gpg_error_t err;
  struct genkey_parm_s gk_parm;
  struct cache_nonce_parm_s cn_parm;
  struct default_inq_parm_s dfltparm;
  membuf_t data;
  size_t len;
  unsigned char *buf;
  char line[ASSUAN_LINELENGTH];

  memset (&dfltparm, 0, sizeof dfltparm);
  dfltparm.ctrl = ctrl;

  *r_pubkey = NULL;
  err = start_agent (ctrl, 0);
  if (err)
    return err;
  dfltparm.ctx = agent_ctx;

  /* Reset the agent connection to a clean state before GENKEY. */
  err = assuan_transact (agent_ctx, "RESET",
                         NULL, NULL, NULL, NULL, NULL, NULL);
  if (err)
    return err;

  init_membuf (&data, 1024);
  gk_parm.dflt     = &dfltparm;
  gk_parm.keyparms = keyparms;
  gk_parm.passphrase = passphrase;
  /* --no-protection and --inq-passwd are mutually exclusive; the cache
     nonce, when already known, is appended as an argument. */
  snprintf (line, sizeof line, "GENKEY%s%s%s",
            no_protection? " --no-protection" :
            passphrase   ? " --inq-passwd" :
            /*          */ "",
            cache_nonce_addr && *cache_nonce_addr? " ":"",
            cache_nonce_addr && *cache_nonce_addr? *cache_nonce_addr:"");
  cn_parm.cache_nonce_addr = cache_nonce_addr;
  cn_parm.passwd_nonce_addr = NULL;
  err = assuan_transact (agent_ctx, line,
                         membuf_data_cb, &data,
                         inq_genkey_parms, &gk_parm,
                         cache_nonce_status_cb, &cn_parm);
  if (err)
    {
      /* Release any partial output collected before the failure. */
      xfree (get_membuf (&data, &len));
      return err;
    }

  buf = get_membuf (&data, &len);
  if (!buf)
    err = gpg_error_from_syserror ();
  else
    {
      /* Parse the agent's canonical S-expression reply. */
      err = gcry_sexp_sscan (r_pubkey, NULL, buf, len);
      xfree (buf);
    }
  return err;
}
| 0 |
[
"CWE-20"
] |
gnupg
|
2183683bd633818dd031b090b5530951de76f392
| 130,276,530,816,545,220,000,000,000,000,000,000,000 | 59 |
Use inline functions to convert buffer data to scalars.
* common/host2net.h (buf16_to_ulong, buf16_to_uint): New.
(buf16_to_ushort, buf16_to_u16): New.
(buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New.
--
Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to
avoid all sign extension on shift problems. Hanno Böck found a case
with an invalid read due to this problem. To fix that once and for
all almost all uses of "<< 24" and "<< 8" are changed by this patch to
use an inline function from host2net.h.
Signed-off-by: Werner Koch <[email protected]>
|
/**
 * Compare two values of the RelaxNG builtin datatype library.
 * "string" compares byte-for-byte; "token" compares after whitespace
 * normalization.  Returns 1 on equality, 0 on inequality, -1 on
 * internal error (unknown type or normalization failure).
 */
xmlRelaxNGDefaultTypeCompare(void *data ATTRIBUTE_UNUSED,
                             const xmlChar * type,
                             const xmlChar * value1,
                             xmlNodePtr ctxt1 ATTRIBUTE_UNUSED,
                             void *comp1 ATTRIBUTE_UNUSED,
                             const xmlChar * value2,
                             xmlNodePtr ctxt2 ATTRIBUTE_UNUSED)
{
    if (xmlStrEqual(type, BAD_CAST "string"))
        return (xmlStrEqual(value1, value2));

    if (xmlStrEqual(type, BAD_CAST "token")) {
        xmlChar *norm1, *norm2;
        int ret;

        /* Byte-identical strings are trivially equal tokens. */
        if (xmlStrEqual(value1, value2))
            return (1);

        /*
         * TODO: trivial optimizations are possible by
         *       computing at compile-time
         */
        norm1 = xmlRelaxNGNormalize(NULL, value1);
        norm2 = xmlRelaxNGNormalize(NULL, value2);
        if ((norm1 == NULL) || (norm2 == NULL))
            ret = -1;
        else
            ret = xmlStrEqual(norm1, norm2) ? 1 : 0;
        if (norm1 != NULL)
            xmlFree(norm1);
        if (norm2 != NULL)
            xmlFree(norm2);
        return (ret);
    }

    /* Unknown builtin type name. */
    return (-1);
}
| 0 |
[
"CWE-134"
] |
libxml2
|
502f6a6d08b08c04b3ddfb1cd21b2f699c1b7f5b
| 74,779,983,977,685,760,000,000,000,000,000,000,000 | 38 |
More format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
adds a new xmlEscapeFormatString() function to escape composed format
strings
|
/* DER-encode the AlgorithmIdentifier for OID 'tag' into 'der', allocating
 * from cms->arena.  Returns 0 on success, negative on failure. */
encode_algorithm_id(cms_context *cms, SECItem *der, SECOidTag tag)
{
	SECAlgorithmID algid;
	int status;

	status = generate_algorithm_id(cms, &algid, tag);
	if (status < 0)
		return status;

	/* cnreterr() logs and returns -1 from this function on failure. */
	if (SEC_ASN1EncodeItem(cms->arena, der, &algid,
			       SECOID_AlgorithmIDTemplate) == NULL)
		cnreterr(-1, cms, "could not encode Algorithm ID");

	return 0;
}
| 0 |
[
"CWE-787"
] |
pesign
|
b879dda52f8122de697d145977c285fb0a022d76
| 165,292,840,420,730,510,000,000,000,000,000,000,000 | 16 |
Handle NULL pwdata in cms_set_pw_data()
When 12f16710ee44ef64ddb044a3523c3c4c4d90039a rewrote this function, it
didn't handle the NULL pwdata invocation from daemon.c. This leads to a
explicit NULL dereference and crash on all attempts to daemonize pesign.
Signed-off-by: Robbie Harwood <[email protected]>
|
// AES-256-GCM encrypt: writes ciphertext followed by the authentication
// tag into pEncryptedDataAndTag and stores the total size in
// *pcbEncryptedDataAndTag.  Returns false (with *pcbEncryptedDataAndTag
// set to 0) when the caller's buffer is too small.
bool AES_GCM_EncryptContext::Encrypt(
	const void *pPlaintextData, size_t cbPlaintextData,
	const void *pIV,
	void *pEncryptedDataAndTag, uint32 *pcbEncryptedDataAndTag,
	const void *pAdditionalAuthenticationData, size_t cbAuthenticationData
) {
	// Output must hold the ciphertext plus the GCM tag.
	const size_t cbRequired = cbPlaintextData + crypto_aead_aes256gcm_ABYTES;
	if ( cbRequired > *pcbEncryptedDataAndTag )
	{
		*pcbEncryptedDataAndTag = 0;
		return false;
	}

	unsigned long long cbWritten;
	crypto_aead_aes256gcm_encrypt_afternm(
		static_cast<unsigned char*>( pEncryptedDataAndTag ), &cbWritten,
		static_cast<const unsigned char*>( pPlaintextData ), cbPlaintextData,
		static_cast<const unsigned char*>(pAdditionalAuthenticationData), cbAuthenticationData,
		nullptr,
		static_cast<const unsigned char*>( pIV ),
		static_cast<const crypto_aead_aes256gcm_state*>( m_ctx )
	);

	*pcbEncryptedDataAndTag = cbWritten;
	return true;
}
| 0 |
[
"CWE-787"
] |
GameNetworkingSockets
|
bea84e2844b647532a9b7fbc3a6a8989d66e49e3
| 272,717,973,150,967,370,000,000,000,000,000,000,000 | 28 |
Check if output buffer is too small.
It really seems like libsodium (whose entire purpose is to make crypto
idiot-proof) making me mess with these details is a flaw in the API design.
Also, correct Hungarian.
|
/* Serialize a CACHE_GLYPH secondary drawing order into the update stream.
 *
 * The 6-byte secondary-order header is reserved first and back-patched
 * after the payload is written, because orderLength and extraFlags are
 * only known once update_write_cache_glyph_order() has run.
 * Returns TRUE on success, FALSE on stream/capacity failure. */
static BOOL update_send_cache_glyph(rdpContext* context,
                                    const CACHE_GLYPH_ORDER* cache_glyph)
{
	wStream* s;
	UINT16 flags;
	size_t bm, em, inf;
	int headerLength;
	INT16 orderLength;
	rdpUpdate* update = context->update;
	flags = 0;
	headerLength = 6;
	/* Flush first if the estimated size would overflow the buffer. */
	inf = update_approximate_cache_glyph_order(cache_glyph, &flags);
	update_check_flush(context, headerLength + inf);
	s = update->us;

	if (!s)
		return FALSE;

	bm = Stream_GetPosition(s);

	if (!Stream_EnsureRemainingCapacity(s, headerLength))
		return FALSE;

	/* Skip the header; it is written below once the payload size is known. */
	Stream_Seek(s, headerLength);

	if (!update_write_cache_glyph_order(s, cache_glyph, &flags))
		return FALSE;

	em = Stream_GetPosition(s);
	/* NOTE(review): orderLength excludes 13 bytes -- presumably the
	 * shared order preamble per MS-RDPEGDI; confirm against the spec. */
	orderLength = (em - bm) - 13;
	Stream_SetPosition(s, bm);
	Stream_Write_UINT8(s, ORDER_STANDARD |
	                   ORDER_SECONDARY); /* controlFlags (1 byte) */
	Stream_Write_UINT16(s, orderLength); /* orderLength (2 bytes) */
	Stream_Write_UINT16(s, flags); /* extraFlags (2 bytes) */
	Stream_Write_UINT8(s, ORDER_TYPE_CACHE_GLYPH); /* orderType (1 byte) */
	Stream_SetPosition(s, em);
	update->numberOrders++;
	return TRUE;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
FreeRDP
|
445a5a42c500ceb80f8fa7f2c11f3682538033f3
| 194,709,737,151,970,420,000,000,000,000,000,000,000 | 40 |
Fixed CVE-2018-8786
Thanks to Eyal Itkin from Check Point Software Technologies.
|
/*
 * Return the request that follows @rq in the elevator's red-black tree,
 * or NULL when @rq is the last entry.  @q is part of the elevator-ops
 * signature and is not used here.
 */
struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *next;

	next = rb_next(&rq->rb_node);
	if (!next)
		return NULL;

	return rb_entry_rq(next);
}
| 0 |
[
"CWE-416"
] |
linux
|
c3e2219216c92919a6bd1711f340f5faa98695e6
| 317,512,218,817,022,500,000,000,000,000,000,000,000 | 10 |
block: free sched's request pool in blk_cleanup_queue
In theory, IO scheduler belongs to request queue, and the request pool
of sched tags belongs to the request queue too.
However, the current tags allocation interfaces are re-used for both
driver tags and sched tags, and driver tags is definitely host wide,
and doesn't belong to any request queue, same with its request pool.
So we need tagset instance for freeing request of sched tags.
Meantime, blk_mq_free_tag_set() often follows blk_cleanup_queue() in case
of non-BLK_MQ_F_TAG_SHARED, this way requires that request pool of sched
tags to be freed before calling blk_mq_free_tag_set().
Commit 47cdee29ef9d94e ("block: move blk_exit_queue into __blk_release_queue")
moves blk_exit_queue into __blk_release_queue for simplifying the fast
path in generic_make_request(), then causes oops during freeing requests
of sched tags in __blk_release_queue().
Fix the above issue by move freeing request pool of sched tags into
blk_cleanup_queue(); this way is safe because the queue has been frozen and no
in-queue requests exist at that time. Freeing sched tags has to be kept in queue's
release handler because there might be un-completed dispatch activity
which might refer to sched tags.
Cc: Bart Van Assche <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Fixes: 47cdee29ef9d94e485eb08f962c74943023a5271 ("block: move blk_exit_queue into __blk_release_queue")
Tested-by: Yi Zhang <[email protected]>
Reported-by: kernel test robot <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
|
/*
 * Fire the attribute-update callback for (obj, sym), or queue it for
 * later delivery when callbacks are currently disabled on the graph.
 */
void agmethod_upd(Agraph_t * g, void *obj, Agsym_t * sym)
{
    if (!g->clos->callbacks_enabled) {
	agrecord_callback(g, obj, CB_UPDATE, sym);
	return;
    }
    agupdcb(g, obj, sym, g->clos->cb);
}
| 0 |
[
"CWE-476"
] |
graphviz
|
839085f8026afd6f6920a0c31ad2a9d880d97932
| 270,685,004,604,904,930,000,000,000,000,000,000,000 | 7 |
attempted fix for null pointer deference on malformed input
|
bool CUser::SetChanBufferSize(unsigned int u, bool bForce) {
if (!bForce && u > CZNC::Get().GetMaxBufferSize()) return false;
for (CIRCNetwork* pNetwork : m_vIRCNetworks) {
for (CChan* pChan : pNetwork->GetChans()) {
pChan->InheritBufferCount(u, bForce);
}
}
m_uChanBufferSize = u;
return true;
}
| 0 |
[
"CWE-20"
] |
znc
|
64613bc8b6b4adf1e32231f9844d99cd512b8973
| 156,710,113,205,453,550,000,000,000,000,000,000,000 | 10 |
Don't crash if user specified invalid encoding.
This is CVE-2019-9917
|
/*
 * Read `size` bytes from the TNEF input stream into `data`, optionally
 * accumulating a simple additive 16-bit checksum over the bytes read.
 *
 * TNEF     - stream state; its IO.ReadProc callback supplies the bytes.
 * data     - destination buffer, must hold at least `size` bytes.
 * size     - number of bytes to read.
 * checksum - if non-NULL, receives the sum of all bytes modulo 2^16.
 *
 * Returns 0 on success or YTNEF_ERROR_READING_DATA on a short read.
 *
 * Fix: the loop index was a signed `int` compared against the unsigned
 * DWORD `size` (implicit conversion; broken for size > INT_MAX).  Use a
 * DWORD index instead.
 */
int TNEFRawRead(TNEFStruct *TNEF, BYTE *data, DWORD size, WORD *checksum) {
  DWORD i;

  if (TNEF->IO.ReadProc(&TNEF->IO, sizeof(BYTE), size, data) < size) {
    if (TNEF->Debug >= 1)
      printf("ERROR: Error reading data\n");
    return YTNEF_ERROR_READING_DATA;
  }

  if (checksum != NULL) {
    *checksum = 0;
    /* WORD arithmetic wraps naturally: checksum is the byte sum mod 65536. */
    for (i = 0; i < size; i++) {
      *checksum = (WORD)(*checksum + data[i]);
    }
  }
  return 0;
}
| 0 |
[
"CWE-399",
"CWE-125"
] |
ytnef
|
3cb0f914d6427073f262e1b2b5fd973e3043cdf7
| 7,789,261,343,634,154,000,000,000,000,000,000,000 | 20 |
BugFix - Potential OOB with Fields of Size 0
Thanks to @hannob for contributing a malformed TNEF stream with
a Version field of size 0. Now such files will return an error
indicating invalid data.
|
/*
 * Populate the inline section of a signature BSF from the T10-DIF
 * parameters described by @domain.  Pure field translation: reads
 * @domain, writes @inl (multi-byte fields in big-endian as the device
 * expects), no other side effects.
 */
static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
			      struct mlx5_bsf_inl *inl)
{
	/* Valid inline section and allow BSF refresh */
	inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
				       MLX5_BSF_REFRESH_DIF);
	inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
	inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
	/* repeating block */
	inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
	/* Guard type requested by the domain: CRC vs IP checksum. */
	inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
		MLX5_DIF_CRC : MLX5_DIF_IPCS;

	if (domain->sig.dif.ref_remap)
		inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;

	if (domain->sig.dif.app_escape) {
		/* APPREF escape covers both app and ref tags; otherwise
		 * only the app tag escape is enabled. */
		if (domain->sig.dif.ref_escape)
			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
		else
			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
	}

	inl->dif_app_bitmask_check =
		cpu_to_be16(domain->sig.dif.apptag_check_mask);
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
linux
|
0625b4ba1a5d4703c7fb01c497bd6c156908af00
| 313,560,854,648,884,540,000,000,000,000,000,000,000 | 26 |
IB/mlx5: Fix leaking stack memory to userspace
mlx5_ib_create_qp_resp was never initialized and only the first 4 bytes
were written.
Fixes: 41d902cb7c32 ("RDMA/mlx5: Fix definition of mlx5_ib_create_qp_resp")
Cc: <[email protected]>
Acked-by: Leon Romanovsky <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
|
// Big-endian value stored one W-sized field past _e.
// NOTE(review): assumes _e points at a readable entry of at least
// 2*sizeof(W) bytes -- bounds are the caller's responsibility.
unsigned int run() const { return be::peek<W>(_e+sizeof(W)); }
| 0 |
[
"CWE-476"
] |
graphite
|
db132b4731a9b4c9534144ba3a18e65b390e9ff6
| 339,683,168,006,297,780,000,000,000,000,000,000,000 | 1 |
Deprecate and make ineffective gr_face_dumbRendering
|
/*
 * getname() handler for SOCK_PACKET sockets: report the address family
 * and the name of the bound device in @uaddr, which is later copied to
 * userspace.
 *
 * Fix (infoleak): strlcpy() only writes strlen(name)+1 bytes, so for
 * short device names up to 13 bytes of uninitialized kernel stack in
 * sa_data were leaked to userspace.  strncpy() zero-pads the remainder
 * of the 14-byte sa_data field, so every byte copied out is defined.
 */
static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
			       int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;

	if (peer)
		return -EOPNOTSUPP;

	uaddr->sa_family = AF_PACKET;
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
	if (dev)
		strncpy(uaddr->sa_data, dev->name, 14);
	else
		memset(uaddr->sa_data, 0, 14);
	rcu_read_unlock();
	*uaddr_len = sizeof(*uaddr);

	return 0;
}
| 1 |
[
"CWE-909"
] |
linux-2.6
|
67286640f638f5ad41a946b9a3dc75327950248f
| 328,198,438,257,263,270,000,000,000,000,000,000,000 | 21 |
net: packet: fix information leak to userland
packet_getname_spkt() doesn't initialize all members of sa_data field of
sockaddr struct if strlen(dev->name) < 13. This structure is then copied
to userland. It leads to leaking of contents of kernel stack memory.
We have to fully fill sa_data with strncpy() instead of strlcpy().
The same with packet_getname(): it doesn't initialize sll_pkttype field of
sockaddr_ll. Set it to zero.
Signed-off-by: Vasiliy Kulikov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/*
 * Load a CRL file from disk and parse it into the given chain.
 * The temporary file buffer is wiped before it is released.
 *
 * Returns 0 on success, or the error from load_file() / x509parse_crl().
 */
int x509parse_crlfile( x509_crl *chain, const char *path )
{
    unsigned char *buf;
    size_t n;
    int ret = load_file( path, &buf, &n );

    if( ret != 0 )
        return( ret );

    ret = x509parse_crl( chain, buf, n );

    /* Clear n + 1 bytes: load_file() NUL-terminates the buffer. */
    memset( buf, 0, n + 1 );
    free( buf );

    return( ret );
}
| 0 |
[
"CWE-310"
] |
polarssl
|
43f9799ce61c6392a014d0a2ea136b4b3a9ee194
| 329,606,319,519,063,570,000,000,000,000,000,000,000 | 16 |
RSA blinding on CRT operations to counter timing attacks
|
/*
 * Encode an LDAP message, together with any attached controls, into a
 * BER blob.
 *
 * msg              - request/response union to encode; msg->type selects
 *                    the wire layout of the protocolOp.
 * control_handlers - table used to encode msg->controls entries.
 * result           - receives the encoded blob, allocated on mem_ctx.
 * mem_ctx          - talloc context for the result and scratch state.
 *
 * Returns true on success.  Every asn1_* call is checked; any failure
 * branches to err: so the asn1 context is always freed.
 */
_PUBLIC_ bool ldap_encode(struct ldap_message *msg,
			  const struct ldap_control_handler *control_handlers,
			  DATA_BLOB *result, TALLOC_CTX *mem_ctx)
{
	struct asn1_data *data = asn1_init(mem_ctx);
	int i, j;

	if (!data) return false;

	/* Outer envelope: SEQUENCE { messageID, protocolOp, controls? } */
	if (!asn1_push_tag(data, ASN1_SEQUENCE(0))) goto err;
	if (!asn1_write_Integer(data, msg->messageid)) goto err;

	switch (msg->type) {
	case LDAP_TAG_BindRequest: {
		struct ldap_BindRequest *r = &msg->r.BindRequest;
		if (!asn1_push_tag(data, ASN1_APPLICATION(msg->type))) goto err;
		if (!asn1_write_Integer(data, r->version)) goto err;
		if (!asn1_write_OctetString(data, r->dn,
				       (r->dn != NULL) ? strlen(r->dn) : 0)) goto err;

		switch (r->mechanism) {
		case LDAP_AUTH_MECH_SIMPLE:
			/* context, primitive */
			if (!asn1_push_tag(data, ASN1_CONTEXT_SIMPLE(0))) goto err;
			if (!asn1_write(data, r->creds.password,
				   strlen(r->creds.password))) goto err;
			if (!asn1_pop_tag(data)) goto err;
			break;
		case LDAP_AUTH_MECH_SASL:
			/* context, constructed */
			if (!asn1_push_tag(data, ASN1_CONTEXT(3))) goto err;
			if (!asn1_write_OctetString(data, r->creds.SASL.mechanism,
					       strlen(r->creds.SASL.mechanism))) goto err;
			if (r->creds.SASL.secblob) {
				if (!asn1_write_OctetString(data, r->creds.SASL.secblob->data,
						       r->creds.SASL.secblob->length)) goto err;
			}
			if (!asn1_pop_tag(data)) goto err;
			break;
		default:
			/* unknown auth mechanism: encoding failure */
			goto err;
		}

		if (!asn1_pop_tag(data)) goto err;
		break;
	}
	case LDAP_TAG_BindResponse: {
		struct ldap_BindResponse *r = &msg->r.BindResponse;
		if (!asn1_push_tag(data, ASN1_APPLICATION(msg->type))) goto err;
		if (!ldap_encode_response(data, &r->response)) goto err;
		if (r->SASL.secblob) {
			/* optional serverSaslCreds, context tag 7 */
			if (!asn1_write_ContextSimple(data, 7, r->SASL.secblob)) goto err;
		}
		if (!asn1_pop_tag(data)) goto err;
		break;
	}
	case LDAP_TAG_UnbindRequest: {
		/* struct ldap_UnbindRequest *r = &msg->r.UnbindRequest; */
		/* UnbindRequest has an empty body */
		if (!asn1_push_tag(data, ASN1_APPLICATION_SIMPLE(msg->type))) goto err;
		if (!asn1_pop_tag(data)) goto err;
		break;
	}
	case LDAP_TAG_SearchRequest: {
		struct ldap_SearchRequest *r = &msg->r.SearchRequest;
		if (!asn1_push_tag(data, ASN1_APPLICATION(msg->type))) goto err;
		if (!asn1_write_OctetString(data, r->basedn, strlen(r->basedn))) goto err;
		if (!asn1_write_enumerated(data, r->scope)) goto err;
		if (!asn1_write_enumerated(data, r->deref)) goto err;
		if (!asn1_write_Integer(data, r->sizelimit)) goto err;
		if (!asn1_write_Integer(data, r->timelimit)) goto err;
		if (!asn1_write_BOOLEAN(data, r->attributesonly)) goto err;

		if (!ldap_push_filter(data, r->tree)) {
			goto err;
		}

		/* requested attribute list */
		if (!asn1_push_tag(data, ASN1_SEQUENCE(0))) goto err;
		for (i=0; i<r->num_attributes; i++) {
			if (!asn1_write_OctetString(data, r->attributes[i],
					       strlen(r->attributes[i]))) goto err;
		}
		if (!asn1_pop_tag(data)) goto err;
		if (!asn1_pop_tag(data)) goto err;
		break;
	}
	case LDAP_TAG_SearchResultEntry: {
		struct ldap_SearchResEntry *r = &msg->r.SearchResultEntry;
		if (!asn1_push_tag(data, ASN1_APPLICATION(msg->type))) goto err;
		if (!asn1_write_OctetString(data, r->dn, strlen(r->dn))) goto err;
		/* attributes: SEQUENCE OF { type, SET/SEQ OF values } */
		if (!asn1_push_tag(data, ASN1_SEQUENCE(0))) goto err;
		for (i=0; i<r->num_attributes; i++) {
			struct ldb_message_element *attr = &r->attributes[i];
			if (!asn1_push_tag(data, ASN1_SEQUENCE(0))) goto err;
			if (!asn1_write_OctetString(data, attr->name,
					       strlen(attr->name))) goto err;
			if (!asn1_push_tag(data, ASN1_SEQUENCE(1))) goto err;
			for (j=0; j<attr->num_values; j++) {
				if (!asn1_write_OctetString(data,
						       attr->values[j].data,
						       attr->values[j].length)) goto err;
			}
			if (!asn1_pop_tag(data)) goto err;
			if (!asn1_pop_tag(data)) goto err;
		}
		if (!asn1_pop_tag(data)) goto err;
		if (!asn1_pop_tag(data)) goto err;
		break;
	}
	case LDAP_TAG_SearchResultDone: {
		struct ldap_Result *r = &msg->r.SearchResultDone;
		if (!asn1_push_tag(data, ASN1_APPLICATION(msg->type))) goto err;
		if (!ldap_encode_response(data, r)) goto err;
		if (!asn1_pop_tag(data)) goto err;
		break;
	}
	case LDAP_TAG_ModifyRequest: {
		struct ldap_ModifyRequest *r = &msg->r.ModifyRequest;
		if (!asn1_push_tag(data, ASN1_APPLICATION(msg->type))) goto err;
		if (!asn1_write_OctetString(data, r->dn, strlen(r->dn))) goto err;
		/* changes: SEQUENCE OF { operation, modification } */
		if (!asn1_push_tag(data, ASN1_SEQUENCE(0))) goto err;

		for (i=0; i<r->num_mods; i++) {
			struct ldb_message_element *attrib = &r->mods[i].attrib;
			if (!asn1_push_tag(data, ASN1_SEQUENCE(0))) goto err;
			if (!asn1_write_enumerated(data, r->mods[i].type)) goto err;
			if (!asn1_push_tag(data, ASN1_SEQUENCE(0))) goto err;
			if (!asn1_write_OctetString(data, attrib->name,
					       strlen(attrib->name))) goto err;
			if (!asn1_push_tag(data, ASN1_SET)) goto err;
			for (j=0; j<attrib->num_values; j++) {
				if (!asn1_write_OctetString(data,
						       attrib->values[j].data,
						       attrib->values[j].length)) goto err;
			}
			if (!asn1_pop_tag(data)) goto err;
			if (!asn1_pop_tag(data)) goto err;
			if (!asn1_pop_tag(data)) goto err;
		}

		if (!asn1_pop_tag(data)) goto err;
		if (!asn1_pop_tag(data)) goto err;
		break;
	}
	case LDAP_TAG_ModifyResponse: {
		struct ldap_Result *r = &msg->r.ModifyResponse;
		if (!asn1_push_tag(data, ASN1_APPLICATION(msg->type))) goto err;
		if (!ldap_encode_response(data, r)) goto err;
		if (!asn1_pop_tag(data)) goto err;
		break;
	}
	case LDAP_TAG_AddRequest: {
		struct ldap_AddRequest *r = &msg->r.AddRequest;
		if (!asn1_push_tag(data, ASN1_APPLICATION(msg->type))) goto err;
		if (!asn1_write_OctetString(data, r->dn, strlen(r->dn))) goto err;
		/* attribute list: SEQUENCE OF { type, SET OF values } */
		if (!asn1_push_tag(data, ASN1_SEQUENCE(0))) goto err;

		for (i=0; i<r->num_attributes; i++) {
			struct ldb_message_element *attrib = &r->attributes[i];
			if (!asn1_push_tag(data, ASN1_SEQUENCE(0))) goto err;
			if (!asn1_write_OctetString(data, attrib->name,
					       strlen(attrib->name))) goto err;
			if (!asn1_push_tag(data, ASN1_SET)) goto err;
			for (j=0; j<r->attributes[i].num_values; j++) {
				if (!asn1_write_OctetString(data,
						       attrib->values[j].data,
						       attrib->values[j].length)) goto err;
			}
			if (!asn1_pop_tag(data)) goto err;
			if (!asn1_pop_tag(data)) goto err;
		}

		if (!asn1_pop_tag(data)) goto err;
		if (!asn1_pop_tag(data)) goto err;
		break;
	}
	case LDAP_TAG_AddResponse: {
		struct ldap_Result *r = &msg->r.AddResponse;
		if (!asn1_push_tag(data, ASN1_APPLICATION(msg->type))) goto err;
		if (!ldap_encode_response(data, r)) goto err;
		if (!asn1_pop_tag(data)) goto err;
		break;
	}
	case LDAP_TAG_DelRequest: {
		struct ldap_DelRequest *r = &msg->r.DelRequest;
		/* DelRequest is a primitive: the DN is the whole body */
		if (!asn1_push_tag(data, ASN1_APPLICATION_SIMPLE(msg->type))) goto err;
		if (!asn1_write(data, r->dn, strlen(r->dn))) goto err;
		if (!asn1_pop_tag(data)) goto err;
		break;
	}
	case LDAP_TAG_DelResponse: {
		struct ldap_Result *r = &msg->r.DelResponse;
		if (!asn1_push_tag(data, ASN1_APPLICATION(msg->type))) goto err;
		if (!ldap_encode_response(data, r)) goto err;
		if (!asn1_pop_tag(data)) goto err;
		break;
	}
	case LDAP_TAG_ModifyDNRequest: {
		struct ldap_ModifyDNRequest *r = &msg->r.ModifyDNRequest;
		if (!asn1_push_tag(data, ASN1_APPLICATION(msg->type))) goto err;
		if (!asn1_write_OctetString(data, r->dn, strlen(r->dn))) goto err;
		if (!asn1_write_OctetString(data, r->newrdn, strlen(r->newrdn))) goto err;
		if (!asn1_write_BOOLEAN(data, r->deleteolddn)) goto err;
		if (r->newsuperior) {
			/* optional newSuperior, context tag 0 */
			if (!asn1_push_tag(data, ASN1_CONTEXT_SIMPLE(0))) goto err;
			if (!asn1_write(data, r->newsuperior,
				   strlen(r->newsuperior))) goto err;
			if (!asn1_pop_tag(data)) goto err;
		}
		if (!asn1_pop_tag(data)) goto err;
		break;
	}
	case LDAP_TAG_ModifyDNResponse: {
		struct ldap_Result *r = &msg->r.ModifyDNResponse;
		if (!asn1_push_tag(data, ASN1_APPLICATION(msg->type))) goto err;
		if (!ldap_encode_response(data, r)) goto err;
		if (!asn1_pop_tag(data)) goto err;
		break;
	}
	case LDAP_TAG_CompareRequest: {
		struct ldap_CompareRequest *r = &msg->r.CompareRequest;
		if (!asn1_push_tag(data, ASN1_APPLICATION(msg->type))) goto err;
		if (!asn1_write_OctetString(data, r->dn, strlen(r->dn))) goto err;
		/* AttributeValueAssertion */
		if (!asn1_push_tag(data, ASN1_SEQUENCE(0))) goto err;
		if (!asn1_write_OctetString(data, r->attribute,
				       strlen(r->attribute))) goto err;
		if (!asn1_write_OctetString(data, r->value.data,
				       r->value.length)) goto err;
		if (!asn1_pop_tag(data)) goto err;
		if (!asn1_pop_tag(data)) goto err;
		break;
	}
	case LDAP_TAG_CompareResponse: {
		/* NOTE(review): reads r.ModifyDNResponse -- all Result
		 * members share the same union layout, so this is the
		 * same storage. */
		struct ldap_Result *r = &msg->r.ModifyDNResponse;
		if (!asn1_push_tag(data, ASN1_APPLICATION(msg->type))) goto err;
		if (!ldap_encode_response(data, r)) goto err;
		if (!asn1_pop_tag(data)) goto err;
		break;
	}
	case LDAP_TAG_AbandonRequest: {
		struct ldap_AbandonRequest *r = &msg->r.AbandonRequest;
		if (!asn1_push_tag(data, ASN1_APPLICATION_SIMPLE(msg->type))) goto err;
		if (!asn1_write_implicit_Integer(data, r->messageid)) goto err;
		if (!asn1_pop_tag(data)) goto err;
		break;
	}
	case LDAP_TAG_SearchResultReference: {
		struct ldap_SearchResRef *r = &msg->r.SearchResultReference;
		if (!asn1_push_tag(data, ASN1_APPLICATION(msg->type))) goto err;
		if (!asn1_write_OctetString(data, r->referral, strlen(r->referral))) goto err;
		if (!asn1_pop_tag(data)) goto err;
		break;
	}
	case LDAP_TAG_ExtendedRequest: {
		struct ldap_ExtendedRequest *r = &msg->r.ExtendedRequest;
		if (!asn1_push_tag(data, ASN1_APPLICATION(msg->type))) goto err;
		/* requestName OID, context tag 0 */
		if (!asn1_push_tag(data, ASN1_CONTEXT_SIMPLE(0))) goto err;
		if (!asn1_write(data, r->oid, strlen(r->oid))) goto err;
		if (!asn1_pop_tag(data)) goto err;
		if (r->value) {
			/* optional requestValue, context tag 1 */
			if (!asn1_push_tag(data, ASN1_CONTEXT_SIMPLE(1))) goto err;
			if (!asn1_write(data, r->value->data, r->value->length)) goto err;
			if (!asn1_pop_tag(data)) goto err;
		}
		if (!asn1_pop_tag(data)) goto err;
		break;
	}
	case LDAP_TAG_ExtendedResponse: {
		struct ldap_ExtendedResponse *r = &msg->r.ExtendedResponse;
		if (!asn1_push_tag(data, ASN1_APPLICATION(msg->type))) goto err;
		if (!ldap_encode_response(data, &r->response)) goto err;
		if (r->oid) {
			/* optional responseName, context tag 10 */
			if (!asn1_push_tag(data, ASN1_CONTEXT_SIMPLE(10))) goto err;
			if (!asn1_write(data, r->oid, strlen(r->oid))) goto err;
			if (!asn1_pop_tag(data)) goto err;
		}
		if (r->value) {
			/* optional responseValue, context tag 11 */
			if (!asn1_push_tag(data, ASN1_CONTEXT_SIMPLE(11))) goto err;
			if (!asn1_write(data, r->value->data, r->value->length)) goto err;
			if (!asn1_pop_tag(data)) goto err;
		}
		if (!asn1_pop_tag(data)) goto err;
		break;
	}
	default:
		/* unknown message type: refuse to encode */
		goto err;
	}

	/* optional controls, context tag 0 on the outer envelope */
	if (msg->controls != NULL) {
		if (!asn1_push_tag(data, ASN1_CONTEXT(0))) goto err;

		for (i = 0; msg->controls[i] != NULL; i++) {
			if (!ldap_encode_control(mem_ctx, data,
						 control_handlers,
						 msg->controls[i])) {
				DEBUG(0,("Unable to encode control %s\n",
					 msg->controls[i]->oid));
				goto err;
			}
		}

		if (!asn1_pop_tag(data)) goto err;
	}

	if (!asn1_pop_tag(data)) goto err;

	*result = data_blob_talloc(mem_ctx, data->data, data->length);
	asn1_free(data);
	return true;

  err:
	asn1_free(data);
	return false;
}
| 0 |
[
"CWE-399"
] |
samba
|
530d50a1abdcdf4d1775652d4c456c1274d83d8d
| 256,738,813,231,219,140,000,000,000,000,000,000,000 | 315 |
CVE-2015-7540: s4: libcli: ldap message - Ensure all asn1_XX returns are checked.
BUG: https://bugzilla.samba.org/show_bug.cgi?id=9187
Signed-off-by: Jeremy Allison <[email protected]>
Reviewed-by: Ronnie Sahlberg <[email protected]>
Autobuild-User(master): Jeremy Allison <[email protected]>
Autobuild-Date(master): Fri Sep 26 03:15:00 CEST 2014 on sn-devel-104
(cherry picked from commit 69a7e3cfdc8dbba9c8dcfdfae82d2894c7247e15)
|
/**
 * Build an angular dimension (two-line variant) from the current group
 * code values and pass it to the creation interface.
 */
void DL_Dxf::addDimAngular(DL_CreationInterface* creationInterface) {
    DL_DimensionData d = getDimData();

    // Four definition points, stored in group codes 13/23/33 .. 16/26/36.
    double dpx1 = getRealValue(13, 0.0);
    double dpy1 = getRealValue(23, 0.0);
    double dpz1 = getRealValue(33, 0.0);
    double dpx2 = getRealValue(14, 0.0);
    double dpy2 = getRealValue(24, 0.0);
    double dpz2 = getRealValue(34, 0.0);
    double dpx3 = getRealValue(15, 0.0);
    double dpy3 = getRealValue(25, 0.0);
    double dpz3 = getRealValue(35, 0.0);
    double dpx4 = getRealValue(16, 0.0);
    double dpy4 = getRealValue(26, 0.0);
    double dpz4 = getRealValue(36, 0.0);

    DL_DimAngular2LData da(dpx1, dpy1, dpz1,
                           dpx2, dpy2, dpz2,
                           dpx3, dpy3, dpz3,
                           dpx4, dpy4, dpz4);
    creationInterface->addDimAngular(d, da);
}
| 0 |
[
"CWE-191"
] |
qcad
|
1eeffc5daf5a06cf6213ffc19e95923cdebb2eb8
| 196,965,721,071,562,800,000,000,000,000,000,000,000 | 23 |
check vertexIndex which might be -1 for broken DXF
|
/* Compare the HOME postal address labels of two contacts; thin wrapper
 * around address_compare() selecting E_CONTACT_ADDRESS_LABEL_HOME. */
home_address_compare (EContact *ecard1,
                      EContact *ecard2)
{
	return address_compare (ecard1, ecard2, E_CONTACT_ADDRESS_LABEL_HOME);
}
| 0 |
[] |
evolution-data-server
|
34bad61738e2127736947ac50e0c7969cc944972
| 328,357,023,662,074,100,000,000,000,000,000,000,000 | 5 |
Bug 796174 - strcat() considered unsafe for buffer overflow
|
/*
 * Access-control check for incoming audit netlink messages.
 *
 * Returns 0 if the sender of @skb may issue @msg_type, or a negative
 * errno:
 *   -ECONNREFUSED  sender is outside the initial user namespace
 *   -EOPNOTSUPP    obsolete message types (old rule API)
 *   -EPERM         wrong pid namespace or missing capability
 *   -EINVAL        unknown message type
 */
static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
{
	int err = 0;

	/* Only support initial user namespace for now. */
	/*
	 * We return ECONNREFUSED because it tricks userspace into thinking
	 * that audit was not configured into the kernel.  Lots of users
	 * configure their PAM stack (because that's what the distro does)
	 * to reject login if unable to send messages to audit.  If we return
	 * ECONNREFUSED the PAM stack thinks the kernel does not have audit
	 * configured in and will let login proceed.  If we return EPERM
	 * userspace will reject all logins.  This should be removed when we
	 * support non init namespaces!!
	 */
	if (current_user_ns() != &init_user_ns)
		return -ECONNREFUSED;

	switch (msg_type) {
	case AUDIT_LIST:
	case AUDIT_ADD:
	case AUDIT_DEL:
		/* retired rule-management interface */
		return -EOPNOTSUPP;
	case AUDIT_GET:
	case AUDIT_SET:
	case AUDIT_GET_FEATURE:
	case AUDIT_SET_FEATURE:
	case AUDIT_LIST_RULES:
	case AUDIT_ADD_RULE:
	case AUDIT_DEL_RULE:
	case AUDIT_SIGNAL_INFO:
	case AUDIT_TTY_GET:
	case AUDIT_TTY_SET:
	case AUDIT_TRIM:
	case AUDIT_MAKE_EQUIV:
		/* Only support auditd and auditctl in initial pid namespace
		 * for now. */
		if ((task_active_pid_ns(current) != &init_pid_ns))
			return -EPERM;

		/* control operations need CAP_AUDIT_CONTROL */
		if (!netlink_capable(skb, CAP_AUDIT_CONTROL))
			err = -EPERM;
		break;
	case AUDIT_USER:
	case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
	case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
		/* userspace-originated records need CAP_AUDIT_WRITE */
		if (!netlink_capable(skb, CAP_AUDIT_WRITE))
			err = -EPERM;
		break;
	default:  /* bad msg */
		err = -EINVAL;
	}

	return err;
}
| 0 |
[
"CWE-264"
] |
net
|
90f62cf30a78721641e08737bda787552428061e
| 331,019,886,689,680,220,000,000,000,000,000,000,000 | 55 |
net: Use netlink_ns_capable to verify the permisions of netlink messages
It is possible by passing a netlink socket to a more privileged
executable and then to fool that executable into writing to the socket
data that happens to be valid netlink message to do something that
privileged executable did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
ASC_rejectAssociation(
    T_ASC_Association * association,
    const T_ASC_RejectParameters * rejectParameters,
    void **associatePDU,
    unsigned long *associatePDUlength)
{
    /* Reject an incoming association request, optionally returning the
     * raw A-ASSOCIATE-RJ PDU that was sent. */
    if ((association == NULL) ||
        (association->DULassociation == NULL) ||
        (rejectParameters == NULL))
        return ASC_NULLKEY;

    /* A copy of the raw PDU is requested only when both output
     * parameters are provided. */
    int retrieveRawPDU = (associatePDU && associatePDUlength) ? 1 : 0;

    DUL_ABORTITEMS l_abort;
    l_abort.result = (unsigned char)(rejectParameters->result & 0xff);
    l_abort.source = (unsigned char)(rejectParameters->source & 0xff);
    l_abort.reason = (unsigned char)(rejectParameters->reason & 0xff);

    OFCondition cond = DUL_RejectAssociationRQ(
        &association->DULassociation,
        &l_abort,
        retrieveRawPDU);

    if (retrieveRawPDU && (association->DULassociation))
    {
        DUL_returnAssociatePDUStorage(association->DULassociation,
                                      *associatePDU, *associatePDUlength);
    }

    return cond;
}
| 0 |
[
"CWE-415",
"CWE-703",
"CWE-401"
] |
dcmtk
|
a9697dfeb672b0b9412c00c7d36d801e27ec85cb
| 175,711,556,925,355,080,000,000,000,000,000,000,000 | 31 |
Fixed poss. NULL pointer dereference/double free.
Thanks to Jinsheng Ba <[email protected]> for the report and some patches.
|
/*
 * Map an anonymous private region of @request bytes (page-aligned) at
 * @addr with @flags -- the backend of brk() and similar expansions.
 * Only VM_EXEC may be passed in @flags; anything else is -EINVAL.
 * Existing mappings in the range are unmapped first.  The caller must
 * hold mm->mmap_sem for writing; @uf collects userfaultfd unmap events.
 * Returns 0 on success or a negative errno.
 */
static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags, struct list_head *uf)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	unsigned long len;
	struct rb_node **rb_link, *rb_parent;
	pgoff_t pgoff = addr >> PAGE_SHIFT;
	int error;

	len = PAGE_ALIGN(request);
	if (len < request)	/* PAGE_ALIGN wrapped around */
		return -ENOMEM;
	if (!len)
		return 0;

	/* Until we need other flags, refuse anything except VM_EXEC. */
	if ((flags & (~VM_EXEC)) != 0)
		return -EINVAL;
	flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;

	/* MAP_FIXED: validate the address range; on failure an error code
	 * (not a page-aligned address) comes back. */
	error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
	if (offset_in_page(error))
		return error;

	error = mlock_future_check(mm, mm->def_flags, len);
	if (error)
		return error;

	/*
	 * mm->mmap_sem is required to protect against another thread
	 * changing the mappings in case we sleep.
	 */
	verify_mm_writelocked(mm);

	/*
	 * Clear old maps. this also does some error checking for us
	 */
	while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
			      &rb_parent)) {
		if (do_munmap(mm, addr, len, uf))
			return -ENOMEM;
	}

	/* Check against address space limits *after* clearing old maps... */
	if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
		return -ENOMEM;

	if (mm->map_count > sysctl_max_map_count)
		return -ENOMEM;

	if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
		return -ENOMEM;

	/* Can we just expand an old private anonymous mapping? */
	vma = vma_merge(mm, prev, addr, addr + len, flags,
			NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX);
	if (vma)
		goto out;

	/*
	 * create a vma struct for an anonymous mapping
	 */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma) {
		/* give back the memory accounted above */
		vm_unacct_memory(len >> PAGE_SHIFT);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_mm = mm;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_pgoff = pgoff;
	vma->vm_flags = flags;
	vma->vm_page_prot = vm_get_page_prot(flags);
	vma_link(mm, vma, prev, rb_link, rb_parent);
out:
	/* update accounting and optionally populate locked pages */
	perf_event_mmap(vma);
	mm->total_vm += len >> PAGE_SHIFT;
	mm->data_vm += len >> PAGE_SHIFT;
	if (flags & VM_LOCKED)
		mm->locked_vm += (len >> PAGE_SHIFT);
	vma->vm_flags |= VM_SOFTDIRTY;
	return 0;
}
| 0 |
[
"CWE-119"
] |
linux
|
1be7107fbe18eed3e319a6c3e83c78254b693acb
| 59,244,971,005,205,190,000,000,000,000,000,000,000 | 85 |
mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
no limit for the stack size limit because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation wise, first delete all the old code for stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>
|
Subsets and Splits
CWE 416 & 19
The query filters records related to specific CWEs (Common Weakness Enumerations), providing a basic overview of entries with these vulnerabilities but without deeper analysis.
CWE Frequency in Train Set
Counts the occurrences of each CWE (Common Weakness Enumeration) in the dataset, providing a basic distribution but limited insight.