func (string, 0–484k chars) | target (int64, 0–1) | cwe (sequence, 0–4 entries) | project (string, 799 distinct values) | commit_id (string, 40 chars) | hash (float64) | size (int64, 1–24k) | message (string, 0–13.3k chars)
---|---|---|---|---|---|---|---|
void set_item_equal(Item_equal *item_eq) { item_equal= item_eq; } | 0 | [
"CWE-617"
] | server | 2e7891080667c59ac80f788eef4d59d447595772 | 74,042,005,401,834,490,000,000,000,000,000,000,000 | 1 | MDEV-25635 Assertion failure when pushing from HAVING into WHERE of view
This bug could manifest itself after pushing a where condition over a
mergeable derived table / view / CTE DT into a grouping view / derived
table / CTE V whose item list contained set functions with constant
arguments such as MIN(2), SUM(1), etc. In such cases the field references
used in the condition pushed into the view V that correspond to set functions
are wrapped into Item_direct_view_ref wrappers. Due to a wrong implementation
of the virtual method const_item() for the class Item_direct_view_ref, the
wrapped set functions with constant arguments could be erroneously taken
for constant items. This could lead to a wrong result set being returned by the
main select query in 10.2. In 10.4, where the possibility of pushing conditions
from HAVING into WHERE had been added, this could cause a crash.
Approved by Sergey Petrunya <[email protected]> |
static void stop_read(struct edgeport_port *edge_port)
{
unsigned long flags;
spin_lock_irqsave(&edge_port->ep_lock, flags);
if (edge_port->ep_read_urb_state == EDGE_READ_URB_RUNNING)
edge_port->ep_read_urb_state = EDGE_READ_URB_STOPPING;
edge_port->shadow_mcr &= ~MCR_RTS;
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
} | 0 | [
"CWE-191"
] | linux | 654b404f2a222f918af9b0cd18ad469d0c941a8e | 155,644,139,842,001,360,000,000,000,000,000,000,000 | 12 | USB: serial: io_ti: fix information leak in completion handler
Add missing sanity check to the bulk-in completion handler to avoid an
integer underflow that can be triggered by a malicious device.
This avoids leaking 128 kB of memory content from after the URB transfer
buffer to user space.
Fixes: 8c209e6782ca ("USB: make actual_length in struct urb field u32")
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Cc: stable <[email protected]> # 2.6.30
Signed-off-by: Johan Hovold <[email protected]> |
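A minimal user-space sketch of the pattern behind this fix; the handler name and the two-byte header layout are assumptions for illustration, not the driver's actual code.

```c
#include <stdio.h>

/* Hypothetical bulk-in handler: without the sanity check, "length - 2"
 * underflows for packets shorter than the 2-byte header, and a later copy
 * of that many bytes would read far past the transfer buffer. */
static int handle_bulk_in(const unsigned char *data, int length)
{
    (void)data;
    if (length < 2) {
        fprintf(stderr, "malformed packet (%d bytes), dropping\n", length);
        return -1;
    }
    return length - 2; /* payload bytes after the 2-byte status header */
}

int main(void)
{
    unsigned char pkt[] = { 0x00, 0x01, 'h', 'i' };
    printf("payload: %d\n", handle_bulk_in(pkt, (int)sizeof(pkt)));
    printf("payload: %d\n", handle_bulk_in(pkt, 1)); /* rejected */
    return 0;
}
```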
header_put_be_3byte (SF_PRIVATE *psf, int x)
{ psf->header.ptr [psf->header.indx++] = (x >> 16) ;
psf->header.ptr [psf->header.indx++] = (x >> 8) ;
psf->header.ptr [psf->header.indx++] = x ;
} /* header_put_be_3byte */ | 0 | [
"CWE-119",
"CWE-787"
] | libsndfile | 708e996c87c5fae77b104ccfeb8f6db784c32074 | 302,508,640,409,297,870,000,000,000,000,000,000,000 | 5 | src/ : Move to a variable length header buffer
Previously, the `psf->header` buffer was a fixed length specified by
`SF_HEADER_LEN` which was set to `12292`. This was problematic for
two reasons; this value was un-necessarily large for the majority
of files and too small for some others.
Now the size of the header buffer starts at 256 bytes and grows as
necessary up to a maximum of 100k. |
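The growth policy described above can be sketched as a doubling buffer with a hard cap. The 256-byte start and 100k ceiling follow the commit message; the struct and helper names are illustrative, not libsndfile's actual API.

```c
#include <stdlib.h>

#define HEADER_INITIAL 256
#define HEADER_MAX (100 * 1024)

typedef struct {
    unsigned char *ptr;
    size_t len;  /* allocated capacity */
    size_t indx; /* bytes used */
} header_buf;

/* Grow the buffer so "needed" more bytes fit, doubling up to the cap. */
static int header_reserve(header_buf *h, size_t needed)
{
    if (h->indx + needed <= h->len)
        return 0;
    size_t newlen = h->len ? h->len : HEADER_INITIAL;
    while (newlen < h->indx + needed)
        newlen *= 2;
    if (newlen > HEADER_MAX)
        newlen = HEADER_MAX;
    if (h->indx + needed > newlen) /* refuse pathological headers */
        return -1;
    unsigned char *p = realloc(h->ptr, newlen);
    if (p == NULL)
        return -1;
    h->ptr = p;
    h->len = newlen;
    return 0;
}
```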
inline cimg_int64 mod(const cimg_int64 x, const cimg_int64 m) {
if (!m) throw CImgArgumentException("cimg::mod(): Specified modulo value is 0.");
return (cimg_int64)(x>=0?x%m:(x%m?m + x%m:0));
} | 0 | [
"CWE-770"
] | cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 199,911,535,737,917,250,000,000,000,000,000,000,000 | 4 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in the file do not exceed the file size. |
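A sketch of the load-time plausibility check this commit describes, with illustrative names: reject dimensions whose implied pixel payload exceeds what the file can actually contain, so an attacker cannot force an oversized allocation.

```c
#include <stdint.h>

static int dims_plausible(uint32_t width, uint32_t height,
                          uint32_t bytes_per_pixel, uint64_t file_size)
{
    if (!width || !height || !bytes_per_pixel)
        return 0;
    /* (2^32-1)^2 still fits in 64 bits; guard the bpp multiply explicitly */
    uint64_t needed = (uint64_t)width * height;
    if (needed > UINT64_MAX / bytes_per_pixel)
        return 0;
    needed *= bytes_per_pixel;
    return needed <= file_size; /* otherwise the allocation is attacker-sized */
}
```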
int rad_packet_recv(int fd, struct rad_packet_t **p, struct sockaddr_in *addr)
{
struct rad_packet_t *pack;
struct rad_attr_t *attr;
struct rad_dict_attr_t *da;
struct rad_dict_vendor_t *vendor;
uint8_t *ptr;
int n, id, len, vendor_id;
socklen_t addr_len = sizeof(*addr);
*p = NULL;
pack = rad_packet_alloc(0);
if (!pack)
return 0;
//ptr = mmap(NULL, REQ_LENGTH_MAX, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
ptr = mempool_alloc(buf_pool);
if (ptr == MAP_FAILED) {
log_emerg("radius:packet: out of memory\n");
goto out_err;
}
pack->buf = ptr;
clock_gettime(CLOCK_MONOTONIC, &pack->tv);
while (1) {
if (addr)
n = recvfrom(fd, pack->buf, REQ_LENGTH_MAX, 0, addr, &addr_len);
else
n = read(fd, pack->buf, REQ_LENGTH_MAX);
if (n < 0) {
rad_packet_free(pack);
if (errno == EAGAIN)
return 1;
if (errno != ECONNREFUSED)
log_ppp_error("radius:packet:read: %s\n", strerror(errno));
return -1;
}
break;
}
if (n < 20) {
log_ppp_warn("radius:packet: short packed received (%i)\n", n);
goto out_err;
}
pack->code = *ptr; ptr++;
pack->id = *ptr; ptr++;
pack->len = ntohs(*(uint16_t*)ptr); ptr += 2;
if (pack->len > n) {
log_ppp_warn("radius:packet: short packet received %i, expected %i\n", pack->len, n);
goto out_err;
}
ptr += 16;
n -= 20;
while (n>0) {
id = *ptr; ptr++;
len = *ptr - 2; ptr++;
if (len < 0) {
log_ppp_warn("radius:packet short attribute len received\n");
goto out_err;
}
if (2 + len > n) {
log_ppp_warn("radius:packet: too long attribute received (%i, %i)\n", id, len);
goto out_err;
}
if (id == 26) {
vendor_id = ntohl(*(uint32_t *)ptr);
vendor = rad_dict_find_vendor_id(vendor_id);
if (vendor) {
ptr += 4;
if (vendor->tag == 2)
id = (uint16_t)ntohs(*(uint16_t *)ptr);
else
id = *ptr;
ptr += vendor->tag;
if (vendor->len == 2)
len = (uint16_t)ntohs(*(uint16_t *)ptr);
else
len = *ptr;
ptr += vendor->len;
len -= vendor->tag + vendor->len;
n -= 4 + vendor->tag + vendor->len;
if (len < 0) {
log_ppp_warn("radius:packet invalid vendor attribute len received\n");
goto out_err;
}
if (2 + len > n) {
log_ppp_warn("radius:packet: too long vendor attribute received (%i, %i)\n", id, len);
goto out_err;
}
} else
log_ppp_warn("radius:packet: vendor %i not found\n", id);
} else
vendor = NULL;
da = rad_dict_find_attr_id(vendor, id);
if (da) {
attr = mempool_alloc(attr_pool);
if (!attr) {
log_emerg("radius:packet: out of memory\n");
goto out_err;
}
memset(attr, 0, sizeof(*attr));
attr->vendor = vendor;
attr->attr = da;
attr->len = len;
attr->raw = ptr;
if (!da->array) {
switch (da->type) {
case ATTR_TYPE_STRING:
attr->alloc = 1;
attr->val.string = _malloc(len + 1);
memcpy(attr->val.string, ptr, len);
attr->val.string[len] = 0;
break;
case ATTR_TYPE_OCTETS:
case ATTR_TYPE_ETHER:
case ATTR_TYPE_TLV:
attr->val.octets = ptr;
break;
case ATTR_TYPE_INTEGER:
if (len != da->size)
log_ppp_warn("radius:packet: attribute %s has invalid length %i (must be %i)\n", da->name, len, da->size);
case ATTR_TYPE_DATE:
if (len == 4)
attr->val.integer = ntohl(*(uint32_t*)ptr);
else if (len == 2)
attr->val.integer = ntohs(*(uint16_t*)ptr);
else if (len == 1)
attr->val.integer = *ptr;
break;
case ATTR_TYPE_IPADDR:
case ATTR_TYPE_IFID:
case ATTR_TYPE_IPV6ADDR:
memcpy(&attr->val.integer, ptr, len);
break;
case ATTR_TYPE_IPV6PREFIX:
attr->val.ipv6prefix.len = ptr[1];
memset(&attr->val.ipv6prefix.prefix, 0, sizeof(attr->val.ipv6prefix.prefix));
memcpy(&attr->val.ipv6prefix.prefix, ptr + 2, len - 2);
break;
}
}
list_add_tail(&attr->entry, &pack->attrs);
} else
log_ppp_warn("radius:packet: unknown attribute received (%i,%i)\n", vendor ? vendor->id : 0, id);
ptr += len;
n -= 2 + len;
}
*p = pack;
return 0;
out_err:
rad_packet_free(pack);
return 1;
} | 1 | [
"CWE-787"
] | accel-ppp | d4cb89721cc8e5b3dd3fbefaf173eb77ecb85615 | 256,186,249,891,961,400,000,000,000,000,000,000,000 | 171 | fix buffer overflow when receive radius packet
This patch fixes a buffer overflow that occurs if a RADIUS packet contains an invalid attribute length
and an attribute type from the following list: ipv4addr, ipv6addr, ipv6prefix or ifid.
Reported-by: Chloe Ong
Reported-by: Eugene Lim <[email protected]>
Reported-by: Kar Wei Loh
Signed-off-by: Sergey V. Lobanov <[email protected]> |
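A sketch of the per-type length validation this fix implies: fixed-width attribute types must never drive a memcpy into a fixed-size destination with an attacker-chosen length. The enum tags here are illustrative; the widths follow the standard RADIUS attribute formats.

```c
/* Illustrative type tags; the real code uses the dictionary's enum. */
enum attr_type { T_IPADDR, T_IFID, T_IPV6ADDR, T_IPV6PREFIX };

static int attr_len_valid(enum attr_type type, int len)
{
    switch (type) {
    case T_IPADDR:     return len == 4;              /* IPv4 address */
    case T_IFID:       return len == 8;              /* interface id */
    case T_IPV6ADDR:   return len == 16;             /* IPv6 address */
    case T_IPV6PREFIX: return len >= 2 && len <= 18; /* reserved+len+prefix */
    }
    return 0;
}
```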
temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
struct cdmac_bd *cur_p;
dma_addr_t tail_p, skb_dma_addr;
int ii;
unsigned long num_frag;
skb_frag_t *frag;
num_frag = skb_shinfo(skb)->nr_frags;
frag = &skb_shinfo(skb)->frags[0];
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
if (temac_check_tx_bd_space(lp, num_frag + 1)) {
if (netif_queue_stopped(ndev))
return NETDEV_TX_BUSY;
netif_stop_queue(ndev);
/* Matches barrier in temac_start_xmit_done */
smp_mb();
/* Space might have just been freed - check again */
if (temac_check_tx_bd_space(lp, num_frag + 1))
return NETDEV_TX_BUSY;
netif_wake_queue(ndev);
}
cur_p->app0 = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
unsigned int csum_start_off = skb_checksum_start_offset(skb);
unsigned int csum_index_off = csum_start_off + skb->csum_offset;
cur_p->app0 |= cpu_to_be32(0x000001); /* TX Checksum Enabled */
cur_p->app1 = cpu_to_be32((csum_start_off << 16)
| csum_index_off);
cur_p->app2 = 0; /* initial checksum seed */
}
cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_SOP);
skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
cur_p->len = cpu_to_be32(skb_headlen(skb));
if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr))) {
dev_kfree_skb_any(skb);
ndev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
cur_p->phys = cpu_to_be32(skb_dma_addr);
for (ii = 0; ii < num_frag; ii++) {
if (++lp->tx_bd_tail >= lp->tx_bd_num)
lp->tx_bd_tail = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
skb_dma_addr = dma_map_single(ndev->dev.parent,
skb_frag_address(frag),
skb_frag_size(frag),
DMA_TO_DEVICE);
if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
if (--lp->tx_bd_tail < 0)
lp->tx_bd_tail = lp->tx_bd_num - 1;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
while (--ii >= 0) {
--frag;
dma_unmap_single(ndev->dev.parent,
be32_to_cpu(cur_p->phys),
skb_frag_size(frag),
DMA_TO_DEVICE);
if (--lp->tx_bd_tail < 0)
lp->tx_bd_tail = lp->tx_bd_num - 1;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
}
dma_unmap_single(ndev->dev.parent,
be32_to_cpu(cur_p->phys),
skb_headlen(skb), DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
ndev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
cur_p->phys = cpu_to_be32(skb_dma_addr);
cur_p->len = cpu_to_be32(skb_frag_size(frag));
cur_p->app0 = 0;
frag++;
}
cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);
/* Mark last fragment with skb address, so it can be consumed
* in temac_start_xmit_done()
*/
ptr_to_txbd((void *)skb, cur_p);
tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
lp->tx_bd_tail++;
if (lp->tx_bd_tail >= lp->tx_bd_num)
lp->tx_bd_tail = 0;
skb_tx_timestamp(skb);
/* Kick off the transfer */
wmb();
lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
return NETDEV_TX_OK;
} | 0 | [
"CWE-120",
"CWE-787"
] | linux | c364df2489b8ef2f5e3159b1dff1ff1fdb16040d | 233,461,259,350,857,650,000,000,000,000,000,000,000 | 106 | net: ll_temac: Fix TX BD buffer overwrite
Just as with the initial check, we need to ensure num_frag+1 buffers are available,
as that is the number of buffers we are going to use.
This fixes a buffer overflow, which might be seen during heavy network
load. Complete lockup of TEMAC was reproducible within about 10 minutes of
a particular load.
Fixes: 84823ff80f74 ("net: ll_temac: Fix race condition causing TX hang")
Cc: [email protected] # v5.4+
Signed-off-by: Esben Haabendal <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static struct ip_mc_list *igmp_mc_get_idx(struct seq_file *seq, loff_t pos)
{
struct ip_mc_list *im = igmp_mc_get_first(seq);
if (im)
while (pos && (im = igmp_mc_get_next(seq, im)) != NULL)
--pos;
return pos ? NULL : im;
} | 0 | [
"CWE-362"
] | linux | 23d2b94043ca8835bd1e67749020e839f396a1c2 | 151,776,209,450,923,790,000,000,000,000,000,000,000 | 8 | igmp: Add ip_mc_list lock in ip_check_mc_rcu
I got below panic when doing fuzz test:
Kernel panic - not syncing: panic_on_warn set ...
CPU: 0 PID: 4056 Comm: syz-executor.3 Tainted: G B 5.14.0-rc1-00195-gcff5c4254439-dirty #2
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.0-59-gc9ba5276e321-prebuilt.qemu.org 04/01/2014
Call Trace:
dump_stack_lvl+0x7a/0x9b
panic+0x2cd/0x5af
end_report.cold+0x5a/0x5a
kasan_report+0xec/0x110
ip_check_mc_rcu+0x556/0x5d0
__mkroute_output+0x895/0x1740
ip_route_output_key_hash_rcu+0x2d0/0x1050
ip_route_output_key_hash+0x182/0x2e0
ip_route_output_flow+0x28/0x130
udp_sendmsg+0x165d/0x2280
udpv6_sendmsg+0x121e/0x24f0
inet6_sendmsg+0xf7/0x140
sock_sendmsg+0xe9/0x180
____sys_sendmsg+0x2b8/0x7a0
___sys_sendmsg+0xf0/0x160
__sys_sendmmsg+0x17e/0x3c0
__x64_sys_sendmmsg+0x9e/0x100
do_syscall_64+0x3b/0x90
entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x462eb9
Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8
48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48>
3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007f3df5af1c58 EFLAGS: 00000246 ORIG_RAX: 0000000000000133
RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462eb9
RDX: 0000000000000312 RSI: 0000000020001700 RDI: 0000000000000007
RBP: 0000000000000004 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00007f3df5af26bc
R13: 00000000004c372d R14: 0000000000700b10 R15: 00000000ffffffff
It is one use-after-free in ip_check_mc_rcu.
In ip_mc_del_src, the ip_sf_list of pmc has been freed under pmc->lock protection.
But access to ip_sf_list in ip_check_mc_rcu is not protected by the lock.
Signed-off-by: Liu Jian <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
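A user-space analogy of what this fix adds (pthreads standing in for the kernel spinlock): reads of the source list must hold the same lock under which ip_mc_del_src() frees it, otherwise a reader can dereference just-freed entries.

```c
#include <pthread.h>
#include <stddef.h>

struct src { struct src *next; };

struct mc_entry {
    pthread_mutex_t lock; /* stands in for pmc->lock */
    struct src *sources;  /* stands in for pmc->sources (ip_sf_list) */
};

/* Reader: taking the lock prevents racing with a concurrent free. */
static int count_sources(struct mc_entry *pmc)
{
    int n = 0;
    pthread_mutex_lock(&pmc->lock);
    for (struct src *s = pmc->sources; s != NULL; s = s->next)
        n++;
    pthread_mutex_unlock(&pmc->lock);
    return n;
}
```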
static apr_status_t open_scoreboard(apr_pool_t *pconf)
{
#if APR_HAS_SHARED_MEMORY
apr_status_t rv;
char *fname = NULL;
apr_pool_t *global_pool;
/* We don't want to have to recreate the scoreboard after
* restarts, so we'll create a global pool and never clean it.
*/
rv = apr_pool_create(&global_pool, NULL);
if (rv != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf, APLOGNO(00002)
"Fatal error: unable to create global pool "
"for use by the scoreboard");
return rv;
}
/* The config says to create a name-based shmem */
if (ap_scoreboard_fname) {
/* make sure it's an absolute pathname */
fname = ap_server_root_relative(pconf, ap_scoreboard_fname);
if (!fname) {
ap_log_error(APLOG_MARK, APLOG_CRIT, APR_EBADPATH, ap_server_conf, APLOGNO(00003)
"Fatal error: Invalid Scoreboard path %s",
ap_scoreboard_fname);
return APR_EBADPATH;
}
return create_namebased_scoreboard(global_pool, fname);
}
else { /* config didn't specify, we get to choose shmem type */
rv = apr_shm_create(&ap_scoreboard_shm, scoreboard_size, NULL,
global_pool); /* anonymous shared memory */
if ((rv != APR_SUCCESS) && (rv != APR_ENOTIMPL)) {
ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf, APLOGNO(00004)
"Unable to create or access scoreboard "
"(anonymous shared memory failure)");
return rv;
}
/* Make up a filename and do name-based shmem */
else if (rv == APR_ENOTIMPL) {
/* Make sure it's an absolute pathname */
ap_scoreboard_fname = DEFAULT_SCOREBOARD;
fname = ap_server_root_relative(pconf, ap_scoreboard_fname);
return create_namebased_scoreboard(global_pool, fname);
}
}
#endif /* APR_HAS_SHARED_MEMORY */
return APR_SUCCESS;
} | 0 | [
"CWE-476"
] | httpd | fa7b2a5250e54363b3a6c8ac3aaa7de4e8da9b2e | 112,520,262,660,013,330,000,000,000,000,000,000,000 | 51 | Merge r1878092 from trunk:
Fix a NULL pointer dereference
* server/scoreboard.c (ap_increment_counts): In certain cases like certain
invalid requests r->method might be NULL here. r->method_number defaults
to M_GET and hence is M_GET in these cases.
Submitted by: rpluem
Reviewed by: covener, ylavic, jfclere
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1893051 13f79535-47bb-0310-9956-ffa450edef68 |
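A minimal sketch of the guard described in the log, with an illustrative struct: r->method must be NULL-checked before any string comparison, since r->method_number alone (defaulting to M_GET) does not guarantee the method string is set.

```c
#include <string.h>

struct request { const char *method; int method_number; };

static int is_head_request(const struct request *r)
{
    /* NULL check first: certain invalid requests leave method unset */
    return r->method != NULL && strcmp(r->method, "HEAD") == 0;
}
```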
static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int err = 0;
u32 opt;
BT_DBG("sk %p", sk);
lock_sock(sk);
switch (optname) {
case RFCOMM_LM:
if (get_user(opt, (u32 __user *) optval)) {
err = -EFAULT;
break;
}
if (opt & RFCOMM_LM_AUTH)
rfcomm_pi(sk)->sec_level = BT_SECURITY_LOW;
if (opt & RFCOMM_LM_ENCRYPT)
rfcomm_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
if (opt & RFCOMM_LM_SECURE)
rfcomm_pi(sk)->sec_level = BT_SECURITY_HIGH;
rfcomm_pi(sk)->role_switch = (opt & RFCOMM_LM_MASTER);
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
} | 0 | [
"CWE-20",
"CWE-269"
] | linux | f3d3342602f8bcbf37d7c46641cb9bca7618eb1c | 291,412,590,626,371,860,000,000,000,000,000,000,000 | 35 | net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
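The contract this patch establishes can be sketched with an illustrative helper (not kernel code): the generic layer defaults msg_namelen to 0, and only a handler that really has a peer address fills msg_name and sets the length.

```c
#include <string.h>
#include <sys/socket.h>

static void report_msg_name(struct msghdr *msg,
                            const struct sockaddr *src, socklen_t src_len)
{
    msg->msg_namelen = 0; /* default: no address, no uninitialized bytes */
    if (msg->msg_name != NULL && src != NULL &&
        src_len <= sizeof(struct sockaddr_storage)) {
        memcpy(msg->msg_name, src, src_len);
        msg->msg_namelen = src_len;
    }
}
```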
int cli_bytecode_context_clear(struct cli_bc_ctx *ctx)
{
cli_bytecode_context_reset(ctx);
memset(ctx, 0, sizeof(*ctx));
return CL_SUCCESS;
} | 0 | [
"CWE-189"
] | clamav-devel | 3d664817f6ef833a17414a4ecea42004c35cc42f | 281,107,439,936,200,600,000,000,000,000,000,000,000 | 6 | fix recursion level crash (bb #3706).
Thanks to Stephane Chazelas for the analysis. |
int cap_task_setnice (struct task_struct *p, int nice)
{
return 0;
} | 0 | [] | linux-2.6 | 3318a386e4ca68c76e0294363d29bdc46fcad670 | 134,744,745,486,913,390,000,000,000,000,000,000,000 | 4 | file caps: always start with clear bprm->caps_*
While Linux doesn't honor setuid on scripts, it mistakenly
behaves differently for file capabilities.
This patch fixes that behavior by making sure that get_file_caps()
begins with empty bprm->caps_*. That way when a script is loaded,
its bprm->caps_* may be filled when binfmt_misc calls prepare_binprm(),
but they will be cleared again when binfmt_elf calls prepare_binprm()
next to read the interpreter's file capabilities.
Signed-off-by: Serge Hallyn <[email protected]>
Acked-by: David Howells <[email protected]>
Acked-by: Andrew G. Morgan <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
{
if (cfs_rq->avg.load_avg)
return true;
if (cfs_rq->avg.util_avg)
return true;
return false;
} | 0 | [
"CWE-400",
"CWE-703",
"CWE-835"
] | linux | c40f7d74c741a907cfaeb73a7697081881c497d0 | 178,213,886,971,056,780,000,000,000,000,000,000,000 | 10 | sched/fair: Fix infinite loop in update_blocked_averages() by reverting a9e7f6544b9c
Zhipeng Xie, Xie XiuQi and Sargun Dhillon reported lockups in the
scheduler under high loads, starting at around the v4.18 time frame,
and Zhipeng Xie tracked it down to bugs in the rq->leaf_cfs_rq_list
manipulation.
Do a (manual) revert of:
a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
It turns out that the list_del_leaf_cfs_rq() introduced by this commit
is a surprising property that was not considered in followup commits
such as:
9c2791f936ef ("sched/fair: Fix hierarchical order in rq->leaf_cfs_rq_list")
As Vincent Guittot explains:
"I think that there is a bigger problem with commit a9e7f6544b9c and
cfs_rq throttling:
Let's take the example of the following topology TG2 --> TG1 --> root:
1) The 1st time a task is enqueued, we will add TG2 cfs_rq then TG1
cfs_rq to leaf_cfs_rq_list and we are sure to do the whole branch in
one path because it has never been used and can't be throttled so
tmp_alone_branch will point to leaf_cfs_rq_list at the end.
2) Then TG1 is throttled
3) and we add TG3 as a new child of TG1.
4) The 1st enqueue of a task on TG3 will add TG3 cfs_rq just before TG1
cfs_rq and tmp_alone_branch will stay on rq->leaf_cfs_rq_list.
With commit a9e7f6544b9c, we can del a cfs_rq from rq->leaf_cfs_rq_list.
So if the load of TG1 cfs_rq becomes NULL before step 2) above, TG1
cfs_rq is removed from the list.
Then at step 4), TG3 cfs_rq is added at the beginning of rq->leaf_cfs_rq_list
but tmp_alone_branch still points to TG3 cfs_rq because its throttled
parent can't be enqueued when the lock is released.
tmp_alone_branch doesn't point to rq->leaf_cfs_rq_list whereas it should.
So if TG3 cfs_rq is removed or destroyed before tmp_alone_branch
points on another TG cfs_rq, the next TG cfs_rq that will be added,
will be linked outside rq->leaf_cfs_rq_list - which is bad.
In addition, we can break the ordering of the cfs_rq in
rq->leaf_cfs_rq_list but this ordering is used to update and
propagate the update from leaf down to root."
Instead of trying to work through all these cases and trying to reproduce
the very high loads that produced the lockup to begin with, simplify
the code temporarily by reverting a9e7f6544b9c - which change was clearly
not thought through completely.
This (hopefully) gives us a kernel that doesn't lock up so people
can continue to enjoy their holidays without worrying about regressions. ;-)
[ mingo: Wrote changelog, fixed weird spelling in code comment while at it. ]
Analyzed-by: Xie XiuQi <[email protected]>
Analyzed-by: Vincent Guittot <[email protected]>
Reported-by: Zhipeng Xie <[email protected]>
Reported-by: Sargun Dhillon <[email protected]>
Reported-by: Xie XiuQi <[email protected]>
Tested-by: Zhipeng Xie <[email protected]>
Tested-by: Sargun Dhillon <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Acked-by: Vincent Guittot <[email protected]>
Cc: <[email protected]> # v4.13+
Cc: Bin Li <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Fixes: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]> |
static bool fix_optimizer_switch(sys_var *self, THD *thd,
enum_var_type type)
{
SV *sv= (type == OPT_GLOBAL) ? &global_system_variables : &thd->variables;
sv->engine_condition_pushdown=
test(sv->optimizer_switch & OPTIMIZER_SWITCH_ENGINE_CONDITION_PUSHDOWN);
return false;
} | 0 | [
"CWE-264"
] | mysql-server | 48bd8b16fe382be302c6f0b45931be5aa6f29a0e | 18,791,983,003,775,460,000,000,000,000,000,000,000 | 8 | Bug#24388753: PRIVILEGE ESCALATION USING MYSQLD_SAFE
[This is the 5.5/5.6 version of the bugfix].
The problem was that it was possible to write log files ending
in .ini/.cnf that later could be parsed as an options file.
This made it possible for users to specify startup options
without the permissions to do so.
This patch fixes the problem by disallowing general query log
and slow query log to be written to files ending in .ini and .cnf. |
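A standalone sketch of the filename policy described above: reject log file names whose suffix would later let them be parsed as an options file. strcasecmp is POSIX; the helper names are illustrative.

```c
#include <string.h>
#include <strings.h>

static int ends_with_ci(const char *s, const char *suffix)
{
    size_t ls = strlen(s), lf = strlen(suffix);
    return ls >= lf && strcasecmp(s + ls - lf, suffix) == 0;
}

static int log_filename_allowed(const char *name)
{
    /* .ini/.cnf files are read back as configuration on startup */
    return !ends_with_ci(name, ".ini") && !ends_with_ci(name, ".cnf");
}
```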
static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
int vector)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (is_guest_mode(vcpu) &&
vector == vmx->nested.posted_intr_nv) {
/*
* If a posted intr is not recognized by hardware,
* we will accomplish it in the next vmentry.
*/
vmx->nested.pi_pending = true;
kvm_make_request(KVM_REQ_EVENT, vcpu);
/*
* This pairs with the smp_mb_*() after setting vcpu->mode in
* vcpu_enter_guest() to guarantee the vCPU sees the event
* request if triggering a posted interrupt "fails" because
* vcpu->mode != IN_GUEST_MODE. The extra barrier is needed as
* the smb_wmb() in kvm_make_request() only ensures everything
* done before making the request is visible when the request
* is visible, it doesn't ensure ordering between the store to
* vcpu->requests and the load from vcpu->mode.
*/
smp_mb__after_atomic();
/* the PIR and ON have been set by L1. */
kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_NESTED_VECTOR);
return 0;
}
return -1;
} | 0 | [
"CWE-703"
] | linux | 6cd88243c7e03845a450795e134b488fc2afb736 | 102,524,549,873,668,520,000,000,000,000,000,000,000 | 32 | KVM: x86: do not report a vCPU as preempted outside instruction boundaries
If a vCPU is outside guest mode and is scheduled out, it might be in the
process of making a memory access. A problem occurs if another vCPU uses
the PV TLB flush feature during the period when the vCPU is scheduled
out, and a virtual address has already been translated but has not yet
been accessed, because this is equivalent to using a stale TLB entry.
To avoid this, only report a vCPU as preempted if sure that the guest
is at an instruction boundary. A rescheduling request will be delivered
to the host physical CPU as an external interrupt, so for simplicity
consider any vmexit *not* instruction boundary except for external
interrupts.
It would in principle be okay to report the vCPU as preempted also
if it is sleeping in kvm_vcpu_block(): a TLB flush IPI will incur the
vmentry/vmexit overhead unnecessarily, and optimistic spinning is
also unlikely to succeed. However, leave it for later because right
now kvm_vcpu_check_block() is doing memory accesses. Even
though the TLB flush issue only applies to virtual memory address,
it's very much preferrable to be conservative.
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
sf_flac_write_callback (const FLAC__StreamDecoder * UNUSED (decoder), const FLAC__Frame *frame, const int32_t * const buffer [], void *client_data)
{ SF_PRIVATE *psf = (SF_PRIVATE*) client_data ;
FLAC_PRIVATE* pflac = (FLAC_PRIVATE*) psf->codec_data ;
pflac->frame = frame ;
pflac->bufferpos = 0 ;
pflac->bufferbackup = SF_FALSE ;
pflac->wbuffer = buffer ;
flac_buffer_copy (psf) ;
return FLAC__STREAM_DECODER_WRITE_STATUS_CONTINUE ;
} /* sf_flac_write_callback */ | 1 | [
"CWE-119",
"CWE-369"
] | libsndfile | 60b234301adf258786d8b90be5c1d437fc8799e0 | 226,694,761,749,991,450,000,000,000,000,000,000,000 | 14 | src/flac.c: Improve error handling
Especially when dealing with corrupt or malicious files. |
void t_go_generator::generate_go_struct_reader(ofstream& out,
t_struct* tstruct,
const string& tstruct_name,
bool is_result) {
(void)is_result;
const vector<t_field*>& fields = tstruct->get_members();
vector<t_field*>::const_iterator f_iter;
string escaped_tstruct_name(escape_string(tstruct->get_name()));
out << indent() << "func (p *" << tstruct_name << ") " << read_method_name_ << "(iprot thrift.TProtocol) error {"
<< endl;
indent_up();
out << indent() << "if _, err := iprot.ReadStructBegin(); err != nil {" << endl;
out << indent() << " return thrift.PrependError(fmt.Sprintf(\"%T read error: \", p), err)"
<< endl;
out << indent() << "}" << endl << endl;
// Required variables does not have IsSet functions, so we need tmp vars to check them.
for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
if ((*f_iter)->get_req() == t_field::T_REQUIRED) {
const string field_name(publicize(escape_string((*f_iter)->get_name())));
indent(out) << "var isset" << field_name << " bool = false;" << endl;
}
}
out << endl;
// Loop over reading in fields
indent(out) << "for {" << endl;
indent_up();
// Read beginning field marker
out << indent() << "_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()" << endl;
out << indent() << "if err != nil {" << endl;
out << indent() << " return thrift.PrependError(fmt.Sprintf("
"\"%T field %d read error: \", p, fieldId), err)" << endl;
out << indent() << "}" << endl;
// Check for field STOP marker and break
out << indent() << "if fieldTypeId == thrift.STOP { break; }" << endl;
string thriftFieldTypeId;
// Generate deserialization code for known cases
int32_t field_id = -1;
// Switch statement on the field we are reading, false if no fields present
bool have_switch = !fields.empty();
if (have_switch) {
indent(out) << "switch fieldId {" << endl;
}
// All the fields we know
for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
field_id = (*f_iter)->get_key();
// if negative id, ensure we generate a valid method name
string field_method_prefix("ReadField");
int32_t field_method_suffix = field_id;
if (field_method_suffix < 0) {
field_method_prefix += "_";
field_method_suffix *= -1;
}
out << indent() << "case " << field_id << ":" << endl;
indent_up();
thriftFieldTypeId = type_to_enum((*f_iter)->get_type());
if (thriftFieldTypeId == "thrift.BINARY") {
thriftFieldTypeId = "thrift.STRING";
}
out << indent() << "if err := p." << field_method_prefix << field_method_suffix << "(iprot); err != nil {"
<< endl;
out << indent() << " return err" << endl;
out << indent() << "}" << endl;
// Mark required field as read
if ((*f_iter)->get_req() == t_field::T_REQUIRED) {
const string field_name(publicize(escape_string((*f_iter)->get_name())));
out << indent() << "isset" << field_name << " = true" << endl;
}
indent_down();
}
// Begin switch default case
if (have_switch) {
out << indent() << "default:" << endl;
indent_up();
}
// Skip unknown fields in either case
out << indent() << "if err := iprot.Skip(fieldTypeId); err != nil {" << endl;
out << indent() << " return err" << endl;
out << indent() << "}" << endl;
// End switch default case
if (have_switch) {
indent_down();
out << indent() << "}" << endl;
}
// Read field end marker
out << indent() << "if err := iprot.ReadFieldEnd(); err != nil {" << endl;
out << indent() << " return err" << endl;
out << indent() << "}" << endl;
indent_down();
out << indent() << "}" << endl;
out << indent() << "if err := iprot.ReadStructEnd(); err != nil {" << endl;
out << indent() << " return thrift.PrependError(fmt.Sprintf("
"\"%T read struct end error: \", p), err)" << endl;
out << indent() << "}" << endl;
// Return error if any required fields are missing.
for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
if ((*f_iter)->get_req() == t_field::T_REQUIRED) {
const string field_name(publicize(escape_string((*f_iter)->get_name())));
out << indent() << "if !isset" << field_name << "{" << endl;
out << indent() << " return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, "
"fmt.Errorf(\"Required field " << field_name << " is not set\"));" << endl;
out << indent() << "}" << endl;
}
}
out << indent() << "return nil" << endl;
indent_down();
out << indent() << "}" << endl << endl;
for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
string field_type_name(publicize((*f_iter)->get_type()->get_name()));
string field_name(publicize((*f_iter)->get_name()));
string field_method_prefix("ReadField");
int32_t field_id = (*f_iter)->get_key();
int32_t field_method_suffix = field_id;
if (field_method_suffix < 0) {
field_method_prefix += "_";
field_method_suffix *= -1;
}
out << indent() << "func (p *" << tstruct_name << ") " << field_method_prefix << field_method_suffix
<< "(iprot thrift.TProtocol) error {" << endl;
indent_up();
generate_deserialize_field(out, *f_iter, false, "p.");
indent_down();
out << indent() << " return nil" << endl;
out << indent() << "}" << endl << endl;
}
} | 0 | [
"CWE-77"
] | thrift | 2007783e874d524a46b818598a45078448ecc53e | 38,269,857,429,990,640,000,000,000,000,000,000,000 | 146 | THRIFT-3893 Command injection in format_go_output
Client: Go
Patch: Jens Geyer |
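One standard mitigation for this class of bug (not necessarily the project's exact patch) is to stop routing the formatter through a shell: with fork()+exec, the output path is passed as a literal argv entry, so shell metacharacters in it are never interpreted.

```c
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static int run_gofmt(const char *path)
{
    pid_t pid = fork();
    if (pid < 0)
        return -1;
    if (pid == 0) {
        /* path is a plain argument, never seen by /bin/sh */
        execlp("gofmt", "gofmt", "-w", path, (char *)NULL);
        _exit(127); /* exec failed */
    }
    int status = 0;
    if (waitpid(pid, &status, 0) < 0)
        return -1;
    return WIFEXITED(status) ? WEXITSTATUS(status) : -1;
}
```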
xmlFAParsePiece(xmlRegParserCtxtPtr ctxt) {
int ret;
ctxt->atom = NULL;
ret = xmlFAParseAtom(ctxt);
if (ret == 0)
return(0);
if (ctxt->atom == NULL) {
ERROR("internal: no atom generated");
}
xmlFAParseQuantifier(ctxt);
return(1);
} | 0 | [
"CWE-119"
] | libxml2 | cbb271655cadeb8dbb258a64701d9a3a0c4835b4 | 86,166,589,067,495,750,000,000,000,000,000,000,000 | 13 | Bug 757711: heap-buffer-overflow in xmlFAParsePosCharGroup <https://bugzilla.gnome.org/show_bug.cgi?id=757711>
* xmlregexp.c:
(xmlFAParseCharRange): Only advance to the next character if
there is no error. Advancing to the next character in case of
an error while parsing regexp leads to an out of bounds access. |
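The control-flow rule in this fix — consume input only after a successful parse step — looks roughly like this (stubbed, illustrative):

```c
/* Hypothetical per-step parser: returns 0 on success, -1 on error,
 * and advances *cur only on the success path. */
static int parse_one(const char **cur)
{
    if (**cur == '!')
        return -1; /* simulated parse error */
    (*cur)++;      /* advance only when the step succeeded */
    return 0;
}

static int parse_all(const char *input)
{
    const char *cur = input;
    while (*cur != '\0') {
        if (parse_one(&cur) != 0)
            return -1; /* stop; do NOT skip past the failing character */
    }
    return 0;
}
```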
std::istream& operator>>
(std::istream& is, Nef_polyhedron_2<T,Items,Mark>& NP)
{
typedef typename Nef_polyhedron_2<T,Items,Mark>::Decorator Decorator;
CGAL::PM_io_parser<Decorator> I(is, NP.pm());
if (I.check_sep("Nef_polyhedron_2<") &&
I.check_sep(NP.EK.output_identifier()) &&
I.check_sep(">")) I.read();
else {
std::cerr << "Nef_polyhedron_2 input corrupted." << std::endl;
NP = Nef_polyhedron_2<T,Items,Mark>();
}
if(!is)
return is;
typename Nef_polyhedron_2<T,Items,Mark>::Topological_explorer D(NP.explorer());
D.check_integrity_and_topological_planarity();
return is;
} | 0 | [
"CWE-269"
] | cgal | 618b409b0fbcef7cb536a4134ae3a424ef5aae45 | 278,958,445,909,280,840,000,000,000,000,000,000,000 | 18 | Fix Nef_2 and Nef_S2 IO |
utfc_ptr2char_len(
char_u *p,
int *pcc, // return: composing chars, last one is 0
int maxlen)
{
int len;
int c;
int cc;
int i = 0;
c = utf_ptr2char(p);
len = utf_ptr2len_len(p, maxlen);
// Only accept a composing char when the first char isn't illegal.
if ((len > 1 || *p < 0x80)
&& len < maxlen
&& p[len] >= 0x80
&& UTF_COMPOSINGLIKE(p, p + len))
{
cc = utf_ptr2char(p + len);
for (;;)
{
pcc[i++] = cc;
if (i == MAX_MCO)
break;
len += utf_ptr2len_len(p + len, maxlen - len);
if (len >= maxlen
|| p[len] < 0x80
|| !utf_iscomposing(cc = utf_ptr2char(p + len)))
break;
}
}
if (i < MAX_MCO) // last composing char must be 0
pcc[i] = 0;
return c;
} | 0 | [
"CWE-122",
"CWE-787"
] | vim | f6d39c31d2177549a986d170e192d8351bd571e2 | 206,459,796,250,712,100,000,000,000,000,000,000,000 | 37 | patch 9.0.0220: invalid memory access with for loop over NULL string
Problem: Invalid memory access with for loop over NULL string.
Solution: Make sure mb_ptr2len() consistently returns zero for NUL. |
int append_possible_keys(MEM_ROOT *alloc, String_list &list, TABLE *table,
key_map possible_keys)
{
uint j;
for (j=0 ; j < table->s->keys ; j++)
{
if (possible_keys.is_set(j))
if (!(list.append_str(alloc, table->key_info[j].name.str)))
return 1;
}
return 0;
} | 0 | [] | server | ff77a09bda884fe6bf3917eb29b9d3a2f53f919b | 234,603,105,142,816,470,000,000,000,000,000,000,000 | 12 | MDEV-22464 Server crash on UPDATE with nested subquery
Uninitialized ref_pointer_array[] because setup_fields() got empty
fields list. mysql_multi_update() for some reason does that by
substituting the fields list with empty total_list for the
mysql_select() call (looks like wrong merge since total_list is not
used anywhere else and is always empty). The fix would be to return
back the original fields list. But this fails update_use_source.test
case:
--error ER_BAD_FIELD_ERROR
update v1 set t1c1=2 order by 1;
Actually not failing the above seems to be ok.
The other fix would be to keep resolve_in_select_list false (and that
keeps outer context from being resolved in
Item_ref::fix_fields()). This fix is more consistent with how SELECT
behaves:
--error ER_SUBQUERY_NO_1_ROW
select a from t1 where a= (select 2 from t1 having (a = 3));
So this patch implements this fix. |
static int replmd_update_rpmd(struct ldb_module *module,
const struct dsdb_schema *schema,
struct ldb_request *req,
const char * const *rename_attrs,
struct ldb_message *msg, uint64_t *seq_num,
time_t t, bool is_schema_nc,
bool *is_urgent, bool *rodc)
{
const struct ldb_val *omd_value;
enum ndr_err_code ndr_err;
struct replPropertyMetaDataBlob omd;
unsigned int i;
NTTIME now;
const struct GUID *our_invocation_id;
int ret;
const char * const *attrs = NULL;
const char * const attrs2[] = { "uSNChanged", "objectClass", "instanceType", NULL };
struct ldb_result *res;
struct ldb_context *ldb;
struct ldb_message_element *objectclass_el;
enum urgent_situation situation;
bool rmd_is_provided;
bool rmd_is_just_resorted = false;
const char *not_rename_attrs[4 + msg->num_elements];
bool is_forced_rodc = false;
if (rename_attrs) {
attrs = rename_attrs;
} else {
for (i = 0; i < msg->num_elements; i++) {
not_rename_attrs[i] = msg->elements[i].name;
}
not_rename_attrs[i] = "replPropertyMetaData";
not_rename_attrs[i+1] = "objectClass";
not_rename_attrs[i+2] = "instanceType";
not_rename_attrs[i+3] = NULL;
attrs = not_rename_attrs;
}
ldb = ldb_module_get_ctx(module);
ret = samdb_rodc(ldb, rodc);
if (ret != LDB_SUCCESS) {
DEBUG(4, (__location__ ": unable to tell if we are an RODC\n"));
*rodc = false;
}
if (*rodc &&
ldb_request_get_control(req, DSDB_CONTROL_FORCE_RODC_LOCAL_CHANGE)) {
is_forced_rodc = true;
}
our_invocation_id = samdb_ntds_invocation_id(ldb);
if (!our_invocation_id) {
/* this happens during an initial vampire while
updating the schema */
DEBUG(5,("No invocationID - skipping replPropertyMetaData update\n"));
return LDB_SUCCESS;
}
unix_to_nt_time(&now, t);
if (ldb_request_get_control(req, DSDB_CONTROL_CHANGEREPLMETADATA_OID)) {
rmd_is_provided = true;
if (ldb_request_get_control(req, DSDB_CONTROL_CHANGEREPLMETADATA_RESORT_OID)) {
rmd_is_just_resorted = true;
}
} else {
rmd_is_provided = false;
}
/* if isDeleted is present and is TRUE, then we consider we are deleting,
* otherwise we consider we are updating */
if (ldb_msg_check_string_attribute(msg, "isDeleted", "TRUE")) {
situation = REPL_URGENT_ON_DELETE;
} else if (rename_attrs) {
situation = REPL_URGENT_ON_CREATE | REPL_URGENT_ON_DELETE;
} else {
situation = REPL_URGENT_ON_UPDATE;
}
if (rmd_is_provided) {
/* In this case the change_replmetadata control was supplied */
/* We check that it's the only attribute that is provided
* (it's a rare case so it's better to keep the code simpler)
* We also check that the highest local_usn is bigger or the same as
* uSNChanged. */
uint64_t db_seq;
if( msg->num_elements != 1 ||
strncmp(msg->elements[0].name,
"replPropertyMetaData", 20) ) {
DEBUG(0,(__location__ ": changereplmetada control called without "\
"a specified replPropertyMetaData attribute or with others\n"));
return LDB_ERR_OPERATIONS_ERROR;
}
if (situation != REPL_URGENT_ON_UPDATE) {
DEBUG(0,(__location__ ": changereplmetada control can't be called when deleting an object\n"));
return LDB_ERR_OPERATIONS_ERROR;
}
omd_value = ldb_msg_find_ldb_val(msg, "replPropertyMetaData");
if (!omd_value) {
DEBUG(0,(__location__ ": replPropertyMetaData was not specified for Object %s\n",
ldb_dn_get_linearized(msg->dn)));
return LDB_ERR_OPERATIONS_ERROR;
}
ndr_err = ndr_pull_struct_blob(omd_value, msg, &omd,
(ndr_pull_flags_fn_t)ndr_pull_replPropertyMetaDataBlob);
if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
DEBUG(0,(__location__ ": Failed to parse replPropertyMetaData for %s\n",
ldb_dn_get_linearized(msg->dn)));
return LDB_ERR_OPERATIONS_ERROR;
}
ret = dsdb_module_search_dn(module, msg, &res, msg->dn, attrs2,
DSDB_FLAG_NEXT_MODULE |
DSDB_SEARCH_SHOW_RECYCLED |
DSDB_SEARCH_SHOW_EXTENDED_DN |
DSDB_SEARCH_SHOW_DN_IN_STORAGE_FORMAT |
DSDB_SEARCH_REVEAL_INTERNALS, req);
if (ret != LDB_SUCCESS) {
return ret;
}
if (rmd_is_just_resorted == false) {
*seq_num = find_max_local_usn(omd);
db_seq = ldb_msg_find_attr_as_uint64(res->msgs[0], "uSNChanged", 0);
/*
* The test here now allows for a new
* replPropertyMetaData with no change, if was
* just dbcheck re-sorting the values.
*/
if (*seq_num <= db_seq) {
DEBUG(0,(__location__ ": changereplmetada control provided but max(local_usn)" \
" is less than uSNChanged (max = %lld uSNChanged = %lld)\n",
(long long)*seq_num, (long long)db_seq));
return LDB_ERR_OPERATIONS_ERROR;
}
}
} else {
/* search for the existing replPropertyMetaDataBlob. We need
* to use REVEAL and ask for DNs in storage format to support
* the check for values being the same in
* replmd_update_rpmd_element()
*/
ret = dsdb_module_search_dn(module, msg, &res, msg->dn, attrs,
DSDB_FLAG_NEXT_MODULE |
DSDB_SEARCH_SHOW_RECYCLED |
DSDB_SEARCH_SHOW_EXTENDED_DN |
DSDB_SEARCH_SHOW_DN_IN_STORAGE_FORMAT |
DSDB_SEARCH_REVEAL_INTERNALS, req);
if (ret != LDB_SUCCESS) {
return ret;
}
omd_value = ldb_msg_find_ldb_val(res->msgs[0], "replPropertyMetaData");
if (!omd_value) {
DEBUG(0,(__location__ ": Object %s does not have a replPropertyMetaData attribute\n",
ldb_dn_get_linearized(msg->dn)));
return LDB_ERR_OPERATIONS_ERROR;
}
ndr_err = ndr_pull_struct_blob(omd_value, msg, &omd,
(ndr_pull_flags_fn_t)ndr_pull_replPropertyMetaDataBlob);
if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
DEBUG(0,(__location__ ": Failed to parse replPropertyMetaData for %s\n",
ldb_dn_get_linearized(msg->dn)));
return LDB_ERR_OPERATIONS_ERROR;
}
if (omd.version != 1) {
DEBUG(0,(__location__ ": bad version %u in replPropertyMetaData for %s\n",
omd.version, ldb_dn_get_linearized(msg->dn)));
return LDB_ERR_OPERATIONS_ERROR;
}
for (i=0; i<msg->num_elements;) {
struct ldb_message_element *el = &msg->elements[i];
struct ldb_message_element *old_el;
old_el = ldb_msg_find_element(res->msgs[0], el->name);
ret = replmd_update_rpmd_element(ldb, msg, el, old_el,
&omd, schema, seq_num,
our_invocation_id,
now, is_schema_nc,
is_forced_rodc,
req);
if (ret != LDB_SUCCESS) {
return ret;
}
if (!*is_urgent && (situation == REPL_URGENT_ON_UPDATE)) {
*is_urgent = replmd_check_urgent_attribute(el);
}
if (!(el->flags & DSDB_FLAG_INTERNAL_FORCE_META_DATA)) {
i++;
continue;
}
el->flags &= ~DSDB_FLAG_INTERNAL_FORCE_META_DATA;
if (el->num_values != 0) {
i++;
continue;
}
ldb_msg_remove_element(msg, el);
}
}
/*
* Assert that we have an objectClass attribute - this is major
* corruption if we don't have this!
*/
objectclass_el = ldb_msg_find_element(res->msgs[0], "objectClass");
if (objectclass_el != NULL) {
/*
* Now check if this objectClass means we need to do urgent replication
*/
if (!*is_urgent && replmd_check_urgent_objectclass(objectclass_el,
situation)) {
*is_urgent = true;
}
} else if (!ldb_request_get_control(req, DSDB_CONTROL_DBCHECK)) {
ldb_asprintf_errstring(ldb, __location__
": objectClass missing on %s\n",
ldb_dn_get_linearized(msg->dn));
return LDB_ERR_OBJECT_CLASS_VIOLATION;
}
/*
* replmd_update_rpmd_element has done an update if the
* seq_num is set
*/
if (*seq_num != 0 || rmd_is_just_resorted == true) {
struct ldb_val *md_value;
struct ldb_message_element *el;
/*if we are RODC and this is a DRSR update then its ok*/
if (!ldb_request_get_control(req, DSDB_CONTROL_REPLICATED_UPDATE_OID)
&& !ldb_request_get_control(req, DSDB_CONTROL_DBCHECK_MODIFY_RO_REPLICA)
&& !is_forced_rodc) {
unsigned instanceType;
if (*rodc) {
ldb_set_errstring(ldb, "RODC modify is forbidden!");
return LDB_ERR_REFERRAL;
}
instanceType = ldb_msg_find_attr_as_uint(res->msgs[0], "instanceType", INSTANCE_TYPE_WRITE);
if (!(instanceType & INSTANCE_TYPE_WRITE)) {
return ldb_error(ldb, LDB_ERR_UNWILLING_TO_PERFORM,
"cannot change replicated attribute on partial replica");
}
}
md_value = talloc(msg, struct ldb_val);
if (md_value == NULL) {
ldb_oom(ldb);
return LDB_ERR_OPERATIONS_ERROR;
}
ret = replmd_replPropertyMetaDataCtr1_sort_and_verify(ldb, &omd.ctr.ctr1, msg->dn);
if (ret != LDB_SUCCESS) {
ldb_asprintf_errstring(ldb, "%s: %s", __func__, ldb_errstring(ldb));
return ret;
}
ndr_err = ndr_push_struct_blob(md_value, msg, &omd,
(ndr_push_flags_fn_t)ndr_push_replPropertyMetaDataBlob);
if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
DEBUG(0,(__location__ ": Failed to marshall replPropertyMetaData for %s\n",
ldb_dn_get_linearized(msg->dn)));
return LDB_ERR_OPERATIONS_ERROR;
}
ret = ldb_msg_add_empty(msg, "replPropertyMetaData", LDB_FLAG_MOD_REPLACE, &el);
if (ret != LDB_SUCCESS) {
DEBUG(0,(__location__ ": Failed to add updated replPropertyMetaData %s\n",
ldb_dn_get_linearized(msg->dn)));
return ret;
}
el->num_values = 1;
el->values = md_value;
}
return LDB_SUCCESS;
} | 0 | [
"CWE-200"
] | samba | 0a3aa5f908e351201dc9c4d4807b09ed9eedff77 | 275,096,603,904,301,300,000,000,000,000,000,000,000 | 293 | CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <[email protected]> |
run_protect_tool (int argc, char **argv)
{
#ifdef HAVE_W32_SYSTEM
(void)argc;
(void)argv;
#else
const char *pgm;
char **av;
int i;
if (!opt.protect_tool_program || !*opt.protect_tool_program)
pgm = gnupg_module_name (GNUPG_MODULE_NAME_PROTECT_TOOL);
else
pgm = opt.protect_tool_program;
av = xcalloc (argc+2, sizeof *av);
av[0] = strrchr (pgm, '/');
if (!av[0])
av[0] = xstrdup (pgm);
for (i=1; argc; i++, argc--, argv++)
av[i] = *argv;
av[i] = NULL;
execv (pgm, av);
log_error ("error executing '%s': %s\n", pgm, strerror (errno));
#endif /*!HAVE_W32_SYSTEM*/
gpgsm_exit (2);
} | 0 | [] | gnupg | abd5f6752d693b7f313c19604f0723ecec4d39a6 | 336,439,969,167,963,260,000,000,000,000,000,000,000 | 27 | dirmngr,gpgsm: Return NULL on fail
* dirmngr/ldapserver.c (ldapserver_parse_one): Set SERVER to NULL.
* sm/gpgsm.c (parse_keyserver_line): Ditto.
--
Reported-by: Joshua Rogers <[email protected]>
"If something inside the ldapserver_parse_one function failed,
'server' would be freed, then returned, leading to a
use-after-free. This code is likely copied from sm/gpgsm.c, which
was also susceptible to this bug."
Signed-off-by: Werner Koch <[email protected]> |
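A distilled sketch of the use-after-free pattern this commit closes: once the object is freed on the error path, the function must hand back NULL instead of the stale pointer.

```c
#include <stdlib.h>

struct server { int port; };

static struct server *parse_one(const char *line)
{
    struct server *s = calloc(1, sizeof(*s));
    if (s == NULL)
        return NULL;
    if (line == NULL || line[0] == '\0') { /* simulated parse failure */
        free(s);
        return NULL; /* previously: the freed pointer was returned */
    }
    s->port = 389;
    return s;
}
```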
static inline u64 nf_tables_alloc_handle(struct nft_table *table)
{
return ++table->hgenerator;
} | 0 | [
"CWE-19"
] | nf | a2f18db0c68fec96631c10cad9384c196e9008ac | 205,071,709,277,046,370,000,000,000,000,000,000,000 | 4 | netfilter: nf_tables: fix flush ruleset chain dependencies
Jumping between chains doesn't mix well with flush ruleset. Rules
from a different chain and set elements may still refer to us.
[ 353.373791] ------------[ cut here ]------------
[ 353.373845] kernel BUG at net/netfilter/nf_tables_api.c:1159!
[ 353.373896] invalid opcode: 0000 [#1] SMP
[ 353.373942] Modules linked in: intel_powerclamp uas iwldvm iwlwifi
[ 353.374017] CPU: 0 PID: 6445 Comm: 31c3.nft Not tainted 3.18.0 #98
[ 353.374069] Hardware name: LENOVO 5129CTO/5129CTO, BIOS 6QET47WW (1.17 ) 07/14/2010
[...]
[ 353.375018] Call Trace:
[ 353.375046] [<ffffffff81964c31>] ? nf_tables_commit+0x381/0x540
[ 353.375101] [<ffffffff81949118>] nfnetlink_rcv+0x3d8/0x4b0
[ 353.375150] [<ffffffff81943fc5>] netlink_unicast+0x105/0x1a0
[ 353.375200] [<ffffffff8194438e>] netlink_sendmsg+0x32e/0x790
[ 353.375253] [<ffffffff818f398e>] sock_sendmsg+0x8e/0xc0
[ 353.375300] [<ffffffff818f36b9>] ? move_addr_to_kernel.part.20+0x19/0x70
[ 353.375357] [<ffffffff818f44f9>] ? move_addr_to_kernel+0x19/0x30
[ 353.375410] [<ffffffff819016d2>] ? verify_iovec+0x42/0xd0
[ 353.375459] [<ffffffff818f3e10>] ___sys_sendmsg+0x3f0/0x400
[ 353.375510] [<ffffffff810615fa>] ? native_sched_clock+0x2a/0x90
[ 353.375563] [<ffffffff81176697>] ? acct_account_cputime+0x17/0x20
[ 353.375616] [<ffffffff8110dc78>] ? account_user_time+0x88/0xa0
[ 353.375667] [<ffffffff818f4bbd>] __sys_sendmsg+0x3d/0x80
[ 353.375719] [<ffffffff81b184f4>] ? int_check_syscall_exit_work+0x34/0x3d
[ 353.375776] [<ffffffff818f4c0d>] SyS_sendmsg+0xd/0x20
[ 353.375823] [<ffffffff81b1826d>] system_call_fastpath+0x16/0x1b
Release objects in this order: rules -> sets -> chains -> tables, to
make sure no references to chains are held anymore.
Reported-by: Asbjoern Sloth Toennesen <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]> |
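The release order from the commit can be sketched with stubs: objects that hold references (rules, set elements) are torn down before the objects they refer to (chains, then tables).

```c
#include <stdio.h>

struct nft_table { const char *name; };

static void release_rules(struct nft_table *t)  { printf("rules of %s\n", t->name); }
static void release_sets(struct nft_table *t)   { printf("sets of %s\n", t->name); }
static void release_chains(struct nft_table *t) { printf("chains of %s\n", t->name); }
static void release_table(struct nft_table *t)  { printf("table %s\n", t->name); }

/* Rules may jump to chains and set elements may refer to them, so both
 * must go before any chain is destroyed. */
static void flush_ruleset(struct nft_table *t)
{
    release_rules(t);
    release_sets(t);
    release_chains(t);
    release_table(t);
}
```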
tTcpIpPacketParsingResult ParaNdis_CheckSumVerifyFlat(
PVOID pBuffer,
ULONG ulDataLength,
ULONG flags,
BOOLEAN verifyLength,
LPCSTR caller)
{
tCompletePhysicalAddress SGBuffer;
SGBuffer.Virtual = pBuffer;
SGBuffer.size = ulDataLength;
return ParaNdis_CheckSumVerify(&SGBuffer, ulDataLength, 0, flags, verifyLength, caller);
} | 0 | [
"CWE-20"
] | kvm-guest-drivers-windows | 723416fa4210b7464b28eab89cc76252e6193ac1 | 215,395,288,305,148,400,000,000,000,000,000,000,000 | 12 | NetKVM: BZ#1169718: Checking the length only on read
Signed-off-by: Joseph Hindin <[email protected]> |
stop_adverts(void)
{
struct Interface *iface;
/*
* send final RA (a SHOULD in RFC4861 section 6.2.5)
*/
for (iface=IfaceList; iface; iface=iface->next) {
if( ! iface->UnicastOnly ) {
if (iface->AdvSendAdvert) {
/* send a final advertisement with zero Router Lifetime */
iface->cease_adv = 1;
send_ra_forall(iface, NULL);
}
}
}
} | 0 | [
"CWE-20"
] | radvd | 2c50375043186e133f15135f4c93ca964238ee60 | 231,241,072,589,731,170,000,000,000,000,000,000,000 | 18 | main() must fail on privsep_init() errors, it must not run
without privilege separation as privsep is expected. |
CImg<Tfloat> get_max(const char *const expression) const {
return CImg<Tfloat>(*this,false).max(expression);
} | 0 | [
"CWE-770"
] | cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 239,568,525,272,094,170,000,000,000,000,000,000,000 | 3 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
int ntlm_read_ntlm_v2_response(wStream* s, NTLMv2_RESPONSE* response)
{
if (Stream_GetRemainingLength(s) < 16)
return -1;
Stream_Read(s, response->Response, 16);
return ntlm_read_ntlm_v2_client_challenge(s, &(response->Challenge));
} | 0 | [
"CWE-125"
] | FreeRDP | c098f21fdaadca57ff649eee1674f6cc321a2ec4 | 106,943,933,229,096,940,000,000,000,000,000,000,000 | 7 | Fixed oob read in ntlm_read_ntlm_v2_response |
static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
kvfree(aux->func_info);
kfree(aux->func_info_aux);
free_uid(aux->user);
security_bpf_prog_free(aux);
bpf_prog_free(aux->prog);
} | 0 | [
"CWE-307"
] | linux | 350a5c4dd2452ea999cc5e1d4a8dbf12de2f97ef | 338,343,676,924,999,000,000,000,000,000,000,000,000 | 10 | bpf: Dont allow vmlinux BTF to be used in map_create and prog_load.
The syzbot got FD of vmlinux BTF and passed it into map_create which caused
crash in btf_type_id_size() when it tried to access resolved_ids. The vmlinux
BTF doesn't have 'resolved_ids' and 'resolved_sizes' initialized to save
memory. To avoid such issues disallow using vmlinux BTF in prog_load and
map_create commands.
Fixes: 5329722057d4 ("bpf: Assign ID to vmlinux BTF and return extra info for BTF in GET_OBJ_INFO")
Reported-by: [email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Yonghong Song <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected] |
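A standalone sketch of the guard this commit describes, with a stubbed predicate modeled on the kernel's btf_is_kernel(): BPF_MAP_CREATE and BPF_PROG_LOAD must refuse a BTF object owned by the kernel, whose resolved_ids/resolved_sizes tables are deliberately left unpopulated.

```c
#include <errno.h>
#include <stdbool.h>

struct btf { bool kernel_btf; /* stub: set for the vmlinux BTF object */ };

static bool btf_is_kernel(const struct btf *btf)
{
    return btf->kernel_btf;
}

static int check_map_btf(const struct btf *btf)
{
    if (btf_is_kernel(btf))
        return -EACCES; /* only user-supplied BTF may back maps/progs */
    return 0;
}
```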
int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
{
int ret;
if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
return AVERROR(EINVAL);
if (avctx->internal->draining)
return AVERROR_EOF;
if (avpkt && !avpkt->size && avpkt->data)
return AVERROR(EINVAL);
if (!avpkt || !avpkt->size) {
avctx->internal->draining = 1;
avpkt = NULL;
if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
return 0;
}
if (avctx->codec->send_packet) {
if (avpkt) {
AVPacket tmp = *avpkt;
int did_split = av_packet_split_side_data(&tmp);
ret = apply_param_change(avctx, &tmp);
if (ret >= 0)
ret = avctx->codec->send_packet(avctx, &tmp);
if (did_split)
av_packet_free_side_data(&tmp);
return ret;
} else {
return avctx->codec->send_packet(avctx, NULL);
}
}
// Emulation via old API. Assume avpkt is likely not refcounted, while
// decoder output is always refcounted, and avoid copying.
if (avctx->internal->buffer_pkt->size || avctx->internal->buffer_frame->buf[0])
return AVERROR(EAGAIN);
// The goal is decoding the first frame of the packet without using memcpy,
// because the common case is having only 1 frame per packet (especially
// with video, but audio too). In other cases, it can't be avoided, unless
// the user is feeding refcounted packets.
return do_decode(avctx, (AVPacket *)avpkt);
} | 0 | [
"CWE-787"
] | FFmpeg | 2080bc33717955a0e4268e738acf8c1eeddbf8cb | 46,099,723,357,855,310,000,000,000,000,000,000,000 | 48 | avcodec/utils: correct align value for interplay
Fixes out of array access
Fixes: 452/fuzz-1-ffmpeg_VIDEO_AV_CODEC_ID_INTERPLAY_VIDEO_fuzzer
Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/targets/ffmpeg
Signed-off-by: Michael Niedermayer <[email protected]> |
u32 mp4box_cleanup(u32 ret_code) {
if (mpd_base_urls) {
gf_free(mpd_base_urls);
mpd_base_urls = NULL;
}
if (sdp_lines) {
gf_free(sdp_lines);
sdp_lines = NULL;
}
if (metas) {
u32 i;
for (i=0; i<nb_meta_act; i++) {
if (metas[i].enc_type) gf_free(metas[i].enc_type);
if (metas[i].mime_type) gf_free(metas[i].mime_type);
if (metas[i].szName) gf_free(metas[i].szName);
if (metas[i].szPath) gf_free(metas[i].szPath);
}
gf_free(metas);
metas = NULL;
}
if (tracks) {
u32 i;
for (i = 0; i<nb_track_act; i++) {
if (tracks[i].out_name)
gf_free(tracks[i].out_name);
if (tracks[i].src_name)
gf_free(tracks[i].src_name);
if (tracks[i].kind_scheme)
gf_free(tracks[i].kind_scheme);
if (tracks[i].kind_value)
gf_free(tracks[i].kind_value);
}
gf_free(tracks);
tracks = NULL;
}
if (tsel_acts) {
gf_free(tsel_acts);
tsel_acts = NULL;
}
if (brand_add) {
gf_free(brand_add);
brand_add = NULL;
}
if (brand_rem) {
gf_free(brand_rem);
brand_rem = NULL;
}
if (dash_inputs) {
u32 i, j;
for (i = 0; i<nb_dash_inputs; i++) {
GF_DashSegmenterInput *di = &dash_inputs[i];
if (di->nb_baseURL) {
for (j = 0; j<di->nb_baseURL; j++) {
gf_free(di->baseURL[j]);
}
gf_free(di->baseURL);
}
if (di->rep_descs) {
for (j = 0; j<di->nb_rep_descs; j++) {
gf_free(di->rep_descs[j]);
}
gf_free(di->rep_descs);
}
if (di->as_descs) {
for (j = 0; j<di->nb_as_descs; j++) {
gf_free(di->as_descs[j]);
}
gf_free(di->as_descs);
}
if (di->as_c_descs) {
for (j = 0; j<di->nb_as_c_descs; j++) {
gf_free(di->as_c_descs[j]);
}
gf_free(di->as_c_descs);
}
if (di->p_descs) {
for (j = 0; j<di->nb_p_descs; j++) {
gf_free(di->p_descs[j]);
}
gf_free(di->p_descs);
}
if (di->representationID) gf_free(di->representationID);
if (di->periodID) gf_free(di->periodID);
if (di->xlink) gf_free(di->xlink);
if (di->seg_template) gf_free(di->seg_template);
if (di->hls_pl) gf_free(di->hls_pl);
if (di->source_opts) gf_free(di->source_opts);
if (di->filter_chain) gf_free(di->filter_chain);
if (di->roles) {
for (j = 0; j<di->nb_roles; j++) {
gf_free(di->roles[j]);
}
gf_free(di->roles);
}
}
gf_free(dash_inputs);
dash_inputs = NULL;
}
if (logfile) gf_fclose(logfile);
gf_sys_close();
return ret_code;
} | 0 | [
"CWE-476"
] | gpac | 9eeac00b38348c664dfeae2525bba0cf1bc32349 | 65,629,809,646,027,640,000,000,000,000,000,000,000 | 103 | fixed #1565 |
void reds_on_sv_change(RedsState *reds)
{
int compression_level = calc_compression_level(reds);
FOREACH_QXL_INSTANCE(reds, qxl) {
red_qxl_set_compression_level(qxl, compression_level);
red_qxl_on_sv_change(qxl, reds_get_streaming_video(reds));
}
} | 0 | [] | spice | ca5bbc5692e052159bce1a75f55dc60b36078749 | 308,234,562,191,981,880,000,000,000,000,000,000,000 | 9 | With OpenSSL 1.1: Disable client-initiated renegotiation.
Fixes issue #49
Fixes BZ#1904459
Signed-off-by: Julien Ropé <[email protected]>
Reported-by: BlackKD
Acked-by: Frediano Ziglio <[email protected]> |
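With OpenSSL 1.1 the usual way to implement this is the SSL_OP_NO_RENEGOTIATION option (available since 1.1.0h); a hedged sketch, guarded so it still compiles against builds that predate the flag:

```c
#include <openssl/ssl.h>

static void disable_client_renegotiation(SSL_CTX *ctx)
{
#ifdef SSL_OP_NO_RENEGOTIATION
    /* The server rejects client-initiated renegotiation attempts. */
    SSL_CTX_set_options(ctx, SSL_OP_NO_RENEGOTIATION);
#endif
}
```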
extract_job_on_progress (AutoarExtractor *extractor,
guint64 archive_current_decompressed_size,
guint archive_current_decompressed_files,
gpointer user_data)
{
ExtractJob *extract_job = user_data;
CommonJob *common = user_data;
GFile *source_file;
char *details;
double elapsed;
double transfer_rate;
int remaining_time;
guint64 archive_total_decompressed_size;
gdouble archive_weight;
gdouble archive_decompress_progress;
guint64 job_completed_size;
gdouble job_progress;
source_file = autoar_extractor_get_source_file (extractor);
nautilus_progress_info_take_status (common->progress,
f (_("Extracting “%B”"), source_file));
archive_total_decompressed_size = autoar_extractor_get_total_size (extractor);
archive_decompress_progress = (gdouble) archive_current_decompressed_size /
(gdouble) archive_total_decompressed_size;
archive_weight = 0;
if (extract_job->total_compressed_size)
{
archive_weight = (gdouble) extract_job->archive_compressed_size /
(gdouble) extract_job->total_compressed_size;
}
job_progress = archive_decompress_progress * archive_weight + extract_job->base_progress;
elapsed = g_timer_elapsed (common->time, NULL);
transfer_rate = 0;
remaining_time = -1;
job_completed_size = job_progress * extract_job->total_compressed_size;
if (elapsed > 0)
{
transfer_rate = job_completed_size / elapsed;
}
if (transfer_rate > 0)
{
remaining_time = (extract_job->total_compressed_size - job_completed_size) /
transfer_rate;
}
if (elapsed < SECONDS_NEEDED_FOR_RELIABLE_TRANSFER_RATE ||
transfer_rate == 0)
{
/* To translators: %S will expand to a size like "2 bytes" or
* "3 MB", so something like "4 kb / 4 MB"
*/
details = f (_("%S / %S"), job_completed_size, extract_job->total_compressed_size);
}
else
{
/* To translators: %S will expand to a size like "2 bytes" or
* "3 MB", %T to a time duration like "2 minutes". So the whole
* thing will be something like
* "2 kb / 4 MB -- 2 hours left (4kb/sec)"
*
* The singular/plural form will be used depending on the
* remaining time (i.e. the %T argument).
*/
details = f (ngettext ("%S / %S \xE2\x80\x94 %T left (%S/sec)",
"%S / %S \xE2\x80\x94 %T left (%S/sec)",
seconds_count_format_time_units (remaining_time)),
job_completed_size, extract_job->total_compressed_size,
remaining_time,
(goffset) transfer_rate);
}
nautilus_progress_info_take_details (common->progress, details);
if (elapsed > SECONDS_NEEDED_FOR_APROXIMATE_TRANSFER_RATE)
{
nautilus_progress_info_set_remaining_time (common->progress,
remaining_time);
nautilus_progress_info_set_elapsed_time (common->progress,
elapsed);
}
nautilus_progress_info_set_progress (common->progress, job_progress, 1);
} | 0 | [
"CWE-20"
] | nautilus | 1630f53481f445ada0a455e9979236d31a8d3bb0 | 25,067,207,377,559,300,000,000,000,000,000,000,000 | 92 | mime-actions: use file metadata for trusting desktop files
Currently we only trust desktop files that have the executable bit
set, and don't replace the displayed icon or the displayed name until
it's trusted, which prevents for running random programs by a malicious
desktop file.
However, the executable permission is preserved if the desktop file
comes from a compressed file.
To prevent this, add a metadata::trusted metadata to the file once the
user acknowledges the file as trusted. This adds metadata to the file,
which cannot be added unless the attacker already has access to the computer.
Also remove the SHEBANG "trusted" content we were putting inside the
desktop file, since that doesn't add more security since it can come
with the file itself.
https://bugzilla.gnome.org/show_bug.cgi?id=777991 |
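A minimal sketch of how such a trusted flag could be persisted with GIO, assuming the metadata::trusted attribute named above; the flags and error handling here are illustrative, not necessarily Nautilus's actual call:

#include <gio/gio.h>

/* Sketch only: mark a launcher as trusted by attaching file metadata. */
static gboolean
mark_desktop_file_trusted (GFile *file, GError **error)
{
    return g_file_set_attribute_string (file,
                                        "metadata::trusted", "true",
                                        G_FILE_QUERY_INFO_NONE,
                                        NULL /* cancellable */,
                                        error);
}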
static char *tomoyo_sysctl_path(struct ctl_table *table)
{
int buflen = TOMOYO_MAX_PATHNAME_LEN;
char *buf = tomoyo_alloc(buflen);
char *end = buf + buflen;
int error = -ENOMEM;
if (!buf)
return NULL;
*--end = '\0';
buflen--;
while (table) {
char num[32];
const char *sp = table->procname;
if (!sp) {
memset(num, 0, sizeof(num));
snprintf(num, sizeof(num) - 1, "=%d=", table->ctl_name);
sp = num;
}
if (tomoyo_prepend(&end, &buflen, sp) ||
tomoyo_prepend(&end, &buflen, "/"))
goto out;
table = table->parent;
}
if (tomoyo_prepend(&end, &buflen, "/proc/sys"))
goto out;
error = tomoyo_encode(buf, end - buf, end);
out:
if (!error)
return buf;
tomoyo_free(buf);
return NULL;
} | 0 | [] | linux-2.6 | ee18d64c1f632043a02e6f5ba5e045bb26a5465f | 176,293,714,928,758,140,000,000,000,000,000,000,000 | 35 | KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]
Add a keyctl to install a process's session keyring onto its parent. This
replaces the parent's session keyring. Because the COW credential code does
not permit one process to change another process's credentials directly, the
change is deferred until userspace next starts executing again. Normally this
will be after a wait*() syscall.
To support this, three new security hooks have been provided:
cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in
the blank security creds and key_session_to_parent() - which asks the LSM if
the process may replace its parent's session keyring.
The replacement may only happen if the process has the same ownership details
as its parent, and the process has LINK permission on the session keyring, and
the session keyring is owned by the process, and the LSM permits it.
Note that this requires alteration to each architecture's notify_resume path.
This has been done for all arches barring blackfin, m68k* and xtensa, all of
which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the
replacement to be performed at the point the parent process resumes userspace
execution.
This allows the userspace AFS pioctl emulation to fully emulate newpag() and
the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to
alter the parent process's PAG membership. However, since kAFS doesn't use
PAGs per se, but rather dumps the keys into the session keyring, the session
keyring of the parent must be replaced if, for example, VIOCSETTOK is passed
the newpag flag.
This can be tested with the following program:
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>
#define KEYCTL_SESSION_TO_PARENT 18
#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)
int main(int argc, char **argv)
{
key_serial_t keyring, key;
long ret;
keyring = keyctl_join_session_keyring(argv[1]);
OSERROR(keyring, "keyctl_join_session_keyring");
key = add_key("user", "a", "b", 1, keyring);
OSERROR(key, "add_key");
ret = keyctl(KEYCTL_SESSION_TO_PARENT);
OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");
return 0;
}
Compiled and linked with -lkeyutils, you should see something like:
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
355907932 --alswrv 4043 -1 \_ keyring: _uid.4043
[dhowells@andromeda ~]$ /tmp/newpag
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
1055658746 --alswrv 4043 4043 \_ user: a
[dhowells@andromeda ~]$ /tmp/newpag hello
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: hello
340417692 --alswrv 4043 4043 \_ user: a
Where the test program creates a new session keyring, sticks a user key named
'a' into it and then installs it on its parent.
Signed-off-by: David Howells <[email protected]>
Signed-off-by: James Morris <[email protected]> |
static bool nl80211_put_signal(struct sk_buff *msg, u8 mask, s8 *signal,
int id)
{
void *attr;
int i = 0;
if (!mask)
return true;
attr = nla_nest_start_noflag(msg, id);
if (!attr)
return false;
for (i = 0; i < IEEE80211_MAX_CHAINS; i++) {
if (!(mask & BIT(i)))
continue;
if (nla_put_u8(msg, i, signal[i]))
return false;
}
nla_nest_end(msg, attr);
return true;
} | 0 | [
"CWE-120"
] | linux | f88eb7c0d002a67ef31aeb7850b42ff69abc46dc | 7,701,582,505,681,109,000,000,000,000,000,000,000 | 25 | nl80211: validate beacon head
We currently don't validate the beacon head, i.e. the header,
fixed part and elements that are to go in front of the TIM
element. This means that the variable elements there can be
malformed, e.g. have a length exceeding the buffer size, but
most downstream code from this assumes that this has already
been checked.
Add the necessary checks to the netlink policy.
Cc: [email protected]
Fixes: ed1b6cc7f80f ("cfg80211/nl80211: add beacon settings")
Link: https://lore.kernel.org/r/1569009255-I7ac7fbe9436e9d8733439eab8acbbd35e55c74ef@changeid
Signed-off-by: Johannes Berg <[email protected]> |
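To make the missing check concrete, here is a standalone, hedged sketch of walking 802.11-style information elements (1-byte id, 1-byte length, payload) and rejecting any element whose claimed length runs past the buffer; the function name and types are hypothetical, not nl80211 API:

#include <stddef.h>
#include <stdint.h>

static int ies_are_wellformed(const uint8_t *ies, size_t len)
{
    size_t pos = 0;

    while (pos < len) {
        /* need at least the 2-byte element header */
        if (len - pos < 2)
            return 0;
        uint8_t elem_len = ies[pos + 1];
        /* the claimed payload must fit in the remaining buffer */
        if (elem_len > len - pos - 2)
            return 0;
        pos += 2 + (size_t)elem_len;
    }
    return 1;
}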
void doeprt(char *p)
{
char delim;
int family;
delim = *p++;
family = atoi(p);
while (isdigit((unsigned char) *p)) {
p++;
}
if (*p == delim) {
p++;
} else {
addreply_noformat(501, MSG_SYNTAX_ERROR_IP);
return;
}
if (family == 2 && v6ready) {
do_ipv6_port(p, delim);
return;
}
if (family != 1) {
if (v6ready) {
addreply_noformat(522, MSG_ONLY_IPV4V6);
} else {
addreply_noformat(522, MSG_ONLY_IPV4);
}
return;
}
{
unsigned int a1, a2, a3, a4, port = 0U;
/* there should be a dot-decimal IP, as RFC 2428 states,
* but troll used "comma-decimal" notation for some reason,
* so I decided to leave it */
if ((sscanf(p, "%u,%u,%u,%u", &a1, &a2, &a3, &a4) != 4 &&
sscanf(p, "%u.%u.%u.%u", &a1, &a2, &a3, &a4) != 4) ||
a1 > 255U || a2 > 255U || a3 > 255U || a4 > 255U ||
(a1 | a2 | a3 | a4) == 0U) {
addreply_noformat(501, MSG_SYNTAX_ERROR_IP);
return;
}
while (*p && strchr("0123456789.,", *p)) {
p++;
}
if (*p == delim) {
port = (unsigned int) atoi(++p);
while (*p && isdigit((unsigned char) *p)) {
p++;
}
}
if (*p != delim || port > 65535U || port <= 0U) {
addreply_noformat(501, MSG_SYNTAX_ERROR_IP);
return;
} else {
struct sockaddr_storage a;
memset(&a, 0, sizeof a);
STORAGE_FAMILY(a) = AF_INET;
STORAGE_SIN_ADDR(a) =
htonl(((uint32_t) a1 << 24) |
((uint32_t) a2 << 16) | (a3 << 8) | a4);
SET_STORAGE_LEN(a, sizeof(struct sockaddr_in));
doport2(a, port);
}
}
} | 0 | [
"CWE-434"
] | pure-ftpd | 37ad222868e52271905b94afea4fc780d83294b4 | 28,920,668,084,431,857,000,000,000,000,000,000,000 | 66 | Initialize the max upload file size when quotas are enabled
Due to an unwanted check, files causing the quota to be exceeded
were deleted after the upload, but not during the upload.
The bug was introduced in 2009 in version 1.0.23
Spotted by @DroidTest, thanks! |
int restrict_link_reject(struct key *keyring,
const struct key_type *type,
const union key_payload *payload,
struct key *restriction_key)
{
return -EPERM;
} | 0 | [
"CWE-20"
] | linux | 363b02dab09b3226f3bd1420dad9c72b79a42a76 | 43,688,638,809,883,600,000,000,000,000,000,000,000 | 7 | KEYS: Fix race between updating and finding a negative key
Consolidate KEY_FLAG_INSTANTIATED, KEY_FLAG_NEGATIVE and the rejection
error into one field such that:
(1) The instantiation state can be modified/read atomically.
(2) The error can be accessed atomically with the state.
(3) The error isn't stored unioned with the payload pointers.
This deals with the problem that the state is spread over three different
objects (two bits and a separate variable) and reading or updating them
atomically isn't practical, given that not only can uninstantiated keys
change into instantiated or rejected keys, but rejected keys can also turn
into instantiated keys - and someone accessing the key might not be using
any locking.
The main side effect of this problem is that what was held in the payload
may change, depending on the state. For instance, you might observe the
key to be in the rejected state. You then read the cached error, but if
the key semaphore wasn't locked, the key might've become instantiated
between the two reads - and you might now have something in hand that isn't
actually an error code.
The state is now KEY_IS_UNINSTANTIATED, KEY_IS_POSITIVE or a negative error
code if the key is negatively instantiated. The key_is_instantiated()
function is replaced with key_is_positive() to avoid confusion as negative
keys are also 'instantiated'.
Additionally, barriering is included:
(1) Order payload-set before state-set during instantiation.
(2) Order state-read before payload-read when using the key.
Further separate barriering is necessary if RCU is being used to access the
payload content after reading the payload pointers.
Fixes: 146aa8b1453b ("KEYS: Merge the type-specific data with the payload data")
Cc: [email protected] # v4.4+
Reported-by: Eric Biggers <[email protected]>
Signed-off-by: David Howells <[email protected]>
Reviewed-by: Eric Biggers <[email protected]> |
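A minimal userspace sketch of the consolidated-state idea, under the assumption that one atomic word encodes uninstantiated/positive/negative-errno and that the payload store is release-ordered before the state store (the kernel uses smp_load_acquire/smp_store_release; C11 atomics stand in here):

#include <stdatomic.h>

#define KEY_IS_UNINSTANTIATED 0   /* hypothetical encoding */
#define KEY_IS_POSITIVE       1   /* negative values: rejection errno */

struct toy_key {
    _Atomic int state;
    void *payload;
};

/* Readers observe the payload only after the state says it is there. */
static int toy_key_read_state(struct toy_key *key)
{
    return atomic_load_explicit(&key->state, memory_order_acquire);
}

static void toy_key_instantiate(struct toy_key *key, void *payload)
{
    key->payload = payload;   /* ordered before the state store below */
    atomic_store_explicit(&key->state, KEY_IS_POSITIVE,
                          memory_order_release);
}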
static void *DestroyOptions(void *message)
{
return(DestroyStringInfo((StringInfo *) message));
} | 0 | [
"CWE-125"
] | ImageMagick | 07eebcd72f45c8fd7563d3f9ec5d2bed48f65f36 | 236,012,571,094,531,900,000,000,000,000,000,000,000 | 4 | ... |
yajl_string_encode(yajl_buf buf, const unsigned char * str,
unsigned int len, unsigned int htmlSafe)
{
yajl_string_encode2((const yajl_print_t) &yajl_buf_append, buf, str, len, htmlSafe);
} | 0 | [
"CWE-134"
] | yajl-ruby | a8ca8f476655adaa187eedc60bdc770fff3c51ce | 17,759,880,769,655,247,000,000,000,000,000,000,000 | 5 | Don't advance our end pointer until we've checked we have enough
buffer left and have peeked ahead to see that a unicode escape
is approaching.
Thanks @kivikakk for helping me track down the actual bug here! |
static int tls_construct_cke_dhe(SSL *s, unsigned char **p, int *len, int *al)
{
#ifndef OPENSSL_NO_DH
DH *dh_clnt = NULL;
const BIGNUM *pub_key;
EVP_PKEY *ckey = NULL, *skey = NULL;
skey = s->s3->peer_tmp;
if (skey == NULL) {
SSLerr(SSL_F_TLS_CONSTRUCT_CKE_DHE, ERR_R_INTERNAL_ERROR);
return 0;
}
ckey = ssl_generate_pkey(skey);
if (ckey == NULL) {
SSLerr(SSL_F_TLS_CONSTRUCT_CKE_DHE, ERR_R_INTERNAL_ERROR);
return 0;
}
dh_clnt = EVP_PKEY_get0_DH(ckey);
if (dh_clnt == NULL || ssl_derive(s, ckey, skey) == 0) {
SSLerr(SSL_F_TLS_CONSTRUCT_CKE_DHE, ERR_R_INTERNAL_ERROR);
EVP_PKEY_free(ckey);
return 0;
}
/* send off the data */
DH_get0_key(dh_clnt, &pub_key, NULL);
*len = BN_num_bytes(pub_key);
s2n(*len, *p);
BN_bn2bin(pub_key, *p);
*len += 2;
EVP_PKEY_free(ckey);
return 1;
#else
SSLerr(SSL_F_TLS_CONSTRUCT_CKE_DHE, ERR_R_INTERNAL_ERROR);
*al = SSL_AD_INTERNAL_ERROR;
return 0;
#endif
} | 0 | [
"CWE-476"
] | openssl | efbe126e3ebb9123ac9d058aa2bb044261342aaa | 74,795,130,229,603,240,000,000,000,000,000,000,000 | 41 | Fix missing NULL checks in CKE processing
Reviewed-by: Rich Salz <[email protected]> |
evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos,
const char *mem, size_t len)
{
struct evbuffer_chain *chain;
size_t position;
int r;
ASSERT_EVBUFFER_LOCKED(buf);
if (pos->pos < 0 ||
EV_SIZE_MAX - len < (size_t)pos->pos ||
pos->pos + len > buf->total_len)
return -1;
chain = pos->internal_.chain;
position = pos->internal_.pos_in_chain;
while (len && chain) {
size_t n_comparable;
if (len + position > chain->off)
n_comparable = chain->off - position;
else
n_comparable = len;
r = memcmp(chain->buffer + chain->misalign + position, mem,
n_comparable);
if (r)
return r;
mem += n_comparable;
len -= n_comparable;
position = 0;
chain = chain->next;
}
return 0;
} | 0 | [
"CWE-189"
] | libevent | 841ecbd96105c84ac2e7c9594aeadbcc6fb38bc4 | 215,522,601,834,445,400,000,000,000,000,000,000,000 | 34 | Fix CVE-2014-6272 in Libevent 2.1
For this fix, we need to make sure that passing too-large inputs to
the evbuffer functions can't make us do bad things with the heap.
Also, lower the maximum chunk size to the lesser of the off_t and size_t maxima.
This is necessary since otherwise we could get into an infinite loop
if we make a chunk that 'misalign' cannot index into. |
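The wrap-proof comparison that protects against such inputs is worth isolating; this is the same shape as the pos/len check in evbuffer_ptr_memcmp above, written as a standalone helper:

#include <stddef.h>

/* Equivalent to pos + len <= total_len, but cannot overflow. */
static int range_ok(size_t pos, size_t len, size_t total_len)
{
    return len <= total_len && pos <= total_len - len;
}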
explicit ReverseV2Op(OpKernelConstruction* context) : OpKernel(context) {} | 0 | [
"CWE-369"
] | tensorflow | 4071d8e2f6c45c1955a811fee757ca2adbe462c1 | 224,245,540,359,505,150,000,000,000,000,000,000,000 | 1 | Fix FPE issue with `tf.raw_ops.Reverse`.
PiperOrigin-RevId: 371176973
Change-Id: Ic6d483bfc95313ec2299c2d1c956cfe96c96626c |
struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
struct lock_class_key *key)
{
struct trace_buffer *buffer;
long nr_pages;
int bsize;
int cpu;
int ret;
/* keep it in its own cache line */
buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
GFP_KERNEL);
if (!buffer)
return NULL;
if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
goto fail_free_buffer;
nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
buffer->flags = flags;
buffer->clock = trace_clock_local;
buffer->reader_lock_key = key;
init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
init_waitqueue_head(&buffer->irq_work.waiters);
/* need at least two pages */
if (nr_pages < 2)
nr_pages = 2;
buffer->cpus = nr_cpu_ids;
bsize = sizeof(void *) * nr_cpu_ids;
buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
GFP_KERNEL);
if (!buffer->buffers)
goto fail_free_cpumask;
cpu = raw_smp_processor_id();
cpumask_set_cpu(cpu, buffer->cpumask);
buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
if (!buffer->buffers[cpu])
goto fail_free_buffers;
ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
if (ret < 0)
goto fail_free_buffers;
mutex_init(&buffer->mutex);
return buffer;
fail_free_buffers:
for_each_buffer_cpu(buffer, cpu) {
if (buffer->buffers[cpu])
rb_free_cpu_buffer(buffer->buffers[cpu]);
}
kfree(buffer->buffers);
fail_free_cpumask:
free_cpumask_var(buffer->cpumask);
fail_free_buffer:
kfree(buffer);
return NULL;
} | 0 | [
"CWE-362"
] | linux | bbeb97464eefc65f506084fd9f18f21653e01137 | 313,551,783,143,408,320,000,000,000,000,000,000,000 | 66 | tracing: Fix race in trace_open and buffer resize call
Below race can come, if trace_open and resize of
cpu buffer is running parallely on different cpus
CPUX CPUY
ring_buffer_resize
atomic_read(&buffer->resize_disabled)
tracing_open
tracing_reset_online_cpus
ring_buffer_reset_cpu
rb_reset_cpu
rb_update_pages
remove/insert pages
resetting pointer
This race can cause a data abort or sometimes an infinite loop in
rb_remove_pages and rb_insert_pages while checking pages
for sanity.
Take buffer lock to fix this.
Link: https://lkml.kernel.org/r/[email protected]
Cc: [email protected]
Fixes: b23d7a5f4a07a ("ring-buffer: speed up buffer resets by avoiding synchronize_rcu for each CPU")
Signed-off-by: Gaurav Kohli <[email protected]>
Signed-off-by: Steven Rostedt (VMware) <[email protected]> |
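A pthread sketch of the fix's shape (not the kernel code): reset and resize serialize on the same buffer mutex, so the interleaving in the diagram above becomes impossible:

#include <pthread.h>

struct toy_buffer {
    pthread_mutex_t mutex;   /* stands in for buffer->mutex */
    /* ... per-cpu page lists ... */
};

static void toy_buffer_reset(struct toy_buffer *buf)
{
    pthread_mutex_lock(&buf->mutex);
    /* reset read/write pointers while no resize can run */
    pthread_mutex_unlock(&buf->mutex);
}

static void toy_buffer_resize(struct toy_buffer *buf)
{
    pthread_mutex_lock(&buf->mutex);
    /* remove/insert pages while no reset can run */
    pthread_mutex_unlock(&buf->mutex);
}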
static void nat_detect_cb(void *user_data,
const pj_stun_nat_detect_result *res)
{
PJ_UNUSED_ARG(user_data);
pjsua_var.nat_in_progress = PJ_FALSE;
pjsua_var.nat_status = res->status;
pjsua_var.nat_type = res->nat_type;
if (pjsua_var.ua_cfg.cb.on_nat_detect) {
(*pjsua_var.ua_cfg.cb.on_nat_detect)(res);
}
} | 0 | [
"CWE-120",
"CWE-787"
] | pjproject | d27f79da11df7bc8bb56c2f291d71e54df8d2c47 | 266,763,403,400,635,650,000,000,000,000,000,000,000 | 13 | Use PJ_ASSERT_RETURN() on pjsip_auth_create_digest() and pjsua_init_tpselector() (#3009)
* Use PJ_ASSERT_RETURN on pjsip_auth_create_digest
* Use PJ_ASSERT_RETURN on pjsua_init_tpselector()
* Fix incorrect check.
* Add return value to pjsip_auth_create_digest() and pjsip_auth_create_digestSHA256()
* Modification based on comments. |
R_API char *r_bin_java_get_item_name_from_cp_item_list(RList *cp_list, RBinJavaCPTypeObj *obj, int depth) {
/*
Given a constant pool object Class, FieldRef, MethodRef, or InterfaceMethodRef
return the actual descriptor string.
@param cp_list: RList of RBinJavaCPTypeObj *
@param obj object to look up the name for
@rvalue ut8* (user frees) or NULL
*/
if (!obj || !cp_list || depth < 0) {
return NULL;
}
switch (obj->tag) {
case R_BIN_JAVA_CP_NAMEANDTYPE:
return r_bin_java_get_utf8_from_cp_item_list (
cp_list, obj->info.cp_name_and_type.name_idx);
case R_BIN_JAVA_CP_CLASS:
return r_bin_java_get_utf8_from_cp_item_list (
cp_list, obj->info.cp_class.name_idx);
// XXX - Probably not good form, but they are the same memory structure
case R_BIN_JAVA_CP_FIELDREF:
case R_BIN_JAVA_CP_INTERFACEMETHOD_REF:
case R_BIN_JAVA_CP_METHODREF:
obj = r_bin_java_get_item_from_cp_item_list (
cp_list, obj->info.cp_method.name_and_type_idx);
return r_bin_java_get_item_name_from_cp_item_list (
cp_list, obj, depth - 1);
default:
return NULL;
case 0:
IFDBG eprintf ("Invalid 0 tag in the constant pool\n");
return NULL;
}
return NULL;
} | 0 | [
"CWE-119",
"CWE-788"
] | radare2 | 6c4428f018d385fc80a33ecddcb37becea685dd5 | 277,939,220,512,431,900,000,000,000,000,000,000,000 | 34 | Improve boundary checks to fix oobread segfaults ##crash
* Reported by Cen Zhang via huntr.dev
* Reproducer: bins/fuzzed/javaoob-havoc.class |
word32 BER_Decoder::GetSet()
{
if (source_.GetError().What()) return 0;
byte b = source_.next();
if (b != (SET | CONSTRUCTED)) {
source_.SetError(SET_E);
return 0;
}
return GetLength(source_);
} | 0 | [
"CWE-254"
] | mysql-server | e7061f7e5a96c66cb2e0bf46bec7f6ff35801a69 | 287,763,800,594,061,900,000,000,000,000,000,000,000 | 12 | Bug #22738607: YASSL FUNCTION X509_NAME_GET_INDEX_BY_NID IS NOT WORKING AS EXPECTED. |
SSL_CTX *SSL_CTX_new(SSL_METHOD *meth)
{
SSL_CTX *ret=NULL;
if (meth == NULL)
{
SSLerr(SSL_F_SSL_CTX_NEW,SSL_R_NULL_SSL_METHOD_PASSED);
return(NULL);
}
#ifdef OPENSSL_FIPS
if (FIPS_mode() && (meth->version < TLS1_VERSION))
{
SSLerr(SSL_F_SSL_CTX_NEW, SSL_R_ONLY_TLS_ALLOWED_IN_FIPS_MODE);
return NULL;
}
#endif
if (SSL_get_ex_data_X509_STORE_CTX_idx() < 0)
{
SSLerr(SSL_F_SSL_CTX_NEW,SSL_R_X509_VERIFICATION_SETUP_PROBLEMS);
goto err;
}
ret=(SSL_CTX *)OPENSSL_malloc(sizeof(SSL_CTX));
if (ret == NULL)
goto err;
memset(ret,0,sizeof(SSL_CTX));
ret->method=meth;
ret->cert_store=NULL;
ret->session_cache_mode=SSL_SESS_CACHE_SERVER;
ret->session_cache_size=SSL_SESSION_CACHE_MAX_SIZE_DEFAULT;
ret->session_cache_head=NULL;
ret->session_cache_tail=NULL;
/* We take the system default */
ret->session_timeout=meth->get_timeout();
ret->new_session_cb=0;
ret->remove_session_cb=0;
ret->get_session_cb=0;
ret->generate_session_id=0;
memset((char *)&ret->stats,0,sizeof(ret->stats));
ret->references=1;
ret->quiet_shutdown=0;
/* ret->cipher=NULL;*/
/* ret->s2->challenge=NULL;
ret->master_key=NULL;
ret->key_arg=NULL;
ret->s2->conn_id=NULL; */
ret->info_callback=NULL;
ret->app_verify_callback=0;
ret->app_verify_arg=NULL;
ret->max_cert_list=SSL_MAX_CERT_LIST_DEFAULT;
ret->read_ahead=0;
ret->msg_callback=0;
ret->msg_callback_arg=NULL;
ret->verify_mode=SSL_VERIFY_NONE;
#if 0
ret->verify_depth=-1; /* Don't impose a limit (but x509_lu.c does) */
#endif
ret->sid_ctx_length=0;
ret->default_verify_callback=NULL;
if ((ret->cert=ssl_cert_new()) == NULL)
goto err;
ret->default_passwd_callback=0;
ret->default_passwd_callback_userdata=NULL;
ret->client_cert_cb=0;
ret->app_gen_cookie_cb=0;
ret->app_verify_cookie_cb=0;
ret->sessions=lh_new(LHASH_HASH_FN(SSL_SESSION_hash),
LHASH_COMP_FN(SSL_SESSION_cmp));
if (ret->sessions == NULL) goto err;
ret->cert_store=X509_STORE_new();
if (ret->cert_store == NULL) goto err;
ssl_create_cipher_list(ret->method,
&ret->cipher_list,&ret->cipher_list_by_id,
SSL_DEFAULT_CIPHER_LIST);
if (ret->cipher_list == NULL
|| sk_SSL_CIPHER_num(ret->cipher_list) <= 0)
{
SSLerr(SSL_F_SSL_CTX_NEW,SSL_R_LIBRARY_HAS_NO_CIPHERS);
goto err2;
}
ret->param = X509_VERIFY_PARAM_new();
if (!ret->param)
goto err;
if ((ret->rsa_md5=EVP_get_digestbyname("ssl2-md5")) == NULL)
{
SSLerr(SSL_F_SSL_CTX_NEW,SSL_R_UNABLE_TO_LOAD_SSL2_MD5_ROUTINES);
goto err2;
}
if ((ret->md5=EVP_get_digestbyname("ssl3-md5")) == NULL)
{
SSLerr(SSL_F_SSL_CTX_NEW,SSL_R_UNABLE_TO_LOAD_SSL3_MD5_ROUTINES);
goto err2;
}
if ((ret->sha1=EVP_get_digestbyname("ssl3-sha1")) == NULL)
{
SSLerr(SSL_F_SSL_CTX_NEW,SSL_R_UNABLE_TO_LOAD_SSL3_SHA1_ROUTINES);
goto err2;
}
if ((ret->client_CA=sk_X509_NAME_new_null()) == NULL)
goto err;
CRYPTO_new_ex_data(CRYPTO_EX_INDEX_SSL_CTX, ret, &ret->ex_data);
ret->extra_certs=NULL;
/* No compression for DTLS */
if (meth->version != DTLS1_VERSION)
ret->comp_methods=SSL_COMP_get_compression_methods();
#ifndef OPENSSL_NO_TLSEXT
ret->tlsext_servername_callback = 0;
ret->tlsext_servername_arg = NULL;
/* Setup RFC4507 ticket keys */
if ((RAND_pseudo_bytes(ret->tlsext_tick_key_name, 16) <= 0)
|| (RAND_bytes(ret->tlsext_tick_hmac_key, 16) <= 0)
|| (RAND_bytes(ret->tlsext_tick_aes_key, 16) <= 0))
ret->options |= SSL_OP_NO_TICKET;
ret->tlsext_status_cb = 0;
ret->tlsext_status_arg = NULL;
#endif
#ifndef OPENSSL_NO_ENGINE
ret->client_cert_engine = NULL;
#ifdef OPENSSL_SSL_CLIENT_ENGINE_AUTO
#define eng_strx(x) #x
#define eng_str(x) eng_strx(x)
/* Use specific client engine automatically... ignore errors */
{
ENGINE *eng;
eng = ENGINE_by_id(eng_str(OPENSSL_SSL_CLIENT_ENGINE_AUTO));
if (!eng)
{
ERR_clear_error();
ENGINE_load_builtin_engines();
eng = ENGINE_by_id(eng_str(OPENSSL_SSL_CLIENT_ENGINE_AUTO));
}
if (!eng || !SSL_CTX_set_client_cert_engine(ret, eng))
ERR_clear_error();
}
#endif
#endif
/* Default is to connect to non-RI servers. When RI is more widely
* deployed, we might change this.
*/
ret->options |= SSL_OP_LEGACY_SERVER_CONNECT;
return(ret);
err:
SSLerr(SSL_F_SSL_CTX_NEW,ERR_R_MALLOC_FAILURE);
err2:
if (ret != NULL) SSL_CTX_free(ret);
return(NULL);
} | 0 | [
"CWE-310"
] | openssl | c6a876473cbff0fd323c8abcaace98ee2d21863d | 25,453,420,205,887,617,000,000,000,000,000,000,000 | 172 | Support TLS_FALLBACK_SCSV.
Reviewed-by: Stephen Henson <[email protected]> |
static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt)
{
struct qib_ctxtdata *rcd;
unsigned pollflag;
rcd = ctxt_fp(fp);
if (!rcd)
pollflag = POLLERR;
else if (rcd->poll_type == QIB_POLL_TYPE_URGENT)
pollflag = qib_poll_urgent(rcd, fp, pt);
else if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV)
pollflag = qib_poll_next(rcd, fp, pt);
else /* invalid */
pollflag = POLLERR;
return pollflag;
} | 0 | [
"CWE-284",
"CWE-264"
] | linux | e6bd18f57aad1a2d1ef40e646d03ed0f2515c9e3 | 148,123,243,968,609,600,000,000,000,000,000,000,000 | 17 | IB/security: Restrict use of the write() interface
The drivers/infiniband stack uses write() as a replacement for
bi-directional ioctl(). This is not safe. There are ways to
trigger write calls that result in the return structure that
is normally written to user space being shunted off to
user-specified kernel memory instead.
For the immediate repair, detect and deny suspicious accesses to
the write API.
For long term, update the user space libraries and the kernel API
to something that doesn't present the same security vulnerabilities
(likely a structured ioctl() interface).
The impacted uAPI interfaces are generally only available if
hardware from drivers/infiniband is installed in the system.
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
[ Expanded check to all known write() entry points ]
Cc: [email protected]
Signed-off-by: Doug Ledford <[email protected]> |
char *mutt_getnamebyvalue (int val, const struct mapping_t *map)
{
int i;
for (i=0; map[i].name; i++)
if (map[i].value == val)
return (map[i].name);
return NULL;
} | 0 | [
"CWE-668"
] | mutt | 6d0624411a979e2e1d76af4dd97d03f47679ea4a | 131,000,155,459,748,980,000,000,000,000,000,000,000 | 9 | use a 64-bit random value in temporary filenames.
closes #3158 |
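A hedged sketch of the idea, not mutt's code: widen the random component of a temporary filename to 64 bits so names are not cheaply predictable. random() here is only illustrative (two 31-bit draws); a real implementation would use a proper CSPRNG:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void toy_mktemp_name(char *buf, size_t len, const char *dir)
{
    /* illustrative 62-bit value assembled from two random() calls */
    uint64_t r = ((uint64_t)random() << 32) | (uint64_t)random();
    snprintf(buf, len, "%s/mutt-%016llx", dir, (unsigned long long)r);
}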
bool val_native_with_conversion_result(THD *thd, Native *to,
const Type_handler *th)
{
return th->Item_val_native_with_conversion_result(thd, this, to);
} | 0 | [
"CWE-617"
] | server | 807945f2eb5fa22e6f233cc17b85a2e141efe2c8 | 69,305,252,996,371,060,000,000,000,000,000,000,000 | 5 | MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item. |
eval0(
char_u *arg,
typval_T *rettv,
exarg_T *eap,
evalarg_T *evalarg)
{
int ret;
char_u *p;
char_u *expr_end;
int did_emsg_before = did_emsg;
int called_emsg_before = called_emsg;
int flags = evalarg == NULL ? 0 : evalarg->eval_flags;
int check_for_end = TRUE;
int end_error = FALSE;
p = skipwhite(arg);
ret = eval1(&p, rettv, evalarg);
expr_end = p;
p = skipwhite(p);
// In Vim9 script a command block is not split at NL characters for
// commands using an expression argument. Skip over a '#' comment to check
// for a following NL. Require white space before the '#'.
if (in_vim9script() && p > expr_end)
while (*p == '#')
{
char_u *nl = vim_strchr(p, NL);
if (nl == NULL)
break;
p = skipwhite(nl + 1);
if (eap != NULL && *p != NUL)
eap->nextcmd = p;
check_for_end = FALSE;
}
if (ret != FAIL && check_for_end)
end_error = !ends_excmd2(arg, p);
if (ret == FAIL || end_error)
{
if (ret != FAIL)
clear_tv(rettv);
/*
* Report the invalid expression unless the expression evaluation has
* been cancelled due to an aborting error, an interrupt, or an
* exception, or we already gave a more specific error.
* Also check called_emsg for when using assert_fails().
*/
if (!aborting()
&& did_emsg == did_emsg_before
&& called_emsg == called_emsg_before
&& (flags & EVAL_CONSTANT) == 0
&& (!in_vim9script() || !vim9_bad_comment(p)))
{
if (end_error)
semsg(_(e_trailing_arg), p);
else
semsg(_(e_invalid_expression_str), arg);
}
// Some of the expression may not have been consumed. Do not check for
// a next command to avoid more errors, unless "|" is following, which
// could only be a command separator.
if (eap != NULL && skipwhite(p)[0] == '|' && skipwhite(p)[1] != '|')
eap->nextcmd = check_nextcmd(p);
return FAIL;
}
if (check_for_end && eap != NULL)
set_nextcmd(eap, p);
return ret;
} | 0 | [
"CWE-122",
"CWE-787"
] | vim | 605ec91e5a7330d61be313637e495fa02a6dc264 | 54,191,423,663,616,110,000,000,000,000,000,000,000 | 73 | patch 8.2.3847: illegal memory access when using a lambda with an error
Problem: Illegal memory access when using a lambda with an error.
Solution: Avoid skipping over the NUL after a string. |
static void msix_table_mmio_write(void *opaque, hwaddr addr,
uint64_t val, unsigned size)
{
PCIDevice *dev = opaque;
int vector = addr / PCI_MSIX_ENTRY_SIZE;
bool was_masked;
was_masked = msix_is_masked(dev, vector);
pci_set_long(dev->msix_table + addr, val);
msix_handle_mask_update(dev, vector, was_masked);
} | 0 | [] | qemu | 43b11a91dd861a946b231b89b7542856ade23d1b | 146,460,566,910,784,640,000,000,000,000,000,000,000 | 11 | msix: implement pba write (but read-only)
qpci_msix_pending() writes on pba region, causing qemu to SEGV:
Program received signal SIGSEGV, Segmentation fault.
[Switching to Thread 0x7ffff7fba8c0 (LWP 25882)]
0x0000000000000000 in ?? ()
(gdb) bt
#0 0x0000000000000000 in ()
#1 0x00005555556556c5 in memory_region_oldmmio_write_accessor (mr=0x5555579f3f80, addr=0, value=0x7fffffffbf68, size=4, shift=0, mask=4294967295, attrs=...) at /home/elmarco/src/qemu/memory.c:434
#2 0x00005555556558e1 in access_with_adjusted_size (addr=0, value=0x7fffffffbf68, size=4, access_size_min=1, access_size_max=4, access=0x55555565563e <memory_region_oldmmio_write_accessor>, mr=0x5555579f3f80, attrs=...) at /home/elmarco/src/qemu/memory.c:506
#3 0x00005555556581eb in memory_region_dispatch_write (mr=0x5555579f3f80, addr=0, data=0, size=4, attrs=...) at /home/elmarco/src/qemu/memory.c:1176
#4 0x000055555560b6f9 in address_space_rw (as=0x555555eff4e0 <address_space_memory>, addr=3759147008, attrs=..., buf=0x7fffffffc1b0 "", len=4, is_write=true) at /home/elmarco/src/qemu/exec.c:2439
#5 0x000055555560baa2 in cpu_physical_memory_rw (addr=3759147008, buf=0x7fffffffc1b0 "", len=4, is_write=1) at /home/elmarco/src/qemu/exec.c:2534
#6 0x000055555564c005 in cpu_physical_memory_write (addr=3759147008, buf=0x7fffffffc1b0, len=4) at /home/elmarco/src/qemu/include/exec/cpu-common.h:80
#7 0x000055555564cd9c in qtest_process_command (chr=0x55555642b890, words=0x5555578de4b0) at /home/elmarco/src/qemu/qtest.c:378
#8 0x000055555564db77 in qtest_process_inbuf (chr=0x55555642b890, inbuf=0x55555641b340) at /home/elmarco/src/qemu/qtest.c:569
#9 0x000055555564dc07 in qtest_read (opaque=0x55555642b890, buf=0x7fffffffc2e0 "writel 0xe0100800 0x0\n", size=22) at /home/elmarco/src/qemu/qtest.c:581
#10 0x000055555574ce3e in qemu_chr_be_write (s=0x55555642b890, buf=0x7fffffffc2e0 "writel 0xe0100800 0x0\n", len=22) at qemu-char.c:306
#11 0x0000555555751263 in tcp_chr_read (chan=0x55555642bcf0, cond=G_IO_IN, opaque=0x55555642b890) at qemu-char.c:2876
#12 0x00007ffff64c9a8a in g_main_context_dispatch (context=0x55555641c400) at gmain.c:3122
(without this patch, this can be reproduced with the ivshmem qtest)
Implement an empty mmio write to avoid the crash.
Signed-off-by: Marc-André Lureau <[email protected]>
Reviewed-by: Paolo Bonzini <[email protected]> |
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath_tx_control *txctl)
{
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = txctl->sta;
struct ieee80211_vif *vif = info->control.vif;
struct ath_softc *sc = hw->priv;
struct ath_txq *txq = txctl->txq;
struct ath_atx_tid *tid = NULL;
struct ath_buf *bf;
int q;
int ret;
ret = ath_tx_prepare(hw, skb, txctl);
if (ret)
return ret;
hdr = (struct ieee80211_hdr *) skb->data;
/*
* At this point, the vif, hw_key and sta pointers in the tx control
* info are no longer valid (overwritten by the ath_frame_info data).
*/
q = skb_get_queue_mapping(skb);
ath_txq_lock(sc, txq);
if (txq == sc->tx.txq_map[q] &&
++txq->pending_frames > sc->tx.txq_max_pending[q] &&
!txq->stopped) {
ieee80211_stop_queue(sc->hw, q);
txq->stopped = true;
}
if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {
ath_txq_unlock(sc, txq);
txq = sc->tx.uapsdq;
ath_txq_lock(sc, txq);
} else if (txctl->an &&
ieee80211_is_data_present(hdr->frame_control)) {
tid = ath_get_skb_tid(sc, txctl->an, skb);
WARN_ON(tid->ac->txq != txctl->txq);
if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
tid->ac->clear_ps_filter = true;
/*
* Add this frame to software queue for scheduling later
* for aggregation.
*/
TX_STAT_INC(txq->axq_qnum, a_queued_sw);
__skb_queue_tail(&tid->buf_q, skb);
if (!txctl->an->sleeping)
ath_tx_queue_tid(txq, tid);
ath_txq_schedule(sc, txq);
goto out;
}
bf = ath_tx_setup_buffer(sc, txq, tid, skb);
if (!bf) {
ath_txq_skb_done(sc, txq, skb);
if (txctl->paprd)
dev_kfree_skb_any(skb);
else
ieee80211_free_txskb(sc->hw, skb);
goto out;
}
bf->bf_state.bfs_paprd = txctl->paprd;
if (txctl->paprd)
bf->bf_state.bfs_paprd_timestamp = jiffies;
ath_set_rates(vif, sta, bf);
ath_tx_send_normal(sc, txq, tid, skb);
out:
ath_txq_unlock(sc, txq);
return 0;
} | 0 | [
"CWE-362",
"CWE-241"
] | linux | 21f8aaee0c62708654988ce092838aa7df4d25d8 | 243,020,722,008,774,900,000,000,000,000,000,000,000 | 83 | ath9k: protect tid->sched check
We check tid->sched without a lock taken on ath_tx_aggr_sleep(). That
is race condition which can result of doing list_del(&tid->list) twice
(second time with poisoned list node) and cause crash like shown below:
[424271.637220] BUG: unable to handle kernel paging request at 00100104
[424271.637328] IP: [<f90fc072>] ath_tx_aggr_sleep+0x62/0xe0 [ath9k]
...
[424271.639953] Call Trace:
[424271.639998] [<f90f6900>] ? ath9k_get_survey+0x110/0x110 [ath9k]
[424271.640083] [<f90f6942>] ath9k_sta_notify+0x42/0x50 [ath9k]
[424271.640177] [<f809cfef>] sta_ps_start+0x8f/0x1c0 [mac80211]
[424271.640258] [<c10f730e>] ? free_compound_page+0x2e/0x40
[424271.640346] [<f809e915>] ieee80211_rx_handlers+0x9d5/0x2340 [mac80211]
[424271.640437] [<c112f048>] ? kmem_cache_free+0x1d8/0x1f0
[424271.640510] [<c1345a84>] ? kfree_skbmem+0x34/0x90
[424271.640578] [<c10fc23c>] ? put_page+0x2c/0x40
[424271.640640] [<c1345a84>] ? kfree_skbmem+0x34/0x90
[424271.640706] [<c1345a84>] ? kfree_skbmem+0x34/0x90
[424271.640787] [<f809dde3>] ? ieee80211_rx_handlers_result+0x73/0x1d0 [mac80211]
[424271.640897] [<f80a07a0>] ieee80211_prepare_and_rx_handle+0x520/0xad0 [mac80211]
[424271.641009] [<f809e22d>] ? ieee80211_rx_handlers+0x2ed/0x2340 [mac80211]
[424271.641104] [<c13846ce>] ? ip_output+0x7e/0xd0
[424271.641182] [<f80a1057>] ieee80211_rx+0x307/0x7c0 [mac80211]
[424271.641266] [<f90fa6ee>] ath_rx_tasklet+0x88e/0xf70 [ath9k]
[424271.641358] [<f80a0f2c>] ? ieee80211_rx+0x1dc/0x7c0 [mac80211]
[424271.641445] [<f90f82db>] ath9k_tasklet+0xcb/0x130 [ath9k]
Bug report:
https://bugzilla.kernel.org/show_bug.cgi?id=70551
Reported-and-tested-by: Max Sydorenko <[email protected]>
Cc: [email protected]
Signed-off-by: Stanislaw Gruszka <[email protected]>
Signed-off-by: John W. Linville <[email protected]> |
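An illustrative reduction of the race and its fix, with simplified stand-in types: the broken pattern tested tid->sched outside the lock, so two CPUs could both decide to remove the node; re-checking under the queue lock makes test-and-remove atomic:

#include <pthread.h>
#include <stdbool.h>

struct toy_tid {
    bool sched;
    struct toy_tid *next_in_queue;   /* stands in for the list node */
};

static void toy_tid_unschedule(struct toy_tid *tid, pthread_mutex_t *txq_lock)
{
    pthread_mutex_lock(txq_lock);
    if (tid->sched) {                /* checked only under the lock */
        tid->sched = false;
        tid->next_in_queue = NULL;   /* list_del() happens exactly once */
    }
    pthread_mutex_unlock(txq_lock);
}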
header_address_rewrite_buffer(char *buffer, const char *address, size_t len)
{
size_t i;
int address_len;
int escape, quote, comment, bracket;
int has_bracket, has_group;
int pos_bracket_beg, pos_bracket_end, pos_component_beg, pos_component_end;
int insert_beg, insert_end;
char copy[APPEND_DOMAIN_BUFFER_SIZE];
escape = quote = comment = bracket = 0;
has_bracket = has_group = 0;
pos_bracket_beg = pos_bracket_end = pos_component_beg = pos_component_end = 0;
for (i = 0; buffer[i]; ++i) {
if (buffer[i] == '(' && !escape && !quote)
comment++;
if (buffer[i] == '"' && !escape && !comment)
quote = !quote;
if (buffer[i] == ')' && !escape && !quote && comment)
comment--;
if (buffer[i] == '\\' && !escape && !comment && !quote)
escape = 1;
else
escape = 0;
if (buffer[i] == '<' && !escape && !comment && !quote && !bracket) {
bracket++;
has_bracket = 1;
pos_bracket_beg = i+1;
}
if (buffer[i] == '>' && !escape && !comment && !quote && bracket) {
bracket--;
pos_bracket_end = i;
}
if (buffer[i] == ':' && !escape && !comment && !quote)
has_group = 1;
/* update insert point if not in comment and not on a whitespace */
if (!comment && buffer[i] != ')' && !isspace((unsigned char)buffer[i]))
pos_component_end = i;
}
/* parse error, do not attempt to modify */
if (escape || quote || comment || bracket)
return;
/* address is group, skip */
if (has_group)
return;
/* there's an address between brackets, just replace everything brackets */
if (has_bracket) {
insert_beg = pos_bracket_beg;
insert_end = pos_bracket_end;
}
else {
if (pos_component_end == 0)
pos_component_beg = 0;
else {
for (pos_component_beg = pos_component_end; pos_component_beg >= 0; --pos_component_beg)
if (buffer[pos_component_beg] == ')' || isspace(buffer[pos_component_beg]))
break;
pos_component_beg += 1;
pos_component_end += 1;
}
insert_beg = pos_component_beg;
insert_end = pos_component_end;
}
/* check that the masquerade won't overflow */
address_len = strlen(address);
if (strlen(buffer) - (insert_end - insert_beg) + address_len >= len)
return;
(void)strlcpy(copy, buffer, sizeof copy);
(void)strlcpy(copy+insert_beg, address, sizeof (copy) - insert_beg);
(void)strlcat(copy, buffer+insert_end, sizeof (copy));
memcpy(buffer, copy, len);
} | 0 | [
"CWE-78",
"CWE-252"
] | src | 9dcfda045474d8903224d175907bfc29761dcb45 | 188,685,356,119,767,800,000,000,000,000,000,000,000 | 78 | Fix a security vulnerability discovered by Qualys which can lead to a
privileges escalation on mbox deliveries and unprivileged code execution
on lmtp deliveries, due to a logic issue causing a sanity check to be
missed.
ok eric@, millert@ |
void sdma_seqfile_dump_cpu_list(struct seq_file *s,
struct hfi1_devdata *dd,
unsigned long cpuid)
{
struct sdma_rht_node *rht_node;
int i, j;
rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpuid,
sdma_rht_params);
if (!rht_node)
return;
seq_printf(s, "cpu%3lu: ", cpuid);
for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) {
if (!rht_node->map[i] || !rht_node->map[i]->ctr)
continue;
seq_printf(s, " vl%d: [", i);
for (j = 0; j < rht_node->map[i]->ctr; j++) {
if (!rht_node->map[i]->sde[j])
continue;
if (j > 0)
seq_puts(s, ",");
seq_printf(s, " sdma%2d",
rht_node->map[i]->sde[j]->this_idx);
}
seq_puts(s, " ]");
}
seq_puts(s, "\n");
} | 0 | [
"CWE-400",
"CWE-401"
] | linux | 34b3be18a04ecdc610aae4c48e5d1b799d8689f6 | 305,212,830,790,340,000,000,000,000,000,000,000,000 | 34 | RDMA/hfi1: Prevent memory leak in sdma_init
In sdma_init, if rhashtable_init fails, the memory allocated for
tmp_sdma_rht should be released.
Fixes: 5a52a7acf7e2 ("IB/hfi1: NULL pointer dereference when freeing rhashtable")
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Navid Emamdoost <[email protected]>
Acked-by: Dennis Dalessandro <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]> |
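The leak has a simple shape; here it is as a self-contained sketch, with malloc/free standing in for kzalloc/kfree and a stub table_init() standing in for rhashtable_init():

#include <stdlib.h>

static int table_init(void *tbl) { (void)tbl; return 0; } /* stub */

static int toy_sdma_init(void **out)
{
    void *tmp_rht = malloc(128);
    if (!tmp_rht)
        return -1;

    if (table_init(tmp_rht) != 0) {
        free(tmp_rht);   /* the release the fix adds on the error path */
        return -1;
    }
    *out = tmp_rht;
    return 0;
}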
_asn1_decode_simple_ber (unsigned int etype, const unsigned char *der,
unsigned int _der_len, unsigned char **str,
unsigned int *str_len, unsigned int *ber_len,
unsigned dflags)
{
int tag_len, len_len;
const unsigned char *p;
int der_len = _der_len;
uint8_t *total = NULL;
unsigned total_size = 0;
unsigned char class;
unsigned long tag;
unsigned char *out = NULL;
const unsigned char *cout = NULL;
unsigned out_len;
long result;
if (ber_len) *ber_len = 0;
if (der == NULL || der_len == 0)
{
warn();
return ASN1_VALUE_NOT_VALID;
}
if (ETYPE_OK (etype) == 0)
{
warn();
return ASN1_VALUE_NOT_VALID;
}
/* doesn't handle constructed + definite classes */
class = ETYPE_CLASS (etype);
if (class != ASN1_CLASS_UNIVERSAL)
{
warn();
return ASN1_VALUE_NOT_VALID;
}
p = der;
if (dflags & DECODE_FLAG_HAVE_TAG)
{
result = asn1_get_tag_der (p, der_len, &class, &tag_len, &tag);
if (result != ASN1_SUCCESS)
{
warn();
return result;
}
if (tag != ETYPE_TAG (etype))
{
warn();
return ASN1_DER_ERROR;
}
p += tag_len;
DECR_LEN(der_len, tag_len);
if (ber_len) *ber_len += tag_len;
}
/* indefinite constructed */
if (((dflags & DECODE_FLAG_INDEFINITE) || class == ASN1_CLASS_STRUCTURED) && ETYPE_IS_STRING(etype))
{
len_len = 1;
DECR_LEN(der_len, len_len);
if (p[0] != 0x80)
{
warn();
result = ASN1_DER_ERROR;
goto cleanup;
}
p += len_len;
if (ber_len) *ber_len += len_len;
/* decode the available octet strings */
do
{
unsigned tmp_len;
result = asn1_decode_simple_ber(etype, p, der_len, &out, &out_len, &tmp_len);
if (result != ASN1_SUCCESS)
{
warn();
goto cleanup;
}
p += tmp_len;
DECR_LEN(der_len, tmp_len);
if (ber_len) *ber_len += tmp_len;
DECR_LEN(der_len, 2); /* we need the EOC */
if (out_len > 0)
{
result = append(&total, &total_size, out, out_len);
if (result != ASN1_SUCCESS)
{
warn();
goto cleanup;
}
}
free(out);
out = NULL;
if (p[0] == 0 && p[1] == 0) /* EOC */
{
if (ber_len) *ber_len += 2;
break;
}
/* no EOC */
der_len += 2;
if (der_len == 2)
{
warn();
result = ASN1_DER_ERROR;
goto cleanup;
}
}
while(1);
}
else if (class == ETYPE_CLASS(etype))
{
if (ber_len)
{
result = asn1_get_length_der (p, der_len, &len_len);
if (result < 0)
{
warn();
result = ASN1_DER_ERROR;
goto cleanup;
}
*ber_len += result + len_len;
}
/* non-string values are decoded as DER */
result = _asn1_decode_simple_der(etype, der, _der_len, &cout, &out_len, dflags);
if (result != ASN1_SUCCESS)
{
warn();
goto cleanup;
}
result = append(&total, &total_size, cout, out_len);
if (result != ASN1_SUCCESS)
{
warn();
goto cleanup;
}
}
else
{
warn();
result = ASN1_DER_ERROR;
goto cleanup;
}
*str = total;
*str_len = total_size;
return ASN1_SUCCESS;
cleanup:
free(out);
free(total);
return result;
} | 1 | [
"CWE-674"
] | libtasn1 | c593ae84cfcde8fea45787e53950e0ac71e9ca97 | 305,117,716,441,681,820,000,000,000,000,000,000,000 | 175 | _asn1_decode_simple_ber: restrict the levels of recursion to 3
On indefinite string decoding, setting a maximum recursion depth
protects the BER decoder from stack exhaustion due to unbounded
recursion.
Signed-off-by: Nikos Mavrogiannopoulos <[email protected]> |
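A minimal sketch of the CWE-674 mitigation: thread a depth counter through the recursive decoder and refuse input nested deeper than a small fixed limit (the patch uses 3), so crafted indefinite-length BER cannot grow the call stack without bound. Names are illustrative, not libtasn1's:

#define MAX_BER_DEPTH 3

static int decode_ber_depth(const unsigned char *der, unsigned len,
                            unsigned depth)
{
    if (depth > MAX_BER_DEPTH)
        return -1;   /* recursion-limit error */

    /* ... on hitting a nested indefinite-length string, recurse with
     *     decode_ber_depth(p, remaining, depth + 1);
     * instead of the unbounded self-call seen above ... */
    (void)der; (void)len;
    return 0;
}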
void RemoveInsertedCSS(v8::Isolate* isolate, const std::u16string& key) {
content::RenderFrame* render_frame;
if (!MaybeGetRenderFrame(isolate, "removeInsertedCSS", &render_frame))
return;
blink::WebFrame* web_frame = render_frame->GetWebFrame();
if (web_frame->IsWebLocalFrame()) {
web_frame->ToWebLocalFrame()->GetDocument().RemoveInsertedStyleSheet(
blink::WebString::FromUTF16(key));
}
} | 0 | [] | electron | e9fa834757f41c0b9fe44a4dffe3d7d437f52d34 | 329,978,743,964,535,840,000,000,000,000,000,000,000 | 11 | fix: ensure ElectronBrowser mojo service is only bound to appropriate render frames (#33344)
* fix: ensure ElectronBrowser mojo service is only bound to authorized render frames
Notes: no-notes
* refactor: extract electron API IPC to its own mojo interface
* fix: just check main frame not primary main frame
Co-authored-by: Samuel Attard <[email protected]>
Co-authored-by: Samuel Attard <[email protected]> |
static int __init set_trace_boot_options(char *str)
{
strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
return 0;
} | 0 | [
"CWE-415"
] | linux | 4397f04575c44e1440ec2e49b6302785c95fd2f8 | 84,334,999,641,545,000,000,000,000,000,000,000,000 | 5 | tracing: Fix possible double free on failure of allocating trace buffer
Jing Xia and Chunyan Zhang reported that on failing to allocate part of the
tracing buffer, memory is freed, but the pointers that point to them are not
initialized back to NULL, and later paths may try to free the freed memory
again. Jing and Chunyan fixed one of the locations that does this, but
missed a spot.
Link: http://lkml.kernel.org/r/[email protected]
Cc: [email protected]
Fixes: 737223fbca3b1 ("tracing: Consolidate buffer allocation code")
Reported-by: Jing Xia <[email protected]>
Reported-by: Chunyan Zhang <[email protected]>
Signed-off-by: Steven Rostedt (VMware) <[email protected]> |
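The missed spot has the classic shape below: after freeing a partially allocated sub-buffer, the pointer must be reset so a later teardown path cannot free it again (a sketch, with free() standing in for the kernel allocator):

#include <stdlib.h>

struct toy_trace_buffer {
    void *buffer;
};

static void toy_free_on_error(struct toy_trace_buffer *tb)
{
    free(tb->buffer);
    tb->buffer = NULL;   /* the missing reset; free(NULL) later is a no-op */
}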
static void test_validation_rre(void)
{
test_validation(test_rre_bounds_server);
} | 0 | [] | gtk-vnc | ea0386933214c9178aaea9f2f85049ea3fa3e14a | 315,549,602,968,651,700,000,000,000,000,000,000,000 | 4 | Fix bounds checking for RRE, hextile & copyrect encodings
While the client would bounds check the overall update
region, it failed to bounds check the payload data
parameters.
Add a test case to validate bounds checking.
https://bugzilla.gnome.org/show_bug.cgi?id=778048
CVE-2017-5884
Signed-off-by: Daniel P. Berrange <[email protected]> |
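A hedged sketch of the added payload check: besides validating the overall update rectangle against the framebuffer, each encoding's sub-rectangles must be validated against the update rectangle, with arithmetic widened so uint16_t sums cannot wrap:

#include <stdint.h>

static int subrect_ok(uint16_t x, uint16_t y, uint16_t w, uint16_t h,
                      uint16_t upd_w, uint16_t upd_h)
{
    return (uint32_t)x + w <= upd_w && (uint32_t)y + h <= upd_h;
}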
void vidtv_s302m_encoder_destroy(struct vidtv_encoder *e)
{
if (e->id != S302M) {
pr_err_ratelimited("Encoder type mismatch, skipping.\n");
return;
}
vidtv_s302m_access_unit_destroy(e);
kfree(e->name);
vfree(e->encoder_buf);
kfree(e->ctx);
kfree(e);
} | 0 | [
"CWE-476"
] | linux | e6a21a14106d9718aa4f8e115b1e474888eeba44 | 78,508,914,023,926,480,000,000,000,000,000,000,000 | 13 | media: vidtv: Check for null return of vzalloc
Since vzalloc() can fail, e->encoder_buf might be NULL. It is
therefore better to check it in order to guarantee that the
initialization succeeds.
If it fails, we need to free not only 'e' but also 'e->name'.
Likewise, if the allocation for ctx fails, 'e->encoder_buf' needs
to be freed as well.
Fixes: f90cf6079bf6 ("media: vidtv: add a bridge driver")
Signed-off-by: Jiasheng Jiang <[email protected]>
Signed-off-by: Hans Verkuil <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]> |
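The fix's unwind order, reduced to a self-contained sketch (malloc/strdup/free stand in for the kernel allocators): check the big buffer allocation and, on any failure, release exactly what was allocated before it:

#include <stdlib.h>
#include <string.h>

struct toy_encoder {
    char *name;
    void *encoder_buf;
};

static struct toy_encoder *toy_encoder_init(const char *name, size_t bufsz)
{
    struct toy_encoder *e = calloc(1, sizeof(*e));
    if (!e)
        return NULL;
    e->name = strdup(name);
    if (!e->name)
        goto free_e;
    e->encoder_buf = malloc(bufsz);   /* the vzalloc() being checked */
    if (!e->encoder_buf)
        goto free_name;
    return e;

free_name:
    free(e->name);
free_e:
    free(e);
    return NULL;
}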
__nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
struct nf_queue_entry *entry)
{
struct sk_buff *nskb;
int err = -ENOBUFS;
__be32 *packet_id_ptr;
int failopen = 0;
nskb = nfqnl_build_packet_message(net, queue, entry, &packet_id_ptr);
if (nskb == NULL) {
err = -ENOMEM;
goto err_out;
}
spin_lock_bh(&queue->lock);
if (queue->queue_total >= queue->queue_maxlen) {
if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
failopen = 1;
err = 0;
} else {
queue->queue_dropped++;
net_warn_ratelimited("nf_queue: full at %d entries, dropping packets(s)\n",
queue->queue_total);
}
goto err_out_free_nskb;
}
entry->id = ++queue->id_sequence;
*packet_id_ptr = htonl(entry->id);
/* nfnetlink_unicast will either free the nskb or add it to a socket */
err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT);
if (err < 0) {
queue->queue_user_dropped++;
goto err_out_unlock;
}
__enqueue_entry(queue, entry);
spin_unlock_bh(&queue->lock);
return 0;
err_out_free_nskb:
kfree_skb(nskb);
err_out_unlock:
spin_unlock_bh(&queue->lock);
if (failopen)
nf_reinject(entry, NF_ACCEPT);
err_out:
return err;
} | 0 | [
"CWE-416"
] | net | 36d5fe6a000790f56039afe26834265db0a3ad4c | 6,307,897,836,322,463,000,000,000,000,000,000,000 | 50 | core, nfqueue, openvswitch: Orphan frags in skb_zerocopy and handle errors
skb_zerocopy can copy elements of the frags array between skbs, but it doesn't
orphan them. Also, it doesn't handle errors, so this patch takes care of that
as well, and modifies the callers accordingly. skb_tx_error() is also added to
the callers so they will signal the failed delivery towards the creator of the
skb.
Signed-off-by: Zoltan Kiss <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
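Roughly, the changed contract looks like this (a sketch with toy types, not the kernel API): the copy helper returns an error instead of void, orphans sender-owned frag pages first, and callers drop the packet and signal a tx error when it fails:

#include <errno.h>

struct toy_skb { int nr_frags; };

static int toy_orphan_frags(struct toy_skb *skb) { (void)skb; return 0; }

static int toy_zerocopy(struct toy_skb *to, struct toy_skb *from)
{
    /* take ownership of sender-owned frag pages before sharing them */
    if (toy_orphan_frags(from) != 0)
        return -ENOMEM;   /* callers now see and must handle failure */
    (void)to;
    /* ... copy frag references between the skbs ... */
    return 0;
}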
rndr_header_anchor(struct buf *out, const struct buf *anchor)
{
static const char *STRIPPED = " -&+$,/:;=?@\"#{}|^~[]`\\*()%.!'";
const uint8_t *a = anchor->data;
const size_t size = anchor->size;
size_t i = 0;
int stripped = 0, inserted = 0;
for (; i < size; ++i) {
// skip html tags
if (a[i] == '<') {
while (i < size && a[i] != '>')
i++;
// skip html entities
} else if (a[i] == '&') {
while (i < size && a[i] != ';')
i++;
}
// replace non-ascii or invalid characters with dashes
else if (!isascii(a[i]) || strchr(STRIPPED, a[i])) {
if (inserted && !stripped)
bufputc(out, '-');
// and do it only once
stripped = 1;
}
else {
bufputc(out, tolower(a[i]));
stripped = 0;
inserted++;
}
}
// strip the trailing dash if anything was added
if (stripped && inserted)
out->size--;
// if anchor found empty, use djb2 hash for it
if (!inserted && anchor->size) {
unsigned long hash = 5381;
for (i = 0; i < size; ++i) {
hash = ((hash << 5) + hash) + a[i]; /* h * 33 + c */
}
bufprintf(out, "part-%lx", hash);
}
} | 0 | [
"CWE-79",
"CWE-74"
] | redcarpet | a699c82292b17c8e6a62e1914d5eccc252272793 | 256,372,305,251,313,200,000,000,000,000,000,000,000 | 46 | Fix a security issue using `:quote` with `:escape_html`
Reported by @johan-smits. |
int rawv6_mh_filter_unregister(mh_filter_t filter)
{
RCU_INIT_POINTER(mh_filter, NULL);
synchronize_rcu();
return 0;
} | 0 | [
"CWE-20"
] | net | bceaa90240b6019ed73b49965eac7d167610be69 | 332,021,976,282,069,670,000,000,000,000,000,000,000 | 6 | inet: prevent leakage of uninitialized memory to user in recv syscalls
Only update *addr_len when we actually fill in sockaddr, otherwise we
can return uninitialized memory from the stack to the caller in the
recvfrom, recvmmsg and recvmsg syscalls. Drop the (addr_len == NULL)
checks because we only get called with a valid addr_len pointer either
from sock_common_recvmsg or inet_recvmsg.
If a blocking read waits on a socket which is concurrently shut down we
now return zero and set msg_msgnamelen to 0.
Reported-by: mpb <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
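The rule reduces to a small pattern, sketched here with simplified types: *addr_len is written only after the sockaddr has actually been filled, so a path that skips the fill can never direct userspace to read uninitialized stack bytes:

#include <string.h>

struct toy_sockaddr { unsigned short family; char data[14]; };

static void toy_fill_addr(struct toy_sockaddr *addr, int *addr_len,
                          int have_peer)
{
    if (!have_peer)
        return;                  /* *addr_len stays untouched */

    memset(addr, 0, sizeof(*addr));
    addr->family = 2;            /* AF_INET stand-in */
    *addr_len = sizeof(*addr);   /* set only once addr is initialized */
}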
DEFUN(srchprv, SEARCH_PREV, "Continue search backward")
{
srch_nxtprv(1);
} | 0 | [
"CWE-59",
"CWE-241"
] | w3m | 18dcbadf2771cdb0c18509b14e4e73505b242753 | 43,726,390,484,668,140,000,000,000,000,000,000,000 | 4 | Make temporary directory safely when ~/.w3m is unwritable |
xmlParseCharRef(xmlParserCtxtPtr ctxt) {
int val = 0;
int count = 0;
/*
* Using RAW/CUR/NEXT is okay since we are working on ASCII range here
*/
if ((RAW == '&') && (NXT(1) == '#') &&
(NXT(2) == 'x')) {
SKIP(3);
GROW;
while (RAW != ';') { /* loop blocked by count */
if (count++ > 20) {
count = 0;
GROW;
if (ctxt->instate == XML_PARSER_EOF)
return(0);
}
if ((RAW >= '0') && (RAW <= '9'))
val = val * 16 + (CUR - '0');
else if ((RAW >= 'a') && (RAW <= 'f') && (count < 20))
val = val * 16 + (CUR - 'a') + 10;
else if ((RAW >= 'A') && (RAW <= 'F') && (count < 20))
val = val * 16 + (CUR - 'A') + 10;
else {
xmlFatalErr(ctxt, XML_ERR_INVALID_HEX_CHARREF, NULL);
val = 0;
break;
}
if (val > 0x110000)
val = 0x110000;
NEXT;
count++;
}
if (RAW == ';') {
/* on purpose to avoid reentrancy problems with NEXT and SKIP */
ctxt->input->col++;
ctxt->nbChars ++;
ctxt->input->cur++;
}
} else if ((RAW == '&') && (NXT(1) == '#')) {
SKIP(2);
GROW;
while (RAW != ';') { /* loop blocked by count */
if (count++ > 20) {
count = 0;
GROW;
if (ctxt->instate == XML_PARSER_EOF)
return(0);
}
if ((RAW >= '0') && (RAW <= '9'))
val = val * 10 + (CUR - '0');
else {
xmlFatalErr(ctxt, XML_ERR_INVALID_DEC_CHARREF, NULL);
val = 0;
break;
}
if (val > 0x110000)
val = 0x110000;
NEXT;
count++;
}
if (RAW == ';') {
/* on purpose to avoid reentrancy problems with NEXT and SKIP */
ctxt->input->col++;
ctxt->nbChars ++;
ctxt->input->cur++;
}
} else {
xmlFatalErr(ctxt, XML_ERR_INVALID_CHARREF, NULL);
}
/*
* [ WFC: Legal Character ]
* Characters referred to using character references must match the
* production for Char.
*/
if (val >= 0x110000) {
xmlFatalErrMsgInt(ctxt, XML_ERR_INVALID_CHAR,
"xmlParseCharRef: character reference out of bounds\n",
val);
} else if (IS_CHAR(val)) {
return(val);
} else {
xmlFatalErrMsgInt(ctxt, XML_ERR_INVALID_CHAR,
"xmlParseCharRef: invalid xmlChar value %d\n",
val);
}
return(0);
} | 0 | [
"CWE-401"
] | libxml2 | 5a02583c7e683896d84878bd90641d8d9b0d0549 | 96,233,165,179,092,210,000,000,000,000,000,000,000 | 92 | Fix memory leak in xmlParseBalancedChunkMemoryRecover
When doc is NULL, the namespace created in xmlTreeEnsureXMLDecl
is bound to newDoc->oldNs; in this case, setting newDoc->oldNs to
NULL and freeing newDoc will cause a memory leak.
Found with libFuzzer.
Closes #82. |
static int make_ydt24_entry(int p1, int p2, int16_t *ydt)
{
int lo, hi;
lo = ydt[p1];
hi = ydt[p2];
return (lo + (hi << 8) + (hi << 16)) << 1;
} | 0 | [
"CWE-703"
] | FFmpeg | 2240e2078d53d3cfce8ff1dda64e58fa72038602 | 99,670,149,475,410,670,000,000,000,000,000,000,000 | 8 | truemotion1: check the header size
Fixes invalid reads.
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
CC:[email protected] |
MagickExport MagickBooleanType WriteImage(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
char
filename[MagickPathExtent];
const char
*option;
const DelegateInfo
*delegate_info;
const MagickInfo
*magick_info;
ExceptionInfo
*sans_exception;
ImageInfo
*write_info;
MagickBooleanType
status,
temporary;
PolicyDomain
domain;
PolicyRights
rights;
/*
Determine image type from filename prefix or suffix (e.g. image.jpg).
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
sans_exception=AcquireExceptionInfo();
write_info=CloneImageInfo(image_info);
(void) CopyMagickString(write_info->filename,image->filename,MagickPathExtent);
(void) SetImageInfo(write_info,1,sans_exception);
if (*write_info->magick == '\0')
(void) CopyMagickString(write_info->magick,image->magick,MagickPathExtent);
(void) CopyMagickString(filename,image->filename,MagickPathExtent);
(void) CopyMagickString(image->filename,write_info->filename,MagickPathExtent);
domain=CoderPolicyDomain;
rights=WritePolicyRights;
if (IsRightsAuthorized(domain,rights,write_info->magick) == MagickFalse)
{
sans_exception=DestroyExceptionInfo(sans_exception);
write_info=DestroyImageInfo(write_info);
errno=EPERM;
ThrowBinaryException(PolicyError,"NotAuthorized",filename);
}
/*
Call appropriate image reader based on image type.
*/
magick_info=GetMagickInfo(write_info->magick,sans_exception);
sans_exception=DestroyExceptionInfo(sans_exception);
if (magick_info != (const MagickInfo *) NULL)
{
if (GetMagickEndianSupport(magick_info) == MagickFalse)
image->endian=UndefinedEndian;
else
if ((image_info->endian == UndefinedEndian) &&
(GetMagickRawSupport(magick_info) != MagickFalse))
{
unsigned long
lsb_first;
lsb_first=1;
image->endian=(*(char *) &lsb_first) == 1 ? LSBEndian : MSBEndian;
}
}
(void) SyncImageProfiles(image);
DisassociateImageStream(image);
option=GetImageOption(image_info,"delegate:bimodal");
if ((IfMagickTrue(IsStringTrue(option))) &&
(write_info->page == (char *) NULL) &&
(GetPreviousImageInList(image) == (Image *) NULL) &&
(GetNextImageInList(image) == (Image *) NULL) &&
(IfMagickFalse(IsTaintImage(image))) )
{
delegate_info=GetDelegateInfo(image->magick,write_info->magick,exception);
if ((delegate_info != (const DelegateInfo *) NULL) &&
(GetDelegateMode(delegate_info) == 0) &&
(IsPathAccessible(image->magick_filename) != MagickFalse))
{
/*
Process image with bi-modal delegate.
*/
(void) CopyMagickString(image->filename,image->magick_filename,
MagickPathExtent);
status=InvokeDelegate(write_info,image,image->magick,
write_info->magick,exception);
write_info=DestroyImageInfo(write_info);
(void) CopyMagickString(image->filename,filename,MagickPathExtent);
return(status);
}
}
status=MagickFalse;
temporary=MagickFalse;
if ((magick_info != (const MagickInfo *) NULL) &&
(GetMagickSeekableStream(magick_info) != MagickFalse))
{
char
image_filename[MagickPathExtent];
(void) CopyMagickString(image_filename,image->filename,MagickPathExtent);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
(void) CopyMagickString(image->filename, image_filename,MagickPathExtent);
if (status != MagickFalse)
{
if (IsBlobSeekable(image) == MagickFalse)
{
/*
A seekable stream is required by the encoder.
*/
write_info->adjoin=MagickTrue;
(void) CopyMagickString(write_info->filename,image->filename,
MagickPathExtent);
(void) AcquireUniqueFilename(image->filename);
temporary=MagickTrue;
}
(void) CloseBlob(image);
}
}
if ((magick_info != (const MagickInfo *) NULL) &&
(GetImageEncoder(magick_info) != (EncodeImageHandler *) NULL))
{
/*
Call appropriate image writer based on image type.
*/
if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
LockSemaphoreInfo(magick_info->semaphore);
status=GetImageEncoder(magick_info)(write_info,image,exception);
if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
UnlockSemaphoreInfo(magick_info->semaphore);
}
else
{
delegate_info=GetDelegateInfo((char *) NULL,write_info->magick,exception);
if (delegate_info != (DelegateInfo *) NULL)
{
/*
Process the image with delegate.
*/
*write_info->filename='\0';
if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
LockSemaphoreInfo(delegate_info->semaphore);
status=InvokeDelegate(write_info,image,(char *) NULL,
write_info->magick,exception);
if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
UnlockSemaphoreInfo(delegate_info->semaphore);
(void) CopyMagickString(image->filename,filename,MagickPathExtent);
}
else
{
sans_exception=AcquireExceptionInfo();
magick_info=GetMagickInfo(write_info->magick,sans_exception);
sans_exception=DestroyExceptionInfo(sans_exception);
if ((write_info->affirm == MagickFalse) &&
(magick_info == (const MagickInfo *) NULL))
{
(void) CopyMagickString(write_info->magick,image->magick,
MagickPathExtent);
magick_info=GetMagickInfo(write_info->magick,exception);
}
if ((magick_info == (const MagickInfo *) NULL) ||
(GetImageEncoder(magick_info) == (EncodeImageHandler *) NULL))
{
char
extension[MagickPathExtent];
GetPathComponent(image->filename,ExtensionPath,extension);
if (*extension != '\0')
magick_info=GetMagickInfo(extension,exception);
else
magick_info=GetMagickInfo(image->magick,exception);
(void) CopyMagickString(image->filename,filename,MagickPathExtent);
}
if ((magick_info == (const MagickInfo *) NULL) ||
(GetImageEncoder(magick_info) == (EncodeImageHandler *) NULL))
{
magick_info=GetMagickInfo(image->magick,exception);
if ((magick_info == (const MagickInfo *) NULL) ||
(GetImageEncoder(magick_info) == (EncodeImageHandler *) NULL))
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateError,"NoEncodeDelegateForThisImageFormat",
"`%s'",write_info->magick);
else
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"NoEncodeDelegateForThisImageFormat",
"`%s'",write_info->magick);
}
if ((magick_info != (const MagickInfo *) NULL) &&
(GetImageEncoder(magick_info) != (EncodeImageHandler *) NULL))
{
/*
Call appropriate image writer based on image type.
*/
if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
LockSemaphoreInfo(magick_info->semaphore);
status=GetImageEncoder(magick_info)(write_info,image,exception);
if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
UnlockSemaphoreInfo(magick_info->semaphore);
}
}
}
if (temporary != MagickFalse)
{
/*
Copy temporary image file to permanent.
*/
status=OpenBlob(write_info,image,ReadBinaryBlobMode,exception);
if (status != MagickFalse)
{
(void) RelinquishUniqueFileResource(write_info->filename);
status=ImageToFile(image,write_info->filename,exception);
}
(void) CloseBlob(image);
(void) RelinquishUniqueFileResource(image->filename);
(void) CopyMagickString(image->filename,write_info->filename,
MagickPathExtent);
}
if ((LocaleCompare(write_info->magick,"info") != 0) &&
(write_info->verbose != MagickFalse))
(void) IdentifyImage(image,stdout,MagickFalse,exception);
write_info=DestroyImageInfo(write_info);
return(status);
} | 0 | [
"CWE-476"
] | ImageMagick | 5b4bebaa91849c592a8448bc353ab25a54ff8c44 | 170,252,777,037,463,260,000,000,000,000,000,000,000 | 236 | https://github.com/ImageMagick/ImageMagick/pull/34 |
flac_read_flac2f (SF_PRIVATE *psf, float *ptr, sf_count_t len)
{ FLAC_PRIVATE* pflac = (FLAC_PRIVATE*) psf->codec_data ;
sf_count_t total = 0, current ;
unsigned readlen ;
pflac->pcmtype = PFLAC_PCM_FLOAT ;
while (total < len)
{ pflac->ptr = ptr + total ;
readlen = (len - total > 0x1000000) ? 0x1000000 : (unsigned) (len - total) ;
current = flac_read_loop (psf, readlen) ;
if (current == 0)
break ;
total += current ;
} ;
return total ;
} /* flac_read_flac2f */ | 0 | [
"CWE-119",
"CWE-369"
] | libsndfile | 60b234301adf258786d8b90be5c1d437fc8799e0 | 2,436,757,289,367,661,500,000,000,000,000,000,000 | 18 | src/flac.c: Improve error handling
Especially when dealing with corrupt or malicious files. |
TfLiteStatus AbsEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteType type = GetInput(context, node, 0)->type;
switch (type) {
case kTfLiteFloat32:
return EvalImpl<float>(context, node, std::abs<float>, type);
case kTfLiteInt8: {
const auto* op_data = static_cast<const OpData*>(node->user_data);
const int kMinInt8 = std::numeric_limits<int8_t>::min();
const int kMaxInt8 = std::numeric_limits<int8_t>::max();
std::function<int8_t(int8_t)> func = [&](int8_t i) {
const int32_t value = std::abs(i - op_data->input_offset);
return std::min(
std::max(op_data->output_offset +
MultiplyByQuantizedMultiplier(
value, op_data->multiplier, op_data->shift),
kMinInt8),
kMaxInt8);
};
return EvalImpl<int8_t>(context, node, func, type);
}
default:
TF_LITE_KERNEL_LOG(context, "Current data type %s is not supported.",
TfLiteTypeGetName(type));
return kTfLiteError;
}
} | 0 | [
"CWE-125",
"CWE-787"
] | tensorflow | 1970c2158b1ffa416d159d03c3370b9a462aee35 | 104,154,292,088,989,520,000,000,000,000,000,000,000 | 26 | [tflite]: Insert `nullptr` checks when obtaining tensors.
As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages.
We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`).
PiperOrigin-RevId: 332521299
Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56 |
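The discipline the message describes generalizes beyond TFLite: treat every tensor getter as fallible and return an error status before any dereference. The C sketch below illustrates only the pattern; Tensor, Status, get_input() and eval() are made-up stand-ins, not the TensorFlow Lite API.

#include <stddef.h>
#include <stdio.h>

typedef enum { STATUS_OK, STATUS_ERROR } Status;
typedef struct { const char *name; } Tensor;

/* Hypothetical stand-in for a tensor getter: may return NULL. */
static const Tensor *get_input(const Tensor **inputs, size_t count, size_t i) {
    return (i < count) ? inputs[i] : NULL;
}

/* Check the getter's result before every dereference instead of
 * assuming it can never fail. */
static Status eval(const Tensor **inputs, size_t count) {
    const Tensor *input = get_input(inputs, count, 0);
    if (input == NULL)            /* the null check the patch inserts */
        return STATUS_ERROR;
    printf("evaluating %s\n", input->name);
    return STATUS_OK;
}

int main(void) {
    const Tensor t = { "input0" };
    const Tensor *inputs[] = { &t };
    return eval(inputs, 1) == STATUS_OK ? 0 : 1;
}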
static void unit_update_on_console(Unit *u) {
bool b;
assert(u);
b = unit_needs_console(u);
if (u->on_console == b)
return;
u->on_console = b;
if (b)
manager_ref_console(u->manager);
else
manager_unref_console(u->manager);
} | 0 | [
"CWE-269"
] | systemd | bf65b7e0c9fc215897b676ab9a7c9d1c688143ba | 243,379,907,145,606,500,000,000,000,000,000,000,000 | 15 | core: imply NNP and SUID/SGID restriction for DynamicUser=yes service
Let's be safe, rather than sorry. This way DynamicUser=yes services can
neither benefit from, nor create, SUID/SGID binaries.
Given that DynamicUser= is only a recent addition, we should be able to
get away with turning this on, even though this is strictly speaking a
binary compatibility breakage. |
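On Linux, NoNewPrivileges= boils down to the kernel's no_new_privs attribute, which prevents execve() from granting privileges via set-user-ID, set-group-ID, or file capability bits. A minimal standalone sketch of setting that attribute (not systemd's implementation):

#include <stdio.h>
#include <sys/prctl.h>
#include <unistd.h>

int main(void) {
    /* Once set, the flag cannot be unset and is inherited across
     * fork()/execve(); setuid binaries no longer gain privileges. */
    if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) {
        perror("prctl(PR_SET_NO_NEW_PRIVS)");
        return 1;
    }
    execl("/usr/bin/id", "id", (char *)NULL);
    perror("execl");
    return 1;
}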
inline unsigned int& openmp_mode() {
return _openmp_mode(0,false);
} | 0 | [
"CWE-125"
] | CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 223,464,182,663,804,000,000,000,000,000,000,000,000 | 3 | Fix other issues in 'CImg<T>::load_bmp()'. |
MagickPrivate Cache ClonePixelCache(const Cache cache)
{
CacheInfo
*magick_restrict clone_info;
const CacheInfo
*magick_restrict cache_info;
assert(cache != NULL);
cache_info=(const CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
cache_info->filename);
clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads);
clone_info->virtual_pixel_method=cache_info->virtual_pixel_method;
return((Cache ) clone_info);
} | 0 | [
"CWE-772"
] | ImageMagick | 7a42f63927e7f2e26846b7ed4560e9cb4984af7b | 4,015,920,482,684,524,000,000,000,000,000,000,000 | 18 | https://github.com/ImageMagick/ImageMagick/issues/903 |
static NTSTATUS dcesrv_lsa_CREDRPROFILELOADED(struct dcesrv_call_state *dce_call, TALLOC_CTX *mem_ctx,
struct lsa_CREDRPROFILELOADED *r)
{
DCESRV_FAULT(DCERPC_FAULT_OP_RNG_ERROR);
} | 0 | [
"CWE-200"
] | samba | 0a3aa5f908e351201dc9c4d4807b09ed9eedff77 | 165,379,468,528,941,070,000,000,000,000,000,000,000 | 5 | CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <[email protected]> |
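The error-prone pattern being retired is append-then-search: add an element to a message, then look it up again by name to modify it, possibly matching a different element. A generic sketch of the safer shape, a helper that appends and returns a pointer to the new element, is below; the types and names are illustrative, not the ldb API.

#include <stdio.h>
#include <stdlib.h>

struct element { const char *name; const char *value; };
struct message { struct element *els; size_t count; };

/* Append an element and return a pointer to it, so the caller never
 * has to re-search the message (and possibly find the wrong match). */
static struct element *msg_add_empty(struct message *msg, const char *name) {
    struct element *els =
        realloc(msg->els, (msg->count + 1) * sizeof(*els));
    if (els == NULL)
        return NULL;
    msg->els = els;
    struct element *el = &msg->els[msg->count++];
    el->name = name;
    el->value = NULL;
    return el;
}

int main(void) {
    struct message msg = { NULL, 0 };
    struct element *el = msg_add_empty(&msg, "objectClass");
    if (el == NULL)
        return 1;
    el->value = "user";            /* modify the element we just added */
    printf("%s: %s\n", msg.els[0].name, msg.els[0].value);
    free(msg.els);
    return 0;
}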
ar6000_avail_ev(void *context, void *hif_handle)
{
int i;
struct net_device *dev;
void *ar_netif;
struct ar6_softc *ar;
int device_index = 0;
struct htc_init_info htcInfo;
struct wireless_dev *wdev;
int r = 0;
struct hif_device_os_device_info osDevInfo;
memset(&osDevInfo, 0, sizeof(osDevInfo));
if (HIFConfigureDevice(hif_handle, HIF_DEVICE_GET_OS_DEVICE,
&osDevInfo, sizeof(osDevInfo))) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s: Failed to get OS device instance\n", __func__));
return A_ERROR;
}
AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("ar6000_available\n"));
for (i=0; i < MAX_AR6000; i++) {
if (ar6000_devices[i] == NULL) {
break;
}
}
if (i == MAX_AR6000) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_available: max devices reached\n"));
return A_ERROR;
}
/* Save this. It gives a bit better readability especially since */
/* we use another local "i" variable below. */
device_index = i;
wdev = ar6k_cfg80211_init(osDevInfo.pOSDevice);
if (IS_ERR(wdev)) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: ar6k_cfg80211_init failed\n", __func__));
return A_ERROR;
}
ar_netif = wdev_priv(wdev);
if (ar_netif == NULL) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Can't allocate ar6k priv memory\n", __func__));
return A_ERROR;
}
A_MEMZERO(ar_netif, sizeof(struct ar6_softc));
ar = (struct ar6_softc *)ar_netif;
ar->wdev = wdev;
wdev->iftype = NL80211_IFTYPE_STATION;
dev = alloc_netdev_mq(0, "wlan%d", ether_setup, 1);
if (!dev) {
printk(KERN_CRIT "AR6K: no memory for network device instance\n");
ar6k_cfg80211_deinit(ar);
return A_ERROR;
}
dev->ieee80211_ptr = wdev;
SET_NETDEV_DEV(dev, wiphy_dev(wdev->wiphy));
wdev->netdev = dev;
ar->arNetworkType = INFRA_NETWORK;
ar->smeState = SME_DISCONNECTED;
ar->arAutoAuthStage = AUTH_IDLE;
init_netdev(dev, ifname);
ar->arNetDev = dev;
ar->arHifDevice = hif_handle;
ar->arWlanState = WLAN_ENABLED;
ar->arDeviceIndex = device_index;
ar->arWlanPowerState = WLAN_POWER_STATE_ON;
ar->arWlanOff = false; /* We are in ON state */
#ifdef CONFIG_PM
ar->arWowState = WLAN_WOW_STATE_NONE;
ar->arBTOff = true; /* BT chip assumed to be OFF */
ar->arBTSharing = WLAN_CONFIG_BT_SHARING;
ar->arWlanOffConfig = WLAN_CONFIG_WLAN_OFF;
ar->arSuspendConfig = WLAN_CONFIG_PM_SUSPEND;
ar->arWow2Config = WLAN_CONFIG_PM_WOW2;
#endif /* CONFIG_PM */
A_INIT_TIMER(&ar->arHBChallengeResp.timer, ar6000_detect_error, dev);
ar->arHBChallengeResp.seqNum = 0;
ar->arHBChallengeResp.outstanding = false;
ar->arHBChallengeResp.missCnt = 0;
ar->arHBChallengeResp.frequency = AR6000_HB_CHALLENGE_RESP_FREQ_DEFAULT;
ar->arHBChallengeResp.missThres = AR6000_HB_CHALLENGE_RESP_MISS_THRES_DEFAULT;
ar6000_init_control_info(ar);
init_waitqueue_head(&arEvent);
sema_init(&ar->arSem, 1);
ar->bIsDestroyProgress = false;
INIT_HTC_PACKET_QUEUE(&ar->amsdu_rx_buffer_queue);
#ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL
A_INIT_TIMER(&aptcTimer, aptcTimerHandler, ar);
#endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */
A_INIT_TIMER(&ar->disconnect_timer, disconnect_timer_handler, dev);
BMIInit();
ar6000_sysfs_bmi_init(ar);
{
struct bmi_target_info targ_info;
r = BMIGetTargetInfo(ar->arHifDevice, &targ_info);
if (r)
goto avail_ev_failed;
ar->arVersion.target_ver = targ_info.target_ver;
ar->arTargetType = targ_info.target_type;
wdev->wiphy->hw_version = targ_info.target_ver;
}
r = ar6000_configure_target(ar);
if (r)
goto avail_ev_failed;
A_MEMZERO(&htcInfo,sizeof(htcInfo));
htcInfo.pContext = ar;
htcInfo.TargetFailure = ar6000_target_failure;
ar->arHtcTarget = HTCCreate(ar->arHifDevice,&htcInfo);
if (!ar->arHtcTarget) {
r = -ENOMEM;
goto avail_ev_failed;
}
spin_lock_init(&ar->arLock);
#ifdef WAPI_ENABLE
ar->arWapiEnable = 0;
#endif
if(csumOffload){
/* if an external framework is also needed, change and use an extended rxMetaVersion */
ar->rxMetaVersion=WMI_META_VERSION_2;
}
ar->aggr_cntxt = aggr_init(ar6000_alloc_netbufs);
if (!ar->aggr_cntxt) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("%s() Failed to initialize aggr.\n", __func__));
r = -ENOMEM;
goto avail_ev_failed;
}
aggr_register_rx_dispatcher(ar->aggr_cntxt, (void *)dev, ar6000_deliver_frames_to_nw_stack);
HIFClaimDevice(ar->arHifDevice, ar);
/* We only register the device in the global list if we succeed. */
/* If the device is in the global list, it will be destroyed */
/* when the module is unloaded. */
ar6000_devices[device_index] = dev;
AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("BMI enabled: %d\n", wlaninitmode));
if ((wlaninitmode == WLAN_INIT_MODE_UDEV) ||
(wlaninitmode == WLAN_INIT_MODE_DRV)) {
r = ath6kl_init_netdev(ar);
if (r)
goto avail_ev_failed;
}
/* This runs the init function if registered */
r = register_netdev(dev);
if (r) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_avail: register_netdev failed\n"));
ar6000_destroy(dev, 0);
return r;
}
is_netdev_registered = 1;
#ifdef CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT
arApNetDev = NULL;
#endif /* CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT */
AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("ar6000_avail: name=%s hifdevice=0x%lx, dev=0x%lx (%d), ar=0x%lx\n",
dev->name, (unsigned long)ar->arHifDevice, (unsigned long)dev, device_index,
(unsigned long)ar));
avail_ev_failed :
if (r)
ar6000_sysfs_bmi_deinit(ar);
return r;
} | 0 | [
"CWE-703",
"CWE-264"
] | linux | 550fd08c2cebad61c548def135f67aba284c6162 | 305,959,889,160,794,260,000,000,000,000,000,000,000 | 197 | net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared
After the last patch, we are left in a state in which only drivers calling
ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real
hardware call ether_setup for their net_devices and don't hold any state in
their skbs). There are a handful of drivers that violate this assumption, of
course, and they need to be fixed up. This patch identifies those drivers and
marks them as not being able to support the safe transmission of skbs by
clearing the IFF_TX_SKB_SHARING flag in priv_flags
Signed-off-by: Neil Horman <[email protected]>
CC: Karsten Keil <[email protected]>
CC: "David S. Miller" <[email protected]>
CC: Jay Vosburgh <[email protected]>
CC: Andy Gospodarek <[email protected]>
CC: Patrick McHardy <[email protected]>
CC: Krzysztof Halasa <[email protected]>
CC: "John W. Linville" <[email protected]>
CC: Greg Kroah-Hartman <[email protected]>
CC: Marcel Holtmann <[email protected]>
CC: Johannes Berg <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
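Per driver, the fix amounts to one line after ether_setup(): clear the sharing bit again. IFF_TX_SKB_SHARING and priv_flags are real kernel names, but the sketch below is a self-contained mock of the pattern, not kernel code, and the flag's numeric value here is made up.

#include <stdio.h>

#define IFF_TX_SKB_SHARING 0x10000  /* illustrative value, not the kernel's */

struct net_device { unsigned int priv_flags; };

static void ether_setup(struct net_device *dev) {
    /* The generic Ethernet setup marks skbs as safely shareable ... */
    dev->priv_flags |= IFF_TX_SKB_SHARING;
}

static void driver_setup(struct net_device *dev) {
    ether_setup(dev);
    /* ... and a driver that keeps state in its skbs opts back out. */
    dev->priv_flags &= ~IFF_TX_SKB_SHARING;
}

int main(void) {
    struct net_device dev = { 0 };
    driver_setup(&dev);
    printf("sharing %s\n",
           (dev.priv_flags & IFF_TX_SKB_SHARING) ? "on" : "off");
    return 0;
}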
void AbstractSqlMigrationReader::abortMigration(const QString &errorMsg)
{
qWarning() << "Migration Failed!";
if (!errorMsg.isNull()) {
qWarning() << qPrintable(errorMsg);
}
if (lastError().isValid()) {
qWarning() << "ReaderError:";
dumpStatus();
}
if (_writer->lastError().isValid()) {
qWarning() << "WriterError:";
_writer->dumpStatus();
}
rollback();
_writer->rollback();
_writer = 0;
} | 0 | [
"CWE-89"
] | quassel | aa1008be162cb27da938cce93ba533f54d228869 | 333,492,919,466,875,400,000,000,000,000,000,000,000 | 20 | Fixing security vulnerability with Qt 4.8.5+ and PostgreSQL.
Properly detects whether Qt performs slash escaping in SQL queries or
not, and then configures PostgreSQL accordingly. This bug was
introduced by a bugfix in Qt 4.8.5 that disables slash escaping when
binding queries: https://bugreports.qt-project.org/browse/QTBUG-30076
Thanks to brot and Tucos.
[Fixes #1244] |
static bool pv_eoi_test_and_clr_pending(struct kvm_vcpu *vcpu)
{
u8 val;
if (pv_eoi_get_user(vcpu, &val) < 0)
return false;
val &= KVM_PV_EOI_ENABLED;
if (val && pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0)
return false;
/*
* Clear pending bit in any case: it will be set again on vmentry.
* While this might not be ideal from performance point of view,
* this makes sure pv eoi is only enabled when we know it's safe.
*/
__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
return val;
} | 0 | [
"CWE-476"
] | linux | 00b5f37189d24ac3ed46cb7f11742094778c46ce | 8,068,995,893,014,245,000,000,000,000,000,000,000 | 21 | KVM: x86: Avoid theoretical NULL pointer dereference in kvm_irq_delivery_to_apic_fast()
When kvm_irq_delivery_to_apic_fast() is called with APIC_DEST_SELF
shorthand, 'src' must not be NULL. Crash the VM with KVM_BUG_ON()
instead of crashing the host.
Signed-off-by: Vitaly Kuznetsov <[email protected]>
Message-Id: <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]> |
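KVM_BUG_ON() is the real macro the message cites; the stand-in below only mimics its observable behavior (mark the offending VM as bugged, report whether the condition fired) so the host survives while only the broken guest is taken down.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct kvm { bool vm_bugged; };

/* Illustrative stand-in: record the bug on the VM and report whether
 * it fired, instead of bringing the whole host down. */
static bool kvm_bug_on(bool cond, struct kvm *kvm, const char *what) {
    if (cond) {
        kvm->vm_bugged = true;       /* only this VM is taken down */
        fprintf(stderr, "KVM bug: %s\n", what);
    }
    return cond;
}

static bool deliver_fast(struct kvm *kvm, const void *src, bool self) {
    if (self && kvm_bug_on(src == NULL, kvm, "NULL src with DEST_SELF"))
        return false;                /* fall back to the slow path */
    return true;
}

int main(void) {
    struct kvm vm = { false };
    deliver_fast(&vm, NULL, true);
    printf("vm_bugged=%d\n", vm.vm_bugged);
    return 0;
}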
BGD_DECLARE(void) gdImageChar (gdImagePtr im, gdFontPtr f, int x, int y, int c, int color)
{
int cx, cy;
int px, py;
int fline;
cx = 0;
cy = 0;
#ifdef CHARSET_EBCDIC
c = ASC (c);
#endif /*CHARSET_EBCDIC */
if ((c < f->offset) || (c >= (f->offset + f->nchars))) {
return;
}
fline = (c - f->offset) * f->h * f->w;
for (py = y; (py < (y + f->h)); py++) {
for (px = x; (px < (x + f->w)); px++) {
if (f->data[fline + cy * f->w + cx]) {
gdImageSetPixel (im, px, py, color);
}
cx++;
}
cx = 0;
cy++;
}
} | 0 | [
"CWE-119",
"CWE-787"
] | libgd | 77f619d48259383628c3ec4654b1ad578e9eb40e | 219,026,499,516,779,130,000,000,000,000,000,000,000 | 25 | fix #215 gdImageFillToBorder stack-overflow when invalid color is used |
ddxGiveUp(enum ExitCode error)
{
int i;
xf86VGAarbiterFini();
#ifdef XF86PM
if (xf86OSPMClose)
xf86OSPMClose();
xf86OSPMClose = NULL;
#endif
for (i = 0; i < xf86NumScreens; i++) {
/*
* zero all access functions to
* trap calls when switched away.
*/
xf86Screens[i]->vtSema = FALSE;
}
#ifdef XFreeXDGA
DGAShutdown();
#endif
if (xorgHWOpenConsole)
xf86CloseConsole();
systemd_logind_fini();
dbus_core_fini();
xf86CloseLog(error);
/* If an unexpected signal was caught, dump a core for debugging */
if (xf86Info.caughtSignal)
OsAbort();
} | 0 | [] | xserver | 032b1d79b7d04d47814a5b3a9fdd162249fea74c | 43,761,450,448,134,790,000,000,000,000,000,000,000 | 36 | xfree86: use the xf86CheckPrivs() helper for modulepath/logfile
v2: Rebase against updated xf86CheckPrivs() helper.
Reviewed-by: Adam Jackson <[email protected]>
Signed-off-by: Emil Velikov <[email protected]> |
static OPJ_SIZE_T jpxRead_callback(void * p_buffer, OPJ_SIZE_T p_nb_bytes, void * p_user_data)
{
JPXData *jpxData = (JPXData *)p_user_data;
int len;
len = jpxData->size - jpxData->pos;
if (len < 0)
len = 0;
if (len == 0)
return (OPJ_SIZE_T)-1; /* End of file! */
if ((OPJ_SIZE_T)len > p_nb_bytes)
len = p_nb_bytes;
memcpy(p_buffer, jpxData->data + jpxData->pos, len);
jpxData->pos += len;
return len;
} | 0 | [
"CWE-125"
] | poppler | 89a5367d49b2556a2635dbb6d48d6a6b182a2c6c | 154,820,935,448,948,240,000,000,000,000,000,000,000 | 16 | JPEG2000Stream: fail gracefully if not all components have the same WxH
I think this is just a mistake, or at least the only file we have with
this scenario is a fuzzed one |
static inline size_t inet6_if_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct ifinfomsg))
+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
+ nla_total_size(4) /* IFLA_MTU */
+ nla_total_size(4) /* IFLA_LINK */
+ nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
} | 0 | [] | net | 4b08a8f1bd8cb4541c93ec170027b4d0782dab52 | 57,038,465,532,382,040,000,000,000,000,000,000,000 | 9 | ipv6: remove max_addresses check from ipv6_create_tempaddr
Because of the max_addresses check attackers were able to disable privacy
extensions on an interface by creating enough autoconfigured addresses:
<http://seclists.org/oss-sec/2012/q4/292>
But the check is not actually needed: max_addresses protects the
kernel to install too many ipv6 addresses on an interface and guards
addrconf_prefix_rcv to install further addresses as soon as this limit
is reached. We only generate temporary addresses in direct response of
a new address showing up. As soon as we filled up the maximum number of
addresses of an interface, we stop installing more addresses and thus
also stop generating more temp addresses.
Even if the attacker tries to generate a lot of temporary addresses
by announcing a prefix and removing it again (lifetime == 0) we won't
install more temp addresses, because the temporary addresses do count
to the maximum number of addresses, thus we would stop installing new
autoconfigured addresses when the limit is reached.
This patch fixes CVE-2013-0343 (but other layer-2 attacks are still
possible).
Thanks to Ding Tianhong to bring this topic up again.
Cc: Ding Tianhong <[email protected]>
Cc: George Kargiotakis <[email protected]>
Cc: P J P <[email protected]>
Cc: YOSHIFUJI Hideaki <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Acked-by: Ding Tianhong <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
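Put differently, temporary addresses draw on the same per-interface budget as every other autoconfigured address, so a second check inside the temp-address generator is redundant. The toy model below illustrates that single-cap accounting only; it is not kernel code.

#include <stdbool.h>
#include <stdio.h>

struct ifdev { int addr_count; int max_addresses; };

/* One cap, enforced where addresses are actually installed. */
static bool install_address(struct ifdev *dev) {
    if (dev->max_addresses && dev->addr_count >= dev->max_addresses)
        return false;               /* cap reached: nothing is added */
    dev->addr_count++;
    return true;
}

/* Temp addresses are only created in response to a freshly installed
 * public address, and they consume the same budget. */
static bool on_new_public_address(struct ifdev *dev) {
    return install_address(dev);    /* the temporary address */
}

int main(void) {
    struct ifdev dev = { 0, 4 };
    while (install_address(&dev) && on_new_public_address(&dev))
        ;
    printf("installed %d of max %d\n", dev.addr_count, dev.max_addresses);
    return 0;
}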
SPL_METHOD(RecursiveDirectoryIterator, getSubPathname)
{
spl_filesystem_object *intern = Z_SPLFILESYSTEM_P(getThis());
char slash = SPL_HAS_FLAG(intern->flags, SPL_FILE_DIR_UNIXPATHS) ? '/' : DEFAULT_SLASH;
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if (intern->u.dir.sub_path) {
RETURN_NEW_STR(strpprintf(0, "%s%c%s", intern->u.dir.sub_path, slash, intern->u.dir.entry.d_name));
} else {
RETURN_STRING(intern->u.dir.entry.d_name);
}
} | 0 | [
"CWE-74"
] | php-src | a5a15965da23c8e97657278fc8dfbf1dfb20c016 | 188,342,206,962,109,800,000,000,000,000,000,000,000 | 15 | Fix #78863: DirectoryIterator class silently truncates after a null byte
Since the constructor of DirectoryIterator and friends is supposed to
accept paths (i.e. strings without NUL bytes), we must not accept
arbitrary strings. |
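At the C level, the standard detection for an embedded NUL is comparing the known length against strlen(): the two differ exactly when a NUL hides inside the string. A minimal sketch of the rejection, showing the idea rather than the actual php-src patch:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* A path of length `len` is only valid if no NUL byte occurs before
 * the end; strlen() stops at the first NUL, exposing truncation. */
static bool path_is_clean(const char *path, size_t len) {
    return strlen(path) == len;
}

int main(void) {
    const char evil[] = "/etc/passwd\0.png";   /* 16 bytes of data */
    size_t len = sizeof(evil) - 1;
    printf("clean: %s\n", path_is_clean(evil, len) ? "yes" : "no");
    printf("clean: %s\n", path_is_clean("/tmp/a.png", 10) ? "yes" : "no");
    return 0;
}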
StatusWith<QueryMetadataBitSet> CanonicalQuery::isValid(MatchExpression* root,
const QueryRequest& request) {
QueryMetadataBitSet unavailableMetadata{};
// There can only be one TEXT. If there is a TEXT, it cannot appear inside a NOR.
//
// Note that the query grammar (as enforced by the MatchExpression parser) forbids TEXT
// inside of value-expression clauses like NOT, so we don't check those here.
size_t numText = countNodes(root, MatchExpression::TEXT);
if (numText > 1) {
return Status(ErrorCodes::BadValue, "Too many text expressions");
} else if (1 == numText) {
if (hasNodeInSubtree(root, MatchExpression::TEXT, MatchExpression::NOR)) {
return Status(ErrorCodes::BadValue, "text expression not allowed in nor");
}
} else {
// Text metadata is not available.
unavailableMetadata.set(DocumentMetadataFields::kTextScore);
}
// There can only be one NEAR. If there is a NEAR, it must be either the root or the root
// must be an AND and its child must be a NEAR.
size_t numGeoNear = countNodes(root, MatchExpression::GEO_NEAR);
if (numGeoNear > 1) {
return Status(ErrorCodes::BadValue, "Too many geoNear expressions");
} else if (1 == numGeoNear) {
bool topLevel = false;
if (MatchExpression::GEO_NEAR == root->matchType()) {
topLevel = true;
} else if (MatchExpression::AND == root->matchType()) {
for (size_t i = 0; i < root->numChildren(); ++i) {
if (MatchExpression::GEO_NEAR == root->getChild(i)->matchType()) {
topLevel = true;
break;
}
}
}
if (!topLevel) {
return Status(ErrorCodes::BadValue, "geoNear must be top-level expr");
}
} else {
// Geo distance and geo point metadata are unavailable.
unavailableMetadata |= DepsTracker::kAllGeoNearData;
}
const BSONObj& sortObj = request.getSort();
BSONElement sortNaturalElt = sortObj["$natural"];
const BSONObj& hintObj = request.getHint();
BSONElement hintNaturalElt = hintObj["$natural"];
if (sortNaturalElt && sortObj.nFields() != 1) {
return Status(ErrorCodes::BadValue,
str::stream() << "Cannot include '$natural' in compound sort: " << sortObj);
}
if (hintNaturalElt && hintObj.nFields() != 1) {
return Status(ErrorCodes::BadValue,
str::stream() << "Cannot include '$natural' in compound hint: " << hintObj);
}
// NEAR cannot have a $natural sort or $natural hint.
if (numGeoNear > 0) {
if (sortNaturalElt) {
return Status(ErrorCodes::BadValue,
"geoNear expression not allowed with $natural sort order");
}
if (hintNaturalElt) {
return Status(ErrorCodes::BadValue,
"geoNear expression not allowed with $natural hint");
}
}
// TEXT and NEAR cannot both be in the query.
if (numText > 0 && numGeoNear > 0) {
return Status(ErrorCodes::BadValue, "text and geoNear not allowed in same query");
}
// TEXT and {$natural: ...} sort order cannot both be in the query.
if (numText > 0 && sortNaturalElt) {
return Status(ErrorCodes::BadValue, "text expression not allowed with $natural sort order");
}
// TEXT and hint cannot both be in the query.
if (numText > 0 && !hintObj.isEmpty()) {
return Status(ErrorCodes::BadValue, "text and hint not allowed in same query");
}
// TEXT and tailable are incompatible.
if (numText > 0 && request.isTailable()) {
return Status(ErrorCodes::BadValue, "text and tailable cursor not allowed in same query");
}
// NEAR and tailable are incompatible.
if (numGeoNear > 0 && request.isTailable()) {
return Status(ErrorCodes::BadValue,
"Tailable cursors and geo $near cannot be used together");
}
// $natural sort order must agree with hint.
if (sortNaturalElt) {
if (!hintObj.isEmpty() && !hintNaturalElt) {
return Status(ErrorCodes::BadValue, "index hint not allowed with $natural sort order");
}
if (hintNaturalElt) {
if (hintNaturalElt.numberInt() != sortNaturalElt.numberInt()) {
return Status(ErrorCodes::BadValue,
"$natural hint must be in the same direction as $natural sort order");
}
}
}
return unavailableMetadata;
} | 0 | [
"CWE-755"
] | mongo | c8ced6df8f620daaa2e539f192f2eef356c63e9c | 142,437,021,373,347,000,000,000,000,000,000,000,000 | 114 | SERVER-47773 Error consistently when tailable cursors and $near are used together |
char *uwsgi_expand_path(char *dir, int dir_len, char *ptr) {
char src[PATH_MAX + 1];
memcpy(src, dir, dir_len);
src[dir_len] = 0;
char *dst = ptr;
if (!dst)
dst = uwsgi_malloc(PATH_MAX + 1);
if (!realpath(src, dst)) {
uwsgi_error_realpath(src);
if (!ptr)
free(dst);
return NULL;
}
return dst;
} | 1 | [
"CWE-119",
"CWE-703",
"CWE-787"
] | uwsgi | cb4636f7c0af2e97a4eef7a3cdcbd85a71247bfe | 188,609,799,092,990,550,000,000,000,000,000,000,000 | 15 | improve uwsgi_expand_path() to sanitize input, avoiding stack corruption and potential security issue |
void ext4_ext_init(struct super_block *sb)
{
/*
* possible initialization would be here
*/
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
printk(KERN_INFO "EXT4-fs: file extents enabled"
#ifdef AGGRESSIVE_TEST
", aggressive tests"
#endif
#ifdef CHECK_BINSEARCH
", check binsearch"
#endif
#ifdef EXTENTS_STATS
", stats"
#endif
"\n");
#endif
#ifdef EXTENTS_STATS
spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
EXT4_SB(sb)->s_ext_min = 1 << 30;
EXT4_SB(sb)->s_ext_max = 0;
#endif
}
} | 0 | [
"CWE-362"
] | linux-2.6 | dee1f973ca341c266229faa5a1a5bb268bed3531 | 255,825,546,728,402,000,000,000,000,000,000,000,000 | 27 | ext4: race-condition protection for ext4_convert_unwritten_extents_endio
We assumed that at the time we call ext4_convert_unwritten_extents_endio()
the extent in question is fully inside [map.m_lblk, map->m_len] because
it was already split during submission. But this may not be true due to
a race between writeback and fallocate.
If the extent in question is larger than requested, we will split it again.
Special precautions must be taken if zeroout is required, because
[map.m_lblk, map->m_len] already contains valid data.
Signed-off-by: Dmitry Monakhov <[email protected]>
Signed-off-by: "Theodore Ts'o" <[email protected]>
Cc: [email protected] |
EXPORTED int propfind_princolset(const xmlChar *name, xmlNsPtr ns,
struct propfind_ctx *fctx,
xmlNodePtr prop __attribute__((unused)),
xmlNodePtr resp __attribute__((unused)),
struct propstat propstat[],
void *rock __attribute__((unused)))
{
xmlNodePtr node = xml_add_prop(HTTP_OK, fctx->ns[NS_DAV],
&propstat[PROPSTAT_OK], name, ns, NULL, 0);
buf_reset(&fctx->buf);
buf_printf(&fctx->buf, "%s/%s/",
namespace_principal.prefix, USER_COLLECTION_PREFIX);
xml_add_href(node, NULL, buf_cstring(&fctx->buf));
return 0;
} | 0 | [] | cyrus-imapd | 6703ff881b6056e0c045a7b795ce8ba1bbb87027 | 6,269,777,430,554,306,000,000,000,000,000,000,000 | 17 | http_dav.c: add 'private' Cache-Control directive for cacheable responses that require authentication |
int dhcp6_option_parse_ia(DHCP6Option *iaoption, DHCP6IA *ia) {
uint16_t iatype, optlen;
size_t i, len;
int r = 0, status;
uint16_t opt;
size_t iaaddr_offset;
uint32_t lt_t1, lt_t2, lt_valid = 0, lt_min = UINT32_MAX;
assert_return(ia, -EINVAL);
assert_return(!ia->addresses, -EINVAL);
iatype = be16toh(iaoption->code);
len = be16toh(iaoption->len);
switch (iatype) {
case SD_DHCP6_OPTION_IA_NA:
if (len < DHCP6_OPTION_IA_NA_LEN)
return -ENOBUFS;
iaaddr_offset = DHCP6_OPTION_IA_NA_LEN;
memcpy(&ia->ia_na, iaoption->data, sizeof(ia->ia_na));
lt_t1 = be32toh(ia->ia_na.lifetime_t1);
lt_t2 = be32toh(ia->ia_na.lifetime_t2);
if (lt_t1 && lt_t2 && lt_t1 > lt_t2) {
log_dhcp6_client(client, "IA NA T1 %ds > T2 %ds",
lt_t1, lt_t2);
return -EINVAL;
}
break;
case SD_DHCP6_OPTION_IA_PD:
if (len < sizeof(ia->ia_pd))
return -ENOBUFS;
iaaddr_offset = sizeof(ia->ia_pd);
memcpy(&ia->ia_pd, iaoption->data, sizeof(ia->ia_pd));
lt_t1 = be32toh(ia->ia_pd.lifetime_t1);
lt_t2 = be32toh(ia->ia_pd.lifetime_t2);
if (lt_t1 && lt_t2 && lt_t1 > lt_t2) {
log_dhcp6_client(client, "IA PD T1 %ds > T2 %ds",
lt_t1, lt_t2);
return -EINVAL;
}
break;
case SD_DHCP6_OPTION_IA_TA:
if (len < DHCP6_OPTION_IA_TA_LEN)
return -ENOBUFS;
iaaddr_offset = DHCP6_OPTION_IA_TA_LEN;
memcpy(&ia->ia_ta.id, iaoption->data, sizeof(ia->ia_ta));
break;
default:
return -ENOMSG;
}
ia->type = iatype;
i = iaaddr_offset;
while (i < len) {
DHCP6Option *option = (DHCP6Option *)&iaoption->data[i];
if (len < i + sizeof(*option) || len < i + sizeof(*option) + be16toh(option->len))
return -ENOBUFS;
opt = be16toh(option->code);
optlen = be16toh(option->len);
switch (opt) {
case SD_DHCP6_OPTION_IAADDR:
if (!IN_SET(ia->type, SD_DHCP6_OPTION_IA_NA, SD_DHCP6_OPTION_IA_TA)) {
log_dhcp6_client(client, "IA Address option not in IA NA or TA option");
return -EINVAL;
}
r = dhcp6_option_parse_address(option, ia, <_valid);
if (r < 0)
return r;
if (lt_valid < lt_min)
lt_min = lt_valid;
break;
case SD_DHCP6_OPTION_IA_PD_PREFIX:
if (!IN_SET(ia->type, SD_DHCP6_OPTION_IA_PD)) {
log_dhcp6_client(client, "IA PD Prefix option not in IA PD option");
return -EINVAL;
}
r = dhcp6_option_parse_pdprefix(option, ia, <_valid);
if (r < 0)
return r;
if (lt_valid < lt_min)
lt_min = lt_valid;
break;
case SD_DHCP6_OPTION_STATUS_CODE:
status = dhcp6_option_parse_status(option, optlen + sizeof(DHCP6Option));
if (status < 0)
return status;
if (status > 0) {
log_dhcp6_client(client, "IA status %d",
status);
return -EINVAL;
}
break;
default:
log_dhcp6_client(client, "Unknown IA option %d", opt);
break;
}
i += sizeof(*option) + optlen;
}
switch(iatype) {
case SD_DHCP6_OPTION_IA_NA:
if (!ia->ia_na.lifetime_t1 && !ia->ia_na.lifetime_t2) {
lt_t1 = lt_min / 2;
lt_t2 = lt_min / 10 * 8;
ia->ia_na.lifetime_t1 = htobe32(lt_t1);
ia->ia_na.lifetime_t2 = htobe32(lt_t2);
log_dhcp6_client(client, "Computed IA NA T1 %ds and T2 %ds as both were zero",
lt_t1, lt_t2);
}
break;
case SD_DHCP6_OPTION_IA_PD:
if (!ia->ia_pd.lifetime_t1 && !ia->ia_pd.lifetime_t2) {
lt_t1 = lt_min / 2;
lt_t2 = lt_min / 10 * 8;
ia->ia_pd.lifetime_t1 = htobe32(lt_t1);
ia->ia_pd.lifetime_t2 = htobe32(lt_t2);
log_dhcp6_client(client, "Computed IA PD T1 %ds and T2 %ds as both were zero",
lt_t1, lt_t2);
}
break;
default:
break;
}
return 0;
} | 0 | [
"CWE-120"
] | systemd | 4dac5eaba4e419b29c97da38a8b1f82336c2c892 | 281,378,314,862,844,960,000,000,000,000,000,000,000 | 166 | dhcp6: make sure we have enough space for the DHCP6 option header
Fixes a vulnerability originally discovered by Felix Wilhelm from
Google.
CVE-2018-15688
LP: #1795921
https://bugzilla.redhat.com/show_bug.cgi?id=1639067 |
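The loop above applies the generic TLV-walking discipline: before touching an option, verify there is room for its fixed header, and then for the payload length the header declares. The same discipline in standalone form (native-endian for simplicity, unlike the on-wire format):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv { uint16_t code; uint16_t len; };  /* followed by payload */

static void walk(const uint8_t *buf, size_t buflen) {
    size_t i = 0;
    while (i < buflen) {
        struct tlv hdr;
        /* Room for the header itself? */
        if (buflen - i < sizeof(hdr))
            return;
        memcpy(&hdr, buf + i, sizeof(hdr));
        /* Room for the payload the header claims? */
        if (buflen - i - sizeof(hdr) < hdr.len)
            return;
        printf("option %u, %u payload bytes\n", hdr.code, hdr.len);
        i += sizeof(hdr) + hdr.len;
    }
}

int main(void) {
    /* code=1, len=2, payload "ab" (native-endian for simplicity) */
    uint8_t msg[sizeof(struct tlv) + 2];
    struct tlv hdr = { 1, 2 };
    memcpy(msg, &hdr, sizeof(hdr));
    memcpy(msg + sizeof(hdr), "ab", 2);
    walk(msg, sizeof(msg));
    return 0;
}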
sg_get_rq_mark(Sg_fd * sfp, int pack_id)
{
Sg_request *resp;
unsigned long iflags;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
list_for_each_entry(resp, &sfp->rq_list, entry) {
/* look for requests that are ready + not SG_IO owned */
if ((1 == resp->done) && (!resp->sg_io_owned) &&
((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
resp->done = 2; /* guard against other readers */
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return resp;
}
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return NULL;
} | 0 | [
"CWE-200"
] | linux | 3e0097499839e0fe3af380410eababe5a47c4cf9 | 235,361,678,661,657,430,000,000,000,000,000,000,000 | 18 | scsi: sg: fixup infoleak when using SG_GET_REQUEST_TABLE
When calling the SG_GET_REQUEST_TABLE ioctl, only a half-filled table is
returned; the remaining part will then contain stale kernel memory
information. This patch zeroes out the entire table to avoid this
issue.
Signed-off-by: Hannes Reinecke <[email protected]>
Reviewed-by: Bart Van Assche <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Reviewed-by: Eric Dumazet <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]> |
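The leak pattern is generic: a buffer destined for user space is only partially filled, so allocator leftovers escape with the copy-out. Zeroing the whole buffer before the partial fill closes the hole; a minimal userspace sketch of the idea, not the sg driver code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct req_info { int id; int done; };

/* Fill at most `active` slots; zero the rest so no stale bytes ever
 * reach the copy-out below. */
static void fill_table(struct req_info *tbl, size_t slots, size_t active) {
    memset(tbl, 0, slots * sizeof(*tbl));   /* the fix: zero everything */
    for (size_t i = 0; i < active && i < slots; i++) {
        tbl[i].id = (int)i;
        tbl[i].done = 1;
    }
}

int main(void) {
    enum { SLOTS = 16 };
    struct req_info *tbl = malloc(SLOTS * sizeof(*tbl));
    if (tbl == NULL)
        return 1;
    fill_table(tbl, SLOTS, 3);              /* only 3 are real */
    printf("slot 10: id=%d done=%d\n", tbl[10].id, tbl[10].done);
    free(tbl);
    return 0;
}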
txBoolean fxCheckLength(txMachine* the, txSlot* slot, txInteger* index)
{
txNumber number = fxToNumber(the, slot);
txNumber check = c_trunc(number);
if ((number == check) && (0 <= number) && (number <= 0x7FFFFFFF)) {
*index = (txInteger)number;
return 1 ;
}
return 0;
} | 0 | [
"CWE-125"
] | moddable | 135aa9a4a6a9b49b60aa730ebc3bcc6247d75c45 | 181,039,423,338,429,570,000,000,000,000,000,000,000 | 10 | XS: #896 |
ShutdownWrap* ShutdownWrap::FromObject(
const BaseObjectPtrImpl<T, kIsWeak>& base_obj) {
if (!base_obj) return nullptr;
return FromObject(base_obj->object());
} | 0 | [
"CWE-416"
] | node | 4f8772f9b731118628256189b73cd202149bbd97 | 184,931,998,153,768,180,000,000,000,000,000,000,000 | 5 | src: retain pointers to WriteWrap/ShutdownWrap
Avoids potential use-after-free when wrap req's are synchronously
destroyed.
CVE-ID: CVE-2020-8265
Fixes: https://github.com/nodejs-private/node-private/issues/227
Refs: https://hackerone.com/bugs?subject=nodejs&report_id=988103
PR-URL: https://github.com/nodejs-private/node-private/pull/23
Reviewed-By: Anna Henningsen <[email protected]>
Reviewed-By: Matteo Collina <[email protected]>
Reviewed-By: Rich Trott <[email protected]> |
void jpc_qmfb_split_colgrp(jpc_fix_t *a, int numrows, int stride,
int parity)
{
int bufsize = JPC_CEILDIVPOW2(numrows, 1);
jpc_fix_t splitbuf[QMFB_SPLITBUFSIZE * JPC_QMFB_COLGRPSIZE];
jpc_fix_t *buf = splitbuf;
jpc_fix_t *srcptr;
jpc_fix_t *dstptr;
register jpc_fix_t *srcptr2;
register jpc_fix_t *dstptr2;
register int n;
register int i;
int m;
int hstartcol;
/* Get a buffer. */
if (bufsize > QMFB_SPLITBUFSIZE) {
if (!(buf = jas_malloc(bufsize * sizeof(jpc_fix_t)))) {
/* We have no choice but to commit suicide in this case. */
abort();
}
}
if (numrows >= 2) {
hstartcol = (numrows + 1 - parity) >> 1;
m = (parity) ? hstartcol : (numrows - hstartcol);
/* Save the samples destined for the highpass channel. */
n = m;
dstptr = buf;
srcptr = &a[(1 - parity) * stride];
while (n-- > 0) {
dstptr2 = dstptr;
srcptr2 = srcptr;
for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) {
*dstptr2 = *srcptr2;
++dstptr2;
++srcptr2;
}
dstptr += JPC_QMFB_COLGRPSIZE;
srcptr += stride << 1;
}
/* Copy the appropriate samples into the lowpass channel. */
dstptr = &a[(1 - parity) * stride];
srcptr = &a[(2 - parity) * stride];
n = numrows - m - (!parity);
while (n-- > 0) {
dstptr2 = dstptr;
srcptr2 = srcptr;
for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) {
*dstptr2 = *srcptr2;
++dstptr2;
++srcptr2;
}
dstptr += stride;
srcptr += stride << 1;
}
/* Copy the saved samples into the highpass channel. */
dstptr = &a[hstartcol * stride];
srcptr = buf;
n = m;
while (n-- > 0) {
dstptr2 = dstptr;
srcptr2 = srcptr;
for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) {
*dstptr2 = *srcptr2;
++dstptr2;
++srcptr2;
}
dstptr += stride;
srcptr += JPC_QMFB_COLGRPSIZE;
}
}
/* If the split buffer was allocated on the heap, free this memory. */
if (buf != splitbuf) {
jas_free(buf);
}
} | 1 | [
"CWE-189"
] | jasper | 3c55b399c36ef46befcb21e4ebc4799367f89684 | 227,178,466,579,110,170,000,000,000,000,000,000,000 | 80 | At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems. |
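An overflow-checking allocator of the kind described multiplies under a guard: if num exceeds SIZE_MAX / size the product would wrap and a too-small block would be handed back, so it refuses instead. A sketch of such a wrapper; the real jas_alloc2() may differ in detail.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Allocate `num * size` bytes, failing cleanly when the product
 * would overflow size_t instead of silently wrapping around. */
static void *alloc2(size_t num, size_t size) {
    if (size != 0 && num > SIZE_MAX / size)
        return NULL;                 /* would overflow: refuse */
    return malloc(num * size);
}

int main(void) {
    int *ok = alloc2(1024, sizeof(int));
    void *bad = alloc2(SIZE_MAX / 2, 4);    /* product wraps: refused */
    printf("ok=%p bad=%p\n", (void *)ok, bad);
    free(ok);
    return 0;
}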
trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
int len)
{
struct trace_eval_map **stop;
struct trace_eval_map **map;
union trace_eval_map_item *map_array;
union trace_eval_map_item *ptr;
stop = start + len;
/*
* The trace_eval_maps contains the map plus a head and tail item,
* where the head holds the module and length of array, and the
* tail holds a pointer to the next list.
*/
map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
if (!map_array) {
pr_warn("Unable to allocate trace eval mapping\n");
return;
}
mutex_lock(&trace_eval_mutex);
if (!trace_eval_maps)
trace_eval_maps = map_array;
else {
ptr = trace_eval_maps;
for (;;) {
ptr = trace_eval_jmp_to_tail(ptr);
if (!ptr->tail.next)
break;
ptr = ptr->tail.next;
}
ptr->tail.next = map_array;
}
map_array->head.mod = mod;
map_array->head.length = len;
map_array++;
for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
map_array->map = **map;
map_array++;
}
memset(map_array, 0, sizeof(*map_array));
mutex_unlock(&trace_eval_mutex);
} | 0 | [
"CWE-415"
] | linux | 4397f04575c44e1440ec2e49b6302785c95fd2f8 | 79,451,819,466,557,550,000,000,000,000,000,000,000 | 48 | tracing: Fix possible double free on failure of allocating trace buffer
Jing Xia and Chunyan Zhang reported that on failing to allocate part of the
tracing buffer, memory is freed, but the pointers that point to it are not
initialized back to NULL, and later paths may try to free the freed memory
again. Jing and Chunyan fixed one of the locations that does this, but
missed a spot.
Link: http://lkml.kernel.org/r/[email protected]
Cc: [email protected]
Fixes: 737223fbca3b1 ("tracing: Consolidate buffer allocation code")
Reported-by: Jing Xia <[email protected]>
Reported-by: Chunyan Zhang <[email protected]>
Signed-off-by: Steven Rostedt (VMware) <[email protected]> |
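The double-free class described here is defeated by the free-and-reset idiom: NULL out every pointer immediately after freeing it, so a second cleanup pass degrades to a harmless no-op. A minimal sketch:

#include <stdio.h>
#include <stdlib.h>

struct trace_buf { char *pages; char *meta; };

/* Partial-failure cleanup that is safe to run more than once,
 * because each free is followed by a NULL reset. */
static void release(struct trace_buf *b) {
    free(b->pages);
    b->pages = NULL;
    free(b->meta);
    b->meta = NULL;
}

int main(void) {
    struct trace_buf b = { malloc(64), malloc(32) };
    release(&b);     /* error path frees everything ...            */
    release(&b);     /* ... and a later cleanup pass is a no-op    */
    printf("pages=%p meta=%p\n", (void *)b.pages, (void *)b.meta);
    return 0;
}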
checkforcmd_opt(
char_u **pp, // start of command
char *cmd, // name of command
int len, // required length
int noparen)
{
int i;
for (i = 0; cmd[i] != NUL; ++i)
if (((char_u *)cmd)[i] != (*pp)[i])
break;
if (i >= len && !isalpha((*pp)[i]) && (*pp)[i] != '_'
&& (!noparen || ((*pp)[i] != '(' && (*pp)[i] != '.')))
{
*pp = skipwhite(*pp + i);
return TRUE;
}
return FALSE;
} | 0 | [
"CWE-122"
] | vim | f50808ed135ab973296bca515ae4029b321afe47 | 165,638,152,201,642,280,000,000,000,000,000,000,000 | 19 | patch 8.2.4763: using invalid pointer with "V:" in Ex mode
Problem: Using invalid pointer with "V:" in Ex mode.
Solution: Correctly handle the command being changed to "+". |
static void mail_close_it(zend_resource *rsrc)
{
pils *imap_le_struct = (pils *)rsrc->ptr;
/* Do not try to close prototype streams */
if (!(imap_le_struct->flags & OP_PROTOTYPE)) {
mail_close_full(imap_le_struct->imap_stream, imap_le_struct->flags);
}
if (IMAPG(imap_user)) {
efree(IMAPG(imap_user));
IMAPG(imap_user) = 0;
}
if (IMAPG(imap_password)) {
efree(IMAPG(imap_password));
IMAPG(imap_password) = 0;
}
efree(imap_le_struct); | 0 | [
"CWE-88"
] | php-src | 336d2086a9189006909ae06c7e95902d7d5ff77e | 4,219,272,805,353,314,000,000,000,000,000,000,000 | 20 | Disable rsh/ssh functionality in imap by default (bug #77153) |
const SegmentInfo* GetSegmentInfo() const { return &segment_info_; } | 0 | [
"CWE-20"
] | libvpx | f00890eecdf8365ea125ac16769a83aa6b68792d | 225,815,417,173,227,280,000,000,000,000,000,000,000 | 1 | update libwebm to libwebm-1.0.0.27-352-g6ab9fcf
https://chromium.googlesource.com/webm/libwebm/+log/af81f26..6ab9fcf
Change-Id: I9d56e1fbaba9b96404b4fbabefddc1a85b79c25d |
static int ext4_xattr_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo)
{
__u64 physical = 0;
__u64 length;
__u32 flags = FIEMAP_EXTENT_LAST;
int blockbits = inode->i_sb->s_blocksize_bits;
int error = 0;
/* in-inode? */
if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
struct ext4_iloc iloc;
int offset; /* offset of xattr in inode */
error = ext4_get_inode_loc(inode, &iloc);
if (error)
return error;
physical = (__u64)iloc.bh->b_blocknr << blockbits;
offset = EXT4_GOOD_OLD_INODE_SIZE +
EXT4_I(inode)->i_extra_isize;
physical += offset;
length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
flags |= FIEMAP_EXTENT_DATA_INLINE;
brelse(iloc.bh);
} else { /* external block */
physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
length = inode->i_sb->s_blocksize;
}
if (physical)
error = fiemap_fill_next_extent(fieinfo, 0, physical,
length, flags);
return (error < 0 ? error : 0);
} | 0 | [
"CWE-362"
] | linux | ea3d7209ca01da209cda6f0dea8be9cc4b7a933b | 109,835,734,791,524,170,000,000,000,000,000,000,000 | 34 | ext4: fix races between page faults and hole punching
Currently, page faults and hole punching are completely unsynchronized.
This can result in a page fault faulting a page into a range that we
are punching after truncate_pagecache_range() has been called, and thus
we can end up with a page mapped to disk blocks that will shortly be
freed. Filesystem corruption will shortly follow. Note that the same
race is avoided for truncate by checking the page fault offset against
i_size, but there isn't a similar mechanism available for punching holes.
Fix the problem by creating a new rw semaphore i_mmap_sem in the inode,
and grab it for writing over truncate, hole punching, and other functions
removing blocks from the extent tree, and for reading over page faults. We
cannot easily use i_data_sem for this since that ranks below transaction
start and we need something ranking above it so that it can be held over
the whole truncate / hole punching operation. Also remove various
workarounds we had in the code to reduce the race window in which a page
fault could have created pages with stale mapping information.
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]> |
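The locking scheme described, with writers excluding readers around block removal and readers (page faults) running concurrently with each other, maps directly onto a reader/writer lock. The pthread sketch below shows only the shape of the pattern, not ext4's code.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;
static int mapping_valid = 1;

/* Hole punch: exclude all faults while pages and blocks go away. */
static void punch_hole(void) {
    pthread_rwlock_wrlock(&mmap_sem);
    mapping_valid = 0;               /* drop pages, free blocks */
    mapping_valid = 1;
    pthread_rwlock_unlock(&mmap_sem);
}

/* Page fault: may run concurrently with other faults, but never
 * against an in-progress hole punch. */
static void page_fault(void) {
    pthread_rwlock_rdlock(&mmap_sem);
    printf("fault sees valid mapping: %d\n", mapping_valid);
    pthread_rwlock_unlock(&mmap_sem);
}

int main(void) {
    punch_hole();
    page_fault();
    return 0;
}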
static void mktree(struct tree_content *t, int v, struct strbuf *b)
{
size_t maxlen = 0;
unsigned int i;
if (!v)
qsort(t->entries,t->entry_count,sizeof(t->entries[0]),tecmp0);
else
qsort(t->entries,t->entry_count,sizeof(t->entries[0]),tecmp1);
for (i = 0; i < t->entry_count; i++) {
if (t->entries[i]->versions[v].mode)
maxlen += t->entries[i]->name->str_len + 34;
}
strbuf_reset(b);
strbuf_grow(b, maxlen);
for (i = 0; i < t->entry_count; i++) {
struct tree_entry *e = t->entries[i];
if (!e->versions[v].mode)
continue;
strbuf_addf(b, "%o %s%c",
(unsigned int)(e->versions[v].mode & ~NO_DELTA),
e->name->str_dat, '\0');
strbuf_add(b, e->versions[v].sha1, 20);
}
} | 0 | [
"CWE-119",
"CWE-787"
] | git | 34fa79a6cde56d6d428ab0d3160cb094ebad3305 | 207,530,574,755,227,180,000,000,000,000,000,000,000 | 27 | prefer memcpy to strcpy
When we already know the length of a string (e.g., because
we just malloc'd to fit it), it's nicer to use memcpy than
strcpy, as it makes it more obvious that we are not going to
overflow the buffer (because the size we pass matches the
size in the allocation).
This also eliminates calls to strcpy, which make auditing
the code base harder.
Signed-off-by: Jeff King <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]> |
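Once the length is already known, the substitution argued for is mechanical: allocate len + 1 bytes, memcpy() exactly len bytes, terminate explicitly. A small self-contained sketch:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Duplicate `src` when its length is already known: the size passed
 * to memcpy() visibly matches the size passed to malloc(). */
static char *dup_known_len(const char *src, size_t len) {
    char *dst = malloc(len + 1);
    if (dst == NULL)
        return NULL;
    memcpy(dst, src, len);
    dst[len] = '\0';
    return dst;
}

int main(void) {
    const char *s = "100644 blob";
    char *copy = dup_known_len(s, strlen(s));
    if (copy != NULL)
        printf("%s\n", copy);
    free(copy);
    return 0;
}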
void html_attrf(const char *fmt, ...)
{
va_list ap;
struct strbuf sb = STRBUF_INIT;
va_start(ap, fmt);
strbuf_vaddf(&sb, fmt, ap);
va_end(ap);
html_attr(sb.buf);
strbuf_release(&sb);
} | 0 | [] | cgit | 513b3863d999f91b47d7e9f26710390db55f9463 | 154,880,307,031,331,580,000,000,000,000,000,000,000 | 12 | ui-shared: prevent malicious filename from injecting headers |
static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b,
int modrm, int reg)
{
CCPrepare cc;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
cc = gen_prepare_cc(s, b, cpu_T1);
if (cc.mask != -1) {
TCGv t0 = tcg_temp_new();
tcg_gen_andi_tl(t0, cc.reg, cc.mask);
cc.reg = t0;
}
if (!cc.use_reg2) {
cc.reg2 = tcg_const_tl(cc.imm);
}
tcg_gen_movcond_tl(cc.cond, cpu_T0, cc.reg, cc.reg2,
cpu_T0, cpu_regs[reg]);
gen_op_mov_reg_v(ot, reg, cpu_T0);
if (cc.mask != -1) {
tcg_temp_free(cc.reg);
}
if (!cc.use_reg2) {
tcg_temp_free(cc.reg2);
}
} | 0 | [
"CWE-94"
] | qemu | 30663fd26c0307e414622c7a8607fbc04f92ec14 | 277,031,355,375,509,300,000,000,000,000,000,000,000 | 28 | tcg/i386: Check the size of instruction being translated
This fixes the bug: 'user-to-root privesc inside VM via bad translation
caching' reported by Jann Horn here:
https://bugs.chromium.org/p/project-zero/issues/detail?id=1122
Reviewed-by: Richard Henderson <[email protected]>
CC: Peter Maydell <[email protected]>
CC: Paolo Bonzini <[email protected]>
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Pranith Kumar <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
static void gf_m2ts_reset_sdt(GF_M2TS_Demuxer *ts)
{
while (gf_list_count(ts->SDTs)) {
GF_M2TS_SDT *sdt = (GF_M2TS_SDT *)gf_list_last(ts->SDTs);
gf_list_rem_last(ts->SDTs);
if (sdt->provider) gf_free(sdt->provider);
if (sdt->service) gf_free(sdt->service);
gf_free(sdt);
}
} | 0 | [
"CWE-416",
"CWE-125"
] | gpac | 1ab4860609f2e7a35634930571e7d0531297e090 | 91,442,271,343,627,050,000,000,000,000,000,000,000 | 10 | fixed potential crash on PMT IOD parse - cf #1268 #1269 |
csnSidNormalize(
slap_mask_t usage,
Syntax *syntax,
MatchingRule *mr,
struct berval *val,
struct berval *normalized,
void *ctx )
{
struct berval bv;
char *ptr,
buf[ 4 ];
if ( BER_BVISEMPTY( val ) ) {
return LDAP_INVALID_SYNTAX;
}
if ( SLAP_MR_IS_VALUE_OF_ASSERTION_SYNTAX(usage) ) {
return sidNormalize( 0, NULL, NULL, val, normalized, ctx );
}
assert( SLAP_MR_IS_VALUE_OF_ATTRIBUTE_SYNTAX(usage) != 0 );
ptr = ber_bvchr( val, '#' );
if ( ptr == NULL || ptr == &val->bv_val[val->bv_len] ) {
return LDAP_INVALID_SYNTAX;
}
bv.bv_val = ptr + 1;
bv.bv_len = val->bv_len - ( ptr + 1 - val->bv_val );
ptr = ber_bvchr( &bv, '#' );
if ( ptr == NULL || ptr == &val->bv_val[val->bv_len] ) {
return LDAP_INVALID_SYNTAX;
}
bv.bv_val = ptr + 1;
bv.bv_len = val->bv_len - ( ptr + 1 - val->bv_val );
ptr = ber_bvchr( &bv, '#' );
if ( ptr == NULL || ptr == &val->bv_val[val->bv_len] ) {
return LDAP_INVALID_SYNTAX;
}
bv.bv_len = ptr - bv.bv_val;
if ( bv.bv_len == 2 ) {
/* OpenLDAP 2.3 SID */
buf[ 0 ] = '0';
buf[ 1 ] = bv.bv_val[ 0 ];
buf[ 2 ] = bv.bv_val[ 1 ];
buf[ 3 ] = '\0';
bv.bv_val = buf;
bv.bv_len = 3;
}
return sidNormalize( 0, NULL, NULL, &bv, normalized, ctx );
} | 0 | [
"CWE-617"
] | openldap | 67670f4544e28fb09eb7319c39f404e1d3229e65 | 104,914,893,181,136,910,000,000,000,000,000,000,000 | 59 | ITS#9383 remove assert in certificateListValidate |
static int data_pending(const struct Curl_easy *data)
{
struct connectdata *conn = data->conn;
#ifdef ENABLE_QUIC
if(conn->transport == TRNSPRT_QUIC)
return Curl_quic_data_pending(data);
#endif
if(conn->handler->protocol&PROTO_FAMILY_FTP)
return Curl_ssl_data_pending(conn, SECONDARYSOCKET);
/* in the case of libssh2, we can never be really sure that we have emptied
its internal buffers so we MUST always try until we get EAGAIN back */
return conn->handler->protocol&(CURLPROTO_SCP|CURLPROTO_SFTP) ||
#ifdef USE_NGHTTP2
/* For HTTP/2, we may read up everything including response body
with header fields in Curl_http_readwrite_headers. If no
content-length is provided, curl waits for the connection
close, which we emulate it using conn->proto.httpc.closed =
TRUE. The thing is if we read everything, then http2_recv won't
be called and we cannot signal the HTTP/2 stream has closed. As
a workaround, we return nonzero here to call http2_recv. */
((conn->handler->protocol&PROTO_FAMILY_HTTP) && conn->httpversion >= 20) ||
#endif
Curl_ssl_data_pending(conn, FIRSTSOCKET);
} | 0 | [] | curl | 620ea21410030a9977396b4661806bc187231b79 | 242,734,437,541,844,400,000,000,000,000,000,000,000 | 27 | transfer: redirects to other protocols or ports clear auth
... unless explicitly permitted.
Bug: https://curl.se/docs/CVE-2022-27774.html
Reported-by: Harry Sintonen
Closes #8748 |
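The rule being enforced: credentials bound to one origin must not follow a redirect that changes scheme, host, or port, unless the caller explicitly opted in. A simplified origin comparison, illustrative only and not libcurl's implementation:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct origin { const char *scheme; const char *host; int port; };

/* Credentials only survive a redirect within the same origin. */
static bool keep_auth(const struct origin *a, const struct origin *b,
                      bool user_allowed_all) {
    if (user_allowed_all)            /* explicit opt-in, e.g. a flag */
        return true;
    return strcmp(a->scheme, b->scheme) == 0 &&
           strcmp(a->host, b->host) == 0 &&
           a->port == b->port;
}

int main(void) {
    struct origin from = { "https", "example.com", 443 };
    struct origin to   = { "http",  "example.com", 80  };
    printf("keep auth: %s\n",
           keep_auth(&from, &to, false) ? "yes" : "no (cleared)");
    return 0;
}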