func (string, 0-484k chars) | target (int64, 0-1) | cwe (list, 0-4 entries) | project (string, 799 classes) | commit_id (string, 40 chars) | hash (float64, ~1.2e24-3.4e29) | size (int64, 1-24k) | message (string, 0-13.3k chars) |
---|---|---|---|---|---|---|---|
static void get_page_bootmem(unsigned long info, struct page *page,
unsigned long type)
{
page->lru.next = (struct list_head *) type;
SetPagePrivate(page);
set_page_private(page, info);
atomic_inc(&page->_count);
} | 0 | []
| linux-2.6 | 08dff7b7d629807dbb1f398c68dd9cd58dd657a1 | 123,765,088,265,242,080,000,000,000,000,000,000,000 | 8 | mm/hotplug: correctly add new zone to all other nodes' zone lists
When online_pages() is called to add new memory to an empty zone, it
rebuilds all zone lists by calling build_all_zonelists(). But there's a
bug which prevents the new zone from being added to other nodes' zone lists.
online_pages() {
build_all_zonelists()
.....
node_set_state(zone_to_nid(zone), N_HIGH_MEMORY)
}
Here the node of the zone is put into N_HIGH_MEMORY state after calling
build_all_zonelists(), but build_all_zonelists() only adds zones from
nodes in N_HIGH_MEMORY state to the fallback zone lists.
build_all_zonelists()
->__build_all_zonelists()
->build_zonelists()
->find_next_best_node()
->for_each_node_state(n, N_HIGH_MEMORY)
So memory in the new zone will never be used by other nodes, and it may
cause strange behavior when the system is under memory pressure. So put the node
into the N_HIGH_MEMORY state before calling build_all_zonelists().
Signed-off-by: Jianguo Wu <[email protected]>
Signed-off-by: Jiang Liu <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Rusty Russell <[email protected]>
Cc: Yinghai Lu <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Cc: KOSAKI Motohiro <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Keping Chen <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
virtual ~ColumnInterface() {} | 0 | [
"CWE-843"
]
| tensorflow | b1cc5e5a50e7cee09f2c6eb48eb40ee9c4125025 | 239,606,311,331,586,600,000,000,000,000,000,000,000 | 1 | Fix `tf.raw_ops.SparseCross` failing CHECK.
PiperOrigin-RevId: 368701671
Change-Id: Id805729dd9ba0bda36e4bb309408129b55fb649d |
static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
unsigned long flags;
cm_id_priv->id.state = IB_CM_IDLE;
if (cm_id_priv->timewait_info) {
spin_lock_irqsave(&cm.lock, flags);
cm_cleanup_timewait(cm_id_priv->timewait_info);
spin_unlock_irqrestore(&cm.lock, flags);
kfree(cm_id_priv->timewait_info);
cm_id_priv->timewait_info = NULL;
}
} | 0 | [
"CWE-20"
]
| linux | b2853fd6c2d0f383dbdf7427e263eb576a633867 | 13,799,886,083,093,765,000,000,000,000,000,000,000 | 13 | IB/core: Don't resolve passive side RoCE L2 address in CMA REQ handler
The code that resolves the passive side source MAC within the rdma_cm
connection request handler was both redundant and buggy, so remove it.
It was redundant since later, when an RC QP is modified to RTR state,
the resolution will take place in the ib_core module. It was buggy
because this callback also deals with UD SIDR exchange, for which we
incorrectly looked at the REQ member of the CM event and dereferenced
a random value.
Fixes: dd5f03beb4f7 ("IB/core: Ethernet L2 attributes in verbs/cm structures")
Signed-off-by: Moni Shoua <[email protected]>
Signed-off-by: Or Gerlitz <[email protected]>
Signed-off-by: Roland Dreier <[email protected]> |
static void tcm_loop_stop_session(
struct se_session *se_sess,
int sess_sleep,
int conn_sleep)
{
return;
} | 0 | [
"CWE-119",
"CWE-787"
]
| linux | 12f09ccb4612734a53e47ed5302e0479c10a50f8 | 211,323,730,003,741,400,000,000,000,000,000,000,000 | 7 | loopback: off by one in tcm_loop_make_naa_tpg()
This is an off-by-one 'tgpt' check in tcm_loop_make_naa_tpg() that could result
in memory corruption.
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Nicholas A. Bellinger <[email protected]> |
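A minimal sketch of the kind of off-by-one bound check the commit message above describes, using a hypothetical fixed-size table rather than the real tcm_loop structures: an index equal to the array length must be rejected, so the comparison has to be `>=`, not `>`.

```c
#include <stdio.h>

#define MAX_TPGS 32   /* hypothetical table size, not the real TCM limit */

static int tpg_table[MAX_TPGS];

/* Reject out-of-range indices; "idx > MAX_TPGS" would wrongly accept
 * idx == MAX_TPGS and write one slot past the end of the array. */
static int select_tpg(unsigned long idx, int value)
{
    if (idx >= MAX_TPGS)
        return -1;          /* out of range */
    tpg_table[idx] = value; /* safe: idx is 0..MAX_TPGS-1 */
    return 0;
}

int main(void)
{
    printf("idx 31: %d\n", select_tpg(31, 1)); /* accepted */
    printf("idx 32: %d\n", select_tpg(32, 1)); /* rejected */
    return 0;
}
```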
isis_print_is_reach_subtlv(netdissect_options *ndo,
const uint8_t *tptr, u_int subt, u_int subl,
const char *ident)
{
u_int te_class,priority_level,gmpls_switch_cap;
union { /* int to float conversion buffer for several subTLVs */
float f;
uint32_t i;
} bw;
/* first lets see if we know the subTLVs name*/
ND_PRINT((ndo, "%s%s subTLV #%u, length: %u",
ident, tok2str(isis_ext_is_reach_subtlv_values, "unknown", subt),
subt, subl));
ND_TCHECK2(*tptr, subl);
switch(subt) {
case ISIS_SUBTLV_EXT_IS_REACH_ADMIN_GROUP:
case ISIS_SUBTLV_EXT_IS_REACH_LINK_LOCAL_REMOTE_ID:
case ISIS_SUBTLV_EXT_IS_REACH_LINK_REMOTE_ID:
if (subl >= 4) {
ND_PRINT((ndo, ", 0x%08x", EXTRACT_32BITS(tptr)));
if (subl == 8) /* rfc4205 */
ND_PRINT((ndo, ", 0x%08x", EXTRACT_32BITS(tptr+4)));
}
break;
case ISIS_SUBTLV_EXT_IS_REACH_IPV4_INTF_ADDR:
case ISIS_SUBTLV_EXT_IS_REACH_IPV4_NEIGHBOR_ADDR:
if (subl >= sizeof(struct in_addr))
ND_PRINT((ndo, ", %s", ipaddr_string(ndo, tptr)));
break;
case ISIS_SUBTLV_EXT_IS_REACH_MAX_LINK_BW :
case ISIS_SUBTLV_EXT_IS_REACH_RESERVABLE_BW:
if (subl >= 4) {
bw.i = EXTRACT_32BITS(tptr);
ND_PRINT((ndo, ", %.3f Mbps", bw.f * 8 / 1000000));
}
break;
case ISIS_SUBTLV_EXT_IS_REACH_UNRESERVED_BW :
if (subl >= 32) {
for (te_class = 0; te_class < 8; te_class++) {
bw.i = EXTRACT_32BITS(tptr);
ND_PRINT((ndo, "%s TE-Class %u: %.3f Mbps",
ident,
te_class,
bw.f * 8 / 1000000));
tptr+=4;
}
}
break;
case ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS: /* fall through */
case ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS_OLD:
ND_PRINT((ndo, "%sBandwidth Constraints Model ID: %s (%u)",
ident,
tok2str(diffserv_te_bc_values, "unknown", *tptr),
*tptr));
tptr++;
/* decode BCs until the subTLV ends */
for (te_class = 0; te_class < (subl-1)/4; te_class++) {
ND_TCHECK2(*tptr, 4);
bw.i = EXTRACT_32BITS(tptr);
ND_PRINT((ndo, "%s Bandwidth constraint CT%u: %.3f Mbps",
ident,
te_class,
bw.f * 8 / 1000000));
tptr+=4;
}
break;
case ISIS_SUBTLV_EXT_IS_REACH_TE_METRIC:
if (subl >= 3)
ND_PRINT((ndo, ", %u", EXTRACT_24BITS(tptr)));
break;
case ISIS_SUBTLV_EXT_IS_REACH_LINK_ATTRIBUTE:
if (subl == 2) {
ND_PRINT((ndo, ", [ %s ] (0x%04x)",
bittok2str(isis_subtlv_link_attribute_values,
"Unknown",
EXTRACT_16BITS(tptr)),
EXTRACT_16BITS(tptr)));
}
break;
case ISIS_SUBTLV_EXT_IS_REACH_LINK_PROTECTION_TYPE:
if (subl >= 2) {
ND_PRINT((ndo, ", %s, Priority %u",
bittok2str(gmpls_link_prot_values, "none", *tptr),
*(tptr+1)));
}
break;
case ISIS_SUBTLV_SPB_METRIC:
if (subl >= 6) {
ND_PRINT((ndo, ", LM: %u", EXTRACT_24BITS(tptr)));
tptr=tptr+3;
ND_PRINT((ndo, ", P: %u", *(tptr)));
tptr++;
ND_PRINT((ndo, ", P-ID: %u", EXTRACT_16BITS(tptr)));
}
break;
case ISIS_SUBTLV_EXT_IS_REACH_INTF_SW_CAP_DESCR:
if (subl >= 36) {
gmpls_switch_cap = *tptr;
ND_PRINT((ndo, "%s Interface Switching Capability:%s",
ident,
tok2str(gmpls_switch_cap_values, "Unknown", gmpls_switch_cap)));
ND_PRINT((ndo, ", LSP Encoding: %s",
tok2str(gmpls_encoding_values, "Unknown", *(tptr + 1))));
tptr+=4;
ND_PRINT((ndo, "%s Max LSP Bandwidth:", ident));
for (priority_level = 0; priority_level < 8; priority_level++) {
bw.i = EXTRACT_32BITS(tptr);
ND_PRINT((ndo, "%s priority level %d: %.3f Mbps",
ident,
priority_level,
bw.f * 8 / 1000000));
tptr+=4;
}
subl-=36;
switch (gmpls_switch_cap) {
case GMPLS_PSC1:
case GMPLS_PSC2:
case GMPLS_PSC3:
case GMPLS_PSC4:
ND_TCHECK2(*tptr, 6);
bw.i = EXTRACT_32BITS(tptr);
ND_PRINT((ndo, "%s Min LSP Bandwidth: %.3f Mbps", ident, bw.f * 8 / 1000000));
ND_PRINT((ndo, "%s Interface MTU: %u", ident, EXTRACT_16BITS(tptr + 4)));
break;
case GMPLS_TSC:
ND_TCHECK2(*tptr, 8);
bw.i = EXTRACT_32BITS(tptr);
ND_PRINT((ndo, "%s Min LSP Bandwidth: %.3f Mbps", ident, bw.f * 8 / 1000000));
ND_PRINT((ndo, "%s Indication %s", ident,
tok2str(gmpls_switch_cap_tsc_indication_values, "Unknown (%u)", *(tptr + 4))));
break;
default:
/* there is some optional stuff left to decode but this is as of yet
not specified so just lets hexdump what is left */
if(subl>0){
if (!print_unknown_data(ndo, tptr, "\n\t\t ", subl))
return(0);
}
}
}
break;
default:
if (!print_unknown_data(ndo, tptr, "\n\t\t ", subl))
return(0);
break;
}
return(1);
trunc:
return(0);
} | 1 | [
"CWE-125",
"CWE-787"
]
| tcpdump | 5d0d76e88ee2d3236d7e032589d6f1d4ec5f7b1e | 301,315,777,931,496,170,000,000,000,000,000,000,000 | 154 | CVE-2017-13055/IS-IS: fix an Extended IS Reachability sub-TLV
In isis_print_is_reach_subtlv() one of the case blocks did not check that
the sub-TLV "V" is actually present and could over-read the input buffer.
Add a length check to fix that and remove a useless boundary check from
a loop because the boundary is tested for the full length of "V" before
the switch block.
Update one of the prior test cases as it turns out it depended on this
previously incorrect code path to make it to its own malformed structure
further down the buffer, the bugfix has changed its output.
This fixes a buffer over-read discovered by Bhargava Shastry,
SecT/TU Berlin.
Add a test using the capture file supplied by the reporter(s). |
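A small, self-contained illustration of the length check described above, assuming a simplified sub-TLV layout instead of tcpdump's real ND_TCHECK/EXTRACT macros: before reading a fixed-size field out of a sub-TLV body, verify that the declared sub-TLV length actually covers it.

```c
#include <stdint.h>
#include <stdio.h>

/* Read a 32-bit big-endian value from a sub-TLV body, but only if the
 * declared sub-TLV length (subl) says the bytes are really there. */
static int read_subtlv_u32(const uint8_t *tptr, unsigned int subl, uint32_t *out)
{
    if (subl < 4)                 /* the missing check: field would over-read */
        return -1;
    *out = ((uint32_t)tptr[0] << 24) | ((uint32_t)tptr[1] << 16) |
           ((uint32_t)tptr[2] << 8)  |  (uint32_t)tptr[3];
    return 0;
}

int main(void)
{
    uint8_t body[4] = { 0x00, 0x00, 0x01, 0x00 };
    uint32_t v;

    if (read_subtlv_u32(body, sizeof(body), &v) == 0)
        printf("value: %u\n", v);
    if (read_subtlv_u32(body, 1, &v) != 0)      /* truncated sub-TLV */
        printf("rejected truncated sub-TLV\n");
    return 0;
}
```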
void heartbeat_kick() {
Mutex::Locker l(heartbeat_lock);
heartbeat_cond.Signal();
} | 0 | [
"CWE-287",
"CWE-284"
]
| ceph | 5ead97120e07054d80623dada90a5cc764c28468 | 123,708,569,656,490,120,000,000,000,000,000,000,000 | 4 | auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random() |
void __init ptlock_cache_init(void)
{
page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
SLAB_PANIC, NULL);
} | 0 | [
"CWE-20"
]
| linux | 6b7339f4c31ad69c8e9c0b2859276e22cf72176d | 171,964,518,653,619,600,000,000,000,000,000,000,000 | 5 | mm: avoid setting up anonymous pages into file mapping
Reading page fault handler code I've noticed that under right
circumstances kernel would map anonymous pages into file mappings: if
the VMA doesn't have vm_ops->fault() and the VMA wasn't fully populated
on ->mmap(), kernel would handle page fault to not populated pte with
do_anonymous_page().
Let's change page fault handler to use do_anonymous_page() only on
anonymous VMA (->vm_ops == NULL) and make sure that the VMA is not
shared.
For file mappings without vm_ops->fault() or a shared VMA without vm_ops,
page fault on pte_none() entry would lead to SIGBUS.
Signed-off-by: Kirill A. Shutemov <[email protected]>
Acked-by: Oleg Nesterov <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Willy Tarreau <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]> |
void InitCommand(unsigned int number)
{
snprintf(numericstr, sizeof(numericstr), "%03u", number);
SetCommand(numericstr);
} | 0 | [
"CWE-200",
"CWE-732"
]
| inspircd | 4350a11c663b0d75f8119743bffb7736d87abd4d | 187,349,355,340,270,500,000,000,000,000,000,000,000 | 5 | Fix sending malformed pong messages in some cases. |
static int ecryptfs_calculate_md5(char *dst,
struct ecryptfs_crypt_stat *crypt_stat,
char *src, int len)
{
struct scatterlist sg;
struct hash_desc desc = {
.tfm = crypt_stat->hash_tfm,
.flags = CRYPTO_TFM_REQ_MAY_SLEEP
};
int rc = 0;
mutex_lock(&crypt_stat->cs_hash_tfm_mutex);
sg_init_one(&sg, (u8 *)src, len);
if (!desc.tfm) {
desc.tfm = crypto_alloc_hash(ECRYPTFS_DEFAULT_HASH, 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(desc.tfm)) {
rc = PTR_ERR(desc.tfm);
ecryptfs_printk(KERN_ERR, "Error attempting to "
"allocate crypto context; rc = [%d]\n",
rc);
goto out;
}
crypt_stat->hash_tfm = desc.tfm;
}
rc = crypto_hash_init(&desc);
if (rc) {
printk(KERN_ERR
"%s: Error initializing crypto hash; rc = [%d]\n",
__func__, rc);
goto out;
}
rc = crypto_hash_update(&desc, &sg, len);
if (rc) {
printk(KERN_ERR
"%s: Error updating crypto hash; rc = [%d]\n",
__func__, rc);
goto out;
}
rc = crypto_hash_final(&desc, dst);
if (rc) {
printk(KERN_ERR
"%s: Error finalizing crypto hash; rc = [%d]\n",
__func__, rc);
goto out;
}
out:
mutex_unlock(&crypt_stat->cs_hash_tfm_mutex);
return rc;
} | 0 | [
"CWE-189"
]
| linux-2.6 | 8faece5f906725c10e7a1f6caf84452abadbdc7b | 191,774,055,951,855,200,000,000,000,000,000,000,000 | 50 | eCryptfs: Allocate a variable number of pages for file headers
When allocating the memory used to store the eCryptfs header contents, a
single, zeroed page was being allocated with get_zeroed_page().
However, the size of an eCryptfs header is either PAGE_CACHE_SIZE or
ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE (8192), whichever is larger, and is
stored in the file's private_data->crypt_stat->num_header_bytes_at_front
field.
ecryptfs_write_metadata_to_contents() was using
num_header_bytes_at_front to decide how many bytes should be written to
the lower filesystem for the file header. Unfortunately, at least 8K
was being written from the page, despite the chance of the single,
zeroed page being smaller than 8K. This resulted in random areas of
kernel memory being written between the 0x1000 and 0x1FFF byte offsets
in the eCryptfs file headers if PAGE_SIZE was 4K.
This patch allocates a variable number of pages, calculated with
num_header_bytes_at_front, and passes the number of allocated pages
along to ecryptfs_write_metadata_to_contents().
Thanks to Florian Streibelt for reporting the data leak and working with
me to find the problem. 2.6.28 is the only kernel release with this
vulnerability. Corresponds to CVE-2009-0787
Signed-off-by: Tyler Hicks <[email protected]>
Acked-by: Dustin Kirkland <[email protected]>
Reviewed-by: Eric Sandeen <[email protected]>
Reviewed-by: Eugene Teo <[email protected]>
Cc: Greg KH <[email protected]>
Cc: dann frazier <[email protected]>
Cc: Serge E. Hallyn <[email protected]>
Cc: Florian Streibelt <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]> |
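A minimal sketch of the sizing arithmetic the commit message describes, with made-up constants standing in for the kernel's PAGE_CACHE_SIZE and ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE: with a 4 KiB page and an 8 KiB minimum header, one page is simply not enough, so the page count has to be derived from the header size.

```c
#include <stdio.h>

#define PAGE_SIZE_BYTES        4096u  /* stand-in for PAGE_CACHE_SIZE */
#define MIN_HEADER_EXTENT_SIZE 8192u  /* stand-in for the eCryptfs minimum */

/* Pages needed to hold the whole header, rounded up. */
static unsigned int header_pages(unsigned int header_bytes)
{
    return (header_bytes + PAGE_SIZE_BYTES - 1) / PAGE_SIZE_BYTES;
}

int main(void)
{
    unsigned int header_bytes = MIN_HEADER_EXTENT_SIZE > PAGE_SIZE_BYTES
                              ? MIN_HEADER_EXTENT_SIZE : PAGE_SIZE_BYTES;
    printf("%u header bytes need %u page(s), not 1\n",
           header_bytes, header_pages(header_bytes));
    return 0;
}
```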
dissect_usb_video_processing_unit(proto_tree *tree, tvbuff_t *tvb, int offset)
{
static const int *control_bits[] = {
&hf_usb_vid_proc_control_D[0],
&hf_usb_vid_proc_control_D[1],
&hf_usb_vid_proc_control_D[2],
&hf_usb_vid_proc_control_D[3],
&hf_usb_vid_proc_control_D[4],
&hf_usb_vid_proc_control_D[5],
&hf_usb_vid_proc_control_D[6],
&hf_usb_vid_proc_control_D[7],
&hf_usb_vid_proc_control_D[8],
&hf_usb_vid_proc_control_D[9],
&hf_usb_vid_proc_control_D[10],
&hf_usb_vid_proc_control_D[11],
&hf_usb_vid_proc_control_D[12],
&hf_usb_vid_proc_control_D[13],
&hf_usb_vid_proc_control_D[14],
&hf_usb_vid_proc_control_D[15],
&hf_usb_vid_proc_control_D[16],
&hf_usb_vid_proc_control_D[17],
&hf_usb_vid_proc_control_D[18],
NULL
};
DISSECTOR_ASSERT(array_length(control_bits) == (1+array_length(hf_usb_vid_proc_control_D)));
proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_src_id, tvb, offset, 1, ENC_LITTLE_ENDIAN);
proto_tree_add_item(tree, hf_usb_vid_max_multiplier, tvb, offset+1, 2, ENC_LITTLE_ENDIAN);
offset += 3;
offset = dissect_bmControl(tree, tvb, offset, ett_processing_controls, control_bits);
proto_tree_add_item(tree, hf_usb_vid_iProcessing, tvb, offset, 1, ENC_LITTLE_ENDIAN);
++offset;
/* UVC 1.1 added bmVideoStandards */
if (tvb_reported_length_remaining(tvb, offset) > 0)
{
static const int *standard_bits[] = {
&hf_usb_vid_proc_standards_D[0],
&hf_usb_vid_proc_standards_D[1],
&hf_usb_vid_proc_standards_D[2],
&hf_usb_vid_proc_standards_D[3],
&hf_usb_vid_proc_standards_D[4],
&hf_usb_vid_proc_standards_D[5],
NULL
};
DISSECTOR_ASSERT(array_length(standard_bits) == (1+array_length(hf_usb_vid_proc_standards_D)));
proto_tree_add_bitmask(tree, tvb, offset, hf_usb_vid_proc_standards,
ett_video_standards, standard_bits, ENC_NA);
++offset;
}
return offset;
} | 0 | [
"CWE-476"
]
| wireshark | 2cb5985bf47bdc8bea78d28483ed224abdd33dc6 | 22,308,183,065,008,277,000,000,000,000,000,000,000 | 58 | Make class "type" for USB conversations.
USB dissectors can't assume that only their class type has been passed around in the conversation. Make explicit check that class type expected matches the dissector and stop/prevent dissection if there isn't a match.
Bug: 12356
Change-Id: Ib23973a4ebd0fbb51952ffc118daf95e3389a209
Reviewed-on: https://code.wireshark.org/review/15212
Petri-Dish: Michael Mann <[email protected]>
Reviewed-by: Martin Kaiser <[email protected]>
Petri-Dish: Martin Kaiser <[email protected]>
Tested-by: Petri Dish Buildbot <[email protected]>
Reviewed-by: Michael Mann <[email protected]> |
static int __init nf_tables_module_init(void)
{
int err;
info = kmalloc(sizeof(struct nft_expr_info) * NFT_RULE_MAXEXPRS,
GFP_KERNEL);
if (info == NULL) {
err = -ENOMEM;
goto err1;
}
err = nf_tables_core_module_init();
if (err < 0)
goto err2;
err = nfnetlink_subsys_register(&nf_tables_subsys);
if (err < 0)
goto err3;
pr_info("nf_tables: (c) 2007-2009 Patrick McHardy <[email protected]>\n");
return register_pernet_subsys(&nf_tables_net_ops);
err3:
nf_tables_core_module_exit();
err2:
kfree(info);
err1:
return err;
} | 0 | [
"CWE-19"
]
| nf | a2f18db0c68fec96631c10cad9384c196e9008ac | 224,717,630,896,958,300,000,000,000,000,000,000,000 | 28 | netfilter: nf_tables: fix flush ruleset chain dependencies
Jumping between chains doesn't mix well with flush ruleset. Rules
from a different chain and set elements may still refer to us.
[ 353.373791] ------------[ cut here ]------------
[ 353.373845] kernel BUG at net/netfilter/nf_tables_api.c:1159!
[ 353.373896] invalid opcode: 0000 [#1] SMP
[ 353.373942] Modules linked in: intel_powerclamp uas iwldvm iwlwifi
[ 353.374017] CPU: 0 PID: 6445 Comm: 31c3.nft Not tainted 3.18.0 #98
[ 353.374069] Hardware name: LENOVO 5129CTO/5129CTO, BIOS 6QET47WW (1.17 ) 07/14/2010
[...]
[ 353.375018] Call Trace:
[ 353.375046] [<ffffffff81964c31>] ? nf_tables_commit+0x381/0x540
[ 353.375101] [<ffffffff81949118>] nfnetlink_rcv+0x3d8/0x4b0
[ 353.375150] [<ffffffff81943fc5>] netlink_unicast+0x105/0x1a0
[ 353.375200] [<ffffffff8194438e>] netlink_sendmsg+0x32e/0x790
[ 353.375253] [<ffffffff818f398e>] sock_sendmsg+0x8e/0xc0
[ 353.375300] [<ffffffff818f36b9>] ? move_addr_to_kernel.part.20+0x19/0x70
[ 353.375357] [<ffffffff818f44f9>] ? move_addr_to_kernel+0x19/0x30
[ 353.375410] [<ffffffff819016d2>] ? verify_iovec+0x42/0xd0
[ 353.375459] [<ffffffff818f3e10>] ___sys_sendmsg+0x3f0/0x400
[ 353.375510] [<ffffffff810615fa>] ? native_sched_clock+0x2a/0x90
[ 353.375563] [<ffffffff81176697>] ? acct_account_cputime+0x17/0x20
[ 353.375616] [<ffffffff8110dc78>] ? account_user_time+0x88/0xa0
[ 353.375667] [<ffffffff818f4bbd>] __sys_sendmsg+0x3d/0x80
[ 353.375719] [<ffffffff81b184f4>] ? int_check_syscall_exit_work+0x34/0x3d
[ 353.375776] [<ffffffff818f4c0d>] SyS_sendmsg+0xd/0x20
[ 353.375823] [<ffffffff81b1826d>] system_call_fastpath+0x16/0x1b
Release objects in this order: rules -> sets -> chains -> tables, to
make sure no references to chains are held anymore.
Reported-by: Asbjoern Sloth Toennesen <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]> |
static int push_vargs(char *data, int data_length, char ***output)
{
int num = 0;
char *cur = data;
if (!data || *output)
return -1;
*output = must_realloc(NULL, sizeof(**output));
while (cur < data + data_length) {
num++;
*output = must_realloc(*output, (num + 1) * sizeof(**output));
(*output)[num - 1] = cur;
cur += strlen(cur) + 1;
}
(*output)[num] = NULL;
return num;
} | 0 | [
"CWE-78"
]
| lxc | 6400238d08cdf1ca20d49bafb85f4e224348bf9d | 52,249,456,616,254,960,000,000,000,000,000,000,000 | 20 | CVE-2019-5736 (runC): rexec callers as memfd
Adam Iwaniuk and Borys Popławski discovered that an attacker can compromise the
runC host binary from inside a privileged runC container. As a result, this
could be exploited to gain root access on the host. runC is used as the default
runtime for containers with Docker, containerd, Podman, and CRI-O.
The attack can be made when attaching to a running container or when starting a
container running a specially crafted image. For example, when runC attaches
to a container the attacker can trick it into executing itself. This could be
done by replacing the target binary inside the container with a custom binary
pointing back at the runC binary itself. As an example, if the target binary
was /bin/bash, this could be replaced with an executable script specifying the
interpreter path #!/proc/self/exe (/proc/self/exe is a symbolic link created
by the kernel for every process which points to the binary that was executed
for that process). As such when /bin/bash is executed inside the container,
instead the target of /proc/self/exe will be executed - which will point to the
runc binary on the host. The attacker can then proceed to write to the target
of /proc/self/exe to try and overwrite the runC binary on the host. However in
general, this will not succeed as the kernel will not permit it to be
overwritten whilst runC is executing. To overcome this, the attacker can
instead open a file descriptor to /proc/self/exe using the O_PATH flag and then
proceed to reopen the binary as O_WRONLY through /proc/self/fd/<nr> and try to
write to it in a busy loop from a separate process. Ultimately it will succeed
when the runC binary exits. After this the runC binary is compromised and can
be used to attack other containers or the host itself.
This attack is only possible with privileged containers since it requires root
privilege on the host to overwrite the runC binary. Unprivileged containers
with a non-identity ID mapping do not have the permission to write to the host
binary and therefore are unaffected by this attack.
LXC is also impacted in a similar manner by this vulnerability, however as the
LXC project considers privileged containers to be unsafe no CVE has been
assigned for this issue for LXC. Quoting from the
https://linuxcontainers.org/lxc/security/ project's Security information page:
"As privileged containers are considered unsafe, we typically will not consider
new container escape exploits to be security issues worthy of a CVE and quick
fix. We will however try to mitigate those issues so that accidental damage to
the host is prevented."
To prevent this attack, LXC has been patched to create a temporary copy of the
calling binary itself when it starts or attaches to containers. To do this LXC
creates an anonymous, in-memory file using the memfd_create() system call and
copies itself into the temporary in-memory file, which is then sealed to
prevent further modifications. LXC then executes this sealed, in-memory file
instead of the original on-disk binary. Any compromising write operations from
a privileged container to the host LXC binary will then write to the temporary
in-memory binary and not to the host binary on-disk, preserving the integrity
of the host LXC binary. Also as the temporary, in-memory LXC binary is sealed,
writes to this will also fail.
Note: memfd_create() was added to the Linux kernel in the 3.17 release.
Signed-off-by: Christian Brauner <[email protected]>
Co-Developed-by: Aleksa Sarai <[email protected]>
Acked-by: Serge Hallyn <[email protected]>
Signed-off-by: Christian Brauner <[email protected]> |
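A hedged userspace sketch of the mitigation described above, not the actual LXC implementation: copy the running binary into an anonymous memfd and seal it against writes; the sealed fd (via /proc/self/fd/N or fexecve()) would then be executed instead of the on-disk binary. Error handling and the copy loop are deliberately minimal.

```c
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* Copy /proc/self/exe into a sealed, anonymous in-memory file and return
 * its fd; a caller could then re-exec through /proc/self/fd/<fd>. */
static int sealed_self_copy(void)
{
    int src = open("/proc/self/exe", O_RDONLY | O_CLOEXEC);
    int memfd = memfd_create("copy", MFD_CLOEXEC | MFD_ALLOW_SEALING);
    char buf[4096];
    ssize_t n;

    if (src < 0 || memfd < 0)
        return -1;
    while ((n = read(src, buf, sizeof(buf))) > 0)
        if (write(memfd, buf, (size_t)n) != n)
            return -1;
    close(src);

    /* Seal the copy so writes through /proc/<pid>/fd/... can no longer
     * reach the binary being executed. */
    if (fcntl(memfd, F_ADD_SEALS,
              F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE | F_SEAL_SEAL) < 0)
        return -1;
    return memfd;
}

int main(void)
{
    int fd = sealed_self_copy();
    if (fd >= 0)
        printf("sealed in-memory copy available at fd %d\n", fd);
    else
        printf("copy failed\n");
    return 0;
}
```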
int ssl_cert_select_current(CERT *c, X509 *x)
{
int i;
if (x == NULL)
return 0;
for (i = 0; i < SSL_PKEY_NUM; i++) {
CERT_PKEY *cpk = c->pkeys + i;
if (cpk->x509 == x && cpk->privatekey) {
c->key = cpk;
return 1;
}
}
for (i = 0; i < SSL_PKEY_NUM; i++) {
CERT_PKEY *cpk = c->pkeys + i;
if (cpk->privatekey && cpk->x509 && !X509_cmp(cpk->x509, x)) {
c->key = cpk;
return 1;
}
}
return 0;
} | 0 | [
"CWE-835"
]
| openssl | 758754966791c537ea95241438454aa86f91f256 | 27,761,455,870,851,234,000,000,000,000,000,000,000 | 22 | Fix invalid handling of verify errors in libssl
In the event that X509_verify() returned an internal error result then
libssl would mishandle this and set rwstate to SSL_RETRY_VERIFY. This
subsequently causes SSL_get_error() to return SSL_ERROR_WANT_RETRY_VERIFY.
That return code is supposed to only ever be returned if an application
is using an app verify callback to complete replace the use of
X509_verify(). Applications may not be written to expect that return code
and could therefore crash (or misbehave in some other way) as a result.
CVE-2021-4044
Reviewed-by: Tomas Mraz <[email protected]> |
evdev_device_is_joystick_or_gamepad(struct evdev_device *device)
{
enum evdev_device_udev_tags udev_tags;
bool has_joystick_tags;
struct libevdev *evdev = device->evdev;
unsigned int code, num_joystick_btns = 0, num_keys = 0;
/* The EVDEV_UDEV_TAG_JOYSTICK is set when a joystick or gamepad button
* is found. However, it can not be used to identify joysticks or
* gamepads because there are keyboards that also have it. Even worse,
* many joysticks also map KEY_* and thus are tagged as keyboards.
*
* In order to be able to detect joysticks and gamepads and
* differentiate them from keyboards, apply the following rules:
*
* 1. The device is tagged as joystick but not as tablet
* 2. It has at least 2 joystick buttons
* 3. It doesn't have 10 keyboard keys */
udev_tags = evdev_device_get_udev_tags(device, device->udev_device);
has_joystick_tags = (udev_tags & EVDEV_UDEV_TAG_JOYSTICK) &&
!(udev_tags & EVDEV_UDEV_TAG_TABLET) &&
!(udev_tags & EVDEV_UDEV_TAG_TABLET_PAD);
if (!has_joystick_tags)
return false;
for (code = BTN_JOYSTICK; code < BTN_DIGI; code++) {
if (libevdev_has_event_code(evdev, EV_KEY, code))
num_joystick_btns++;
}
for (code = BTN_TRIGGER_HAPPY; code <= BTN_TRIGGER_HAPPY40; code++) {
if (libevdev_has_event_code(evdev, EV_KEY, code))
num_joystick_btns++;
}
if (num_joystick_btns < 2) /* require at least 2 joystick buttons */
return false;
for (code = KEY_ESC; code <= KEY_MICMUTE; code++) {
if (libevdev_has_event_code(evdev, EV_KEY, code) )
num_keys++;
}
for (code = KEY_OK; code <= KEY_LIGHTS_TOGGLE; code++) {
if (libevdev_has_event_code(evdev, EV_KEY, code) )
num_keys++;
}
for (code = KEY_ALS_TOGGLE; code < BTN_TRIGGER_HAPPY; code++) {
if (libevdev_has_event_code(evdev, EV_KEY, code) )
num_keys++;
}
if (num_keys >= 10) /* should not have 10 keyboard keys */
return false;
return true;
} | 0 | [
"CWE-134"
]
| libinput | 2a8b8fde90d63d48ce09ddae44142674bbca1c28 | 239,909,361,827,452,970,000,000,000,000,000,000,000 | 62 | evdev: strip the device name of format directives
This fixes a format string vulnerabilty.
evdev_log_message() composes a format string consisting of a fixed
prefix (including the rendered device name) and the passed-in format
buffer. This format string is then passed with the arguments to the
actual log handler, which usually and eventually ends up being printf.
If the device name contains a printf-style format directive, these ended
up in the format string and thus get interpreted correctly, e.g. for a
device "Foo%sBar" the log message vs printf invocation ends up being:
evdev_log_message(device, "some message %s", "some argument");
printf("event9 - Foo%sBar: some message %s", "some argument");
This can enable an attacker to execute malicious code with the
privileges of the process using libinput.
To exploit this, an attacker needs to be able to create a kernel device
with a malicious name, e.g. through /dev/uinput or a Bluetooth device.
To fix this, convert any potential format directives in the device name
by duplicating percentages.
Pre-rendering the device to avoid the issue altogether would be nicer
but the current log level hooks do not easily allow for this. The device
name is the only user-controlled part of the format string.
A second potential issue is the sysname of the device which is also
sanitized.
This issue was found by Albin Eldstål-Ahrens and Benjamin Svensson from
Assured AB, and independently by Lukas Lamster.
Fixes #752
Signed-off-by: Peter Hutterer <[email protected]>
(cherry picked from commit a423d7d3269dc32a87384f79e29bb5ac021c83d1) |
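A self-contained sketch of the '%'-doubling described in the commit message, independent of libinput's actual helpers: every '%' in the untrusted device name is duplicated so that, if the name is later folded into a printf-style format string, it can only ever produce a literal percent sign.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return a newly allocated copy of 'name' with every '%' doubled,
 * so it is inert if it ends up inside a format string. */
static char *sanitize_name(const char *name)
{
    size_t len = strlen(name), extra = 0, i, j = 0;
    char *out;

    for (i = 0; i < len; i++)
        if (name[i] == '%')
            extra++;
    out = malloc(len + extra + 1);
    if (!out)
        return NULL;
    for (i = 0; i < len; i++) {
        out[j++] = name[i];
        if (name[i] == '%')
            out[j++] = '%';    /* duplicate the directive marker */
    }
    out[j] = '\0';
    return out;
}

int main(void)
{
    char *safe = sanitize_name("Foo%sBar");
    if (safe) {
        printf("sanitized: %s\n", safe);  /* prints Foo%%sBar */
        free(safe);
    }
    return 0;
}
```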
static int snd_pcm_oss_post(struct snd_pcm_oss_file *pcm_oss_file)
{
struct snd_pcm_substream *substream;
int err;
substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK];
if (substream != NULL) {
err = snd_pcm_oss_make_ready(substream);
if (err < 0)
return err;
snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_START, NULL);
}
/* note: all errors from the start action are ignored */
/* OSS apps do not know, how to handle them */
return 0;
} | 0 | [
"CWE-362"
]
| linux | 8423f0b6d513b259fdab9c9bf4aaa6188d054c2d | 26,127,604,240,489,613,000,000,000,000,000,000,000 | 16 | ALSA: pcm: oss: Fix race at SNDCTL_DSP_SYNC
There is a small race window at snd_pcm_oss_sync() that is called from
OSS PCM SNDCTL_DSP_SYNC ioctl; namely the function calls
snd_pcm_oss_make_ready() at first, then takes the params_lock mutex
for the rest. When the stream is set up again by another thread
between them, it leads to inconsistency, and may result in unexpected
results such as NULL dereference of OSS buffer as a fuzzer spotted
recently.
The fix is simply to cover snd_pcm_oss_make_ready() call into the same
params_lock mutex with snd_pcm_oss_make_ready_locked() variant.
Reported-and-tested-by: butt3rflyh4ck <[email protected]>
Reviewed-by: Jaroslav Kysela <[email protected]>
Cc: <[email protected]>
Link: https://lore.kernel.org/r/CAFcO6XN7JDM4xSXGhtusQfS2mSBcx50VJKwQpCq=WeLt57aaZA@mail.gmail.com
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Takashi Iwai <[email protected]> |
static void destroy_inodecache(void)
{
/*
* Make sure all delayed rcu free inodes are flushed before we
* destroy cache.
*/
rcu_barrier();
kmem_cache_destroy(ext3_inode_cachep);
} | 0 | [
"CWE-20"
]
| linux | 8d0c2d10dd72c5292eda7a06231056a4c972e4cc | 19,105,248,239,219,718,000,000,000,000,000,000,000 | 9 | ext3: Fix format string issues
ext3_msg() takes the printk prefix as the second parameter and the
format string as the third parameter. Two callers of ext3_msg omit the
prefix and pass the format string as the second parameter and the first
parameter to the format string as the third parameter. In both cases
this string comes from an arbitrary source. Which means the string may
contain format string characters, which will
lead to undefined and potentially harmful behavior.
The issue was introduced in commit 4cf46b67eb("ext3: Unify log messages
in ext3") and is fixed by this patch.
CC: [email protected]
Signed-off-by: Lars-Peter Clausen <[email protected]>
Signed-off-by: Jan Kara <[email protected]> |
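The same class of bug in miniature, shown with plain printf() rather than ext3_msg(): untrusted text must be passed as an argument to a fixed "%s" format, never as the format string itself.

```c
#include <stdio.h>

/* 'label' comes from an arbitrary source (e.g. an on-disk volume label). */
static void report(const char *label)
{
    /* Wrong: printf(label); -- any '%' sequences in 'label' would be
     * interpreted as format directives. */
    printf("mounting volume: %s\n", label);   /* safe: fixed format string */
}

int main(void)
{
    report("data%disk%n");
    return 0;
}
```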
int yr_re_ast_create(
RE_AST** re_ast)
{
*re_ast = (RE_AST*) yr_malloc(sizeof(RE_AST));
if (*re_ast == NULL)
return ERROR_INSUFFICIENT_MEMORY;
(*re_ast)->flags = 0;
(*re_ast)->levels = 0;
(*re_ast)->root_node = NULL;
return ERROR_SUCCESS;
} | 0 | [
"CWE-674",
"CWE-787"
]
| yara | 925bcf3c3b0a28b5b78e25d9efda5c0bf27ae699 | 324,271,219,376,602,400,000,000,000,000,000,000,000 | 14 | Fix issue #674. Move regexp limits to limits.h. |
void StreamTcpSetSessionBypassFlag(TcpSession *ssn)
{
ssn->flags |= STREAMTCP_FLAG_BYPASS;
} | 0 | []
| suricata | 50e2b973eeec7172991bf8f544ab06fb782b97df | 39,131,647,414,094,250,000,000,000,000,000,000,000 | 4 | stream/tcp: handle RST with MD5 or AO header
Special handling for RST packets if they have an TCP MD5 or AO header option.
The options hash can't be validated. The end host might be able to validate
it, as it can have a key/password that was communicated out of band.
The sender could use this to move the TCP state to 'CLOSED', leading to
a desync of the TCP session.
This patch builds on top of
843d0b7a10bb ("stream: support RST getting lost/ignored")
It flags the receiver as having received an RST and moves the TCP state
into the CLOSED state. It then reverts this if the sender continues to
send traffic. In this case it sets the following event:
stream-event:suspected_rst_inject;
Bug: #4710. |
MergeAffix(IspellDict *Conf, int a1, int a2)
{
char **ptr;
while (Conf->nAffixData + 1 >= Conf->lenAffixData)
{
Conf->lenAffixData *= 2;
Conf->AffixData = (char **) repalloc(Conf->AffixData,
sizeof(char *) * Conf->lenAffixData);
}
ptr = Conf->AffixData + Conf->nAffixData;
*ptr = cpalloc(strlen(Conf->AffixData[a1]) +
strlen(Conf->AffixData[a2]) +
1 /* space */ + 1 /* \0 */ );
sprintf(*ptr, "%s %s", Conf->AffixData[a1], Conf->AffixData[a2]);
ptr++;
*ptr = NULL;
Conf->nAffixData++;
return Conf->nAffixData - 1;
} | 0 | [
"CWE-119"
]
| postgres | 01824385aead50e557ca1af28640460fa9877d51 | 144,609,670,899,161,240,000,000,000,000,000,000,000 | 22 | Prevent potential overruns of fixed-size buffers.
Coverity identified a number of places in which it couldn't prove that a
string being copied into a fixed-size buffer would fit. We believe that
most, perhaps all of these are in fact safe, or are copying data that is
coming from a trusted source so that any overrun is not really a security
issue. Nonetheless it seems prudent to forestall any risk by using
strlcpy() and similar functions.
Fixes by Peter Eisentraut and Jozef Mlich based on Coverity reports.
In addition, fix a potential null-pointer-dereference crash in
contrib/chkpass. The crypt(3) function is defined to return NULL on
failure, but chkpass.c didn't check for that before using the result.
The main practical case in which this could be an issue is if libc is
configured to refuse to execute unapproved hashing algorithms (e.g.,
"FIPS mode"). This ideally should've been a separate commit, but
since it touches code adjacent to one of the buffer overrun changes,
I included it in this commit to avoid last-minute merge issues.
This issue was reported by Honza Horak.
Security: CVE-2014-0065 for buffer overruns, CVE-2014-0066 for crypt() |
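A short illustration of the bounded-copy pattern the commit describes. snprintf() is used here because it is universally available; strlcpy() behaves similarly on platforms that provide it, and PostgreSQL ships its own fallback.

```c
#include <stdio.h>
#include <string.h>

#define NAMEBUF_LEN 16

/* Copy an untrusted, possibly oversized string into a fixed buffer.
 * snprintf() always NUL-terminates and never writes past dstlen bytes. */
static void set_name(char *dst, size_t dstlen, const char *src)
{
    /* Unsafe alternative: strcpy(dst, src); overruns dst if src is long. */
    snprintf(dst, dstlen, "%s", src);
}

int main(void)
{
    char name[NAMEBUF_LEN];
    set_name(name, sizeof(name), "a-much-longer-identifier-than-expected");
    printf("stored (truncated): %s\n", name);
    return 0;
}
```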
TfLiteStatus ResizeOutput(TfLiteContext* context, const TfLiteTensor* input,
const TfLiteTensor* axis, TfLiteTensor* output) {
int axis_value = *GetTensorData<int>(axis);
if (axis_value < 0) {
axis_value += NumDimensions(input);
}
// Copy the input dimensions to output except the axis dimension.
TfLiteIntArray* output_dims = TfLiteIntArrayCreate(NumDimensions(input) - 1);
int j = 0;
for (int i = 0; i < NumDimensions(input); ++i) {
if (i != axis_value) {
output_dims->data[j] = SizeOfDimension(input, i);
++j;
}
}
return context->ResizeTensor(context, output, output_dims);
} | 0 | [
"CWE-125",
"CWE-787"
]
| tensorflow | 1970c2158b1ffa416d159d03c3370b9a462aee35 | 330,933,706,328,449,000,000,000,000,000,000,000,000 | 18 | [tflite]: Insert `nullptr` checks when obtaining tensors.
As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages.
We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`).
PiperOrigin-RevId: 332521299
Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56 |
static void lo_write_buf(fuse_req_t req, fuse_ino_t ino,
struct fuse_bufvec *in_buf, off_t off,
struct fuse_file_info *fi)
{
(void)ino;
ssize_t res;
struct fuse_bufvec out_buf = FUSE_BUFVEC_INIT(fuse_buf_size(in_buf));
bool cap_fsetid_dropped = false;
out_buf.buf[0].flags = FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK;
out_buf.buf[0].fd = lo_fi_fd(req, fi);
out_buf.buf[0].pos = off;
fuse_log(FUSE_LOG_DEBUG,
"lo_write_buf(ino=%" PRIu64 ", size=%zd, off=%lu kill_priv=%d)\n",
ino, out_buf.buf[0].size, (unsigned long)off, fi->kill_priv);
res = drop_security_capability(lo_data(req), out_buf.buf[0].fd);
if (res) {
fuse_reply_err(req, res);
return;
}
/*
* If kill_priv is set, drop CAP_FSETID which should lead to kernel
* clearing setuid/setgid on file. Note, for WRITE, we need to do
* this even if killpriv_v2 is not enabled. fuse direct write path
* relies on this.
*/
if (fi->kill_priv) {
res = drop_effective_cap("FSETID", &cap_fsetid_dropped);
if (res != 0) {
fuse_reply_err(req, res);
return;
}
}
res = fuse_buf_copy(&out_buf, in_buf);
if (res < 0) {
fuse_reply_err(req, -res);
} else {
fuse_reply_write(req, (size_t)res);
}
if (cap_fsetid_dropped) {
res = gain_effective_cap("FSETID");
if (res) {
fuse_log(FUSE_LOG_ERR, "Failed to gain CAP_FSETID\n");
}
}
} | 0 | [
"CWE-281"
]
| qemu | e586edcb410543768ef009eaa22a2d9dd4a53846 | 103,168,596,858,419,800,000,000,000,000,000,000,000 | 51 | virtiofs: drop remapped security.capability xattr as needed
On Linux, the 'security.capability' xattr holds a set of
capabilities that can change when an executable is run, giving
a limited form of privilege escalation to those programs that
the writer of the file deemed worthy.
Any write causes the 'security.capability' xattr to be dropped,
stopping anyone from gaining privilege by modifying a blessed
file.
Fuse relies on the daemon to do this dropping, and in turn the
daemon relies on the host kernel to drop the xattr for it. However,
with the addition of -o xattrmap, the xattr that the guest
stores its capabilities in is now not the same as the one that
the host kernel automatically clears.
Where the mapping changes 'security.capability', explicitly clear
the remapped name to preserve the same behaviour.
This bug is assigned CVE-2021-20263.
Signed-off-by: Dr. David Alan Gilbert <[email protected]>
Reviewed-by: Vivek Goyal <[email protected]> |
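A rough userspace sketch of the idea in the commit message: on a write, explicitly remove the xattr under which the guest's security.capability was remapped. The remapped name used here is purely illustrative, since the real name depends on the -o xattrmap configuration.

```c
#include <errno.h>
#include <stdio.h>
#include <sys/xattr.h>

/* Hypothetical remapped name; the actual mapping is configuration-defined. */
#define REMAPPED_CAPS_XATTR "user.virtiofsd.security.capability"

/* Drop the remapped capability xattr, mirroring what the host kernel does
 * automatically for the unmapped "security.capability" name on write. */
static int drop_remapped_caps(int fd)
{
    if (fremovexattr(fd, REMAPPED_CAPS_XATTR) == 0)
        return 0;
    if (errno == ENODATA)   /* nothing to drop */
        return 0;
    return -errno;
}

int main(void)
{
    /* In a real daemon this would run on the file's fd around the write;
     * calling it on an invalid fd here just demonstrates the call shape. */
    int rc = drop_remapped_caps(-1);
    printf("drop_remapped_caps(-1) -> %d\n", rc);
    return 0;
}
```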
static void _blk_free(struct bpf_state *state, struct bpf_blk *blk)
{
int iter;
struct bpf_blk *b_iter;
struct bpf_instr *i_iter;
if (blk == NULL)
return;
/* remove this block from the hash table */
_hsh_remove(state, blk->hash);
/* run through the block freeing TGT_PTR_{BLK,HSH} jump targets */
for (iter = 0; iter < blk->blk_cnt; iter++) {
i_iter = &blk->blks[iter];
switch (i_iter->jt.type) {
case TGT_PTR_BLK:
_blk_free(state, i_iter->jt.tgt.blk);
break;
case TGT_PTR_HSH:
b_iter = _hsh_find(state, i_iter->jt.tgt.hash);
_blk_free(state, b_iter);
break;
default:
/* do nothing */
break;
}
switch (i_iter->jf.type) {
case TGT_PTR_BLK:
_blk_free(state, i_iter->jf.tgt.blk);
break;
case TGT_PTR_HSH:
b_iter = _hsh_find(state, i_iter->jf.tgt.hash);
_blk_free(state, b_iter);
break;
default:
/* do nothing */
break;
}
}
__blk_free(state, blk);
} | 0 | []
| libseccomp | cf5d1538d243fb6f1839db70b69469d3d7e9e077 | 8,654,204,812,807,697,000,000,000,000,000,000,000 | 42 | bpf: pass the correct accumulator state to the next level
We were mistakenly passing the wrong accumulator state (the state at
the start of the instruction block, not at the end) which was causing
us to generate unnecessary load instructions.
Signed-off-by: Paul Moore <[email protected]> |
static void test_http_chunked(void) {
struct mg_mgr mgr;
const char *data, *url = "http://127.0.0.1:12344";
uint32_t i, done = 0;
mg_mgr_init(&mgr);
mg_http_listen(&mgr, url, eh2, NULL);
mg_http_connect(&mgr, url, eh3, &done);
for (i = 0; i < 50 && done == 0; i++) mg_mgr_poll(&mgr, 1);
ASSERT(i < 50);
data = LONG_CHUNK "chunk 1chunk 2";
ASSERT(done == mg_crc32(0, data, strlen(data)));
done = 0;
mg_http_connect(&mgr, url, eh4, &done);
for (i = 0; i < 50 && done == 0; i++) mg_mgr_poll(&mgr, 1);
data = LONG_CHUNK LONG_CHUNK "chunk 1chunk 2" LONG_CHUNK "chunk 1chunk 2";
ASSERT(done == mg_crc32(0, data, strlen(data)));
done = 0;
mg_http_connect(&mgr, url, eh5, &done);
for (i = 0; i < 50 && done == 0; i++) mg_mgr_poll(&mgr, 1);
data = LONG_CHUNK "chunk 1chunk 2";
ASSERT(done == mg_crc32(0, data, strlen(data)));
mg_mgr_free(&mgr);
ASSERT(mgr.conns == NULL);
} | 0 | [
"CWE-552"
]
| mongoose | c65c8fdaaa257e0487ab0aaae9e8f6b439335945 | 255,440,623,456,862,900,000,000,000,000,000,000,000 | 28 | Protect against the directory traversal in mg_upload() |
int dtls1_send_server_done(SSL *s)
{
unsigned char *p;
if (s->state == SSL3_ST_SW_SRVR_DONE_A)
{
p=(unsigned char *)s->init_buf->data;
/* do the header */
p = dtls1_set_message_header(s, p, SSL3_MT_SERVER_DONE, 0, 0, 0);
s->state=SSL3_ST_SW_SRVR_DONE_B;
/* number of bytes to write */
s->init_num=DTLS1_HM_HEADER_LENGTH;
s->init_off=0;
/* buffer the message to handle re-xmits */
dtls1_buffer_message(s, 0);
}
/* SSL3_ST_SW_SRVR_DONE_B */
return(dtls1_do_write(s,SSL3_RT_HANDSHAKE));
} | 0 | []
| openssl | 4817504d069b4c5082161b02a22116ad75f822b1 | 247,126,093,076,016,700,000,000,000,000,000,000,000 | 23 | PR: 2658
Submitted by: Robin Seggelmann <[email protected]>
Reviewed by: steve
Support for TLS/DTLS heartbeats. |
ClientHttpRequest::updateError(const Error &error)
{
if (request)
request->error.update(error);
else
al->updateError(error);
} | 0 | [
"CWE-116"
]
| squid | 7024fb734a59409889e53df2257b3fc817809fb4 | 233,430,880,148,894,000,000,000,000,000,000,000,000 | 7 | Handle more Range requests (#790)
Also removed some effectively unused code. |
int redisAppendFormattedCommand(redisContext *c, const char *cmd, size_t len) {
if (__redisAppendCommand(c, cmd, len) != REDIS_OK) {
return REDIS_ERR;
}
return REDIS_OK;
} | 0 | [
"CWE-190",
"CWE-680"
]
| redis | 0215324a66af949be39b34be2d55143232c1cb71 | 269,164,445,424,823,800,000,000,000,000,000,000,000 | 8 | Fix redis-cli / redis-sential overflow on some platforms (CVE-2021-32762) (#9587)
The redis-cli command line tool and redis-sentinel service may be vulnerable
to integer overflow when parsing specially crafted large multi-bulk network
replies. This is a result of a vulnerability in the underlying hiredis
library which does not perform an overflow check before calling the calloc()
heap allocation function.
This issue only impacts systems with heap allocators that do not perform their
own overflow checks. Most modern systems do and are therefore not likely to
be affected. Furthermore, by default redis-sentinel uses the jemalloc allocator
which is also not vulnerable.
Co-authored-by: Yossi Gottlieb <[email protected]> |
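A minimal illustration of the overflow check that was missing, detached from the hiredis reply parser: validate that count times element size cannot wrap before asking the allocator for the buffer.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Allocate 'count' elements of 'size' bytes, refusing counts whose total
 * would overflow size_t (which could otherwise yield a tiny allocation). */
static void *checked_alloc(size_t count, size_t size)
{
    if (size != 0 && count > SIZE_MAX / size)
        return NULL;                 /* would overflow */
    return calloc(count, size);      /* modern calloc also checks; be explicit */
}

int main(void)
{
    void *ok  = checked_alloc(1024, sizeof(uint64_t));
    void *bad = checked_alloc(SIZE_MAX / 4, 16);   /* rejected */
    printf("ok=%p bad=%p\n", ok, bad);
    free(ok);
    return 0;
}
```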
int UZ_EXP UzpInput(pG, buf, size, flag)
zvoid *pG; /* globals struct: always passed */
uch *buf; /* preformatted string to be printed */
int *size; /* (address of) size of buf and of returned string */
int flag; /* flag bits (bit 0: no echo) */
{
/* tell picky compilers to shut up about "unused variable" warnings */
(void)pG; (void)buf; (void)flag;
*size = 0;
return 0;
} /* end function UzpInput() */ | 0 | [
"CWE-400"
]
| unzip | 41beb477c5744bc396fa1162ee0c14218ec12213 | 70,907,580,209,140,280,000,000,000,000,000,000,000 | 13 | Fix bug in undefer_input() that misplaced the input state. |
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
int proto)
{
struct net *net = dev_net(skb->dev);
struct sock *sk;
struct udphdr *uh;
struct in6_addr *saddr, *daddr;
u32 ulen = 0;
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
goto short_packet;
saddr = &ipv6_hdr(skb)->saddr;
daddr = &ipv6_hdr(skb)->daddr;
uh = udp_hdr(skb);
ulen = ntohs(uh->len);
if (ulen > skb->len)
goto short_packet;
if (proto == IPPROTO_UDP) {
/* UDP validates ulen. */
/* Check for jumbo payload */
if (ulen == 0)
ulen = skb->len;
if (ulen < sizeof(*uh))
goto short_packet;
if (ulen < skb->len) {
if (pskb_trim_rcsum(skb, ulen))
goto short_packet;
saddr = &ipv6_hdr(skb)->saddr;
daddr = &ipv6_hdr(skb)->daddr;
uh = udp_hdr(skb);
}
}
if (udp6_csum_init(skb, uh, proto))
goto discard;
/*
* Multicast receive code
*/
if (ipv6_addr_is_multicast(daddr))
return __udp6_lib_mcast_deliver(net, skb,
saddr, daddr, udptable);
/* Unicast */
/*
* check socket cache ... must talk to Alan about his plans
* for sock caches... i'll skip this for now.
*/
sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
if (sk == NULL) {
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard;
if (udp_lib_checksum_complete(skb))
goto discard;
UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
proto == IPPROTO_UDPLITE);
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
kfree_skb(skb);
return 0;
}
/* deliver */
if (sk_rcvqueues_full(sk, skb)) {
sock_put(sk);
goto discard;
}
bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
udpv6_queue_rcv_skb(sk, skb);
else if (sk_add_backlog(sk, skb)) {
atomic_inc(&sk->sk_drops);
bh_unlock_sock(sk);
sock_put(sk);
goto discard;
}
bh_unlock_sock(sk);
sock_put(sk);
return 0;
short_packet:
LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: %d/%u\n",
proto == IPPROTO_UDPLITE ? "-Lite" : "",
ulen, skb->len);
discard:
UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
kfree_skb(skb);
return 0;
} | 0 | [
"CWE-400"
]
| linux-2.6 | c377411f2494a931ff7facdbb3a6839b1266bcf6 | 47,467,247,152,057,760,000,000,000,000,000,000,000 | 101 | net: sk_add_backlog() take rmem_alloc into account
The current socket backlog limit is not enough to really stop DDOS attacks,
because the user thread spends a lot of time processing a full backlog each
round, and the user might spin wildly on the socket lock.
We should add backlog size and receive_queue size (aka rmem_alloc) to
pace writers, and let user run without being slow down too much.
Introduce a sk_rcvqueues_full() helper, to avoid taking socket lock in
stress situations.
Under huge stress from a multiqueue/RPS enabled NIC, a single flow udp
receiver can now process ~200.000 pps (instead of ~100 pps before the
patch) on a 8 core machine.
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static size_t curl_write_header(char *data, size_t size, size_t nmemb, void *ctx)
{
php_curl *ch = (php_curl *) ctx;
php_curl_write *t = ch->handlers->write_header;
size_t length = size * nmemb;
switch (t->method) {
case PHP_CURL_STDOUT:
/* Handle special case write when we're returning the entire transfer
*/
if (ch->handlers->write->method == PHP_CURL_RETURN && length > 0) {
smart_str_appendl(&ch->handlers->write->buf, data, (int) length);
} else {
PHPWRITE(data, length);
}
break;
case PHP_CURL_FILE:
return fwrite(data, size, nmemb, t->fp);
case PHP_CURL_USER: {
zval argv[2];
zval retval;
int error;
zend_fcall_info fci;
ZVAL_RES(&argv[0], ch->res);
Z_ADDREF(argv[0]);
ZVAL_STRINGL(&argv[1], data, length);
fci.size = sizeof(fci);
fci.function_table = EG(function_table);
ZVAL_COPY_VALUE(&fci.function_name, &t->func_name);
fci.symbol_table = NULL;
fci.object = NULL;
fci.retval = &retval;
fci.param_count = 2;
fci.params = argv;
fci.no_separation = 0;
ch->in_callback = 1;
error = zend_call_function(&fci, &t->fci_cache);
ch->in_callback = 0;
if (error == FAILURE) {
php_error_docref(NULL, E_WARNING, "Could not call the CURLOPT_HEADERFUNCTION");
length = -1;
} else if (!Z_ISUNDEF(retval)) {
if (Z_TYPE(retval) != IS_LONG) {
convert_to_long_ex(&retval);
}
length = Z_LVAL(retval);
}
zval_ptr_dtor(&argv[0]);
zval_ptr_dtor(&argv[1]);
break;
}
case PHP_CURL_IGNORE:
return length;
default:
return -1;
}
return length;
} | 0 | []
| php-src | 124fb22a13fafa3648e4e15b4f207c7096d8155e | 188,998,004,479,615,770,000,000,000,000,000,000,000 | 64 | Fixed bug #68739 #68740 #68741 |
set_time_915(unsigned char *p, time_t t)
{
struct tm tm;
get_tmfromtime(&tm, &t);
set_num_711(p+0, tm.tm_year);
set_num_711(p+1, tm.tm_mon+1);
set_num_711(p+2, tm.tm_mday);
set_num_711(p+3, tm.tm_hour);
set_num_711(p+4, tm.tm_min);
set_num_711(p+5, tm.tm_sec);
set_num_712(p+6, (char)(get_gmoffset(&tm)/(60*15)));
} | 0 | [
"CWE-190"
]
| libarchive | 3014e19820ea53c15c90f9d447ca3e668a0b76c6 | 23,240,843,628,465,780,000,000,000,000,000,000,000 | 13 | Issue 711: Be more careful about verifying filename lengths when writing ISO9660 archives
* Don't cast size_t to int, since this can lead to overflow
on machines where sizeof(int) < sizeof(size_t)
* Check a + b > limit by writing it as
a > limit || b > limit || a + b > limit
to avoid problems when a + b wraps around. |
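The comparison rewrite quoted in the commit message, shown as a standalone check: testing the operands individually first means the sum is only evaluated when it cannot have wrapped (assuming, as for ISO9660 name lengths, that the limit is far below SIZE_MAX).

```c
#include <stddef.h>
#include <stdio.h>

/* Safe form of "a + b > limit": check each operand first so the addition
 * is only performed when it cannot wrap past 'limit'. */
static int exceeds_limit(size_t a, size_t b, size_t limit)
{
    return a > limit || b > limit || a + b > limit;
}

int main(void)
{
    size_t limit = 255;                     /* e.g. a maximum identifier length */
    printf("%d\n", exceeds_limit(200, 100, limit));        /* 1 */
    printf("%d\n", exceeds_limit((size_t)-1, 10, limit));  /* 1, sum never evaluated */
    printf("%d\n", exceeds_limit(100, 100, limit));        /* 0 */
    return 0;
}
```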
void* Type_Chromaticity_Dup(struct _cms_typehandler_struct* self, const void *Ptr, cmsUInt32Number n)
{
return _cmsDupMem(self ->ContextID, Ptr, sizeof(cmsCIExyYTRIPLE));
cmsUNUSED_PARAMETER(n);
} | 0 | [
"CWE-125"
]
| Little-CMS | 5ca71a7bc18b6897ab21d815d15e218e204581e2 | 51,845,201,505,451,870,000,000,000,000,000,000,000 | 6 | Added an extra check to MLU bounds
Thanks to Ibrahim el-sayed for spotting the bug |
outs(const char *s)
{
if (VALID_STRING(s)) {
tputs(s, 1, outc);
return TRUE;
}
return FALSE;
} | 0 | []
| ncurses | 790a85dbd4a81d5f5d8dd02a44d84f01512ef443 | 249,321,343,144,802,350,000,000,000,000,000,000,000 | 8 | ncurses 6.2 - patch 20200531
+ correct configure version-check/warnng for g++ to allow for 10.x
+ re-enable "bel" in konsole-base (report by Nia Huang)
+ add linux-s entry (patch by Alexandre Montaron).
+ drop long-obsolete convert_configure.pl
+ add test/test_parm.c, for checking tparm changes.
+ improve parameter-checking for tparm, adding function _nc_tiparm() to
handle the most-used case, which accepts only numeric parameters
(report/testcase by "puppet-meteor").
+ use a more conservative estimate of the buffer-size in lib_tparm.c's
save_text() and save_number(), in case the sprintf() function
passes-through unexpected characters from a format specifier
(report/testcase by "puppet-meteor").
+ add a check for end-of-string in cvtchar to handle a malformed
string in infotocap (report/testcase by "puppet-meteor"). |
struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
{
int err;
struct nfs_client *clp = nfs_alloc_client(cl_init);
if (IS_ERR(clp))
return clp;
err = nfs_get_cb_ident_idr(clp, cl_init->minorversion);
if (err)
goto error;
if (cl_init->minorversion > NFS4_MAX_MINOR_VERSION) {
err = -EINVAL;
goto error;
}
spin_lock_init(&clp->cl_lock);
INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
INIT_LIST_HEAD(&clp->cl_ds_clients);
rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
clp->cl_mig_gen = 1;
#if IS_ENABLED(CONFIG_NFS_V4_1)
init_waitqueue_head(&clp->cl_lock_waitq);
#endif
INIT_LIST_HEAD(&clp->pending_cb_stateids);
return clp;
error:
nfs_free_client(clp);
return ERR_PTR(err);
} | 1 | [
"CWE-703"
]
| linux | dd99e9f98fbf423ff6d365b37a98e8879170f17c | 280,333,338,733,761,740,000,000,000,000,000,000,000 | 33 | NFSv4: Initialise connection to the server in nfs4_alloc_client()
Set up the connection to the NFSv4 server in nfs4_alloc_client(), before
we've added the struct nfs_client to the net-namespace's nfs_client_list
so that a downed server won't cause other mounts to hang in the trunking
detection code.
Reported-by: Michael Wakabayashi <[email protected]>
Fixes: 5c6e5b60aae4 ("NFS: Fix an Oops in the pNFS files and flexfiles connection setup to the DS")
Signed-off-by: Trond Myklebust <[email protected]> |
XML_GetErrorCode(XML_Parser parser)
{
if (parser == NULL)
return XML_ERROR_INVALID_ARGUMENT;
return parser->m_errorCode;
} | 0 | [
"CWE-611"
]
| libexpat | 11f8838bf99ea0a6f0b76f9760c43704d00c4ff6 | 22,669,004,016,628,624,000,000,000,000,000,000,000 | 6 | xmlparse.c: Fix extraction of namespace prefix from XML name (#186) |
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
struct vhost_flush_struct flush;
if (dev->worker) {
init_completion(&flush.wait_event);
vhost_work_init(&flush.work, vhost_flush_work);
vhost_work_queue(dev, &flush.work);
wait_for_completion(&flush.wait_event);
}
} | 0 | [
"CWE-120"
]
| linux | 060423bfdee3f8bc6e2c1bac97de24d5415e2bc4 | 226,479,958,653,207,730,000,000,000,000,000,000,000 | 12 | vhost: make sure log_num < in_num
The code assumes log_num < in_num everywhere, and that is true as long as
in_num is incremented by descriptor iov count, and log_num by 1. However
this breaks if there's a zero sized descriptor.
As a result, if a malicious guest creates a vring desc with desc.len = 0,
it may cause the host kernel to crash by overflowing the log array. This
bug can be triggered during the VM migration.
There's no need to log when desc.len = 0, so just don't increment log_num
in this case.
Fixes: 3a4d5c94e959 ("vhost_net: a kernel-level virtio server")
Cc: [email protected]
Reviewed-by: Lidong Chen <[email protected]>
Signed-off-by: ruippan <[email protected]>
Signed-off-by: yongduan <[email protected]>
Acked-by: Michael S. Tsirkin <[email protected]>
Reviewed-by: Tyler Hicks <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]> |
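A toy model of the invariant the vhost fix restores, using plain structs instead of vring descriptors: log slots are consumed only for writable descriptors that actually carry data, so a zero-length descriptor can no longer push the log index past the array it was sized for.

```c
#include <stdio.h>

struct desc { unsigned int len; int writable; };

#define MAX_LOG 8

/* Record one log entry per writable, non-empty descriptor; skipping
 * len == 0 keeps log_num bounded by the number of real writes. */
static unsigned int build_log(const struct desc *d, unsigned int n,
                              unsigned int log[MAX_LOG])
{
    unsigned int i, log_num = 0;

    for (i = 0; i < n && log_num < MAX_LOG; i++) {
        if (!d[i].writable || d[i].len == 0)
            continue;
        log[log_num++] = d[i].len;
    }
    return log_num;
}

int main(void)
{
    struct desc ring[] = { {512, 1}, {0, 1}, {256, 0}, {128, 1} };
    unsigned int log[MAX_LOG];
    unsigned int n = build_log(ring, 4, log);
    printf("log entries: %u\n", n);   /* 2: the zero-length one is skipped */
    return 0;
}
```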
resolve_union(struct lyd_node_leaf_list *leaf, struct lys_type *type, int store, int ignore_fail,
struct lys_type **resolved_type)
{
struct ly_ctx *ctx = leaf->schema->module->ctx;
struct lys_type *t;
struct lyd_node *ret;
enum int_log_opts prev_ilo;
int found, success = 0, ext_dep, req_inst;
const char *json_val = NULL;
assert(type->base == LY_TYPE_UNION);
if ((leaf->value_type == LY_TYPE_UNION) || ((leaf->value_type == LY_TYPE_INST) && (leaf->value_flags & LY_VALUE_UNRES))) {
/* either NULL or instid previously converted to JSON */
json_val = lydict_insert(ctx, leaf->value.string, 0);
}
if (store) {
lyd_free_value(leaf->value, leaf->value_type, leaf->value_flags, &((struct lys_node_leaf *)leaf->schema)->type,
leaf->value_str, NULL, NULL, NULL);
memset(&leaf->value, 0, sizeof leaf->value);
}
/* turn logging off, we are going to try to validate the value with all the types in order */
ly_ilo_change(NULL, ILO_IGNORE, &prev_ilo, 0);
t = NULL;
found = 0;
while ((t = lyp_get_next_union_type(type, t, &found))) {
found = 0;
switch (t->base) {
case LY_TYPE_LEAFREF:
if ((ignore_fail == 1) || ((leaf->schema->flags & LYS_LEAFREF_DEP) && (ignore_fail == 2))) {
req_inst = -1;
} else {
req_inst = t->info.lref.req;
}
if (!resolve_leafref(leaf, t->info.lref.path, req_inst, &ret)) {
if (store) {
if (ret && !(leaf->schema->flags & LYS_LEAFREF_DEP)) {
/* valid resolved */
leaf->value.leafref = ret;
leaf->value_type = LY_TYPE_LEAFREF;
} else {
/* valid unresolved */
ly_ilo_restore(NULL, prev_ilo, NULL, 0);
if (!lyp_parse_value(t, &leaf->value_str, NULL, leaf, NULL, NULL, 1, 0)) {
return -1;
}
ly_ilo_change(NULL, ILO_IGNORE, &prev_ilo, NULL);
}
}
success = 1;
}
break;
case LY_TYPE_INST:
ext_dep = check_instid_ext_dep(leaf->schema, (json_val ? json_val : leaf->value_str));
if ((ignore_fail == 1) || (ext_dep && (ignore_fail == 2))) {
req_inst = -1;
} else {
req_inst = t->info.inst.req;
}
if (!resolve_instid((struct lyd_node *)leaf, (json_val ? json_val : leaf->value_str), req_inst, &ret)) {
if (store) {
if (ret && !ext_dep) {
/* valid resolved */
leaf->value.instance = ret;
leaf->value_type = LY_TYPE_INST;
if (json_val) {
lydict_remove(leaf->schema->module->ctx, leaf->value_str);
leaf->value_str = json_val;
json_val = NULL;
}
} else {
/* valid unresolved */
if (json_val) {
/* put the JSON val back */
leaf->value.string = json_val;
json_val = NULL;
} else {
leaf->value.instance = NULL;
}
leaf->value_type = LY_TYPE_INST;
leaf->value_flags |= LY_VALUE_UNRES;
}
}
success = 1;
}
break;
default:
if (lyp_parse_value(t, &leaf->value_str, NULL, leaf, NULL, NULL, store, 0)) {
success = 1;
}
break;
}
if (success) {
break;
}
/* erase possible present and invalid value data */
if (store) {
lyd_free_value(leaf->value, leaf->value_type, leaf->value_flags, t, leaf->value_str, NULL, NULL, NULL);
memset(&leaf->value, 0, sizeof leaf->value);
}
}
/* turn logging back on */
ly_ilo_restore(NULL, prev_ilo, NULL, 0);
if (json_val) {
if (!success) {
/* put the value back for now */
assert(leaf->value_type == LY_TYPE_UNION);
leaf->value.string = json_val;
} else {
/* value was ultimately useless, but we could not have known */
lydict_remove(leaf->schema->module->ctx, json_val);
}
}
if (success) {
if (resolved_type) {
*resolved_type = t;
}
} else if (!ignore_fail || !type->info.uni.has_ptr_type) {
/* not found and it is required */
LOGVAL(ctx, LYE_INVAL, LY_VLOG_LYD, leaf, leaf->value_str ? leaf->value_str : "", leaf->schema->name);
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
} | 0 | [
"CWE-617"
]
| libyang | 5ce30801f9ccc372bbe9b7c98bb5324b15fb010a | 10,116,779,796,105,465,000,000,000,000,000,000,000 | 140 | schema tree BUGFIX freeing nodes with no module set
Context must be passed explicitly for these cases.
Fixes #1452 |
*/
double
xmlXPathStringEvalNumber(const xmlChar *str) {
const xmlChar *cur = str;
double ret;
int ok = 0;
int isneg = 0;
int exponent = 0;
int is_exponent_negative = 0;
#ifdef __GNUC__
unsigned long tmp = 0;
double temp;
#endif
if (cur == NULL) return(0);
while (IS_BLANK_CH(*cur)) cur++;
if ((*cur != '.') && ((*cur < '0') || (*cur > '9')) && (*cur != '-')) {
return(xmlXPathNAN);
}
if (*cur == '-') {
isneg = 1;
cur++;
}
#ifdef __GNUC__
/*
* tmp/temp is a workaround against a gcc compiler bug
* http://veillard.com/gcc.bug
*/
ret = 0;
while ((*cur >= '0') && (*cur <= '9')) {
ret = ret * 10;
tmp = (*cur - '0');
ok = 1;
cur++;
temp = (double) tmp;
ret = ret + temp;
}
#else
ret = 0;
while ((*cur >= '0') && (*cur <= '9')) {
ret = ret * 10 + (*cur - '0');
ok = 1;
cur++;
}
#endif
if (*cur == '.') {
int v, frac = 0;
double fraction = 0;
cur++;
if (((*cur < '0') || (*cur > '9')) && (!ok)) {
return(xmlXPathNAN);
}
while (((*cur >= '0') && (*cur <= '9')) && (frac < MAX_FRAC)) {
v = (*cur - '0');
fraction = fraction * 10 + v;
frac = frac + 1;
cur++;
}
fraction /= my_pow10[frac];
ret = ret + fraction;
while ((*cur >= '0') && (*cur <= '9'))
cur++;
}
if ((*cur == 'e') || (*cur == 'E')) {
cur++;
if (*cur == '-') {
is_exponent_negative = 1;
cur++;
} else if (*cur == '+') {
cur++;
}
while ((*cur >= '0') && (*cur <= '9')) {
exponent = exponent * 10 + (*cur - '0');
cur++;
}
}
while (IS_BLANK_CH(*cur)) cur++;
if (*cur != 0) return(xmlXPathNAN);
if (isneg) ret = -ret;
if (is_exponent_negative) exponent = -exponent;
ret *= pow(10.0, (double)exponent); | 0 | [
"CWE-119"
]
| libxml2 | 91d19754d46acd4a639a8b9e31f50f31c78f8c9c | 329,047,693,583,512,200,000,000,000,000,000,000,000 | 83 | Fix the semantic of XPath axis for namespace/attribute context nodes
The processing of namespace and attributes nodes was not compliant
to the XPath-1.0 specification |
static int ipip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
if (new_mtu < 68 || new_mtu > 0xFFF8 - sizeof(struct iphdr))
return -EINVAL;
dev->mtu = new_mtu;
return 0;
} | 0 | []
| linux-2.6 | d5aa407f59f5b83d2c50ec88f5bf56d40f1f8978 | 219,500,194,405,218,000,000,000,000,000,000,000,000 | 7 | tunnels: fix netns vs proto registration ordering
Same stuff as in ip_gre patch: receive hook can be called before netns
setup is done, oopsing in net_generic().
Signed-off-by: Alexey Dobriyan <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
void controller::write_item(std::shared_ptr<rss_item> item, std::ostream& ostr) {
std::vector<std::pair<LineType, std::string>> lines;
std::vector<linkpair> links; // not used
std::string title(_("Title: "));
title.append(item->title());
lines.push_back(std::make_pair(LineType::wrappable, title));
std::string author(_("Author: "));
author.append(item->author());
lines.push_back(std::make_pair(LineType::wrappable, author));
std::string date(_("Date: "));
date.append(item->pubDate());
lines.push_back(std::make_pair(LineType::wrappable, date));
std::string link(_("Link: "));
link.append(item->link());
lines.push_back(std::make_pair(LineType::softwrappable, link));
if (item->enclosure_url() != "") {
std::string dlurl(_("Podcast Download URL: "));
dlurl.append(item->enclosure_url());
lines.push_back(std::make_pair(LineType::softwrappable, dlurl));
}
lines.push_back(std::make_pair(LineType::wrappable, std::string("")));
htmlrenderer rnd(true);
rnd.render(item->description(), lines, links, item->feedurl());
textformatter txtfmt;
txtfmt.add_lines(lines);
unsigned int width = cfg.get_configvalue_as_int("text-width");
if (width == 0)
width = 80;
ostr << txtfmt.format_text_plain(width) << std::endl;
} | 0 | [
"CWE-943",
"CWE-787"
]
| newsbeuter | 96e9506ae9e252c548665152d1b8968297128307 | 8,776,421,823,579,266,000,000,000,000,000,000,000 | 38 | Sanitize inputs to bookmark-cmd (#591)
Newsbeuter didn't properly shell-escape the arguments passed to the
bookmarking command, which allows a remote attacker to perform remote
code execution by crafting an RSS item whose title and/or URL contain
something interpretable by the shell (most notably subshell
invocations).
This has been reported by Jeriko One <[email protected]>, complete with
PoC and a patch.
This vulnerability was assigned CVE-2017-12904. |
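The usual fix for this class of bug is to quote every untrusted value before it reaches the shell. The sketch below is illustrative only (plain C, not newsbeuter's actual C++ helper); the function name shell_quote is an assumption, not an upstream API. It wraps the value in single quotes and rewrites embedded single quotes as '\'' so the shell sees one literal argument.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Wrap s in single quotes and escape embedded single quotes as '\''. */
    static char *shell_quote(const char *s)
    {
        size_t len = strlen(s);
        char *out = malloc(4 * len + 3);   /* worst case: every char is a quote */
        if (!out)
            return NULL;
        char *p = out;
        *p++ = '\'';
        for (; *s; s++) {
            if (*s == '\'') {
                memcpy(p, "'\\''", 4);
                p += 4;
            } else {
                *p++ = *s;
            }
        }
        *p++ = '\'';
        *p = '\0';
        return out;
    }

    int main(void)
    {
        char *q = shell_quote("title`rm -rf ~`$(evil)");
        if (q) {
            printf("bookmark-cmd %s\n", q);   /* backticks and $() are now inert */
            free(q);
        }
        return 0;
    }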
check_SET_IPV4_DST(const struct ofpact_ipv4 *a OVS_UNUSED,
struct ofpact_check_params *cp)
{
return check_set_ipv4(cp);
} | 0 | [
"CWE-416"
]
| ovs | 77cccc74deede443e8b9102299efc869a52b65b2 | 301,298,756,025,483,550,000,000,000,000,000,000,000 | 5 | ofp-actions: Fix use-after-free while decoding RAW_ENCAP.
While decoding a RAW_ENCAP action, decode_ed_prop() might re-allocate the
ofpbuf if there is not enough space left. However, the function
'decode_NXAST_RAW_ENCAP' continues to use the old pointer to the 'encap'
structure, leading to a write-after-free and incorrect decoding.
==3549105==ERROR: AddressSanitizer: heap-use-after-free on address
0x60600000011a at pc 0x0000005f6cc6 bp 0x7ffc3a2d4410 sp 0x7ffc3a2d4408
WRITE of size 2 at 0x60600000011a thread T0
#0 0x5f6cc5 in decode_NXAST_RAW_ENCAP lib/ofp-actions.c:4461:20
#1 0x5f0551 in ofpact_decode ./lib/ofp-actions.inc2:4777:16
#2 0x5ed17c in ofpacts_decode lib/ofp-actions.c:7752:21
#3 0x5eba9a in ofpacts_pull_openflow_actions__ lib/ofp-actions.c:7791:13
#4 0x5eb9fc in ofpacts_pull_openflow_actions lib/ofp-actions.c:7835:12
#5 0x64bb8b in ofputil_decode_packet_out lib/ofp-packet.c:1113:17
#6 0x65b6f4 in ofp_print_packet_out lib/ofp-print.c:148:13
#7 0x659e3f in ofp_to_string__ lib/ofp-print.c:1029:16
#8 0x659b24 in ofp_to_string lib/ofp-print.c:1244:21
#9 0x65a28c in ofp_print lib/ofp-print.c:1288:28
#10 0x540d11 in ofctl_ofp_parse utilities/ovs-ofctl.c:2814:9
#11 0x564228 in ovs_cmdl_run_command__ lib/command-line.c:247:17
#12 0x56408a in ovs_cmdl_run_command lib/command-line.c:278:5
#13 0x5391ae in main utilities/ovs-ofctl.c:179:9
#14 0x7f6911ce9081 in __libc_start_main (/lib64/libc.so.6+0x27081)
#15 0x461fed in _start (utilities/ovs-ofctl+0x461fed)
Fix that by getting a new pointer before using.
Credit to OSS-Fuzz.
Fuzzer regression test will fail only with AddressSanitizer enabled.
Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=27851
Fixes: f839892a206a ("OF support and translation of generic encap and decap")
Acked-by: William Tu <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]> |
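The pattern behind this bug is generic: any append into a growable buffer may realloc the backing storage, so pointers taken before the append become stale. A minimal standalone C sketch (not OVS code; struct buf and buf_append are made-up names) keeps offsets and re-derives the pointer after every call that can grow the buffer:

    #include <stdlib.h>
    #include <string.h>

    struct buf { char *data; size_t size, capacity; };

    /* May realloc b->data, invalidating any pointer previously derived from it. */
    static size_t buf_append(struct buf *b, const void *src, size_t n)
    {
        if (b->size + n > b->capacity) {
            b->capacity = (b->size + n) * 2;
            char *nd = realloc(b->data, b->capacity);
            if (!nd)
                abort();
            b->data = nd;
        }
        size_t off = b->size;
        memcpy(b->data + off, src, n);
        b->size += n;
        return off;
    }

    int main(void)
    {
        struct buf b = { malloc(8), 0, 8 };
        size_t hdr_ofs = buf_append(&b, "HDR0", 4);            /* keep the offset, not the pointer */
        buf_append(&b, "0123456789abcdef0123456789abcdef", 32); /* forces a realloc */
        char *hdr = b.data + hdr_ofs;                          /* re-derive after any possible realloc */
        hdr[3] = '1';                                          /* safe: points into the live allocation */
        free(b.data);
        return 0;
    }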
static void netlink_remove(struct sock *sk)
{
netlink_table_grab();
if (sk_del_node_init(sk))
nl_table[sk->sk_protocol].hash.entries--;
if (nlk_sk(sk)->subscriptions)
__sk_del_bind_node(sk);
netlink_table_ungrab();
} | 0 | []
| linux-2.6 | 16e5726269611b71c930054ffe9b858c1cea88eb | 63,809,175,090,469,490,000,000,000,000,000,000,000 | 9 | af_unix: dont send SCM_CREDENTIALS by default
Since commit 7361c36c5224 (af_unix: Allow credentials to work across
user and pid namespaces) af_unix performance dropped a lot.
This is because we now take a reference on pid and cred in each write(),
and release them in read(), usually done from another process,
eventually from another cpu. This triggers false sharing.
# Events: 154K cycles
#
# Overhead Command Shared Object Symbol
# ........ ....... .................. .........................
#
10.40% hackbench [kernel.kallsyms] [k] put_pid
8.60% hackbench [kernel.kallsyms] [k] unix_stream_recvmsg
7.87% hackbench [kernel.kallsyms] [k] unix_stream_sendmsg
6.11% hackbench [kernel.kallsyms] [k] do_raw_spin_lock
4.95% hackbench [kernel.kallsyms] [k] unix_scm_to_skb
4.87% hackbench [kernel.kallsyms] [k] pid_nr_ns
4.34% hackbench [kernel.kallsyms] [k] cred_to_ucred
2.39% hackbench [kernel.kallsyms] [k] unix_destruct_scm
2.24% hackbench [kernel.kallsyms] [k] sub_preempt_count
1.75% hackbench [kernel.kallsyms] [k] fget_light
1.51% hackbench [kernel.kallsyms] [k]
__mutex_lock_interruptible_slowpath
1.42% hackbench [kernel.kallsyms] [k] sock_alloc_send_pskb
This patch includes SCM_CREDENTIALS information in an af_unix message/skb
only if requested by the sender [see man 7 unix for details on how to include
ancillary data using the sendmsg() system call].
Note: This might break buggy applications that expected SCM_CREDENTIAL
from an unaware write() system call, and receiver not using SO_PASSCRED
socket option.
If SOCK_PASSCRED is set on source or destination socket, we still
include credentials for mere write() syscalls.
Performance boost in hackbench : more than 50% gain on a 16 thread
machine (2 quad-core cpus, 2 threads per core)
hackbench 20 thread 2000
4.228 sec instead of 9.102 sec
Signed-off-by: Eric Dumazet <[email protected]>
Acked-by: Tim Chen <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
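For reference, this is how a sender opts in: it attaches an SCM_CREDENTIALS control message to sendmsg(), as described in man 7 unix. The sketch below is a hedged, Linux-specific illustration; the helper name send_with_creds is not from the kernel patch.

    #define _GNU_SOURCE
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <unistd.h>

    /* Send one byte over a connected AF_UNIX socket with explicit
     * SCM_CREDENTIALS ancillary data (see man 7 unix). */
    static ssize_t send_with_creds(int fd, char byte)
    {
        struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
        union { char buf[CMSG_SPACE(sizeof(struct ucred))]; struct cmsghdr align; } u;
        struct ucred creds = { .pid = getpid(), .uid = getuid(), .gid = getgid() };
        struct msghdr msg = { 0 };

        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;
        msg.msg_control = u.buf;
        msg.msg_controllen = sizeof(u.buf);

        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_CREDENTIALS;
        cmsg->cmsg_len = CMSG_LEN(sizeof(creds));
        memcpy(CMSG_DATA(cmsg), &creds, sizeof(creds));

        return sendmsg(fd, &msg, 0);
    }

    int main(void)
    {
        int sv[2];
        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
            return 1;
        send_with_creds(sv[0], 'x');
        close(sv[0]);
        close(sv[1]);
        return 0;
    }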
void dump_continue(struct req_state * const s)
{
try {
RESTFUL_IO(s)->send_100_continue();
} catch (rgw::io::Exception& e) {
ldout(s->cct, 0) << "ERROR: RESTFUL_IO(s)->send_100_continue() returned err="
<< e.what() << dendl;
}
} | 0 | [
"CWE-770"
]
| ceph | ab29bed2fc9f961fe895de1086a8208e21ddaddc | 189,563,938,681,060,180,000,000,000,000,000,000,000 | 9 | rgw: fix issues with 'enforce bounds' patch
The patch to enforce bounds on max-keys/max-uploads/max-parts had a few
issues that would prevent us from compiling it. Instead of changing the
code provided by the submitter, we're addressing them in a separate
commit to maintain the DCO.
Signed-off-by: Joao Eduardo Luis <[email protected]>
Signed-off-by: Abhishek Lekshmanan <[email protected]>
(cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a)
mimic specific fixes:
As the largeish change from master g_conf() isn't in mimic yet, use the g_conf
global structure, also make rgw_op use the value from req_info ceph context as
we do for all the requests |
GF_Err stsd_AddBox(GF_Box *s, GF_Box *a)
{
GF_UnknownBox *def;
GF_SampleDescriptionBox *ptr = (GF_SampleDescriptionBox *)s;
if (!a) return GF_OK;
switch (a->type) {
case GF_ISOM_BOX_TYPE_MP4S:
case GF_ISOM_BOX_TYPE_ENCS:
case GF_ISOM_BOX_TYPE_MP4A:
case GF_ISOM_BOX_TYPE_ENCA:
case GF_ISOM_BOX_TYPE_MP4V:
case GF_ISOM_BOX_TYPE_ENCV:
case GF_ISOM_BOX_TYPE_RESV:
case GF_ISOM_BOX_TYPE_GHNT:
case GF_ISOM_BOX_TYPE_RTP_STSD:
case GF_ISOM_BOX_TYPE_SRTP_STSD:
case GF_ISOM_BOX_TYPE_FDP_STSD:
case GF_ISOM_BOX_TYPE_RRTP_STSD:
case GF_ISOM_BOX_TYPE_RTCP_STSD:
case GF_ISOM_BOX_TYPE_AVC1:
case GF_ISOM_BOX_TYPE_AVC2:
case GF_ISOM_BOX_TYPE_AVC3:
case GF_ISOM_BOX_TYPE_AVC4:
case GF_ISOM_BOX_TYPE_SVC1:
case GF_ISOM_BOX_TYPE_MVC1:
case GF_ISOM_BOX_TYPE_HVC1:
case GF_ISOM_BOX_TYPE_HEV1:
case GF_ISOM_BOX_TYPE_HVC2:
case GF_ISOM_BOX_TYPE_HEV2:
case GF_ISOM_BOX_TYPE_HVT1:
case GF_ISOM_BOX_TYPE_LHV1:
case GF_ISOM_BOX_TYPE_LHE1:
case GF_ISOM_BOX_TYPE_TX3G:
case GF_ISOM_BOX_TYPE_TEXT:
case GF_ISOM_BOX_TYPE_ENCT:
case GF_ISOM_BOX_TYPE_METX:
case GF_ISOM_BOX_TYPE_METT:
case GF_ISOM_BOX_TYPE_STXT:
case GF_ISOM_BOX_TYPE_DIMS:
case GF_ISOM_BOX_TYPE_AC3:
case GF_ISOM_BOX_TYPE_EC3:
case GF_ISOM_BOX_TYPE_LSR1:
case GF_ISOM_BOX_TYPE_WVTT:
case GF_ISOM_BOX_TYPE_STPP:
case GF_ISOM_BOX_TYPE_SBTT:
case GF_ISOM_BOX_TYPE_ELNG:
case GF_ISOM_BOX_TYPE_MP3:
case GF_ISOM_BOX_TYPE_JPEG:
case GF_ISOM_BOX_TYPE_JP2K:
case GF_ISOM_BOX_TYPE_PNG:
case GF_ISOM_SUBTYPE_3GP_AMR:
case GF_ISOM_SUBTYPE_3GP_AMR_WB:
case GF_ISOM_SUBTYPE_3GP_EVRC:
case GF_ISOM_SUBTYPE_3GP_QCELP:
case GF_ISOM_SUBTYPE_3GP_SMV:
case GF_ISOM_SUBTYPE_3GP_H263:
return gf_isom_box_add_default((GF_Box*)ptr, a);
//unknown sample description: we need a specific box to handle the data ref index
//rather than a default box ...
case GF_ISOM_BOX_TYPE_UNKNOWN:
def = (GF_UnknownBox *)a;
/*we need at least 8 bytes for unknown sample entries*/
if (def->dataSize < 8) {
gf_isom_box_del(a);
return GF_OK;
}
return gf_isom_box_add_default((GF_Box*)ptr, a);
default:
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Cannot process box of type %s\n", gf_4cc_to_str(a->type)));
return GF_ISOM_INVALID_FILE;
}
} | 0 | [
"CWE-125"
]
| gpac | bceb03fd2be95097a7b409ea59914f332fb6bc86 | 208,496,394,731,206,130,000,000,000,000,000,000,000 | 75 | fixed 2 possible heap overflows (inc. #1088) |
static void wait(CImgDisplay& disp1, CImgDisplay& disp2, CImgDisplay& disp3) {
disp1._is_event = disp2._is_event = disp3._is_event = false;
while ((!disp1._is_closed || !disp2._is_closed || !disp3._is_closed) &&
!disp1._is_event && !disp2._is_event && !disp3._is_event) wait_all();
} | 0 | [
"CWE-770"
]
| cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 285,700,891,349,723,400,000,000,000,000,000,000,000 | 5 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
static int seed_from_timestamp_and_pid(uint32_t *seed) {
#ifdef HAVE_GETTIMEOFDAY
/* XOR of seconds and microseconds */
struct timeval tv;
gettimeofday(&tv, NULL);
*seed = (uint32_t)tv.tv_sec ^ (uint32_t)tv.tv_usec;
#else
/* Seconds only */
*seed = (uint32_t)time(NULL);
#endif
/* XOR with PID for more randomness */
#if defined(_WIN32)
*seed ^= (uint32_t)_getpid();
#elif defined(HAVE_GETPID)
*seed ^= (uint32_t)getpid();
#endif
return 0;
} | 0 | [
"CWE-310"
]
| jansson | 8f80c2d83808150724d31793e6ade92749b1faa4 | 224,245,594,782,554,320,000,000,000,000,000,000,000 | 20 | CVE-2013-6401: Change hash function, randomize hashes
Thanks to Florian Weimer and Eric Sesterhenn for reporting, reviewing
and testing. |
ipmi_sdr_print_sensor_eventonly(struct ipmi_intf *intf,
struct sdr_record_eventonly_sensor *sensor)
{
char desc[17];
if (!sensor)
return -1;
memset(desc, 0, sizeof (desc));
snprintf(desc, sizeof(desc), "%.*s", (sensor->id_code & 0x1f) + 1, sensor->id_string);
if (verbose) {
printf("Sensor ID : %s (0x%x)\n",
sensor->id_code ? desc : "", sensor->keys.sensor_num);
printf("Entity ID : %d.%d (%s)\n",
sensor->entity.id, sensor->entity.instance,
val2str(sensor->entity.id, entity_id_vals));
printf("Sensor Type : %s (0x%02x)\n",
ipmi_get_sensor_type(intf, sensor->sensor_type),
sensor->sensor_type);
lprintf(LOG_DEBUG, "Event Type Code : 0x%02x",
sensor->event_type);
printf("\n");
} else {
if (csv_output)
printf("%s,%02Xh,ns,%d.%d,Event-Only\n",
sensor->id_code ? desc : "",
sensor->keys.sensor_num,
sensor->entity.id, sensor->entity.instance);
else if (sdr_extended)
printf("%-16s | %02Xh | ns | %2d.%1d | Event-Only\n",
sensor->id_code ? desc : "",
sensor->keys.sensor_num,
sensor->entity.id, sensor->entity.instance);
else
printf("%-16s | Event-Only | ns\n",
sensor->id_code ? desc : "");
}
return 0;
} | 0 | [
"CWE-120"
]
| ipmitool | 7ccea283dd62a05a320c1921e3d8d71a87772637 | 13,214,039,383,232,163,000,000,000,000,000,000,000 | 41 | fru, sdr: Fix id_string buffer overflows
Final part of the fixes for CVE-2020-5208, see
https://github.com/ipmitool/ipmitool/security/advisories/GHSA-g659-9qxw-p7cp
9 variants of stack buffer overflow when parsing `id_string` field of
SDR records returned from `CMD_GET_SDR` command.
SDR record structs have an `id_code` field, and an `id_string` `char`
array.
The length of `id_string` is calculated as `(id_code & 0x1f) + 1`,
which can be larger than expected 16 characters (if `id_code = 0xff`,
then length will be `(0xff & 0x1f) + 1 = 32`).
In numerous places, this can cause a stack buffer overflow when copying
into a fixed buffer of size `17` bytes using this calculated length. |
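The essence of the fix is that the length derived from id_code is attacker-influenced and can exceed the 16-character destination, so every copy must also be clamped to the destination size, as the snprintf("%.*s", ...) call in the function above already does. A small standalone illustration with made-up names:

    #include <stdint.h>
    #include <stdio.h>

    /* The SDR-derived length can be up to (0xff & 0x1f) + 1 = 32, so the copy
     * must also be bounded by the destination size, not just by src_len. */
    static void copy_id_string(char *dst, size_t dst_size,
                               uint8_t id_code, const char *id_string)
    {
        int src_len = (id_code & 0x1f) + 1;                   /* up to 32 */
        snprintf(dst, dst_size, "%.*s", src_len, id_string);  /* clamped to dst_size - 1 */
    }

    int main(void)
    {
        char desc[17];
        copy_id_string(desc, sizeof(desc), 0xff, "an id string longer than sixteen chars");
        printf("%s\n", desc);
        return 0;
    }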
diff_lnum_win(linenr_T lnum, win_T *wp)
{
diff_T *dp;
int idx;
int i;
linenr_T n;
idx = diff_buf_idx(curbuf);
if (idx == DB_COUNT) // safety check
return (linenr_T)0;
if (curtab->tp_diff_invalid)
ex_diffupdate(NULL); // update after a big change
// search for a change that includes "lnum" in the list of diffblocks.
FOR_ALL_DIFFBLOCKS_IN_TAB(curtab, dp)
if (lnum <= dp->df_lnum[idx] + dp->df_count[idx])
break;
// When after the last change, compute relative to the last line number.
if (dp == NULL)
return wp->w_buffer->b_ml.ml_line_count
- (curbuf->b_ml.ml_line_count - lnum);
// Find index for "wp".
i = diff_buf_idx(wp->w_buffer);
if (i == DB_COUNT) // safety check
return (linenr_T)0;
n = lnum + (dp->df_lnum[i] - dp->df_lnum[idx]);
if (n > dp->df_lnum[i] + dp->df_count[i])
n = dp->df_lnum[i] + dp->df_count[i];
return n;
} | 0 | [
"CWE-787"
]
| vim | c101abff4c6756db4f5e740fde289decb9452efa | 247,537,944,538,580,200,000,000,000,000,000,000,000 | 34 | patch 8.2.5164: invalid memory access after diff buffer manipulations
Problem: Invalid memory access after diff buffer manipulations.
Solution: Use zero offset when change removes all lines in a diff block. |
ShapeHandle InferenceContext::UnknownShape() {
return shape_manager_.UnknownShape();
} | 0 | [
"CWE-190"
]
| tensorflow | acd56b8bcb72b163c834ae4f18469047b001fadf | 300,567,496,334,509,760,000,000,000,000,000,000,000 | 3 | Fix security vulnerability with SpaceToBatchNDOp.
PiperOrigin-RevId: 445527615 |
static int acp_hw_init(void *handle)
{
int r, i;
uint64_t acp_base;
u32 val = 0;
u32 count = 0;
struct device *dev;
struct i2s_platform_data *i2s_pdata;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
const struct amdgpu_ip_block *ip_block =
amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
if (!ip_block)
return -EINVAL;
r = amd_acp_hw_init(adev->acp.cgs_device,
ip_block->version->major, ip_block->version->minor);
/* -ENODEV means board uses AZ rather than ACP */
if (r == -ENODEV) {
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
return 0;
} else if (r) {
return r;
}
if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
return -EINVAL;
acp_base = adev->rmmio_base;
adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
if (adev->acp.acp_genpd == NULL)
return -ENOMEM;
adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
adev->acp.acp_genpd->gpd.power_on = acp_poweron;
adev->acp.acp_genpd->adev = adev;
pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
GFP_KERNEL);
if (adev->acp.acp_cell == NULL)
return -ENOMEM;
adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
if (adev->acp.acp_res == NULL) {
kfree(adev->acp.acp_cell);
return -ENOMEM;
}
i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
if (i2s_pdata == NULL) {
kfree(adev->acp.acp_res);
kfree(adev->acp.acp_cell);
return -ENOMEM;
}
switch (adev->asic_type) {
case CHIP_STONEY:
i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
break;
default:
i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
}
i2s_pdata[0].cap = DWC_I2S_PLAY;
i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
switch (adev->asic_type) {
case CHIP_STONEY:
i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
DW_I2S_QUIRK_COMP_PARAM1 |
DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
break;
default:
i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
DW_I2S_QUIRK_COMP_PARAM1;
}
i2s_pdata[1].cap = DWC_I2S_RECORD;
i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;
i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
switch (adev->asic_type) {
case CHIP_STONEY:
i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
break;
default:
break;
}
i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;
adev->acp.acp_res[0].name = "acp2x_dma";
adev->acp.acp_res[0].flags = IORESOURCE_MEM;
adev->acp.acp_res[0].start = acp_base;
adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;
adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
adev->acp.acp_res[1].flags = IORESOURCE_MEM;
adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;
adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
adev->acp.acp_res[2].flags = IORESOURCE_MEM;
adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;
adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
adev->acp.acp_res[3].flags = IORESOURCE_MEM;
adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;
adev->acp.acp_res[4].name = "acp2x_dma_irq";
adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, 162);
adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;
adev->acp.acp_cell[0].name = "acp_audio_dma";
adev->acp.acp_cell[0].num_resources = 5;
adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
adev->acp.acp_cell[0].platform_data = &adev->asic_type;
adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);
adev->acp.acp_cell[1].name = "designware-i2s";
adev->acp.acp_cell[1].num_resources = 1;
adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);
adev->acp.acp_cell[2].name = "designware-i2s";
adev->acp.acp_cell[2].num_resources = 1;
adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);
adev->acp.acp_cell[3].name = "designware-i2s";
adev->acp.acp_cell[3].num_resources = 1;
adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);
r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
ACP_DEVS);
if (r)
return r;
for (i = 0; i < ACP_DEVS ; i++) {
dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
if (r) {
dev_err(dev, "Failed to add dev to genpd\n");
return r;
}
}
/* Assert Soft reset of ACP */
val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
val |= ACP_SOFT_RESET__SoftResetAud_MASK;
cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
while (true) {
val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
(val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
break;
if (--count == 0) {
dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
return -ETIMEDOUT;
}
udelay(100);
}
/* Enable clock to ACP and wait until the clock is enabled */
val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
val = val | ACP_CONTROL__ClkEn_MASK;
cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);
count = ACP_CLOCK_EN_TIME_OUT_VALUE;
while (true) {
val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
if (val & (u32) 0x1)
break;
if (--count == 0) {
dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
return -ETIMEDOUT;
}
udelay(100);
}
/* Deassert the SOFT RESET flags */
val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
return 0;
} | 1 | [
"CWE-400",
"CWE-401"
]
| linux | 57be09c6e8747bf48704136d9e3f92bfb93f5725 | 106,940,207,875,544,390,000,000,000,000,000,000,000 | 212 | drm/amdgpu: fix multiple memory leaks in acp_hw_init
In acp_hw_init there are some allocations that need to be released in
case of failure:
1- adev->acp.acp_genpd should be released if any allocation attempt for
adev->acp.acp_cell, adev->acp.acp_res or i2s_pdata fails.
2- all of those allocations should be released if
mfd_add_hotplug_devices or pm_genpd_add_device fails.
3- Release is also needed when the timeout values expire.
Reviewed-by: Christian König <[email protected]>
Signed-off-by: Navid Emamdoost <[email protected]>
Signed-off-by: Alex Deucher <[email protected]> |
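This record is the vulnerable revision (target 1): once the later allocations and registrations fail, the function returns without freeing acp_genpd, acp_cell, acp_res or i2s_pdata, as the commit message above describes. One common way to structure such init paths is a goto-based unwind where each failure label frees everything allocated before it; the sketch below is a generic pattern, not the actual upstream fix.

    #include <stdlib.h>

    struct ctx { void *genpd, *cell, *res; };

    /* Generic error-unwind pattern: each failure jumps to a label that
     * releases everything allocated up to that point. */
    static int ctx_init(struct ctx *c)
    {
        c->genpd = calloc(1, 64);
        if (!c->genpd)
            return -1;

        c->cell = calloc(4, 32);
        if (!c->cell)
            goto free_genpd;

        c->res = calloc(5, 16);
        if (!c->res)
            goto free_cell;

        return 0;

    free_cell:
        free(c->cell);
    free_genpd:
        free(c->genpd);
        return -1;
    }

    int main(void)
    {
        struct ctx c = { 0 };
        if (ctx_init(&c) == 0) {
            free(c.res);
            free(c.cell);
            free(c.genpd);
        }
        return 0;
    }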
static int decode_attr_files_total(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
{
__be32 *p;
int status = 0;
*res = 0;
if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_TOTAL - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_FILES_TOTAL)) {
READ_BUF(8);
READ64(*res);
bitmap[0] &= ~FATTR4_WORD0_FILES_TOTAL;
}
dprintk("%s: files total=%Lu\n", __func__, (unsigned long long)*res);
return status;
} | 0 | [
"CWE-703"
]
| linux | dc0b027dfadfcb8a5504f7d8052754bf8d501ab9 | 220,257,130,615,511,770,000,000,000,000,000,000,000 | 16 | NFSv4: Convert the open and close ops to use fmode
Signed-off-by: Trond Myklebust <[email protected]> |
int run_command(char *buf, size_t buf_size, int (*child_fn)(void *), void *args)
{
pid_t child;
int ret, fret, pipefd[2];
ssize_t bytes;
/* Make sure our callers do not receive uninitialized memory. */
if (buf_size > 0 && buf)
buf[0] = '\0';
if (pipe(pipefd) < 0) {
SYSERROR("failed to create pipe");
return -1;
}
child = lxc_raw_clone(0);
if (child < 0) {
close(pipefd[0]);
close(pipefd[1]);
SYSERROR("failed to create new process");
return -1;
}
if (child == 0) {
/* Close the read-end of the pipe. */
close(pipefd[0]);
/* Redirect std{err,out} to write-end of the
* pipe.
*/
ret = dup2(pipefd[1], STDOUT_FILENO);
if (ret >= 0)
ret = dup2(pipefd[1], STDERR_FILENO);
/* Close the write-end of the pipe. */
close(pipefd[1]);
if (ret < 0) {
SYSERROR("failed to duplicate std{err,out} file descriptor");
exit(EXIT_FAILURE);
}
/* Does not return. */
child_fn(args);
ERROR("failed to exec command");
exit(EXIT_FAILURE);
}
/* close the write-end of the pipe */
close(pipefd[1]);
if (buf && buf_size > 0) {
bytes = read(pipefd[0], buf, buf_size - 1);
if (bytes > 0)
buf[bytes - 1] = '\0';
}
fret = wait_for_pid(child);
/* close the read-end of the pipe */
close(pipefd[0]);
return fret;
} | 0 | [
"CWE-417"
]
| lxc | 5eb45428b312e978fb9e294dde16efb14dd9fa4d | 128,342,449,157,440,300,000,000,000,000,000,000,000 | 63 | CVE 2018-6556: verify netns fd in lxc-user-nic
Signed-off-by: Christian Brauner <[email protected]> |
Gets the Unix timestamp.
*/
PHP_FUNCTION(date_timestamp_get)
{
zval *object;
php_date_obj *dateobj;
long timestamp;
int error;
if (zend_parse_method_parameters(ZEND_NUM_ARGS() TSRMLS_CC, getThis(), "O", &object, date_ce_interface) == FAILURE) {
RETURN_FALSE;
}
dateobj = (php_date_obj *) zend_object_store_get_object(object TSRMLS_CC);
DATE_CHECK_INITIALIZED(dateobj->time, DateTime);
timelib_update_ts(dateobj->time, NULL);
timestamp = timelib_date_to_int(dateobj->time, &error);
if (error) {
RETURN_FALSE;
} else {
RETVAL_LONG(timestamp); | 0 | []
| php-src | bb057498f7457e8b2eba98332a3bad434de4cf12 | 34,737,071,991,465,140,000,000,000,000,000,000,000 | 21 | Fix #70277: new DateTimeZone($foo) is ignoring text after null byte
The DateTimeZone constructors are not binary safe. They parse the timezone
as a string, but discard the length when calling timezone_initialize(). This
patch adds a tz_len parameter and a respective check to timezone_initialize(). |
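The underlying issue is a classic binary-safety check: a C-string API only sees data up to the first NUL, so a caller that knows the real length has to verify the two agree. A small illustration, not PHP's actual code; tz_name_ok is a made-up helper.

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Reject names whose reported length disagrees with what strlen() sees,
     * i.e. names containing an embedded NUL byte. */
    static bool tz_name_ok(const char *name, size_t name_len)
    {
        return name != NULL && strlen(name) == name_len;
    }

    int main(void)
    {
        const char raw[] = "UTC\0ignored-tail";
        printf("%d\n", tz_name_ok(raw, sizeof(raw) - 1));   /* 0: embedded NUL detected */
        printf("%d\n", tz_name_ok("UTC", 3));               /* 1: consistent length */
        return 0;
    }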
generate_name(cms_context *cms, SECItem *der, CERTName *certname)
{
void *marka = PORT_ArenaMark(cms->arena);
CERTRDN **rdns = certname->rdns;
CERTRDN *rdn;
int num_items = 0;
int rc = 0;
while (rdns && (rdn = *rdns++) != NULL) {
CERTAVA **avas = rdn->avas;
while (avas && ((*avas++) != NULL))
num_items++;
}
if (num_items == 0) {
PORT_ArenaRelease(cms->arena, marka);
cnreterr(-1, cms, "No name items to encode");
}
SECItem items[num_items];
int i = 0;
rdns = certname->rdns;
while (rdns && (rdn = *rdns++) != NULL) {
CERTAVA **avas = rdn->avas;
CERTAVA *ava;
while (avas && (ava = *avas++) != NULL) {
SECItem avader;
rc = generate_ava(cms, &avader, ava);
if (rc < 0) {
PORT_ArenaRelease(cms->arena, marka);
return -1;
}
SECItem *list[2] = {
&avader,
NULL,
};
rc = wrap_in_set(cms, &items[i], list);
if (rc < 0) {
PORT_ArenaRelease(cms->arena, marka);
return -1;
}
i++;
}
}
wrap_in_seq(cms, der, &items[0], num_items);
PORT_ArenaUnmark(cms->arena, marka);
return 0;
} | 0 | [
"CWE-787"
]
| pesign | b879dda52f8122de697d145977c285fb0a022d76 | 317,600,618,072,338,000,000,000,000,000,000,000,000 | 52 | Handle NULL pwdata in cms_set_pw_data()
When 12f16710ee44ef64ddb044a3523c3c4c4d90039a rewrote this function, it
didn't handle the NULL pwdata invocation from daemon.c. This leads to an
explicit NULL dereference and a crash on all attempts to daemonize pesign.
Signed-off-by: Robbie Harwood <[email protected]> |
date_time_from_opaque(tvbuff_t *tvb, guint32 offset, guint32 data_len)
{
char *str;
switch (data_len) {
case 4: /* YYYY-MM-DD[T00:00:00Z] */
str = wmem_strdup_printf(wmem_packet_scope(), "%%DateTime: "
"%02x%02x-%02x-%02xT00:00:00Z",
tvb_get_guint8(tvb, offset),
tvb_get_guint8(tvb, offset + 1),
tvb_get_guint8(tvb, offset + 2),
tvb_get_guint8(tvb, offset + 3));
break;
case 5: /* YYYY-MM-DDThh[:00:00Z] */
str = wmem_strdup_printf(wmem_packet_scope(), "%%DateTime: "
"%02x%02x-%02x-%02xT%02x:00:00Z",
tvb_get_guint8(tvb, offset),
tvb_get_guint8(tvb, offset + 1),
tvb_get_guint8(tvb, offset + 2),
tvb_get_guint8(tvb, offset + 3),
tvb_get_guint8(tvb, offset + 4));
break;
case 6: /* YYYY-MM-DDThh:mm[:00Z] */
str = wmem_strdup_printf(wmem_packet_scope(), "%%DateTime: "
"%02x%02x-%02x-%02xT%02x:%02x:00Z",
tvb_get_guint8(tvb, offset),
tvb_get_guint8(tvb, offset + 1),
tvb_get_guint8(tvb, offset + 2),
tvb_get_guint8(tvb, offset + 3),
tvb_get_guint8(tvb, offset + 4),
tvb_get_guint8(tvb, offset + 5));
break;
case 7: /* YYYY-MM-DDThh:mm[:00Z] */
str = wmem_strdup_printf(wmem_packet_scope(), "%%DateTime: "
"%02x%02x-%02x-%02xT%02x:%02x:%02xZ",
tvb_get_guint8(tvb, offset),
tvb_get_guint8(tvb, offset + 1),
tvb_get_guint8(tvb, offset + 2),
tvb_get_guint8(tvb, offset + 3),
tvb_get_guint8(tvb, offset + 4),
tvb_get_guint8(tvb, offset + 5),
tvb_get_guint8(tvb, offset + 6));
break;
default:
str = wmem_strdup_printf(wmem_packet_scope(), "<Error: invalid binary %%DateTime "
"(%d bytes of opaque data)>", data_len);
break;
}
return str;
} | 0 | [
"CWE-399",
"CWE-119",
"CWE-787"
]
| wireshark | b8e0d416898bb975a02c1b55883342edc5b4c9c0 | 296,707,051,903,058,730,000,000,000,000,000,000,000 | 51 | WBXML: add a basic sanity check for offset overflow
This is a naive approach that allows detecting that something went wrong,
without the need to replace all proto_tree_add_text() calls, as was
done in the master-2.0 branch.
Bug: 12408
Change-Id: Ia14905005e17ae322c2fc639ad5e491fa08b0108
Reviewed-on: https://code.wireshark.org/review/15310
Reviewed-by: Michael Mann <[email protected]>
Reviewed-by: Pascal Quantin <[email protected]> |
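A generic form of such a sanity check (a sketch, not the dissector's actual code) rejects any offset/length pair that would wrap around or run past the end of the captured buffer, written so the addition itself can never overflow:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool range_ok(size_t buf_len, uint32_t off, uint32_t len)
    {
        if (off > buf_len)
            return false;
        return len <= buf_len - off;   /* phrased so off + len is never computed */
    }

    int main(void)
    {
        printf("%d\n", range_ok(100, 90, 20));   /* 0: 90 + 20 runs past a 100-byte buffer */
        printf("%d\n", range_ok(100, 90, 10));   /* 1: exactly fits */
        return 0;
    }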
R_API ut8* r_socket_slurp(RSocket *s, int *len) {
return NULL;
} | 0 | [
"CWE-78"
]
| radare2 | 04edfa82c1f3fa2bc3621ccdad2f93bdbf00e4f9 | 161,267,827,103,329,640,000,000,000,000,000,000,000 | 3 | Fix command injection on PDB download (#16966)
* Fix r_sys_mkdirp with absolute path on Windows
* Fix build with --with-openssl
* Use RBuffer in r_socket_http_answer()
* r_socket_http_answer: Fix read for big responses
* Implement r_str_escape_sh()
* Cleanup r_socket_connect() on Windows
* Fix socket being created without a protocol
* Fix socket connect with SSL ##socket
* Use select() in r_socket_ready()
* Fix read failing if received only protocol answer
* Fix double-free
* r_socket_http_get: Fail if req. SSL with no support
* Follow redirects in r_socket_http_answer()
* Fix r_socket_http_get result length with R2_CURL=1
* Also follow redirects
* Avoid using curl for downloading PDBs
* Use r_socket_http_get() on UNIXs
* Use WinINet API on Windows for r_socket_http_get()
* Fix command injection
* Fix r_sys_cmd_str_full output for binary data
* Validate GUID on PDB download
* Pass depth to socket_http_get_recursive()
* Remove 'r_' and '__' from static function names
* Fix is_valid_guid
* Fix for comments |
double Grego::fieldsToDay(int32_t year, int32_t month, int32_t dom) {
int32_t y = year - 1;
double julian = 365 * y + ClockMath::floorDivide(y, 4) + (JULIAN_1_CE - 3) + // Julian cal
ClockMath::floorDivide(y, 400) - ClockMath::floorDivide(y, 100) + 2 + // => Gregorian cal
DAYS_BEFORE[month + (isLeapYear(year) ? 12 : 0)] + dom; // => month/dom
return julian - JULIAN_1970_CE; // JD => epoch day
} | 0 | [
"CWE-190"
]
| icu | 71dd84d4ffd6600a70e5bca56a22b957e6642bd4 | 259,245,072,432,821,230,000,000,000,000,000,000,000 | 10 | ICU-12504 in ICU4C Persian cal, use int64_t math for one operation to avoid overflow; add tests in C and J
X-SVN-Rev: 40654 |
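The general shape of this kind of fix is to widen one operand so the multiplication is performed in 64-bit arithmetic before it can overflow. The snippet below only illustrates that idea; it is not the actual ICU Persian-calendar change.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int32_t year = 80000000;               /* large enough that 365 * year does not fit in int32_t */
        /* int32_t bad = 365 * year;              would overflow (undefined behavior) */
        int64_t days = (int64_t)365 * year;    /* cast one operand: the multiply happens in 64 bits */
        printf("%lld\n", (long long)days);
        return 0;
    }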
static void xt_client_simulate_focus(Widget w, int type)
{
XEvent xevent;
memset(&xevent, 0, sizeof(xevent));
xevent.xfocus.type = type;
xevent.xfocus.window = XtWindow(w);
xevent.xfocus.display = XtDisplay(w);
xevent.xfocus.mode = NotifyNormal;
xevent.xfocus.detail = NotifyAncestor;
trap_errors();
XSendEvent(XtDisplay(w), xevent.xfocus.window, False, NoEventMask, &xevent);
XSync(XtDisplay(w), False);
untrap_errors();
} | 0 | [
"CWE-264"
]
| nspluginwrapper | 7e4ab8e1189846041f955e6c83f72bc1624e7a98 | 207,472,761,618,834,650,000,000,000,000,000,000,000 | 15 | Support all the new variables added |
static void qxl_reset_state(PCIQXLDevice *d)
{
QXLRom *rom = d->rom;
qxl_check_state(d);
d->shadow_rom.update_id = cpu_to_le32(0);
*rom = d->shadow_rom;
qxl_rom_set_dirty(d);
init_qxl_ram(d);
d->num_free_res = 0;
d->last_release = NULL;
memset(&d->ssd.dirty, 0, sizeof(d->ssd.dirty));
qxl_update_irq(d);
} | 0 | [
"CWE-476"
]
| qemu | d52680fc932efb8a2f334cc6993e705ed1e31e99 | 180,759,161,408,978,050,000,000,000,000,000,000,000 | 14 | qxl: check release info object
When releasing spice resources in the release_resource() routine,
if the release info object 'ext.info' is null, it leads to a null
pointer dereference. Add a check to avoid it.
Reported-by: Bugs SysSec <[email protected]>
Signed-off-by: Prasad J Pandit <[email protected]>
Message-id: [email protected]
Signed-off-by: Gerd Hoffmann <[email protected]> |
CImg<doubleT> eval(const char *const expression, const CImg<t>& xyzc,
CImgList<T> *const list_images=0) const {
return _eval(0,expression,xyzc,list_images);
} | 0 | [
"CWE-770"
]
| cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 64,886,665,234,303,170,000,000,000,000,000,000,000 | 4 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
ofputil_pull_ofp11_buckets(struct ofpbuf *msg, size_t buckets_length,
enum ofp_version version, struct ovs_list *buckets)
{
struct ofp11_bucket *ob;
uint32_t bucket_id = 0;
ovs_list_init(buckets);
while (buckets_length > 0) {
struct ofputil_bucket *bucket;
struct ofpbuf ofpacts;
enum ofperr error;
size_t ob_len;
ob = (buckets_length >= sizeof *ob
? ofpbuf_try_pull(msg, sizeof *ob)
: NULL);
if (!ob) {
VLOG_WARN_RL(&bad_ofmsg_rl, "buckets end with %"PRIuSIZE" leftover bytes",
buckets_length);
ofputil_bucket_list_destroy(buckets);
return OFPERR_OFPGMFC_BAD_BUCKET;
}
ob_len = ntohs(ob->len);
if (ob_len < sizeof *ob) {
VLOG_WARN_RL(&bad_ofmsg_rl, "OpenFlow message bucket length "
"%"PRIuSIZE" is not valid", ob_len);
ofputil_bucket_list_destroy(buckets);
return OFPERR_OFPGMFC_BAD_BUCKET;
} else if (ob_len > buckets_length) {
VLOG_WARN_RL(&bad_ofmsg_rl, "OpenFlow message bucket length "
"%"PRIuSIZE" exceeds remaining buckets data size %"PRIuSIZE,
ob_len, buckets_length);
ofputil_bucket_list_destroy(buckets);
return OFPERR_OFPGMFC_BAD_BUCKET;
}
buckets_length -= ob_len;
ofpbuf_init(&ofpacts, 0);
error = ofpacts_pull_openflow_actions(msg, ob_len - sizeof *ob,
version, NULL, NULL, &ofpacts);
if (error) {
ofpbuf_uninit(&ofpacts);
ofputil_bucket_list_destroy(buckets);
return error;
}
bucket = xzalloc(sizeof *bucket);
bucket->weight = ntohs(ob->weight);
error = ofputil_port_from_ofp11(ob->watch_port, &bucket->watch_port);
if (error) {
ofpbuf_uninit(&ofpacts);
ofputil_bucket_list_destroy(buckets);
free(bucket);
return OFPERR_OFPGMFC_BAD_WATCH;
}
bucket->watch_group = ntohl(ob->watch_group);
bucket->bucket_id = bucket_id++;
bucket->ofpacts = ofpbuf_steal_data(&ofpacts);
bucket->ofpacts_len = ofpacts.size;
ovs_list_push_back(buckets, &bucket->list_node);
}
return 0;
} | 0 | [
"CWE-772"
]
| ovs | 77ad4225d125030420d897c873e4734ac708c66b | 105,503,669,957,181,640,000,000,000,000,000,000,000 | 66 | ofp-util: Fix memory leaks on error cases in ofputil_decode_group_mod().
Found by libFuzzer.
Reported-by: Bhargava Shastry <[email protected]>
Signed-off-by: Ben Pfaff <[email protected]>
Acked-by: Justin Pettit <[email protected]> |
static ut64 extract_addr_from_code(ut8 *arm64_code, ut64 vaddr) {
ut64 addr = vaddr & ~0xfff;
ut64 adrp = r_read_le32 (arm64_code);
ut64 adrp_offset = ((adrp & 0x60000000) >> 29) | ((adrp & 0xffffe0) >> 3);
addr += adrp_offset << 12;
ut64 ldr = r_read_le32 (arm64_code + 4);
addr += ((ldr & 0x3ffc00) >> 10) << ((ldr & 0xc0000000) >> 30);
return addr;
} | 0 | [
"CWE-476"
]
| radare2 | feaa4e7f7399c51ee6f52deb84dc3f795b4035d6 | 79,381,890,344,713,190,000,000,000,000,000,000,000 | 12 | Fix null deref in xnu.kernelcache ##crash
* Reported by @xshad3 via huntr.dev |
int dev_get_iflink(const struct net_device *dev)
{
if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
return dev->netdev_ops->ndo_get_iflink(dev);
return dev->ifindex;
} | 0 | [
"CWE-400",
"CWE-703"
]
| linux | fac8e0f579695a3ecbc4d3cac369139d7f819971 | 143,679,539,209,243,560,000,000,000,000,000,000,000 | 7 | tunnels: Don't apply GRO to multiple layers of encapsulation.
When drivers express support for TSO of encapsulated packets, they
only mean that they can do it for one layer of encapsulation.
Supporting additional levels would mean updating, at a minimum,
more IP length fields and they are unaware of this.
No encapsulation device expresses support for handling offloaded
encapsulated packets, so we won't generate these types of frames
in the transmit path. However, GRO doesn't have a check for
multiple levels of encapsulation and will attempt to build them.
UDP tunnel GRO actually does prevent this situation but it only
handles multiple UDP tunnels stacked on top of each other. This
generalizes that solution to prevent any kind of tunnel stacking
that would cause problems.
Fixes: bf5a755f ("net-gre-gro: Add GRE support to the GRO stack")
Signed-off-by: Jesse Gross <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static int mov_write_identification(AVIOContext *pb, AVFormatContext *s)
{
MOVMuxContext *mov = s->priv_data;
int i;
mov_write_ftyp_tag(pb,s);
if (mov->mode == MODE_PSP) {
int video_streams_nb = 0, audio_streams_nb = 0, other_streams_nb = 0;
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
if (is_cover_image(st))
continue;
if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
video_streams_nb++;
else if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
audio_streams_nb++;
else
other_streams_nb++;
}
if (video_streams_nb != 1 || audio_streams_nb != 1 || other_streams_nb) {
av_log(s, AV_LOG_ERROR, "PSP mode need one video and one audio stream\n");
return AVERROR(EINVAL);
}
return mov_write_uuidprof_tag(pb, s);
}
return 0;
} | 0 | [
"CWE-125"
]
| FFmpeg | 95556e27e2c1d56d9e18f5db34d6f756f3011148 | 182,525,388,622,725,500,000,000,000,000,000,000,000 | 28 | avformat/movenc: Do not pass AVCodecParameters in avpriv_request_sample
Fixes: out of array read
Fixes: ffmpeg_crash_8.avi
Found-by: Thuan Pham, Marcel Böhme, Andrew Santosa and Alexandru Razvan Caciulescu with AFLSmart
Signed-off-by: Michael Niedermayer <[email protected]> |
void Item_field::save_org_in_field(Field *to)
{
save_field_in_field(field, &null_value, to, TRUE);
} | 0 | []
| server | b000e169562697aa072600695d4f0c0412f94f4f | 285,242,424,321,290,840,000,000,000,000,000,000,000 | 4 | Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST, COL), NAME_CONST('NAME', NULL))
based on:
commit f7316aa0c9a
Author: Ajo Robert <[email protected]>
Date: Thu Aug 24 17:03:21 2017 +0530
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST,
COL), NAME_CONST('NAME', NULL))
Backport of Bug#19143243 fix.
NAME_CONST item can return NULL_ITEM type in case of incorrect arguments.
NULL_ITEM has special processing in Item_func_in function.
In Item_func_in::fix_length_and_dec an array of possible comparators is
created. Since the NAME_CONST function has the NULL_ITEM type, the corresponding
array element is empty. Then NAME_CONST is wrapped in an ITEM_CACHE.
ITEM_CACHE cannot return the proper type (NULL_ITEM) in Item_func_in::val_int(),
so the NULL_ITEM ends up being compared with an empty comparator.
The fix is to disable the caching of Item_name_const item. |
int g_dhcp_client_decline(GDHCPClient *dhcp_client, uint32_t requested)
{
struct dhcp_packet packet;
dhcp_client->state = DECLINED;
dhcp_client->retry_times = 0;
debug(dhcp_client, "sending DHCP decline");
init_packet(dhcp_client, &packet, DHCPDECLINE);
packet.xid = dhcp_client->xid;
packet.secs = dhcp_attempt_secs(dhcp_client);
if (requested)
dhcp_add_option_uint32(&packet, DHCP_REQUESTED_IP, requested);
add_send_options(dhcp_client, &packet);
return dhcp_send_raw_packet(&packet, INADDR_ANY, CLIENT_PORT,
INADDR_BROADCAST, SERVER_PORT, MAC_BCAST_ADDR,
dhcp_client->ifindex, true);
} | 0 | []
| connman | a74524b3e3fad81b0fd1084ffdf9f2ea469cd9b1 | 103,099,488,575,414,700,000,000,000,000,000,000,000 | 23 | gdhcp: Avoid leaking stack data via uninitialized variable
Fixes: CVE-2021-26676 |
__releases(&txq->axq_lock)
{
struct sk_buff_head q;
struct sk_buff *skb;
__skb_queue_head_init(&q);
skb_queue_splice_init(&txq->complete_q, &q);
spin_unlock_bh(&txq->axq_lock);
while ((skb = __skb_dequeue(&q)))
ieee80211_tx_status(sc->hw, skb);
} | 0 | [
"CWE-362",
"CWE-241"
]
| linux | 21f8aaee0c62708654988ce092838aa7df4d25d8 | 204,921,914,163,225,520,000,000,000,000,000,000,000 | 12 | ath9k: protect tid->sched check
We check tid->sched without a lock taken in ath_tx_aggr_sleep(). That
is a race condition which can result in doing list_del(&tid->list) twice
(the second time with a poisoned list node) and cause a crash like the one shown below:
[424271.637220] BUG: unable to handle kernel paging request at 00100104
[424271.637328] IP: [<f90fc072>] ath_tx_aggr_sleep+0x62/0xe0 [ath9k]
...
[424271.639953] Call Trace:
[424271.639998] [<f90f6900>] ? ath9k_get_survey+0x110/0x110 [ath9k]
[424271.640083] [<f90f6942>] ath9k_sta_notify+0x42/0x50 [ath9k]
[424271.640177] [<f809cfef>] sta_ps_start+0x8f/0x1c0 [mac80211]
[424271.640258] [<c10f730e>] ? free_compound_page+0x2e/0x40
[424271.640346] [<f809e915>] ieee80211_rx_handlers+0x9d5/0x2340 [mac80211]
[424271.640437] [<c112f048>] ? kmem_cache_free+0x1d8/0x1f0
[424271.640510] [<c1345a84>] ? kfree_skbmem+0x34/0x90
[424271.640578] [<c10fc23c>] ? put_page+0x2c/0x40
[424271.640640] [<c1345a84>] ? kfree_skbmem+0x34/0x90
[424271.640706] [<c1345a84>] ? kfree_skbmem+0x34/0x90
[424271.640787] [<f809dde3>] ? ieee80211_rx_handlers_result+0x73/0x1d0 [mac80211]
[424271.640897] [<f80a07a0>] ieee80211_prepare_and_rx_handle+0x520/0xad0 [mac80211]
[424271.641009] [<f809e22d>] ? ieee80211_rx_handlers+0x2ed/0x2340 [mac80211]
[424271.641104] [<c13846ce>] ? ip_output+0x7e/0xd0
[424271.641182] [<f80a1057>] ieee80211_rx+0x307/0x7c0 [mac80211]
[424271.641266] [<f90fa6ee>] ath_rx_tasklet+0x88e/0xf70 [ath9k]
[424271.641358] [<f80a0f2c>] ? ieee80211_rx+0x1dc/0x7c0 [mac80211]
[424271.641445] [<f90f82db>] ath9k_tasklet+0xcb/0x130 [ath9k]
Bug report:
https://bugzilla.kernel.org/show_bug.cgi?id=70551
Reported-and-tested-by: Max Sydorenko <[email protected]>
Cc: [email protected]
Signed-off-by: Stanislaw Gruszka <[email protected]>
Signed-off-by: John W. Linville <[email protected]> |
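The underlying rule is that the flag check and the list unlink must happen under the same lock, otherwise two CPUs can both see the flag set and unlink the node twice. A user-space analogue using pthreads follows; it is illustrative only, not the kernel patch, and the names are made up.

    #include <pthread.h>
    #include <stdbool.h>

    struct entry { bool scheduled; struct entry *prev, *next; };

    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

    static void list_del(struct entry *e)
    {
        e->prev->next = e->next;
        e->next->prev = e->prev;
        e->prev = e->next = e;
    }

    /* Racy: another thread can unlink the node between the check and the lock. */
    static void remove_racy(struct entry *e)
    {
        if (!e->scheduled)
            return;
        pthread_mutex_lock(&queue_lock);
        list_del(e);
        e->scheduled = false;
        pthread_mutex_unlock(&queue_lock);
    }

    /* Safe: the check and the unlink are done under the same lock. */
    static void remove_safe(struct entry *e)
    {
        pthread_mutex_lock(&queue_lock);
        if (e->scheduled) {
            list_del(e);
            e->scheduled = false;
        }
        pthread_mutex_unlock(&queue_lock);
    }

    int main(void)
    {
        struct entry head = { false, &head, &head };
        struct entry e = { true, &head, &head };
        head.next = head.prev = &e;
        remove_safe(&e);
        remove_racy(&e);   /* now a no-op: the flag was already cleared under the lock */
        return 0;
    }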
void CrwMap::encode0x180e(const Image& image,
const CrwMapping* pCrwMapping,
CiffHeader* pHead)
{
assert(pCrwMapping != 0);
assert(pHead != 0);
time_t t = 0;
const ExifKey key(pCrwMapping->tag_, Internal::groupName(pCrwMapping->ifdId_));
const ExifData::const_iterator ed = image.exifData().findKey(key);
if (ed != image.exifData().end()) {
struct tm tm;
std::memset(&tm, 0x0, sizeof(tm));
if ( exifTime(ed->toString().c_str(), &tm) == 0 ) {
t=::mktime(&tm);
}
}
if (t != 0) {
DataBuf buf(12);
std::memset(buf.pData_, 0x0, 12);
ul2Data(buf.pData_, static_cast<uint32_t>(t), pHead->byteOrder());
pHead->add(pCrwMapping->crwTagId_, pCrwMapping->crwDir_, buf);
}
else {
pHead->remove(pCrwMapping->crwTagId_, pCrwMapping->crwDir_);
}
} // CrwMap::encode0x180e | 0 | [
"CWE-190"
]
| exiv2 | 9b7a19f957af53304655ed1efe32253a1b11a8d0 | 81,058,746,664,412,830,000,000,000,000,000,000,000 | 27 | Fix integer overflow. |
static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
gfp_t gfp)
{
return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
} | 0 | [
"CWE-125"
]
| linux | 2423496af35d94a87156b063ea5cedffc10a70a1 | 242,636,270,683,786,280,000,000,000,000,000,000,000 | 5 | ipv6: Prevent overrun when parsing v6 header options
The KASAN warning reported below was discovered with a syzkaller
program. The reproducer is basically:
int s = socket(AF_INET6, SOCK_RAW, NEXTHDR_HOP);
send(s, &one_byte_of_data, 1, MSG_MORE);
send(s, &more_than_mtu_bytes_data, 2000, 0);
The socket() call sets the nexthdr field of the v6 header to
NEXTHDR_HOP, the first send call primes the payload with a non zero
byte of data, and the second send call triggers the fragmentation path.
The fragmentation code tries to parse the header options in order
to figure out where to insert the fragment option. Since nexthdr points
to an invalid option, the calculation of the size of the network header
can be made much larger than the linear section of the skb, and data
is read outside of it.
This fix makes ip6_find_1stfrag return an error if it detects
running out-of-bounds.
[ 42.361487] ==================================================================
[ 42.364412] BUG: KASAN: slab-out-of-bounds in ip6_fragment+0x11c8/0x3730
[ 42.365471] Read of size 840 at addr ffff88000969e798 by task ip6_fragment-oo/3789
[ 42.366469]
[ 42.366696] CPU: 1 PID: 3789 Comm: ip6_fragment-oo Not tainted 4.11.0+ #41
[ 42.367628] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.1-1ubuntu1 04/01/2014
[ 42.368824] Call Trace:
[ 42.369183] dump_stack+0xb3/0x10b
[ 42.369664] print_address_description+0x73/0x290
[ 42.370325] kasan_report+0x252/0x370
[ 42.370839] ? ip6_fragment+0x11c8/0x3730
[ 42.371396] check_memory_region+0x13c/0x1a0
[ 42.371978] memcpy+0x23/0x50
[ 42.372395] ip6_fragment+0x11c8/0x3730
[ 42.372920] ? nf_ct_expect_unregister_notifier+0x110/0x110
[ 42.373681] ? ip6_copy_metadata+0x7f0/0x7f0
[ 42.374263] ? ip6_forward+0x2e30/0x2e30
[ 42.374803] ip6_finish_output+0x584/0x990
[ 42.375350] ip6_output+0x1b7/0x690
[ 42.375836] ? ip6_finish_output+0x990/0x990
[ 42.376411] ? ip6_fragment+0x3730/0x3730
[ 42.376968] ip6_local_out+0x95/0x160
[ 42.377471] ip6_send_skb+0xa1/0x330
[ 42.377969] ip6_push_pending_frames+0xb3/0xe0
[ 42.378589] rawv6_sendmsg+0x2051/0x2db0
[ 42.379129] ? rawv6_bind+0x8b0/0x8b0
[ 42.379633] ? _copy_from_user+0x84/0xe0
[ 42.380193] ? debug_check_no_locks_freed+0x290/0x290
[ 42.380878] ? ___sys_sendmsg+0x162/0x930
[ 42.381427] ? rcu_read_lock_sched_held+0xa3/0x120
[ 42.382074] ? sock_has_perm+0x1f6/0x290
[ 42.382614] ? ___sys_sendmsg+0x167/0x930
[ 42.383173] ? lock_downgrade+0x660/0x660
[ 42.383727] inet_sendmsg+0x123/0x500
[ 42.384226] ? inet_sendmsg+0x123/0x500
[ 42.384748] ? inet_recvmsg+0x540/0x540
[ 42.385263] sock_sendmsg+0xca/0x110
[ 42.385758] SYSC_sendto+0x217/0x380
[ 42.386249] ? SYSC_connect+0x310/0x310
[ 42.386783] ? __might_fault+0x110/0x1d0
[ 42.387324] ? lock_downgrade+0x660/0x660
[ 42.387880] ? __fget_light+0xa1/0x1f0
[ 42.388403] ? __fdget+0x18/0x20
[ 42.388851] ? sock_common_setsockopt+0x95/0xd0
[ 42.389472] ? SyS_setsockopt+0x17f/0x260
[ 42.390021] ? entry_SYSCALL_64_fastpath+0x5/0xbe
[ 42.390650] SyS_sendto+0x40/0x50
[ 42.391103] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 42.391731] RIP: 0033:0x7fbbb711e383
[ 42.392217] RSP: 002b:00007ffff4d34f28 EFLAGS: 00000246 ORIG_RAX: 000000000000002c
[ 42.393235] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007fbbb711e383
[ 42.394195] RDX: 0000000000001000 RSI: 00007ffff4d34f60 RDI: 0000000000000003
[ 42.395145] RBP: 0000000000000046 R08: 00007ffff4d34f40 R09: 0000000000000018
[ 42.396056] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000400aad
[ 42.396598] R13: 0000000000000066 R14: 00007ffff4d34ee0 R15: 00007fbbb717af00
[ 42.397257]
[ 42.397411] Allocated by task 3789:
[ 42.397702] save_stack_trace+0x16/0x20
[ 42.398005] save_stack+0x46/0xd0
[ 42.398267] kasan_kmalloc+0xad/0xe0
[ 42.398548] kasan_slab_alloc+0x12/0x20
[ 42.398848] __kmalloc_node_track_caller+0xcb/0x380
[ 42.399224] __kmalloc_reserve.isra.32+0x41/0xe0
[ 42.399654] __alloc_skb+0xf8/0x580
[ 42.400003] sock_wmalloc+0xab/0xf0
[ 42.400346] __ip6_append_data.isra.41+0x2472/0x33d0
[ 42.400813] ip6_append_data+0x1a8/0x2f0
[ 42.401122] rawv6_sendmsg+0x11ee/0x2db0
[ 42.401505] inet_sendmsg+0x123/0x500
[ 42.401860] sock_sendmsg+0xca/0x110
[ 42.402209] ___sys_sendmsg+0x7cb/0x930
[ 42.402582] __sys_sendmsg+0xd9/0x190
[ 42.402941] SyS_sendmsg+0x2d/0x50
[ 42.403273] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 42.403718]
[ 42.403871] Freed by task 1794:
[ 42.404146] save_stack_trace+0x16/0x20
[ 42.404515] save_stack+0x46/0xd0
[ 42.404827] kasan_slab_free+0x72/0xc0
[ 42.405167] kfree+0xe8/0x2b0
[ 42.405462] skb_free_head+0x74/0xb0
[ 42.405806] skb_release_data+0x30e/0x3a0
[ 42.406198] skb_release_all+0x4a/0x60
[ 42.406563] consume_skb+0x113/0x2e0
[ 42.406910] skb_free_datagram+0x1a/0xe0
[ 42.407288] netlink_recvmsg+0x60d/0xe40
[ 42.407667] sock_recvmsg+0xd7/0x110
[ 42.408022] ___sys_recvmsg+0x25c/0x580
[ 42.408395] __sys_recvmsg+0xd6/0x190
[ 42.408753] SyS_recvmsg+0x2d/0x50
[ 42.409086] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 42.409513]
[ 42.409665] The buggy address belongs to the object at ffff88000969e780
[ 42.409665] which belongs to the cache kmalloc-512 of size 512
[ 42.410846] The buggy address is located 24 bytes inside of
[ 42.410846] 512-byte region [ffff88000969e780, ffff88000969e980)
[ 42.411941] The buggy address belongs to the page:
[ 42.412405] page:ffffea000025a780 count:1 mapcount:0 mapping: (null) index:0x0 compound_mapcount: 0
[ 42.413298] flags: 0x100000000008100(slab|head)
[ 42.413729] raw: 0100000000008100 0000000000000000 0000000000000000 00000001800c000c
[ 42.414387] raw: ffffea00002a9500 0000000900000007 ffff88000c401280 0000000000000000
[ 42.415074] page dumped because: kasan: bad access detected
[ 42.415604]
[ 42.415757] Memory state around the buggy address:
[ 42.416222] ffff88000969e880: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
[ 42.416904] ffff88000969e900: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
[ 42.417591] >ffff88000969e980: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
[ 42.418273] ^
[ 42.418588] ffff88000969ea00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[ 42.419273] ffff88000969ea80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[ 42.419882] ==================================================================
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: Craig Gallek <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static void php_libxml_internal_error_handler(int error_type, void *ctx, const char **msg, va_list ap)
{
char *buf;
int len, len_iter, output = 0;
TSRMLS_FETCH();
len = vspprintf(&buf, 0, *msg, ap);
len_iter = len;
/* remove any trailing \n */
while (len_iter && buf[--len_iter] == '\n') {
buf[len_iter] = '\0';
output = 1;
}
smart_str_appendl(&LIBXML(error_buffer), buf, len);
efree(buf);
if (output == 1) {
if (LIBXML(error_list)) {
_php_list_set_error_structure(NULL, LIBXML(error_buffer).c);
} else {
switch (error_type) {
case PHP_LIBXML_CTX_ERROR:
php_libxml_ctx_error_level(E_WARNING, ctx, LIBXML(error_buffer).c TSRMLS_CC);
break;
case PHP_LIBXML_CTX_WARNING:
php_libxml_ctx_error_level(E_NOTICE, ctx, LIBXML(error_buffer).c TSRMLS_CC);
break;
default:
php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s", LIBXML(error_buffer).c);
}
}
smart_str_free(&LIBXML(error_buffer));
}
} | 0 | [
"CWE-200"
]
| php-src | 8e76d0404b7f664ee6719fd98f0483f0ac4669d6 | 322,660,307,106,581,900,000,000,000,000,000,000,000 | 38 | Fixed external entity loading |
void red_stream_set_async_error_handler(RedStream *stream,
AsyncReadError error_handler)
{
stream->priv->async_read.error = error_handler;
} | 0 | []
| spice | 95a0cfac8a1c8eff50f05e65df945da3bb501fc9 | 312,121,567,088,859,800,000,000,000,000,000,000,000 | 5 | With OpenSSL 1.0.2 and earlier: disable client-side renegotiation.
Fixed issue #49
Fixes BZ#1904459
Signed-off-by: Julien Ropé <[email protected]>
Reported-by: BlackKD
Acked-by: Frediano Ziglio <[email protected]> |
GF_Err gf_hinter_finalize(GF_ISOFile *file, GF_SDP_IODProfile IOD_Profile, u32 bandwidth)
{
u32 i, sceneT, odT, descIndex, size, size64;
GF_InitialObjectDescriptor *iod;
GF_SLConfig slc;
GF_ISOSample *samp;
Bool remove_ocr;
u8 *buffer;
char buf64[5000], sdpLine[5100];
gf_isom_sdp_clean(file);
if (bandwidth) {
sprintf(buf64, "b=AS:%d", bandwidth);
gf_isom_sdp_add_line(file, buf64);
}
//xtended attribute for copyright
if (gf_sys_is_test_mode()) {
sprintf(buf64, "a=x-copyright: %s", "MP4/3GP File hinted with GPAC - (c) Telecom ParisTech (http://gpac.io)");
} else {
sprintf(buf64, "a=x-copyright: MP4/3GP File hinted with GPAC %s - %s", gf_gpac_version(), gf_gpac_copyright() );
}
gf_isom_sdp_add_line(file, buf64);
if (IOD_Profile == GF_SDP_IOD_NONE) return GF_OK;
odT = sceneT = 0;
for (i=0; i<gf_isom_get_track_count(file); i++) {
if (!gf_isom_is_track_in_root_od(file, i+1)) continue;
switch (gf_isom_get_media_type(file,i+1)) {
case GF_ISOM_MEDIA_OD:
odT = i+1;
break;
case GF_ISOM_MEDIA_SCENE:
sceneT = i+1;
break;
}
}
remove_ocr = 0;
if (IOD_Profile == GF_SDP_IOD_ISMA_STRICT) {
IOD_Profile = GF_SDP_IOD_ISMA;
remove_ocr = 1;
}
/*if we want ISMA like iods, we need at least BIFS */
if ( (IOD_Profile == GF_SDP_IOD_ISMA) && !sceneT ) return GF_BAD_PARAM;
/*do NOT change PLs, we assume they are correct*/
iod = (GF_InitialObjectDescriptor *) gf_isom_get_root_od(file);
if (!iod) return GF_NOT_SUPPORTED;
/*rewrite an IOD with good SL config - embbed data if possible*/
if (IOD_Profile == GF_SDP_IOD_ISMA) {
GF_ESD *esd;
Bool is_ok = 1;
while (gf_list_count(iod->ESDescriptors)) {
esd = (GF_ESD*)gf_list_get(iod->ESDescriptors, 0);
gf_odf_desc_del((GF_Descriptor *) esd);
gf_list_rem(iod->ESDescriptors, 0);
}
/*get OD esd, and embbed stream data if possible*/
if (odT) {
esd = gf_isom_get_esd(file, odT, 1);
if (gf_isom_get_sample_count(file, odT)==1) {
samp = gf_isom_get_sample(file, odT, 1, &descIndex);
if (samp && gf_hinter_can_embbed_data(samp->data, samp->dataLength, GF_STREAM_OD)) {
InitSL_NULL(&slc);
slc.predefined = 0;
slc.hasRandomAccessUnitsOnlyFlag = 1;
slc.timeScale = slc.timestampResolution = gf_isom_get_media_timescale(file, odT);
slc.OCRResolution = 1000;
slc.startCTS = samp->DTS+samp->CTS_Offset;
slc.startDTS = samp->DTS;
//set the SL for future extraction
gf_isom_set_extraction_slc(file, odT, 1, &slc);
size64 = gf_base64_encode(samp->data, samp->dataLength, buf64, 2000);
buf64[size64] = 0;
sprintf(sdpLine, "data:application/mpeg4-od-au;base64,%s", buf64);
if (esd->decoderConfig) {
esd->decoderConfig->avgBitrate = 0;
esd->decoderConfig->bufferSizeDB = samp->dataLength;
esd->decoderConfig->maxBitrate = 0;
}
size64 = (u32) strlen(sdpLine)+1;
esd->URLString = (char*)gf_malloc(sizeof(char) * size64);
strcpy(esd->URLString, sdpLine);
} else {
GF_LOG(GF_LOG_WARNING, GF_LOG_RTP, ("[rtp hinter] OD sample too large to be embedded in IOD - ISMA disabled\n"));
is_ok = 0;
}
gf_isom_sample_del(&samp);
}
if (remove_ocr) esd->OCRESID = 0;
else if (esd->OCRESID == esd->ESID) esd->OCRESID = 0;
//OK, add this to our IOD
gf_list_add(iod->ESDescriptors, esd);
}
esd = gf_isom_get_esd(file, sceneT, 1);
if (gf_isom_get_sample_count(file, sceneT)==1) {
samp = gf_isom_get_sample(file, sceneT, 1, &descIndex);
if (samp && gf_hinter_can_embbed_data(samp->data, samp->dataLength, GF_STREAM_SCENE)) {
InitSL_NULL(&slc);
slc.timeScale = slc.timestampResolution = gf_isom_get_media_timescale(file, sceneT);
slc.OCRResolution = 1000;
slc.startCTS = samp->DTS+samp->CTS_Offset;
slc.startDTS = samp->DTS;
//set the SL for future extraction
gf_isom_set_extraction_slc(file, sceneT, 1, &slc);
//encode in Base64 the sample
size64 = gf_base64_encode(samp->data, samp->dataLength, buf64, 2000);
buf64[size64] = 0;
sprintf(sdpLine, "data:application/mpeg4-bifs-au;base64,%s", buf64);
if (esd->decoderConfig) {
esd->decoderConfig->avgBitrate = 0;
esd->decoderConfig->bufferSizeDB = samp->dataLength;
esd->decoderConfig->maxBitrate = 0;
}
esd->URLString = (char*)gf_malloc(sizeof(char) * (strlen(sdpLine)+1));
strcpy(esd->URLString, sdpLine);
} else {
GF_LOG(GF_LOG_ERROR, GF_LOG_RTP, ("[rtp hinter] Scene description sample too large to be embedded in IOD - ISMA disabled\n"));
is_ok = 0;
}
gf_isom_sample_del(&samp);
}
if (remove_ocr) esd->OCRESID = 0;
else if (esd->OCRESID == esd->ESID) esd->OCRESID = 0;
gf_list_add(iod->ESDescriptors, esd);
if (is_ok) {
u32 has_a, has_v, has_i_a, has_i_v;
has_a = has_v = has_i_a = has_i_v = 0;
for (i=0; i<gf_isom_get_track_count(file); i++) {
esd = gf_isom_get_esd(file, i+1, 1);
if (!esd) continue;
if (esd->decoderConfig) {
if (esd->decoderConfig->streamType==GF_STREAM_VISUAL) {
if (esd->decoderConfig->objectTypeIndication==GF_CODECID_MPEG4_PART2) has_i_v ++;
else has_v++;
} else if (esd->decoderConfig->streamType==GF_STREAM_AUDIO) {
if (esd->decoderConfig->objectTypeIndication==GF_CODECID_AAC_MPEG4) has_i_a ++;
else has_a++;
}
}
gf_odf_desc_del((GF_Descriptor *)esd);
}
/*only 1 MPEG-4 visual max and 1 MPEG-4 audio max for ISMA compliancy*/
if (!has_v && !has_a && (has_i_v<=1) && (has_i_a<=1)) {
sprintf(sdpLine, "a=isma-compliance:1,1.0,1");
gf_isom_sdp_add_line(file, sdpLine);
}
}
}
//encode the IOD
buffer = NULL;
size = 0;
gf_odf_desc_write((GF_Descriptor *) iod, &buffer, &size);
gf_odf_desc_del((GF_Descriptor *)iod);
//encode in Base64 the iod
size64 = gf_base64_encode(buffer, size, buf64, 2000);
buf64[size64] = 0;
gf_free(buffer);
sprintf(sdpLine, "a=mpeg4-iod:\"data:application/mpeg4-iod;base64,%s\"", buf64);
gf_isom_sdp_add_line(file, sdpLine);
return GF_OK;
} | 0 | [
"CWE-703"
]
| gpac | f5a038e6893019ee471b6a57490cf7a495673816 | 113,885,463,683,192,280,000,000,000,000,000,000,000 | 179 | fixed #1885 |
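The pattern used in the hinter code above — base64-encoding an access unit and wrapping it in a data: URL for an SDP line — can be sketched in isolation. The fragment below is illustrative only (plain C, no GPAC APIs; make_data_url is an invented helper name); it sizes the destination from the encoded payload instead of writing into a fixed buffer with sprintf().

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical sketch: build a "data:" URL from an already base64-encoded
 * payload, allocating exactly as much space as the pieces require. */
static char *make_data_url(const char *mime, const char *b64)
{
    const char *prefix = "data:";
    const char *marker = ";base64,";
    size_t len = strlen(prefix) + strlen(mime) + strlen(marker) + strlen(b64) + 1;
    char *url = malloc(len);

    if (!url)
        return NULL;
    snprintf(url, len, "%s%s%s%s", prefix, mime, marker, b64);
    return url;
}

int main(void)
{
    char *url = make_data_url("application/mpeg4-od-au", "AAECAwQ=");
    if (url) {
        printf("%s\n", url);
        free(url);
    }
    return 0;
}
```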
qemuProcessHandlePMSuspendDisk(qemuMonitorPtr mon G_GNUC_UNUSED,
virDomainObjPtr vm,
void *opaque)
{
virQEMUDriverPtr driver = opaque;
virObjectEventPtr event = NULL;
virObjectEventPtr lifecycleEvent = NULL;
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
virObjectLock(vm);
event = virDomainEventPMSuspendDiskNewFromObj(vm);
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
qemuDomainObjPrivatePtr priv = vm->privateData;
VIR_DEBUG("Transitioned guest %s to pmsuspended state due to "
"QMP suspend_disk event", vm->def->name);
virDomainObjSetState(vm, VIR_DOMAIN_PMSUSPENDED,
VIR_DOMAIN_PMSUSPENDED_UNKNOWN);
lifecycleEvent =
virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_PMSUSPENDED,
VIR_DOMAIN_EVENT_PMSUSPENDED_DISK);
if (virDomainObjSave(vm, driver->xmlopt, cfg->stateDir) < 0) {
VIR_WARN("Unable to save status on vm %s after suspend event",
vm->def->name);
}
if (priv->agent)
qemuAgentNotifyEvent(priv->agent, QEMU_AGENT_EVENT_SUSPEND);
}
virObjectUnlock(vm);
virObjectEventStateQueue(driver->domainEventState, event);
virObjectEventStateQueue(driver->domainEventState, lifecycleEvent);
return 0;
} | 0 | [
"CWE-416"
]
| libvirt | 1ac703a7d0789e46833f4013a3876c2e3af18ec7 | 253,326,259,710,301,100,000,000,000,000,000,000,000 | 40 | qemu: Add missing lock in qemuProcessHandleMonitorEOF
qemuMonitorUnregister will be called from multiple threads (e.g. threads
in the rpc worker pool and the vm event thread). In some cases it isn't
protected by the monitor lock, which may lead to g_source_unref being
called more than once and, eventually, to a use-after-free.
Add the missing lock in qemuProcessHandleMonitorEOF (the only place I
found where the monitor lock is missing).
Suggested-by: Michal Privoznik <[email protected]>
Signed-off-by: Peng Liang <[email protected]>
Signed-off-by: Michal Privoznik <[email protected]>
Reviewed-by: Michal Privoznik <[email protected]> |
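The fix described above boils down to serializing the unregister path so a shared handle can be released at most once. A minimal illustrative sketch — plain C with pthreads, not libvirt/GLib API; struct monitor, its lock, and release_source() are invented stand-ins for the monitor object, its lock, and g_source_unref — could look like this:

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the monitor object and its event source. */
struct monitor {
    pthread_mutex_t lock;   /* plays the role of the monitor lock */
    void *source;           /* plays the role of the GSource reference */
};

static void release_source(void *src)
{
    /* Stand-in for g_source_unref(); must run at most once per reference. */
    printf("released %p\n", src);
    free(src);
}

/* Unregister path: every caller takes the lock, so only the first caller
 * sees a non-NULL source and releases it. */
static void monitor_unregister(struct monitor *mon)
{
    pthread_mutex_lock(&mon->lock);
    if (mon->source) {
        release_source(mon->source);
        mon->source = NULL;     /* later callers become no-ops */
    }
    pthread_mutex_unlock(&mon->lock);
}

int main(void)
{
    struct monitor mon;

    pthread_mutex_init(&mon.lock, NULL);
    mon.source = malloc(16);

    monitor_unregister(&mon);   /* releases the source */
    monitor_unregister(&mon);   /* safe no-op instead of a double release */

    pthread_mutex_destroy(&mon.lock);
    return 0;
}
```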
xmlParseSystemLiteral(xmlParserCtxtPtr ctxt) {
xmlChar *buf = NULL;
int len = 0;
int size = XML_PARSER_BUFFER_SIZE;
int cur, l;
xmlChar stop;
int state = ctxt->instate;
int count = 0;
SHRINK;
if (RAW == '"') {
NEXT;
stop = '"';
} else if (RAW == '\'') {
NEXT;
stop = '\'';
} else {
xmlFatalErr(ctxt, XML_ERR_LITERAL_NOT_STARTED, NULL);
return(NULL);
}
buf = (xmlChar *) xmlMallocAtomic(size * sizeof(xmlChar));
if (buf == NULL) {
xmlErrMemory(ctxt, NULL);
return(NULL);
}
ctxt->instate = XML_PARSER_SYSTEM_LITERAL;
cur = CUR_CHAR(l);
while ((IS_CHAR(cur)) && (cur != stop)) { /* checked */
if (len + 5 >= size) {
xmlChar *tmp;
if ((size > XML_MAX_NAME_LENGTH) &&
((ctxt->options & XML_PARSE_HUGE) == 0)) {
xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "SystemLiteral");
xmlFree(buf);
ctxt->instate = (xmlParserInputState) state;
return(NULL);
}
size *= 2;
tmp = (xmlChar *) xmlRealloc(buf, size * sizeof(xmlChar));
if (tmp == NULL) {
xmlFree(buf);
xmlErrMemory(ctxt, NULL);
ctxt->instate = (xmlParserInputState) state;
return(NULL);
}
buf = tmp;
}
count++;
if (count > 50) {
GROW;
count = 0;
if (ctxt->instate == XML_PARSER_EOF) {
xmlFree(buf);
return(NULL);
}
}
COPY_BUF(l,buf,len,cur);
NEXTL(l);
cur = CUR_CHAR(l);
if (cur == 0) {
GROW;
SHRINK;
cur = CUR_CHAR(l);
}
}
buf[len] = 0;
ctxt->instate = (xmlParserInputState) state;
if (!IS_CHAR(cur)) {
xmlFatalErr(ctxt, XML_ERR_LITERAL_NOT_FINISHED, NULL);
} else {
NEXT;
}
return(buf);
} | 0 | [
"CWE-119"
]
| libxml2 | 6a36fbe3b3e001a8a840b5c1fdd81cefc9947f0d | 31,277,060,621,148,047,000,000,000,000,000,000,000 | 76 | Fix potential out of bound access |
static int packet_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
struct packet_sock *po;
__be16 proto = (__force __be16)protocol; /* weird, but documented */
int err;
if (!ns_capable(net->user_ns, CAP_NET_RAW))
return -EPERM;
if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
sock->type != SOCK_PACKET)
return -ESOCKTNOSUPPORT;
sock->state = SS_UNCONNECTED;
err = -ENOBUFS;
sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
if (sk == NULL)
goto out;
sock->ops = &packet_ops;
if (sock->type == SOCK_PACKET)
sock->ops = &packet_ops_spkt;
sock_init_data(sock, sk);
po = pkt_sk(sk);
init_completion(&po->skb_completion);
sk->sk_family = PF_PACKET;
po->num = proto;
po->xmit = dev_queue_xmit;
err = packet_alloc_pending(po);
if (err)
goto out2;
packet_cached_dev_reset(po);
sk->sk_destruct = packet_sock_destruct;
sk_refcnt_debug_inc(sk);
/*
* Attach a protocol block
*/
spin_lock_init(&po->bind_lock);
mutex_init(&po->pg_vec_lock);
po->rollover = NULL;
po->prot_hook.func = packet_rcv;
if (sock->type == SOCK_PACKET)
po->prot_hook.func = packet_rcv_spkt;
po->prot_hook.af_packet_priv = sk;
if (proto) {
po->prot_hook.type = proto;
__register_prot_hook(sk);
}
mutex_lock(&net->packet.sklist_lock);
sk_add_node_tail_rcu(sk, &net->packet.sklist);
mutex_unlock(&net->packet.sklist_lock);
preempt_disable();
sock_prot_inuse_add(net, &packet_proto, 1);
preempt_enable();
return 0;
out2:
sk_free(sk);
out:
return err;
} | 0 | [
"CWE-787"
]
| linux | acf69c946233259ab4d64f8869d4037a198c7f06 | 278,929,484,364,430,600,000,000,000,000,000,000,000 | 75 | net/packet: fix overflow in tpacket_rcv
Using tp_reserve to calculate netoff can overflow as
tp_reserve is unsigned int and netoff is unsigned short.
This may lead to macoff receving a smaller value then
sizeof(struct virtio_net_hdr), and if po->has_vnet_hdr
is set, an out-of-bounds write will occur when
calling virtio_net_hdr_from_skb.
The bug is fixed by converting netoff to unsigned int
and checking if it exceeds USHRT_MAX.
This addresses CVE-2020-14386
Fixes: 8913336a7e8d ("packet: add PACKET_RESERVE sockopt")
Signed-off-by: Or Cohen <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
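To make the width mismatch concrete, here is a small stand-alone illustration (not kernel code; the variable names merely mirror the ones in the changelog) of how a 32-bit offset silently loses its upper bits when stored in an unsigned short:

```c
#include <stdio.h>

int main(void)
{
    /* Toy illustration: a 32-bit offset is truncated when stored in an
     * unsigned short, so a later bounds decision is made against a value
     * far smaller than the real one. */
    unsigned int tp_reserve = 0x10000;            /* user-controlled, 32-bit */
    unsigned int base = 64;                       /* some header offset */

    unsigned short netoff_short = base + tp_reserve;  /* truncates to 64 */
    unsigned int   netoff_wide  = base + tp_reserve;  /* keeps 65600 */

    printf("stored in unsigned short: %u\n", (unsigned)netoff_short);
    printf("stored in unsigned int:   %u\n", netoff_wide);
    return 0;
}
```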
static int eval_if(const char *hdr, const char *etag, const char *lock_token,
unsigned *locked)
{
unsigned ret = 0;
tok_t tok_l;
char *list;
/* Process each list, ORing the results */
tok_init(&tok_l, hdr, ")", TOK_TRIMLEFT|TOK_TRIMRIGHT);
while ((list = tok_next(&tok_l))) {
unsigned ret_l = 1;
tok_t tok_c;
char *cond;
/* XXX Need to handle Resource-Tag for Tagged-list (COPY/MOVE dest) */
/* Process each condition, ANDing the results */
tok_initm(&tok_c, list+1, "]>", TOK_TRIMLEFT|TOK_TRIMRIGHT);
while ((cond = tok_next(&tok_c))) {
unsigned r, not = 0;
if (!strncmp(cond, "Not", 3)) {
not = 1;
cond += 3;
while (*cond == ' ') cond++;
}
if (*cond == '[') {
/* ETag */
r = !etagcmp(cond+1, etag);
}
else {
/* State Token */
if (!lock_token) r = 0;
else {
r = !strcmp(cond+1, lock_token);
if (r) {
/* Correct lock-token has been provided */
*locked = 0;
}
}
}
ret_l &= (not ? !r : r);
}
tok_fini(&tok_c);
ret |= ret_l;
}
tok_fini(&tok_l);
return (ret || locked);
} | 0 | [
"CWE-787"
]
| cyrus-imapd | a5779db8163b99463e25e7c476f9cbba438b65f3 | 61,840,134,599,072,570,000,000,000,000,000,000,000 | 54 | HTTP: don't overrun buffer when parsing strings with sscanf() |
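The one-line summary refers to a classic C pitfall: sscanf() with a bare "%s" (or "%[") conversion writes an unbounded amount of data into a fixed buffer. A minimal stand-alone illustration of the bounded form — not taken from the cyrus-imapd patch itself — is:

```c
#include <stdio.h>

int main(void)
{
    const char *input = "If: (<opaquelocktoken:12345678-aaaa-bbbb-cccc-1234567890ab>)";
    char token[32];

    /* Unsafe: sscanf(input, "%s", token) could write past the 32 bytes.
     * Safe: give the conversion an explicit field width one less than the
     * buffer size, leaving room for the terminating NUL. */
    if (sscanf(input, "%31s", token) == 1)
        printf("first field: %s\n", token);
    return 0;
}
```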
static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
u32 msr_entry_idx;
u32 exit_qual;
int r;
enter_guest_mode(vcpu);
if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
vmx_segment_cache_clear(vmx);
if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
vcpu->arch.tsc_offset += vmcs12->tsc_offset;
r = EXIT_REASON_INVALID_STATE;
if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
goto fail;
nested_get_vmcs12_pages(vcpu, vmcs12);
r = EXIT_REASON_MSR_LOAD_FAIL;
msr_entry_idx = nested_vmx_load_msr(vcpu,
vmcs12->vm_entry_msr_load_addr,
vmcs12->vm_entry_msr_load_count);
if (msr_entry_idx)
goto fail;
/*
* Note no nested_vmx_succeed or nested_vmx_fail here. At this point
* we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
* returned as far as L1 is concerned. It will only return (and set
* the success flag) when L2 exits (see nested_vmx_vmexit()).
*/
return 0;
fail:
if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
leave_guest_mode(vcpu);
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
nested_vmx_entry_failure(vcpu, vmcs12, r, exit_qual);
return 1;
} | 0 | [
"CWE-284"
]
| linux | 727ba748e110b4de50d142edca9d6a9b7e6111d8 | 20,440,000,892,905,105,000,000,000,000,000,000,000 | 48 | kvm: nVMX: Enforce cpl=0 for VMX instructions
VMX instructions executed inside a L1 VM will always trigger a VM exit
even when executed with cpl 3. This means we must perform the
privilege check in software.
Fixes: 70f3aac964ae("kvm: nVMX: Remove superfluous VMX instruction fault checks")
Cc: [email protected]
Signed-off-by: Felix Wilhelm <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
static int acl_search_update_confidential_attrs(struct acl_context *ac,
struct acl_private *data)
{
struct dsdb_attribute *a;
uint32_t n = 0;
if (data->acl_search) {
/*
* If acl:search is activated, the acl_read module
* protects confidential attributes.
*/
return LDB_SUCCESS;
}
if ((ac->schema == data->cached_schema_ptr) &&
(ac->schema->metadata_usn == data->cached_schema_metadata_usn))
{
return LDB_SUCCESS;
}
data->cached_schema_ptr = NULL;
data->cached_schema_loaded_usn = 0;
data->cached_schema_metadata_usn = 0;
TALLOC_FREE(data->confidential_attrs);
if (ac->schema == NULL) {
return LDB_SUCCESS;
}
for (a = ac->schema->attributes; a; a = a->next) {
const char **attrs = data->confidential_attrs;
if (!(a->searchFlags & SEARCH_FLAG_CONFIDENTIAL)) {
continue;
}
attrs = talloc_realloc(data, attrs, const char *, n + 2);
if (attrs == NULL) {
TALLOC_FREE(data->confidential_attrs);
return ldb_module_oom(ac->module);
}
attrs[n] = a->lDAPDisplayName;
attrs[n+1] = NULL;
n++;
data->confidential_attrs = attrs;
}
data->cached_schema_ptr = ac->schema;
data->cached_schema_metadata_usn = ac->schema->metadata_usn;
return LDB_SUCCESS;
} | 0 | [
"CWE-20"
]
| samba | b95431ab2303eb258e37e88d8841f2fb79fc4af5 | 68,396,883,332,583,550,000,000,000,000,000,000,000 | 54 | CVE-2022-32743 dsdb: Implement validated dNSHostName write
BUG: https://bugzilla.samba.org/show_bug.cgi?id=14833
Signed-off-by: Joseph Sutton <[email protected]>
Reviewed-by: Douglas Bagnall <[email protected]> |
static int ipv6_frag_rcv(struct sk_buff *skb)
{
struct frag_hdr *fhdr;
struct frag_queue *fq;
const struct ipv6hdr *hdr = ipv6_hdr(skb);
struct net *net = dev_net(skb_dst(skb)->dev);
int evicted;
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
/* Jumbo payload inhibits frag. header */
if (hdr->payload_len==0)
goto fail_hdr;
if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
sizeof(struct frag_hdr))))
goto fail_hdr;
hdr = ipv6_hdr(skb);
fhdr = (struct frag_hdr *)skb_transport_header(skb);
if (!(fhdr->frag_off & htons(0xFFF9))) {
/* It is not a fragmented frame */
skb->transport_header += sizeof(struct frag_hdr);
IP6_INC_STATS_BH(net,
ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
return 1;
}
evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags, false);
if (evicted)
IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_REASMFAILS, evicted);
fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
if (fq != NULL) {
int ret;
spin_lock(&fq->q.lock);
ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
spin_unlock(&fq->q.lock);
inet_frag_put(&fq->q, &ip6_frags);
return ret;
}
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
kfree_skb(skb);
return -1;
fail_hdr:
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
return -1;
} | 0 | []
| linux | 3ef0eb0db4bf92c6d2510fe5c4dc51852746f206 | 162,981,365,483,516,400,000,000,000,000,000,000,000 | 58 | net: frag, move LRU list maintenance outside of rwlock
Updating the fragmentation queues' LRU (Least-Recently-Used) list
required taking the hash writer lock. However, the LRU list isn't
tied to the hash at all, so we can use a separate lock for it.
Original-idea-by: Florian Westphal <[email protected]>
Signed-off-by: Jesper Dangaard Brouer <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
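The locking change described above is a general pattern: when a secondary structure (here the LRU list) is logically independent of the primary one (the hash), giving it its own lock removes unnecessary serialization. A hypothetical, self-contained miniature of the split — plain C with pthreads; none of the names are kernel APIs — might look like this:

```c
#include <pthread.h>
#include <stdio.h>

/* Hypothetical miniature: hash table and LRU list get independent locks,
 * so an LRU "touch" never has to wait for hash writers. */
struct frag_state {
    pthread_mutex_t hash_lock;   /* protects the hash buckets only */
    pthread_mutex_t lru_lock;    /* protects the LRU list only */
    int lru_length;              /* toy stand-in for the list itself */
};

static void lru_touch(struct frag_state *s)
{
    pthread_mutex_lock(&s->lru_lock);    /* hash_lock deliberately not taken */
    s->lru_length++;                     /* "move entry to the list tail" */
    pthread_mutex_unlock(&s->lru_lock);
}

int main(void)
{
    static struct frag_state s = { PTHREAD_MUTEX_INITIALIZER,
                                   PTHREAD_MUTEX_INITIALIZER, 0 };
    lru_touch(&s);
    printf("lru length: %d\n", s.lru_length);
    return 0;
}
```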
void get_cmdln_options(int argc, char *argv[]) {
int o;
#if CONFIG_FILE && HAVE_GETPWUID
static struct passwd *pwd_entry;
char *str;
#endif
#ifdef LONG_OPTIONS
int option_index = 0;
static struct option long_options[] = {
{"timeout", 1, 0, 't'},
#ifdef PROC_NET_DEV
{"procfile",1,0,'f'},
#endif
#ifdef PROC_DISKSTATS
{"diskstatsfile",1,0,1000},
{"partitionsfile",1,0,1001},
#endif
#if NETSTAT && ALLOW_NETSTATPATH
{"netstat",1,0,'n'},
#endif
#if IOSERVICE_IN
{"longdisknames",0,0,1002},
#endif
{"input",1,0,'i'},
{"dynamic",1,0,'d'},
{"help", 0, 0, 'h'},
{"version",0,0,'V'},
{"allif",1,0,'a'},
{"unit",1,0,'u'},
{"ansiout",0,0,'N'},
#if EXTENDED_STATS
{"type",1,0,'T'},
{"avglength",1,0,'A'},
#endif
{"interfaces",1,0,'I'},
{"sumhidden",1,0,'S'},
{"output",1,0,'o'},
#ifdef CSV
{"csvchar",1,0,'C'},
{"csvfile",1,0,'F'},
#endif
{"count",1,0,'c'},
{"daemon",1,0,'D'},
#ifdef HTML
{"htmlrefresh",1,0,'R'},
{"htmlheader",1,0,'H'},
#endif
{0,0,0,0}
};
#endif
#ifdef CONFIG_FILE
/* loop till first non option argument */
opterr=0;
while (1) {
#ifdef LONG_OPTIONS
o=getopt_long (argc,argv,SHORT_OPTIONS,long_options, &option_index);
#else
o=getopt (argc,argv,SHORT_OPTIONS);
#endif
if (o==-1) break;
}
opterr=1;
if (optind < argc) {
read_config(argv[optind]);
} else {
read_config("/etc/bwm-ng.conf");
#ifdef HAVE_GETPWUID
pwd_entry=getpwuid(getuid());
if (pwd_entry!=NULL) {
str=(char*)malloc(strlen(pwd_entry->pw_dir)+14);
if(!str) {
printf("Fatal: failed to allocate %zu bytes.\n", strlen(pwd_entry->pw_dir)+14);
exit(EXIT_FAILURE);
}
snprintf(str,strlen(pwd_entry->pw_dir)+14,"%s/.bwm-ng.conf",pwd_entry->pw_dir);
read_config(str);
free(str);
}
#endif
}
/* reset getopt again */
optind=1;
#endif
/* get command line arguments, kinda ugly, wanna rewrite it? */
while (1) {
#ifdef LONG_OPTIONS
o=getopt_long (argc,argv,SHORT_OPTIONS,long_options, &option_index);
#else
o=getopt (argc,argv,SHORT_OPTIONS);
#endif
if (o==-1) break;
switch (o) {
case '?': printf("unknown option: %s\n",argv[optind-1]);
exit(EXIT_FAILURE);
break;
/* ugly workaround to handle optional arguments for all platforms */
case ':': if (!strcmp(argv[optind-1],"-a") || !strcasecmp(argv[optind-1],"--allif"))
show_all_if=1;
else if (!strcmp(argv[optind-1],"-d") || !strcasecmp(argv[optind-1],"--dynamic"))
dynamic=1;
else if (!strcmp(argv[optind-1],"-D") || !strcasecmp(argv[optind-1],"--daemon"))
daemonize=1;
#ifdef HTML
else if (!strcmp(argv[optind-1],"-H") || !strcasecmp(argv[optind-1],"--htmlheader"))
html_header=1;
#endif
else if (!strcmp(argv[optind-1],"-S") || !strcasecmp(argv[optind-1],"--sumhidden"))
sumhidden=1;
else {
printf("%s requires an argument!\n",argv[optind-1]);
exit(EXIT_FAILURE);
}
break;
#ifdef PROC_DISKSTATS
case 1000:
if (strlen(optarg)<PATH_MAX)
strcpy(PROC_DISKSTATS_FILE,optarg);
break;
case 1001:
if (strlen(optarg)<PATH_MAX)
strcpy(PROC_PARTITIONS_FILE,optarg);
break;
#endif
#if IOSERVICE_IN
case 1002:
long_darwin_disk_names=!long_darwin_disk_names;
break;
#endif
case 'D':
if (optarg) daemonize=atoi(optarg);
break;
#ifdef HTML
case 'R':
if ((optarg) && atol(optarg)>0) { html_refresh=atol(optarg); }
break;
case 'H':
if (optarg) html_header=atoi(optarg);
break;
#endif
case 'c':
if (optarg) output_count=atol(optarg);
break;
#if CSV || HTML
case 'F':
if (optarg) {
if (out_file) fclose(out_file);
out_file=fopen(optarg,"a");
if (!out_file) deinit(1, "failed to open outfile\n");
if (out_file_path) free(out_file_path);
out_file_path=(char *)strdup(optarg);
}
break;
#endif
#ifdef CSV
case 'C':
if (optarg) csv_char=optarg[0];
break;
#endif
case 'h':
cmdln_printhelp();
break;
#ifdef PROC_NET_DEV
case 'f':
if (optarg && (strlen(optarg)<PATH_MAX)) strcpy(PROC_FILE,optarg);
break;
#endif
case 'i':
if (optarg) {
input_method=str2in_method(optarg);
}
break;
case 'I':
if (optarg) iface_list=(char *)strdup(optarg);
break;
case 'S':
if (optarg) sumhidden=atoi(optarg);
break;
case 'o':
if (optarg) {
output_method=str2out_method(optarg);
}
break;
case 'N':
ansi_output=!ansi_output;
case 'a':
if (optarg) show_all_if=atoi(optarg);
break;
case 't':
if ((optarg) && atol(optarg)>0) { delay=atol(optarg); }
break;
#if EXTENDED_STATS
case 'T':
output_type=str2output_type(optarg);
break;
case 'A':
if (optarg) avg_length=atoi(optarg)*1000;
break;
#endif
case 'd':
if (optarg) dynamic=atoi(optarg);
break;
case 'u':
output_unit=str2output_unit(optarg);
break;
#if NETSTAT && ALLOW_NETSTATPATH
case 'n':
if (optarg && (strlen(optarg)<PATH_MAX)) strcpy(NETSTAT_FILE,optarg);
break;
#endif
case 'V':
print_version;
exit(EXIT_SUCCESS);
break;
}
}
if (iface_list==NULL && show_all_if==1) show_all_if=2;
#if EXTENDED_STATS
/* default init of avg_length */
if (avg_length==0) {
if (delay<AVG_LENGTH/2)
avg_length=AVG_LENGTH;
else
avg_length=(delay*2)+1;
} else /* avg_length was set via cmdline or config file, better check it */
if (delay*2>=avg_length) deinit(1, "avglength needs to be a least twice the value of timeout\n");
#endif
if ((output_unit==ERRORS_OUT && !net_input_method(input_method)) ||
(output_unit==PACKETS_OUT && input_method==LIBSTATDISK_IN))
output_unit=BYTES_OUT;
return;
} | 0 | [
"CWE-476"
]
| bwm-ng | 9774f23bf78a6e6d3ae4cfe3d73bad34f2fdcd17 | 193,898,408,646,396,080,000,000,000,000,000,000,000 | 231 | Fix https://github.com/vgropp/bwm-ng/issues/26 |
rsvg_filter_primitive_displacement_map_free (RsvgNode * self)
{
RsvgFilterPrimitiveDisplacementMap *upself;
upself = (RsvgFilterPrimitiveDisplacementMap *) self;
g_string_free (upself->super.result, TRUE);
g_string_free (upself->super.in, TRUE);
g_string_free (upself->in2, TRUE);
_rsvg_node_free (self);
} | 0 | []
| librsvg | 34c95743ca692ea0e44778e41a7c0a129363de84 | 43,695,486,725,498,100,000,000,000,000,000,000,000 | 10 | Store node type separately in RsvgNode
The node name (formerly RsvgNode:type) cannot be used to infer
the sub-type of RsvgNode that we're dealing with, since for unknown
elements we put type = node-name. This led to a (potentially exploitable)
crash e.g. when the element name started with "fe" which tricked
the old code into considering it as a RsvgFilterPrimitive.
CVE-2011-3146
https://bugzilla.gnome.org/show_bug.cgi?id=658014 |
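The fix rationale above generalizes: dispatching on a stored type tag is safer than inferring a concrete type from a name prefix. A hypothetical miniature (plain C, not the librsvg data structures; "fecundity" is an invented element name) shows why:

```c
#include <stdio.h>
#include <string.h>

/* Deciding an object's concrete type from its element name is fragile,
 * because unknown elements can share a prefix ("fecundity" starts with
 * "fe" but is not a filter primitive).  Carrying an explicit type tag
 * makes the downcast safe. */
enum node_type { NODE_UNKNOWN, NODE_FILTER_PRIMITIVE };

struct node {
    const char *name;       /* element name as parsed */
    enum node_type type;    /* set only when the element is recognized */
};

static int is_filter_primitive_by_name(const struct node *n)
{
    return strncmp(n->name, "fe", 2) == 0;   /* fragile: prefix match */
}

static int is_filter_primitive_by_type(const struct node *n)
{
    return n->type == NODE_FILTER_PRIMITIVE; /* robust: explicit tag */
}

int main(void)
{
    struct node unknown = { "fecundity", NODE_UNKNOWN };

    printf("by name: %d (would be downcast incorrectly)\n",
           is_filter_primitive_by_name(&unknown));
    printf("by type: %d (correctly rejected)\n",
           is_filter_primitive_by_type(&unknown));
    return 0;
}
```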
struct accept *parse_accept(const char **hdr)
{
int i, n = 0, alloc = 0;
struct accept *ret = NULL;
#define GROW_ACCEPT 10;
for (i = 0; hdr && hdr[i]; i++) {
tok_t tok = TOK_INITIALIZER(hdr[i], ";,", TOK_TRIMLEFT|TOK_TRIMRIGHT);
char *token;
while ((token = tok_next(&tok))) {
if (!strncmp(token, "q=", 2)) {
if (!ret) break;
ret[n-1].qual = strtof(token+2, NULL);
}
else {
if (n + 1 >= alloc) {
alloc += GROW_ACCEPT;
ret = xrealloc(ret, alloc * sizeof(struct accept));
}
ret[n].token = xstrdup(token);
ret[n].qual = 1.0;
ret[++n].token = NULL;
}
}
tok_fini(&tok);
}
qsort(ret, n, sizeof(struct accept),
(int (*)(const void *, const void *)) &compare_accept);
return ret;
} | 0 | [
"CWE-787"
]
| cyrus-imapd | a5779db8163b99463e25e7c476f9cbba438b65f3 | 268,082,019,748,935,560,000,000,000,000,000,000,000 | 33 | HTTP: don't overrun buffer when parsing strings with sscanf() |
static int _assign_xshm(Display *dpy, XErrorEvent *error) {
cimg::unused(dpy,error);
cimg::X11_attr().is_shm_enabled = false;
return 0;
} | 0 | [
"CWE-770"
]
| cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 52,345,218,304,976,665,000,000,000,000,000,000,000 | 5 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
new_abbrev_list (dwarf_vma abbrev_base, dwarf_vma abbrev_offset)
{
abbrev_list * list = (abbrev_list *) xcalloc (sizeof * list, 1);
list->abbrev_base = abbrev_base;
list->abbrev_offset = abbrev_offset;
list->next = abbrev_lists;
abbrev_lists = list;
return list;
} | 0 | [
"CWE-269"
]
| binutils-gdb | e98e7d9a70dcc987bff0e925f20b78cd4a2979ed | 233,888,932,981,927,600,000,000,000,000,000,000,000 | 12 | Fix NULL pointer indirection when parsing corrupt DWARF data.
PR 29290
* dwarf.c (read_and_display_attr_value): Check that debug_info_p
is set before dereferencing it. |
set_init_2(void)
{
int idx;
/*
* 'scroll' defaults to half the window height. The stored default is zero,
* which results in the actual value computed from the window height.
*/
idx = findoption((char_u *)"scroll");
if (idx >= 0 && !(options[idx].flags & P_WAS_SET))
set_option_default(idx, OPT_LOCAL, p_cp);
comp_col();
/*
* 'window' is only for backwards compatibility with Vi.
* Default is Rows - 1.
*/
if (!option_was_set((char_u *)"window"))
p_window = Rows - 1;
set_number_default("window", Rows - 1);
// For DOS console the default is always black.
#if !((defined(MSWIN)) && !defined(FEAT_GUI))
/*
* If 'background' wasn't set by the user, try guessing the value,
* depending on the terminal name. Only need to check for terminals
* with a dark background, that can handle color.
*/
idx = findoption((char_u *)"bg");
if (idx >= 0 && !(options[idx].flags & P_WAS_SET)
&& *term_bg_default() == 'd')
{
set_string_option_direct(NULL, idx, (char_u *)"dark", OPT_FREE, 0);
// don't mark it as set, when starting the GUI it may be
// changed again
options[idx].flags &= ~P_WAS_SET;
}
#endif
#ifdef CURSOR_SHAPE
parse_shape_opt(SHAPE_CURSOR); // set cursor shapes from 'guicursor'
#endif
#ifdef FEAT_MOUSESHAPE
parse_shape_opt(SHAPE_MOUSE); // set mouse shapes from 'mouseshape'
#endif
#ifdef FEAT_PRINTER
(void)parse_printoptions(); // parse 'printoptions' default value
#endif
} | 0 | [
"CWE-122"
]
| vim | b7081e135a16091c93f6f5f7525a5c58fb7ca9f9 | 46,328,467,051,028,480,000,000,000,000,000,000,000 | 49 | patch 8.2.3402: invalid memory access when using :retab with large value
Problem: Invalid memory access when using :retab with large value.
Solution: Check the number is positive. |
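The one-line solution ("check the number is positive") is an instance of validating a user-supplied numeric option before it is used in size arithmetic. A hedged, stand-alone sketch — not Vim code; parse_tabstop and the 10000 cap are invented for the illustration — could be:

```c
#include <stdio.h>
#include <stdlib.h>

/* A numeric option that will later be used as a column width or
 * allocation size must be rejected if it is zero, negative, or
 * implausibly large, before any arithmetic is done with it. */
static int parse_tabstop(const char *arg, long *out)
{
    char *end = NULL;
    long val = strtol(arg, &end, 10);

    if (end == arg || *end != '\0')
        return -1;                      /* not a number */
    if (val <= 0 || val > 10000)        /* reject non-positive / huge values */
        return -1;
    *out = val;
    return 0;
}

int main(void)
{
    long ts;
    const char *samples[] = { "8", "-3000000000", "999999999999", "4x" };

    for (int i = 0; i < 4; i++)
        printf("%-14s -> %s\n", samples[i],
               parse_tabstop(samples[i], &ts) == 0 ? "accepted" : "rejected");
    return 0;
}
```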
void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
{
BUG_ON(kgdb_connected);
/*
* KGDB is no longer able to communicate out, so
* unregister our callbacks and reset state.
*/
kgdb_unregister_callbacks();
spin_lock(&kgdb_registration_lock);
WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops);
dbg_io_ops = NULL;
spin_unlock(&kgdb_registration_lock);
if (old_dbg_io_ops->deinit)
old_dbg_io_ops->deinit();
pr_info("Unregistered I/O driver %s, debugger disabled\n",
old_dbg_io_ops->name);
} | 0 | [
"CWE-787"
]
| linux | eadb2f47a3ced5c64b23b90fd2a3463f63726066 | 92,931,020,657,286,700,000,000,000,000,000,000,000 | 23 | lockdown: also lock down previous kgdb use
KGDB and KDB allow read and write access to kernel memory, and thus
should be restricted during lockdown. An attacker with access to a
serial port (for example, via a hypervisor console, which some cloud
vendors provide over the network) could trigger the debugger so it is
important that the debugger respect the lockdown mode when/if it is
triggered.
Fix this by integrating lockdown into kdb's existing permissions
mechanism. Unfortunately kgdb does not have any permissions mechanism
(although it certainly could be added later) so, for now, kgdb is simply
and brutally disabled by immediately exiting the gdb stub without taking
any action.
For lockdowns established early in the boot (e.g. the normal case) this
should be fine, but on systems where kgdb has set breakpoints before the
lockdown is enacted, "bad things" will happen.
CVE: CVE-2022-21499
Co-developed-by: Stephen Brennan <[email protected]>
Signed-off-by: Stephen Brennan <[email protected]>
Reviewed-by: Douglas Anderson <[email protected]>
Signed-off-by: Daniel Thompson <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static int btrfs_setsize(struct inode *inode, struct iattr *attr)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
loff_t oldsize = i_size_read(inode);
loff_t newsize = attr->ia_size;
int mask = attr->ia_valid;
int ret;
/*
* The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
* special case where we need to update the times despite not having
* these flags set. For all other operations the VFS set these flags
* explicitly if it wants a timestamp update.
*/
if (newsize != oldsize) {
inode_inc_iversion(inode);
if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
inode->i_ctime = inode->i_mtime =
current_fs_time(inode->i_sb);
}
if (newsize > oldsize) {
truncate_pagecache(inode, newsize);
/*
* Don't do an expanding truncate while snapshoting is ongoing.
* This is to ensure the snapshot captures a fully consistent
* state of this file - if the snapshot captures this expanding
* truncation, it must capture all writes that happened before
* this truncation.
*/
wait_for_snapshot_creation(root);
ret = btrfs_cont_expand(inode, oldsize, newsize);
if (ret) {
btrfs_end_write_no_snapshoting(root);
return ret;
}
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
btrfs_end_write_no_snapshoting(root);
return PTR_ERR(trans);
}
i_size_write(inode, newsize);
btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
ret = btrfs_update_inode(trans, root, inode);
btrfs_end_write_no_snapshoting(root);
btrfs_end_transaction(trans, root);
} else {
/*
* We're truncating a file that used to have good data down to
* zero. Make sure it gets into the ordered flush list so that
* any new writes get down to disk quickly.
*/
if (newsize == 0)
set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
&BTRFS_I(inode)->runtime_flags);
/*
* 1 for the orphan item we're going to add
* 1 for the orphan item deletion.
*/
trans = btrfs_start_transaction(root, 2);
if (IS_ERR(trans))
return PTR_ERR(trans);
/*
* We need to do this in case we fail at _any_ point during the
* actual truncate. Once we do the truncate_setsize we could
* invalidate pages which forces any outstanding ordered io to
* be instantly completed which will give us extents that need
* to be truncated. If we fail to get an orphan inode down we
* could have left over extents that were never meant to live,
* so we need to garuntee from this point on that everything
* will be consistent.
*/
ret = btrfs_orphan_add(trans, inode);
btrfs_end_transaction(trans, root);
if (ret)
return ret;
/* we don't support swapfiles, so vmtruncate shouldn't fail */
truncate_setsize(inode, newsize);
/* Disable nonlocked read DIO to avoid the end less truncate */
btrfs_inode_block_unlocked_dio(inode);
inode_dio_wait(inode);
btrfs_inode_resume_unlocked_dio(inode);
ret = btrfs_truncate(inode);
if (ret && inode->i_nlink) {
int err;
/*
* failed to truncate, disk_i_size is only adjusted down
* as we remove extents, so it should represent the true
* size of the inode, so reset the in memory size and
* delete our orphan entry.
*/
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
btrfs_orphan_del(NULL, inode);
return ret;
}
i_size_write(inode, BTRFS_I(inode)->disk_i_size);
err = btrfs_orphan_del(trans, inode);
if (err)
btrfs_abort_transaction(trans, root, err);
btrfs_end_transaction(trans, root);
}
}
return ret;
} | 0 | [
"CWE-200"
]
| linux | 0305cd5f7fca85dae392b9ba85b116896eb7c1c7 | 208,505,013,849,787,430,000,000,000,000,000,000,000 | 116 | Btrfs: fix truncation of compressed and inlined extents
When truncating a file to a smaller size which consists of an inline
extent that is compressed, we did not discard (or make unusable) the
data between the new file size and the old file size, wasting metadata
space and allowing for the truncated data to be leaked and the data
corruption/loss mentioned below.
We were also not correctly decrementing the number of bytes used by the
inode; we were setting it to zero, giving a wrong report to callers of
the stat(2) syscall. The fsck tool also reported an error about a mismatch
between the nbytes of the file versus the real space used by the file.
Now because we weren't discarding the truncated region of the file, it
was possible for a caller of the clone ioctl to actually read the data
that was truncated, allowing for a security breach without requiring root
access to the system, using only standard filesystem operations. The
scenario is the following:
1) User A creates a file which consists of an inline and compressed
extent with a size of 2000 bytes - the file is not accessible to
any other users (no read, write or execution permission for anyone
else);
2) The user truncates the file to a size of 1000 bytes;
3) User A makes the file world readable;
4) User B creates a file consisting of an inline extent of 2000 bytes;
5) User B issues a clone operation from user A's file into its own
file (using a length argument of 0, clone the whole range);
6) User B now gets to see the 1000 bytes that user A truncated from
its file before it made its file world readable. User B also lost
the bytes in the range [1000, 2000[ bytes from its own file, but
that might be ok if his/her intention was reading stale data from
user A that was never supposed to be public.
Note that this contrasts with the case where we truncate a file from 2000
bytes to 1000 bytes and then truncate it back from 1000 to 2000 bytes. In
this case reading any byte from the range [1000, 2000[ will return a value
of 0x00, instead of the original data.
This problem exists since the clone ioctl was added and happens both with
and without my recent data loss and file corruption fixes for the clone
ioctl (patch "Btrfs: fix file corruption and data loss after cloning
inline extents").
So fix this by truncating the compressed inline extents as we do for the
non-compressed case, which involves decompressing, if the data isn't already
in the page cache, compressing the truncated version of the extent, writing
the compressed content into the inline extent and then truncate it.
The following test case for fstests reproduces the problem. In order for
the test to pass both this fix and my previous fix for the clone ioctl
that forbids cloning a smaller inline extent into a larger one,
which is titled "Btrfs: fix file corruption and data loss after cloning
inline extents", are needed. Without that other fix the test fails in a
different way that does not leak the truncated data, instead part of
destination file gets replaced with zeroes (because the destination file
has a larger inline extent than the source).
seq=`basename $0`
seqres=$RESULT_DIR/$seq
echo "QA output created by $seq"
tmp=/tmp/$$
status=1 # failure is the default!
trap "_cleanup; exit \$status" 0 1 2 3 15
_cleanup()
{
rm -f $tmp.*
}
# get standard environment, filters and checks
. ./common/rc
. ./common/filter
# real QA test starts here
_need_to_be_root
_supported_fs btrfs
_supported_os Linux
_require_scratch
_require_cloner
rm -f $seqres.full
_scratch_mkfs >>$seqres.full 2>&1
_scratch_mount "-o compress"
# Create our test files. File foo is going to be the source of a clone operation
# and consists of a single inline extent with an uncompressed size of 512 bytes,
# while file bar consists of a single inline extent with an uncompressed size of
# 256 bytes. For our test's purpose, it's important that file bar has an inline
# extent with a size smaller than foo's inline extent.
$XFS_IO_PROG -f -c "pwrite -S 0xa1 0 128" \
-c "pwrite -S 0x2a 128 384" \
$SCRATCH_MNT/foo | _filter_xfs_io
$XFS_IO_PROG -f -c "pwrite -S 0xbb 0 256" $SCRATCH_MNT/bar | _filter_xfs_io
# Now durably persist all metadata and data. We do this to make sure that we get
# on disk an inline extent with a size of 512 bytes for file foo.
sync
# Now truncate our file foo to a smaller size. Because it consists of a
# compressed and inline extent, btrfs did not shrink the inline extent to the
# new size (if the extent was not compressed, btrfs would shrink it to 128
# bytes), it only updates the inode's i_size to 128 bytes.
$XFS_IO_PROG -c "truncate 128" $SCRATCH_MNT/foo
# Now clone foo's inline extent into bar.
# This clone operation should fail with errno EOPNOTSUPP because the source
# file consists only of an inline extent and the file's size is smaller than
# the inline extent of the destination (128 bytes < 256 bytes). However the
# clone ioctl was not prepared to deal with a file that has a size smaller
# than the size of its inline extent (something that happens only for compressed
# inline extents), resulting in copying the full inline extent from the source
# file into the destination file.
#
# Note that btrfs' clone operation for inline extents consists of removing the
# inline extent from the destination inode and copy the inline extent from the
# source inode into the destination inode, meaning that if the destination
# inode's inline extent is larger (N bytes) than the source inode's inline
# extent (M bytes), some bytes (N - M bytes) will be lost from the destination
# file. Btrfs could copy the source inline extent's data into the destination's
# inline extent so that we would not lose any data, but that's currently not
# done due to the complexity that would be needed to deal with such cases
# (specially when one or both extents are compressed), returning EOPNOTSUPP, as
# it's normally not a very common case to clone very small files (only case
# where we get inline extents) and copying inline extents does not save any
# space (unlike for normal, non-inlined extents).
$CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/foo $SCRATCH_MNT/bar
# Now because the above clone operation used to succeed, and due to foo's inline
# extent not being shinked by the truncate operation, our file bar got the whole
# inline extent copied from foo, making us lose the last 128 bytes from bar
# which got replaced by the bytes in range [128, 256[ from foo before foo was
# truncated - in other words, data loss from bar and being able to read old and
# stale data from foo that should not be possible to read anymore through normal
# filesystem operations. Contrast with the case where we truncate a file from a
# size N to a smaller size M, truncate it back to size N and then read the range
# [M, N[, we should always get the value 0x00 for all the bytes in that range.
# We expected the clone operation to fail with errno EOPNOTSUPP and therefore
# not modify our file's bar data/metadata. So its content should be 256 bytes
# long with all bytes having the value 0xbb.
#
# Without the btrfs bug fix, the clone operation succeeded and resulted in
# leaking truncated data from foo, the bytes that belonged to its range
# [128, 256[, and losing data from bar in that same range. So reading the
# file gave us the following content:
#
# 0000000 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1
# *
# 0000200 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a
# *
# 0000400
echo "File bar's content after the clone operation:"
od -t x1 $SCRATCH_MNT/bar
# Also because foo's inline extent was not shrunk by the truncate
# operation, btrfs' fsck, which is run by the fstests framework every time a
# test completes, failed reporting the following error:
#
# root 5 inode 257 errors 400, nbytes wrong
status=0
exit
Cc: [email protected]
Signed-off-by: Filipe Manana <[email protected]> |
static void ip6gre_tap_setup(struct net_device *dev)
{
ether_setup(dev);
dev->netdev_ops = &ip6gre_tap_netdev_ops;
dev->destructor = ip6gre_dev_free;
dev->features |= NETIF_F_NETNS_LOCAL;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
} | 0 | [
"CWE-125"
]
| net | 7892032cfe67f4bde6fc2ee967e45a8fbaf33756 | 223,202,236,972,736,400,000,000,000,000,000,000,000 | 12 | ip6_gre: fix ip6gre_err() invalid reads
Andrey Konovalov reported out of bound accesses in ip6gre_err()
If GRE flags contains GRE_KEY, the following expression
*(((__be32 *)p) + (grehlen / 4) - 1)
accesses data ~40 bytes after the expected point, since
grehlen includes the size of IPv6 headers.
Let's use a "struct gre_base_hdr *greh" pointer to make this
code more readable.
p[1] becomes greh->protocol.
grhlen is the GRE header length.
Fixes: c12b395a4664 ("gre: Support GRE over IPv6")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
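The arithmetic behind the out-of-bounds read can be shown in isolation. The toy program below (not kernel code) evaluates the key-offset expression from the changelog with a header length that wrongly includes the 40-byte IPv6 header versus one that does not:

```c
#include <stdio.h>

/* Toy illustration: if the "GRE header length" used to locate the key
 * accidentally includes the 40-byte IPv6 header, the computed key offset
 * lands far beyond the real GRE header. */
int main(void)
{
    const unsigned ipv6_hdr_len = 40;
    const unsigned gre_base_len = 4;   /* flags + protocol */
    const unsigned gre_key_len  = 4;

    unsigned grehlen_wrong = ipv6_hdr_len + gre_base_len + gre_key_len;
    unsigned grehlen_right = gre_base_len + gre_key_len;

    /* The key is read as *(((__be32 *)p) + (grehlen / 4) - 1), i.e. the
     * last 32-bit word of the header: */
    printf("byte offset of key with wrong length: %u\n",
           (grehlen_wrong / 4 - 1) * 4);   /* 44 -> ~40 bytes too far */
    printf("byte offset of key with right length: %u\n",
           (grehlen_right / 4 - 1) * 4);   /* 4  -> correct position */
    return 0;
}
```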
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
int len, void *val)
{
struct kvm_io_bus *bus;
struct kvm_io_range range;
int r;
range = (struct kvm_io_range) {
.addr = addr,
.len = len,
};
bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
r = __kvm_io_bus_read(vcpu, bus, &range, val);
return r < 0 ? r : 0;
} | 0 | [
"CWE-416",
"CWE-284"
]
| linux | a0f1d21c1ccb1da66629627a74059dd7f5ac9c61 | 69,958,473,597,827,260,000,000,000,000,000,000,000 | 16 | KVM: use after free in kvm_ioctl_create_device()
We should move the ops->destroy(dev) after the list_del(&dev->vm_node)
so that we don't use "dev" after freeing it.
Fixes: a28ebea2adc4 ("KVM: Protect device ops->create and list_add with kvm->lock")
Signed-off-by: Dan Carpenter <[email protected]>
Reviewed-by: David Hildenbrand <[email protected]>
Signed-off-by: Radim Krčmář <[email protected]> |
MONGO_EXPORT void mongo_cursor_set_options( mongo_cursor *cursor, int options ) {
cursor->options = options;
} | 0 | [
"CWE-190"
]
| mongo-c-driver-legacy | 1a1f5e26a4309480d88598913f9eebf9e9cba8ca | 280,341,835,850,472,440,000,000,000,000,000,000,000 | 3 | don't mix up int and size_t (first pass to fix that) |
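The commit title points at a common C portability hazard: int is signed and usually 32 bits, while size_t is unsigned and usually 64 bits, so mixing them turns negative lengths into huge ones and truncates large sizes. A small stand-alone illustration (not driver code):

```c
#include <stdio.h>

int main(void)
{
    int bad_len = -1;
    size_t as_size = (size_t)bad_len;          /* wraps to SIZE_MAX */
    printf("-1 as size_t: %zu\n", as_size);

    size_t big = (size_t)3 * 1024 * 1024 * 1024;   /* 3 GiB */
    int truncated = (int)big;   /* out of range: implementation-defined */
    printf("3 GiB as int: %d\n", truncated);
    return 0;
}
```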
void CLua::init_libraries()
{
lua_stack_cleaner clean(state());
lua_pushcfunction(_state, lua_loadstring);
lua_setglobal(_state, "loadstring");
lua_pushnil(_state);
lua_setglobal(_state, "load");
// Open Crawl bindings
cluaopen_kills(_state);
cluaopen_you(_state);
cluaopen_item(_state);
cluaopen_food(_state);
cluaopen_crawl(_state);
cluaopen_file(_state);
cluaopen_moninf(_state);
cluaopen_options(_state);
cluaopen_travel(_state);
cluaopen_view(_state);
cluaopen_spells(_state);
cluaopen_globals(_state);
execfile("dlua/macro.lua", true, true);
// All hook names must be chk_????
execstring("chk_startgame = { }", "base");
lua_register(_state, "loadfile", _clua_loadfile);
lua_register(_state, "dofile", _clua_dofile);
lua_register(_state, "crawl_require", _clua_require);
execfile("dlua/util.lua", true, true);
execfile("dlua/iter.lua", true, true);
execfile("dlua/tags.lua", true, true);
execfile("dlua/init.lua", true, true);
if (managed_vm)
{
lua_register(_state, "pcall", _clua_guarded_pcall);
execfile("dlua/userbase.lua", true, true);
execfile("dlua/persist.lua", true, true);
}
} | 0 | [
"CWE-434"
]
| crawl | fc522ff6eb1bbb85e3de60c60a45762571e48c28 | 66,541,364,046,760,640,000,000,000,000,000,000,000 | 46 | Disable lua load(), loadstring() bytcode loading |
static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
struct hci_cb *cb;
mutex_lock(&hci_cb_list_lock);
list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->disconn_cfm)
cb->disconn_cfm(conn, reason);
}
mutex_unlock(&hci_cb_list_lock);
if (conn->disconn_cfm_cb)
conn->disconn_cfm_cb(conn, reason);
} | 0 | [
"CWE-290"
]
| linux | 3ca44c16b0dcc764b641ee4ac226909f5c421aa3 | 302,415,003,196,014,040,000,000,000,000,000,000,000 | 14 | Bluetooth: Consolidate encryption handling in hci_encrypt_cfm
This makes hci_encrypt_cfm calls hci_connect_cfm in case the connection
state is BT_CONFIG so callers don't have to check the state.
Signed-off-by: Luiz Augusto von Dentz <[email protected]>
Signed-off-by: Marcel Holtmann <[email protected]> |
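The consolidation described above is a common refactor: move a state check into the notification helper itself so no call site can forget it. A hypothetical miniature (plain C, not the kernel Bluetooth structures; all names are invented) sketches the idea:

```c
#include <stdio.h>

/* The helper itself decides, based on the connection state, which
 * notification to emit, so callers cannot forget the state check. */
enum conn_state { STATE_CONFIG, STATE_CONNECTED };
struct conn { enum conn_state state; };

static void connect_cfm(struct conn *c, int status)
{
    (void)c;
    printf("connect confirmed, status %d\n", status);
}

static void encrypt_cfm(struct conn *c, int status)
{
    /* During configuration, an encryption change doubles as the
     * connection confirmation; handle that here instead of at every
     * caller. */
    if (c->state == STATE_CONFIG) {
        c->state = STATE_CONNECTED;
        connect_cfm(c, status);
        return;
    }
    printf("encryption change, status %d\n", status);
}

int main(void)
{
    struct conn c = { STATE_CONFIG };
    encrypt_cfm(&c, 0);   /* routed to connect_cfm by the helper */
    encrypt_cfm(&c, 0);   /* ordinary encryption change afterwards */
    return 0;
}
```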
static inline __s32 inotify_find_update_watch(struct inotify_handle *ih,
struct inode *inode, u32 mask)
{
return -EOPNOTSUPP;
} | 0 | [
"CWE-362"
]
| linux-2.6 | 8f7b0ba1c853919b85b54774775f567f30006107 | 129,329,883,160,602,420,000,000,000,000,000,000,000 | 5 | Fix inotify watch removal/umount races
Inotify watch removals suck violently.
To kick the watch out we need (in this order) inode->inotify_mutex and
ih->mutex. That's fine if we have a hold on inode; however, for all
other cases we need to make damn sure we don't race with umount. We can
*NOT* just grab a reference to a watch - inotify_unmount_inodes() will
happily sail past it and we'll end with reference to inode potentially
outliving its superblock.
Ideally we just want to grab an active reference to superblock if we
can; that will make sure we won't go into inotify_umount_inodes() until
we are done. Cleanup is just deactivate_super().
However, that leaves a messy case - what if we *are* racing with
umount() and active references to superblock can't be acquired anymore?
We can bump ->s_count, grab ->s_umount, which will almost certainly wait
until the superblock is shut down and the watch in question is pining
for fjords. That's fine, but there is a problem - we might have hit the
window between ->s_active getting to 0 / ->s_count - below S_BIAS (i.e.
the moment when superblock is past the point of no return and is heading
for shutdown) and the moment when deactivate_super() acquires
->s_umount.
We could just do drop_super() yield() and retry, but that's rather
antisocial and this stuff is luser-triggerable. OTOH, having grabbed
->s_umount and having found that we'd got there first (i.e. that
->s_root is non-NULL) we know that we won't race with
inotify_umount_inodes().
So we could grab a reference to watch and do the rest as above, just
with drop_super() instead of deactivate_super(), right? Wrong. We had
to drop ih->mutex before we could grab ->s_umount. So the watch
could've been gone already.
That still can be dealt with - we need to save watch->wd, do idr_find()
and compare its result with our pointer. If they match, we either have
the damn thing still alive or we'd lost not one but two races at once,
the watch had been killed and a new one got created with the same ->wd
at the same address. That couldn't have happened in inotify_destroy(),
but inotify_rm_wd() could run into that. Still, "new one got created"
is not a problem - we have every right to kill it or leave it alone,
whatever's more convenient.
So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
"grab it and kill it" check. If it's been our original watch, we are
fine, if it's a newcomer - nevermind, just pretend that we'd won the
race and kill the fscker anyway; we are safe since we know that its
superblock won't be going away.
And yes, this is far beyond mere "not very pretty"; so's the entire
concept of inotify to start with.
Signed-off-by: Al Viro <[email protected]>
Acked-by: Greg KH <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
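The closing trick above — remember the descriptor, redo the lookup after the locks are reacquired, and compare pointers before acting — is a reusable pattern. A hypothetical, single-threaded miniature (no idr, no superblocks; registry_find and the array are invented) demonstrates only the identity re-check:

```c
#include <stdio.h>

/* Miniature registry: the slot index plays the role of the watch
 * descriptor. */
#define SLOTS 4
struct watch { int wd; const char *owner; };
static struct watch *registry[SLOTS];

static struct watch *registry_find(int wd)
{
    return (wd >= 0 && wd < SLOTS) ? registry[wd] : NULL;
}

int main(void)
{
    static struct watch original = { 1, "sb-A" };
    static struct watch newcomer = { 1, "sb-B" };

    registry[1] = &original;
    struct watch *saved = registry_find(1);
    int saved_wd = saved->wd;

    /* ... locks dropped here: another thread could free the watch and
     * install a different one under the same descriptor ... */
    registry[1] = &newcomer;

    /* Re-check after "reacquiring" the locks: same wd, but the pointer no
     * longer matches, so the saved reference must not be trusted. */
    struct watch *now = registry_find(saved_wd);
    if (now == saved)
        printf("same watch, safe to act on it\n");
    else
        printf("watch was replaced, fall back to the slow path\n");
    return 0;
}
```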
static double CubicSpline(const double x,const ResizeFilter *resize_filter)
{
if (resize_filter->support <= 2.0)
{
/*
2-lobe Spline filter.
*/
if (x < 1.0)
return(((x-9.0/5.0)*x-1.0/5.0)*x+1.0);
if (x < 2.0)
return(((-1.0/3.0*(x-1.0)+4.0/5.0)*(x-1.0)-7.0/15.0)*(x-1.0));
return(0.0);
}
if (resize_filter->support <= 3.0)
{
/*
3-lobe Spline filter.
*/
if (x < 1.0)
return(((13.0/11.0*x-453.0/209.0)*x-3.0/209.0)*x+1.0);
if (x < 2.0)
return(((-6.0/11.0*(x-1.0)+270.0/209.0)*(x-1.0)-156.0/209.0)*(x-1.0));
if (x < 3.0)
return(((1.0/11.0*(x-2.0)-45.0/209.0)*(x-2.0)+26.0/209.0)*(x-2.0));
return(0.0);
}
/*
4-lobe Spline filter.
*/
if (x < 1.0)
return(((49.0/41.0*x-6387.0/2911.0)*x-3.0/2911.0)*x+1.0);
if (x < 2.0)
return(((-24.0/41.0*(x-1.0)+4032.0/2911.0)*(x-1.0)-2328.0/2911.0)*(x-1.0));
if (x < 3.0)
return(((6.0/41.0*(x-2.0)-1008.0/2911.0)*(x-2.0)+582.0/2911.0)*(x-2.0));
if (x < 4.0)
return(((-1.0/41.0*(x-3.0)+168.0/2911.0)*(x-3.0)-97.0/2911.0)*(x-3.0));
return(0.0);
} | 0 | [
"CWE-125"
]
| ImageMagick | c5402b6e0fcf8b694ae2af6a6652ebb8ce0ccf46 | 172,110,835,813,872,900,000,000,000,000,000,000,000 | 39 | https://github.com/ImageMagick/ImageMagick/issues/717 |
u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo)
{
u32 val = hinfo->index;
do {
if (++val == 0)
val = 1;
} while (tcf_hash_lookup(val, hinfo));
hinfo->index = val;
return val;
} | 0 | [
"CWE-264"
]
| net | 90f62cf30a78721641e08737bda787552428061e | 247,761,848,314,325,460,000,000,000,000,000,000,000 | 12 | net: Use netlink_ns_capable to verify the permisions of netlink messages
It is possible, by passing a netlink socket to a more privileged
executable and then fooling that executable into writing to the socket
data that happens to be a valid netlink message, to make the privileged
executable do something it did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
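The essence of the fix above is that permission checks on an inherited socket must consider who opened the socket, not just who is currently writing to it. A hypothetical miniature of that rule (plain C; nl_sock and sock_may_do_privileged are invented, not kernel APIs):

```c
#include <stdio.h>

/* The capability snapshot is taken when the socket is created, so a
 * privileged process that later inherits the fd does not lend its
 * privileges to the socket's original opener. */
struct nl_sock { int opener_has_cap; };

static int sock_may_do_privileged(const struct nl_sock *sk, int caller_has_cap)
{
    /* Both the original opener and the current caller must be allowed. */
    return sk->opener_has_cap && caller_has_cap;
}

int main(void)
{
    struct nl_sock opened_by_unpriv = { 0 };

    /* Even though the current caller is privileged (1), the request is
     * refused because the socket's opener was not. */
    printf("%s\n", sock_may_do_privileged(&opened_by_unpriv, 1)
                       ? "allowed" : "denied");
    return 0;
}
```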
static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, unsigned long framesize)
{
unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS;
/*
* If we are on the alternate signal stack and would overflow it, don't.
* Return an always-bogus address instead so we will die with SIGSEGV.
*/
if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize)))
return (void __user *) -1L;
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
if (sas_ss_flags(sp) == 0)
sp = current->sas_ss_sp + current->sas_ss_size;
}
/* Always align the stack frame. This handles two cases. First,
* sigaltstack need not be mindful of platform specific stack
* alignment. Second, if we took this signal because the stack
* is not aligned properly, we'd like to take the signal cleanly
* and report that.
*/
sp &= ~7UL;
return (void __user *)(sp - framesize);
} | 0 | []
| linux-2.6 | ee18d64c1f632043a02e6f5ba5e045bb26a5465f | 20,408,133,891,667,473,000,000,000,000,000,000,000 | 27 | KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]
Add a keyctl to install a process's session keyring onto its parent. This
replaces the parent's session keyring. Because the COW credential code does
not permit one process to change another process's credentials directly, the
change is deferred until userspace next starts executing again. Normally this
will be after a wait*() syscall.
To support this, three new security hooks have been provided:
cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in
the blank security creds and key_session_to_parent() - which asks the LSM if
the process may replace its parent's session keyring.
The replacement may only happen if the process has the same ownership details
as its parent, and the process has LINK permission on the session keyring, and
the session keyring is owned by the process, and the LSM permits it.
Note that this requires alteration to each architecture's notify_resume path.
This has been done for all arches barring blackfin, m68k* and xtensa, all of
which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the
replacement to be performed at the point the parent process resumes userspace
execution.
This allows the userspace AFS pioctl emulation to fully emulate newpag() and
the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to
alter the parent process's PAG membership. However, since kAFS doesn't use
PAGs per se, but rather dumps the keys into the session keyring, the session
keyring of the parent must be replaced if, for example, VIOCSETTOK is passed
the newpag flag.
This can be tested with the following program:
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>
#define KEYCTL_SESSION_TO_PARENT 18
#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)
int main(int argc, char **argv)
{
key_serial_t keyring, key;
long ret;
keyring = keyctl_join_session_keyring(argv[1]);
OSERROR(keyring, "keyctl_join_session_keyring");
key = add_key("user", "a", "b", 1, keyring);
OSERROR(key, "add_key");
ret = keyctl(KEYCTL_SESSION_TO_PARENT);
OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");
return 0;
}
Compiled and linked with -lkeyutils, you should see something like:
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
355907932 --alswrv 4043 -1 \_ keyring: _uid.4043
[dhowells@andromeda ~]$ /tmp/newpag
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
1055658746 --alswrv 4043 4043 \_ user: a
[dhowells@andromeda ~]$ /tmp/newpag hello
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: hello
340417692 --alswrv 4043 4043 \_ user: a
Where the test program creates a new session keyring, sticks a user key named
'a' into it and then installs it on its parent.
Signed-off-by: David Howells <[email protected]>
Signed-off-by: James Morris <[email protected]> |
static void ext4_ext_try_to_merge(handle_t *handle,
struct inode *inode,
struct ext4_ext_path *path,
struct ext4_extent *ex)
{
struct ext4_extent_header *eh;
unsigned int depth;
int merge_done = 0;
depth = ext_depth(inode);
BUG_ON(path[depth].p_hdr == NULL);
eh = path[depth].p_hdr;
if (ex > EXT_FIRST_EXTENT(eh))
merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
if (!merge_done)
(void) ext4_ext_try_to_merge_right(inode, path, ex);
ext4_ext_try_to_merge_up(handle, inode, path);
} | 0 | [
"CWE-703"
]
| linux | ce9f24cccdc019229b70a5c15e2b09ad9c0ab5d1 | 257,317,598,797,401,830,000,000,000,000,000,000,000 | 21 | ext4: check journal inode extents more carefully
Currently, system zones just track ranges of block, that are "important"
fs metadata (bitmaps, group descriptors, journal blocks, etc.). This
however complicates how extent tree (or indirect blocks) can be checked
for inodes that actually track such metadata - currently the journal
inode but arguably we should be treating quota files or resize inode
similarly. We cannot run __ext4_ext_check() on such metadata inodes when
loading their extents as that would immediately trigger the validity
checks and so we just hack around that and special-case the journal
inode. This however leads to a situation that a journal inode which has
extent tree of depth at least one can have invalid extent tree that gets
unnoticed until ext4_cache_extents() crashes.
To overcome this limitation, track inode number each system zone belongs
to (0 is used for zones not belonging to any inode). We can then verify
inode number matches the expected one when verifying extent tree and
thus avoid the false errors. With this there's no need to to
special-case journal inode during extent tree checking anymore so remove
it.
Fixes: 0a944e8a6c66 ("ext4: don't perform block validity checks on the journal inode")
Reported-by: Wolfgang Frisch <[email protected]>
Reviewed-by: Lukas Czerner <[email protected]>
Signed-off-by: Jan Kara <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Theodore Ts'o <[email protected]> |
int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
unsigned long nr_pages)
{
unsigned long i;
int err = 0;
int start_sec, end_sec;
/* during initialize mem_map, align hot-added range to section */
start_sec = pfn_to_section_nr(phys_start_pfn);
end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
for (i = start_sec; i <= end_sec; i++) {
err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);
/*
* EEXIST is finally dealt with by ioresource collision
* check. see add_memory() => register_memory_resource()
* Warning will be printed if there is collision.
*/
if (err && (err != -EEXIST))
break;
err = 0;
}
return err;
} | 0 | []
| linux-2.6 | 08dff7b7d629807dbb1f398c68dd9cd58dd657a1 | 208,759,792,167,152,800,000,000,000,000,000,000,000 | 25 | mm/hotplug: correctly add new zone to all other nodes' zone lists
When online_pages() is called to add new memory to an empty zone, it
rebuilds all zone lists by calling build_all_zonelists(). But there's a
bug which prevents the new zone to be added to other nodes' zone lists.
online_pages() {
build_all_zonelists()
.....
node_set_state(zone_to_nid(zone), N_HIGH_MEMORY)
}
Here the node of the zone is put into N_HIGH_MEMORY state after calling
build_all_zonelists(), but build_all_zonelists() only adds zones from
nodes in N_HIGH_MEMORY state to the fallback zone lists.
build_all_zonelists()
->__build_all_zonelists()
->build_zonelists()
->find_next_best_node()
->for_each_node_state(n, N_HIGH_MEMORY)
So memory in the new zone will never be used by other nodes, and it may
cause strange behavor when system is under memory pressure. So put node
into N_HIGH_MEMORY state before calling build_all_zonelists().
Signed-off-by: Jianguo Wu <[email protected]>
Signed-off-by: Jiang Liu <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Rusty Russell <[email protected]>
Cc: Yinghai Lu <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Cc: KOSAKI Motohiro <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Keping Chen <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static BOOL update_send_pointer_large(rdpContext* context, const POINTER_LARGE_UPDATE* pointer)
{
wStream* s;
rdpRdp* rdp = context->rdp;
BOOL ret = FALSE;
s = fastpath_update_pdu_init(rdp->fastpath);
if (!s)
return FALSE;
if (!update_write_pointer_large(s, pointer))
goto out_fail;
ret = fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_LARGE_POINTER, s, FALSE);
out_fail:
Stream_Release(s);
return ret;
} | 0 | [
"CWE-125"
]
| FreeRDP | f8890a645c221823ac133dbf991f8a65ae50d637 | 336,051,945,162,721,240,000,000,000,000,000,000,000 | 18 | Fixed #6005: Bounds checks in update_read_bitmap_data |
int PDFDoc::savePageAs(const GooString *name, int pageNo)
{
FILE *f;
OutStream *outStr;
XRef *yRef, *countRef;
if (file && file->modificationTimeChangedSinceOpen())
return errFileChangedSinceOpen;
int rootNum = getXRef()->getNumObjects() + 1;
// Make sure that special flags are set, because we are going to read
// all objects, including Unencrypted ones.
xref->scanSpecialFlags();
unsigned char *fileKey;
CryptAlgorithm encAlgorithm;
int keyLength;
xref->getEncryptionParameters(&fileKey, &encAlgorithm, &keyLength);
if (pageNo < 1 || pageNo > getNumPages() || !getCatalog()->getPage(pageNo)) {
error(errInternal, -1, "Illegal pageNo: {0:d}({1:d})", pageNo, getNumPages() );
return errOpenFile;
}
const PDFRectangle *cropBox = nullptr;
if (getCatalog()->getPage(pageNo)->isCropped()) {
cropBox = getCatalog()->getPage(pageNo)->getCropBox();
}
replacePageDict(pageNo,
getCatalog()->getPage(pageNo)->getRotate(),
getCatalog()->getPage(pageNo)->getMediaBox(),
cropBox);
Ref *refPage = getCatalog()->getPageRef(pageNo);
Object page = getXRef()->fetch(*refPage);
if (!(f = openFile(name->c_str(), "wb"))) {
error(errIO, -1, "Couldn't open file '{0:t}'", name);
return errOpenFile;
}
outStr = new FileOutStream(f,0);
yRef = new XRef(getXRef()->getTrailerDict());
if (secHdlr != nullptr && !secHdlr->isUnencrypted()) {
yRef->setEncryption(secHdlr->getPermissionFlags(),
secHdlr->getOwnerPasswordOk(), fileKey, keyLength, secHdlr->getEncVersion(), secHdlr->getEncRevision(), encAlgorithm);
}
countRef = new XRef();
Object *trailerObj = getXRef()->getTrailerDict();
if (trailerObj->isDict()) {
markPageObjects(trailerObj->getDict(), yRef, countRef, 0, refPage->num, rootNum + 2);
}
yRef->add(0, 65535, 0, false);
writeHeader(outStr, getPDFMajorVersion(), getPDFMinorVersion());
// get and mark info dict
Object infoObj = getXRef()->getDocInfo();
if (infoObj.isDict()) {
Dict *infoDict = infoObj.getDict();
markPageObjects(infoDict, yRef, countRef, 0, refPage->num, rootNum + 2);
if (trailerObj->isDict()) {
Dict *trailerDict = trailerObj->getDict();
const Object &ref = trailerDict->lookupNF("Info");
if (ref.isRef()) {
yRef->add(ref.getRef().num, ref.getRef().gen, 0, true);
if (getXRef()->getEntry(ref.getRef().num)->type == xrefEntryCompressed) {
yRef->getEntry(ref.getRef().num)->type = xrefEntryCompressed;
}
}
}
}
// get and mark output intents etc.
Object catObj = getXRef()->getCatalog();
Dict *catDict = catObj.getDict();
Object pagesObj = catDict->lookup("Pages");
Object afObj = catDict->lookupNF("AcroForm").copy();
if (!afObj.isNull()) {
markAcroForm(&afObj, yRef, countRef, 0, refPage->num, rootNum + 2);
}
Dict *pagesDict = pagesObj.getDict();
Object resourcesObj = pagesDict->lookup("Resources");
if (resourcesObj.isDict())
markPageObjects(resourcesObj.getDict(), yRef, countRef, 0, refPage->num, rootNum + 2);
markPageObjects(catDict, yRef, countRef, 0, refPage->num, rootNum + 2);
Dict *pageDict = page.getDict();
if (resourcesObj.isNull() && !pageDict->hasKey("Resources")) {
Object *resourceDictObject = getCatalog()->getPage(pageNo)->getResourceDictObject();
if (resourceDictObject->isDict()) {
resourcesObj = resourceDictObject->copy();
markPageObjects(resourcesObj.getDict(), yRef, countRef, 0, refPage->num, rootNum + 2);
}
}
markPageObjects(pageDict, yRef, countRef, 0, refPage->num, rootNum + 2);
Object annotsObj = pageDict->lookupNF("Annots").copy();
if (!annotsObj.isNull()) {
markAnnotations(&annotsObj, yRef, countRef, 0, refPage->num, rootNum + 2);
}
yRef->markUnencrypted();
writePageObjects(outStr, yRef, 0);
yRef->add(rootNum,0,outStr->getPos(),true);
outStr->printf("%d 0 obj\n", rootNum);
outStr->printf("<< /Type /Catalog /Pages %d 0 R", rootNum + 1);
for (int j = 0; j < catDict->getLength(); j++) {
const char *key = catDict->getKey(j);
if (strcmp(key, "Type") != 0 &&
strcmp(key, "Catalog") != 0 &&
strcmp(key, "Pages") != 0)
{
if (j > 0) outStr->printf(" ");
Object value = catDict->getValNF(j).copy();
outStr->printf("/%s ", key);
writeObject(&value, outStr, getXRef(), 0, nullptr, cryptRC4, 0, 0, 0);
}
}
outStr->printf(">>\nendobj\n");
yRef->add(rootNum + 1,0,outStr->getPos(),true);
outStr->printf("%d 0 obj\n", rootNum + 1);
outStr->printf("<< /Type /Pages /Kids [ %d 0 R ] /Count 1 ", rootNum + 2);
if (resourcesObj.isDict()) {
outStr->printf("/Resources ");
writeObject(&resourcesObj, outStr, getXRef(), 0, nullptr, cryptRC4, 0, 0, 0);
}
outStr->printf(">>\n");
outStr->printf("endobj\n");
yRef->add(rootNum + 2,0,outStr->getPos(),true);
outStr->printf("%d 0 obj\n", rootNum + 2);
outStr->printf("<< ");
for (int n = 0; n < pageDict->getLength(); n++) {
if (n > 0) outStr->printf(" ");
const char *key = pageDict->getKey(n);
Object value = pageDict->getValNF(n).copy();
if (strcmp(key, "Parent") == 0) {
outStr->printf("/Parent %d 0 R", rootNum + 1);
} else {
outStr->printf("/%s ", key);
writeObject(&value, outStr, getXRef(), 0, nullptr, cryptRC4, 0, 0, 0);
}
}
outStr->printf(" >>\nendobj\n");
Goffset uxrefOffset = outStr->getPos();
Ref ref;
ref.num = rootNum;
ref.gen = 0;
Object trailerDict = createTrailerDict(rootNum + 3, false, 0, &ref, getXRef(),
name->c_str(), uxrefOffset);
writeXRefTableTrailer(std::move(trailerDict), yRef, false /* do not write unnecessary entries */,
uxrefOffset, outStr, getXRef());
outStr->close();
fclose(f);
delete yRef;
delete countRef;
delete outStr;
return errNone;
} | 0 | [
"CWE-787"
]
| poppler | fada09a2ccc11a3a1d308e810f1336d8df6011fd | 174,411,570,475,479,570,000,000,000,000,000,000,000 | 163 | pdfunite: Fix stack overflow on broken file
Fixes issue #741 |
static void vnc_unlock_queue(VncJobQueue *queue)
{
qemu_mutex_unlock(&queue->mutex);
} | 0 | [
"CWE-125"
]
| qemu | 9f64916da20eea67121d544698676295bbb105a7 | 92,798,102,330,397,580,000,000,000,000,000,000,000 | 4 | pixman/vnc: use pixman images in vnc.
The vnc code uses *three* DisplaySurfaces:
First is the surface of the actual QemuConsole, usually the guest
screen, but could also be a text console (monitor/serial reachable via
Ctrl-Alt-<nr> keys). This is left as-is.
Second is the current server's view of the screen content. The vnc code
uses this to figure which parts of the guest screen did _really_ change
to reduce the amount of updates sent to the vnc clients. It is also
used as data source when sending out the updates to the clients. This
surface gets replaced by a pixman image. The format changes too,
instead of using the guest screen format we'll use fixed 32bit rgb
framebuffer and convert the pixels on the fly when comparing and
updating the server framebuffer.
Third surface carries the format expected by the vnc client. That isn't
used to store image data. This surface is switched to PixelFormat and a
boolean for bigendian byte order.
Signed-off-by: Gerd Hoffmann <[email protected]> |
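The commit message above describes replacing the VNC server's second surface (its view of the screen) with a pixman image in a fixed 32-bit RGB format so guest pixels are converted on the fly. As a hedged, self-contained sketch of that idea, not the actual QEMU code, the C snippet below allocates such a server-side framebuffer with pixman; the 640x480 size and the choice of PIXMAN_x8r8g8b8 as the "32bit rgb" format are assumptions made for illustration.

#include <pixman.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Allocate a server-side framebuffer as a pixman image in a fixed 32-bit RGB
 * format, independent of whatever format the guest surface uses; conversion
 * would then happen when guest pixels are compared against and copied into
 * this image. */
int main(void)
{
    int width = 640, height = 480;          /* assumed screen size */
    int stride = width * 4;                 /* 4 bytes per pixel, no row padding */
    uint32_t *bits = calloc((size_t)width * height, sizeof(uint32_t));

    if (!bits)
        return 1;

    pixman_image_t *server = pixman_image_create_bits(PIXMAN_x8r8g8b8,
                                                      width, height,
                                                      bits, stride);
    if (!server) {
        free(bits);
        return 1;
    }

    printf("server fb: %dx%d, stride %d bytes\n",
           pixman_image_get_width(server),
           pixman_image_get_height(server),
           pixman_image_get_stride(server));

    pixman_image_unref(server);
    free(bits);
    return 0;
}

Keeping this server image in one fixed format is what lets the update code compare and send pixels without caring about the guest's pixel format, which matches the motivation given in the commit message.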