Dataset schema (fields appear in this order in every record below):

func        string    (lengths 0 to 484k)
target      int64     (0 to 1)
cwe         sequence  (lengths 0 to 4)
project     string    (799 classes)
commit_id   string    (lengths 40 to 40)
hash        float64   (1,215,700,430,453,689,100,000,000B to 340,281,914,521,452,260,000,000,000,000B)
size        int64     (1 to 24k)
message     string    (lengths 0 to 13.3k)
int dissolve_free_huge_page(struct page *page)
{
    int rc = -EBUSY;

    /* Not to disrupt normal path by vainly holding hugetlb_lock */
    if (!PageHuge(page))
        return 0;

    spin_lock(&hugetlb_lock);
    if (!PageHuge(page)) {
        rc = 0;
        goto out;
    }

    if (!page_count(page)) {
        struct page *head = compound_head(page);
        struct hstate *h = page_hstate(head);
        int nid = page_to_nid(head);

        if (h->free_huge_pages - h->resv_huge_pages == 0)
            goto out;

        /*
         * Move PageHWPoison flag from head page to the raw error page,
         * which makes any subpages rather than the error page reusable.
         */
        if (PageHWPoison(head) && page != head) {
            SetPageHWPoison(page);
            ClearPageHWPoison(head);
        }
        list_del(&head->lru);
        h->free_huge_pages--;
        h->free_huge_pages_node[nid]--;
        h->max_huge_pages--;
        update_and_free_page(h, head);
        rc = 0;
    }
out:
    spin_unlock(&hugetlb_lock);
    return rc;
}
0
[ "CWE-362" ]
linux
17743798d81238ab13050e8e2833699b54e15467
16,453,855,090,395,600,000,000,000,000,000,000,000
39
mm/hugetlb: fix a race between hugetlb sysctl handlers

There is a race between the assignment of `table->data` and write value to the pointer of `table->data` in the __do_proc_doulongvec_minmax() on the other thread.

CPU0:                              CPU1:
                                   proc_sys_write
hugetlb_sysctl_handler               proc_sys_call_handler
hugetlb_sysctl_handler_common        hugetlb_sysctl_handler
  table->data = &tmp;                  hugetlb_sysctl_handler_common
                                         table->data = &tmp;
  proc_doulongvec_minmax
    do_proc_doulongvec_minmax          sysctl_head_finish
      __do_proc_doulongvec_minmax        unuse_table
        i = table->data;
        *i = val;  // corrupt CPU1's stack

Fix this by duplicating the `table`, and only update the duplicate of it. And introduce a helper of proc_hugetlb_doulongvec_minmax() to simplify the code.

The following oops was seen:

    BUG: kernel NULL pointer dereference, address: 0000000000000000
    #PF: supervisor instruction fetch in kernel mode
    #PF: error_code(0x0010) - not-present page
    Code: Bad RIP value.
    ...
    Call Trace:
     ? set_max_huge_pages+0x3da/0x4f0
     ? alloc_pool_huge_page+0x150/0x150
     ? proc_doulongvec_minmax+0x46/0x60
     ? hugetlb_sysctl_handler_common+0x1c7/0x200
     ? nr_hugepages_store+0x20/0x20
     ? copy_fd_bitmaps+0x170/0x170
     ? hugetlb_sysctl_handler+0x1e/0x20
     ? proc_sys_call_handler+0x2f1/0x300
     ? unregister_sysctl_table+0xb0/0xb0
     ? __fd_install+0x78/0x100
     ? proc_sys_write+0x14/0x20
     ? __vfs_write+0x4d/0x90
     ? vfs_write+0xef/0x240
     ? ksys_write+0xc0/0x160
     ? __ia32_sys_read+0x50/0x50
     ? __close_fd+0x129/0x150
     ? __x64_sys_write+0x43/0x50
     ? do_syscall_64+0x6c/0x200
     ? entry_SYSCALL_64_after_hwframe+0x44/0xa9

Fixes: e5ff215941d5 ("hugetlb: multiple hstates for multiple page sizes")
Signed-off-by: Muchun Song <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Reviewed-by: Mike Kravetz <[email protected]>
Cc: Andi Kleen <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
static void cbs_jpeg_free_comment(void *unit, uint8_t *content)
{
    JPEGRawComment *comment = (JPEGRawComment*)content;
    av_buffer_unref(&comment->Cm_ref);
    av_freep(&content);
}
0
[ "CWE-787" ]
FFmpeg
a3a3730b5456ca00587455004d40c047f7b20a99
271,666,422,008,633,660,000,000,000,000,000,000,000
6
avcodec/cbs_jpeg: Check length for SOS

Fixes: out of array access
Fixes: 19734/clusterfuzz-testcase-minimized-ffmpeg_BSF_TRACE_HEADERS_fuzzer-5673507031875584
Fixes: 19353/clusterfuzz-testcase-minimized-ffmpeg_BSF_TRACE_HEADERS_fuzzer-5703944462663680

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <[email protected]>
(cherry picked from commit 1812352d767ccf5431aa440123e2e260a4db2726)
Signed-off-by: Michael Niedermayer <[email protected]>
static void qxl_update_irq_bh(void *opaque)
{
    PCIQXLDevice *d = opaque;
    qxl_update_irq(d);
}
0
[ "CWE-476" ]
qemu
d52680fc932efb8a2f334cc6993e705ed1e31e99
82,844,638,886,538,740,000,000,000,000,000,000,000
5
qxl: check release info object

When releasing spice resources in release_resource() routine, if release info object 'ext.info' is null, it leads to null pointer dereference. Add check to avoid it.

Reported-by: Bugs SysSec <[email protected]>
Signed-off-by: Prasad J Pandit <[email protected]>
Message-id: [email protected]
Signed-off-by: Gerd Hoffmann <[email protected]>
static int alloc_profile_is_valid(u64 flags, int extended)
{
    u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
                BTRFS_BLOCK_GROUP_PROFILE_MASK);

    flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;

    /* 1) check that all other bits are zeroed */
    if (flags & ~mask)
        return 0;

    /* 2) see if profile is reduced */
    if (flags == 0)
        return !extended; /* "0" is valid for usual profiles */

    /* true if exactly one bit set */
    return is_power_of_2(flags);
}
0
[ "CWE-476", "CWE-284" ]
linux
09ba3bc9dd150457c506e4661380a6183af651c1
159,625,154,008,947,520,000,000,000,000,000,000,000
18
btrfs: merge btrfs_find_device and find_device

Both btrfs_find_device() and find_device() does the same thing except that the latter does not take the seed device onto account in the device scanning context. We can merge them.

Signed-off-by: Anand Jain <[email protected]>
Reviewed-by: David Sterba <[email protected]>
Signed-off-by: David Sterba <[email protected]>
httpHeaderFieldStatDumper(StoreEntry * sentry, int, double val, double, int count)
{
    const int id = static_cast<int>(val);
    const bool valid_id = Http::any_valid_header(static_cast<Http::HdrType>(id));
    const char *name = valid_id ?
        Http::HeaderLookupTable.lookup(static_cast<Http::HdrType>(id)).name : "INVALID";
    int visible = count > 0;
    /* for entries with zero count, list only those that belong to current type of message */
    if (!visible && valid_id && dump_stat->owner_mask)
        visible = CBIT_TEST(*dump_stat->owner_mask, id);

    if (visible)
        storeAppendPrintf(sentry, "%2d\t %-20s\t %5d\t %6.2f\n",
                          id, name, count,
                          xdiv(count, dump_stat->busyDestroyedCount));
}
0
[ "CWE-444" ]
squid
9c8e2a71aa1d3c159a319d9365c346c48dc783a5
227,230,106,587,128,700,000,000,000,000,000,000,000
15
Enforce token characters for field-name (#700)

RFC 7230 defines field-name as a token. Request splitting and cache poisoning attacks have used non-token characters to fool broken HTTP agents behind or in front of Squid for years. This change should significantly reduce that abuse.

If we discover exceptional situations that need special treatment, the relaxed parser can allow them on a case-by-case basis (while being extra careful about framing-related header fields), just like we already tolerate some header whitespace (e.g., between the response header field-name and colon).
netsnmp_set_mib_directory(const char *dir)
{
    const char *newdir;
    char *olddir, *tmpdir = NULL;

    DEBUGTRACE;
    if (NULL == dir) {
        return;
    }

    olddir = netsnmp_ds_get_string(NETSNMP_DS_LIBRARY_ID,
                                   NETSNMP_DS_LIB_MIBDIRS);
    if (olddir) {
        if ((*dir == '+') || (*dir == '-')) {
            /** New dir starts with '+', thus we add it. */
            tmpdir = (char *)malloc(strlen(dir) + strlen(olddir) + 2);
            if (!tmpdir) {
                DEBUGMSGTL(("read_config:initmib", "set mibdir malloc failed"));
                return;
            }
            if (*dir++ == '+')
                sprintf(tmpdir, "%s%c%s", olddir, ENV_SEPARATOR_CHAR, dir);
            else
                sprintf(tmpdir, "%s%c%s", dir, ENV_SEPARATOR_CHAR, olddir);
            newdir = tmpdir;
        } else {
            newdir = dir;
        }
    } else {
        /** If dir starts with '+' skip '+' it. */
        newdir = ((*dir == '+') ? ++dir : dir);
    }

    netsnmp_ds_set_string(NETSNMP_DS_LIBRARY_ID, NETSNMP_DS_LIB_MIBDIRS,
                          newdir);

    /** set_string calls strdup, so if we allocated memory, free it */
    if (tmpdir == newdir) {
        SNMP_FREE(tmpdir);
    }
}
0
[ "CWE-59", "CWE-61" ]
net-snmp
4fd9a450444a434a993bc72f7c3486ccce41f602
158,293,417,139,742,020,000,000,000,000,000,000,000
40
CHANGES: snmpd: Stop reading and writing the mib_indexes/* files

Caching directory contents is something the operating system should do and is not something Net-SNMP should do. Instead of storing a copy of the directory contents in ${tmp_dir}/mib_indexes/${n}, always scan a MIB directory.
static int __init slab_sysfs_init(void)
{
    struct kmem_cache *s;
    int err;

    slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
    if (!slab_kset) {
        printk(KERN_ERR "Cannot register slab subsystem.\n");
        return -ENOSYS;
    }

    slab_state = SYSFS;

    list_for_each_entry(s, &slab_caches, list) {
        err = sysfs_slab_add(s);
        if (err)
            printk(KERN_ERR "SLUB: Unable to add boot slab %s"
                   " to sysfs\n", s->name);
    }

    while (alias_list) {
        struct saved_alias *al = alias_list;

        alias_list = alias_list->next;
        err = sysfs_slab_alias(al->s, al->name);
        if (err)
            printk(KERN_ERR "SLUB: Unable to add boot slab alias"
                   " %s to sysfs\n", s->name);
        kfree(al);
    }

    resiliency_test();
    return 0;
0
[ "CWE-189" ]
linux
f8bd2258e2d520dff28c855658bd24bdafb5102d
128,998,871,219,643,440,000,000,000,000,000,000,000
34
remove div_long_long_rem

x86 is the only arch right now, which provides an optimized for div_long_long_rem and it has the downside that one has to be very careful that the divide doesn't overflow.

The API is a little akward, as the arguments for the unsigned divide are signed. The signed version also doesn't handle a negative divisor and produces worse code on 64bit archs.

There is little incentive to keep this API alive, so this converts the few users to the new API.

Signed-off-by: Roman Zippel <[email protected]>
Cc: Ralf Baechle <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: john stultz <[email protected]>
Cc: Christoph Lameter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
static bool is_finite(const unsigned short) { return true; }
0
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
176,471,314,587,885,550,000,000,000,000,000,000,000
1
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
SrcList *sqlite3SrcListAppendFromTerm(
  Parse *pParse,          /* Parsing context */
  SrcList *p,             /* The left part of the FROM clause already seen */
  Token *pTable,          /* Name of the table to add to the FROM clause */
  Token *pDatabase,       /* Name of the database containing pTable */
  Token *pAlias,          /* The right-hand side of the AS subexpression */
  Select *pSubquery,      /* A subquery used in place of a table name */
  Expr *pOn,              /* The ON clause of a join */
  IdList *pUsing          /* The USING clause of a join */
){
  struct SrcList_item *pItem;
  sqlite3 *db = pParse->db;
  if( !p && (pOn || pUsing) ){
    sqlite3ErrorMsg(pParse, "a JOIN clause is required before %s",
      (pOn ? "ON" : "USING")
    );
    goto append_from_error;
  }
  p = sqlite3SrcListAppend(pParse, p, pTable, pDatabase);
  if( p==0 ){
    goto append_from_error;
  }
  assert( p->nSrc>0 );
  pItem = &p->a[p->nSrc-1];
  assert( (pTable==0)==(pDatabase==0) );
  assert( pItem->zName==0 || pDatabase!=0 );
  if( IN_RENAME_OBJECT && pItem->zName ){
    Token *pToken = (ALWAYS(pDatabase) && pDatabase->z) ? pDatabase : pTable;
    sqlite3RenameTokenMap(pParse, pItem->zName, pToken);
  }
  assert( pAlias!=0 );
  if( pAlias->n ){
    pItem->zAlias = sqlite3NameFromToken(db, pAlias);
  }
  pItem->pSelect = pSubquery;
  pItem->pOn = pOn;
  pItem->pUsing = pUsing;
  return p;

append_from_error:
  assert( p==0 );
  sqlite3ExprDelete(db, pOn);
  sqlite3IdListDelete(db, pUsing);
  sqlite3SelectDelete(db, pSubquery);
  return 0;
}
0
[ "CWE-674", "CWE-787" ]
sqlite
38096961c7cd109110ac21d3ed7dad7e0cb0ae06
310,283,435,141,257,500,000,000,000,000,000,000,000
46
Avoid infinite recursion in the ALTER TABLE code when a view contains an unused CTE that references, directly or indirectly, the view itself.

FossilOrigin-Name: 1d2e53a39b87e364685e21de137655b6eee725e4c6d27fc90865072d7c5892b5
static void atfork_parent(struct atfork_state *as)
{
#ifdef NO_PTHREADS
    if (sigprocmask(SIG_SETMASK, &as->old, NULL))
        die_errno("sigprocmask");
#else
    bug_die(pthread_setcancelstate(as->cs, NULL),
            "re-enabling cancellation");
    bug_die(pthread_sigmask(SIG_SETMASK, &as->old, NULL),
            "restoring signal mask");
#endif
}
0
[]
git
321fd82389742398d2924640ce3a61791fd27d60
334,044,068,987,855,620,000,000,000,000,000,000,000
12
run-command: mark path lookup errors with ENOENT

Since commit e3a434468f (run-command: use the async-signal-safe execv instead of execvp, 2017-04-19), prepare_cmd() does its own PATH lookup for any commands we run (on non-Windows platforms). However, its logic does not match the old execvp call when we fail to find a matching entry in the PATH. Instead of feeding the name directly to execv, execvp would consider that an ENOENT error. By continuing and passing the name directly to execv, we effectively behave as if "." was included at the end of the PATH. This can have confusing and even dangerous results.

The fix itself is pretty straight-forward. There's a new test in t0061 to cover this explicitly, and I've also added a duplicate of the ENOENT test to ensure that we return the correct errno for this case.

Signed-off-by: Jeff King <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]>
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
    unsigned long rflags;
    int i, r;

    if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
        r = -EBUSY;
        if (vcpu->arch.exception.pending)
            goto out;
        if (dbg->control & KVM_GUESTDBG_INJECT_DB)
            kvm_queue_exception(vcpu, DB_VECTOR);
        else
            kvm_queue_exception(vcpu, BP_VECTOR);
    }

    /*
     * Read rflags as long as potentially injected trace flags are still
     * filtered out.
     */
    rflags = kvm_get_rflags(vcpu);

    vcpu->guest_debug = dbg->control;
    if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
        vcpu->guest_debug = 0;

    if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
        for (i = 0; i < KVM_NR_DB_REGS; ++i)
            vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
        vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
    } else {
        for (i = 0; i < KVM_NR_DB_REGS; i++)
            vcpu->arch.eff_db[i] = vcpu->arch.db[i];
    }
    kvm_update_dr7(vcpu);

    if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
        vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
            get_segment_base(vcpu, VCPU_SREG_CS);

    /*
     * Trigger an rflags update that will inject or remove the trace
     * flags.
     */
    kvm_set_rflags(vcpu, rflags);

    kvm_x86_ops->update_db_bp_intercept(vcpu);

    r = 0;

out:
    return r;
}
0
[]
linux
6d1068b3a98519247d8ba4ec85cd40ac136dbdf9
226,782,399,087,240,350,000,000,000,000,000,000,000
54
KVM: x86: invalid opcode oops on SET_SREGS with OSXSAVE bit set (CVE-2012-4461)

On hosts without the XSAVE support unprivileged local user can trigger oops similar to the one below by setting X86_CR4_OSXSAVE bit in guest cr4 register using KVM_SET_SREGS ioctl and later issuing KVM_RUN ioctl.

    invalid opcode: 0000 [#2] SMP
    Modules linked in: tun ip6table_filter ip6_tables ebtable_nat ebtables ...
    Pid: 24935, comm: zoog_kvm_monito Tainted: G      D      3.2.0-3-686-pae
    EIP: 0060:[<f8b9550c>] EFLAGS: 00210246 CPU: 0
    EIP is at kvm_arch_vcpu_ioctl_run+0x92a/0xd13 [kvm]
    EAX: 00000001 EBX: 000f387e ECX: 00000000 EDX: 00000000
    ESI: 00000000 EDI: 00000000 EBP: ef5a0060 ESP: d7c63e70
    DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068
    Process zoog_kvm_monito (pid: 24935, ti=d7c62000 task=ed84a0c0 task.ti=d7c62000)
    Stack:
     00000001 f70a1200 f8b940a9 ef5a0060 00000000 00200202 f8769009 00000000
     ef5a0060 000f387e eda5c020 8722f9c8 00015bae 00000000 ed84a0c0 ed84a0c0
     c12bf02d 0000ae80 ef7f8740 fffffffb f359b740 ef5a0060 f8b85dc1 0000ae80
    Call Trace:
     [<f8b940a9>] ? kvm_arch_vcpu_ioctl_set_sregs+0x2fe/0x308 [kvm]
    ...
     [<c12bfb44>] ? syscall_call+0x7/0xb
    Code: 89 e8 e8 14 ee ff ff ba 00 00 04 00 89 e8 e8 98 48 ff ff 85 c0 74 1e 83 7d 48 00 75 18 8b 85 08 07 00 00 31 c9 8b 95 0c 07 00 00 <0f> 01 d1 c7 45 48 01 00 00 00 c7 45 1c 01 00 00 00 0f ae f0 89
    EIP: [<f8b9550c>] kvm_arch_vcpu_ioctl_run+0x92a/0xd13 [kvm] SS:ESP 0068:d7c63e70

QEMU first retrieves the supported features via KVM_GET_SUPPORTED_CPUID and then sets them later. So guest's X86_FEATURE_XSAVE should be masked out on hosts without X86_FEATURE_XSAVE, making kvm_set_cr4 with X86_CR4_OSXSAVE fail. Userspaces that allow specifying guest cpuid with X86_FEATURE_XSAVE even on hosts that do not support it, might be susceptible to this attack from inside the guest as well.

Allow setting X86_CR4_OSXSAVE bit only if host has XSAVE support.

Signed-off-by: Petr Matousek <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]>
static void theme_real_destroy(THEME_REC *rec)
{
    g_hash_table_foreach(rec->abstracts, (GHFunc) theme_abstract_destroy, NULL);
    g_hash_table_destroy(rec->abstracts);
    g_hash_table_foreach(rec->modules, (GHFunc) theme_module_destroy, NULL);
    g_hash_table_destroy(rec->modules);

    g_slist_foreach(rec->replace_values, (GFunc) g_free, NULL);
    g_slist_free(rec->replace_values);

    g_free(rec->path);
    g_free(rec->name);
    g_free(rec);
}
0
[ "CWE-416" ]
irssi
43e44d553d44e313003cee87e6ea5e24d68b84a1
67,794,722,592,182,890,000,000,000,000,000,000,000
14
Merge branch 'security' into 'master'

Security

Closes GL#12, GL#13, GL#14, GL#15, GL#16

See merge request irssi/irssi!23
static bool is_topic_in_criterias(
        const char* topic_name,
        const std::vector<Criteria>& criterias)
{
    bool returned_value = false;

    for (auto criteria_it = criterias.begin(); !returned_value &&
            criteria_it != criterias.end(); ++criteria_it)
    {
        for (auto topic : (*criteria_it).topics)
        {
            if (StringMatching::matchString(topic.c_str(), topic_name))
            {
                returned_value = true;
                break;
            }
        }
    }

    return returned_value;
}
1
[ "CWE-284" ]
Fast-DDS
d2aeab37eb4fad4376b68ea4dfbbf285a2926384
267,661,843,802,009,350,000,000,000,000,000,000,000
21
check remote permissions (#1387)

* Refs 5346. Blackbox test
  Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. one-way string compare
  Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. Do not add partition separator on last partition
  Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. Uncrustify
  Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. Uncrustify
  Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Access control unit testing
  It only covers Partition and Topic permissions
  Signed-off-by: Iker Luengo <[email protected]>
* Refs #3680. Fix partition check on Permissions plugin.
  Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Uncrustify
  Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Fix tests on mac
  Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Fix windows tests
  Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Avoid memory leak on test
  Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Proxy data mocks should not return temporary objects
  Signed-off-by: Iker Luengo <[email protected]>
* refs 3680. uncrustify
  Signed-off-by: Iker Luengo <[email protected]>

Co-authored-by: Miguel Company <[email protected]>
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
                                int newheadroom, int newtailroom,
                                gfp_t gfp_mask)
{
    /*
     * Allocate the copy buffer
     */
    struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
                                    gfp_mask, skb_alloc_rx_flag(skb),
                                    NUMA_NO_NODE);
    int oldheadroom = skb_headroom(skb);
    int head_copy_len, head_copy_off;

    if (!n)
        return NULL;

    skb_reserve(n, newheadroom);

    /* Set the tail pointer and length */
    skb_put(n, skb->len);

    head_copy_len = oldheadroom;
    head_copy_off = 0;
    if (newheadroom <= head_copy_len)
        head_copy_len = newheadroom;
    else
        head_copy_off = newheadroom - head_copy_len;

    /* Copy the linear header and data. */
    if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
                      skb->len + head_copy_len))
        BUG();

    copy_skb_header(n, skb);

    skb_headers_offset_update(n, newheadroom - oldheadroom);

    return n;
0
[ "CWE-703", "CWE-125" ]
linux
8605330aac5a5785630aec8f64378a54891937cc
68,550,861,706,011,350,000,000,000,000,000,000,000
39
tcp: fix SCM_TIMESTAMPING_OPT_STATS for normal skbs

__sock_recv_timestamp can be called for both normal skbs (for receive timestamps) and for skbs on the error queue (for transmit timestamps). Commit 1c885808e456 (tcp: SOF_TIMESTAMPING_OPT_STATS option for SO_TIMESTAMPING) assumes any skb passed to __sock_recv_timestamp are from the error queue, containing OPT_STATS in the content of the skb. This results in accessing invalid memory or generating junk data.

To fix this, set skb->pkt_type to PACKET_OUTGOING for packets on the error queue. This is safe because on the receive path on local sockets skb->pkt_type is never set to PACKET_OUTGOING. With that, copy OPT_STATS from a packet, only if its pkt_type is PACKET_OUTGOING.

Fixes: 1c885808e456 ("tcp: SOF_TIMESTAMPING_OPT_STATS option for SO_TIMESTAMPING")
Reported-by: JongHwan Kim <[email protected]>
Signed-off-by: Soheil Hassas Yeganeh <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: Willem de Bruijn <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
_outNotifyStmt(StringInfo str, const NotifyStmt *node)
{
    WRITE_NODE_TYPE("NOTIFY");

    WRITE_STRING_FIELD(conditionname);
    WRITE_STRING_FIELD(payload);
}
0
[ "CWE-362" ]
postgres
5f173040e324f6c2eebb90d86cf1b0cdb5890f0a
133,450,413,031,834,630,000,000,000,000,000,000,000
7
Avoid repeated name lookups during table and index DDL.

If the name lookups come to different conclusions due to concurrent activity, we might perform some parts of the DDL on a different table than other parts. At least in the case of CREATE INDEX, this can be used to cause the permissions checks to be performed against a different table than the index creation, allowing for a privilege escalation attack.

This changes the calling convention for DefineIndex, CreateTrigger, transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible (in 9.2 and newer), and AlterTable (in 9.1 and older). In addition, CheckRelationOwnership is removed in 9.2 and newer and the calling convention is changed in older branches. A field has also been added to the Constraint node (FkConstraint in 8.4). Third-party code calling these functions or using the Constraint node will require updating.

Report by Andres Freund. Patch by Robert Haas and Andres Freund, reviewed by Tom Lane.

Security: CVE-2014-0062
xps_tifsDummyMapProc(thandle_t fd, void** pbase, toff_t* psize)
{
    (void)fd;
    (void)pbase;
    (void)psize;
    return (0);
}
0
[]
ghostpdl
94d8955cb7725eb5f3557ddc02310c76124fdd1a
225,730,007,934,330,930,000,000,000,000,000,000,000
7
Bug 701818: better handling of error during PS/PDF image

In the xps device, if an error occurred after xps_begin_image() but before xps_image_end_image(), *if* the Postscript had called 'restore' as part of the error handling, the image enumerator would have been freed (by the restore) despite the xps device still holding a reference to it.

Simply changing to an allocator unaffected save/restore doesn't work because the enumerator holds references to other objects (graphics state, color space, possibly others) whose lifespans are inherently controlled by save/restore.

So, add a finalize method for the XPS device's image enumerator (xps_image_enum_finalize()) which takes over cleaning up the memory it allocates and also deals with cleaning up references from the device to the enumerator and from the enumerator to the device.
static void coroutine_fn v9fs_statfs(void *opaque)
{
    int32_t fid;
    ssize_t retval = 0;
    size_t offset = 7;
    V9fsFidState *fidp;
    struct statfs stbuf;
    V9fsPDU *pdu = opaque;
    V9fsState *s = pdu->s;

    retval = pdu_unmarshal(pdu, offset, "d", &fid);
    if (retval < 0) {
        goto out_nofid;
    }
    fidp = get_fid(pdu, fid);
    if (fidp == NULL) {
        retval = -ENOENT;
        goto out_nofid;
    }
    retval = v9fs_co_statfs(pdu, &fidp->path, &stbuf);
    if (retval < 0) {
        goto out;
    }
    retval = v9fs_fill_statfs(s, pdu, &stbuf);
    if (retval < 0) {
        goto out;
    }
    retval += offset;
out:
    put_fid(pdu, fidp);
out_nofid:
    pdu_complete(pdu, retval);
}
0
[ "CWE-362" ]
qemu
89fbea8737e8f7b954745a1ffc4238d377055305
219,870,113,080,765,230,000,000,000,000,000,000,000
33
9pfs: Fully restart unreclaim loop (CVE-2021-20181)

Depending on the client activity, the server can be asked to open a huge number of file descriptors and eventually hit RLIMIT_NOFILE. This is currently mitigated using a reclaim logic : the server closes the file descriptors of idle fids, based on the assumption that it will be able to re-open them later. This assumption doesn't hold of course if the client requests the file to be unlinked. In this case, we loop on the entire fid list and mark all related fids as unreclaimable (the reclaim logic will just ignore them) and, of course, we open or re-open their file descriptors if needed since we're about to unlink the file. This is the purpose of v9fs_mark_fids_unreclaim().

Since the actual opening of a file can cause the coroutine to yield, another client request could possibly add a new fid that we may want to mark as non-reclaimable as well. The loop is thus restarted if the re-open request was actually transmitted to the backend. This is achieved by keeping a reference on the first fid (head) before traversing the list.

This is wrong in several ways:
- a potential clunk request from the client could tear the first fid down and cause the reference to be stale. This leads to a use-after-free error that can be detected with ASAN, using a custom 9p client
- fids are added at the head of the list : restarting from the previous head will always miss fids added by a some other potential request

All these problems could be avoided if fids were being added at the end of the list. This can be achieved with a QSIMPLEQ, but this is probably too much change for a bug fix. For now let's keep it simple and just restart the loop from the current head.

Fixes: CVE-2021-20181
Buglink: https://bugs.launchpad.net/qemu/+bug/1911666
Reported-by: Zero Day Initiative <[email protected]>
Reviewed-by: Christian Schoenebeck <[email protected]>
Reviewed-by: Stefano Stabellini <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Greg Kurz <[email protected]>
xcf_load_layer_mask (XcfInfo *info, GimpImage *image)
{
  GimpLayerMask *layer_mask;
  GimpChannel   *channel;
  GList         *iter;
  goffset        hierarchy_offset;
  gint           width;
  gint           height;
  gboolean       is_fs_drawable;
  gchar         *name;
  GimpRGB        color = { 0.0, 0.0, 0.0, GIMP_OPACITY_OPAQUE };

  /* check and see if this is the drawable the floating selection
   * is attached to. if it is then we'll do the attachment in our caller.
   */
  is_fs_drawable = (info->cp == info->floating_sel_offset);

  /* read in the layer width, height and name */
  xcf_read_int32 (info, (guint32 *) &width, 1);
  xcf_read_int32 (info, (guint32 *) &height, 1);
  if (width <= 0 || height <= 0)
    return NULL;

  xcf_read_string (info, &name, 1);

  /* create a new layer mask */
  layer_mask = gimp_layer_mask_new (image, width, height, name, &color);
  g_free (name);
  if (! layer_mask)
    return NULL;

  /* read in the layer_mask properties */
  channel = GIMP_CHANNEL (layer_mask);
  if (! xcf_load_channel_props (info, image, &channel))
    goto error;

  xcf_progress_update (info);

  /* read the hierarchy and layer mask offsets */
  xcf_read_offset (info, &hierarchy_offset, 1);

  /* read in the hierarchy */
  if (! xcf_seek_pos (info, hierarchy_offset, NULL))
    goto error;

  if (! xcf_load_buffer (info,
                         gimp_drawable_get_buffer (GIMP_DRAWABLE (layer_mask))))
    goto error;

  xcf_progress_update (info);

  /* attach the floating selection... */
  if (is_fs_drawable)
    info->floating_sel_drawable = GIMP_DRAWABLE (layer_mask);

  return layer_mask;

error:
  for (iter = info->selected_channels; iter; iter = iter->next)
    if (layer_mask == iter->data)
      {
        info->selected_channels = g_list_delete_link (info->selected_channels, iter);
        break;
      }

  if (info->floating_sel_drawable == GIMP_DRAWABLE (layer_mask))
    info->floating_sel_drawable = NULL;

  g_object_unref (layer_mask);

  return NULL;
}
0
[ "CWE-120" ]
gimp
4f99f1fcfd892ead19831b5adcd38a99d71214b6
297,370,673,568,352,870,000,000,000,000,000,000,000
73
app: fix #8120 GIMP 2.10.30 crashed when allocate large memory

GIMP could crash if the information regarding old path properties read from XCF was incorrect. It did not check if xcf_old_path succeeded and kept trying to load more paths even if the last one failed to load.

Instead we now stop loading paths as soon as that function fails. In case we have a failure here we also try to skip to the next property based on the size of the path property, in hopes that the only problem was this property.
ZipStreamBuf::ZipStreamBuf(std::istream& istr, const ZipLocalFileHeader& fileEntry, bool reposition):
    Poco::BufferedStreamBuf(STREAM_BUFFER_SIZE, std::ios::in),
    _pIstr(&istr),
    _pOstr(0),
    _ptrBuf(),
    _ptrOBuf(),
    _ptrHelper(),
    _ptrOHelper(),
    _crc32(Poco::Checksum::TYPE_CRC32),
    _expectedCrc32(0),
    _checkCRC(true),
    _bytesWritten(0),
    _pHeader(0)
{
    if (fileEntry.isDirectory())
        return;

    _expectedCrc32 = fileEntry.getCRC();
    std::streamoff start = fileEntry.getDataStartPos();
    std::streamoff end = fileEntry.getDataEndPos();
    _checkCRC = !fileEntry.searchCRCAndSizesAfterData();
    if (fileEntry.getCompressionMethod() == ZipCommon::CM_DEFLATE)
    {
        // Fake init bytes at beginning of stream
        std::string init = ZipUtil::fakeZLibInitString(fileEntry.getCompressionLevel());
        // Fake adler at end of stream: just some dummy value, not checked anway
        std::string crc(4, ' ');
        if (fileEntry.searchCRCAndSizesAfterData())
        {
            _ptrHelper = new AutoDetectInputStream(istr, init, crc, reposition,
                                                   static_cast<Poco::UInt32>(start), fileEntry.needsZip64());
        }
        else
        {
            _ptrHelper = new PartialInputStream(istr, start, end, reposition, init, crc);
        }
        _ptrBuf = new Poco::InflatingInputStream(*_ptrHelper, Poco::InflatingStreamBuf::STREAM_ZIP);
    }
    else if (fileEntry.getCompressionMethod() == ZipCommon::CM_STORE)
    {
        if (fileEntry.searchCRCAndSizesAfterData())
        {
            _ptrBuf = new AutoDetectInputStream(istr, "", "", reposition,
                                                static_cast<Poco::UInt32>(start), fileEntry.needsZip64());
        }
        else
        {
            _ptrBuf = new PartialInputStream(istr, start, end, reposition);
        }
    }
    else
        throw Poco::NotImplementedException("Unsupported compression method");
}
0
[ "CWE-22" ]
poco
bb7e5feece68ccfd8660caee93da25c5c39a4707
120,036,967,978,423,210,000,000,000,000,000,000,000
50
merge zip entry absolute path vulnerability fix (#1968) from develop
void Http2Stream::StartHeaders(nghttp2_headers_category category) {
  Debug(this, "starting headers, category: %d", id_, category);
  CHECK(!this->IsDestroyed());
  session_->DecrementCurrentSessionMemory(current_headers_length_);
  current_headers_length_ = 0;
  current_headers_.clear();
  current_headers_category_ = category;
}
0
[ "CWE-416" ]
node
7f178663ebffc82c9f8a5a1b6bf2da0c263a30ed
249,355,228,509,504,800,000,000,000,000,000,000,000
8
src: use unique_ptr for WriteWrap

This commit attempts to avoid a use-after-free error by using unqiue_ptr and passing a reference to it.

CVE-ID: CVE-2020-8265
Fixes: https://github.com/nodejs-private/node-private/issues/227
PR-URL: https://github.com/nodejs-private/node-private/pull/238
Reviewed-By: Michael Dawson <[email protected]>
Reviewed-By: Tobias Nießen <[email protected]>
Reviewed-By: Richard Lau <[email protected]>
void sqlite3VdbeFrameMemDel(void *pArg){
  VdbeFrame *pFrame = (VdbeFrame*)pArg;
  assert( sqlite3VdbeFrameIsValid(pFrame) );
  pFrame->pParent = pFrame->v->pDelFrame;
  pFrame->v->pDelFrame = pFrame;
}
0
[ "CWE-755" ]
sqlite
8654186b0236d556aa85528c2573ee0b6ab71be3
245,530,553,041,072,420,000,000,000,000,000,000,000
6
When an error occurs while rewriting the parser tree for window functions in the sqlite3WindowRewrite() routine, make sure that pParse->nErr is set, and make sure that this shuts down any subsequent code generation that might depend on the transformations that were implemented. This fixes a problem discovered by the Yongheng and Rui fuzzer.

FossilOrigin-Name: e2bddcd4c55ba3cbe0130332679ff4b048630d0ced9a8899982edb5a3569ba7f
static s32 avc_parse_pic_timing_sei(GF_BitStream *bs, AVCState *avc)
{
    int sps_id = avc->sps_active_idx;
    const char NumClockTS[] = { 1, 1, 1, 2, 2, 3, 3, 2, 3 };
    AVCSeiPicTiming *pt = &avc->sei.pic_timing;

    if (sps_id < 0) {
        /*sps_active_idx equals -1 when no sps has been detected. In this case SEI should not be decoded.*/
        assert(0);
        return 1;
    }
    if (avc->sps[sps_id].vui.nal_hrd_parameters_present_flag
        || avc->sps[sps_id].vui.vcl_hrd_parameters_present_flag) { /*CpbDpbDelaysPresentFlag, see 14496-10(2003) E.11*/
        gf_bs_read_int_log(bs, 1 + avc->sps[sps_id].vui.hrd.cpb_removal_delay_length_minus1, "cpb_removal_delay_minus1");
        gf_bs_read_int_log(bs, 1 + avc->sps[sps_id].vui.hrd.dpb_output_delay_length_minus1, "dpb_output_delay_minus1");
    }

    /*ISO 14496-10 (2003), D.8.2: we need to get pic_struct in order to know if we display top field first or bottom field first*/
    if (avc->sps[sps_id].vui.pic_struct_present_flag) {
        int i;
        pt->pic_struct = gf_bs_read_int_log(bs, 4, "pic_struct");
        if (pt->pic_struct > 8) {
            GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[avc-h264] invalid pic_struct value %d\n", pt->pic_struct));
            return 1;
        }

        for (i = 0; i < NumClockTS[pt->pic_struct]; i++) {
            if (gf_bs_read_int_log_idx(bs, 1, "clock_timestamp_flag", i)) {
                Bool full_timestamp_flag;
                gf_bs_read_int_log_idx(bs, 2, "ct_type", i);
                gf_bs_read_int_log_idx(bs, 1, "nuit_field_based_flag", i);
                gf_bs_read_int_log_idx(bs, 5, "counting_type", i);
                full_timestamp_flag = gf_bs_read_int_log_idx(bs, 1, "full_timestamp_flag", i);
                gf_bs_read_int_log_idx(bs, 1, "discontinuity_flag", i);
                gf_bs_read_int_log_idx(bs, 1, "cnt_dropped_flag", i);
                gf_bs_read_int_log_idx(bs, 8, "n_frames", i);
                if (full_timestamp_flag) {
                    gf_bs_read_int_log_idx(bs, 6, "seconds_value", i);
                    gf_bs_read_int_log_idx(bs, 6, "minutes_value", i);
                    gf_bs_read_int_log_idx(bs, 5, "hours_value", i);
                } else {
                    if (gf_bs_read_int_log_idx(bs, 1, "seconds_flag", i)) {
                        gf_bs_read_int_log_idx(bs, 6, "seconds_value", i);
                        if (gf_bs_read_int_log_idx(bs, 1, "minutes_flag", i)) {
                            gf_bs_read_int_log_idx(bs, 6, "minutes_value", i);
                            if (gf_bs_read_int_log_idx(bs, 1, "hours_flag", i)) {
                                gf_bs_read_int_log_idx(bs, 5, "hours_value", i);
                            }
                        }
                    }
                    if (avc->sps[sps_id].vui.hrd.time_offset_length > 0)
                        gf_bs_read_int_log_idx(bs, avc->sps[sps_id].vui.hrd.time_offset_length, "time_offset", i);
                }
            }
        }
    }
    return 0;
}
0
[ "CWE-190", "CWE-787" ]
gpac
51cdb67ff7c5f1242ac58c5aa603ceaf1793b788
211,319,188,185,759,070,000,000,000,000,000,000,000
59
add safety in avc/hevc/vvc sps/pps/vps ID check - cf #1720 #1721 #1722
tcl_global_eval(interp, cmd)
    Tcl_Interp *interp;
    const char *cmd; /* don't have to be writable */
#endif
{
    char *buf = strdup(cmd);
    int ret;

    Tcl_AllowExceptions(interp);
    ret = Tcl_GlobalEval(interp, buf);
    free(buf);
    return ret;
}
0
[]
tk
ebd0fc80d62eeb7b8556522256f8d035e013eb65
253,514,539,435,998,360,000,000,000,000,000,000,000
13
tcltklib.c: check argument

* ext/tk/tcltklib.c (ip_cancel_eval_core): check argument type and length.

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@51468 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
char *ad_get_entry(const struct adouble *ad, int eid)
{
    off_t off = ad_getentryoff(ad, eid);
    size_t len = ad_getentrylen(ad, eid);

    if (off == 0 || len == 0) {
        return NULL;
    }

    return ad->ad_data + off;
}
1
[ "CWE-787" ]
samba
0e2b3fb982d1f53d111e10d9197ed2ec2e13712c
149,428,417,400,046,300,000,000,000,000,000,000,000
11
CVE-2021-44142: libadouble: harden parsing code

BUG: https://bugzilla.samba.org/show_bug.cgi?id=14914

Signed-off-by: Ralph Boehme <[email protected]>
Reviewed-by: Jeremy Allison <[email protected]>
static inline int ape_decode_value_3990(APEContext *ctx, APERice *rice)
{
    unsigned int x, overflow;
    int base, pivot;

    pivot = rice->ksum >> 5;
    if (pivot == 0)
        pivot = 1;

    overflow = range_get_symbol(ctx, counts_3980, counts_diff_3980);

    if (overflow == (MODEL_ELEMENTS - 1)) {
        overflow  = range_decode_bits(ctx, 16) << 16;
        overflow |= range_decode_bits(ctx, 16);
    }

    if (pivot < 0x10000) {
        base = range_decode_culfreq(ctx, pivot);
        range_decode_update(ctx, 1, base);
    } else {
        int base_hi = pivot, base_lo;
        int bbits = 0;

        while (base_hi & ~0xFFFF) {
            base_hi >>= 1;
            bbits++;
        }
        base_hi = range_decode_culfreq(ctx, base_hi + 1);
        range_decode_update(ctx, 1, base_hi);
        base_lo = range_decode_culfreq(ctx, 1 << bbits);
        range_decode_update(ctx, 1, base_lo);

        base = (base_hi << bbits) + base_lo;
    }

    x = base + overflow * pivot;

    update_rice(rice, x);

    /* Convert to signed */
    if (x & 1)
        return (x >> 1) + 1;
    else
        return -(x >> 1);
}
0
[ "CWE-125" ]
FFmpeg
96349da5ec8eda9f0368446e557fe0c8ba0e66b7
231,001,598,582,996,650,000,000,000,000,000,000,000
45
avcodec/apedec: Fix integer overflow

Fixes: out of array access
Fixes: PoC.ape and others

Found-by: Bingchang, Liu@VARAS of IIE
Signed-off-by: Michael Niedermayer <[email protected]>
(cherry picked from commit ba4beaf6149f7241c8bd85fe853318c2f6837ad0)
Signed-off-by: Michael Niedermayer <[email protected]>
static SLJIT_INLINE sljit_u32 max_fast_forward_char_pair_sse2_offset(void)
{
#if PCRE2_CODE_UNIT_WIDTH == 8
    return 15;
#elif PCRE2_CODE_UNIT_WIDTH == 16
    return 7;
#elif PCRE2_CODE_UNIT_WIDTH == 32
    return 3;
#else
#error "Unsupported unit width"
#endif
}
0
[ "CWE-125" ]
php-src
8947fd9e9fdce87cd6c59817b1db58e789538fe9
90,197,799,762,916,340,000,000,000,000,000,000,000
12
Fix #78338: Array cross-border reading in PCRE

We backport r1092 from pcre2.
static void fscrypt_destroy(void)
{
    struct fscrypt_ctx *pos, *n;

    list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
        kmem_cache_free(fscrypt_ctx_cachep, pos);
    INIT_LIST_HEAD(&fscrypt_free_ctxs);
    mempool_destroy(fscrypt_bounce_page_pool);
    fscrypt_bounce_page_pool = NULL;
}
0
[ "CWE-416", "CWE-476" ]
linux
1b53cf9815bb4744958d41f3795d5d5a1d365e2d
184,165,843,558,834,900,000,000,000,000,000,000,000
10
fscrypt: remove broken support for detecting keyring key revocation

Filesystem encryption ostensibly supported revoking a keyring key that had been used to "unlock" encrypted files, causing those files to become "locked" again. This was, however, buggy for several reasons, the most severe of which was that when key revocation happened to be detected for an inode, its fscrypt_info was immediately freed, even while other threads could be using it for encryption or decryption concurrently. This could be exploited to crash the kernel or worse.

This patch fixes the use-after-free by removing the code which detects the keyring key having been revoked, invalidated, or expired. Instead, an encrypted inode that is "unlocked" now simply remains unlocked until it is evicted from memory. Note that this is no worse than the case for block device-level encryption, e.g. dm-crypt, and it still remains possible for a privileged user to evict unused pages, inodes, and dentries by running 'sync; echo 3 > /proc/sys/vm/drop_caches', or by simply unmounting the filesystem. In fact, one of those actions was already needed anyway for key revocation to work even somewhat sanely. This change is not expected to break any applications.

In the future I'd like to implement a real API for fscrypt key revocation that interacts sanely with ongoing filesystem operations --- waiting for existing operations to complete and blocking new operations, and invalidating and sanitizing key material and plaintext from the VFS caches. But this is a hard problem, and for now this bug must be fixed.

This bug affected almost all versions of ext4, f2fs, and ubifs encryption, and it was potentially reachable in any kernel configured with encryption support (CONFIG_EXT4_ENCRYPTION=y, CONFIG_EXT4_FS_ENCRYPTION=y, CONFIG_F2FS_FS_ENCRYPTION=y, or CONFIG_UBIFS_FS_ENCRYPTION=y). Note that older kernels did not use the shared fs/crypto/ code, but due to the potential security implications of this bug, it may still be worthwhile to backport this fix to them.

Fixes: b7236e21d55f ("ext4 crypto: reorganize how we store keys in the inode")
Cc: [email protected] # v4.2+
Signed-off-by: Eric Biggers <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
Acked-by: Michael Halcrow <[email protected]>
INST_HANDLER (std) {    // ST Y, Rr      ST Z, Rr
                        // ST Y+, Rr     ST Z+, Rr
                        // ST -Y, Rr     ST -Z, Rr
                        // ST Y+q, Rr    ST Z+q, Rr
    // load register
    ESIL_A ("r%d,", ((buf[1] & 1) << 4) | ((buf[0] >> 4) & 0xf));
    // write in memory
    __generic_ld_st (
        op, "ram",
        buf[0] & 0x8 ? 'y' : 'z',       // index register Y/Z
        0,                              // no use RAMP* registers
        !(buf[1] & 0x10)
            ? 0                         // no increment
            : buf[0] & 0x1
                ? 1                     // post incremented
                : -1,                   // pre decremented
        !(buf[1] & 0x10)
            ? (buf[1] & 0x20)           // offset
              | ((buf[1] & 0xc) << 1)
              | (buf[0] & 0x7)
            : 0,                        // no offset
        1);                             // load operation (!st)
//  // cycles
//  op->cycles =
//      buf[1] & 0x1 == 0
//          ? !(offset ? 1 : 3)         // LDD
//          : buf[0] & 0x3 == 0
//              ? 1                     // LD Rd, X
//              : buf[0] & 0x3 == 1
//                  ? 2                 // LD Rd, X+
//                  : 3;                // LD Rd, -X
//  if (!STR_BEGINS (cpu->model, "ATxmega") && op->cycles > 1) {
//      // AT*mega optimizes 1 cycle!
//      op->cycles--;
//  }
}
0
[ "CWE-125" ]
radare2
041e53cab7ca33481ae45ecd65ad596976d78e68
204,818,608,601,402,570,000,000,000,000,000,000,000
36
Fix crash in anal.avr
static void io_flush_timeouts(struct io_ring_ctx *ctx)
{
    u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);

    while (!list_empty(&ctx->timeout_list)) {
        u32 events_needed, events_got;
        struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
                                                struct io_kiocb, timeout.list);

        if (io_is_timeout_noseq(req))
            break;

        /*
         * Since seq can easily wrap around over time, subtract
         * the last seq at which timeouts were flushed before comparing.
         * Assuming not more than 2^31-1 events have happened since,
         * these subtractions won't have wrapped, so we can check if
         * target is in [last_seq, current_seq] by comparing the two.
         */
        events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
        events_got = seq - ctx->cq_last_tm_flush;
        if (events_got < events_needed)
            break;

        list_del_init(&req->timeout.list);
        io_kill_timeout(req, 0);
    }

    ctx->cq_last_tm_flush = seq;
}
0
[ "CWE-125" ]
linux
89c2b3b74918200e46699338d7bcc19b1ea12110
210,115,770,405,870,840,000,000,000,000,000,000,000
29
io_uring: reexpand under-reexpanded iters

[   74.211232] BUG: KASAN: stack-out-of-bounds in iov_iter_revert+0x809/0x900
[   74.212778] Read of size 8 at addr ffff888025dc78b8 by task syz-executor.0/828
[   74.214756] CPU: 0 PID: 828 Comm: syz-executor.0 Not tainted 5.14.0-rc3-next-20210730 #1
[   74.216525] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.14.0-0-g155821a1990b-prebuilt.qemu.org 04/01/2014
[   74.219033] Call Trace:
[   74.219683]  dump_stack_lvl+0x8b/0xb3
[   74.220706]  print_address_description.constprop.0+0x1f/0x140
[   74.224226]  kasan_report.cold+0x7f/0x11b
[   74.226085]  iov_iter_revert+0x809/0x900
[   74.227960]  io_write+0x57d/0xe40
[   74.232647]  io_issue_sqe+0x4da/0x6a80
[   74.242578]  __io_queue_sqe+0x1ac/0xe60
[   74.245358]  io_submit_sqes+0x3f6e/0x76a0
[   74.248207]  __do_sys_io_uring_enter+0x90c/0x1a20
[   74.257167]  do_syscall_64+0x3b/0x90
[   74.257984]  entry_SYSCALL_64_after_hwframe+0x44/0xae

old_size = iov_iter_count();
...
iov_iter_revert(old_size - iov_iter_count());

If iov_iter_revert() is done base on the initial size as above, and the iter is truncated and not reexpanded in the middle, it miscalculates borders causing problems. This trace is due to no one reexpanding after generic_write_checks().

Now iters store how many bytes has been truncated, so reexpand them to the initial state right before reverting.

Cc: [email protected]
Reported-by: Palash Oswal <[email protected]>
Reported-by: Sudip Mukherjee <[email protected]>
Reported-and-tested-by: [email protected]
Signed-off-by: Pavel Begunkov <[email protected]>
Signed-off-by: Al Viro <[email protected]>
_mpegts_section_get_event (GstMpegtsSection * section)
{
  GstStructure *structure;
  GstEvent *event;

  structure = _mpegts_section_get_structure (section);

  event = gst_event_new_custom (GST_EVENT_CUSTOM_DOWNSTREAM, structure);

  return event;
}
0
[ "CWE-125" ]
gst-plugins-bad
d58f668ece8795bddb3316832e1848c7b7cf38ac
172,707,692,970,176,480,000,000,000,000,000,000,000
11
mpegtssection: Add more section size checks

The smallest section ever needs to be at least 3 bytes (i.e. just the short header). Non-short headers need to be at least 11 bytes long (3 for the minimum header, 5 for the non-short header, and 4 for the CRC).

https://bugzilla.gnome.org/show_bug.cgi?id=775048
Item *get_copy(THD *thd, MEM_ROOT *mem_root)
{ return get_item_copy<Item_cache_temporal>(thd, mem_root, this); }
0
[ "CWE-617" ]
server
2e7891080667c59ac80f788eef4d59d447595772
243,734,821,603,640,550,000,000,000,000,000,000,000
2
MDEV-25635 Assertion failure when pushing from HAVING into WHERE of view

This bug could manifest itself after pushing a where condition over a mergeable derived table / view / CTE DT into a grouping view / derived table / CTE V whose item list contained set functions with constant arguments such as MIN(2), SUM(1) etc. In such cases the field references used in the condition pushed into the view V that correspond set functions are wrapped into Item_direct_view_ref wrappers. Due to a wrong implementation of the virtual method const_item() for the class Item_direct_view_ref the wrapped set functions with constant arguments could be erroneously taken for constant items. This could lead to a wrong result set returned by the main select query in 10.2. In 10.4 where a possibility of pushing condition from HAVING into WHERE had been added this could cause a crash.

Approved by Sergey Petrunya <[email protected]>
BOOL security_decrypt(BYTE* data, size_t length, rdpRdp* rdp)
{
    if (rdp->rc4_decrypt_key == NULL)
        return FALSE;

    if (rdp->decrypt_use_count >= 4096)
    {
        if (!security_key_update(rdp->decrypt_key, rdp->decrypt_update_key, rdp->rc4_key_len, rdp))
            return FALSE;

        winpr_RC4_Free(rdp->rc4_decrypt_key);
        rdp->rc4_decrypt_key = winpr_RC4_New(rdp->decrypt_key, rdp->rc4_key_len);

        if (!rdp->rc4_decrypt_key)
            return FALSE;

        rdp->decrypt_use_count = 0;
    }

    if (!winpr_RC4_Update(rdp->rc4_decrypt_key, length, data, data))
        return FALSE;

    rdp->decrypt_use_count += 1;
    rdp->decrypt_checksum_use_count++;
    return TRUE;
}
0
[ "CWE-125", "CWE-787" ]
FreeRDP
d6cd14059b257318f176c0ba3ee0a348826a9ef8
56,221,087,951,663,740,000,000,000,000,000,000,000
26
Fixed GHSL-2020-101 missing NULL check

(cherry picked from commit b207dbba35c505bbc3ad5aadc10b34980c6b7e8e)
//! Return minimal path in a graph, using the Dijkstra algorithm \newinstance.
CImg<Tfloat> get_dijkstra(const unsigned int starting_node, const unsigned int ending_node=~0U) const {
  CImg<uintT> foo;
  return get_dijkstra(starting_node,ending_node,foo);
0
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
151,545,586,066,950,420,000,000,000,000,000,000,000
4
Fix other issues in 'CImg<T>::load_bmp()'.
static bool generic_pkt_to_tuple(const struct sk_buff *skb,
                                 unsigned int dataoff,
                                 struct nf_conntrack_tuple *tuple)
{
    tuple->src.u.all = 0;
    tuple->dst.u.all = 0;

    return true;
}
0
[ "CWE-20", "CWE-254", "CWE-787" ]
linux
db29a9508a9246e77087c5531e45b2c88ec6988b
313,373,440,414,336,900,000,000,000,000,000,000,000
9
netfilter: conntrack: disable generic tracking for known protocols

Given following iptables ruleset:

-P FORWARD DROP
-A FORWARD -m sctp --dport 9 -j ACCEPT
-A FORWARD -p tcp --dport 80 -j ACCEPT
-A FORWARD -p tcp -m conntrack -m state ESTABLISHED,RELATED -j ACCEPT

One would assume that this allows SCTP on port 9 and TCP on port 80. Unfortunately, if the SCTP conntrack module is not loaded, this allows *all* SCTP communication, to pass though, i.e. -p sctp -j ACCEPT, which we think is a security issue.

This is because on the first SCTP packet on port 9, we create a dummy "generic l4" conntrack entry without any port information (since conntrack doesn't know how to extract this information).

All subsequent packets that are unknown will then be in established state since they will fallback to proto_generic and will match the 'generic' entry.

Our originally proposed version [1] completely disabled generic protocol tracking, but Jozsef suggests to not track protocols for which a more suitable helper is available, hence we now mitigate the issue for in tree known ct protocol helpers only, so that at least NAT and direction information will still be preserved for others.

[1] http://www.spinics.net/lists/netfilter-devel/msg33430.html

Joint work with Daniel Borkmann.

Signed-off-by: Florian Westphal <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Jozsef Kadlecsik <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>
void ieee80211_send_eosp_nullfunc(struct ieee80211_sta *pubsta, int tid)
{
    struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
    enum ieee80211_frame_release_type reason;
    bool more_data;

    trace_api_send_eosp_nullfunc(sta->local, pubsta, tid);

    reason = IEEE80211_FRAME_RELEASE_UAPSD;
    more_data = ieee80211_sta_ps_more_data(sta, ~sta->sta.uapsd_queues,
                                           reason, 0);

    ieee80211_send_null_response(sta, tid, reason, false, more_data);
}
0
[ "CWE-287" ]
linux
3e493173b7841259a08c5c8e5cbe90adb349da7e
180,795,150,855,259,500,000,000,000,000,000,000,000
14
mac80211: Do not send Layer 2 Update frame before authorization

The Layer 2 Update frame is used to update bridges when a station roams to another AP even if that STA does not transmit any frames after the reassociation. This behavior was described in IEEE Std 802.11F-2003 as something that would happen based on MLME-ASSOCIATE.indication, i.e., before completing 4-way handshake. However, this IEEE trial-use recommended practice document was published before RSN (IEEE Std 802.11i-2004) and as such, did not consider RSN use cases. Furthermore, IEEE Std 802.11F-2003 was withdrawn in 2006 and as such, has not been maintained amd should not be used anymore.

Sending out the Layer 2 Update frame immediately after association is fine for open networks (and also when using SAE, FT protocol, or FILS authentication when the station is actually authenticated by the time association completes). However, it is not appropriate for cases where RSN is used with PSK or EAP authentication since the station is actually fully authenticated only once the 4-way handshake completes after authentication and attackers might be able to use the unauthenticated triggering of Layer 2 Update frame transmission to disrupt bridge behavior.

Fix this by postponing transmission of the Layer 2 Update frame from station entry addition to the point when the station entry is marked authorized. Similarly, send out the VLAN binding update only if the STA entry has already been authorized.

Signed-off-by: Jouni Malinen <[email protected]>
Reviewed-by: Johannes Berg <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
uint32_t resolveQueueForTest(absl::string_view vm_id, absl::string_view queue_name) {
  return global_shared_data.resolveQueue(vm_id, queue_name);
}
0
[ "CWE-476" ]
envoy
8788a3cf255b647fd14e6b5e2585abaaedb28153
312,824,088,727,826,250,000,000,000,000,000,000,000
3
1.4 - Do not call into the VM unless the VM Context has been created. (#24)

* Ensure that the in VM Context is created before onDone is called.
  Signed-off-by: John Plevyak <[email protected]>
* Update as per offline discussion.
  Signed-off-by: John Plevyak <[email protected]>
* Set in_vm_context_created_ in onNetworkNewConnection.
  Signed-off-by: John Plevyak <[email protected]>
* Add guards to other network calls.
  Signed-off-by: John Plevyak <[email protected]>
* Fix common/wasm tests.
  Signed-off-by: John Plevyak <[email protected]>
* Patch tests.
  Signed-off-by: John Plevyak <[email protected]>
* Remove unecessary file from cherry-pick.
  Signed-off-by: John Plevyak <[email protected]>
int crypto_check_attr_type(struct rtattr **tb, u32 type)
{
    struct crypto_attr_type *algt;

    algt = crypto_get_attr_type(tb);
    if (IS_ERR(algt))
        return PTR_ERR(algt);

    if ((algt->type ^ type) & algt->mask)
        return -EINVAL;

    return 0;
}
0
[ "CWE-284", "CWE-264", "CWE-269" ]
linux
4943ba16bbc2db05115707b3ff7b4874e9e3c560
237,535,779,750,915,570,000,000,000,000,000,000,000
13
crypto: include crypto- module prefix in template

This adds the module loading prefix "crypto-" to the template lookup as well.

For example, attempting to load 'vfat(blowfish)' via AF_ALG now correctly includes the "crypto-" prefix at every level, correctly rejecting "vfat":

net-pf-38
algif-hash
crypto-vfat(blowfish)
crypto-vfat(blowfish)-all
crypto-vfat

Reported-by: Mathias Krause <[email protected]>
Signed-off-by: Kees Cook <[email protected]>
Acked-by: Mathias Krause <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
zfilenamesplit(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;

    check_read_type(*op, t_string);
    /****** NOT IMPLEMENTED YET ******/
    return_error(gs_error_undefined);
}
0
[ "CWE-200" ]
ghostpdl
ab109aaeb3ddba59518b036fb288402a65cf7ce8
102,410,694,354,468,200,000,000,000,000,000,000,000
8
Bug 694724: Have filenameforall and getenv honor SAFER
TEST_P(DnsImplTest, Cancel) {
  server_->addHosts("some.good.domain", {"201.134.56.7"}, RecordType::A);

  ActiveDnsQuery* query =
      resolveWithUnreferencedParameters("some.domain", DnsLookupFamily::Auto, false);

  EXPECT_NE(nullptr, resolveWithExpectations("some.good.domain", DnsLookupFamily::Auto,
                                             DnsResolver::ResolutionStatus::Success,
                                             {"201.134.56.7"}, {}, absl::nullopt));

  ASSERT_NE(nullptr, query);
  query->cancel();

  dispatcher_->run(Event::Dispatcher::RunType::Block);
}
0
[ "CWE-400" ]
envoy
542f84c66e9f6479bc31c6f53157c60472b25240
66,627,496,041,684,845,000,000,000,000,000,000,000
15
overload: Runtime configurable global connection limits (#147)

Signed-off-by: Tony Allen <[email protected]>
body_file_send (int sock, const char *file_name, wgint promised_size, FILE *warc_tmp)
{
  static char chunk[8192];
  wgint written = 0;
  int write_error;
  FILE *fp;

  DEBUGP (("[writing BODY file %s ... ", file_name));

  fp = fopen (file_name, "rb");
  if (!fp)
    return -1;
  while (!feof (fp) && written < promised_size)
    {
      int towrite;
      int length = fread (chunk, 1, sizeof (chunk), fp);

      if (length == 0)
        break;
      towrite = MIN (promised_size - written, length);
      write_error = fd_write (sock, chunk, towrite, -1);
      if (write_error < 0)
        {
          fclose (fp);
          return -1;
        }
      if (warc_tmp != NULL)
        {
          /* Write a copy of the data to the WARC record. */
          int warc_tmp_written = fwrite (chunk, 1, towrite, warc_tmp);
          if (warc_tmp_written != towrite)
            {
              fclose (fp);
              return -2;
            }
        }
      written += towrite;
    }
  fclose (fp);

  /* If we've written less than was promised, report a (probably
     nonsensical) error rather than break the promise. */
  if (written < promised_size)
    {
      errno = EINVAL;
      return -1;
    }

  assert (written == promised_size);
  DEBUGP (("done]\n"));
  return 0;
}
0
[ "CWE-119" ]
wget
d892291fb8ace4c3b734ea5125770989c215df3f
145,282,464,276,954,150,000,000,000,000,000,000,000
51
Fix stack overflow in HTTP protocol handling (CVE-2017-13089)

* src/http.c (skip_short_body): Return error on negative chunk size

Reported-by: Antti Levomäki, Christian Jalio, Joonas Pihlaja from Forcepoint
Reported-by: Juhani Eronen from Finnish National Cyber Security Centre
REGEXP *mutt_compile_regexp (const char *s, int flags)
{
  REGEXP *pp = safe_calloc (sizeof (REGEXP), 1);
  pp->pattern = safe_strdup (s);
  pp->rx = safe_calloc (sizeof (regex_t), 1);
  if (REGCOMP (pp->rx, NONULL(s), flags) != 0)
    mutt_free_regexp (&pp);

  return pp;
}
0
[ "CWE-668" ]
mutt
6d0624411a979e2e1d76af4dd97d03f47679ea4a
184,380,590,964,689,830,000,000,000,000,000,000,000
10
use a 64-bit random value in temporary filenames. closes #3158
ves_icall_System_Threading_Thread_VolatileWriteIntPtr (void *ptr, void *value)
{
    *((volatile void **) ptr) = value;
}
0
[ "CWE-399", "CWE-264" ]
mono
722f9890f09aadfc37ae479e7d946d5fc5ef7b91
336,085,034,320,668,850,000,000,000,000,000,000,000
4
Fix access to freed members of a dead thread

* threads.c: Fix access to freed members of a dead thread. Found and fixed by Rodrigo Kumpera <[email protected]>

Ref: CVE-2011-0992
static inline int UT64_ADD(ut64 *r, ut64 a, ut64 b) {
    if (UT64_MAX - a < b) {
        return 0;
    }
    if (r) {
        *r = a + b;
    }
    return 1;
}
0
[ "CWE-476" ]
radare2
1ea23bd6040441a21fbcfba69dce9a01af03f989
206,506,018,487,195,150,000,000,000,000,000,000,000
9
Fix #6816 - null deref in r_read_*
newh6namemem(netdissect_options *ndo)
{
    register struct h6namemem *p;
    static struct h6namemem *ptr = NULL;
    static u_int num = 0;

    if (num <= 0) {
        num = 64;
        ptr = (struct h6namemem *)calloc(num, sizeof (*ptr));
        if (ptr == NULL)
            (*ndo->ndo_error)(ndo, "newh6namemem: calloc");
    }
    --num;
    p = ptr++;
    return (p);
}
0
[ "CWE-125", "CWE-787" ]
tcpdump
730fc35968c5433b9e2a829779057f4f9495dc51
121,305,555,067,146,500,000,000,000,000,000,000,000
16
CVE-2017-12894/In lookup_bytestring(), take the length of the byte string into account.

Otherwise, if, in our search of the hash table, we come across a byte string that's shorter than the string we're looking for, we'll search past the end of the string in the hash table.

This fixes a buffer over-read discovered by Forcepoint's security researchers Otto Airamo & Antti Levomäki.

Add a test using the capture file supplied by the reporter(s).
void Item_cond::neg_arguments(THD *thd)
{
  List_iterator<Item> li(list);
  Item *item;
  while ((item= li++))          /* Apply not transformation to the arguments */
  {
    Item *new_item= item->neg_transformer(thd);
    if (!new_item)
    {
      if (!(new_item= new (thd->mem_root) Item_func_not(thd, item)))
        return;                 // Fatal OEM error
    }
    (void) li.replace(new_item);
  }
}
0
[ "CWE-617" ]
server
807945f2eb5fa22e6f233cc17b85a2e141efe2c8
249,403,002,115,199,100,000,000,000,000,000,000,000
15
MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order... When doing condition pushdown from HAVING into WHERE, Item_equal::create_pushable_equalities() calls item->set_extraction_flag(IMMUTABLE_FL) for constant items. Then, Item::cleanup_excluding_immutables_processor() checks for this flag to see if it should call item->cleanup() or leave the item as-is. The failure happens when a constant item has a non-constant one inside it, like: (tbl.col=0 AND impossible_cond) item->walk(cleanup_excluding_immutables_processor) works in a bottom-up way so it 1. will call Item_func_eq(tbl.col=0)->cleanup() 2. will not call Item_cond_and->cleanup (as the AND is constant) This creates an item tree where a fixed Item has an un-fixed Item inside it which eventually causes an assertion failure. Fixed by introducing this rule: instead of just calling item->set_extraction_flag(IMMUTABLE_FL); we call Item::walk() to set the flag for all sub-items of the item.
command_substitute (string, quoted) char *string; int quoted; { pid_t pid, old_pid, old_pipeline_pgrp, old_async_pid; char *istring, *s; int result, fildes[2], function_value, pflags, rc, tflag; WORD_DESC *ret; istring = (char *)NULL; /* Don't fork () if there is no need to. In the case of no command to run, just return NULL. */ #if 1 for (s = string; s && *s && (shellblank (*s) || *s == '\n'); s++) ; if (s == 0 || *s == 0) return ((WORD_DESC *)NULL); #else if (!string || !*string || (string[0] == '\n' && !string[1])) return ((WORD_DESC *)NULL); #endif if (wordexp_only && read_but_dont_execute) { last_command_exit_value = EX_WEXPCOMSUB; jump_to_top_level (EXITPROG); } /* We're making the assumption here that the command substitution will eventually run a command from the file system. Since we'll run maybe_make_export_env in this subshell before executing that command, the parent shell and any other shells it starts will have to remake the environment. If we make it before we fork, other shells won't have to. Don't bother if we have any temporary variable assignments, though, because the export environment will be remade after this command completes anyway, but do it if all the words to be expanded are variable assignments. */ if (subst_assign_varlist == 0 || garglist == 0) maybe_make_export_env (); /* XXX */ /* Flags to pass to parse_and_execute() */ pflags = (interactive && sourcelevel == 0) ? SEVAL_RESETLINE : 0; /* Pipe the output of executing STRING into the current shell. */ if (pipe (fildes) < 0) { sys_error ("%s", _("cannot make pipe for command substitution")); goto error_exit; } old_pid = last_made_pid; #if defined (JOB_CONTROL) old_pipeline_pgrp = pipeline_pgrp; /* Don't reset the pipeline pgrp if we're already a subshell in a pipeline. */ if ((subshell_environment & SUBSHELL_PIPE) == 0) pipeline_pgrp = shell_pgrp; cleanup_the_pipeline (); #endif /* JOB_CONTROL */ old_async_pid = last_asynchronous_pid; pid = make_child ((char *)NULL, subshell_environment&SUBSHELL_ASYNC); last_asynchronous_pid = old_async_pid; if (pid == 0) { /* Reset the signal handlers in the child, but don't free the trap strings. Set a flag noting that we have to free the trap strings if we run trap to change a signal disposition. */ reset_signal_handlers (); if (ISINTERRUPT) { kill (getpid (), SIGINT); CLRINTERRUPT; /* if we're ignoring SIGINT somehow */ } QUIT; /* catch any interrupts we got post-fork */ subshell_environment |= SUBSHELL_RESETTRAP; } #if defined (JOB_CONTROL) /* XXX DO THIS ONLY IN PARENT ? XXX */ set_sigchld_handler (); stop_making_children (); if (pid != 0) pipeline_pgrp = old_pipeline_pgrp; #else stop_making_children (); #endif /* JOB_CONTROL */ if (pid < 0) { sys_error (_("cannot make child for command substitution")); error_exit: last_made_pid = old_pid; FREE (istring); close (fildes[0]); close (fildes[1]); return ((WORD_DESC *)NULL); } if (pid == 0) { /* The currently executing shell is not interactive. */ interactive = 0; set_sigint_handler (); /* XXX */ free_pushed_string_input (); /* Discard buffered stdio output before replacing the underlying file descriptor. */ fpurge (stdout); if (dup2 (fildes[1], 1) < 0) { sys_error ("%s", _("command_substitute: cannot duplicate pipe as fd 1")); exit (EXECUTION_FAILURE); } /* If standard output is closed in the parent shell (such as after `exec >&-'), file descriptor 1 will be the lowest available file descriptor, and end up in fildes[0]. 
This can happen for stdin and stderr as well, but stdout is more important -- it will cause no output to be generated from this command. */ if ((fildes[1] != fileno (stdin)) && (fildes[1] != fileno (stdout)) && (fildes[1] != fileno (stderr))) close (fildes[1]); if ((fildes[0] != fileno (stdin)) && (fildes[0] != fileno (stdout)) && (fildes[0] != fileno (stderr))) close (fildes[0]); #ifdef __CYGWIN__ /* Let stdio know the fd may have changed from text to binary mode, and make sure to preserve stdout line buffering. */ freopen (NULL, "w", stdout); sh_setlinebuf (stdout); #endif /* __CYGWIN__ */ /* This is a subshell environment. */ subshell_environment |= SUBSHELL_COMSUB; /* Many shells do not appear to inherit the -v option for command substitutions. */ change_flag ('v', FLAG_OFF); /* When inherit_errexit option is not enabled, command substitution does not inherit the -e flag. It is enabled when Posix mode is enabled */ if (inherit_errexit == 0) { builtin_ignoring_errexit = 0; change_flag ('e', FLAG_OFF); } set_shellopts (); /* If we are expanding a redirection, we can dispose of any temporary environment we received, since redirections are not supposed to have access to the temporary environment. We will have to see whether this affects temporary environments supplied to `eval', but the temporary environment gets copied to builtin_env at some point. */ if (expanding_redir) { flush_temporary_env (); expanding_redir = 0; } remove_quoted_escapes (string); startup_state = 2; /* see if we can avoid a fork */ /* Give command substitution a place to jump back to on failure, so we don't go back up to main (). */ result = setjmp_nosigs (top_level); /* If we're running a command substitution inside a shell function, trap `return' so we don't return from the function in the subshell and go off to never-never land. */ if (result == 0 && return_catch_flag) function_value = setjmp_nosigs (return_catch); else function_value = 0; if (result == ERREXIT) rc = last_command_exit_value; else if (result == EXITPROG) rc = last_command_exit_value; else if (result) rc = EXECUTION_FAILURE; else if (function_value) rc = return_catch_value; else { subshell_level++; rc = parse_and_execute (string, "command substitution", pflags|SEVAL_NOHIST); subshell_level--; } last_command_exit_value = rc; rc = run_exit_trap (); #if defined (PROCESS_SUBSTITUTION) unlink_fifo_list (); #endif exit (rc); } else { #if defined (JOB_CONTROL) && defined (PGRP_PIPE) close_pgrp_pipe (); #endif /* JOB_CONTROL && PGRP_PIPE */ close (fildes[1]); tflag = 0; istring = read_comsub (fildes[0], quoted, &tflag); close (fildes[0]); current_command_subst_pid = pid; last_command_exit_value = wait_for (pid); last_command_subst_pid = pid; last_made_pid = old_pid; #if defined (JOB_CONTROL) /* If last_command_exit_value > 128, then the substituted command was terminated by a signal. If that signal was SIGINT, then send SIGINT to ourselves. This will break out of loops, for instance. */ if (last_command_exit_value == (128 + SIGINT) && last_command_exit_signal == SIGINT) kill (getpid (), SIGINT); /* wait_for gives the terminal back to shell_pgrp. If some other process group should have it, give it away to that group here. pipeline_pgrp is non-zero only while we are constructing a pipeline, so what we are concerned about is whether or not that pipeline was started in the background. A pipeline started in the background should never get the tty back here. 
We duplicate the conditions that wait_for tests to make sure we only give the terminal back to pipeline_pgrp under the conditions that wait_for gave it to shell_pgrp. If wait_for doesn't mess with the terminal pgrp, we should not either. */ if (interactive && pipeline_pgrp != (pid_t)0 && running_in_background == 0 && (subshell_environment & (SUBSHELL_ASYNC|SUBSHELL_PIPE)) == 0) give_terminal_to (pipeline_pgrp, 0); #endif /* JOB_CONTROL */ ret = alloc_word_desc (); ret->word = istring; ret->flags = tflag; return ret; } }
0
[]
bash
955543877583837c85470f7fb8a97b7aa8d45e6c
226,618,007,373,956,040,000,000,000,000,000,000,000
255
bash-4.4-rc2 release
psutil_aix_clear(PyObject *m) { Py_CLEAR(GETSTATE(m)->error); return 0; }
0
[ "CWE-415" ]
psutil
7d512c8e4442a896d56505be3e78f1156f443465
315,318,470,162,443,800,000,000,000,000,000,000,000
4
Use Py_CLEAR instead of Py_DECREF to also set the variable to NULL (#1616) These files contain loops that convert system data into python objects and during the process they create objects and decrement their refcounts after they have been added to the resulting list. However, in case of errors during the creation of those python objects, the refcount to previously allocated objects is dropped again with Py_XDECREF, which should be a no-op in case the parameter is NULL. Even so, in most of these loops the variables pointing to the objects are never set to NULL, even after Py_DECREF is called at the end of the loop iteration. This means, after the first iteration, if an error occurs those python objects will get their refcount dropped two times, resulting in a possible double-free.
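
A condensed sketch of the pattern this psutil commit installs (illustrative, not the project's exact loops): Py_CLEAR both drops the reference and NULLs the variable, so a later error path's Py_XDECREF cannot decref the same object twice.

    #include <Python.h>

    static int
    build_list (PyObject *list, long n)
    {
      PyObject *item = NULL;
      for (long i = 0; i < n; i++) {
        item = PyLong_FromLong (i);
        if (item == NULL)
          goto error;
        if (PyList_Append (list, item) != 0)
          goto error;
        Py_CLEAR (item);   /* decref AND set to NULL; a bare Py_DECREF would
                              leave a stale pointer for the error path */
      }
      return 0;
    error:
      Py_XDECREF (item);   /* no-op when item is NULL: no double free */
      return -1;
    }
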
static unsigned long __init early_calculate_totalpages(void) { unsigned long totalpages = 0; unsigned long start_pfn, end_pfn; int i, nid; for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { unsigned long pages = end_pfn - start_pfn; totalpages += pages; if (pages) node_set_state(nid, N_MEMORY); } return totalpages; }
0
[]
linux
400e22499dd92613821374c8c6c88c7225359980
20,927,893,715,653,454,000,000,000,000,000,000,000
15
mm: don't warn about allocations which stall for too long Commit 63f53dea0c98 ("mm: warn about allocations which stall for too long") was a great step for reducing possibility of silent hang up problem caused by memory allocation stalls. But this commit reverts it, for it is possible to trigger OOM lockup and/or soft lockups when many threads concurrently called warn_alloc() (in order to warn about memory allocation stalls) due to current implementation of printk(), and it is difficult to obtain useful information due to limitation of synchronous warning approach. Current printk() implementation flushes all pending logs using the context of a thread which called console_unlock(). printk() should be able to flush all pending logs eventually unless somebody continues appending to printk() buffer. Since warn_alloc() started appending to printk() buffer while waiting for oom_kill_process() to make forward progress when oom_kill_process() is processing pending logs, it became possible for warn_alloc() to force oom_kill_process() loop inside printk(). As a result, warn_alloc() significantly increased possibility of preventing oom_kill_process() from making forward progress. ---------- Pseudo code start ---------- Before warn_alloc() was introduced: retry: if (mutex_trylock(&oom_lock)) { while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here. mutex_unlock(&oom_lock) } goto retry; After warn_alloc() was introduced: retry: if (mutex_trylock(&oom_lock)) { while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here. mutex_unlock(&oom_lock) } else if (waited_for_10seconds()) { atomic_inc(&printk_pending_logs); } goto retry; ---------- Pseudo code end ---------- Although waited_for_10seconds() becomes true once per 10 seconds, unbounded number of threads can call waited_for_10seconds() at the same time. Also, since threads doing waited_for_10seconds() keep doing almost busy loop, the thread doing print_one_log() can use little CPU resource. Therefore, this situation can be simplified like ---------- Pseudo code start ---------- retry: if (mutex_trylock(&oom_lock)) { while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here. mutex_unlock(&oom_lock) } else { atomic_inc(&printk_pending_logs); } goto retry; ---------- Pseudo code end ---------- when printk() is called faster than print_one_log() can process a log. One of possible mitigation would be to introduce a new lock in order to make sure that no other series of printk() (either oom_kill_process() or warn_alloc()) can append to printk() buffer when one series of printk() (either oom_kill_process() or warn_alloc()) is already in progress. Such serialization will also help obtaining kernel messages in readable form. ---------- Pseudo code start ---------- retry: if (mutex_trylock(&oom_lock)) { mutex_lock(&oom_printk_lock); while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here. 
mutex_unlock(&oom_printk_lock); mutex_unlock(&oom_lock) } else { if (mutex_trylock(&oom_printk_lock)) { atomic_inc(&printk_pending_logs); mutex_unlock(&oom_printk_lock); } } goto retry; ---------- Pseudo code end ---------- But this commit does not go that direction, for we don't want to introduce a new lock dependency, and we unlikely be able to obtain useful information even if we serialized oom_kill_process() and warn_alloc(). Synchronous approach is prone to unexpected results (e.g. too late [1], too frequent [2], overlooked [3]). As far as I know, warn_alloc() never helped with providing information other than "something is going wrong". I want to consider asynchronous approach which can obtain information during stalls with possibly relevant threads (e.g. the owner of oom_lock and kswapd-like threads) and serve as a trigger for actions (e.g. turn on/off tracepoints, ask libvirt daemon to take a memory dump of stalling KVM guest for diagnostic purpose). This commit temporarily loses ability to report e.g. OOM lockup due to unable to invoke the OOM killer due to !__GFP_FS allocation request. But asynchronous approach will be able to detect such situation and emit warning. Thus, let's remove warn_alloc(). [1] https://bugzilla.kernel.org/show_bug.cgi?id=192981 [2] http://lkml.kernel.org/r/CAM_iQpWuPVGc2ky8M-9yukECtS+zKjiDasNymX7rMcBjBFyM_A@mail.gmail.com [3] commit db73ee0d46379922 ("mm, vmscan: do not loop on too_many_isolated for ever")) Link: http://lkml.kernel.org/r/1509017339-4802-1-git-send-email-penguin-kernel@I-love.SAKURA.ne.jp Signed-off-by: Tetsuo Handa <[email protected]> Reported-by: Cong Wang <[email protected]> Reported-by: yuwang.yuwang <[email protected]> Reported-by: Johannes Weiner <[email protected]> Acked-by: Michal Hocko <[email protected]> Acked-by: Johannes Weiner <[email protected]> Cc: Vlastimil Babka <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Dave Hansen <[email protected]> Cc: Sergey Senozhatsky <[email protected]> Cc: Petr Mladek <[email protected]> Cc: Steven Rostedt <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
decode_NXAST_RAW_EXIT(struct ofpbuf *out) { ofpact_put_EXIT(out); return 0; }
0
[ "CWE-125" ]
ovs
9237a63c47bd314b807cda0bd2216264e82edbe8
330,329,779,557,478,530,000,000,000,000,000,000,000
5
ofp-actions: Avoid buffer overread in BUNDLE action decoding. Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9052 Signed-off-by: Ben Pfaff <[email protected]> Acked-by: Justin Pettit <[email protected]>
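
A minimal sketch of the guard this ofp-actions fix implies (illustrative; not the BUNDLE decoder itself): validate an action's self-declared length against the bytes actually remaining before touching its payload. The check must precede the first payload access, since decoders that trust an embedded length walk straight off the end of short buffers.

    #include <stddef.h>

    /* Returns nonzero when reading DECLARED_LEN bytes is safe. */
    static int
    action_len_ok (size_t declared_len, size_t min_header, size_t remaining)
    {
      return declared_len >= min_header && declared_len <= remaining;
    }
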
static MagickBooleanType WriteTIFFImage(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { const char *mode, *option; CompressionType compression; EndianType endian_type; MagickBooleanType adjoin, debug, status; MagickOffsetType scene; QuantumInfo *quantum_info; QuantumType quantum_type; register ssize_t i; size_t imageListLength, length; ssize_t y; TIFF *tiff; TIFFInfo tiff_info; uint16 bits_per_sample, compress_tag, endian, photometric, predictor; unsigned char *pixels; /* Open TIFF file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); (void) SetMagickThreadValue(tiff_exception,exception); endian_type=(HOST_FILLORDER == FILLORDER_LSB2MSB) ? LSBEndian : MSBEndian; option=GetImageOption(image_info,"tiff:endian"); if (option != (const char *) NULL) { if (LocaleNCompare(option,"msb",3) == 0) endian_type=MSBEndian; if (LocaleNCompare(option,"lsb",3) == 0) endian_type=LSBEndian; } mode=endian_type == LSBEndian ? "wl" : "wb"; #if defined(TIFF_VERSION_BIG) if (LocaleCompare(image_info->magick,"TIFF64") == 0) mode=endian_type == LSBEndian ? "wl8" : "wb8"; #endif tiff=TIFFClientOpen(image->filename,mode,(thandle_t) image,TIFFReadBlob, TIFFWriteBlob,TIFFSeekBlob,TIFFCloseBlob,TIFFGetBlobSize,TIFFMapBlob, TIFFUnmapBlob); if (tiff == (TIFF *) NULL) return(MagickFalse); if (exception->severity > ErrorException) { TIFFClose(tiff); return(MagickFalse); } (void) DeleteImageProfile(image,"tiff:37724"); scene=0; debug=IsEventLogging(); (void) debug; adjoin=image_info->adjoin; imageListLength=GetImageListLength(image); do { /* Initialize TIFF fields. 
*/ if ((image_info->type != UndefinedType) && (image_info->type != OptimizeType)) (void) SetImageType(image,image_info->type,exception); compression=UndefinedCompression; if (image->compression != JPEGCompression) compression=image->compression; if (image_info->compression != UndefinedCompression) compression=image_info->compression; switch (compression) { case FaxCompression: case Group4Compression: { (void) SetImageType(image,BilevelType,exception); (void) SetImageDepth(image,1,exception); break; } case JPEGCompression: { (void) SetImageStorageClass(image,DirectClass,exception); (void) SetImageDepth(image,8,exception); break; } default: break; } quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); if ((image->storage_class != PseudoClass) && (image->depth >= 32) && (quantum_info->format == UndefinedQuantumFormat) && (IsHighDynamicRangeImage(image,exception) != MagickFalse)) { status=SetQuantumFormat(image,quantum_info,FloatingPointQuantumFormat); if (status == MagickFalse) { quantum_info=DestroyQuantumInfo(quantum_info); ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); } } if ((LocaleCompare(image_info->magick,"PTIF") == 0) && (GetPreviousImageInList(image) != (Image *) NULL)) (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_REDUCEDIMAGE); if ((image->columns != (uint32) image->columns) || (image->rows != (uint32) image->rows)) ThrowWriterException(ImageError,"WidthOrHeightExceedsLimit"); (void) TIFFSetField(tiff,TIFFTAG_IMAGELENGTH,(uint32) image->rows); (void) TIFFSetField(tiff,TIFFTAG_IMAGEWIDTH,(uint32) image->columns); switch (compression) { case FaxCompression: { compress_tag=COMPRESSION_CCITTFAX3; option=GetImageOption(image_info,"quantum:polarity"); if (option == (const char *) NULL) SetQuantumMinIsWhite(quantum_info,MagickTrue); break; } case Group4Compression: { compress_tag=COMPRESSION_CCITTFAX4; option=GetImageOption(image_info,"quantum:polarity"); if (option == (const char *) NULL) SetQuantumMinIsWhite(quantum_info,MagickTrue); break; } #if defined(COMPRESSION_JBIG) case JBIG1Compression: { compress_tag=COMPRESSION_JBIG; break; } #endif case JPEGCompression: { compress_tag=COMPRESSION_JPEG; break; } #if defined(COMPRESSION_LZMA) case LZMACompression: { compress_tag=COMPRESSION_LZMA; break; } #endif case LZWCompression: { compress_tag=COMPRESSION_LZW; break; } case RLECompression: { compress_tag=COMPRESSION_PACKBITS; break; } case ZipCompression: { compress_tag=COMPRESSION_ADOBE_DEFLATE; break; } #if defined(COMPRESSION_ZSTD) case ZstdCompression: { compress_tag=COMPRESSION_ZSTD; break; } #endif case NoCompression: default: { compress_tag=COMPRESSION_NONE; break; } } #if defined(MAGICKCORE_HAVE_TIFFISCODECCONFIGURED) || (TIFFLIB_VERSION > 20040919) if ((compress_tag != COMPRESSION_NONE) && (TIFFIsCODECConfigured(compress_tag) == 0)) { (void) ThrowMagickException(exception,GetMagickModule(),CoderError, "CompressionNotSupported","`%s'",CommandOptionToMnemonic( MagickCompressOptions,(ssize_t) compression)); compress_tag=COMPRESSION_NONE; compression=NoCompression; } #else switch (compress_tag) { #if defined(CCITT_SUPPORT) case COMPRESSION_CCITTFAX3: case COMPRESSION_CCITTFAX4: #endif #if defined(YCBCR_SUPPORT) && defined(JPEG_SUPPORT) case COMPRESSION_JPEG: #endif #if defined(LZMA_SUPPORT) && defined(COMPRESSION_LZMA) case COMPRESSION_LZMA: #endif #if defined(LZW_SUPPORT) case COMPRESSION_LZW: #endif #if defined(PACKBITS_SUPPORT) case 
COMPRESSION_PACKBITS: #endif #if defined(ZIP_SUPPORT) case COMPRESSION_ADOBE_DEFLATE: #endif case COMPRESSION_NONE: break; default: { (void) ThrowMagickException(exception,GetMagickModule(),CoderError, "CompressionNotSupported","`%s'",CommandOptionToMnemonic( MagickCompressOptions,(ssize_t) compression)); compress_tag=COMPRESSION_NONE; compression=NoCompression; break; } } #endif if (image->colorspace == CMYKColorspace) { photometric=PHOTOMETRIC_SEPARATED; (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,4); (void) TIFFSetField(tiff,TIFFTAG_INKSET,INKSET_CMYK); } else { /* Full color TIFF raster. */ if (image->colorspace == LabColorspace) { photometric=PHOTOMETRIC_CIELAB; EncodeLabImage(image,exception); } else if (image->colorspace == YCbCrColorspace) { photometric=PHOTOMETRIC_YCBCR; (void) TIFFSetField(tiff,TIFFTAG_YCBCRSUBSAMPLING,1,1); (void) SetImageStorageClass(image,DirectClass,exception); (void) SetImageDepth(image,8,exception); } else photometric=PHOTOMETRIC_RGB; (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,3); if ((image_info->type != TrueColorType) && (image_info->type != TrueColorAlphaType)) { if ((image_info->type != PaletteType) && (SetImageGray(image,exception) != MagickFalse)) { photometric=(uint16) (quantum_info->min_is_white != MagickFalse ? PHOTOMETRIC_MINISWHITE : PHOTOMETRIC_MINISBLACK); (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,1); if ((image->depth == 1) && (image->alpha_trait == UndefinedPixelTrait)) SetImageMonochrome(image,exception); } else if (image->storage_class == PseudoClass) { size_t depth; /* Colormapped TIFF raster. */ (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,1); photometric=PHOTOMETRIC_PALETTE; depth=1; while ((GetQuantumRange(depth)+1) < image->colors) depth<<=1; status=SetQuantumDepth(image,quantum_info,depth); if (status == MagickFalse) ThrowWriterException(ResourceLimitError, "MemoryAllocationFailed"); } } } (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_FILLORDER,&endian); if ((compress_tag == COMPRESSION_CCITTFAX3) || (compress_tag == COMPRESSION_CCITTFAX4)) { if ((photometric != PHOTOMETRIC_MINISWHITE) && (photometric != PHOTOMETRIC_MINISBLACK)) { compress_tag=COMPRESSION_NONE; endian=FILLORDER_MSB2LSB; } } option=GetImageOption(image_info,"tiff:fill-order"); if (option != (const char *) NULL) { if (LocaleNCompare(option,"msb",3) == 0) endian=FILLORDER_MSB2LSB; if (LocaleNCompare(option,"lsb",3) == 0) endian=FILLORDER_LSB2MSB; } (void) TIFFSetField(tiff,TIFFTAG_COMPRESSION,compress_tag); (void) TIFFSetField(tiff,TIFFTAG_FILLORDER,endian); (void) TIFFSetField(tiff,TIFFTAG_BITSPERSAMPLE,quantum_info->depth); if (image->alpha_trait != UndefinedPixelTrait) { uint16 extra_samples, sample_info[1], samples_per_pixel; /* TIFF has a matte channel. 
*/ extra_samples=1; sample_info[0]=EXTRASAMPLE_UNASSALPHA; option=GetImageOption(image_info,"tiff:alpha"); if (option != (const char *) NULL) { if (LocaleCompare(option,"associated") == 0) sample_info[0]=EXTRASAMPLE_ASSOCALPHA; else if (LocaleCompare(option,"unspecified") == 0) sample_info[0]=EXTRASAMPLE_UNSPECIFIED; } (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLESPERPIXEL, &samples_per_pixel); (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,samples_per_pixel+1); (void) TIFFSetField(tiff,TIFFTAG_EXTRASAMPLES,extra_samples, &sample_info); if (sample_info[0] == EXTRASAMPLE_ASSOCALPHA) SetQuantumAlphaType(quantum_info,AssociatedQuantumAlpha); } (void) TIFFSetField(tiff,TIFFTAG_PHOTOMETRIC,photometric); switch (quantum_info->format) { case FloatingPointQuantumFormat: { (void) TIFFSetField(tiff,TIFFTAG_SAMPLEFORMAT,SAMPLEFORMAT_IEEEFP); (void) TIFFSetField(tiff,TIFFTAG_SMINSAMPLEVALUE,quantum_info->minimum); (void) TIFFSetField(tiff,TIFFTAG_SMAXSAMPLEVALUE,quantum_info->maximum); break; } case SignedQuantumFormat: { (void) TIFFSetField(tiff,TIFFTAG_SAMPLEFORMAT,SAMPLEFORMAT_INT); break; } case UnsignedQuantumFormat: { (void) TIFFSetField(tiff,TIFFTAG_SAMPLEFORMAT,SAMPLEFORMAT_UINT); break; } default: break; } (void) TIFFSetField(tiff,TIFFTAG_PLANARCONFIG,PLANARCONFIG_CONTIG); if (photometric == PHOTOMETRIC_RGB) if ((image_info->interlace == PlaneInterlace) || (image_info->interlace == PartitionInterlace)) (void) TIFFSetField(tiff,TIFFTAG_PLANARCONFIG,PLANARCONFIG_SEPARATE); predictor=0; switch (compress_tag) { case COMPRESSION_JPEG: { #if defined(JPEG_SUPPORT) if (image_info->quality != UndefinedCompressionQuality) (void) TIFFSetField(tiff,TIFFTAG_JPEGQUALITY,image_info->quality); (void) TIFFSetField(tiff,TIFFTAG_JPEGCOLORMODE,JPEGCOLORMODE_RAW); if (IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) { const char *value; (void) TIFFSetField(tiff,TIFFTAG_JPEGCOLORMODE,JPEGCOLORMODE_RGB); if (image->colorspace == YCbCrColorspace) { const char *sampling_factor; GeometryInfo geometry_info; MagickStatusType flags; sampling_factor=(const char *) NULL; value=GetImageProperty(image,"jpeg:sampling-factor",exception); if (value != (char *) NULL) { sampling_factor=value; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Input sampling-factors=%s",sampling_factor); } if (image_info->sampling_factor != (char *) NULL) sampling_factor=image_info->sampling_factor; if (sampling_factor != (const char *) NULL) { flags=ParseGeometry(sampling_factor,&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; (void) TIFFSetField(tiff,TIFFTAG_YCBCRSUBSAMPLING,(uint16) geometry_info.rho,(uint16) geometry_info.sigma); } } } (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE, &bits_per_sample); if (bits_per_sample == 12) (void) TIFFSetField(tiff,TIFFTAG_JPEGTABLESMODE,JPEGTABLESMODE_QUANT); #endif break; } case COMPRESSION_ADOBE_DEFLATE: { (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE, &bits_per_sample); if (((photometric == PHOTOMETRIC_RGB) || (photometric == PHOTOMETRIC_SEPARATED) || (photometric == PHOTOMETRIC_MINISBLACK)) && ((bits_per_sample == 8) || (bits_per_sample == 16))) predictor=PREDICTOR_HORIZONTAL; (void) TIFFSetField(tiff,TIFFTAG_ZIPQUALITY,(long) ( image_info->quality == UndefinedCompressionQuality ? 7 : MagickMin((ssize_t) image_info->quality/10,9))); break; } case COMPRESSION_CCITTFAX3: { /* Byte-aligned EOL. 
*/ (void) TIFFSetField(tiff,TIFFTAG_GROUP3OPTIONS,4); break; } case COMPRESSION_CCITTFAX4: break; #if defined(LZMA_SUPPORT) && defined(COMPRESSION_LZMA) case COMPRESSION_LZMA: { if (((photometric == PHOTOMETRIC_RGB) || (photometric == PHOTOMETRIC_SEPARATED) || (photometric == PHOTOMETRIC_MINISBLACK)) && ((bits_per_sample == 8) || (bits_per_sample == 16))) predictor=PREDICTOR_HORIZONTAL; (void) TIFFSetField(tiff,TIFFTAG_LZMAPRESET,(long) ( image_info->quality == UndefinedCompressionQuality ? 7 : MagickMin((ssize_t) image_info->quality/10,9))); break; } #endif case COMPRESSION_LZW: { (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE, &bits_per_sample); if (((photometric == PHOTOMETRIC_RGB) || (photometric == PHOTOMETRIC_SEPARATED) || (photometric == PHOTOMETRIC_MINISBLACK)) && ((bits_per_sample == 8) || (bits_per_sample == 16))) predictor=PREDICTOR_HORIZONTAL; break; } #if defined(WEBP_SUPPORT) && defined(COMPRESSION_WEBP) case COMPRESSION_WEBP: { (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE, &bits_per_sample); if (((photometric == PHOTOMETRIC_RGB) || (photometric == PHOTOMETRIC_SEPARATED) || (photometric == PHOTOMETRIC_MINISBLACK)) && ((bits_per_sample == 8) || (bits_per_sample == 16))) predictor=PREDICTOR_HORIZONTAL; (void) TIFFSetField(tiff,TIFFTAG_WEBP_LEVEL,image_info->quality); if (image_info->quality >= 100) (void) TIFFSetField(tiff,TIFFTAG_WEBP_LOSSLESS,1); break; } #endif #if defined(ZSTD_SUPPORT) && defined(COMPRESSION_ZSTD) case COMPRESSION_ZSTD: { (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE, &bits_per_sample); if (((photometric == PHOTOMETRIC_RGB) || (photometric == PHOTOMETRIC_SEPARATED) || (photometric == PHOTOMETRIC_MINISBLACK)) && ((bits_per_sample == 8) || (bits_per_sample == 16))) predictor=PREDICTOR_HORIZONTAL; (void) TIFFSetField(tiff,TIFFTAG_ZSTD_LEVEL,22*image_info->quality/ 100.0); break; } #endif default: break; } option=GetImageOption(image_info,"tiff:predictor"); if (option != (const char * ) NULL) predictor=(uint16) strtol(option,(char **) NULL,10); if (predictor != 0) (void) TIFFSetField(tiff,TIFFTAG_PREDICTOR,predictor); if ((image->resolution.x != 0.0) && (image->resolution.y != 0.0)) { unsigned short units; /* Set image resolution. */ units=RESUNIT_NONE; if (image->units == PixelsPerInchResolution) units=RESUNIT_INCH; if (image->units == PixelsPerCentimeterResolution) units=RESUNIT_CENTIMETER; (void) TIFFSetField(tiff,TIFFTAG_RESOLUTIONUNIT,(uint16) units); (void) TIFFSetField(tiff,TIFFTAG_XRESOLUTION,image->resolution.x); (void) TIFFSetField(tiff,TIFFTAG_YRESOLUTION,image->resolution.y); if ((image->page.x < 0) || (image->page.y < 0)) (void) ThrowMagickException(exception,GetMagickModule(),CoderError, "TIFF: negative image positions unsupported","%s",image->filename); if ((image->page.x > 0) && (image->resolution.x > 0.0)) { /* Set horizontal image position. */ (void) TIFFSetField(tiff,TIFFTAG_XPOSITION,(float) image->page.x/ image->resolution.x); } if ((image->page.y > 0) && (image->resolution.y > 0.0)) { /* Set vertical image position. */ (void) TIFFSetField(tiff,TIFFTAG_YPOSITION,(float) image->page.y/ image->resolution.y); } } if (image->chromaticity.white_point.x != 0.0) { float chromaticity[6]; /* Set image chromaticity. 
*/ chromaticity[0]=(float) image->chromaticity.red_primary.x; chromaticity[1]=(float) image->chromaticity.red_primary.y; chromaticity[2]=(float) image->chromaticity.green_primary.x; chromaticity[3]=(float) image->chromaticity.green_primary.y; chromaticity[4]=(float) image->chromaticity.blue_primary.x; chromaticity[5]=(float) image->chromaticity.blue_primary.y; (void) TIFFSetField(tiff,TIFFTAG_PRIMARYCHROMATICITIES,chromaticity); chromaticity[0]=(float) image->chromaticity.white_point.x; chromaticity[1]=(float) image->chromaticity.white_point.y; (void) TIFFSetField(tiff,TIFFTAG_WHITEPOINT,chromaticity); } option=GetImageOption(image_info,"tiff:write-layers"); if (IsStringTrue(option) != MagickFalse) { (void) TIFFWritePhotoshopLayers(image,image_info,endian_type,exception); adjoin=MagickFalse; } if ((LocaleCompare(image_info->magick,"PTIF") != 0) && (adjoin != MagickFalse) && (imageListLength > 1)) { (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_PAGE); if (image->scene != 0) (void) TIFFSetField(tiff,TIFFTAG_PAGENUMBER,(uint16) image->scene, imageListLength); } if (image->orientation != UndefinedOrientation) (void) TIFFSetField(tiff,TIFFTAG_ORIENTATION,(uint16) image->orientation); else (void) TIFFSetField(tiff,TIFFTAG_ORIENTATION,ORIENTATION_TOPLEFT); TIFFSetProfiles(tiff,image); { uint16 page, pages; page=(uint16) scene; pages=(uint16) imageListLength; if ((LocaleCompare(image_info->magick,"PTIF") != 0) && (adjoin != MagickFalse) && (pages > 1)) (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_PAGE); (void) TIFFSetField(tiff,TIFFTAG_PAGENUMBER,page,pages); } (void) TIFFSetProperties(tiff,adjoin,image,exception); DisableMSCWarning(4127) if (0) RestoreMSCWarning (void) TIFFSetEXIFProperties(tiff,image,exception); /* Write image scanlines. */ if (GetTIFFInfo(image_info,tiff,&tiff_info) == MagickFalse) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); quantum_info->endian=LSBEndian; pixels=(unsigned char *) GetQuantumPixels(quantum_info); tiff_info.scanline=(unsigned char *) GetQuantumPixels(quantum_info); switch (photometric) { case PHOTOMETRIC_CIELAB: case PHOTOMETRIC_YCBCR: case PHOTOMETRIC_RGB: { /* RGB TIFF image. */ switch (image_info->interlace) { case NoInterlace: default: { quantum_type=RGBQuantum; if (image->alpha_trait != UndefinedPixelTrait) quantum_type=RGBAQuantum; for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); (void) length; if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } break; } case PlaneInterlace: case PartitionInterlace: { /* Plane interlacing: RRRRRR...GGGGGG...BBBBBB... 
*/ for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info, RedQuantum,pixels,exception); if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,100,400); if (status == MagickFalse) break; } for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info, GreenQuantum,pixels,exception); if (TIFFWritePixels(tiff,&tiff_info,y,1,image) == -1) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,200,400); if (status == MagickFalse) break; } for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info, BlueQuantum,pixels,exception); if (TIFFWritePixels(tiff,&tiff_info,y,2,image) == -1) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,300,400); if (status == MagickFalse) break; } if (image->alpha_trait != UndefinedPixelTrait) for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(image,(CacheView *) NULL, quantum_info,AlphaQuantum,pixels,exception); if (TIFFWritePixels(tiff,&tiff_info,y,3,image) == -1) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,400,400); if (status == MagickFalse) break; } break; } } break; } case PHOTOMETRIC_SEPARATED: { /* CMYK TIFF image. */ quantum_type=CMYKQuantum; if (image->alpha_trait != UndefinedPixelTrait) quantum_type=CMYKAQuantum; if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } case PHOTOMETRIC_PALETTE: { uint16 *blue, *green, *red; /* Colormapped TIFF image. */ red=(uint16 *) AcquireQuantumMemory(65536,sizeof(*red)); green=(uint16 *) AcquireQuantumMemory(65536,sizeof(*green)); blue=(uint16 *) AcquireQuantumMemory(65536,sizeof(*blue)); if ((red == (uint16 *) NULL) || (green == (uint16 *) NULL) || (blue == (uint16 *) NULL)) { if (red != (uint16 *) NULL) red=(uint16 *) RelinquishMagickMemory(red); if (green != (uint16 *) NULL) green=(uint16 *) RelinquishMagickMemory(green); if (blue != (uint16 *) NULL) blue=(uint16 *) RelinquishMagickMemory(blue); ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); } /* Initialize TIFF colormap. 
*/ (void) memset(red,0,65536*sizeof(*red)); (void) memset(green,0,65536*sizeof(*green)); (void) memset(blue,0,65536*sizeof(*blue)); for (i=0; i < (ssize_t) image->colors; i++) { red[i]=ScaleQuantumToShort(image->colormap[i].red); green[i]=ScaleQuantumToShort(image->colormap[i].green); blue[i]=ScaleQuantumToShort(image->colormap[i].blue); } (void) TIFFSetField(tiff,TIFFTAG_COLORMAP,red,green,blue); red=(uint16 *) RelinquishMagickMemory(red); green=(uint16 *) RelinquishMagickMemory(green); blue=(uint16 *) RelinquishMagickMemory(blue); } default: { /* Convert PseudoClass packets to contiguous grayscale scanlines. */ quantum_type=IndexQuantum; if (image->alpha_trait != UndefinedPixelTrait) { if (photometric != PHOTOMETRIC_PALETTE) quantum_type=GrayAlphaQuantum; else quantum_type=IndexAlphaQuantum; } else if (photometric != PHOTOMETRIC_PALETTE) quantum_type=GrayQuantum; for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } } quantum_info=DestroyQuantumInfo(quantum_info); if (image->colorspace == LabColorspace) DecodeLabImage(image,exception); DestroyTIFFInfo(&tiff_info); DisableMSCWarning(4127) if (0 && (image_info->verbose != MagickFalse)) RestoreMSCWarning TIFFPrintDirectory(tiff,stdout,MagickFalse); (void) TIFFWriteDirectory(tiff); image=SyncNextImageInList(image); if (image == (Image *) NULL) break; status=SetImageProgress(image,SaveImagesTag,scene++,imageListLength); if (status == MagickFalse) break; } while (adjoin != MagickFalse); TIFFClose(tiff); return(MagickTrue); }
1
[ "CWE-125" ]
ImageMagick
f06925afeabe3f01045db33d5a33d55e64378ebc
308,553,004,386,214,870,000,000,000,000,000,000,000
903
https://github.com/ImageMagick/ImageMagick/issues/1555
static void netjoin_remove(NETJOIN_SERVER_REC *server, NETJOIN_REC *rec) { server->netjoins = g_slist_remove(server->netjoins, rec); g_slist_foreach(rec->old_channels, (GFunc) g_free, NULL); g_slist_foreach(rec->now_channels, (GFunc) g_free, NULL); g_slist_free(rec->old_channels); g_slist_free(rec->now_channels); g_free(rec->nick); g_free(rec); }
0
[ "CWE-416" ]
irssi
a6cae91cecba2e8cf11ed779c5da5a229472575c
185,448,758,111,059,300,000,000,000,000,000,000,000
12
Merge pull request #812 from ailin-nemui/tape-netsplit revert netsplit print optimisation (cherry picked from commit 7de1378dab8081932d9096e19ae3d0921e560230)
TEST_P(WasmTest, EmscriptenVersion) { Stats::IsolatedStoreImpl stats_store; Api::ApiPtr api = Api::createApiForTest(stats_store); Upstream::MockClusterManager cluster_manager; Event::DispatcherPtr dispatcher(api->allocateDispatcher()); auto scope = Stats::ScopeSharedPtr(stats_store.createScope("wasm.")); NiceMock<LocalInfo::MockLocalInfo> local_info; auto name = ""; auto root_id = ""; auto vm_id = ""; auto vm_configuration = ""; auto plugin = std::make_shared<Extensions::Common::Wasm::Plugin>( name, root_id, vm_id, envoy::api::v2::core::TrafficDirection::UNSPECIFIED, local_info, nullptr); auto wasm = std::make_unique<Extensions::Common::Wasm::Wasm>( absl::StrCat("envoy.wasm.runtime.", GetParam()), vm_id, vm_configuration, plugin, scope, cluster_manager, *dispatcher); EXPECT_NE(wasm, nullptr); const auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute( "{{ test_rundir }}/test/extensions/wasm/test_data/segv_cpp.wasm")); EXPECT_FALSE(code.empty()); auto context = std::make_unique<TestContext>(wasm.get()); EXPECT_TRUE(wasm->initialize(code, false)); uint32_t major = 9, minor = 9, abi_major = 9, abi_minor = 9; EXPECT_TRUE(wasm->getEmscriptenVersion(&major, &minor, &abi_major, &abi_minor)); EXPECT_EQ(major, 0); EXPECT_LE(minor, 3); // Up to (at least) emsdk 1.39.0. EXPECT_EQ(abi_major, 0); EXPECT_LE(abi_minor, 19); }
0
[ "CWE-476" ]
envoy
8788a3cf255b647fd14e6b5e2585abaaedb28153
128,341,522,276,619,360,000,000,000,000,000,000,000
31
1.4 - Do not call into the VM unless the VM Context has been created. (#24) * Ensure that the in VM Context is created before onDone is called. Signed-off-by: John Plevyak <[email protected]> * Update as per offline discussion. Signed-off-by: John Plevyak <[email protected]> * Set in_vm_context_created_ in onNetworkNewConnection. Signed-off-by: John Plevyak <[email protected]> * Add guards to other network calls. Signed-off-by: John Plevyak <[email protected]> * Fix common/wasm tests. Signed-off-by: John Plevyak <[email protected]> * Patch tests. Signed-off-by: John Plevyak <[email protected]> * Remove unecessary file from cherry-pick. Signed-off-by: John Plevyak <[email protected]>
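
The patch's guard reduced to a C sketch (Envoy itself is C++ and the names here are illustrative): record when the in-VM context has been created and make every VM entry point check that flag first.

    #include <stdbool.h>
    #include <stdio.h>

    struct vm_context {
      bool created;                  /* set once the in-VM context exists */
    };

    static void
    on_create (struct vm_context *c)
    {
      c->created = true;
    }

    static void
    on_done (struct vm_context *c)
    {
      if (!c->created)
        return;                      /* the fix: never call into the VM early */
      puts ("calling into the VM");  /* stands in for the real VM call */
    }
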
bool mnt_may_suid(struct vfsmount *mnt) { /* * Foreign mounts (accessed via fchdir or through /proc * symlinks) are always treated as if they are nosuid. This * prevents namespaces from trusting potentially unsafe * suid/sgid bits, file caps, or security labels that originate * in other namespaces. */ return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) && current_in_userns(mnt->mnt_sb->s_user_ns); }
0
[ "CWE-200" ]
linux
427215d85e8d1476da1a86b8d67aceb485eb3631
16,171,160,194,673,306,000,000,000,000,000,000,000
12
ovl: prevent private clone if bind mount is not allowed Add the following checks from __do_loopback() to clone_private_mount() as well: - verify that the mount is in the current namespace - verify that there are no locked children Reported-by: Alois Wohlschlager <[email protected]> Fixes: c771d683a62e ("vfs: introduce clone_private_mount()") Cc: <[email protected]> # v3.18 Signed-off-by: Miklos Szeredi <[email protected]>
static int get_deep_fifo(struct sb_uart_port *port) { int afr_status = 0; afr_status = sb1054_get_register(port, PAGE_4, SB105X_AFR); return afr_status; }
0
[ "CWE-200" ]
linux
a8b33654b1e3b0c74d4a1fed041c9aae50b3c427
73,214,243,569,687,940,000,000,000,000,000,000,000
6
Staging: sb105x: info leak in mp_get_count() The icount.reserved[] array isn't initialized so it leaks stack information to userspace. Reported-by: Nico Golde <[email protected]> Reported-by: Fabian Yamaguchi <[email protected]> Signed-off-by: Dan Carpenter <[email protected]> Cc: [email protected] Signed-off-by: Linus Torvalds <[email protected]>
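
A standalone sketch of this leak class and its fix (struct and names illustrative): zero the whole struct, reserved fields and padding included, before it is copied to userspace.

    #include <string.h>

    struct icount_like {
      unsigned int rx, tx;
      unsigned int reserved[9];      /* the uninitialized array in the report */
    };

    static void
    fill_icount (struct icount_like *out, unsigned int rx, unsigned int tx)
    {
      memset (out, 0, sizeof *out);  /* the fix: no stack bytes survive */
      out->rx = rx;
      out->tx = tx;
    }
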
link_prefix_patterns(apr_array_header_t *array) { int i; if (!array) return; for (i = 1; i < array->nelts; ++i) { sorted_pattern_t *prev = &APR_ARRAY_IDX(array, i - 1, sorted_pattern_t); sorted_pattern_t *pattern = &APR_ARRAY_IDX(array, i, sorted_pattern_t); /* Does PATTERN potentially have a prefix in ARRAY? * If so, at least the first char must match with the predecessor's * because the array is sorted by that string. */ if (prev->node->segment.data[0] != pattern->node->segment.data[0]) continue; /* Only the predecessor or any of its prefixes can be the closest * prefix to PATTERN. */ for ( ; prev; prev = prev->next) if ( prev->node->segment.len < pattern->node->segment.len && !memcmp(prev->node->segment.data, pattern->node->segment.data, prev->node->segment.len)) { pattern->next = prev; break; } } }
0
[ "CWE-703" ]
subversion
e1b615840932fb46aefe1cd90d2115720af4600e
162,236,002,489,652,840,000,000,000,000,000,000,000
32
Fix issue #4880 "Use-after-free of object-pools when used as httpd module" Ensure that we initialize authz again if the pool which our authz caches depend on is cleared. Apache HTTPD may run pre/post config hooks multiple times and clear its global configuration pool which our authz caching pools depend on. Reported-by: Thomas Weißschuh (thomas {at} t-8ch dot de) Thomas has also confirmed that this patch fixes the problem. * subversion/libsvn_repos/authz.c (deinit_authz): New pool cleanup handler which resets authz initialization in case the parent pool of our authz caches is cleared. (synchronized_authz_initialize): Register new pool cleanup handler. git-svn-id: https://svn.apache.org/repos/asf/subversion/trunk@1894734 13f79535-47bb-0310-9956-ffa450edef68
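
A sketch of the registered cleanup this message describes (cache pointer and function names are illustrative; the APR calls are real): reset the cached state when the parent pool is cleared, so the next request re-initializes instead of dereferencing freed pools.

    #include <apr_pools.h>

    static void *authz_cache;               /* illustrative cached state */

    static apr_status_t
    deinit_cache (void *data)
    {
      (void) data;
      authz_cache = NULL;                   /* force re-initialization */
      return APR_SUCCESS;
    }

    static void
    init_cache (apr_pool_t *parent_pool)
    {
      /* fires when httpd clears or destroys the pool the cache depends on */
      apr_pool_cleanup_register (parent_pool, NULL, deinit_cache,
                                 apr_pool_cleanup_null);
    }
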
static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct proc_dir_entry *pde = seq->private; struct neigh_table *tbl = pde->data; int cpu; for (cpu = *pos; cpu < NR_CPUS; ++cpu) { if (!cpu_possible(cpu)) continue; *pos = cpu+1; return per_cpu_ptr(tbl->stats, cpu); } return NULL; }
0
[ "CWE-200" ]
linux-2.6
9ef1d4c7c7aca1cd436612b6ca785b726ffb8ed8
334,603,379,156,399,220,000,000,000,000,000,000,000
14
[NETLINK]: Missing initializations in dumped data Mostly missing initialization of padding fields of 1 or 2 bytes length, two instances of uninitialized nlmsgerr->msg of 16 bytes length. Signed-off-by: Patrick McHardy <[email protected]> Signed-off-by: David S. Miller <[email protected]>
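
A sketch of this fix class (struct illustrative). Note that `= {0}` zeroes named members but the C standard does not promise zeroed padding, so memset is the reliable way to keep 1-2 byte padding holes out of dumped replies.

    #include <string.h>

    struct dump_reply {
      unsigned char family;          /* followed by 3 bytes of padding */
      unsigned int flags;
    };

    static void
    fill_reply (struct dump_reply *r, unsigned char family, unsigned int flags)
    {
      memset (r, 0, sizeof *r);      /* padding included: nothing leaks */
      r->family = family;
      r->flags = flags;
    }
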
RGWOp *RGWHandler_REST_Service_S3::op_head() { return new RGWListBuckets_ObjStore_S3; }
0
[ "CWE-79" ]
ceph
8f90658c731499722d5f4393c8ad70b971d05f77
110,395,458,319,606,750,000,000,000,000,000,000,000
4
rgw: reject unauthenticated response-header actions Signed-off-by: Matt Benjamin <[email protected]> Reviewed-by: Casey Bodley <[email protected]> (cherry picked from commit d8dd5e513c0c62bbd7d3044d7e2eddcd897bd400)
static int _query(netsnmp_variable_list *list, int request, netsnmp_session *session) { netsnmp_pdu *pdu; netsnmp_pdu *response = NULL; netsnmp_variable_list *vb1, *vb2, *vtmp; int ret, count; DEBUGMSGTL(("iquery", "query on session %p\n", session)); if (NULL == list) { snmp_log(LOG_ERR, "empty variable list in _query\n"); return SNMP_ERR_GENERR; } pdu = snmp_pdu_create( request ); if (NULL == pdu) { snmp_log(LOG_ERR, "could not allocate pdu\n"); return SNMP_ERR_GENERR; } /* * Clone the varbind list into the request PDU... */ pdu->variables = snmp_clone_varbind( list ); if (NULL == pdu->variables) { snmp_log(LOG_ERR, "could not clone variable list\n"); snmp_free_pdu(pdu); return SNMP_ERR_GENERR; } #ifndef NETSNMP_NO_WRITE_SUPPORT retry: #endif if ( session ) ret = snmp_synch_response( session, pdu, &response ); else if (_def_query_session) ret = snmp_synch_response( _def_query_session, pdu, &response ); else { /* No session specified */ snmp_free_pdu(pdu); return SNMP_ERR_GENERR; } DEBUGMSGTL(("iquery", "query returned %d\n", ret)); /* * ....then copy the results back into the * list (assuming the request succeeded!). * This avoids having to worry about how this * list was originally allocated. */ if ( ret == SNMP_ERR_NOERROR ) { if ( response->errstat != SNMP_ERR_NOERROR ) { DEBUGMSGT(("iquery", "Error in packet: %s\n", snmp_errstring(response->errstat))); /* * If the request failed, then remove the * offending varbind and try again. * (all except SET requests) * * XXX - implement a library version of * NETSNMP_DS_APP_DONT_FIX_PDUS ?? */ ret = response->errstat; if (response->errindex != 0) { DEBUGMSGT(("iquery:result", "Failed object:\n")); for (count = 1, vtmp = response->variables; vtmp && count != response->errindex; vtmp = vtmp->next_variable, count++) /*EMPTY*/; if (vtmp) DEBUGMSGVAR(("iquery:result", vtmp)); DEBUGMSG(("iquery:result", "\n")); } #ifndef NETSNMP_NO_WRITE_SUPPORT if (request != SNMP_MSG_SET && response->errindex != 0) { DEBUGMSGTL(("iquery", "retrying query (%d, %ld)\n", ret, response->errindex)); pdu = snmp_fix_pdu( response, request ); snmp_free_pdu( response ); response = NULL; if ( pdu != NULL ) goto retry; } #endif /* !NETSNMP_NO_WRITE_SUPPORT */ } else { for (vb1 = response->variables, vb2 = list; vb1; vb1 = vb1->next_variable, vb2 = vb2->next_variable) { DEBUGMSGVAR(("iquery:result", vb1)); DEBUGMSG(("iquery:results", "\n")); if ( !vb2 ) { ret = SNMP_ERR_GENERR; break; } vtmp = vb2->next_variable; snmp_free_var_internals( vb2 ); snmp_clone_var( vb1, vb2 ); /* xxx: check return? */ vb2->next_variable = vtmp; } } } else { /* Distinguish snmp_send errors from SNMP errStat errors */ ret = -ret; } snmp_free_pdu( response ); return ret; }
0
[ "CWE-415" ]
net-snmp
5f881d3bf24599b90d67a45cae7a3eb099cd71c9
294,681,117,413,364,970,000,000,000,000,000,000,000
109
libsnmp, USM: Introduce a reference count in struct usmStateReference This patch fixes https://sourceforge.net/p/net-snmp/bugs/2956/.
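
The introduced reference count, reduced to a generic C sketch (struct name illustrative): each owner takes a reference and only the last put frees, which removes the double-free the bug report describes.

    #include <stdlib.h>

    struct state_ref {
      int refcount;
      /* payload omitted for brevity */
    };

    static struct state_ref *
    state_ref_new (void)
    {
      struct state_ref *r = calloc (1, sizeof *r);
      if (r != NULL)
        r->refcount = 1;
      return r;
    }

    static void
    state_ref_get (struct state_ref *r)
    {
      r->refcount++;
    }

    static void
    state_ref_put (struct state_ref *r)
    {
      if (r != NULL && --r->refcount == 0)
        free (r);                    /* exactly one owner frees */
    }
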
static int netlink_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) { seq_puts(seq, "sk Eth Pid Groups " "Rmem Wmem Dump Locks Drops Inode\n"); } else { struct sock *s = v; struct netlink_sock *nlk = nlk_sk(s); seq_printf(seq, "%pK %-3d %-6d %08x %-8d %-8d %pK %-8d %-8d %-8lu\n", s, s->sk_protocol, nlk->pid, nlk->groups ? (u32)nlk->groups[0] : 0, sk_rmem_alloc_get(s), sk_wmem_alloc_get(s), nlk->cb, atomic_read(&s->sk_refcnt), atomic_read(&s->sk_drops), sock_i_ino(s) ); } return 0; }
0
[ "CWE-287", "CWE-284" ]
linux
e0e3cea46d31d23dc40df0a49a7a2c04fe8edfea
273,972,852,313,508,780,000,000,000,000,000,000,000
26
af_netlink: force credentials passing [CVE-2012-3520] Pablo Neira Ayuso discovered that avahi and potentially NetworkManager accept spoofed Netlink messages because of a kernel bug. The kernel passes all-zero SCM_CREDENTIALS ancillary data to the receiver if the sender did not provide such data, instead of not including any such data at all or including the correct data from the peer (as it is the case with AF_UNIX). This bug was introduced in commit 16e572626961 (af_unix: dont send SCM_CREDENTIALS by default) This patch forces passing credentials for netlink, as before the regression. Another fix would be to not add SCM_CREDENTIALS in netlink messages if not provided by the sender, but it might break some programs. With help from Florian Weimer & Petr Matousek This issue is designated as CVE-2012-3520 Signed-off-by: Eric Dumazet <[email protected]> Cc: Petr Matousek <[email protected]> Cc: Florian Weimer <[email protected]> Cc: Pablo Neira Ayuso <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static int selinux_bprm_secureexec(struct linux_binprm *bprm) { const struct cred *cred = current_cred(); const struct task_security_struct *tsec = cred->security; u32 sid, osid; int atsecure = 0; sid = tsec->sid; osid = tsec->osid; if (osid != sid) { /* Enable secure mode for SIDs transitions unless the noatsecure permission is granted between the two SIDs, i.e. ahp returns 0. */ atsecure = avc_has_perm(osid, sid, SECCLASS_PROCESS, PROCESS__NOATSECURE, NULL); } return (atsecure || cap_bprm_secureexec(bprm)); }
0
[]
linux-2.6
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
56,462,996,007,891,680,000,000,000,000,000,000,000
21
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6] Add a keyctl to install a process's session keyring onto its parent. This replaces the parent's session keyring. Because the COW credential code does not permit one process to change another process's credentials directly, the change is deferred until userspace next starts executing again. Normally this will be after a wait*() syscall. To support this, three new security hooks have been provided: cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in the blank security creds and key_session_to_parent() - which asks the LSM if the process may replace its parent's session keyring. The replacement may only happen if the process has the same ownership details as its parent, and the process has LINK permission on the session keyring, and the session keyring is owned by the process, and the LSM permits it. Note that this requires alteration to each architecture's notify_resume path. This has been done for all arches barring blackfin, m68k* and xtensa, all of which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the replacement to be performed at the point the parent process resumes userspace execution. This allows the userspace AFS pioctl emulation to fully emulate newpag() and the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to alter the parent process's PAG membership. However, since kAFS doesn't use PAGs per se, but rather dumps the keys into the session keyring, the session keyring of the parent must be replaced if, for example, VIOCSETTOK is passed the newpag flag. This can be tested with the following program: #include <stdio.h> #include <stdlib.h> #include <keyutils.h> #define KEYCTL_SESSION_TO_PARENT 18 #define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0) int main(int argc, char **argv) { key_serial_t keyring, key; long ret; keyring = keyctl_join_session_keyring(argv[1]); OSERROR(keyring, "keyctl_join_session_keyring"); key = add_key("user", "a", "b", 1, keyring); OSERROR(key, "add_key"); ret = keyctl(KEYCTL_SESSION_TO_PARENT); OSERROR(ret, "KEYCTL_SESSION_TO_PARENT"); return 0; } Compiled and linked with -lkeyutils, you should see something like: [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 355907932 --alswrv 4043 -1 \_ keyring: _uid.4043 [dhowells@andromeda ~]$ /tmp/newpag [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 1055658746 --alswrv 4043 4043 \_ user: a [dhowells@andromeda ~]$ /tmp/newpag hello [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: hello 340417692 --alswrv 4043 4043 \_ user: a Where the test program creates a new session keyring, sticks a user key named 'a' into it and then installs it on its parent. Signed-off-by: David Howells <[email protected]> Signed-off-by: James Morris <[email protected]>
get_seckey_byfprint (PKT_public_key *pk, const byte * fprint, size_t fprint_len) { gpg_error_t err; if (fprint_len == 20 || fprint_len == 16) { struct getkey_ctx_s ctx; kbnode_t kb = NULL; memset (&ctx, 0, sizeof ctx); ctx.exact = 1; ctx.not_allocated = 1; ctx.kr_handle = keydb_new (); ctx.nitems = 1; ctx.items[0].mode = fprint_len == 16 ? KEYDB_SEARCH_MODE_FPR16 : KEYDB_SEARCH_MODE_FPR20; memcpy (ctx.items[0].u.fpr, fprint, fprint_len); err = lookup (&ctx, &kb, 1); if (!err && pk) pk_from_block (&ctx, pk, kb); release_kbnode (kb); get_pubkey_end (&ctx); } else err = gpg_error (GPG_ERR_BUG); return err; }
0
[ "CWE-310" ]
gnupg
4bde12206c5bf199dc6e12a74af8da4558ba41bf
199,618,617,467,471,660,000,000,000,000,000,000,000
27
gpg: Distinguish between missing and cleared key flags. * include/cipher.h (PUBKEY_USAGE_NONE): New. * g10/getkey.c (parse_key_usage): Set new flag. -- We do not want to use the default capabilities (derived from the algorithm) if any key flags are given in a signature. Thus if key flags are used in any way, the default key capabilities are never used. This allows to create a key with key flags set to all zero so it can't be used. This better reflects common sense.
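
A sketch of the distinction this gpg change draws (constants and bit mappings are illustrative stand-ins for PUBKEY_USAGE_NONE and friends): no key-flags subpacket means algorithm defaults apply, while a present-but-zero subpacket marks the key deliberately unusable.

    #define USE_SIGN 0x01            /* illustrative stand-ins */
    #define USE_ENCR 0x02
    #define USE_NONE 0x80            /* "flags were present" marker */

    static unsigned int
    parse_key_usage_sketch (int have_flags, unsigned char flags)
    {
      if (!have_flags)
        return 0;                    /* caller applies algorithm defaults */
      unsigned int use = USE_NONE;   /* defaults must never apply now */
      if (flags & 0x03)
        use |= USE_SIGN;
      if (flags & 0x0C)
        use |= USE_ENCR;
      return use;
    }
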
add_ctype_to_cc_by_range(CClassNode* cc, int ctype ARG_UNUSED, int not, OnigEncoding enc ARG_UNUSED, OnigCodePoint sb_out, const OnigCodePoint mbr[]) { int i, r; OnigCodePoint j; int n = ONIGENC_CODE_RANGE_NUM(mbr); if (not == 0) { for (i = 0; i < n; i++) { for (j = ONIGENC_CODE_RANGE_FROM(mbr, i); j <= ONIGENC_CODE_RANGE_TO(mbr, i); j++) { if (j >= sb_out) { if (j == ONIGENC_CODE_RANGE_TO(mbr, i)) i++; else if (j > ONIGENC_CODE_RANGE_FROM(mbr, i)) { r = add_code_range_to_buf(&(cc->mbuf), j, ONIGENC_CODE_RANGE_TO(mbr, i)); if (r != 0) return r; i++; } goto sb_end; } BITSET_SET_BIT(cc->bs, j); } } sb_end: for ( ; i < n; i++) { r = add_code_range_to_buf(&(cc->mbuf), ONIGENC_CODE_RANGE_FROM(mbr, i), ONIGENC_CODE_RANGE_TO(mbr, i)); if (r != 0) return r; } } else { OnigCodePoint prev = 0; for (i = 0; i < n; i++) { for (j = prev; j < ONIGENC_CODE_RANGE_FROM(mbr, i); j++) { if (j >= sb_out) { goto sb_end2; } BITSET_SET_BIT(cc->bs, j); } prev = ONIGENC_CODE_RANGE_TO(mbr, i) + 1; } for (j = prev; j < sb_out; j++) { BITSET_SET_BIT(cc->bs, j); } sb_end2: prev = sb_out; for (i = 0; i < n; i++) { if (prev < ONIGENC_CODE_RANGE_FROM(mbr, i)) { r = add_code_range_to_buf(&(cc->mbuf), prev, ONIGENC_CODE_RANGE_FROM(mbr, i) - 1); if (r != 0) return r; } prev = ONIGENC_CODE_RANGE_TO(mbr, i) + 1; } if (prev < 0x7fffffff) { r = add_code_range_to_buf(&(cc->mbuf), prev, 0x7fffffff); if (r != 0) return r; } } return 0; }
0
[ "CWE-125" ]
oniguruma
65a9b1aa03c9bc2dc01b074295b9603232cb3b78
38,806,118,639,097,644,000,000,000,000,000,000,000
72
onig-5.9.2
bool tr_variantDictFindDict(tr_variant* dict, tr_quark const key, tr_variant** setme) { return tr_variantDictFindType(dict, key, TR_VARIANT_TYPE_DICT, setme); }
0
[ "CWE-416", "CWE-284" ]
transmission
2123adf8e5e1c2b48791f9d22fc8c747e974180e
86,368,812,576,027,700,000,000,000,000,000,000,000
4
CVE-2018-10756: Fix heap-use-after-free in tr_variantWalk In libtransmission/variant.c, function tr_variantWalk, when the variant stack is reallocated, a pointer to the previously allocated memory region is kept. This address is later accessed (heap use-after-free) while walking back down the stack, causing the application to crash. The application can be any application which uses libtransmission, such as transmission-daemon, transmission-gtk, transmission-show, etc. Reported-by: Tom Richards <[email protected]>
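A minimal sketch of the bug pattern described above, with hypothetical names (it is not the actual tr_variantWalk code): a pointer into a growable stack is captured before realloc() and dereferenced afterwards; the fix is to recompute the pointer from the new base after any reallocation.

#include <stdlib.h>

struct frame { size_t child; };

/* Illustrative only; error handling elided. */
static void grow_and_use(struct frame **stackp, size_t depth, size_t new_cap)
{
    struct frame *node = &(*stackp)[depth];          /* pointer into the old block */
    *stackp = realloc(*stackp, new_cap * sizeof(**stackp));
    /* BUG: 'node' may now dangle. The fix re-derives it from the new base: */
    node = &(*stackp)[depth];
    node->child++;
}

Using indices instead of raw pointers across the realloc would avoid the hazard entirely.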
LZWFixupTags(TIFF* tif) { (void) tif; return (1); }
0
[ "CWE-787" ]
libtiff
58a898cb4459055bb488ca815c23b880c242a27d
245,727,912,673,559,500,000,000,000,000,000,000,000
5
LZWDecodeCompat(): fix potential index-out-of-bounds write. Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2780 / CVE-2018-8905 The fix applies the same validation already used in LZWDecode() to ensure we don't write outside of the output buffer.
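A hedged, generic sketch of the kind of guard the fix adds; the names here are illustrative and not libtiff's. Every decoded run is checked against the end of the caller's output buffer before the write happens:

/* op: write cursor, op_limit: one past the end of the output buffer */
if ((size_t)(op_limit - op) < codep->length) {
    report_error("LZWDecodeCompat: output buffer overrun averted"); /* hypothetical reporter */
    return 0;
}
memcpy(op, codep->data, codep->length);
op += codep->length;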
/* {{{ proto DateTimeImmutable::setTimestamp() */ PHP_METHOD(DateTimeImmutable, setTimestamp) { zval *object, *new_object; long timestamp; if (zend_parse_method_parameters(ZEND_NUM_ARGS() TSRMLS_CC, getThis(), "Ol", &object, date_ce_immutable, &timestamp) == FAILURE) { RETURN_FALSE; } new_object = date_clone_immutable(object TSRMLS_CC); php_date_timestamp_set(new_object, timestamp, return_value TSRMLS_CC);
0
[]
php-src
bb057498f7457e8b2eba98332a3bad434de4cf12
200,618,175,717,082,170,000,000,000,000,000,000,000
14
Fix #70277: new DateTimeZone($foo) is ignoring text after null byte The DateTimeZone constructors are not binary safe: they parse the timezone as a string but discard its length when calling timezone_initialize(). This patch adds a tz_len parameter and a corresponding check to timezone_initialize().
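A hedged sketch of the binary-safety check the patch describes (not the exact php-src code): the zval's length is passed down, and input whose strlen() is shorter than its reported length, i.e. containing an embedded NUL, is rejected.

#include <string.h>

static int timezone_initialize(php_timezone_obj *tzobj, const char *tz, size_t tz_len)
{
    if (strlen(tz) != tz_len)
        return FAILURE;          /* embedded NUL byte: "UTC\0junk" is rejected */
    /* ... parse tz as before ... */
    return SUCCESS;
}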
static inline size_t GetPixelMetacontentExtent( const Image *magick_restrict image) { return(image->metacontent_extent); }
0
[ "CWE-20", "CWE-125" ]
ImageMagick
8187d2d8fd010d2d6b1a3a8edd935beec404dddc
332,858,005,193,346,380,000,000,000,000,000,000,000
5
https://github.com/ImageMagick/ImageMagick/issues/1610
char *recv_line(struct pool *pool) { ssize_t len, buflen; char *tok, *sret = NULL; if (!strstr(pool->sockbuf, "\n")) { struct timeval rstart, now; gettimeofday(&rstart, NULL); if (!socket_full(pool, true)) { applog(LOG_DEBUG, "Timed out waiting for data on socket_full"); goto out; } mutex_lock(&pool->stratum_lock); do { char s[RBUFSIZE]; size_t slen, n; memset(s, 0, RBUFSIZE); n = recv(pool->sock, s, RECVSIZE, 0); if (n < 1 && errno != EAGAIN && errno != EWOULDBLOCK) { applog(LOG_DEBUG, "Failed to recv sock in recv_line"); break; } slen = strlen(s); recalloc_sock(pool, slen); strcat(pool->sockbuf, s); gettimeofday(&now, NULL); } while (tdiff(&now, &rstart) < 60 && !strstr(pool->sockbuf, "\n")); mutex_unlock(&pool->stratum_lock); } buflen = strlen(pool->sockbuf); tok = strtok(pool->sockbuf, "\n"); if (!tok) { applog(LOG_DEBUG, "Failed to parse a \\n terminated string in recv_line"); goto out; } sret = strdup(tok); len = strlen(sret); /* Copy what's left in the buffer after the \n, including the * terminating \0 */ if (buflen > len + 1) memmove(pool->sockbuf, pool->sockbuf + len + 1, buflen - len + 1); else strcpy(pool->sockbuf, ""); pool->cgminer_pool_stats.times_received++; pool->cgminer_pool_stats.bytes_received += len; total_bytes_xfer += len; pool->cgminer_pool_stats.net_bytes_received += len; out: if (!sret) clear_sock(pool); else if (opt_protocol) applog(LOG_DEBUG, "RECVD: %s", sret); return sret; }
0
[ "CWE-119", "CWE-787" ]
bfgminer
c80ad8548251eb0e15329fc240c89070640c9d79
64,984,840,260,591,530,000,000,000,000,000,000,000
61
Stratum: extract_sockaddr: Truncate overlong addresses rather than stack overflow Thanks to Mick Ayzenberg <[email protected]> for finding this!
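A hedged sketch of the fix pattern named in the commit, with illustrative names (not bfgminer's): copy the address into the fixed-size buffer with an explicit bound, so an overlong URL is truncated instead of smashing the stack.

#include <stddef.h>
#include <string.h>

static void copy_address(char *dst, size_t dst_sz, const char *src, size_t src_len)
{
    if (src_len >= dst_sz)
        src_len = dst_sz - 1;    /* truncate overlong addresses */
    memcpy(dst, src, src_len);
    dst[src_len] = '\0';
}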
l_id(void) { return (unsigned long)pthread_self(); }
0
[]
pound
a0c52c542ca9620a96750f9877b26bf4c84aef1b
233,763,675,789,355,220,000,000,000,000,000,000,000
4
SSL Compression Disable patch for 2.6f This patch disables SSL/TLS compression entirely. There is no config option. This prevents CRIME attacks against SSL. Note that HTTP compression is still an option. Test your server at https://www.ssllabs.com/ssldb/ Original patch by Hereward Cooper <[email protected]> Openssl 0.9.8 disabling ideas borrowed from Igor Sysoev's code in nginx.
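A hedged sketch of disabling TLS compression in OpenSSL as the patch describes; SSL_OP_NO_COMPRESSION exists from OpenSSL 1.0.0 onward, and the fallback branch empties the global compression-method list in the style the commit attributes to nginx.

#include <openssl/ssl.h>

static void disable_tls_compression(SSL_CTX *ctx)
{
#ifdef SSL_OP_NO_COMPRESSION
    SSL_CTX_set_options(ctx, SSL_OP_NO_COMPRESSION);       /* OpenSSL >= 1.0.0 */
#else
    /* 0.9.8-era workaround: pop every entry off the method list. */
    STACK_OF(SSL_COMP) *comp = SSL_COMP_get_compression_methods();
    while (sk_SSL_COMP_num(comp) > 0)
        (void) sk_SSL_COMP_pop(comp);
#endif
}

Disabling compression at the context level closes the CRIME side channel for every connection made through that SSL_CTX.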
static int customRsaPrivEnc( int flen, const unsigned char* from, unsigned char* to, RSA* rsa, int padding) { LOG(INFO) << "rsa_priv_enc"; EventBase* asyncJobEvb = reinterpret_cast<EventBase*>(RSA_get_ex_data(rsa, kRSAEvbExIndex)); CHECK(asyncJobEvb); RSA* actualRSA = reinterpret_cast<RSA*>(RSA_get_ex_data(rsa, kRSAExIndex)); CHECK(actualRSA); AsyncSSLSocket* socket = reinterpret_cast<AsyncSSLSocket*>( RSA_get_ex_data(rsa, kRSASocketExIndex)); ASYNC_JOB* job = ASYNC_get_current_job(); if (job == nullptr) { throw std::runtime_error("Expected call in job context"); } ASYNC_WAIT_CTX* waitctx = ASYNC_get_wait_ctx(job); OSSL_ASYNC_FD pipefds[2] = {0, 0}; makeNonBlockingPipe(pipefds); if (!ASYNC_WAIT_CTX_set_wait_fd( waitctx, kEngineId.data(), pipefds[0], nullptr, nullptr)) { throw std::runtime_error("Cannot set wait fd"); } int ret = 0; int* retptr = &ret; auto hand = folly::NetworkSocket::native_handle_type(pipefds[1]); auto asyncPipeWriter = folly::AsyncPipeWriter::newWriter( asyncJobEvb, folly::NetworkSocket(hand)); asyncJobEvb->runInEventBaseThread([retptr = retptr, flen = flen, from = from, to = to, padding = padding, actualRSA = actualRSA, writer = std::move(asyncPipeWriter), socket = socket]() { LOG(INFO) << "Running job"; if (socket) { LOG(INFO) << "Got a socket passed in, closing it..."; socket->closeNow(); } *retptr = RSA_meth_get_priv_enc(RSA_PKCS1_OpenSSL())( flen, from, to, actualRSA, padding); LOG(INFO) << "Finished job, writing to pipe"; uint8_t byte = *retptr > 0 ? 1 : 0; writer->write(nullptr, &byte, 1); }); LOG(INFO) << "About to pause job"; ASYNC_pause_job(); LOG(INFO) << "Resumed job with ret: " << ret; return ret; }
0
[ "CWE-125" ]
folly
c321eb588909646c15aefde035fd3133ba32cdee
92,870,852,150,251,030,000,000,000,000,000,000,000
61
Handle close_notify as standard writeErr in AsyncSSLSocket. Summary: Fixes CVE-2019-11934 Reviewed By: mingtaoy Differential Revision: D18020613 fbshipit-source-id: db82bb250e53f0d225f1280bd67bc74abd417836
static void Ins_NOT( INS_ARG ) { (void)exc; if ( args[0] != 0 ) args[0] = 0; else args[0] = 1; }
0
[ "CWE-125" ]
ghostpdl
c7c55972758a93350882c32147801a3485b010fe
312,735,563,386,179,800,000,000,000,000,000,000,000
7
Bug 698024: bounds check zone pointer in Ins_MIRP()
static int __init sched_debug_setup(char *str) { sched_debug_enabled = 1; return 0; }
0
[ "CWE-119" ]
linux
29d6455178a09e1dc340380c582b13356227e8df
222,252,535,186,159,040,000,000,000,000,000,000,000
6
sched: panic on corrupted stack end Until now, hitting this BUG_ON caused a recursive oops (because oops handling involves do_exit(), which calls into the scheduler, which in turn raises an oops), which caused stuff below the stack to be overwritten until a panic happened (e.g. via an oops in interrupt context, caused by the overwritten CPU index in the thread_info). Just panic directly. Signed-off-by: Jann Horn <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
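A hedged sketch of the change: the scheduler's debug check stops using BUG_ON (whose oops path re-enters the scheduler) and panics directly. The helper name follows the kernel's stack-end canary check and is assumed from context rather than quoted from the patch.

/* In schedule_debug(), illustrative form of the fix: */
if (task_stack_end_corrupted(prev))
    panic("corrupted stack end detected inside scheduler\n");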
void BytecodeFunctionGenerator::addDebugSourceLocation( const DebugSourceLocation &info) { // If an address is repeated, it means no actual bytecode was emitted for the // previous source location. if (!debugLocations_.empty() && debugLocations_.back().address == info.address) { debugLocations_.back() = info; } else { debugLocations_.push_back(info); } }
0
[ "CWE-125", "CWE-787" ]
hermes
091835377369c8fd5917d9b87acffa721ad2a168
139,105,830,269,879,720,000,000,000,000,000,000,000
11
Correctly restore whether or not a function is an inner generator Summary: If a generator was large enough to be lazily compiled, we would lose that information when reconstituting the function's context. This meant the function was generated as a regular function instead of a generator. #utd-hermes-ignore-android Reviewed By: tmikov Differential Revision: D23580247 fbshipit-source-id: af5628bf322cbdc7c7cdfbb5f8d0756328518ea1
static RzBinInfo *info(RzBinFile *bf) { if (!bf) { return NULL; } LuacBinInfo *bin_info_obj = GET_INTERNAL_BIN_INFO_OBJ(bf); if (!bin_info_obj) { return NULL; } return bin_info_obj->general_info; }
0
[ "CWE-200", "CWE-787" ]
rizin
05bbd147caccc60162d6fba9baaaf24befa281cd
32,107,109,887,770,846,000,000,000,000,000,000,000
11
Fix out-of-bounds read in _luac_build_info and luac memory leaks
QString PostgreSqlStorage::awayMessage(UserId user, NetworkId networkId) { QSqlQuery query(logDb()); query.prepare(queryString("select_network_awaymsg")); query.bindValue(":userid", user.toInt()); query.bindValue(":networkid", networkId.toInt()); safeExec(query); watchQuery(query); QString awayMsg; if (query.first()) awayMsg = query.value(0).toString(); return awayMsg; }
0
[ "CWE-89" ]
quassel
aa1008be162cb27da938cce93ba533f54d228869
184,693,193,050,259,130,000,000,000,000,000,000,000
13
Fixing security vulnerability with Qt 4.8.5+ and PostgreSQL. Properly detects whether Qt performs slash escaping in SQL queries or not, and then configures PostgreSQL accordingly. This bug was introduced by a bugfix in Qt 4.8.5 that disables slash escaping when binding queries: https://bugreports.qt-project.org/browse/QTBUG-30076 Thanks to brot and Tucos. [Fixes #1244]
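A hedged sketch of one way to detect the behaviour change at runtime, not Quassel's exact code: round-trip a backslash through a bound parameter and flip the server's escaping mode if the driver/server combination mangles it. The probe query and setting choice are illustrative assumptions.

// Illustrative probe using Qt's SQL API.
QSqlQuery probe(db);
probe.prepare(QString("SELECT CAST(:v AS text)"));
probe.bindValue(":v", QString("\\"));
const bool roundTripOk = probe.exec() && probe.first()
                         && probe.value(0).toString() == "\\";
if (!roundTripOk) {
    // Qt 4.8.5+ no longer escapes backslashes, so ask PostgreSQL to
    // treat them literally.
    QSqlQuery(db).exec("SET standard_conforming_strings TO on");
}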
std::string PngChunk::zlibCompress(const std::string& text) { uLongf compressedLen = static_cast<uLongf>(text.size() * 2); // just a starting point int zlibResult; DataBuf arr; do { arr.alloc(compressedLen); zlibResult = compress2((Bytef*)arr.pData_, &compressedLen, (const Bytef*)text.data(), static_cast<uLong>(text.size()), Z_BEST_COMPRESSION); switch (zlibResult) { case Z_OK: assert((uLongf)arr.size_ >= compressedLen); arr.size_ = compressedLen; break; case Z_BUF_ERROR: // The compressed array needs to be larger #ifdef DEBUG std::cout << "Exiv2::PngChunk::parsePngChunk: doubling size for compression.\n"; #endif compressedLen *= 2; // DoS protection. Cap max compressed size if ( compressedLen > 131072 ) throw Error(kerFailedToReadImageData); break; default: // Something bad happened throw Error(kerFailedToReadImageData); } } while (zlibResult == Z_BUF_ERROR); return std::string((const char*)arr.pData_, arr.size_); } // PngChunk::zlibCompress
0
[ "CWE-125" ]
exiv2
35b3e596edacd2437c2c5d3dd2b5c9502626163d
135,246,796,817,241,790,000,000,000,000,000,000,000
35
Add overflow & overread checks to PngChunk::parseTXTChunk() This function was creating a lot of new pointers and strings without properly checking the array bounds. This commit adds several calls to enforce(), making sure that the pointers stay within bounds. Strings are now created using the helper function string_from_unterminated() to prevent overreads in the constructor of std::string. This fixes #400
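A hedged sketch of the string_from_unterminated() helper named above (the real Exiv2 implementation may differ in detail): it builds a std::string from a possibly unterminated buffer, stopping at the first NUL or at the buffer's end, so std::string's constructor can never overread.

#include <cstring>
#include <string>

static std::string string_from_unterminated(const char* data, size_t data_length)
{
    // memchr stays inside [data, data + data_length); no terminator required.
    const char* nul = static_cast<const char*>(std::memchr(data, '\0', data_length));
    const size_t len = nul ? static_cast<size_t>(nul - data) : data_length;
    return std::string(data, len);
}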
static void debug_handler(int level, const char *format, va_list ap) { if (verbose) { vprintf(format, ap); } else { char buf[4096], *tmp; int len; if (vsnprintf(buf, sizeof(buf), format, ap) >= sizeof(buf)) { fprintf(stderr, "Increase temporary log buffer size!\n"); return; } if (pthread_mutex_lock(&log_mutex) != 0) { fprintf(stderr, "pthread_mutex_lock failed!\n"); return; } len = (log_buffer != NULL) ? strlen(log_buffer) : 0; tmp = realloc(log_buffer, len + strlen(buf) + 1); if (tmp != NULL) { log_buffer = tmp; strcpy(log_buffer + len, buf); } else { fprintf(stderr, "Out of memory for log buffer!\n"); } if (pthread_mutex_unlock(&log_mutex) != 0) { fprintf(stderr, "pthread_mutex_unlock failed!\n"); failure(); } } }
0
[ "CWE-310" ]
libgadu
d882b15661ee94949919ebbbc43edf0db5f619cb
7,269,473,357,736,428,000,000,000,000,000,000,000
35
Since the library verifies certificates, it is hard to test with self-signed ones.
static struct db_sys_list *_db_rule_gen_64(const struct arch_def *arch, const struct db_api_rule_list *rule) { unsigned int iter; struct db_sys_list *s_new; const struct db_api_arg *chain = rule->args; struct db_arg_chain_tree *c_iter[3] = { NULL, NULL, NULL }; struct db_arg_chain_tree *c_prev[3] = { NULL, NULL, NULL }; enum scmp_compare op_prev = _SCMP_CMP_MIN; unsigned int arg; scmp_datum_t mask; scmp_datum_t datum; s_new = zmalloc(sizeof(*s_new)); if (s_new == NULL) return NULL; s_new->num = rule->syscall; s_new->valid = true; /* run through the argument chain */ for (iter = 0; iter < ARG_COUNT_MAX; iter++) { if (chain[iter].valid == 0) continue; /* TODO: handle the case were either hi or lo isn't needed */ /* skip generating instruction which are no-ops */ if (!_db_arg_cmp_need_hi(&chain[iter]) && !_db_arg_cmp_need_lo(&chain[iter])) continue; c_iter[0] = zmalloc(sizeof(*c_iter[0])); if (c_iter[0] == NULL) goto gen_64_failure; c_iter[1] = zmalloc(sizeof(*c_iter[1])); if (c_iter[1] == NULL) { free(c_iter[0]); goto gen_64_failure; } c_iter[2] = NULL; arg = chain[iter].arg; mask = chain[iter].mask; datum = chain[iter].datum; /* NOTE: with the idea that a picture is worth a thousand * words, i'm presenting the following diagrams which * show how we should compare 64-bit syscall arguments * using 32-bit comparisons. * * in the diagrams below "A(x)" is the syscall argument * being evaluated and "R(x)" is the syscall argument * value specified in the libseccomp rule. the "ACCEPT" * verdict indicates a rule match and processing should * continue on to the rest of the rule, or the final rule * action should be triggered. the "REJECT" verdict * indicates that the rule does not match and processing * should continue to the next rule or the default * action. 
* * SCMP_CMP_GT: * +------------------+ * +--| Ah(x) > Rh(x) |------+ * | +------------------+ | * FALSE TRUE A * | | C * +-----------+ +----> C * v +----> E * +------------------+ | P * +--| Ah(x) == Rh(x) |--+ | T * R | +------------------+ | | * E FALSE TRUE | * J <----+ | | * E <----+ +------------+ | * C FALSE v | * T | +------------------+ | * +--| Al(x) > Rl(x) |------+ * +------------------+ * * SCMP_CMP_GE: * +------------------+ * +--| Ah(x) > Rh(x) |------+ * | +------------------+ | * FALSE TRUE A * | | C * +-----------+ +----> C * v +----> E * +------------------+ | P * +--| Ah(x) == Rh(x) |--+ | T * R | +------------------+ | | * E FALSE TRUE | * J <----+ | | * E <----+ +------------+ | * C FALSE v | * T | +------------------+ | * +--| Al(x) >= Rl(x) |------+ * +------------------+ * * SCMP_CMP_LT: * +------------------+ * +--| Ah(x) > Rh(x) |------+ * | +------------------+ | * FALSE TRUE R * | | E * +-----------+ +----> J * v +----> E * +------------------+ | C * +--| Ah(x) == Rh(x) |--+ | T * A | +------------------+ | | * C FALSE TRUE | * C <----+ | | * E <----+ +------------+ | * P FALSE v | * T | +------------------+ | * +--| Al(x) >= Rl(x) |------+ * +------------------+ * * SCMP_CMP_LE: * +------------------+ * +--| Ah(x) > Rh(x) |------+ * | +------------------+ | * FALSE TRUE R * | | E * +-----------+ +----> J * v +----> E * +------------------+ | C * +--| Ah(x) == Rh(x) |--+ | T * A | +------------------+ | | * C FALSE TRUE | * C <----+ | | * E <----+ +------------+ | * P FALSE v | * T | +------------------+ | * +--| Al(x) > Rl(x) |------+ * +------------------+ * * SCMP_CMP_EQ: * +------------------+ * +--| Ah(x) == Rh(x) |--+ * R | +------------------+ | A * E FALSE TRUE C * J <----+ | C * E <----+ +------------+ +----> E * C FALSE v | P * T | +------------------+ | T * +--| Al(x) == Rl(x) |------+ * +------------------+ * * SCMP_CMP_NE: * +------------------+ * +--| Ah(x) == Rh(x) |--+ * A | +------------------+ | R * C FALSE TRUE E * C <----+ | J * E <----+ +------------+ +----> E * P FALSE v | C * T | +------------------+ | T * +--| Al(x) == Rl(x) |------+ * +------------------+ * */ /* setup the level */ switch (chain[iter].op) { case SCMP_CMP_GT: case SCMP_CMP_GE: case SCMP_CMP_LE: case SCMP_CMP_LT: c_iter[2] = zmalloc(sizeof(*c_iter[2])); if (c_iter[2] == NULL) { free(c_iter[0]); free(c_iter[1]); goto gen_64_failure; } c_iter[0]->arg = arg; c_iter[1]->arg = arg; c_iter[2]->arg = arg; c_iter[0]->arg_h_flg = true; c_iter[1]->arg_h_flg = true; c_iter[2]->arg_h_flg = false; c_iter[0]->arg_offset = arch_arg_offset_hi(arch, arg); c_iter[1]->arg_offset = arch_arg_offset_hi(arch, arg); c_iter[2]->arg_offset = arch_arg_offset_lo(arch, arg); c_iter[0]->mask = D64_HI(mask); c_iter[1]->mask = D64_HI(mask); c_iter[2]->mask = D64_LO(mask); c_iter[0]->datum = D64_HI(datum); c_iter[1]->datum = D64_HI(datum); c_iter[2]->datum = D64_LO(datum); c_iter[0]->datum_full = datum; c_iter[1]->datum_full = datum; c_iter[2]->datum_full = datum; _db_node_mask_fixup(c_iter[0]); _db_node_mask_fixup(c_iter[1]); _db_node_mask_fixup(c_iter[2]); c_iter[0]->op = SCMP_CMP_GT; c_iter[1]->op = SCMP_CMP_EQ; switch (chain[iter].op) { case SCMP_CMP_GT: case SCMP_CMP_LE: c_iter[2]->op = SCMP_CMP_GT; break; case SCMP_CMP_GE: case SCMP_CMP_LT: c_iter[2]->op = SCMP_CMP_GE; break; default: /* we should never get here */ goto gen_64_failure; } c_iter[0]->op_orig = chain[iter].op; c_iter[1]->op_orig = chain[iter].op; c_iter[2]->op_orig = chain[iter].op; c_iter[0]->nxt_f = _db_node_get(c_iter[1]); 
c_iter[1]->nxt_t = _db_node_get(c_iter[2]); break; case SCMP_CMP_EQ: case SCMP_CMP_MASKED_EQ: case SCMP_CMP_NE: c_iter[0]->arg = arg; c_iter[1]->arg = arg; c_iter[0]->arg_h_flg = true; c_iter[1]->arg_h_flg = false; c_iter[0]->arg_offset = arch_arg_offset_hi(arch, arg); c_iter[1]->arg_offset = arch_arg_offset_lo(arch, arg); c_iter[0]->mask = D64_HI(mask); c_iter[1]->mask = D64_LO(mask); c_iter[0]->datum = D64_HI(datum); c_iter[1]->datum = D64_LO(datum); c_iter[0]->datum_full = datum; c_iter[1]->datum_full = datum; _db_node_mask_fixup(c_iter[0]); _db_node_mask_fixup(c_iter[1]); switch (chain[iter].op) { case SCMP_CMP_MASKED_EQ: c_iter[0]->op = SCMP_CMP_MASKED_EQ; c_iter[1]->op = SCMP_CMP_MASKED_EQ; break; default: c_iter[0]->op = SCMP_CMP_EQ; c_iter[1]->op = SCMP_CMP_EQ; } c_iter[0]->op_orig = chain[iter].op; c_iter[1]->op_orig = chain[iter].op; c_iter[0]->nxt_t = _db_node_get(c_iter[1]); break; default: /* we should never get here */ goto gen_64_failure; } /* link this level to the previous level */ if (c_prev[0] != NULL) { switch (op_prev) { case SCMP_CMP_GT: case SCMP_CMP_GE: c_prev[0]->nxt_t = _db_node_get(c_iter[0]); c_prev[2]->nxt_t = _db_node_get(c_iter[0]); break; case SCMP_CMP_EQ: case SCMP_CMP_MASKED_EQ: c_prev[1]->nxt_t = _db_node_get(c_iter[0]); break; case SCMP_CMP_LE: case SCMP_CMP_LT: c_prev[1]->nxt_f = _db_node_get(c_iter[0]); c_prev[2]->nxt_f = _db_node_get(c_iter[0]); break; case SCMP_CMP_NE: c_prev[0]->nxt_f = _db_node_get(c_iter[0]); c_prev[1]->nxt_f = _db_node_get(c_iter[0]); break; default: /* we should never get here */ goto gen_64_failure; } } else s_new->chains = _db_node_get(c_iter[0]); /* update the node count */ switch (chain[iter].op) { case SCMP_CMP_NE: case SCMP_CMP_EQ: case SCMP_CMP_MASKED_EQ: s_new->node_cnt += 2; break; default: s_new->node_cnt += 3; } /* keep pointers to this level */ c_prev[0] = c_iter[0]; c_prev[1] = c_iter[1]; c_prev[2] = c_iter[2]; op_prev = chain[iter].op; } if (c_iter[0] != NULL) { /* set the actions on the last layer */ switch (op_prev) { case SCMP_CMP_GT: case SCMP_CMP_GE: c_iter[0]->act_t_flg = true; c_iter[0]->act_t = rule->action; c_iter[2]->act_t_flg = true; c_iter[2]->act_t = rule->action; break; case SCMP_CMP_LE: case SCMP_CMP_LT: c_iter[1]->act_f_flg = true; c_iter[1]->act_f = rule->action; c_iter[2]->act_f_flg = true; c_iter[2]->act_f = rule->action; break; case SCMP_CMP_EQ: case SCMP_CMP_MASKED_EQ: c_iter[1]->act_t_flg = true; c_iter[1]->act_t = rule->action; break; case SCMP_CMP_NE: c_iter[0]->act_f_flg = true; c_iter[0]->act_f = rule->action; c_iter[1]->act_f_flg = true; c_iter[1]->act_f = rule->action; break; default: /* we should never get here */ goto gen_64_failure; } } else s_new->action = rule->action; return s_new; gen_64_failure: /* free the new chain and its syscall struct */ _db_tree_put(&s_new->chains); free(s_new); return NULL; }
0
[]
libseccomp
c5bf78de480b32b324e0f511c88ce533ed280b37
206,646,964,921,989,370,000,000,000,000,000,000,000
347
db: fix 64-bit argument comparisons Our approach to doing 64-bit comparisons using 32-bit operators was just plain wrong, leading to a number of potential problems with filters that used the LT, GT, LE, or GE operators. This patch fixes this problem and a few other related issues that came to light in the course of fixing the core problem. A special thanks to Jann Horn for bringing this problem to our attention. Signed-off-by: Paul Moore <[email protected]>
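A hedged sketch of the correct 64-bit greater-than test built from 32-bit operations, matching the SCMP_CMP_GT diagram embedded in the function above: the high words decide unless they are equal, in which case the low words decide.

#include <stdint.h>

/* A(x) > R(x) using only 32-bit comparisons (illustrative). */
static int cmp_gt_64(uint32_t a_hi, uint32_t a_lo, uint32_t r_hi, uint32_t r_lo)
{
    if (a_hi != r_hi)
        return a_hi > r_hi;     /* high words decide */
    return a_lo > r_lo;         /* tie: low words decide */
}

The original bug class comes from treating the two halves independently; the equality branch on the high words is what makes the composite comparison sound.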
int wait_for_beacon(unsigned char *bssid, unsigned char *capa, char *essid) { int len = 0, chan = 0, taglen = 0, tagtype = 0, pos = 0; unsigned char pkt_sniff[4096]; struct timeval tv,tv2; char essid2[33]; gettimeofday(&tv, NULL); while (1) { len = 0; while (len < 22) { len = read_packet(pkt_sniff, sizeof(pkt_sniff), NULL); gettimeofday(&tv2, NULL); if(((tv2.tv_sec-tv.tv_sec)*1000000) + (tv2.tv_usec-tv.tv_usec) > 10000*1000) //wait 10sec for beacon frame { return -1; } if(len <= 0) usleep(1); } if (! memcmp(pkt_sniff, "\x80", 1)) { pos = 0; taglen = 22; //initial value to get the fixed tags parsing started taglen+= 12; //skip fixed tags in frames do { pos += taglen + 2; tagtype = pkt_sniff[pos]; taglen = pkt_sniff[pos+1]; } while(tagtype != 3 && pos < len-2); if(tagtype != 3) continue; if(taglen != 1) continue; if(pos+2+taglen > len) continue; chan = pkt_sniff[pos+2]; if(essid) { pos = 0; taglen = 22; //initial value to get the fixed tags parsing started taglen+= 12; //skip fixed tags in frames do { pos += taglen + 2; tagtype = pkt_sniff[pos]; taglen = pkt_sniff[pos+1]; } while(tagtype != 0 && pos < len-2); if(tagtype != 0) continue; if(taglen <= 1) { if (memcmp(bssid, pkt_sniff+10, 6) == 0) break; else continue; } if(pos+2+taglen > len) continue; if(taglen > 32)taglen = 32; if((pkt_sniff+pos+2)[0] < 32 && memcmp(bssid, pkt_sniff+10, 6) == 0) { break; } /* if bssid is given, copy essid */ if(bssid != NULL && memcmp(bssid, pkt_sniff+10, 6) == 0 && strlen(essid) == 0) { memset(essid, 0, 33); memcpy(essid, pkt_sniff+pos+2, taglen); break; } /* if essid is given, copy bssid AND essid, so we can handle case insensitive arguments */ if(bssid != NULL && memcmp(bssid, NULL_MAC, 6) == 0 && strncasecmp(essid, (char*)pkt_sniff+pos+2, taglen) == 0 && strlen(essid) == (unsigned)taglen) { memset(essid, 0, 33); memcpy(essid, pkt_sniff+pos+2, taglen); memcpy(bssid, pkt_sniff+10, 6); printf("Found BSSID \"%02X:%02X:%02X:%02X:%02X:%02X\" to given ESSID \"%s\".\n", bssid[0], bssid[1], bssid[2], bssid[3], bssid[4], bssid[5], essid); break; } /* if essid and bssid are given, check both */ if(bssid != NULL && memcmp(bssid, pkt_sniff+10, 6) == 0 && strlen(essid) > 0) { memset(essid2, 0, 33); memcpy(essid2, pkt_sniff+pos+2, taglen); if(strncasecmp(essid, essid2, taglen) == 0 && strlen(essid) == (unsigned)taglen) break; else { printf("For the given BSSID \"%02X:%02X:%02X:%02X:%02X:%02X\", there is an ESSID mismatch!\n", bssid[0], bssid[1], bssid[2], bssid[3], bssid[4], bssid[5]); printf("Found ESSID \"%s\" vs. specified ESSID \"%s\"\n", essid2, essid); printf("Using the given one, double check it to be sure its correct!\n"); break; } } } } } if(capa) memcpy(capa, pkt_sniff+34, 2); return chan; }
0
[ "CWE-787" ]
aircrack-ng
091b153f294b9b695b0b2831e65936438b550d7b
81,012,280,315,248,350,000,000,000,000,000,000,000
108
Aireplay-ng: Fixed tcp_test stack overflow (Closes #14 on GitHub). git-svn-id: http://svn.aircrack-ng.org/trunk@2417 28c6078b-6c39-48e3-add9-af49d547ecab
TEST_P(DownstreamProtocolIntegrationTest, InvalidContentLengthAllowed) { config_helper_.addConfigModifier( [](envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager& hcm) -> void { hcm.mutable_http2_protocol_options()->set_stream_error_on_invalid_http_messaging(true); }); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); auto encoder_decoder = codec_client_->startRequest(Http::TestHeaderMapImpl{{":method", "POST"}, {":path", "/test/long/url"}, {":authority", "host"}, {"content-length", "-1"}}); auto response = std::move(encoder_decoder.second); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { codec_client_->waitForDisconnect(); } else { response->waitForReset(); codec_client_->close(); } if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { ASSERT_TRUE(response->complete()); EXPECT_EQ("400", response->headers().Status()->value().getStringView()); } else { ASSERT_TRUE(response->reset()); EXPECT_EQ(Http::StreamResetReason::RemoteReset, response->reset_reason()); } }
0
[ "CWE-400", "CWE-703" ]
envoy
afc39bea36fd436e54262f150c009e8d72db5014
81,626,776,897,051,830,000,000,000,000,000,000,000
33
Track byteSize of HeaderMap internally. Introduces a cached byte size updated internally in HeaderMap. The value is stored as an optional, and is cleared whenever a non-const pointer or reference to a HeaderEntry is accessed. The cached value can be set with refreshByteSize() which performs an iteration over the HeaderMap to sum the size of each key and value in the HeaderMap. Signed-off-by: Asra Ali <[email protected]>
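A hedged sketch of the cached-byte-size idea from the commit message, with hypothetical names (Envoy's HeaderMap is far richer): the cache is an optional, dropped whenever a mutable reference escapes, and re-established by a full walk.

#include <cstddef>
#include <optional>
#include <string>
#include <vector>

class HeaderMapSketch {
public:
  struct Entry { std::string key, value; };

  // Non-const access may mutate an entry, so the cached size is dropped.
  Entry& getMutable(std::size_t i) {
    cached_byte_size_.reset();
    return entries_[i];
  }

  // O(n) walk over all keys/values re-establishes the cache.
  void refreshByteSize() {
    std::size_t total = 0;
    for (const auto& e : entries_) total += e.key.size() + e.value.size();
    cached_byte_size_ = total;
  }

  std::optional<std::size_t> cachedByteSize() const { return cached_byte_size_; }

private:
  std::vector<Entry> entries_;
  std::optional<std::size_t> cached_byte_size_;
};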
jbig2_decode_symbol_dict(Jbig2Ctx *ctx, Jbig2Segment *segment, const Jbig2SymbolDictParams *params, const byte *data, size_t size, Jbig2ArithCx *GB_stats, Jbig2ArithCx *GR_stats) { Jbig2SymbolDict *SDNEWSYMS = NULL; Jbig2SymbolDict *SDEXSYMS = NULL; uint32_t HCHEIGHT; uint32_t NSYMSDECODED; uint32_t SYMWIDTH, TOTWIDTH; uint32_t HCFIRSTSYM; uint32_t *SDNEWSYMWIDTHS = NULL; int SBSYMCODELEN = 0; Jbig2WordStream *ws = NULL; Jbig2HuffmanState *hs = NULL; Jbig2HuffmanTable *SDHUFFRDX = NULL; Jbig2HuffmanTable *SBHUFFRSIZE = NULL; Jbig2ArithState *as = NULL; Jbig2ArithIntCtx *IADH = NULL; Jbig2ArithIntCtx *IADW = NULL; Jbig2ArithIntCtx *IAEX = NULL; Jbig2ArithIntCtx *IAAI = NULL; Jbig2ArithIaidCtx *IAID = NULL; Jbig2ArithIntCtx *IARDX = NULL; Jbig2ArithIntCtx *IARDY = NULL; int code = 0; Jbig2SymbolDict **refagg_dicts = NULL; int n_refagg_dicts = 1; Jbig2TextRegionParams *tparams = NULL; /* 6.5.5 (3) */ HCHEIGHT = 0; NSYMSDECODED = 0; ws = jbig2_word_stream_buf_new(ctx, data, size); if (ws == NULL) { jbig2_error(ctx, JBIG2_SEVERITY_WARNING, segment->number, "failed to allocate ws in jbig2_decode_symbol_dict"); return NULL; } as = jbig2_arith_new(ctx, ws); if (as == NULL) { jbig2_error(ctx, JBIG2_SEVERITY_WARNING, segment->number, "failed to allocate as in jbig2_decode_symbol_dict"); jbig2_word_stream_buf_free(ctx, ws); return NULL; } if (!params->SDHUFF) { IADH = jbig2_arith_int_ctx_new(ctx); IADW = jbig2_arith_int_ctx_new(ctx); IAEX = jbig2_arith_int_ctx_new(ctx); IAAI = jbig2_arith_int_ctx_new(ctx); if ((IADH == NULL) || (IADW == NULL) || (IAEX == NULL) || (IAAI == NULL)) { jbig2_error(ctx, JBIG2_SEVERITY_WARNING, segment->number, "failed to allocate storage for symbol bitmap"); goto cleanup1; } if (params->SDREFAGG) { int64_t tmp = params->SDNUMINSYMS + params->SDNUMNEWSYMS; for (SBSYMCODELEN = 0; ((int64_t) 1 << SBSYMCODELEN) < tmp; SBSYMCODELEN++); IAID = jbig2_arith_iaid_ctx_new(ctx, SBSYMCODELEN); IARDX = jbig2_arith_int_ctx_new(ctx); IARDY = jbig2_arith_int_ctx_new(ctx); if ((IAID == NULL) || (IARDX == NULL) || (IARDY == NULL)) { jbig2_error(ctx, JBIG2_SEVERITY_WARNING, segment->number, "failed to allocate storage for symbol bitmap"); goto cleanup2; } } } else { jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number, "huffman coded symbol dictionary"); hs = jbig2_huffman_new(ctx, ws); SDHUFFRDX = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_O); SBHUFFRSIZE = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_A); if ((hs == NULL) || (SDHUFFRDX == NULL) || (SBHUFFRSIZE == NULL)) { jbig2_error(ctx, JBIG2_SEVERITY_WARNING, segment->number, "failed to allocate storage for symbol bitmap"); goto cleanup2; } if (!params->SDREFAGG) { SDNEWSYMWIDTHS = jbig2_new(ctx, uint32_t, params->SDNUMNEWSYMS); if (SDNEWSYMWIDTHS == NULL) { jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "could not allocate storage for (%u) symbol widths", params->SDNUMNEWSYMS); goto cleanup2; } } } SDNEWSYMS = jbig2_sd_new(ctx, params->SDNUMNEWSYMS); if (SDNEWSYMS == NULL) { jbig2_error(ctx, JBIG2_SEVERITY_WARNING, segment->number, "could not allocate storage for (%u) new symbols", params->SDNUMNEWSYMS); goto cleanup2; } /* 6.5.5 (4a) */ while (NSYMSDECODED < params->SDNUMNEWSYMS) { int32_t HCDH, DW; /* 6.5.6 */ if (params->SDHUFF) { HCDH = jbig2_huffman_get(hs, params->SDHUFFDH, &code); } else { code = jbig2_arith_int_decode(IADH, as, &HCDH); } if (code != 0) { jbig2_error(ctx, JBIG2_SEVERITY_WARNING, segment->number, "error or OOB decoding height class delta (%d)\n", code); } if (!params->SDHUFF && 
jbig2_arith_has_reached_marker(as)) { code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "prevent DOS while decoding height classes"); goto cleanup2; } /* 6.5.5 (4b) */ HCHEIGHT = HCHEIGHT + HCDH; SYMWIDTH = 0; TOTWIDTH = 0; HCFIRSTSYM = NSYMSDECODED; if ((int32_t) HCHEIGHT < 0) { code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Invalid HCHEIGHT value"); goto cleanup2; } #ifdef JBIG2_DEBUG jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number, "HCHEIGHT = %d", HCHEIGHT); #endif jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number, "decoding height class %d with %d syms decoded", HCHEIGHT, NSYMSDECODED); for (;;) { /* 6.5.7 */ if (params->SDHUFF) { DW = jbig2_huffman_get(hs, params->SDHUFFDW, &code); } else { code = jbig2_arith_int_decode(IADW, as, &DW); } if (code < 0) goto cleanup4; /* 6.5.5 (4c.i) */ if (code == 1) { jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number, " OOB signals end of height class %d", HCHEIGHT); break; } /* check for broken symbol table */ if (NSYMSDECODED >= params->SDNUMNEWSYMS) { jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "No OOB signalling end of height class %d", HCHEIGHT); goto cleanup4; } SYMWIDTH = SYMWIDTH + DW; TOTWIDTH = TOTWIDTH + SYMWIDTH; if ((int32_t) SYMWIDTH < 0) { code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Invalid SYMWIDTH value (%d) at symbol %d", SYMWIDTH, NSYMSDECODED + 1); goto cleanup4; } #ifdef JBIG2_DEBUG jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number, "SYMWIDTH = %d TOTWIDTH = %d", SYMWIDTH, TOTWIDTH); #endif /* 6.5.5 (4c.ii) */ if (!params->SDHUFF || params->SDREFAGG) { #ifdef JBIG2_DEBUG jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number, "SDHUFF = %d; SDREFAGG = %d", params->SDHUFF, params->SDREFAGG); #endif /* 6.5.8 */ if (!params->SDREFAGG) { Jbig2GenericRegionParams region_params; int sdat_bytes; Jbig2Image *image; /* Table 16 */ region_params.MMR = 0; region_params.GBTEMPLATE = params->SDTEMPLATE; region_params.TPGDON = 0; region_params.USESKIP = 0; sdat_bytes = params->SDTEMPLATE == 0 ? 
8 : 2; memcpy(region_params.gbat, params->sdat, sdat_bytes); image = jbig2_image_new(ctx, SYMWIDTH, HCHEIGHT); if (image == NULL) { code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to allocate image in jbig2_decode_symbol_dict"); goto cleanup4; } code = jbig2_decode_generic_region(ctx, segment, &region_params, as, image, GB_stats); if (code < 0) { jbig2_image_release(ctx, image); goto cleanup4; } SDNEWSYMS->glyphs[NSYMSDECODED] = image; } else { /* 6.5.8.2 refinement/aggregate symbol */ uint32_t REFAGGNINST; if (params->SDHUFF) { REFAGGNINST = jbig2_huffman_get(hs, params->SDHUFFAGGINST, &code); } else { code = jbig2_arith_int_decode(IAAI, as, (int32_t *) & REFAGGNINST); } if (code || (int32_t) REFAGGNINST <= 0) { code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "invalid number of symbols or OOB in aggregate glyph"); goto cleanup4; } jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number, "aggregate symbol coding (%d instances)", REFAGGNINST); if (REFAGGNINST > 1) { Jbig2Image *image; uint32_t i; if (tparams == NULL) { /* First time through, we need to initialise the */ /* various tables for Huffman or adaptive encoding */ /* as well as the text region parameters structure */ refagg_dicts = jbig2_new(ctx, Jbig2SymbolDict *, n_refagg_dicts); if (refagg_dicts == NULL) { code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Out of memory allocating dictionary array"); goto cleanup4; } refagg_dicts[0] = jbig2_sd_new(ctx, params->SDNUMINSYMS + params->SDNUMNEWSYMS); if (refagg_dicts[0] == NULL) { code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Out of memory allocating symbol dictionary"); jbig2_free(ctx->allocator, refagg_dicts); goto cleanup4; } for (i = 0; i < params->SDNUMINSYMS; i++) { refagg_dicts[0]->glyphs[i] = jbig2_image_clone(ctx, params->SDINSYMS->glyphs[i]); } tparams = jbig2_new(ctx, Jbig2TextRegionParams, 1); if (tparams == NULL) { code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Out of memory creating text region params"); goto cleanup4; } if (!params->SDHUFF) { /* Values from Table 17, section 6.5.8.2 (2) */ tparams->IADT = jbig2_arith_int_ctx_new(ctx); tparams->IAFS = jbig2_arith_int_ctx_new(ctx); tparams->IADS = jbig2_arith_int_ctx_new(ctx); tparams->IAIT = jbig2_arith_int_ctx_new(ctx); /* Table 31 */ for (SBSYMCODELEN = 0; (1 << SBSYMCODELEN) < (int)(params->SDNUMINSYMS + params->SDNUMNEWSYMS); SBSYMCODELEN++); tparams->IAID = jbig2_arith_iaid_ctx_new(ctx, SBSYMCODELEN); tparams->IARI = jbig2_arith_int_ctx_new(ctx); tparams->IARDW = jbig2_arith_int_ctx_new(ctx); tparams->IARDH = jbig2_arith_int_ctx_new(ctx); tparams->IARDX = jbig2_arith_int_ctx_new(ctx); tparams->IARDY = jbig2_arith_int_ctx_new(ctx); } else { tparams->SBHUFFFS = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_F); /* Table B.6 */ tparams->SBHUFFDS = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_H); /* Table B.8 */ tparams->SBHUFFDT = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_K); /* Table B.11 */ tparams->SBHUFFRDW = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_O); /* Table B.15 */ tparams->SBHUFFRDH = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_O); /* Table B.15 */ tparams->SBHUFFRDX = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_O); /* Table B.15 */ tparams->SBHUFFRDY = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_O); /* Table B.15 */ } tparams->SBHUFF = params->SDHUFF; tparams->SBREFINE = 1; tparams->SBSTRIPS = 1; tparams->SBDEFPIXEL = 0; tparams->SBCOMBOP = JBIG2_COMPOSE_OR; 
tparams->TRANSPOSED = 0; tparams->REFCORNER = JBIG2_CORNER_TOPLEFT; tparams->SBDSOFFSET = 0; tparams->SBRTEMPLATE = params->SDRTEMPLATE; } tparams->SBNUMINSTANCES = REFAGGNINST; image = jbig2_image_new(ctx, SYMWIDTH, HCHEIGHT); if (image == NULL) { code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Out of memory creating symbol image"); goto cleanup4; } /* multiple symbols are handled as a text region */ jbig2_decode_text_region(ctx, segment, tparams, (const Jbig2SymbolDict * const *)refagg_dicts, n_refagg_dicts, image, data, size, GR_stats, as, ws); SDNEWSYMS->glyphs[NSYMSDECODED] = image; refagg_dicts[0]->glyphs[params->SDNUMINSYMS + NSYMSDECODED] = jbig2_image_clone(ctx, SDNEWSYMS->glyphs[NSYMSDECODED]); } else { /* 6.5.8.2.2 */ /* bool SBHUFF = params->SDHUFF; */ Jbig2RefinementRegionParams rparams; Jbig2Image *image; uint32_t ID; int32_t RDX, RDY; int BMSIZE = 0; uint32_t ninsyms = params->SDNUMINSYMS; int code1 = 0; int code2 = 0; int code3 = 0; int code4 = 0; /* 6.5.8.2.2 (2, 3, 4, 5) */ if (params->SDHUFF) { ID = jbig2_huffman_get_bits(hs, SBSYMCODELEN, &code4); RDX = jbig2_huffman_get(hs, SDHUFFRDX, &code1); RDY = jbig2_huffman_get(hs, SDHUFFRDX, &code2); BMSIZE = jbig2_huffman_get(hs, SBHUFFRSIZE, &code3); jbig2_huffman_skip(hs); } else { code1 = jbig2_arith_iaid_decode(IAID, as, (int32_t *) & ID); code2 = jbig2_arith_int_decode(IARDX, as, &RDX); code3 = jbig2_arith_int_decode(IARDY, as, &RDY); } if ((code1 < 0) || (code2 < 0) || (code3 < 0) || (code4 < 0)) { code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to decode data"); goto cleanup4; } if (ID >= ninsyms + NSYMSDECODED) { code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "refinement references unknown symbol %d", ID); goto cleanup4; } jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number, "symbol is a refinement of id %d with the " "refinement applied at (%d,%d)", ID, RDX, RDY); image = jbig2_image_new(ctx, SYMWIDTH, HCHEIGHT); if (image == NULL) { code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Out of memory creating symbol image"); goto cleanup4; } /* Table 18 */ rparams.GRTEMPLATE = params->SDRTEMPLATE; rparams.reference = (ID < ninsyms) ? 
params->SDINSYMS->glyphs[ID] : SDNEWSYMS->glyphs[ID - ninsyms]; /* SumatraPDF: fail on missing glyphs */ if (rparams.reference == NULL) { code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "missing glyph %d/%d!", ID, ninsyms); jbig2_image_release(ctx, image); goto cleanup4; } rparams.DX = RDX; rparams.DY = RDY; rparams.TPGRON = 0; memcpy(rparams.grat, params->sdrat, 4); code = jbig2_decode_refinement_region(ctx, segment, &rparams, as, image, GR_stats); if (code < 0) goto cleanup4; SDNEWSYMS->glyphs[NSYMSDECODED] = image; /* 6.5.8.2.2 (7) */ if (params->SDHUFF) { if (BMSIZE == 0) BMSIZE = image->height * image->stride; jbig2_huffman_advance(hs, BMSIZE); } } } #ifdef OUTPUT_PBM { char name[64]; FILE *out; snprintf(name, 64, "sd.%04d.%04d.pbm", segment->number, NSYMSDECODED); out = fopen(name, "wb"); jbig2_image_write_pbm(SDNEWSYMS->glyphs[NSYMSDECODED], out); jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number, "writing out glyph as '%s' ...", name); fclose(out); } #endif } /* 6.5.5 (4c.iii) */ if (params->SDHUFF && !params->SDREFAGG) { SDNEWSYMWIDTHS[NSYMSDECODED] = SYMWIDTH; } /* 6.5.5 (4c.iv) */ NSYMSDECODED = NSYMSDECODED + 1; jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number, "decoded symbol %u of %u (%ux%u)", NSYMSDECODED, params->SDNUMNEWSYMS, SYMWIDTH, HCHEIGHT); } /* end height class decode loop */ /* 6.5.5 (4d) */ if (params->SDHUFF && !params->SDREFAGG) { /* 6.5.9 */ Jbig2Image *image; uint32_t BMSIZE = jbig2_huffman_get(hs, params->SDHUFFBMSIZE, &code); uint32_t j; int x; if (code) { jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "error decoding size of collective bitmap!"); goto cleanup4; } /* skip any bits before the next byte boundary */ jbig2_huffman_skip(hs); image = jbig2_image_new(ctx, TOTWIDTH, HCHEIGHT); if (image == NULL) { jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "could not allocate collective bitmap image!"); goto cleanup4; } if (BMSIZE == 0) { /* if BMSIZE == 0 bitmap is uncompressed */ const byte *src = data + jbig2_huffman_offset(hs); const int stride = (image->width >> 3) + ((image->width & 7) ? 
1 : 0); byte *dst = image->data; /* SumatraPDF: prevent read access violation */ if ((size - jbig2_huffman_offset(hs) < image->height * stride) || (size < jbig2_huffman_offset(hs))) { jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "not enough data for decoding (%d/%d)", image->height * stride, size - jbig2_huffman_offset(hs)); jbig2_image_release(ctx, image); goto cleanup4; } BMSIZE = image->height * stride; jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number, "reading %dx%d uncompressed bitmap" " for %d symbols (%d bytes)", image->width, image->height, NSYMSDECODED - HCFIRSTSYM, BMSIZE); for (j = 0; j < image->height; j++) { memcpy(dst, src, stride); dst += image->stride; src += stride; } } else { Jbig2GenericRegionParams rparams; /* SumatraPDF: prevent read access violation */ if (size - jbig2_huffman_offset(hs) < BMSIZE) { jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "not enough data for decoding (%d/%d)", BMSIZE, size - jbig2_huffman_offset(hs)); jbig2_image_release(ctx, image); goto cleanup4; } jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number, "reading %dx%d collective bitmap for %d symbols (%d bytes)", image->width, image->height, NSYMSDECODED - HCFIRSTSYM, BMSIZE); rparams.MMR = 1; code = jbig2_decode_generic_mmr(ctx, segment, &rparams, data + jbig2_huffman_offset(hs), BMSIZE, image); if (code) { jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "error decoding MMR bitmap image!"); jbig2_image_release(ctx, image); goto cleanup4; } } /* advance past the data we've just read */ jbig2_huffman_advance(hs, BMSIZE); /* copy the collective bitmap into the symbol dictionary */ x = 0; for (j = HCFIRSTSYM; j < NSYMSDECODED; j++) { Jbig2Image *glyph; glyph = jbig2_image_new(ctx, SDNEWSYMWIDTHS[j], HCHEIGHT); if (glyph == NULL) { jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to copy the collective bitmap into symbol dictionary"); jbig2_image_release(ctx, image); goto cleanup4; } jbig2_image_compose(ctx, glyph, image, -x, 0, JBIG2_COMPOSE_REPLACE); x += SDNEWSYMWIDTHS[j]; SDNEWSYMS->glyphs[j] = glyph; } jbig2_image_release(ctx, image); } } /* end of symbol decode loop */ /* 6.5.10 */ SDEXSYMS = jbig2_sd_new(ctx, params->SDNUMEXSYMS); if (SDEXSYMS == NULL) { jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to allocate symbols exported from symbols dictionary"); goto cleanup4; } else { uint32_t i = 0; uint32_t j = 0; uint32_t k; int exflag = 0; uint32_t limit = params->SDNUMINSYMS + params->SDNUMNEWSYMS; uint32_t exrunlength; int zerolength = 0; while (i < limit) { if (params->SDHUFF) exrunlength = jbig2_huffman_get(hs, SBHUFFRSIZE, &code); else code = jbig2_arith_int_decode(IAEX, as, (int32_t *)&exrunlength); /* prevent infinite loop */ zerolength = exrunlength > 0 ? 
0 : zerolength + 1; if (code || (exrunlength > limit - i) || (zerolength > 4) || (exflag && (exrunlength + j > params->SDNUMEXSYMS))) { if (code) jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to decode exrunlength for exported symbols"); else if (exrunlength <= 0) jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "runlength too small in export symbol table (%d <= 0)\n", exrunlength); else jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "runlength too large in export symbol table (%d > %d - %d)\n", exrunlength, params->SDNUMEXSYMS, j); /* skip to the cleanup code and return SDEXSYMS = NULL */ jbig2_sd_release(ctx, SDEXSYMS); SDEXSYMS = NULL; break; } for (k = 0; k < exrunlength; k++) { if (exflag) { SDEXSYMS->glyphs[j++] = (i < params->SDNUMINSYMS) ? jbig2_image_clone(ctx, params->SDINSYMS->glyphs[i]) : jbig2_image_clone(ctx, SDNEWSYMS->glyphs[i - params->SDNUMINSYMS]); } i++; } exflag = !exflag; } } cleanup4: if (tparams != NULL) { if (!params->SDHUFF) { jbig2_arith_int_ctx_free(ctx, tparams->IADT); jbig2_arith_int_ctx_free(ctx, tparams->IAFS); jbig2_arith_int_ctx_free(ctx, tparams->IADS); jbig2_arith_int_ctx_free(ctx, tparams->IAIT); jbig2_arith_iaid_ctx_free(ctx, tparams->IAID); jbig2_arith_int_ctx_free(ctx, tparams->IARI); jbig2_arith_int_ctx_free(ctx, tparams->IARDW); jbig2_arith_int_ctx_free(ctx, tparams->IARDH); jbig2_arith_int_ctx_free(ctx, tparams->IARDX); jbig2_arith_int_ctx_free(ctx, tparams->IARDY); } else { jbig2_release_huffman_table(ctx, tparams->SBHUFFFS); jbig2_release_huffman_table(ctx, tparams->SBHUFFDS); jbig2_release_huffman_table(ctx, tparams->SBHUFFDT); jbig2_release_huffman_table(ctx, tparams->SBHUFFRDX); jbig2_release_huffman_table(ctx, tparams->SBHUFFRDY); jbig2_release_huffman_table(ctx, tparams->SBHUFFRDW); jbig2_release_huffman_table(ctx, tparams->SBHUFFRDH); } jbig2_free(ctx->allocator, tparams); } if (refagg_dicts != NULL) { jbig2_sd_release(ctx, refagg_dicts[0]); jbig2_free(ctx->allocator, refagg_dicts); } cleanup2: jbig2_sd_release(ctx, SDNEWSYMS); if (params->SDHUFF && !params->SDREFAGG) { jbig2_free(ctx->allocator, SDNEWSYMWIDTHS); } jbig2_release_huffman_table(ctx, SDHUFFRDX); jbig2_release_huffman_table(ctx, SBHUFFRSIZE); jbig2_huffman_free(ctx, hs); jbig2_arith_iaid_ctx_free(ctx, IAID); jbig2_arith_int_ctx_free(ctx, IARDX); jbig2_arith_int_ctx_free(ctx, IARDY); cleanup1: jbig2_word_stream_buf_free(ctx, ws); jbig2_free(ctx->allocator, as); jbig2_arith_int_ctx_free(ctx, IADH); jbig2_arith_int_ctx_free(ctx, IADW); jbig2_arith_int_ctx_free(ctx, IAEX); jbig2_arith_int_ctx_free(ctx, IAAI); return SDEXSYMS; }
0
[]
ghostpdl
b184e783702246e154294326d03d9abda669fcfa
263,544,315,846,066,830,000,000,000,000,000,000,000
568
Bug 697703: Prevent integer overflow vulnerability. Add extra check for the offset being greater than the size of the image and hence reading off the end of the buffer. Thank you to Dai Ge for finding this issue and suggesting a patch.
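A hedged sketch of the overflow-safe form of the check the commit describes (jbig2dec's actual variable names differ): compare the offset against the buffer size first, then compare the remaining room, so "offset + length" can never wrap.

/* offset, length, size: size_t values describing a read from a buffer. */
if (offset > size || length > size - offset) {
    jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number,
                "read would run past the end of the buffer");
    return -1;
}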
static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, struct bpf_reg_state *false_reg, u64 val, u8 opcode) { if (__is_pointer_value(false, false_reg)) return; switch (opcode) { case BPF_JEQ: /* If this is false then we know nothing Jon Snow, but if it is * true then we know for sure. */ __mark_reg_known(true_reg, val); break; case BPF_JNE: /* If this is true we know nothing Jon Snow, but if it is false * we know the value for sure; */ __mark_reg_known(false_reg, val); break; case BPF_JSET: false_reg->var_off = tnum_and(false_reg->var_off, tnum_const(~val)); if (is_power_of_2(val)) true_reg->var_off = tnum_or(true_reg->var_off, tnum_const(val)); break; case BPF_JGT: true_reg->umax_value = min(true_reg->umax_value, val - 1); false_reg->umin_value = max(false_reg->umin_value, val); break; case BPF_JSGT: true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); false_reg->smin_value = max_t(s64, false_reg->smin_value, val); break; case BPF_JLT: true_reg->umin_value = max(true_reg->umin_value, val + 1); false_reg->umax_value = min(false_reg->umax_value, val); break; case BPF_JSLT: true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); false_reg->smax_value = min_t(s64, false_reg->smax_value, val); break; case BPF_JGE: true_reg->umax_value = min(true_reg->umax_value, val); false_reg->umin_value = max(false_reg->umin_value, val + 1); break; case BPF_JSGE: true_reg->smax_value = min_t(s64, true_reg->smax_value, val); false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); break; case BPF_JLE: true_reg->umin_value = max(true_reg->umin_value, val); false_reg->umax_value = min(false_reg->umax_value, val - 1); break; case BPF_JSLE: true_reg->smin_value = max_t(s64, true_reg->smin_value, val); false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); break; default: break; } __reg_deduce_bounds(false_reg); __reg_deduce_bounds(true_reg); /* We might have learned some bits from the bounds. */ __reg_bound_offset(false_reg); __reg_bound_offset(true_reg); /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. */ __update_reg_bounds(false_reg); __update_reg_bounds(true_reg); }
0
[ "CWE-703", "CWE-189" ]
linux
979d63d50c0c0f7bc537bf821e056cc9fe5abd38
194,294,391,194,709,300,000,000,000,000,000,000,000
75
bpf: prevent out of bounds speculation on pointer arithmetic Jann reported that the original commit back in b2157399cc98 ("bpf: prevent out-of-bounds speculation") was not sufficient to stop CPU from speculating out of bounds memory access: While b2157399cc98 only focussed on masking array map access for unprivileged users for tail calls and data access such that the user provided index gets sanitized from BPF program and syscall side, there is still a more generic form affected from BPF programs that applies to most maps that hold user data in relation to dynamic map access when dealing with unknown scalars or "slow" known scalars as access offset, for example: - Load a map value pointer into R6 - Load an index into R7 - Do a slow computation (e.g. with a memory dependency) that loads a limit into R8 (e.g. load the limit from a map for high latency, then mask it to make the verifier happy) - Exit if R7 >= R8 (mispredicted branch) - Load R0 = R6[R7] - Load R0 = R6[R0] For unknown scalars there are two options in the BPF verifier where we could derive knowledge from in order to guarantee safe access to the memory: i) While </>/<=/>= variants won't allow to derive any lower or upper bounds from the unknown scalar where it would be safe to add it to the map value pointer, it is possible through ==/!= test however. ii) another option is to transform the unknown scalar into a known scalar, for example, through ALU ops combination such as R &= <imm> followed by R |= <imm> or any similar combination where the original information from the unknown scalar would be destroyed entirely leaving R with a constant. The initial slow load still precedes the latter ALU ops on that register, so the CPU executes speculatively from that point. Once we have the known scalar, any compare operation would work then. A third option only involving registers with known scalars could be crafted as described in [0] where a CPU port (e.g. Slow Int unit) would be filled with many dependent computations such that the subsequent condition depending on its outcome has to wait for evaluation on its execution port and thereby executing speculatively if the speculated code can be scheduled on a different execution port, or any other form of mistraining as described in [1], for example. Given this is not limited to only unknown scalars, not only map but also stack access is affected since both is accessible for unprivileged users and could potentially be used for out of bounds access under speculation. In order to prevent any of these cases, the verifier is now sanitizing pointer arithmetic on the offset such that any out of bounds speculation would be masked in a way where the pointer arithmetic result in the destination register will stay unchanged, meaning offset masked into zero similar as in array_index_nospec() case. With regards to implementation, there are three options that were considered: i) new insn for sanitation, ii) push/pop insn and sanitation as inlined BPF, iii) reuse of ax register and sanitation as inlined BPF. Option i) has the downside that we end up using from reserved bits in the opcode space, but also that we would require each JIT to emit masking as native arch opcodes meaning mitigation would have slow adoption till everyone implements it eventually which is counter-productive. Option ii) and iii) have both in common that a temporary register is needed in order to implement the sanitation as inlined BPF since we are not allowed to modify the source register. 
While a push / pop insn in ii) would be useful to have in any case, it requires once again that every JIT needs to implement it first. While possible, amount of changes needed would also be unsuitable for a -stable patch. Therefore, the path which has fewer changes, less BPF instructions for the mitigation and does not require anything to be changed in the JITs is option iii) which this work is pursuing. The ax register is already mapped to a register in all JITs (modulo arm32 where it's mapped to stack as various other BPF registers there) and used in constant blinding for JITs-only so far. It can be reused for verifier rewrites under certain constraints. The interpreter's tmp "register" has therefore been remapped into extending the register set with hidden ax register and reusing that for a number of instructions that needed the prior temporary variable internally (e.g. div, mod). This allows for zero increase in stack space usage in the interpreter, and enables (restricted) generic use in rewrites otherwise as long as such a patchlet does not make use of these instructions. The sanitation mask is dynamic and relative to the offset the map value or stack pointer currently holds. There are various cases that need to be taken under consideration for the masking, e.g. such operation could look as follows: ptr += val or val += ptr or ptr -= val. Thus, the value to be sanitized could reside either in source or in destination register, and the limit is different depending on whether the ALU op is addition or subtraction and depending on the current known and bounded offset. The limit is derived as follows: limit := max_value_size - (smin_value + off). For subtraction: limit := umax_value + off. This holds because we do not allow any pointer arithmetic that would temporarily go out of bounds or would have an unknown value with mixed signed bounds where it is unclear at verification time whether the actual runtime value would be either negative or positive. For example, we have a derived map pointer value with constant offset and bounded one, so limit based on smin_value works because the verifier requires that statically analyzed arithmetic on the pointer must be in bounds, and thus it checks if resulting smin_value + off and umax_value + off is still within map value bounds at time of arithmetic in addition to time of access. Similarly, for the case of stack access we derive the limit as follows: MAX_BPF_STACK + off for subtraction and -off for the case of addition where off := ptr_reg->off + ptr_reg->var_off.value. Subtraction is a special case for the masking which can be in form of ptr += -val, ptr -= -val, or ptr -= val. In the first two cases where we know that the value is negative, we need to temporarily negate the value in order to do the sanitation on a positive value where we later swap the ALU op, and restore original source register if the value was in source. The sanitation of pointer arithmetic alone is still not fully sufficient as is, since a scenario like the following could happen ... PTR += 0x1000 (e.g. K-based imm) PTR -= BIG_NUMBER_WITH_SLOW_COMPARISON PTR += 0x1000 PTR -= BIG_NUMBER_WITH_SLOW_COMPARISON [...] ... which under speculation could end up as ... PTR += 0x1000 PTR -= 0 [ truncated by mitigation ] PTR += 0x1000 PTR -= 0 [ truncated by mitigation ] [...] ... and therefore still access out of bounds. To prevent such case, the verifier is also analyzing safety for potential out of bounds access under speculative execution. 
Meaning, it is also simulating pointer access under truncation. We therefore "branch off" and push the current verification state after the ALU operation with known 0 to the verification stack for later analysis. Given the current path analysis succeeded it is likely that the one under speculation can be pruned. In any case, it is also subject to existing complexity limits and therefore anything beyond this point will be rejected. In terms of pruning, it needs to be ensured that the verification state from speculative execution simulation must never prune a non-speculative execution path, therefore, we mark verifier state accordingly at the time of push_stack(). If verifier detects out of bounds access under speculative execution from one of the possible paths that includes a truncation, it will reject such program. Given we mask every reg-based pointer arithmetic for unprivileged programs, we've been looking into how it could affect real-world programs in terms of size increase. As the majority of programs are targeted for privileged-only use case, we've unconditionally enabled masking (with its alu restrictions on top of it) for privileged programs for the sake of testing in order to check i) whether they get rejected in its current form, and ii) by how much the number of instructions and size will increase. We've tested this by using Katran, Cilium and test_l4lb from the kernel selftests. For Katran we've evaluated balancer_kern.o, Cilium bpf_lxc.o and an older test object bpf_lxc_opt_-DUNKNOWN.o and l4lb we've used test_l4lb.o as well as test_l4lb_noinline.o. We found that none of the programs got rejected by the verifier with this change, and that impact is rather minimal to none. balancer_kern.o had 13,904 bytes (1,738 insns) xlated and 7,797 bytes JITed before and after the change. Most complex program in bpf_lxc.o had 30,544 bytes (3,817 insns) xlated and 18,538 bytes JITed before and after and none of the other tail call programs in bpf_lxc.o had any changes either. For the older bpf_lxc_opt_-DUNKNOWN.o object we found a small increase from 20,616 bytes (2,576 insns) and 12,536 bytes JITed before to 20,664 bytes (2,582 insns) and 12,558 bytes JITed after the change. Other programs from that object file had similar small increase. Both test_l4lb.o had no change and remained at 6,544 bytes (817 insns) xlated and 3,401 bytes JITed and for test_l4lb_noinline.o constant at 5,080 bytes (634 insns) xlated and 3,313 bytes JITed. This can be explained in that LLVM typically optimizes stack based pointer arithmetic by using K-based operations and that use of dynamic map access is not overly frequent. However, in future we may decide to optimize the algorithm further under known guarantees from branch and value speculation. Latter seems also unclear in terms of prediction heuristics that today's CPUs apply as well as whether there could be collisions in e.g. the predictor's Value History/Pattern Table for triggering out of bounds access, thus masking is performed unconditionally at this point but could be subject to relaxation later on. We were generally also brainstorming various other approaches for mitigation, but the blocker was always lack of available registers at runtime and/or overhead for runtime tracking of limits belonging to a specific pointer. Thus, we found this to be minimally intrusive under given constraints. With that in place, a simple example with sanitized access on unprivileged load at post-verification time looks as follows: # bpftool prog dump xlated id 282 [...] 
28: (79) r1 = *(u64 *)(r7 +0) 29: (79) r2 = *(u64 *)(r7 +8) 30: (57) r1 &= 15 31: (79) r3 = *(u64 *)(r0 +4608) 32: (57) r3 &= 1 33: (47) r3 |= 1 34: (2d) if r2 > r3 goto pc+19 35: (b4) (u32) r11 = (u32) 20479 | 36: (1f) r11 -= r2 | Dynamic sanitation for pointer 37: (4f) r11 |= r2 | arithmetic with registers 38: (87) r11 = -r11 | containing bounded or known 39: (c7) r11 s>>= 63 | scalars in order to prevent 40: (5f) r11 &= r2 | out of bounds speculation. 41: (0f) r4 += r11 | 42: (71) r4 = *(u8 *)(r4 +0) 43: (6f) r4 <<= r1 [...] For the case where the scalar sits in the destination register as opposed to the source register, the following code is emitted for the above example: [...] 16: (b4) (u32) r11 = (u32) 20479 17: (1f) r11 -= r2 18: (4f) r11 |= r2 19: (87) r11 = -r11 20: (c7) r11 s>>= 63 21: (5f) r2 &= r11 22: (0f) r2 += r0 23: (61) r0 = *(u32 *)(r2 +0) [...] JIT blinding example with non-conflicting use of r10: [...] d5: je 0x0000000000000106 _ d7: mov 0x0(%rax),%edi | da: mov $0xf153246,%r10d | Index load from map value and e0: xor $0xf153259,%r10 | (const blinded) mask with 0x1f. e7: and %r10,%rdi |_ ea: mov $0x2f,%r10d | f0: sub %rdi,%r10 | Sanitized addition. Both use r10 f3: or %rdi,%r10 | but do not interfere with each f6: neg %r10 | other. (Neither do these instructions f9: sar $0x3f,%r10 | interfere with the use of ax as temp fd: and %r10,%rdi | in interpreter.) 100: add %rax,%rdi |_ 103: mov 0x0(%rdi),%eax [...] Tested that it fixes Jann's reproducer, and also checked that test_verifier and test_progs suite with interpreter, JIT and JIT with hardening enabled on x86-64 and arm64 runs successfully. [0] Speculose: Analyzing the Security Implications of Speculative Execution in CPUs, Giorgi Maisuradze and Christian Rossow, https://arxiv.org/pdf/1801.04084.pdf [1] A Systematic Evaluation of Transient Execution Attacks and Defenses, Claudio Canella, Jo Van Bulck, Michael Schwarz, Moritz Lipp, Benjamin von Berg, Philipp Ortner, Frank Piessens, Dmitry Evtyushkin, Daniel Gruss, https://arxiv.org/pdf/1811.05441.pdf Fixes: b2157399cc98 ("bpf: prevent out-of-bounds speculation") Reported-by: Jann Horn <[email protected]> Signed-off-by: Daniel Borkmann <[email protected]> Acked-by: Alexei Starovoitov <[email protected]> Signed-off-by: Alexei Starovoitov <[email protected]>
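A minimal C model of the r11 masking sequence shown in the xlated dump above may help follow the arithmetic. The function name, the uint64_t framing and the standalone form are mine, not taken from the patch; only the instruction-level behaviour (sub, or, neg, arithmetic shift, and) mirrors the dump:

    #include <stdint.h>

    /* Branch-free sanitation: val passes through unchanged while
     * val <= limit and collapses to 0 otherwise, so a speculatively
     * out-of-range offset is truncated without any data-dependent
     * branch the CPU could mispredict. */
    static uint64_t sanitize_off(uint64_t val, uint64_t limit)
    {
        uint64_t ax = limit - val;         /* underflows iff val > limit      */
        ax |= val;                         /* also catch val with bit 63 set  */
        ax = -ax;                          /* bit 63 set iff val was in range */
        int64_t mask = (int64_t)ax >> 63;  /* all-ones or all-zeros           */
        return val & (uint64_t)mask;
    }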
static void fuse_lib_statfs(fuse_req_t req, fuse_ino_t ino) { struct fuse *f = req_fuse_prepare(req); struct statvfs buf; char *path; int err; memset(&buf, 0, sizeof(buf)); pthread_rwlock_rdlock(&f->tree_lock); if (!ino) { err = -ENOMEM; path = strdup("/"); } else { err = -ENOENT; path = get_path(f, ino); } if (path) { struct fuse_intr_data d; fuse_prepare_interrupt(f, req, &d); err = fuse_fs_statfs(f->fs, path, &buf); fuse_finish_interrupt(f, req, &d); free(path); } pthread_rwlock_unlock(&f->tree_lock); if (!err) fuse_reply_statfs(req, &buf); else reply_err(req, err); }
0
[]
ntfs-3g
fb28eef6f1c26170566187c1ab7dc913a13ea43c
24,996,920,184,557,670,000,000,000,000,000,000,000
30
Hardened the checking of the directory offset requested by a readdir When asked for the next directory entries, make sure the chunk offset is within valid values; otherwise return no more entries in the chunk.
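A hedged sketch of the check the message describes; the type and names are illustrative, not ntfs-3g's:

    #include <stdint.h>

    /* Treat any offset outside the current index chunk as "no more
     * entries" instead of seeking to a caller-chosen position. */
    static int64_t checked_readdir_ofs(int64_t req_ofs, int64_t chunk_size)
    {
        if (req_ofs < 0 || req_ofs >= chunk_size)
            return -1;                 /* end of chunk */
        return req_ofs;
    }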
static inline void vmcs_init(struct vmcs *vmcs) { u64 phys_addr = __pa(per_cpu(vmxarea, raw_smp_processor_id())); if (!vmm_exclusive) kvm_cpu_vmxon(phys_addr); vmcs_clear(vmcs); if (!vmm_exclusive) kvm_cpu_vmxoff(); }
0
[ "CWE-400" ]
linux-2.6
9581d442b9058d3699b4be568b6e5eae38a41493
217,928,491,743,109,760,000,000,000,000,000,000,000
12
KVM: Fix fs/gs reload oops with invalid ldt kvm reloads the host's fs and gs blindly; however, the underlying segment descriptors may be invalid due to the user modifying the ldt after loading them. Fix by using the safe accessors (loadsegment() and load_gs_index()) instead of home-grown unsafe versions. This is CVE-2010-3698. KVM-Stable-Tag. Signed-off-by: Avi Kivity <[email protected]> Signed-off-by: Marcelo Tosatti <[email protected]>
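The safe accessors named above are existing x86 kernel helpers; a kernel-context fragment (not standalone code, and only a sketch of the pattern) looks roughly like:

    unsigned short fs_sel, gs_sel;

    savesegment(fs, fs_sel);      /* remember the host selectors           */
    savesegment(gs, gs_sel);
    /* ... guest runs; fs/gs may now name stale ldt entries ... */
    loadsegment(fs, fs_sel);      /* faults fall back to the NULL selector */
    load_gs_index(gs_sel);        /* gs needs the swapgs-aware helper      */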
buf_charidx_to_byteidx(buf_T *buf, int lnum, int charidx) { char_u *str; char_u *t; if (buf == NULL || buf->b_ml.ml_mfp == NULL) return -1; if (lnum > buf->b_ml.ml_line_count) lnum = buf->b_ml.ml_line_count; str = ml_get_buf(buf, lnum, FALSE); if (str == NULL) return -1; // Convert the character offset to a byte offset t = str; while (*t != NUL && --charidx > 0) t += mb_ptr2len(t); return t - str; }
0
[ "CWE-122", "CWE-787" ]
vim
605ec91e5a7330d61be313637e495fa02a6dc264
189,782,033,330,578,600,000,000,000,000,000,000,000
22
patch 8.2.3847: illegal memory access when using a lambda with an error Problem: Illegal memory access when using a lambda with an error. Solution: Avoid skipping over the NUL after a string.
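A minimal standalone illustration of the rule behind the fix; the helper is mine, but buf_charidx_to_byteidx() above applies the same guard with mb_ptr2len():

    /* Test for the terminating NUL *before* advancing, so an over-large
     * character index can never walk past the end of the string. */
    static int chars_to_bytes(const char *s, int charidx)
    {
        const char *p = s;
        while (*p != '\0' && --charidx > 0)
            p++;               /* vim advances by mb_ptr2len(p) here */
        return (int)(p - s);
    }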
apdu_disconnect (int slot) { int sw; if (DBG_READER) log_debug ("enter: apdu_disconnect: slot=%d\n", slot); if (slot < 0 || slot >= MAX_READER || !reader_table[slot].used ) { if (DBG_READER) log_debug ("leave: apdu_disconnect => SW_HOST_NO_DRIVER\n"); return SW_HOST_NO_DRIVER; } if (reader_table[slot].disconnect_card) { sw = lock_slot (slot); if (!sw) { sw = reader_table[slot].disconnect_card (slot); unlock_slot (slot); } } else sw = 0; if (DBG_READER) log_debug ("leave: apdu_disconnect => sw=0x%x\n", sw); return sw; }
0
[ "CWE-20" ]
gnupg
2183683bd633818dd031b090b5530951de76f392
281,301,188,279,638,200,000,000,000,000,000,000,000
30
Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>
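The commit names the new helpers but not their bodies; a plausible reconstruction of one of them (the body is my sketch, not the gnupg source) shows why they remove the hazard: each byte is widened to a fixed-width unsigned type before shifting, so nothing is promoted to a negative int and shifted:

    #include <stdint.h>

    static inline uint32_t buf32_to_u32(const void *buffer)
    {
        const unsigned char *p = buffer;
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16)
             | ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }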
static void iriap_disconnect_indication(void *instance, void *sap, LM_REASON reason, struct sk_buff *skb) { struct iriap_cb *self; IRDA_DEBUG(4, "%s(), reason=%s\n", __func__, irlmp_reasons[reason]); self = (struct iriap_cb *) instance; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IAS_MAGIC, return;); IRDA_ASSERT(iriap != NULL, return;); del_timer(&self->watchdog_timer); /* Not needed */ if (skb) dev_kfree_skb(skb); if (self->mode == IAS_CLIENT) { IRDA_DEBUG(4, "%s(), disconnect as client\n", __func__); iriap_do_client_event(self, IAP_LM_DISCONNECT_INDICATION, NULL); /* * Inform service user that the request failed by sending * it a NULL value. Warning, the client might close us, so * remember not to use self anymore after calling confirm */ if (self->confirm) self->confirm(IAS_DISCONNECT, 0, NULL, self->priv); } else { IRDA_DEBUG(4, "%s(), disconnect as server\n", __func__); iriap_do_server_event(self, IAP_LM_DISCONNECT_INDICATION, NULL); iriap_close(self); } }
0
[]
linux-2.6
d370af0ef7951188daeb15bae75db7ba57c67846
126,602,714,164,098,940,000,000,000,000,000,000,000
41
irda: validate peer name and attribute lengths Length fields provided by a peer for names and attributes may be longer than the destination array sizes. Validate lengths to prevent stack buffer overflows. Signed-off-by: Dan Rosenberg <[email protected]> Cc: [email protected] Signed-off-by: David S. Miller <[email protected]>
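A generic, standalone form of the validation the message calls for, with an illustrative destination size (the real patch checks peer-supplied name and attribute lengths against the IrDA limits before copying):

    #include <stddef.h>
    #include <string.h>

    /* Reject a peer-supplied length that does not fit the destination
     * instead of copying it over the stack buffer. */
    static int copy_peer_name(char dst[64], const unsigned char *src, size_t len)
    {
        if (len >= 64)
            return -1;         /* invalid: would overflow dst */
        memcpy(dst, src, len);
        dst[len] = '\0';
        return 0;
    }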
CMS_ContentInfo *CMS_digest_create(BIO *in, const EVP_MD *md, unsigned int flags) { CMS_ContentInfo *cms; if (!md) md = EVP_sha1(); cms = cms_DigestedData_create(md); if (!cms) return NULL; if(!(flags & CMS_DETACHED)) CMS_set_detached(cms, 0); if ((flags & CMS_STREAM) || CMS_final(cms, in, NULL, flags)) return cms; CMS_ContentInfo_free(cms); return NULL; }
0
[ "CWE-399", "CWE-703" ]
openssl
cd30f03ac5bf2962f44bd02ae8d88245dff2f12c
298,241,976,962,536,230,000,000,000,000,000,000,000
19
Canonicalise input in CMS_verify. If content is detached and not in binary mode, translate the input to CRLF format. Before this change the input was verified verbatim, which led to a discrepancy between sign and verify.
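A hedged fragment in the spirit of the fix, not the actual patch: OpenSSL's existing SMIME_crlf_copy() helper performs the LF-to-CRLF canonicalisation, so routing detached, non-binary content through it makes verification hash the same bytes signing produced:

    #include <openssl/asn1.h>
    #include <openssl/bio.h>
    #include <openssl/cms.h>

    static int canon_content(BIO *in, BIO *out, int flags)
    {
        if (flags & CMS_BINARY)                      /* binary: verbatim copy */
            return SMIME_crlf_copy(in, out, SMIME_BINARY);
        return SMIME_crlf_copy(in, out, 0);          /* text: LF -> CRLF      */
    }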
int cfg80211_vendor_cmd_reply(struct sk_buff *skb) { struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0]; void *hdr = ((void **)skb->cb)[1]; struct nlattr *data = ((void **)skb->cb)[2]; /* clear CB data for netlink core to own from now on */ memset(skb->cb, 0, sizeof(skb->cb)); if (WARN_ON(!rdev->cur_cmd_info)) { kfree_skb(skb); return -EINVAL; } nla_nest_end(skb, data); genlmsg_end(skb, hdr); return genlmsg_reply(skb, rdev->cur_cmd_info); }
0
[ "CWE-120" ]
linux
f88eb7c0d002a67ef31aeb7850b42ff69abc46dc
104,961,648,185,367,950,000,000,000,000,000,000,000
18
nl80211: validate beacon head We currently don't validate the beacon head, i.e. the header, fixed part and elements that are to go in front of the TIM element. This means that the variable elements there can be malformed, e.g. have a length exceeding the buffer size, but most downstream code from this assumes that this has already been checked. Add the necessary checks to the netlink policy. Cc: [email protected] Fixes: ed1b6cc7f80f ("cfg80211/nl80211: add beacon settings") Link: https://lore.kernel.org/r/1569009255-I7ac7fbe9436e9d8733439eab8acbbd35e55c74ef@changeid Signed-off-by: Johannes Berg <[email protected]>
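The missing check is the classic TLV walk; a standalone sketch (names are mine) of what validating the beacon head's elements amounts to:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Each element's declared length must fit in the remaining buffer
     * before the element is consumed; a malformed length fails the walk. */
    static bool elements_ok(const uint8_t *ies, size_t len)
    {
        while (len >= 2) {
            size_t elen = ies[1];
            if (elen + 2 > len)
                return false;      /* element overruns the beacon head */
            ies += elen + 2;
            len -= elen + 2;
        }
        return len == 0;           /* no trailing partial element */
    }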
static int htc_issue_send(struct htc_target *target, struct sk_buff* skb, u16 len, u8 flags, u8 epid) { struct htc_frame_hdr *hdr; struct htc_endpoint *endpoint = &target->endpoint[epid]; int status; hdr = skb_push(skb, sizeof(struct htc_frame_hdr)); hdr->endpoint_id = epid; hdr->flags = flags; hdr->payload_len = cpu_to_be16(len); status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb); return status; }
0
[ "CWE-400", "CWE-401" ]
linux
853acf7caf10b828102d92d05b5c101666a6142b
260,590,983,907,987,540,000,000,000,000,000,000,000
17
ath9k_htc: release allocated buffer if timed out In htc_config_pipe_credits, htc_setup_complete, and htc_connect_service, if a timeout happens, the allocated buffer needs to be released. Otherwise there will be a memory leak. Signed-off-by: Navid Emamdoost <[email protected]> Signed-off-by: Kalle Valo <[email protected]>
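A kernel-context fragment of the shape of the fix (a sketch, not the literal patch; the completion and device fields are taken from the driver's htc_target):

    time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
    if (!time_left) {
        dev_err(target->dev, "Service connection timeout\n");
        kfree_skb(skb);            /* previously leaked on this path */
        return -ETIMEDOUT;
    }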
static unsigned int mntns_inum(void *ns) { struct mnt_namespace *mnt_ns = ns; return mnt_ns->proc_inum; }
0
[ "CWE-269" ]
user-namespace
a6138db815df5ee542d848318e5dae681590fccd
246,774,381,540,583,740,000,000,000,000,000,000,000
5
mnt: Only change user settable mount flags in remount Kenton Varda <[email protected]> discovered that by remounting a read-only bind mount read-only in a user namespace the MNT_LOCK_READONLY bit would be cleared, allowing an unprivileged user to remount a read-only mount read-write. Correct this by replacing the mask of mount flags to preserve with a mask of mount flags that may be changed, and preserve all others. This ensures that any future bugs with this mask and remount will fail in an easy-to-detect way where new mount flags simply won't change. Cc: [email protected] Acked-by: Serge E. Hallyn <[email protected]> Signed-off-by: "Eric W. Biederman" <[email protected]>
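A standalone model of the inverted-mask rule the message describes (names are mine): list the flags a user may change and keep everything else, so an unhandled new flag fails safe:

    /* Only bits in 'settable' may differ from the current flags; lock
     * bits such as the read-only lock are always preserved. */
    static unsigned int remount_flags(unsigned int cur, unsigned int req,
                                      unsigned int settable)
    {
        return (cur & ~settable) | (req & settable);
    }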
void aa_free_domain_entries(struct aa_domain *domain) { int i; if (domain) { if (!domain->table) return; for (i = 0; i < domain->size; i++) kzfree(domain->table[i]); kzfree(domain->table); domain->table = NULL; } }
0
[ "CWE-264" ]
linux
259e5e6c75a910f3b5e656151dc602f53f9d7548
281,306,616,520,325,600,000,000,000,000,000,000,000
13
Add PR_{GET,SET}_NO_NEW_PRIVS to prevent execve from granting privs With this change, calling prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) disables privilege granting operations at execve-time. For example, a process will not be able to execute a setuid binary to change their uid or gid if this bit is set. The same is true for file capabilities. Additionally, LSM_UNSAFE_NO_NEW_PRIVS is defined to ensure that LSMs respect the requested behavior. To determine if the NO_NEW_PRIVS bit is set, a task may call prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0); It returns 1 if set and 0 if it is not set. If any of the arguments are non-zero, it will return -1 and set errno to -EINVAL. (PR_SET_NO_NEW_PRIVS behaves similarly.) This functionality is desired for the proposed seccomp filter patch series. By using PR_SET_NO_NEW_PRIVS, it allows a task to modify the system call behavior for itself and its child tasks without being able to impact the behavior of a more privileged task. Another potential use is making certain privileged operations unprivileged. For example, chroot may be considered "safe" if it cannot affect privileged tasks. Note, this patch causes execve to fail when PR_SET_NO_NEW_PRIVS is set and AppArmor is in use. It is fixed in a subsequent patch. Signed-off-by: Andy Lutomirski <[email protected]> Signed-off-by: Will Drewry <[email protected]> Acked-by: Eric Paris <[email protected]> Acked-by: Kees Cook <[email protected]> v18: updated change desc v17: using new define values as per 3.4 Signed-off-by: James Morris <[email protected]>
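Since the message fully specifies the userspace API, a small usage example can be given with some confidence:

    #include <stdio.h>
    #include <sys/prctl.h>

    int main(void)
    {
        /* Irreversibly opt this task (and its future children) out of
         * execve-time privilege granting. */
        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0)
            perror("PR_SET_NO_NEW_PRIVS");
        /* Returns 1 once set; non-zero extra arguments yield -1/EINVAL. */
        printf("no_new_privs = %d\n", prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0));
        return 0;
    }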
static GF_Err gf_m4v_parse_frame_mpeg4(GF_M4VParser *m4v, GF_M4VDecSpecInfo dsi, u8 *frame_type, u32 *time_inc, u64 *size, u64 *start, Bool *is_coded) { u8 go, hasVOP, firstObj, secs; s32 o_type; u32 vop_inc = 0; if (!m4v || !size || !start || !frame_type) return GF_BAD_PARAM; *size = 0; firstObj = 1; hasVOP = 0; *is_coded = 0; m4v->current_object_type = (u32) -1; *frame_type = 0; M4V_Reset(m4v, m4v->current_object_start); go = 1; while (go) { o_type = M4V_LoadObject(m4v); switch (o_type) { case M4V_VOP_START_CODE: /*done*/ if (hasVOP) { go = 0; break; } if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } hasVOP = 1; /*coding type*/ *frame_type = gf_bs_read_int(m4v->bs, 2); /*modulo time base*/ secs = 0; while (gf_bs_read_int(m4v->bs, 1) != 0) secs ++; /*no support for B frames in parsing*/ secs += (dsi.enh_layer || *frame_type!=2) ? m4v->tc_dec : m4v->tc_disp; /*marker*/ gf_bs_read_int(m4v->bs, 1); /*vop_time_inc*/ if (dsi.NumBitsTimeIncrement) vop_inc = gf_bs_read_int(m4v->bs, dsi.NumBitsTimeIncrement); m4v->prev_tc_dec = m4v->tc_dec; m4v->prev_tc_disp = m4v->tc_disp; if (dsi.enh_layer || *frame_type!=2) { m4v->tc_disp = m4v->tc_dec; m4v->tc_dec = secs; } *time_inc = secs * dsi.clock_rate + vop_inc; /*marker*/ gf_bs_read_int(m4v->bs, 1); /*coded*/ *is_coded = gf_bs_read_int(m4v->bs, 1); gf_bs_align(m4v->bs); break; case M4V_GOV_START_CODE: if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } if (hasVOP) go = 0; break; case M4V_VOS_START_CODE: case M4V_VOL_START_CODE: if (hasVOP) { go = 0; } else if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } break; case M4V_VO_START_CODE: default: break; case -1: *size = gf_bs_get_position(m4v->bs) - *start; return GF_EOS; } } *size = m4v->current_object_start - *start; return GF_OK; }
0
[ "CWE-119", "CWE-787" ]
gpac
90dc7f853d31b0a4e9441cba97feccf36d8b69a4
28,593,605,037,660,410,000,000,000,000,000,000,000
89
fix some exploitable overflows (#994, #997)
void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key) { if (*key >= OPENSSL_CRYPTO_THREAD_LOCAL_KEY_MAX) return NULL; return thread_local_storage[*key]; }
0
[ "CWE-330" ]
openssl
1b0fe00e2704b5e20334a16d3c9099d1ba2ef1be
137,271,634,917,875,010,000,000,000,000,000,000,000
7
drbg: ensure fork-safety without using a pthread_atfork handler When the new OpenSSL CSPRNG was introduced in version 1.1.1, it was announced in the release notes that it would be fork-safe, which the old CSPRNG hadn't been. The fork-safety was implemented using a fork count, which was incremented by a pthread_atfork handler. Initially, this handler was enabled by default. Unfortunately, the default behaviour had to be changed for other reasons in commit b5319bdbd095, so the new OpenSSL CSPRNG failed to keep its promise. This commit restores the fork-safety using a different approach. It replaces the fork count by a fork id, which coincides with the process id on UNIX-like operating systems and is zero on other operating systems. It is used to detect when an automatic reseed after a fork is necessary. To prevent a future regression, it also adds a test to verify that the child reseeds after fork. CVE-2019-1549 Reviewed-by: Paul Dale <[email protected]> Reviewed-by: Matt Caswell <[email protected]> (Merged from https://github.com/openssl/openssl/pull/9802)
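A standalone model of the fork-id scheme the notes describe (the OpenSSL internals differ; the names here are mine):

    #include <unistd.h>

    static pid_t drbg_fork_id;    /* pid recorded at the last (re)seed */

    /* A mismatch means we are a forked child sharing the parent's DRBG
     * state, so a reseed is required before generating any output. */
    static int drbg_needs_reseed(void)
    {
        return getpid() != drbg_fork_id;
    }

    static void drbg_mark_reseeded(void)
    {
        drbg_fork_id = getpid();
    }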
void *xt_copy_counters(sockptr_t arg, unsigned int len, struct xt_counters_info *info) { size_t offset; void *mem; u64 size; #ifdef CONFIG_COMPAT if (in_compat_syscall()) { /* structures only differ in size due to alignment */ struct compat_xt_counters_info compat_tmp; if (len <= sizeof(compat_tmp)) return ERR_PTR(-EINVAL); len -= sizeof(compat_tmp); if (copy_from_sockptr(&compat_tmp, arg, sizeof(compat_tmp)) != 0) return ERR_PTR(-EFAULT); memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1); info->num_counters = compat_tmp.num_counters; offset = sizeof(compat_tmp); } else #endif { if (len <= sizeof(*info)) return ERR_PTR(-EINVAL); len -= sizeof(*info); if (copy_from_sockptr(info, arg, sizeof(*info)) != 0) return ERR_PTR(-EFAULT); offset = sizeof(*info); } info->name[sizeof(info->name) - 1] = '\0'; size = sizeof(struct xt_counters); size *= info->num_counters; if (size != (u64)len) return ERR_PTR(-EINVAL); mem = vmalloc(len); if (!mem) return ERR_PTR(-ENOMEM); if (copy_from_sockptr_offset(mem, arg, offset, len) == 0) return mem; vfree(mem); return ERR_PTR(-EFAULT); }
0
[]
linux
175e476b8cdf2a4de7432583b49c871345e4f8a1
51,966,627,837,171,530,000,000,000,000,000,000,000
52
netfilter: x_tables: Use correct memory barriers. When a new table value was assigned, it was followed by a write memory barrier. This ensured that all writes before this point would complete before any writes after this point. However, to determine whether the rules are unused, the sequence counter is read. To ensure that all writes have been done before these reads, a full memory barrier is needed, not just a write memory barrier. The same argument applies when incrementing the counter, before the rules are read. Changing to using smp_mb() instead of smp_wmb() fixes the kernel panic reported in cc00bcaa5899 (which is still present), while still maintaining the same speed of replacing tables. The smp_mb() barriers potentially slow the packet path; however, testing has shown no measurable change in performance on a 4-core MIPS64 platform. Fixes: 7f5c6d4f665b ("netfilter: get rid of atomic ops in fast path") Signed-off-by: Mark Tomlinson <[email protected]> Signed-off-by: Pablo Neira Ayuso <[email protected]>
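A C11 model of the ordering argument (names are illustrative; the kernel uses smp_mb()): the publishing store is followed by loads of the per-cpu sequence counters, and a write barrier does not order a store against later loads, but a full barrier does:

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic(intptr_t) table_private;
    static _Atomic(unsigned int) percpu_seq;

    /* seq_cst (the analogue of smp_mb()) forbids the counter load from
     * being reordered before the publishing store; a release/write
     * barrier would not. */
    static unsigned int publish_then_sample(intptr_t newinfo)
    {
        atomic_store_explicit(&table_private, newinfo, memory_order_seq_cst);
        return atomic_load_explicit(&percpu_seq, memory_order_seq_cst);
    }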
struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, udf_pblk_t *block, int *err) { udf_pblk_t newblock; struct buffer_head *dbh = NULL; struct kernel_lb_addr eloc; uint8_t alloctype; struct extent_position epos; struct udf_fileident_bh sfibh, dfibh; loff_t f_pos = udf_ext0_offset(inode); int size = udf_ext0_offset(inode) + inode->i_size; struct fileIdentDesc cfi, *sfi, *dfi; struct udf_inode_info *iinfo = UDF_I(inode); if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) alloctype = ICBTAG_FLAG_AD_SHORT; else alloctype = ICBTAG_FLAG_AD_LONG; if (!inode->i_size) { iinfo->i_alloc_type = alloctype; mark_inode_dirty(inode); return NULL; } /* alloc block, and copy data to it */ *block = udf_new_block(inode->i_sb, inode, iinfo->i_location.partitionReferenceNum, iinfo->i_location.logicalBlockNum, err); if (!(*block)) return NULL; newblock = udf_get_pblock(inode->i_sb, *block, iinfo->i_location.partitionReferenceNum, 0); if (!newblock) return NULL; dbh = udf_tgetblk(inode->i_sb, newblock); if (!dbh) return NULL; lock_buffer(dbh); memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize); set_buffer_uptodate(dbh); unlock_buffer(dbh); mark_buffer_dirty_inode(dbh, inode); sfibh.soffset = sfibh.eoffset = f_pos & (inode->i_sb->s_blocksize - 1); sfibh.sbh = sfibh.ebh = NULL; dfibh.soffset = dfibh.eoffset = 0; dfibh.sbh = dfibh.ebh = dbh; while (f_pos < size) { iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB; sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL); if (!sfi) { brelse(dbh); return NULL; } iinfo->i_alloc_type = alloctype; sfi->descTag.tagLocation = cpu_to_le32(*block); dfibh.soffset = dfibh.eoffset; dfibh.eoffset += (sfibh.eoffset - sfibh.soffset); dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset); if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse, udf_get_fi_ident(sfi))) { iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB; brelse(dbh); return NULL; } } mark_buffer_dirty_inode(dbh, inode); memset(iinfo->i_data + iinfo->i_lenEAttr, 0, iinfo->i_lenAlloc); iinfo->i_lenAlloc = 0; eloc.logicalBlockNum = *block; eloc.partitionReferenceNum = iinfo->i_location.partitionReferenceNum; iinfo->i_lenExtents = inode->i_size; epos.bh = NULL; epos.block = iinfo->i_location; epos.offset = udf_file_entry_alloc_offset(inode); udf_add_aext(inode, &epos, &eloc, inode->i_size, 0); /* UniqueID stuff */ brelse(epos.bh); mark_inode_dirty(inode); return dbh; }
0
[ "CWE-476" ]
linux
7fc3b7c2981bbd1047916ade327beccb90994eee
280,042,409,720,784,720,000,000,000,000,000,000,000
89
udf: Fix NULL ptr deref when converting from inline format udf_expand_file_adinicb() calls directly ->writepage to write data expanded into a page. This however misses to setup inode for writeback properly and so we can crash on inode->i_wb dereference when submitting page for IO like: BUG: kernel NULL pointer dereference, address: 0000000000000158 #PF: supervisor read access in kernel mode ... <TASK> __folio_start_writeback+0x2ac/0x350 __block_write_full_page+0x37d/0x490 udf_expand_file_adinicb+0x255/0x400 [udf] udf_file_write_iter+0xbe/0x1b0 [udf] new_sync_write+0x125/0x1c0 vfs_write+0x28e/0x400 Fix the problem by marking the page dirty and going through the standard writeback path to write the page. Strictly speaking we would not even have to write the page but we want to catch e.g. ENOSPC errors early. Reported-by: butt3rflyh4ck <[email protected]> CC: [email protected] Fixes: 52ebea749aae ("writeback: make backing_dev_info host cgroup-specific bdi_writebacks") Reviewed-by: Christoph Hellwig <[email protected]> Signed-off-by: Jan Kara <[email protected]>
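A kernel-context fragment sketching the approach the message describes (not the literal patch): rather than invoking ->writepage by hand, dirty the page and let ordinary writeback, which sets up inode->i_wb, submit it:

    set_page_dirty(page);                        /* hand off to writeback      */
    unlock_page(page);
    err = filemap_fdatawrite(inode->i_mapping);  /* still catches ENOSPC early */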
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd) { int kcmp; struct rb_node *rbp; struct epitem *epi, *epir = NULL; struct epoll_filefd ffd; ep_set_ffd(&ffd, file, fd); for (rbp = ep->rbr.rb_root.rb_node; rbp; ) { epi = rb_entry(rbp, struct epitem, rbn); kcmp = ep_cmp_ffd(&ffd, &epi->ffd); if (kcmp > 0) rbp = rbp->rb_right; else if (kcmp < 0) rbp = rbp->rb_left; else { epir = epi; break; } } return epir; }
0
[ "CWE-416" ]
linux
a9ed4a6560b8562b7e2e2bed9527e88001f7b682
83,603,355,461,681,730,000,000,000,000,000,000,000
23
epoll: Keep a reference on files added to the check list When adding a new fd to an epoll, if this new fd is an epoll fd itself, we recursively scan the fds attached to it to detect cycles, and add non-epoll files to a "check list" that gets subsequently parsed. However, this check list isn't completely safe when deletions can happen concurrently. To sidestep the issue, make sure that a struct file placed on the check list sees its f_count increased, ensuring that a concurrent deletion won't result in the file disappearing from under our feet. Cc: [email protected] Signed-off-by: Marc Zyngier <[email protected]> Signed-off-by: Al Viro <[email protected]>
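A kernel-context fragment of the pattern (a sketch; the list handling in the real fix differs in detail): pin the struct file for as long as it is reachable from the check list:

    get_file(file);                /* f_count held while on the list */
    list_add(&file->f_tfile_llink, &tfile_check_list);
    /* ... the loop check walks the list ... */
    fput(file);                    /* drop the pin after the walk    */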
static inline void *index_to_obj(struct kmem_cache *cache, struct page *page, unsigned int idx) { return page->s_mem + cache->size * idx; }
0
[ "CWE-703" ]
linux
c4e490cf148e85ead0d1b1c2caaba833f1d5b29f
293,763,420,580,339,400,000,000,000,000,000,000,000
5
mm/slab.c: fix SLAB freelist randomization duplicate entries This patch fixes a bug in the freelist randomization code. When a high random number is used, the freelist will contain duplicate entries. This results in different allocations sharing the same chunk, which leads to odd behaviours and crashes. It should be uncommon, but it depends on the machine. We saw it happening more often on some machines (every few hours of running tests). Fixes: c7ce4f60ac19 ("mm: SLAB freelist randomization") Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: John Sperbeck <[email protected]> Signed-off-by: Thomas Garnier <[email protected]> Cc: Christoph Lameter <[email protected]> Cc: Pekka Enberg <[email protected]> Cc: David Rientjes <[email protected]> Cc: Joonsoo Kim <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
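A standalone model of the corrected approach (rand() stands in for the kernel's entropy source): a Fisher-Yates shuffle of 0..n-1 emits each index exactly once, which is exactly the property the buggy version lost:

    #include <stddef.h>
    #include <stdlib.h>

    static void shuffle_freelist(unsigned int *list, size_t n)
    {
        for (size_t i = 0; i < n; i++)
            list[i] = (unsigned int)i;    /* identity permutation */
        for (size_t i = n; i > 1; i--) {  /* classic Fisher-Yates */
            size_t j = (size_t)rand() % i;
            unsigned int tmp = list[i - 1];
            list[i - 1] = list[j];
            list[j] = tmp;
        }
    }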
new_string(const char *s, Py_ssize_t len, struct tok_state *tok) { char* result = (char *)PyMem_MALLOC(len + 1); if (!result) { tok->done = E_NOMEM; return NULL; } memcpy(result, s, len); result[len] = '\0'; return result; }
0
[ "CWE-125" ]
cpython
dcfcd146f8e6fc5c2fc16a4c192a0c5f5ca8c53c
169,029,024,572,322,500,000,000,000,000,000,000,000
11
bpo-35766: Merge typed_ast back into CPython (GH-11645)
ConnStateData::parseProxyProtocolHeader() { // http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt // detect and parse PROXY/2.0 protocol header if (inBuf.startsWith(Proxy2p0magic)) return parseProxy2p0(); // detect and parse PROXY/1.0 protocol header if (inBuf.startsWith(Proxy1p0magic)) return parseProxy1p0(); // detect and terminate other protocols if (inBuf.length() >= Proxy2p0magic.length()) { // PROXY/1.0 magic is shorter, so we know that // the input does not start with any PROXY magic return proxyProtocolError("PROXY protocol error: invalid header"); } // TODO: detect short non-magic prefixes earlier to avoid // waiting for more data which may never come // not enough bytes to parse yet. return false; }
0
[ "CWE-444" ]
squid
fd68382860633aca92065e6c343cfd1b12b126e7
332,970,134,384,283,600,000,000,000,000,000,000,000
25
Improve Transfer-Encoding handling (#702) Reject messages containing a Transfer-Encoding header with a coding other than chunked or identity. Squid does not support other codings. For simplicity and security's sake, also reject messages where Transfer-Encoding contains unnecessarily complex values that are technically equivalent to "chunked" or "identity" (e.g., ",,chunked" or "identity, chunked"). RFC 7230 formally deprecated and removed identity coding, but it is still used by some agents.
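An illustrative token check in the spirit of the change (the real parser is C++ and tokenizes the header; real HTTP comparisons are case-insensitive):

    #include <string.h>

    /* Accept only a lone "chunked" or "identity" coding; compound values
     * such as ",,chunked" or "identity, chunked" are rejected outright. */
    static int transfer_encoding_acceptable(const char *value)
    {
        return strcmp(value, "chunked") == 0
            || strcmp(value, "identity") == 0;
    }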
ether_hdr_print(netdissect_options *ndo, const u_char *bp, u_int length) { register const struct ether_header *ep; uint16_t length_type; ep = (const struct ether_header *)bp; ND_PRINT((ndo, "%s > %s", etheraddr_string(ndo, ESRC(ep)), etheraddr_string(ndo, EDST(ep)))); length_type = EXTRACT_16BITS(&ep->ether_length_type); if (!ndo->ndo_qflag) { if (length_type <= ETHERMTU) { ND_PRINT((ndo, ", 802.3")); length = length_type; } else ND_PRINT((ndo, ", ethertype %s (0x%04x)", tok2str(ethertype_values,"Unknown", length_type), length_type)); } else { if (length_type <= ETHERMTU) { ND_PRINT((ndo, ", 802.3")); length = length_type; } else ND_PRINT((ndo, ", %s", tok2str(ethertype_values,"Unknown Ethertype (0x%04x)", length_type))); } ND_PRINT((ndo, ", length %u: ", length)); }
0
[ "CWE-125", "CWE-787" ]
tcpdump
1dcd10aceabbc03bf571ea32b892c522cbe923de
266,935,015,801,571,500,000,000,000,000,000,000,000
31
CVE-2017-12897/ISO CLNS: Use ND_TTEST() for the bounds checks in isoclns_print(). This fixes a buffer over-read discovered by Kamil Frankowicz. Don't pass the remaining caplen - that's too hard to get right, and we were getting it wrong in at least one case; just use ND_TTEST(). Add a test using the capture file supplied by the reporter(s).
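A fragment in tcpdump's own idiom (not the literal patch; tstr stands for the printer's usual truncation string): every read from packet data is preceded by an ND_TTEST()/ND_TTEST2() capture-length check instead of hand-maintained caplen arithmetic:

    if (!ND_TTEST2(*pptr, length)) {     /* are 'length' bytes captured? */
        ND_PRINT((ndo, "%s", tstr));     /* print the truncation notice  */
        return;
    }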