Dataset columns:
  func       string   (length 0 – 484k)
  target     int64    (0 – 1)
  cwe        sequence (length 0 – 4)
  project    string   (799 distinct values)
  commit_id  string   (length 40 – 40)
  hash       float64  (1,215,700,430,453,689,100,000,000 – 340,281,914,521,452,260,000,000,000,000)
  size       int64    (1 – 24k)
  message    string   (length 0 – 13.3k)
struct r_bin_pe_export_t* PE_(r_bin_pe_get_exports)(RBinPEObj* pe) {
	r_return_val_if_fail (pe, NULL);
	struct r_bin_pe_export_t* exp, * exports = NULL;
	PE_Word function_ordinal = 0;
	PE_VWord functions_paddr, names_paddr, ordinals_paddr, function_rva, name_vaddr, name_paddr;
	char function_name[PE_NAME_LENGTH + 1], forwarder_name[PE_NAME_LENGTH + 1];
	char dll_name[PE_NAME_LENGTH + 1];
	PE_(image_data_directory) * data_dir_export;
	PE_VWord export_dir_rva;
	int n, i, export_dir_size;
	st64 exports_sz = 0;
	if (!pe->data_directory) {
		return NULL;
	}
	data_dir_export = &pe->data_directory[PE_IMAGE_DIRECTORY_ENTRY_EXPORT];
	export_dir_rva = data_dir_export->VirtualAddress;
	export_dir_size = data_dir_export->Size;
	PE_VWord *func_rvas = NULL;
	PE_Word *ordinals = NULL;
	if (pe->export_directory) {
		if (pe->export_directory->NumberOfFunctions + 1 < pe->export_directory->NumberOfFunctions) {
			// avoid integer overflow
			return NULL;
		}
		exports_sz = (pe->export_directory->NumberOfFunctions + 1) * sizeof (struct r_bin_pe_export_t);
		// we can't bail out just because exports_sz > pe->size: r_bin_pe_export_t is
		// 256+256+8+8+8+4 bytes, so it easily exceeds the file size.
		// to survive fuzzed inputs we abort on export_directory->NumberOfFunctions > 0xffff
		if (exports_sz < 0 || pe->export_directory->NumberOfFunctions + 1 > 0xffff) {
			return NULL;
		}
		if (!(exports = malloc (exports_sz))) {
			return NULL;
		}
		if (r_buf_read_at (pe->b, PE_(va2pa) (pe, pe->export_directory->Name), (ut8*) dll_name, PE_NAME_LENGTH) < 1) {
			// don't stop if the dll name can't be read; set dll_name to an empty string and continue
			pe_printf ("Warning: read (dll name)\n");
			dll_name[0] = '\0';
		}
		functions_paddr = PE_(va2pa) (pe, pe->export_directory->AddressOfFunctions);
		names_paddr = PE_(va2pa) (pe, pe->export_directory->AddressOfNames);
		ordinals_paddr = PE_(va2pa) (pe, pe->export_directory->AddressOfOrdinals);
		const size_t names_sz = pe->export_directory->NumberOfNames * sizeof (PE_Word);
		const size_t funcs_sz = pe->export_directory->NumberOfFunctions * sizeof (PE_VWord);
		ordinals = malloc (names_sz);
		func_rvas = malloc (funcs_sz);
		if (!ordinals || !func_rvas) {
			goto beach;
		}
		int r = r_buf_read_at (pe->b, ordinals_paddr, (ut8 *)ordinals, names_sz);
		if (r != names_sz) {
			goto beach;
		}
		r = r_buf_read_at (pe->b, functions_paddr, (ut8 *)func_rvas, funcs_sz);
		if (r != funcs_sz) {
			goto beach;
		}
		for (i = 0; i < pe->export_directory->NumberOfFunctions; i++) {
			// get vaddr from the AddressOfFunctions array
			function_rva = r_read_at_ble32 ((ut8 *)func_rvas, i * sizeof (PE_VWord), pe->endian);
			// do we have exports by name?
			if (pe->export_directory->NumberOfNames > 0) {
				// search for the value of i in AddressOfOrdinals
				name_vaddr = 0;
				for (n = 0; n < pe->export_directory->NumberOfNames; n++) {
					PE_Word fo = r_read_at_ble16 ((ut8 *)ordinals, n * sizeof (PE_Word), pe->endian);
					// does this index exist in AddressOfOrdinals?
					if (i == fo) {
						function_ordinal = fo;
						// get the VA of the export name from AddressOfNames
						name_vaddr = r_buf_read_le32_at (pe->b, names_paddr + n * sizeof (PE_VWord));
						break;
					}
				}
				// do we have an address in name_vaddr?
				if (name_vaddr) {
					// get the name of the export
					name_paddr = PE_(va2pa) (pe, name_vaddr);
					if (r_buf_read_at (pe->b, name_paddr, (ut8*) function_name, PE_NAME_LENGTH) < 1) {
						pe_printf ("Warning: read (function name)\n");
						exports[i].last = 1;
						return exports;
					}
				} else {
					// no name for this export, use the ordinal
					function_ordinal = i;
					snprintf (function_name, PE_NAME_LENGTH, "Ordinal_%i", i + pe->export_directory->Base);
				}
			} else {
				// no exports by name at all; build the name from the ordinal, taking the Base value into account
				snprintf (function_name, PE_NAME_LENGTH, "Ordinal_%i", i + pe->export_directory->Base);
			}
			// check if the VA lies inside the export directory; that means a forwarder export
			if (function_rva >= export_dir_rva && function_rva < (export_dir_rva + export_dir_size)) {
				// for a forwarder, the VA points to the forwarded name
				if (r_buf_read_at (pe->b, PE_(va2pa) (pe, function_rva), (ut8*) forwarder_name, PE_NAME_LENGTH) < 1) {
					exports[i].last = 1;
					return exports;
				}
			} else {
				// not a forwarder export
				snprintf (forwarder_name, PE_NAME_LENGTH, "NONE");
			}
			dll_name[PE_NAME_LENGTH] = '\0';
			function_name[PE_NAME_LENGTH] = '\0';
			exports[i].vaddr = bin_pe_rva_to_va (pe, function_rva);
			exports[i].paddr = PE_(va2pa) (pe, function_rva);
			exports[i].ordinal = function_ordinal + pe->export_directory->Base;
			memcpy (exports[i].forwarder, forwarder_name, PE_NAME_LENGTH);
			exports[i].forwarder[PE_NAME_LENGTH] = '\0';
			memcpy (exports[i].name, function_name, PE_NAME_LENGTH);
			exports[i].name[PE_NAME_LENGTH] = '\0';
			memcpy (exports[i].libname, dll_name, PE_NAME_LENGTH);
			exports[i].libname[PE_NAME_LENGTH] = '\0';
			exports[i].last = 0;
		}
		exports[i].last = 1;
		free (ordinals);
		free (func_rvas);
	}
	exp = parse_symbol_table (pe, exports, exports_sz - sizeof (struct r_bin_pe_export_t));
	if (exp) {
		exports = exp;
	}
	return exports;
beach:
	free (exports);
	free (ordinals);
	free (func_rvas);
	return NULL;
}
0
[ "CWE-400", "CWE-703" ]
radare2
634b886e84a5c568d243e744becc6b3223e089cf
173,330,934,323,411,300,000,000,000,000,000,000,000
130
Fix DoS in PE/QNX/DYLDCACHE/PSX parsers ##crash * Reported by lazymio * Reproducer: AAA4AAAAAB4=
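The function above shows a common hardening pattern for attacker-controlled counts: reject values whose increment wraps around, and cap them before multiplying them into an allocation size. A minimal standalone sketch of that pattern (the names and the cap are illustrative, not from the radare2 sources):

#include <stdint.h>
#include <stdlib.h>

#define ENTRY_CAP 0xffff  /* sanity cap, mirroring the fix above */

static void *alloc_entries(uint32_t count, size_t entry_size) {
	if (count + 1 < count) {        /* unsigned wrap-around check */
		return NULL;
	}
	if (count + 1 > ENTRY_CAP) {    /* cap before multiplying */
		return NULL;
	}
	return calloc(count + 1, entry_size); /* calloc also guards the product */
}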
sendtomaster(dns_forward_t *forward) {
	isc_result_t result;
	isc_sockaddr_t src;
	isc_dscp_t dscp = -1;

	LOCK_ZONE(forward->zone);

	if (DNS_ZONE_FLAG(forward->zone, DNS_ZONEFLG_EXITING)) {
		UNLOCK_ZONE(forward->zone);
		return (ISC_R_CANCELED);
	}

	if (forward->which >= forward->zone->masterscnt) {
		UNLOCK_ZONE(forward->zone);
		return (ISC_R_NOMORE);
	}

	forward->addr = forward->zone->masters[forward->which];
	/*
	 * Always use TCP regardless of whether the original update
	 * used TCP.
	 * XXX The timeout may be a bit small if we are far down a
	 * transfer graph and the master has to try several masters.
	 */
	switch (isc_sockaddr_pf(&forward->addr)) {
	case PF_INET:
		src = forward->zone->xfrsource4;
		dscp = forward->zone->xfrsource4dscp;
		break;
	case PF_INET6:
		src = forward->zone->xfrsource6;
		dscp = forward->zone->xfrsource6dscp;
		break;
	default:
		result = ISC_R_NOTIMPLEMENTED;
		goto unlock;
	}
	result = dns_request_createraw(forward->zone->view->requestmgr,
				       forward->msgbuf, &src, &forward->addr,
				       dscp, forward->options, 15 /* XXX */,
				       0, 0, forward->zone->task,
				       forward_callback, forward,
				       &forward->request);
	if (result == ISC_R_SUCCESS) {
		if (!ISC_LINK_LINKED(forward, link))
			ISC_LIST_APPEND(forward->zone->forwards, forward, link);
	}
 unlock:
	UNLOCK_ZONE(forward->zone);
	return (result);
}
0
[ "CWE-327" ]
bind9
f09352d20a9d360e50683cd1d2fc52ccedcd77a0
331,258,949,062,043,020,000,000,000,000,000,000,000
53
Update keyfetch_done compute_tag check If in keyfetch_done the compute_tag fails (because for example the algorithm is not supported), don't crash, but instead ignore the key.
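The message describes the generic "skip instead of crash" pattern for per-item failures. A hedged, self-contained sketch under assumed names (compute_tag, dns_key_t, and the failure condition are stand-ins, not the BIND 9 API):

/* hypothetical tag computation: fails for unsupported algorithms */
typedef struct { int algorithm; unsigned int tag; } dns_key_t;

static int compute_tag(const dns_key_t *key, unsigned int *tagp) {
	if (key->algorithm < 0)
		return -1;          /* unsupported algorithm */
	*tagp = key->tag;
	return 0;
}

static unsigned int count_usable_keys(const dns_key_t *keys, int nkeys) {
	unsigned int usable = 0;
	for (int i = 0; i < nkeys; i++) {
		unsigned int tag;
		if (compute_tag(&keys[i], &tag) != 0)
			continue;   /* the fix: ignore the key instead of crashing */
		usable++;
	}
	return usable;
}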
void conv_localetodisp(gchar *outbuf, gint outlen, const gchar *inbuf)
{
	gchar *tmpstr;

	codeconv_set_strict(TRUE);
	tmpstr = conv_iconv_strdup(inbuf, conv_get_locale_charset_str(),
				   CS_INTERNAL);
	codeconv_set_strict(FALSE);
	if (tmpstr && g_utf8_validate(tmpstr, -1, NULL)) {
		strncpy2(outbuf, tmpstr, outlen);
		g_free(tmpstr);
		return;
	} else if (tmpstr && !g_utf8_validate(tmpstr, -1, NULL)) {
		g_free(tmpstr);
		codeconv_set_strict(TRUE);
		tmpstr = conv_iconv_strdup(inbuf,
					   conv_get_locale_charset_str_no_utf8(),
					   CS_INTERNAL);
		codeconv_set_strict(FALSE);
	}
	if (tmpstr && g_utf8_validate(tmpstr, -1, NULL)) {
		strncpy2(outbuf, tmpstr, outlen);
		g_free(tmpstr);
		return;
	} else {
		g_free(tmpstr);
		conv_utf8todisp(outbuf, outlen, inbuf);
	}
}
0
[ "CWE-119" ]
claws
d390fa07f5548f3173dd9cc13b233db5ce934c82
132,071,139,811,362,160,000,000,000,000,000,000,000
29
Make sure we don't run out of the output buffer. Maybe fixes bug #3557
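The function copies converted text into a caller-supplied buffer via strncpy2(outbuf, tmpstr, outlen). A hedged sketch of what a bounded, always-NUL-terminating copy like that has to do (this is not the Claws Mail implementation of strncpy2, just the pattern):

#include <stddef.h>

static void copy_bounded(char *dst, size_t dstlen, const char *src) {
	size_t i;

	if (dstlen == 0)
		return;
	for (i = 0; i + 1 < dstlen && src[i] != '\0'; i++)
		dst[i] = src[i];
	dst[i] = '\0'; /* never write past dst[dstlen-1] */
}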
static void paging64_init_context(struct kvm_mmu *context)
{
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->sync_page = paging64_sync_page;
	context->invlpg = paging64_invlpg;
	context->direct_map = false;
}
0
[ "CWE-476" ]
linux
9f46c187e2e680ecd9de7983e4d081c3391acc76
329,627,066,367,916,100,000,000,000,000,000,000,000
8
KVM: x86/mmu: fix NULL pointer dereference on guest INVPCID With shadow paging enabled, the INVPCID instruction results in a call to kvm_mmu_invpcid_gva. If INVPCID is executed with CR0.PG=0, the invlpg callback is not set and the result is a NULL pointer dereference. Fix it trivially by checking for mmu->invlpg before every call. There are other possibilities: - check for CR0.PG, because KVM (like all Intel processors after P5) flushes guest TLB on CR0.PG changes so that INVPCID/INVLPG are a nop with paging disabled - check for EFER.LMA, because KVM syncs and flushes when switching MMU contexts outside of 64-bit mode All of these are tricky, go for the simple solution. This is CVE-2022-1789. Reported-by: Yongkang Jia <[email protected]> Cc: [email protected] Signed-off-by: Paolo Bonzini <[email protected]>
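The fix described in the message is the classic "optional callback" guard: an indirect call through a function pointer that may legitimately be NULL must be preceded by a check. A simplified sketch (the types are stand-ins, not the kernel's struct kvm_mmu):

struct mmu_ops {
	void (*invlpg)(unsigned long gva); /* may be NULL, e.g. with CR0.PG=0 */
};

static void mmu_invlpg(struct mmu_ops *mmu, unsigned long gva) {
	if (mmu->invlpg)        /* the fix: check before every indirect call */
		mmu->invlpg(gva);
}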
MockUdpReadFilterCallbacks::MockUdpReadFilterCallbacks() { ON_CALL(*this, udpListener()).WillByDefault(ReturnRef(udp_listener_)); }
0
[ "CWE-835" ]
envoy
c8de199e2971f79cbcbc6b5eadc8c566b28705d1
142,805,761,333,548,200,000,000,000,000,000,000,000
3
listener: clean up accept filter before creating connection (#8922) Signed-off-by: Yuchen Dai <[email protected]>
MagickExport MagickBooleanType XComponentGenesis(void) { return(MagickTrue); }
0
[ "CWE-401" ]
ImageMagick6
13801f5d0bd7a6fdb119682d34946636afdb2629
188,095,668,179,847,640,000,000,000,000,000,000,000
4
https://github.com/ImageMagick/ImageMagick/issues/1531
static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
				 struct cgroup *cgroup,
				 struct cgroup_taskset *tset)
{
	return 0;
}
0
[ "CWE-476", "CWE-415" ]
linux
371528caec553785c37f73fa3926ea0de84f986f
185,759,552,288,089,520,000,000,000,000,000,000,000
6
mm: memcg: Correct unregistering of events attached to the same eventfd There is an issue when memcg unregisters events that were attached to the same eventfd: - On the first call mem_cgroup_usage_unregister_event() removes all events attached to a given eventfd, and if there were no events left, thresholds->primary would become NULL; - Since there were several events registered, cgroups core will call mem_cgroup_usage_unregister_event() again, but now the kernel will oops, as the function doesn't expect that threshold->primary may be NULL. It's a good question whether mem_cgroup_usage_unregister_event() should actually remove all events in one go, but nowadays it can't do any better, as the cftype->unregister_event callback doesn't pass any private event-associated cookie. So, let's fix the issue by simply checking for threshold->primary. FWIW, w/o the patch the following oops may be observed: BUG: unable to handle kernel NULL pointer dereference at 0000000000000004 IP: [<ffffffff810be32c>] mem_cgroup_usage_unregister_event+0x9c/0x1f0 Pid: 574, comm: kworker/0:2 Not tainted 3.3.0-rc4+ #9 Bochs Bochs RIP: 0010:[<ffffffff810be32c>] [<ffffffff810be32c>] mem_cgroup_usage_unregister_event+0x9c/0x1f0 RSP: 0018:ffff88001d0b9d60 EFLAGS: 00010246 Process kworker/0:2 (pid: 574, threadinfo ffff88001d0b8000, task ffff88001de91cc0) Call Trace: [<ffffffff8107092b>] cgroup_event_remove+0x2b/0x60 [<ffffffff8103db94>] process_one_work+0x174/0x450 [<ffffffff8103e413>] worker_thread+0x123/0x2d0 Cc: stable <[email protected]> Signed-off-by: Anton Vorontsov <[email protected]> Acked-by: KAMEZAWA Hiroyuki <[email protected]> Cc: Kirill A. Shutemov <[email protected]> Cc: Michal Hocko <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
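The fix the message describes is a simple early-out when shared state has already been torn down by an earlier call. A hedged, simplified sketch (the types are stand-ins, not the memcg structures):

struct thresholds { struct entry *primary; };

static void usage_unregister_event(struct thresholds *t) {
	if (!t->primary)
		return; /* all events already removed by a previous call */
	/* ... walk t->primary and remove matching events ... */
}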
static struct stream_encoder *dce100_stream_encoder_create(
	enum engine_id eng_id,
	struct dc_context *ctx)
{
	struct dce110_stream_encoder *enc110 =
		kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL);

	if (!enc110)
		return NULL;

	dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
					&stream_enc_regs[eng_id],
					&se_shift, &se_mask);
	return &enc110->base;
}
0
[ "CWE-400", "CWE-401" ]
linux
104c307147ad379617472dd91a5bcb368d72bd6d
27,476,797,395,229,130,000,000,000,000,000,000,000
14
drm/amd/display: prevent memory leak In dcn*_create_resource_pool the allocated memory should be released if construct pool fails. Reviewed-by: Harry Wentland <[email protected]> Signed-off-by: Navid Emamdoost <[email protected]> Signed-off-by: Alex Deucher <[email protected]>
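The leak pattern named in the message is allocate-then-construct, where the construct step can fail. A userspace sketch of the fix (the driver uses kzalloc/kfree; construct_pool here is a hypothetical stand-in for the failing construct step):

#include <stdbool.h>
#include <stdlib.h>

struct pool { int dummy; };

/* hypothetical construct step that can fail */
static bool construct_pool(struct pool *p) { p->dummy = 1; return true; }

static struct pool *create_pool(void) {
	struct pool *p = calloc(1, sizeof(*p));
	if (!p)
		return NULL;
	if (!construct_pool(p)) {
		free(p); /* the fix: release the allocation when construct fails */
		return NULL;
	}
	return p;
}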
GF_Err url_Read(GF_Box *s, GF_BitStream *bs)
{
	GF_DataEntryURLBox *ptr = (GF_DataEntryURLBox *)s;

	if (ptr->size) {
		ptr->location = (char*)gf_malloc((u32) ptr->size);
		if (! ptr->location) return GF_OUT_OF_MEM;
		gf_bs_read_data(bs, ptr->location, (u32)ptr->size);
	}
	return GF_OK;
}
0
[ "CWE-400", "CWE-401" ]
gpac
d2371b4b204f0a3c0af51ad4e9b491144dd1225c
236,633,599,569,611,500,000,000,000,000,000,000,000
11
prevent dref memleak on invalid input (#1183)
GF_Err metx_AddBox(GF_Box *s, GF_Box *a)
{
	GF_MetaDataSampleEntryBox *ptr = (GF_MetaDataSampleEntryBox *)s;
	switch (a->type) {
	case GF_ISOM_BOX_TYPE_SINF:
		gf_list_add(ptr->protections, a);
		break;
	case GF_ISOM_BOX_TYPE_TXTC:
		//we allow the config box on metx
		if (ptr->config) ERROR_ON_DUPLICATED_BOX(a, ptr)
		ptr->config = (GF_TextConfigBox *)a;
		break;
	default:
		return gf_isom_box_add_default(s, a);
	}
	return GF_OK;
}
0
[ "CWE-125" ]
gpac
bceb03fd2be95097a7b409ea59914f332fb6bc86
37,529,383,283,799,058,000,000,000,000,000,000,000
17
fixed 2 possible heap overflows (inc. #1088)
void PackLinuxElf64::unpack(OutputFile *fo)
{
    if (e_phoff != sizeof(Elf64_Ehdr)) {  // Phdrs not contiguous with Ehdr
        throwCantUnpack("bad e_phoff");
    }
    unsigned const c_phnum = get_te16(&ehdri.e_phnum);
    upx_uint64_t old_data_off = 0;
    upx_uint64_t old_data_len = 0;
    upx_uint64_t old_dtinit = 0;

    unsigned szb_info = sizeof(b_info);
    {
        upx_uint64_t const e_entry = get_te64(&ehdri.e_entry);
        if (e_entry < 0x401180
        &&  get_te16(&ehdri.e_machine)==Elf64_Ehdr::EM_386) { /* old style, 8-byte b_info */
            szb_info = 2*sizeof(unsigned);
        }
    }

    fi->seek(overlay_offset - sizeof(l_info), SEEK_SET);
    fi->readx(&linfo, sizeof(linfo));
    lsize = get_te16(&linfo.l_lsize);
    p_info hbuf;
    fi->readx(&hbuf, sizeof(hbuf));
    unsigned orig_file_size = get_te32(&hbuf.p_filesize);
    blocksize = get_te32(&hbuf.p_blocksize);
    if (file_size > (off_t)orig_file_size || blocksize > orig_file_size
    ||  !mem_size_valid(1, blocksize, OVERHEAD))
        throwCantUnpack("p_info corrupted");

#define MAX_ELF_HDR 1024
    union {
        unsigned char buf[MAX_ELF_HDR];
        //struct { Elf64_Ehdr ehdr; Elf64_Phdr phdr; } e;
    } u;
    Elf64_Ehdr *const ehdr = (Elf64_Ehdr *) u.buf;
    Elf64_Phdr const *phdr = 0;

    ibuf.alloc(blocksize + OVERHEAD);
    b_info bhdr;
    memset(&bhdr, 0, sizeof(bhdr));
    fi->readx(&bhdr, szb_info);
    ph.u_len = get_te32(&bhdr.sz_unc);
    ph.c_len = get_te32(&bhdr.sz_cpr);
    if (ph.c_len > (unsigned)file_size || ph.c_len == 0 || ph.u_len == 0
    ||  ph.u_len > sizeof(u))
        throwCantUnpack("b_info corrupted");
    ph.filter_cto = bhdr.b_cto8;

    // Uncompress Ehdr and Phdrs.
    if (ibuf.getSize() < ph.c_len || sizeof(u) < ph.u_len)
        throwCompressedDataViolation();
    fi->readx(ibuf, ph.c_len);
    decompress(ibuf, (upx_byte *)ehdr, false);
    if (ehdr->e_type   !=ehdri.e_type
    ||  ehdr->e_machine!=ehdri.e_machine
    ||  ehdr->e_version!=ehdri.e_version
        // less strict for EM_PPC64 to workaround earlier bug
    ||  !( ehdr->e_flags==ehdri.e_flags
        || Elf64_Ehdr::EM_PPC64 == get_te16(&ehdri.e_machine))
    ||  ehdr->e_ehsize !=ehdri.e_ehsize
        // check EI_MAG[0-3], EI_CLASS, EI_DATA, EI_VERSION
    ||  memcmp(ehdr->e_ident, ehdri.e_ident, Elf64_Ehdr::EI_OSABI)) {
        throwCantUnpack("ElfXX_Ehdr corrupted");
    }
    fi->seek(- (off_t) (szb_info + ph.c_len), SEEK_CUR);

    unsigned const u_phnum = get_te16(&ehdr->e_phnum);
    unsigned total_in = 0;
    unsigned total_out = 0;
    unsigned c_adler = upx_adler32(NULL, 0);
    unsigned u_adler = upx_adler32(NULL, 0);

    // Packed ET_EXE has no PT_DYNAMIC.
    // Packed ET_DYN has original PT_DYNAMIC for info needed by rtld.
    bool const is_shlib = !!elf_find_ptype(Elf64_Phdr::PT_DYNAMIC, phdri, c_phnum);
    if (is_shlib) {
        // Unpack and output the Ehdr and Phdrs for real.
        // This depends on position within input file fi.
        unpackExtent(ph.u_len, fo, total_in, total_out,
            c_adler, u_adler, false, szb_info);

        // The first PT_LOAD.  Part is not compressed (for benefit of rtld.)
        // Read enough to position the input for next unpackExtent.
        fi->seek(0, SEEK_SET);
        fi->readx(ibuf, overlay_offset + sizeof(hbuf) + szb_info + ph.c_len);
        overlay_offset -= sizeof(linfo);
        if (fo) {
            fo->write(ibuf + ph.u_len, overlay_offset - ph.u_len);
        }
        // Search the Phdrs of compressed
        int n_ptload = 0;
        phdr = (Elf64_Phdr *) (void *) (1+ (Elf64_Ehdr *)(unsigned char *)ibuf);
        for (unsigned j=0; j < u_phnum; ++phdr, ++j) {
            if (PT_LOAD64==get_te32(&phdr->p_type) && 0!=n_ptload++) {
                old_data_off = get_te64(&phdr->p_offset);
                old_data_len = get_te64(&phdr->p_filesz);
                break;
            }
        }

        total_in  = overlay_offset;
        total_out = overlay_offset;
        ph.u_len = 0;
        // Decompress and unfilter the tail of first PT_LOAD.
        phdr = (Elf64_Phdr *) (void *) (1+ ehdr);
        for (unsigned j=0; j < u_phnum; ++phdr, ++j) {
            if (PT_LOAD64==get_te32(&phdr->p_type)) {
                ph.u_len = get_te64(&phdr->p_filesz) - overlay_offset;
                break;
            }
        }
        unpackExtent(ph.u_len, fo, total_in, total_out,
            c_adler, u_adler, false, szb_info);
    }
    else {  // main executable
        // Decompress each PT_LOAD.
        bool first_PF_X = true;
        phdr = (Elf64_Phdr *) (void *) (1+ ehdr);  // uncompressed
        for (unsigned j=0; j < u_phnum; ++phdr, ++j) {
            if (PT_LOAD64==get_te32(&phdr->p_type)) {
                unsigned const filesz = get_te64(&phdr->p_filesz);
                unsigned const offset = get_te64(&phdr->p_offset);
                if (fo)
                    fo->seek(offset, SEEK_SET);
                if (Elf64_Phdr::PF_X & get_te32(&phdr->p_flags)) {
                    unpackExtent(filesz, fo, total_in, total_out,
                        c_adler, u_adler, first_PF_X, szb_info);
                    first_PF_X = false;
                }
                else {
                    unpackExtent(filesz, fo, total_in, total_out,
                        c_adler, u_adler, false, szb_info);
                }
            }
        }
    }
    phdr = phdri;
    load_va = 0;
    for (unsigned j=0; j < c_phnum; ++j) {
        if (PT_LOAD64==get_te32(&phdr->p_type)) {
            load_va = get_te64(&phdr->p_vaddr);
            break;
        }
    }
    if (is_shlib
    ||  ((unsigned)(get_te64(&ehdri.e_entry) - load_va) + up4(lsize) +
            ph.getPackHeaderSize() + sizeof(overlay_offset)) < up4(file_size)) {
        // Loader is not at end; skip past it.
        funpad4(fi);  // MATCH01
        unsigned d_info[6];
        fi->readx(d_info, sizeof(d_info));
        if (0==old_dtinit) {
            old_dtinit = d_info[2 + (0==d_info[0])];
        }
        fi->seek(lsize - sizeof(d_info), SEEK_CUR);
    }

    // The gaps between PT_LOAD and after last PT_LOAD
    phdr = (Elf64_Phdr *) (u.buf + sizeof(*ehdr));
    upx_uint64_t hi_offset(0);
    for (unsigned j = 0; j < u_phnum; ++j) {
        if (PT_LOAD64==phdr[j].p_type
        &&  hi_offset < phdr[j].p_offset)
            hi_offset = phdr[j].p_offset;
    }
    for (unsigned j = 0; j < u_phnum; ++j) {
        unsigned const size = find_LOAD_gap(phdr, j, u_phnum);
        if (size) {
            unsigned const where = get_te64(&phdr[j].p_offset) +
                                   get_te64(&phdr[j].p_filesz);
            if (fo)
                fo->seek(where, SEEK_SET);
            unpackExtent(size, fo, total_in, total_out,
                c_adler, u_adler, false, szb_info,
                (phdr[j].p_offset != hi_offset));
        }
    }

    // check for end-of-file
    fi->readx(&bhdr, szb_info);
    unsigned const sz_unc = ph.u_len = get_te32(&bhdr.sz_unc);

    if (sz_unc == 0) { // uncompressed size 0 -> EOF
        // note: magic is always stored le32
        unsigned const sz_cpr = get_le32(&bhdr.sz_cpr);
        if (sz_cpr != UPX_MAGIC_LE32)  // sz_cpr must be h->magic
            throwCompressedDataViolation();
    }
    else { // extra bytes after end?
        throwCompressedDataViolation();
    }

    if (is_shlib) {  // the non-first PT_LOAD
        int n_ptload = 0;
        unsigned load_off = 0;
        phdr = (Elf64_Phdr *) (u.buf + sizeof(*ehdr));
        for (unsigned j= 0; j < u_phnum; ++j, ++phdr) {
            if (PT_LOAD64==get_te32(&phdr->p_type) && 0!=n_ptload++) {
                load_off = get_te64(&phdr->p_offset);
                fi->seek(old_data_off, SEEK_SET);
                fi->readx(ibuf, old_data_len);
                total_in  += old_data_len;
                total_out += old_data_len;
                if (fo) {
                    fo->seek(get_te64(&phdr->p_offset), SEEK_SET);
                    fo->rewrite(ibuf, old_data_len);
                }
            }
        }

        // Restore DT_INIT.d_val
        phdr = (Elf64_Phdr *) (u.buf + sizeof(*ehdr));
        for (unsigned j= 0; j < u_phnum; ++j, ++phdr) {
            if (phdr->PT_DYNAMIC==get_te32(&phdr->p_type)) {
                unsigned const dyn_off = get_te64(&phdr->p_offset);
                unsigned const dyn_len = get_te64(&phdr->p_filesz);
                Elf64_Dyn *dyn = (Elf64_Dyn *)((unsigned char *)ibuf +
                    (dyn_off - load_off));
                for (unsigned j2= 0; j2 < dyn_len; ++dyn, j2 += sizeof(*dyn)) {
                    if (dyn->DT_INIT==get_te32(&dyn->d_tag)) {
                        if (fo) {
                            fo->seek(sizeof(upx_uint64_t) + j2 + dyn_off, SEEK_SET);
                            fo->rewrite(&old_dtinit, sizeof(old_dtinit));
                            fo->seek(0, SEEK_END);
                        }
                        break;
                    }
                }
            }
        }
    }

    // update header with totals
    ph.c_len = total_in;
    ph.u_len = total_out;

    // all bytes must be written
    if (total_out != orig_file_size)
        throwEOFException();

    // finally test the checksums
    if (ph.c_adler != c_adler || ph.u_adler != u_adler)
        throwChecksumError();
#undef MAX_ELF_HDR
}
0
[ "CWE-476" ]
upx
ef336dbcc6dc8344482f8cf6c909ae96c3286317
30,450,425,629,754,828,000,000,000,000,000,000,000
244
Protect against bad crafted input. https://github.com/upx/upx/issues/128 modified: p_lx_elf.cpp
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_vq_state vq_state;
	struct vdpa_callback cb;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	u32 idx;
	long r;

	r = get_user(idx, (u32 __user *)argp);
	if (r < 0)
		return r;

	if (idx >= v->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, v->nvqs);
	vq = &v->vqs[idx];

	switch (cmd) {
	case VHOST_VDPA_SET_VRING_ENABLE:
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		ops->set_vq_ready(vdpa, idx, s.num);
		return 0;
	case VHOST_GET_VRING_BASE:
		r = ops->get_vq_state(v->vdpa, idx, &vq_state);
		if (r)
			return r;
		vq->last_avail_idx = vq_state.split.avail_index;
		break;
	}

	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
	if (r)
		return r;

	switch (cmd) {
	case VHOST_SET_VRING_ADDR:
		if (ops->set_vq_address(vdpa, idx,
					(u64)(uintptr_t)vq->desc,
					(u64)(uintptr_t)vq->avail,
					(u64)(uintptr_t)vq->used))
			r = -EINVAL;
		break;
	case VHOST_SET_VRING_BASE:
		vq_state.split.avail_index = vq->last_avail_idx;
		if (ops->set_vq_state(vdpa, idx, &vq_state))
			r = -EINVAL;
		break;
	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx.ctx) {
			cb.callback = vhost_vdpa_virtqueue_cb;
			cb.private = vq;
		} else {
			cb.callback = NULL;
			cb.private = NULL;
		}
		ops->set_vq_cb(vdpa, idx, &cb);
		vhost_vdpa_setup_vq_irq(v, idx);
		break;
	case VHOST_SET_VRING_NUM:
		ops->set_vq_num(vdpa, idx, vq->num);
		break;
	}

	return r;
}
0
[ "CWE-190" ]
linux
870aaff92e959e29d40f9cfdb5ed06ba2fc2dae0
75,251,201,261,594,340,000,000,000,000,000,000,000
75
vdpa: clean up get_config_size ret value handling The return type of get_config_size is size_t so it makes sense to change the type of the variable holding its result. That said, this already got taken care of (differently, and arguably not as well) by commit 3ed21c1451a1 ("vdpa: check that offsets are within bounds"). The added 'c->off > size' test in that commit will be done as an unsigned comparison on 32-bit (safe due to not being signed). On a 64-bit platform, it will be done as a signed comparison, but in that case the comparison will be done in 64-bit, and 'c->off' being an u32 it will be valid thanks to the extended range (ie both values will be positive in 64 bits). So this was a real bug, but it was already addressed and marked for stable. Signed-off-by: Laura Abbott <[email protected]> Reported-by: Luo Likang <[email protected]> Signed-off-by: Michael S. Tsirkin <[email protected]>
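The message's reasoning about why 'c->off > size' is safe comes down to C's usual arithmetic conversions. A small self-contained demonstration of that comparison (illustrative values only):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
	uint32_t off = 0xffffffffu;   /* largest possible u32 offset */
	size_t size = 16;             /* config size */

	/* On 64-bit, both operands widen to an unsigned/positive 64-bit
	 * value; on 32-bit, the comparison is done unsigned. Either way
	 * the bounds check behaves as intended and rejects huge offsets. */
	printf("%s\n", (off > size) ? "rejected" : "accepted");
	return 0;
}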
R_API char *retrieve_class_method_access_string(ut16 flags) { return retrieve_access_string (flags, CLASS_ACCESS_FLAGS); }
0
[ "CWE-119", "CWE-788" ]
radare2
6c4428f018d385fc80a33ecddcb37becea685dd5
13,952,336,418,156,332,000,000,000,000,000,000,000
3
Improve boundary checks to fix oobread segfaults ##crash * Reported by Cen Zhang via huntr.dev * Reproducer: bins/fuzzed/javaoob-havoc.class
static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) { }
0
[]
linux-2.6
6209344f5a3795d34b7f2c0061f49802283b6bdd
139,732,483,862,251,860,000,000,000,000,000,000,000
2
net: unix: fix inflight counting bug in garbage collector Previously I assumed that the receive queues of candidates don't change during the GC. This is only half true, nothing can be received from the queues (see comment in unix_gc()), but buffers could be added through the other half of the socket pair, which may still have file descriptors referring to it. This can result in inc_inflight_move_tail() erroneously increasing the "inflight" counter for a unix socket for which dec_inflight() wasn't previously called. This in turn can trigger the "BUG_ON(total_refs < inflight_refs)" in a later garbage collection run. Fix this by only manipulating the "inflight" counter for sockets which are candidates themselves. Duplicating the file references in unix_attach_fds() is also needed to prevent a socket becoming a candidate for GC while the skb that contains it is not yet queued. Reported-by: Andrea Bittau <[email protected]> Signed-off-by: Miklos Szeredi <[email protected]> CC: [email protected] Signed-off-by: Linus Torvalds <[email protected]>
GF_Box *mfhd_New() { ISOM_DECL_BOX_ALLOC(GF_MovieFragmentHeaderBox, GF_ISOM_BOX_TYPE_MFHD); return (GF_Box *)tmp; }
0
[ "CWE-400", "CWE-401" ]
gpac
d2371b4b204f0a3c0af51ad4e9b491144dd1225c
72,878,659,139,446,720,000,000,000,000,000,000,000
5
prevent dref memleak on invalid input (#1183)
bool setup_fields(THD *thd, Ref_ptr_array ref_pointer_array,
                  List<Item> &fields, enum_column_usage column_usage,
                  List<Item> *sum_func_list, List<Item> *pre_fix,
                  bool allow_sum_func)
{
  Item *item;
  enum_column_usage saved_column_usage= thd->column_usage;
  nesting_map save_allow_sum_func= thd->lex->allow_sum_func;
  List_iterator<Item> it(fields);
  bool save_is_item_list_lookup;
  bool make_pre_fix= (pre_fix && (pre_fix->elements == 0));
  DBUG_ENTER("setup_fields");
  DBUG_PRINT("enter", ("ref_pointer_array: %p", ref_pointer_array.array()));

  thd->column_usage= column_usage;
  DBUG_PRINT("info", ("thd->column_usage: %d", thd->column_usage));
  /*
    The following 2 conditions should always be true (they were added due to
    an error present only in 10.3):
    1) nest_level should be 0 or positive;
    2) the nest level of all SELECTs on the same level should be equal to
       that of the first SELECT on this level (and each other).
  */
  DBUG_ASSERT(thd->lex->current_select->nest_level >= 0);
  DBUG_ASSERT(thd->lex->current_select->master_unit()->first_select()
                ->nest_level == thd->lex->current_select->nest_level);
  if (allow_sum_func)
    thd->lex->allow_sum_func.set_bit(thd->lex->current_select->nest_level);
  thd->where= THD::DEFAULT_WHERE;
  save_is_item_list_lookup= thd->lex->current_select->is_item_list_lookup;
  thd->lex->current_select->is_item_list_lookup= 0;

  /*
    To prevent a failure on forward lookups we fill the array with zeroes;
    then, if we get a pointer to zero after find_item_in_list, we will know
    that it is a forward lookup. Another way to solve this problem would be
    to fill the array with pointers to the list, but that would be slower.

    TODO: remove it when (if) we make one list for allfields and
    ref_pointer_array.
  */
  if (!ref_pointer_array.is_null())
  {
    DBUG_ASSERT(ref_pointer_array.size() >= fields.elements);
    memset(ref_pointer_array.array(), 0, sizeof(Item *) * fields.elements);
  }

  /*
    We call set_entry() there (before fix_fields() of the whole list of field
    items) because:
    1) the list of field items has the same order as in the query, and the
       Item_func_get_user_var item may go before the Item_func_set_user_var:
          SELECT @a, @a := 10 FROM t;
    2) The entry->update_query_id value controls constantness of
       Item_func_get_user_var items, so in the presence of
       Item_func_set_user_var items we have to refresh their entries before
       fixing of Item_func_get_user_var items.
  */
  List_iterator<Item_func_set_user_var> li(thd->lex->set_var_list);
  Item_func_set_user_var *var;
  while ((var= li++))
    var->set_entry(thd, FALSE);

  Ref_ptr_array ref= ref_pointer_array;
  thd->lex->current_select->cur_pos_in_select_list= 0;
  while ((item= it++))
  {
    if (make_pre_fix)
      pre_fix->push_back(item, thd->stmt_arena->mem_root);

    if (item->fix_fields_if_needed_for_scalar(thd, it.ref()))
    {
      thd->lex->current_select->is_item_list_lookup= save_is_item_list_lookup;
      thd->lex->allow_sum_func= save_allow_sum_func;
      thd->column_usage= saved_column_usage;
      DBUG_PRINT("info", ("thd->column_usage: %d", thd->column_usage));
      DBUG_RETURN(TRUE); /* purecov: inspected */
    }
    item= *(it.ref()); // Item might have changed in fix_fields()
    if (!ref.is_null())
    {
      ref[0]= item;
      ref.pop_front();
    }
    /*
      split_sum_func() must be called for Window Function items, see
      Item_window_func::split_sum_func.
    */
    if (sum_func_list &&
        ((item->with_sum_func() && item->type() != Item::SUM_FUNC_ITEM) ||
         item->with_window_func))
    {
      item->split_sum_func(thd, ref_pointer_array, *sum_func_list,
                           SPLIT_SUM_SELECT);
    }
    thd->lex->current_select->select_list_tables|= item->used_tables();
    thd->lex->used_tables|= item->used_tables();
    thd->lex->current_select->cur_pos_in_select_list++;
  }
  thd->lex->current_select->is_item_list_lookup= save_is_item_list_lookup;
  thd->lex->current_select->cur_pos_in_select_list= UNDEF_POS;

  thd->lex->allow_sum_func= save_allow_sum_func;
  thd->column_usage= saved_column_usage;
  DBUG_PRINT("info", ("thd->column_usage: %d", thd->column_usage));
  DBUG_RETURN(MY_TEST(thd->is_error()));
}
0
[ "CWE-416" ]
server
0beed9b5e933f0ff79b3bb346524f7a451d14e38
334,495,340,346,571,860,000,000,000,000,000,000,000
110
MDEV-28097 use-after-free when WHERE has subquery with an outer reference in HAVING when resolving WHERE and ON clauses, do not look in SELECT list/aliases.
static int cmpBitmap(unsigned char *buf, int width, int pitch, int height,
                     int pf, int flags, int gray2rgb)
{
  int roffset = tjRedOffset[pf];
  int goffset = tjGreenOffset[pf];
  int boffset = tjBlueOffset[pf];
  int aoffset = tjAlphaOffset[pf];
  int ps = tjPixelSize[pf];
  int i, j;

  for (j = 0; j < height; j++) {
    int row = (flags & TJFLAG_BOTTOMUP) ? height - j - 1 : j;

    for (i = 0; i < width; i++) {
      unsigned char r = (i * 256 / width) % 256;
      unsigned char g = (j * 256 / height) % 256;
      unsigned char b = (j * 256 / height + i * 256 / width) % 256;

      if (pf == TJPF_GRAY) {
        if (buf[row * pitch + i * ps] != b)
          return 0;
      } else if (pf == TJPF_CMYK) {
        unsigned char rf, gf, bf;

        cmyk_to_rgb(buf[row * pitch + i * ps + 0],
                    buf[row * pitch + i * ps + 1],
                    buf[row * pitch + i * ps + 2],
                    buf[row * pitch + i * ps + 3], &rf, &gf, &bf);
        if (gray2rgb) {
          if (rf != b || gf != b || bf != b)
            return 0;
        } else if (rf != r || gf != g || bf != b)
          return 0;
      } else {
        if (gray2rgb) {
          if (buf[row * pitch + i * ps + roffset] != b ||
              buf[row * pitch + i * ps + goffset] != b ||
              buf[row * pitch + i * ps + boffset] != b)
            return 0;
        } else if (buf[row * pitch + i * ps + roffset] != r ||
                   buf[row * pitch + i * ps + goffset] != g ||
                   buf[row * pitch + i * ps + boffset] != b)
          return 0;
        if (aoffset >= 0 && buf[row * pitch + i * ps + aoffset] != 0xFF)
          return 0;
      }
    }
  }
  return 1;
}
0
[ "CWE-787" ]
libjpeg-turbo
2a9e3bd7430cfda1bc812d139e0609c6aca0b884
274,841,222,890,135,900,000,000,000,000,000,000,000
49
TurboJPEG: Properly handle gigapixel images Prevent several integer overflow issues and subsequent segfaults that occurred when attempting to compress or decompress gigapixel images with the TurboJPEG API: - Modify tjBufSize(), tjBufSizeYUV2(), and tjPlaneSizeYUV() to avoid integer overflow when computing the return values and to return an error if such an overflow is unavoidable. - Modify tjunittest to validate the above. - Modify tjCompress2(), tjEncodeYUVPlanes(), tjDecompress2(), and tjDecodeYUVPlanes() to avoid integer overflow when computing the row pointers in the 64-bit TurboJPEG C API. - Modify TJBench (both C and Java versions) to avoid overflowing the size argument to malloc()/new and to fail gracefully if such an overflow is unavoidable. In general, this allows gigapixel images to be accommodated by the 64-bit TurboJPEG C API when using automatic JPEG buffer (re)allocation. Such images cannot currently be accommodated without automatic JPEG buffer (re)allocation, due to the fact that tjAlloc() accepts a 32-bit integer argument (oops.) Such images cannot be accommodated in the TurboJPEG Java API due to the fact that Java always uses a signed 32-bit integer as an array index. Fixes #361
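The core of the described fix is computing a buffer size without letting the multiplication wrap. A hedged sketch of that computation (this is the pattern, not the actual tjBufSize() code):

#include <limits.h>

/* Returns -1 instead of a wrapped value when w * h * bpp would overflow. */
static int safe_image_size(unsigned long long w, unsigned long long h,
                           unsigned long long bytes_per_pixel,
                           unsigned long long *out)
{
  if (w == 0 || h == 0 || bytes_per_pixel == 0)
    return -1;
  if (w > ULLONG_MAX / h)
    return -1;                        /* w * h would overflow */
  if (w * h > ULLONG_MAX / bytes_per_pixel)
    return -1;                        /* (w * h) * bpp would overflow */
  *out = w * h * bytes_per_pixel;
  return 0;
}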
struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
{
	struct dentry *result;

	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));

	if (inode)
		spin_lock(&inode->i_lock);
	result = __d_instantiate_unique(entry, inode);
	if (inode)
		spin_unlock(&inode->i_lock);

	if (!result) {
		security_d_instantiate(entry, inode);
		return NULL;
	}

	BUG_ON(!d_unhashed(result));
	iput(inode);
	return result;
}
0
[ "CWE-401", "CWE-254" ]
linux
cde93be45a8a90d8c264c776fab63487b5038a65
87,195,470,472,394,740,000,000,000,000,000,000,000
21
dcache: Handle escaped paths in prepend_path A rename can result in a dentry that by walking up d_parent will never reach its mnt_root. For lack of a better term I call this an escaped path. prepend_path is called by four different functions __d_path, d_absolute_path, d_path, and getcwd. __d_path only wants to see paths that are connected to the root it passes in. So __d_path needs prepend_path to return an error. d_absolute_path similarly wants to see paths that are connected to some root. Escaped paths are not connected to any mnt_root so d_absolute_path needs prepend_path to return an error greater than 1. So escaped paths will be treated like paths on lazily unmounted mounts. getcwd needs to prepend "(unreachable)" so getcwd also needs prepend_path to return an error. d_path is the interesting hold out. d_path just wants to print something, and does not care about the weird cases. Which raises the question of what should be printed? Given that <escaped_path>/<anything> should result in -ENOENT, I believe it is desirable for escaped paths to be printed as empty paths, as there are not really any meaningful path components when considered from the perspective of a mount tree. So tweak prepend_path to return an empty path with a new error code of 3 when it encounters an escaped path. Signed-off-by: "Eric W. Biederman" <[email protected]> Signed-off-by: Al Viro <[email protected]>
returncmd(int argc, char **argv)
{
	int skip;
	int status;

	/*
	 * If called outside a function, do what ksh does;
	 * skip the rest of the file.
	 */
	if (argv[1]) {
		skip = SKIPFUNC;
		status = number(argv[1]);
	} else {
		skip = SKIPFUNCDEF;
		status = exitstatus;
	}
	evalskip = skip;

	return status;
}
0
[]
dash
29d6f2148f10213de4e904d515e792d2cf8c968e
146,153,103,188,811,850,000,000,000,000,000,000,000
20
eval: Check nflag in evaltree instead of cmdloop This patch moves the nflag check from cmdloop into evaltree. This is so that nflag will be in force even if we enter the shell via a path other than cmdloop, e.g., through sh -c. Reported-by: Joey Hess <[email protected]> Signed-off-by: Herbert Xu <[email protected]>
bool allowed_new_user_problem_entry(uid_t uid, const char *name, const char *value)
{
    /* Allow root to create everything */
    if (uid == 0)
        return true;

    /* Permit non-root users to create everything except: analyzer and type */
    if (strcmp(name, FILENAME_ANALYZER) != 0 && strcmp(name, FILENAME_TYPE) != 0
        /* compatibility value used in abrt-server */
        && strcmp(name, "basename") != 0)
        return true;

    /* Permit non-root users to create all types except: C/C++, Koops, vmcore and xorg */
    if (strcmp(value, "CCpp") != 0 && strcmp(value, "Kerneloops") != 0
        && strcmp(value, "vmcore") != 0 && strcmp(value, "xorg") != 0)
        return true;

    error_msg("Only root is permitted to create element '%s' containing '%s'",
              name, value);
    return false;
}
0
[ "CWE-59" ]
abrt
7417505e1d93cc95ec648b74e3c801bc67aacb9f
164,183,507,613,390,470,000,000,000,000,000,000,000
23
daemon, dbus: allow only root to create CCpp, Koops, vmcore and xorg Florian Weimer <[email protected]>: This prevents users from feeding things that are not actually coredumps and excerpts from /proc to these analyzers. For example, it should not be possible to trigger a rule with “EVENT=post-create analyzer=CCpp” using NewProblem Related: #1212861 Signed-off-by: Jakub Filak <[email protected]>
open_io(const char *hostname, int port)
{
  IPAddr ip;

  /* Note, this call could block for a while */
  if (DNS_Name2IPAddress(hostname, &ip) != DNS_Success) {
    fprintf(stderr, "Could not get IP address for %s\n", hostname);
    exit(1);
  }

  memset(&his_addr, 0, sizeof (his_addr));

  switch (ip.family) {
    case IPADDR_INET4:
      sock_fd = socket(AF_INET, SOCK_DGRAM, 0);

      his_addr.in4.sin_family = AF_INET;
      his_addr.in4.sin_addr.s_addr = htonl(ip.addr.in4);
      his_addr.in4.sin_port = htons(port);
      his_addr_len = sizeof (his_addr.in4);
      break;
#ifdef HAVE_IPV6
    case IPADDR_INET6:
      sock_fd = socket(AF_INET6, SOCK_DGRAM, 0);

      his_addr.in6.sin6_family = AF_INET6;
      memcpy(his_addr.in6.sin6_addr.s6_addr, ip.addr.in6,
             sizeof (his_addr.in6.sin6_addr.s6_addr));
      his_addr.in6.sin6_port = htons(port);
      his_addr_len = sizeof (his_addr.in6);
      break;
#endif
    default:
      assert(0);
  }

  if (sock_fd < 0) {
    perror("Can't create socket");
    exit(1);
  }
}
0
[ "CWE-189" ]
chrony
7712455d9aa33d0db0945effaa07e900b85987b1
87,275,271,899,853,120,000,000,000,000,000,000,000
41
Fix buffer overflow when processing crafted command packets When the length of the REQ_SUBNETS_ACCESSED, REQ_CLIENT_ACCESSES command requests and the RPY_SUBNETS_ACCESSED, RPY_CLIENT_ACCESSES, RPY_CLIENT_ACCESSES_BY_INDEX, RPY_MANUAL_LIST command replies is calculated, the number of items stored in the packet is not validated. A crafted command request/reply can be used to crash the server/client. Only clients allowed by cmdallow (by default only localhost) can crash the server. With chrony versions 1.25 and 1.26 this bug has a smaller security impact as the server requires the clients to be authenticated in order to process the subnet and client accesses commands. In 1.27 and 1.28, however, the invalid calculated length is included also in the authentication check which may cause another crash.
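The missing validation the message describes is a length computation driven by an unchecked item count from the wire. A hedged sketch under assumed names (MAX_ITEMS_PER_PACKET and the helper are illustrative, not the chrony protocol constants):

#include <stddef.h>
#include <stdint.h>

#define MAX_ITEMS_PER_PACKET 64   /* assumed protocol limit */

/* Validate the count taken from the packet before computing a length. */
static int checked_reply_length(uint32_t n_items, size_t item_size,
                                size_t header_size, size_t *out_len)
{
  if (n_items > MAX_ITEMS_PER_PACKET)
    return -1;                         /* crafted count: reject */
  *out_len = header_size + (size_t)n_items * item_size;
  return 0;
}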
msgparse_bucket_remove(struct msg_parse* msg, struct rrset_parse* rrset)
{
	struct rrset_parse** p;
	p = &msg->hashtable[ rrset->hash & (PARSE_TABLE_SIZE-1) ];
	while(*p) {
		if(*p == rrset) {
			*p = rrset->rrset_bucket_next;
			return;
		}
		p = &( (*p)->rrset_bucket_next );
	}
}
0
[ "CWE-400" ]
unbound
ba0f382eee814e56900a535778d13206b86b6d49
35,593,536,133,140,220,000,000,000,000,000,000,000
12
- CVE-2020-12662 Unbound can be tricked into amplifying an incoming query into a large number of queries directed to a target. - CVE-2020-12663 Malformed answers from upstream name servers can be used to make Unbound unresponsive.
static void init_header(struct ctl_table_header *head,
	struct ctl_table_root *root, struct ctl_table_set *set,
	struct ctl_node *node, struct ctl_table *table)
{
	head->ctl_table = table;
	head->ctl_table_arg = table;
	head->used = 0;
	head->count = 1;
	head->nreg = 1;
	head->unregistering = NULL;
	head->root = root;
	head->set = set;
	head->parent = NULL;
	head->node = node;
	if (node) {
		struct ctl_table *entry;
		for (entry = table; entry->procname; entry++, node++)
			node->header = head;
	}
}
0
[ "CWE-20", "CWE-399" ]
linux
93362fa47fe98b62e4a34ab408c4a418432e7939
141,111,821,754,644,130,000,000,000,000,000,000,000
20
sysctl: Drop reference added by grab_header in proc_sys_readdir Fixes CVE-2016-9191, proc_sys_readdir doesn't drop reference added by grab_header when return from !dir_emit_dots path. It can cause any path called unregister_sysctl_table will wait forever. The calltrace of CVE-2016-9191: [ 5535.960522] Call Trace: [ 5535.963265] [<ffffffff817cdaaf>] schedule+0x3f/0xa0 [ 5535.968817] [<ffffffff817d33fb>] schedule_timeout+0x3db/0x6f0 [ 5535.975346] [<ffffffff817cf055>] ? wait_for_completion+0x45/0x130 [ 5535.982256] [<ffffffff817cf0d3>] wait_for_completion+0xc3/0x130 [ 5535.988972] [<ffffffff810d1fd0>] ? wake_up_q+0x80/0x80 [ 5535.994804] [<ffffffff8130de64>] drop_sysctl_table+0xc4/0xe0 [ 5536.001227] [<ffffffff8130de17>] drop_sysctl_table+0x77/0xe0 [ 5536.007648] [<ffffffff8130decd>] unregister_sysctl_table+0x4d/0xa0 [ 5536.014654] [<ffffffff8130deff>] unregister_sysctl_table+0x7f/0xa0 [ 5536.021657] [<ffffffff810f57f5>] unregister_sched_domain_sysctl+0x15/0x40 [ 5536.029344] [<ffffffff810d7704>] partition_sched_domains+0x44/0x450 [ 5536.036447] [<ffffffff817d0761>] ? __mutex_unlock_slowpath+0x111/0x1f0 [ 5536.043844] [<ffffffff81167684>] rebuild_sched_domains_locked+0x64/0xb0 [ 5536.051336] [<ffffffff8116789d>] update_flag+0x11d/0x210 [ 5536.057373] [<ffffffff817cf61f>] ? mutex_lock_nested+0x2df/0x450 [ 5536.064186] [<ffffffff81167acb>] ? cpuset_css_offline+0x1b/0x60 [ 5536.070899] [<ffffffff810fce3d>] ? trace_hardirqs_on+0xd/0x10 [ 5536.077420] [<ffffffff817cf61f>] ? mutex_lock_nested+0x2df/0x450 [ 5536.084234] [<ffffffff8115a9f5>] ? css_killed_work_fn+0x25/0x220 [ 5536.091049] [<ffffffff81167ae5>] cpuset_css_offline+0x35/0x60 [ 5536.097571] [<ffffffff8115aa2c>] css_killed_work_fn+0x5c/0x220 [ 5536.104207] [<ffffffff810bc83f>] process_one_work+0x1df/0x710 [ 5536.110736] [<ffffffff810bc7c0>] ? process_one_work+0x160/0x710 [ 5536.117461] [<ffffffff810bce9b>] worker_thread+0x12b/0x4a0 [ 5536.123697] [<ffffffff810bcd70>] ? process_one_work+0x710/0x710 [ 5536.130426] [<ffffffff810c3f7e>] kthread+0xfe/0x120 [ 5536.135991] [<ffffffff817d4baf>] ret_from_fork+0x1f/0x40 [ 5536.142041] [<ffffffff810c3e80>] ? kthread_create_on_node+0x230/0x230 One cgroup maintainer mentioned that "cgroup is trying to offline a cpuset css, which takes place under cgroup_mutex. The offlining ends up trying to drain active usages of a sysctl table which apprently is not happening." The real reason is that proc_sys_readdir doesn't drop reference added by grab_header when return from !dir_emit_dots path. So this cpuset offline path will wait here forever. See here for details: http://www.openwall.com/lists/oss-security/2016/11/04/13 Fixes: f0c3b5093add ("[readdir] convert procfs") Cc: [email protected] Reported-by: CAI Qian <[email protected]> Tested-by: Yang Shukui <[email protected]> Signed-off-by: Zhou Chengming <[email protected]> Acked-by: Al Viro <[email protected]> Signed-off-by: Eric W. Biederman <[email protected]>
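The bug class here is an early return that skips the reference drop paired with an earlier grab. A hedged, simplified sketch with hypothetical helper names (grab_header/drop_header/emit_dots are stand-ins, not the kernel signatures):

struct header { int refcount; };

static struct header g_head = { 0 };

static struct header *grab_header(void) { g_head.refcount++; return &g_head; }
static void drop_header(struct header *h) { h->refcount--; }
static int emit_dots(void) { return 0; } /* pretend the caller asked us to stop */

static int readdir_sketch(void)
{
	struct header *head = grab_header();

	if (!emit_dots()) {
		drop_header(head); /* the fix: drop the reference on this path too */
		return 0;
	}
	/* ... emit the remaining entries ... */
	drop_header(head);
	return 0;
}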
PHP_FUNCTION(mb_eregi_replace) { _php_mb_regex_ereg_replace_exec(INTERNAL_FUNCTION_PARAM_PASSTHRU, ONIG_OPTION_IGNORECASE, 0); }
0
[ "CWE-415" ]
php-src
5b597a2e5b28e2d5a52fc1be13f425f08f47cb62
270,890,334,185,320,400,000,000,000,000,000,000,000
4
Fix bug #72402: _php_mb_regex_ereg_replace_exec - double free
__acquires(bitlock)
{
	struct va_format vaf;
	va_list args;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	es->s_last_error_ino = cpu_to_le32(ino);
	es->s_last_error_block = cpu_to_le64(block);
	__save_error_info(sb, function, line);

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
	       sb->s_id, function, line, grp);
	if (ino)
		printk(KERN_CONT "inode %lu: ", ino);
	if (block)
		printk(KERN_CONT "block %llu:", (unsigned long long) block);
	printk(KERN_CONT "%pV\n", &vaf);
	va_end(args);

	if (test_opt(sb, ERRORS_CONT)) {
		ext4_commit_super(sb, 0);
		return;
	}

	ext4_unlock_group(sb, grp);
	ext4_handle_error(sb);
	/*
	 * We only get here in the ERRORS_RO case; relocking the group
	 * may be dangerous, but nothing bad will happen since the
	 * filesystem will have already been marked read/only and the
	 * journal has been aborted.  We return 1 as a hint to callers
	 * who might want to use the return value from
	 * ext4_grp_locked_error() to distinguish between the
	 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
	 * aggressively from the ext4 function in question, with a
	 * more appropriate error code.
	 */
	ext4_lock_group(sb, grp);
	return;
}
0
[ "CWE-703" ]
linux
0449641130f5652b344ef6fa39fa019d7e94660a
193,792,844,946,714,850,000,000,000,000,000,000,000
44
ext4: init timer earlier to avoid a kernel panic in __save_error_info During mount, when we fail to open journal inode or root inode, the __save_error_info will mod_timer. But actually s_err_report isn't initialized yet and the kernel oopses. The detailed information can be found https://bugzilla.kernel.org/show_bug.cgi?id=32082. The best way is to check whether the timer s_err_report is initialized or not. But it seems that in include/linux/timer.h, we can't find a good function to check the status of this timer, so this patch just moves the initialization of s_err_report earlier so that we can avoid the kernel panic. The corresponding del_timer is also added in the error path. Reported-by: Sami Liedes <[email protected]> Signed-off-by: Tao Ma <[email protected]> Signed-off-by: "Theodore Ts'o" <[email protected]>
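The ordering fix has two halves: initialize the timer before any path that can arm it, and disarm it on the error path. A hedged, simplified sketch with hypothetical names (init_timer_sketch etc. stand in for the kernel timer API):

struct timer { int initialized; int armed; };
struct super_sketch { struct timer err_timer; };

static void init_timer_sketch(struct timer *t) { t->initialized = 1; }
static void del_timer_sketch(struct timer *t) { t->armed = 0; }
static int open_journal_sketch(struct super_sketch *sb) { (void)sb; return -1; }

static int mount_sketch(struct super_sketch *sb)
{
	init_timer_sketch(&sb->err_timer); /* moved earlier: init before use */
	if (open_journal_sketch(sb) < 0)
		goto err;                  /* error handling may arm the timer */
	return 0;
err:
	del_timer_sketch(&sb->err_timer);  /* added: disarm before bailing out */
	return -1;
}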
static int checkout_blob(
	checkout_data *data,
	const git_diff_file *file)
{
	git_buf *fullpath;
	struct stat st;
	int error = 0;

	if (checkout_target_fullpath(&fullpath, data, file->path) < 0)
		return -1;

	if ((data->strategy & GIT_CHECKOUT_UPDATE_ONLY) != 0) {
		int rval = checkout_safe_for_update_only(
			data, fullpath->ptr, file->mode);

		if (rval <= 0)
			return rval;
	}

	error = checkout_write_content(
		data, &file->id, fullpath->ptr, NULL, file->mode, &st);

	/* update the index unless prevented */
	if (!error && (data->strategy & GIT_CHECKOUT_DONT_UPDATE_INDEX) == 0)
		error = checkout_update_index(data, file, &st);

	/* update the submodule data if this was a new .gitmodules file */
	if (!error && strcmp(file->path, ".gitmodules") == 0)
		data->reload_submodules = true;

	return error;
}
0
[ "CWE-20", "CWE-706" ]
libgit2
64c612cc3e25eff5fb02c59ef5a66ba7a14751e4
285,484,738,749,676,940,000,000,000,000,000,000,000
32
Protect against 8.3 "short name" attacks also on Linux/macOS The Windows Subsystem for Linux (WSL) is getting increasingly popular, in particular because it makes it _so_ easy to run Linux software on Windows' files, via the auto-mounted Windows drives (`C:\` is mapped to `/mnt/c/`, no need to set that up manually). Unfortunately, files/directories on the Windows drives can be accessed via their _short names_, if that feature is enabled (which it is on the `C:` drive by default). Which means that we have to safeguard even our Linux users against the short name attacks. Further, while the default options of CIFS/SMB-mounts seem to disallow accessing files on network shares via their short names on Linux/macOS, it _is_ possible to do so with the right options. So let's just safe-guard against short name attacks _everywhere_. Signed-off-by: Johannes Schindelin <[email protected]>
MagickPrivate const Quantum *GetVirtualPixelsFromNexus(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    length,
    number_pixels;

  NexusInfo
    **magick_restrict virtual_nexus;

  Quantum
    *magick_restrict pixels,
    virtual_pixel[MaxPixelChannels];

  RectangleInfo
    region;

  register const Quantum
    *magick_restrict p;

  register const void
    *magick_restrict r;

  register Quantum
    *magick_restrict q;

  register ssize_t
    i,
    u;

  register unsigned char
    *magick_restrict s;

  ssize_t
    v;

  void
    *magick_restrict virtual_metacontent;

  /*
    Acquire pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((const Quantum *) NULL);
  region.x=x;
  region.y=y;
  region.width=columns;
  region.height=rows;
  pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,nexus_info,
    exception);
  if (pixels == (Quantum *) NULL)
    return((const Quantum *) NULL);
  q=pixels;
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
    nexus_info->region.width-1L;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
    if ((x >= 0) && ((ssize_t) (x+columns) <= (ssize_t) cache_info->columns) &&
        (y >= 0) && ((ssize_t) (y+rows) <= (ssize_t) cache_info->rows))
      {
        MagickBooleanType
          status;

        /*
          Pixel request is inside cache extents.
        */
        if (nexus_info->authentic_pixel_cache != MagickFalse)
          return(q);
        status=ReadPixelCachePixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          return((const Quantum *) NULL);
        if (cache_info->metacontent_extent != 0)
          {
            status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception);
            if (status == MagickFalse)
              return((const Quantum *) NULL);
          }
        return(q);
      }
  /*
    Pixel request is outside cache extents.
  */
  s=(unsigned char *) nexus_info->metacontent;
  virtual_nexus=AcquirePixelCacheNexus(1);
  if (virtual_nexus == (NexusInfo **) NULL)
    {
      if (virtual_nexus != (NexusInfo **) NULL)
        virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1);
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "UnableToGetCacheNexus","`%s'",image->filename);
      return((const Quantum *) NULL);
    }
  (void) ResetMagickMemory(virtual_pixel,0,cache_info->number_channels*
    sizeof(*virtual_pixel));
  virtual_metacontent=(void *) NULL;
  switch (virtual_pixel_method)
  {
    case BackgroundVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case MaskVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
    case HorizontalTileVirtualPixelMethod:
    case VerticalTileVirtualPixelMethod:
    {
      if (cache_info->metacontent_extent != 0)
        {
          /*
            Acquire a metacontent buffer.
          */
          virtual_metacontent=(void *) AcquireQuantumMemory(1,
            cache_info->metacontent_extent);
          if (virtual_metacontent == (void *) NULL)
            {
              virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1);
              (void) ThrowMagickException(exception,GetMagickModule(),
                CacheError,"UnableToGetCacheNexus","`%s'",image->filename);
              return((const Quantum *) NULL);
            }
          (void) ResetMagickMemory(virtual_metacontent,0,
            cache_info->metacontent_extent);
        }
      switch (virtual_pixel_method)
      {
        case BlackVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,0,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case GrayVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange/2,
              virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case TransparentVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,0,virtual_pixel);
          SetPixelAlpha(image,TransparentAlpha,virtual_pixel);
          break;
        }
        case MaskVirtualPixelMethod:
        case WhiteVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        default:
        {
          SetPixelRed(image,ClampToQuantum(image->background_color.red),
            virtual_pixel);
          SetPixelGreen(image,ClampToQuantum(image->background_color.green),
            virtual_pixel);
          SetPixelBlue(image,ClampToQuantum(image->background_color.blue),
            virtual_pixel);
          SetPixelBlack(image,ClampToQuantum(image->background_color.black),
            virtual_pixel);
          SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha),
            virtual_pixel);
          break;
        }
      }
      break;
    }
    default:
      break;
  }
  for (v=0; v < (ssize_t) rows; v++)
  {
    ssize_t
      y_offset;

    y_offset=y+v;
    if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
        (virtual_pixel_method == UndefinedVirtualPixelMethod))
      y_offset=EdgeY(y_offset,cache_info->rows);
    for (u=0; u < (ssize_t) columns; u+=length)
    {
      ssize_t
        x_offset;

      x_offset=x+u;
      length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
      if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
          ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
          (length == 0))
        {
          MagickModulo
            x_modulo,
            y_modulo;

          /*
            Transfer a single pixel.
          */
          length=(MagickSizeType) 1;
          switch (virtual_pixel_method)
          {
            case EdgeVirtualPixelMethod:
            default:
            {
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),
                EdgeY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case RandomVirtualPixelMethod:
            {
              if (cache_info->random_info == (RandomInfo *) NULL)
                cache_info->random_info=AcquireRandomInfo();
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                RandomX(cache_info->random_info,cache_info->columns),
                RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
                *virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case DitherVirtualPixelMethod:
            {
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                DitherX(x_offset,cache_info->columns),
                DitherY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case TileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case MirrorVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              if ((x_modulo.quotient & 0x01) == 1L)
                x_modulo.remainder=(ssize_t) cache_info->columns-
                  x_modulo.remainder-1L;
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if ((y_modulo.quotient & 0x01) == 1L)
                y_modulo.remainder=(ssize_t) cache_info->rows-
                  y_modulo.remainder-1L;
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case HorizontalTileEdgeVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
                *virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case VerticalTileEdgeVirtualPixelMethod:
            {
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
                *virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case BackgroundVirtualPixelMethod:
            case BlackVirtualPixelMethod:
            case GrayVirtualPixelMethod:
            case TransparentVirtualPixelMethod:
            case MaskVirtualPixelMethod:
            case WhiteVirtualPixelMethod:
            {
              p=virtual_pixel;
              r=virtual_metacontent;
              break;
            }
            case CheckerTileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case HorizontalTileVirtualPixelMethod:
            {
              if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case VerticalTileVirtualPixelMethod:
            {
              if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
          }
          if (p == (const Quantum *) NULL)
            break;
          (void) memcpy(q,p,(size_t) length*cache_info->number_channels*
            sizeof(*p));
          q+=cache_info->number_channels;
          if ((s != (void *) NULL) && (r != (const void *) NULL))
            {
              (void) memcpy(s,r,(size_t) cache_info->metacontent_extent);
              s+=cache_info->metacontent_extent;
            }
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,*virtual_nexus,exception);
      if (p == (const Quantum *) NULL)
        break;
      r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
      (void) memcpy(q,p,(size_t) length*cache_info->number_channels*sizeof(*p));
      q+=length*cache_info->number_channels;
      if ((r != (void *) NULL) && (s != (const void *) NULL))
        {
          (void) memcpy(s,r,(size_t) length);
          s+=length*cache_info->metacontent_extent;
        }
    }
    if (u < (ssize_t) columns)
      break;
  }
  /*
    Free resources.
  */
  if (virtual_metacontent != (void *) NULL)
    virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent);
  virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1);
  if (v < (ssize_t) rows)
    return((const Quantum *) NULL);
  return(pixels);
}
0
[ "CWE-119", "CWE-787" ]
ImageMagick
aecd0ada163a4d6c769cec178955d5f3e9316f2f
133,478,555,662,816,800,000,000,000,000,000,000,000
397
Set pixel cache to undefined if any resource limit is exceeded
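The mirror/tile virtual-pixel handling in the function above reduces to a quotient/remainder computation over the image extent. A minimal standalone sketch of that arithmetic follows (the helper name and layout are illustrative, not ImageMagick's actual API):

#include <stdio.h>

/* Map an arbitrary (possibly negative) offset into [0, extent) and
   report how many whole tiles away it lies; this mirrors the
   quotient/remainder idiom used by VirtualPixelModulo(). */
static void virtual_pixel_modulo(long offset, long extent,
                                 long *quotient, long *remainder)
{
  *quotient = offset / extent;
  *remainder = offset % extent;
  if (*remainder < 0) {        /* keep the remainder non-negative */
    (*quotient)--;
    *remainder += extent;
  }
}

int main(void)
{
  long q, r, columns = 100;
  virtual_pixel_modulo(-7, columns, &q, &r);
  /* TileVirtualPixelMethod samples column r directly; the mirror
     method flips it on odd tiles, as in the code above. */
  if (q & 1)
    r = columns - r - 1;
  printf("quotient=%ld remainder=%ld\n", q, r);  /* -1, 6 */
  return 0;
}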
njs_promise_prototype_then(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused) { njs_int_t ret; njs_value_t *promise, *fulfilled, *rejected, constructor; njs_function_t *function; njs_promise_capability_t *capability; promise = njs_argument(args, 0); if (njs_slow_path(!njs_is_promise(promise))) { goto failed; } function = njs_promise_create_function(vm, sizeof(njs_promise_context_t)); function->u.native = njs_promise_constructor; njs_set_function(&constructor, function); ret = njs_value_species_constructor(vm, promise, &constructor, &constructor); if (njs_slow_path(ret != NJS_OK)) { return ret; } capability = njs_promise_new_capability(vm, &constructor); if (njs_slow_path(capability == NULL)) { return NJS_ERROR; } fulfilled = njs_arg(args, nargs, 1); rejected = njs_arg(args, nargs, 2); return njs_promise_perform_then(vm, promise, fulfilled, rejected, capability); failed: njs_type_error(vm, "required a promise object"); return NJS_ERROR; }
0
[ "CWE-416", "CWE-703" ]
njs
31ed93a5623f24ca94e6d47e895ba735d9d97d46
145,600,589,468,990,930,000,000,000,000,000,000,000
42
Fixed aggregation methods of the Promise constructor with array-like objects. Previously, while iterating over an array-like object, the methods could be resolved with INVALID values. INVALID is a special internal type which should never be visible to ordinary functions. The fix is to ensure that absent elements are represented by the undefined value. The following methods were fixed: Promise.all(), Promise.allSettled(), Promise.any(), Promise.race(). This closes issue #483 on Github.
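The essence of that fix is that an engine-internal "invalid" slot must be converted to undefined before user code can observe it. A reduced model in plain C (the types below are illustrative stand-ins, not njs internals):

#include <stdio.h>

/* Model of the njs fix: absent array-like elements surface to user
   code as `undefined`, never as the internal INVALID marker. */
enum value_type { VALUE_INVALID = 0, VALUE_UNDEFINED, VALUE_NUMBER };

struct value { enum value_type type; double num; };

static void normalize(struct value *v)
{
  if (v->type == VALUE_INVALID)   /* absent array-like element */
    v->type = VALUE_UNDEFINED;    /* what Promise.all() must see */
}

int main(void)
{
  struct value v = { VALUE_INVALID, 0.0 };
  normalize(&v);
  printf("%s\n", v.type == VALUE_UNDEFINED ? "undefined" : "other");
  return 0;
}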
void HttpIntegrationTest::testRetryAttemptCountHeader() { auto host = config_helper_.createVirtualHost("host", "/test_retry"); host.set_include_request_attempt_count(true); host.set_include_attempt_count_in_response(true); config_helper_.addVirtualHost(host); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); auto response = codec_client_->makeRequestWithBody( Http::TestRequestHeaderMapImpl{{":method", "POST"}, {":path", "/test_retry"}, {":scheme", "http"}, {":authority", "host"}, {"x-forwarded-for", "10.0.0.1"}, {"x-envoy-retry-on", "5xx"}}, 1024); waitForNextUpstreamRequest(); upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "503"}}, false); EXPECT_EQ(atoi(std::string(upstream_request_->headers().getEnvoyAttemptCountValue()).c_str()), 1); if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP1) { ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); } else { ASSERT_TRUE(upstream_request_->waitForReset()); } waitForNextUpstreamRequest(); EXPECT_EQ(atoi(std::string(upstream_request_->headers().getEnvoyAttemptCountValue()).c_str()), 2); upstream_request_->encodeHeaders(default_response_headers_, false); upstream_request_->encodeData(512, true); response->waitForEndStream(); EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(1024U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(512U, response->body().size()); EXPECT_EQ(2, atoi(std::string(response->headers().getEnvoyAttemptCountValue()).c_str())); }
0
[ "CWE-400" ]
envoy
0e49a495826ea9e29134c1bd54fdeb31a034f40c
56,546,620,001,992,890,000,000,000,000,000,000,000
41
http/2: add stats and stream flush timeout (#139) This commit adds a new stream flush timeout to guard against a remote server that does not open window once an entire stream has been buffered for flushing. Additional stats have also been added to better understand the codecs view of active streams as well as amount of data buffered. Signed-off-by: Matt Klein <[email protected]>
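The guard described in that message is a classic write-side watchdog: arm a timer when data is buffered behind a closed flow-control window, and reset the stream if the window never reopens. A schematic sketch in C (hypothetical names and state, not Envoy's codec code):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct stream {
  size_t buffered_bytes;
  bool   flush_timer_armed;
};

static void on_data_buffered(struct stream *s, size_t n)
{
  s->buffered_bytes += n;
  if (!s->flush_timer_armed)
    s->flush_timer_armed = true;   /* arm the flush timeout */
}

static void on_window_opened(struct stream *s, size_t written)
{
  s->buffered_bytes -= written > s->buffered_bytes ? s->buffered_bytes
                                                   : written;
  if (s->buffered_bytes == 0)
    s->flush_timer_armed = false;  /* fully flushed: disarm */
}

static void on_flush_timeout(struct stream *s)
{
  if (s->buffered_bytes > 0)
    printf("reset stream: %zu bytes stuck\n", s->buffered_bytes);
}

int main(void)
{
  struct stream s = { 0, false };
  on_data_buffered(&s, 4096);
  on_flush_timeout(&s);            /* peer never opened its window */
  on_window_opened(&s, 4096);      /* normal path would disarm */
  return 0;
}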
static int tls_starttls_close (CONNECTION* conn) { int rc; rc = tls_socket_close (conn); conn->conn_read = raw_socket_read; conn->conn_write = raw_socket_write; conn->conn_close = raw_socket_close; conn->conn_poll = raw_socket_poll; return rc; }
0
[ "CWE-74" ]
mutt
c547433cdf2e79191b15c6932c57f1472bfb5ff4
167,040,711,258,695,880,000,000,000,000,000,000,000
12
Fix STARTTLS response injection attack. Thanks again to Damian Poddebniak and Fabian Ising from the Münster University of Applied Sciences for reporting this issue. Their summary in ticket 248 states the issue clearly: We found another STARTTLS-related issue in Mutt. Unfortunately, it affects SMTP, POP3 and IMAP. When the server responds with its "let's do TLS now message", e.g. A OK begin TLS\r\n in IMAP or +OK begin TLS\r\n in POP3, Mutt will also read any data after the \r\n and save it into some internal buffer for later processing. This is problematic, because a MITM attacker can inject arbitrary responses. There is a nice blogpost by Wietse Venema about a "command injection" in postfix (http://www.postfix.org/CVE-2011-0411.html). What we have here is the problem in reverse, i.e. not a command injection, but a "response injection." This commit fixes the issue by clearing the CONNECTION input buffer in mutt_ssl_starttls(). To make backporting this fix easier, the new functions only clear the top-level CONNECTION buffer; they don't handle nested buffering in mutt_zstrm.c or mutt_sasl.c. However both of those wrap the connection *after* STARTTLS, so this is currently okay. mutt_tunnel.c occurs before connecting, but it does not perform any nesting.
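The fix pattern here: after the server's "begin TLS" line is parsed, any bytes still sitting in the plaintext input buffer must be discarded before the TLS handshake, so nothing pre-injected by a MITM is later parsed as if it arrived over the encrypted channel. A minimal sketch (the field names are illustrative, not mutt's exact CONNECTION layout):

#include <stddef.h>
#include <string.h>

struct connection {
  char   inbuf[4096];
  size_t bufpos;      /* read cursor                 */
  size_t available;   /* bytes buffered but unparsed */
};

/* Drop any plaintext bytes buffered before TLS starts. */
static void drop_buffered_input(struct connection *conn)
{
  conn->bufpos = 0;
  conn->available = 0;
  memset(conn->inbuf, 0, sizeof(conn->inbuf));
}

int main(void)
{
  struct connection c = { "leftover-injected-data", 0, 22 };
  drop_buffered_input(&c);   /* called right after the STARTTLS reply */
  return 0;
}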
SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask, unsigned long, maxnode) { int err; nodemask_t nodes; unsigned short flags; flags = mode & MPOL_MODE_FLAGS; mode &= ~MPOL_MODE_FLAGS; if ((unsigned int)mode >= MPOL_MAX) return -EINVAL; if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES)) return -EINVAL; err = get_nodes(&nodes, nmask, maxnode); if (err) return err; return do_set_mempolicy(mode, flags, &nodes); }
0
[ "CWE-264" ]
linux-2.6
1a5a9906d4e8d1976b701f889d8f35d54b928f25
35,434,516,713,631,035,000,000,000,000,000,000,000
18
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode In some cases it may happen that pmd_none_or_clear_bad() is called with the mmap_sem hold in read mode. In those cases the huge page faults can allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a false positive from pmd_bad() that will not like to see a pmd materializing as trans huge. It's not khugepaged causing the problem, khugepaged holds the mmap_sem in write mode (and all those sites must hold the mmap_sem in read mode to prevent pagetables to go away from under them, during code review it seems vm86 mode on 32bit kernels requires that too unless it's restricted to 1 thread per process or UP builds). The race is only with the huge pagefaults that can convert a pmd_none() into a pmd_trans_huge(). Effectively all these pmd_none_or_clear_bad() sites running with mmap_sem in read mode are somewhat speculative with the page faults, and the result is always undefined when they run simultaneously. This is probably why it wasn't common to run into this. For example if the madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page fault, the hugepage will not be zapped, if the page fault runs first it will be zapped. Altering pmd_bad() not to error out if it finds hugepmds won't be enough to fix this, because zap_pmd_range would then proceed to call zap_pte_range (which would be incorrect if the pmd become a pmd_trans_huge()). The simplest way to fix this is to read the pmd in the local stack (regardless of what we read, no need of actual CPU barriers, only compiler barrier needed), and be sure it is not changing under the code that computes its value. Even if the real pmd is changing under the value we hold on the stack, we don't care. If we actually end up in zap_pte_range it means the pmd was not none already and it was not huge, and it can't become huge from under us (khugepaged locking explained above). All we need is to enforce that there is no way anymore that in a code path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad can run into a hugepmd. The overhead of a barrier() is just a compiler tweak and should not be measurable (I only added it for THP builds). I don't exclude different compiler versions may have prevented the race too by caching the value of *pmd on the stack (that hasn't been verified, but it wouldn't be impossible considering pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines and there's no external function called in between pmd_trans_huge and pmd_none_or_clear_bad). if (pmd_trans_huge(*pmd)) { if (next-addr != HPAGE_PMD_SIZE) { VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem)); split_huge_page_pmd(vma->vm_mm, pmd); } else if (zap_huge_pmd(tlb, vma, pmd, addr)) continue; /* fall through */ } if (pmd_none_or_clear_bad(pmd)) Because this race condition could be exercised without special privileges this was reported in CVE-2012-1179. The race was identified and fully explained by Ulrich who debugged it. I'm quoting his accurate explanation below, for reference. ====== start quote ======= mapcount 0 page_mapcount 1 kernel BUG at mm/huge_memory.c:1384! At some point prior to the panic, a "bad pmd ..." message similar to the following is logged on the console: mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7). The "bad pmd ..." message is logged by pmd_clear_bad() before it clears the page's PMD table entry. 
143 void pmd_clear_bad(pmd_t *pmd) 144 { -> 145 pmd_ERROR(*pmd); 146 pmd_clear(pmd); 147 } After the PMD table entry has been cleared, there is an inconsistency between the actual number of PMD table entries that are mapping the page and the page's map count (_mapcount field in struct page). When the page is subsequently reclaimed, __split_huge_page() detects this inconsistency. 1381 if (mapcount != page_mapcount(page)) 1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n", 1383 mapcount, page_mapcount(page)); -> 1384 BUG_ON(mapcount != page_mapcount(page)); The root cause of the problem is a race of two threads in a multithreaded process. Thread B incurs a page fault on a virtual address that has never been accessed (PMD entry is zero) while Thread A is executing an madvise() system call on a virtual address within the same 2 MB (huge page) range. virtual address space .---------------------. | | | | .-|---------------------| | | | | | |<-- B(fault) | | | 2 MB | |/////////////////////|-. huge < |/////////////////////| > A(range) page | |/////////////////////|-' | | | | | | '-|---------------------| | | | | '---------------------' - Thread A is executing an madvise(..., MADV_DONTNEED) system call on the virtual address range "A(range)" shown in the picture. sys_madvise // Acquire the semaphore in shared mode. down_read(&current->mm->mmap_sem) ... madvise_vma switch (behavior) case MADV_DONTNEED: madvise_dontneed zap_page_range unmap_vmas unmap_page_range zap_pud_range zap_pmd_range // // Assume that this huge page has never been accessed. // I.e. content of the PMD entry is zero (not mapped). // if (pmd_trans_huge(*pmd)) { // We don't get here due to the above assumption. } // // Assume that Thread B incurred a page fault and .---------> // sneaks in here as shown below. | // | if (pmd_none_or_clear_bad(pmd)) | { | if (unlikely(pmd_bad(*pmd))) | pmd_clear_bad | { | pmd_ERROR | // Log "bad pmd ..." message here. | pmd_clear | // Clear the page's PMD entry. | // Thread B incremented the map count | // in page_add_new_anon_rmap(), but | // now the page is no longer mapped | // by a PMD entry (-> inconsistency). | } | } | v - Thread B is handling a page fault on virtual address "B(fault)" shown in the picture. ... do_page_fault __do_page_fault // Acquire the semaphore in shared mode. down_read_trylock(&mm->mmap_sem) ... handle_mm_fault if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) // We get here due to the above assumption (PMD entry is zero). do_huge_pmd_anonymous_page alloc_hugepage_vma // Allocate a new transparent huge page here. ... __do_huge_pmd_anonymous_page ... spin_lock(&mm->page_table_lock) ... page_add_new_anon_rmap // Here we increment the page's map count (starts at -1). atomic_set(&page->_mapcount, 0) set_pmd_at // Here we set the page's PMD entry which will be cleared // when Thread A calls pmd_clear_bad(). ... spin_unlock(&mm->page_table_lock) The mmap_sem does not prevent the race because both threads are acquiring it in shared mode (down_read). Thread B holds the page_table_lock while the page's map count and PMD table entry are updated. However, Thread A does not synchronize on that lock. 
====== end quote ======= [[email protected]: checkpatch fixes] Reported-by: Ulrich Obergfell <[email protected]> Signed-off-by: Andrea Arcangeli <[email protected]> Acked-by: Johannes Weiner <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Hugh Dickins <[email protected]> Cc: Dave Jones <[email protected]> Acked-by: Larry Woodman <[email protected]> Acked-by: Rik van Riel <[email protected]> Cc: <[email protected]> [2.6.38+] Cc: Mark Salter <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
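The idiom this commit introduces is: snapshot the racy pmd once into a local, force the compiler not to re-read it, and reason only about the snapshot. A simplified sketch of that shape (kernel types reduced to a plain integer; modeled on the pmd_none_or_trans_huge_or_clear_bad() helper the real patch adds):

#include <stdio.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

static int pmd_none_or_trans_huge_or_clear_bad(unsigned long *pmd)
{
  unsigned long pmdval = *pmd;  /* single read into the local stack */
  barrier();                    /* forbid the compiler to re-read *pmd */
  if (pmdval == 0)              /* pmd_none() on the snapshot */
    return 1;
  /* further checks (pmd_trans_huge, pmd_bad) also use pmdval only,
     so a hugepmd materializing underneath cannot confuse us */
  return 0;
}

int main(void)
{
  unsigned long pmd = 0;
  printf("%d\n", pmd_none_or_trans_huge_or_clear_bad(&pmd));
  return 0;
}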
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk) { unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID); }
0
[ "CWE-416", "CWE-269" ]
linux
bb1fceca22492109be12640d49f5ea5a544c6bb4
249,036,796,002,121,340,000,000,000,000,000,000,000
6
tcp: fix use after free in tcp_xmit_retransmit_queue() When tcp_sendmsg() allocates a fresh and empty skb, it puts it at the tail of the write queue using tcp_add_write_queue_tail() Then it attempts to copy user data into this fresh skb. If the copy fails, we undo the work and remove the fresh skb. Unfortunately, this undo lacks the change done to tp->highest_sack and we can leave a dangling pointer (to a freed skb) Later, tcp_xmit_retransmit_queue() can dereference this pointer and access freed memory. For regular kernels where memory is not unmapped, this might cause SACK bugs because tcp_highest_sack_seq() is buggy, returning garbage instead of tp->snd_nxt, but with various debug features like CONFIG_DEBUG_PAGEALLOC, this can crash the kernel. This bug was found by Marco Grassi thanks to syzkaller. Fixes: 6859d49475d4 ("[TCP]: Abstract tp->highest_sack accessing & point to next skb") Reported-by: Marco Grassi <[email protected]> Signed-off-by: Eric Dumazet <[email protected]> Cc: Ilpo Järvinen <[email protected]> Cc: Yuchung Cheng <[email protected]> Cc: Neal Cardwell <[email protected]> Acked-by: Neal Cardwell <[email protected]> Reviewed-by: Cong Wang <[email protected]> Signed-off-by: David S. Miller <[email protected]>
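The underlying rule behind this fix: whenever an skb is unlinked from the write queue, any cached pointer to it (here tp->highest_sack) must be cleared or advanced first. A reduced sketch of that invariant (generic list code, not the TCP stack):

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; };

struct queue {
  struct node *head;
  struct node *cached;   /* models tp->highest_sack */
};

/* Unlink the head node without leaving a dangling cache pointer. */
static void unlink_head(struct queue *q)
{
  struct node *n = q->head;
  if (n == NULL)
    return;
  if (q->cached == n)    /* the step the buggy undo path was missing */
    q->cached = NULL;
  q->head = n->next;
  free(n);
}

int main(void)
{
  struct queue q = { malloc(sizeof(struct node)), NULL };
  q.head->next = NULL;
  q.cached = q.head;
  unlink_head(&q);
  printf("cached=%p\n", (void *)q.cached);  /* NULL, not dangling */
  return 0;
}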
static WERROR dnsserver_query_server(struct dnsserver_state *dsstate, TALLOC_CTX *mem_ctx, const char *operation, const unsigned int client_version, enum DNS_RPC_TYPEID *typeid, union DNSSRV_RPC_UNION *r) { uint8_t is_integer, is_addresses, is_string, is_wstring, is_stringlist; uint32_t answer_integer; struct IP4_ARRAY *answer_iparray; struct DNS_ADDR_ARRAY *answer_addrarray; char *answer_string; struct DNS_RPC_UTF8_STRING_LIST *answer_stringlist; struct dnsserver_serverinfo *serverinfo; serverinfo = dsstate->serverinfo; if (strcasecmp(operation, "ServerInfo") == 0) { if (client_version == DNS_CLIENT_VERSION_W2K) { *typeid = DNSSRV_TYPEID_SERVER_INFO_W2K; r->ServerInfoW2K = talloc_zero(mem_ctx, struct DNS_RPC_SERVER_INFO_W2K); r->ServerInfoW2K->dwVersion = serverinfo->dwVersion; r->ServerInfoW2K->fBootMethod = serverinfo->fBootMethod; r->ServerInfoW2K->fAdminConfigured = serverinfo->fAdminConfigured; r->ServerInfoW2K->fAllowUpdate = serverinfo->fAllowUpdate; r->ServerInfoW2K->fDsAvailable = serverinfo->fDsAvailable; r->ServerInfoW2K->pszServerName = talloc_strdup(mem_ctx, serverinfo->pszServerName); r->ServerInfoW2K->pszDsContainer = talloc_strdup(mem_ctx, serverinfo->pszDsContainer); r->ServerInfoW2K->aipServerAddrs = dns_addr_array_to_ip4_array(mem_ctx, serverinfo->aipServerAddrs); r->ServerInfoW2K->aipListenAddrs = dns_addr_array_to_ip4_array(mem_ctx, serverinfo->aipListenAddrs); r->ServerInfoW2K->aipForwarders = ip4_array_copy(mem_ctx, serverinfo->aipForwarders); r->ServerInfoW2K->dwLogLevel = serverinfo->dwLogLevel; r->ServerInfoW2K->dwDebugLevel = serverinfo->dwDebugLevel; r->ServerInfoW2K->dwForwardTimeout = serverinfo->dwForwardTimeout; r->ServerInfoW2K->dwRpcProtocol = serverinfo->dwRpcProtocol; r->ServerInfoW2K->dwNameCheckFlag = serverinfo->dwNameCheckFlag; r->ServerInfoW2K->cAddressAnswerLimit = serverinfo->cAddressAnswerLimit; r->ServerInfoW2K->dwRecursionRetry = serverinfo->dwRecursionRetry; r->ServerInfoW2K->dwRecursionTimeout = serverinfo->dwRecursionTimeout; r->ServerInfoW2K->dwMaxCacheTtl = serverinfo->dwMaxCacheTtl; r->ServerInfoW2K->dwDsPollingInterval = serverinfo->dwDsPollingInterval; r->ServerInfoW2K->dwScavengingInterval = serverinfo->dwScavengingInterval; r->ServerInfoW2K->dwDefaultRefreshInterval = serverinfo->dwDefaultRefreshInterval; r->ServerInfoW2K->dwDefaultNoRefreshInterval = serverinfo->dwDefaultNoRefreshInterval; r->ServerInfoW2K->fAutoReverseZones = serverinfo->fAutoReverseZones; r->ServerInfoW2K->fAutoCacheUpdate = serverinfo->fAutoCacheUpdate; r->ServerInfoW2K->fRecurseAfterForwarding = serverinfo->fRecurseAfterForwarding; r->ServerInfoW2K->fForwardDelegations = serverinfo->fForwardDelegations; r->ServerInfoW2K->fNoRecursion = serverinfo->fNoRecursion; r->ServerInfoW2K->fSecureResponses = serverinfo->fSecureResponses; r->ServerInfoW2K->fRoundRobin = serverinfo->fRoundRobin; r->ServerInfoW2K->fLocalNetPriority = serverinfo->fLocalNetPriority; r->ServerInfoW2K->fBindSecondaries = serverinfo->fBindSecondaries; r->ServerInfoW2K->fWriteAuthorityNs = serverinfo->fWriteAuthorityNs; r->ServerInfoW2K->fStrictFileParsing = serverinfo->fStrictFileParsing; r->ServerInfoW2K->fLooseWildcarding = serverinfo->fLooseWildcarding; r->ServerInfoW2K->fDefaultAgingState = serverinfo->fDefaultAgingState; } else if (client_version == DNS_CLIENT_VERSION_DOTNET) { *typeid = DNSSRV_TYPEID_SERVER_INFO_DOTNET; r->ServerInfoDotNet = talloc_zero(mem_ctx, struct DNS_RPC_SERVER_INFO_DOTNET); r->ServerInfoDotNet->dwRpcStructureVersion = 0x01; r->ServerInfoDotNet->dwVersion = 
serverinfo->dwVersion; r->ServerInfoDotNet->fBootMethod = serverinfo->fBootMethod; r->ServerInfoDotNet->fAdminConfigured = serverinfo->fAdminConfigured; r->ServerInfoDotNet->fAllowUpdate = serverinfo->fAllowUpdate; r->ServerInfoDotNet->fDsAvailable = serverinfo->fDsAvailable; r->ServerInfoDotNet->pszServerName = talloc_strdup(mem_ctx, serverinfo->pszServerName); r->ServerInfoDotNet->pszDsContainer = talloc_strdup(mem_ctx, serverinfo->pszDsContainer); r->ServerInfoDotNet->aipServerAddrs = dns_addr_array_to_ip4_array(mem_ctx, serverinfo->aipServerAddrs); r->ServerInfoDotNet->aipListenAddrs = dns_addr_array_to_ip4_array(mem_ctx, serverinfo->aipListenAddrs); r->ServerInfoDotNet->aipForwarders = ip4_array_copy(mem_ctx, serverinfo->aipForwarders); r->ServerInfoDotNet->aipLogFilter = ip4_array_copy(mem_ctx, serverinfo->aipLogFilter); r->ServerInfoDotNet->pwszLogFilePath = talloc_strdup(mem_ctx, serverinfo->pwszLogFilePath); r->ServerInfoDotNet->pszDomainName = talloc_strdup(mem_ctx, serverinfo->pszDomainName); r->ServerInfoDotNet->pszForestName = talloc_strdup(mem_ctx, serverinfo->pszForestName); r->ServerInfoDotNet->pszDomainDirectoryPartition = talloc_strdup(mem_ctx, serverinfo->pszDomainDirectoryPartition); r->ServerInfoDotNet->pszForestDirectoryPartition = talloc_strdup(mem_ctx, serverinfo->pszForestDirectoryPartition); r->ServerInfoDotNet->dwLogLevel = serverinfo->dwLogLevel; r->ServerInfoDotNet->dwDebugLevel = serverinfo->dwDebugLevel; r->ServerInfoDotNet->dwForwardTimeout = serverinfo->dwForwardTimeout; r->ServerInfoDotNet->dwRpcProtocol = serverinfo->dwRpcProtocol; r->ServerInfoDotNet->dwNameCheckFlag = serverinfo->dwNameCheckFlag; r->ServerInfoDotNet->cAddressAnswerLimit = serverinfo->cAddressAnswerLimit; r->ServerInfoDotNet->dwRecursionRetry = serverinfo->dwRecursionRetry; r->ServerInfoDotNet->dwRecursionTimeout = serverinfo->dwRecursionTimeout; r->ServerInfoDotNet->dwMaxCacheTtl = serverinfo->dwMaxCacheTtl; r->ServerInfoDotNet->dwDsPollingInterval = serverinfo->dwDsPollingInterval; r->ServerInfoDotNet->dwLocalNetPriorityNetMask = serverinfo->dwLocalNetPriorityNetMask; r->ServerInfoDotNet->dwScavengingInterval = serverinfo->dwScavengingInterval; r->ServerInfoDotNet->dwDefaultRefreshInterval = serverinfo->dwDefaultRefreshInterval; r->ServerInfoDotNet->dwDefaultNoRefreshInterval = serverinfo->dwDefaultNoRefreshInterval; r->ServerInfoDotNet->dwLastScavengeTime = serverinfo->dwLastScavengeTime; r->ServerInfoDotNet->dwEventLogLevel = serverinfo->dwEventLogLevel; r->ServerInfoDotNet->dwLogFileMaxSize = serverinfo->dwLogFileMaxSize; r->ServerInfoDotNet->dwDsForestVersion = serverinfo->dwDsForestVersion; r->ServerInfoDotNet->dwDsDomainVersion = serverinfo->dwDsDomainVersion; r->ServerInfoDotNet->dwDsDsaVersion = serverinfo->dwDsDsaVersion; r->ServerInfoDotNet->fAutoReverseZones = serverinfo->fAutoReverseZones; r->ServerInfoDotNet->fAutoCacheUpdate = serverinfo->fAutoCacheUpdate; r->ServerInfoDotNet->fRecurseAfterForwarding = serverinfo->fRecurseAfterForwarding; r->ServerInfoDotNet->fForwardDelegations = serverinfo->fForwardDelegations; r->ServerInfoDotNet->fNoRecursion = serverinfo->fNoRecursion; r->ServerInfoDotNet->fSecureResponses = serverinfo->fSecureResponses; r->ServerInfoDotNet->fRoundRobin = serverinfo->fRoundRobin; r->ServerInfoDotNet->fLocalNetPriority = serverinfo->fLocalNetPriority; r->ServerInfoDotNet->fBindSecondaries = serverinfo->fBindSecondaries; r->ServerInfoDotNet->fWriteAuthorityNs = serverinfo->fWriteAuthorityNs; r->ServerInfoDotNet->fStrictFileParsing = 
serverinfo->fStrictFileParsing; r->ServerInfoDotNet->fLooseWildcarding = serverinfo->fLooseWildcarding; r->ServerInfoDotNet->fDefaultAgingState = serverinfo->fDefaultAgingState; } else if (client_version == DNS_CLIENT_VERSION_LONGHORN) { *typeid = DNSSRV_TYPEID_SERVER_INFO; r->ServerInfo = talloc_zero(mem_ctx, struct DNS_RPC_SERVER_INFO_LONGHORN); r->ServerInfo->dwRpcStructureVersion = 0x02; r->ServerInfo->dwVersion = serverinfo->dwVersion; r->ServerInfo->fBootMethod = serverinfo->fBootMethod; r->ServerInfo->fAdminConfigured = serverinfo->fAdminConfigured; r->ServerInfo->fAllowUpdate = serverinfo->fAllowUpdate; r->ServerInfo->fDsAvailable = serverinfo->fDsAvailable; r->ServerInfo->pszServerName = talloc_strdup(mem_ctx, serverinfo->pszServerName); r->ServerInfo->pszDsContainer = talloc_strdup(mem_ctx, serverinfo->pszDsContainer); r->ServerInfo->aipServerAddrs = serverinfo->aipServerAddrs; r->ServerInfo->aipListenAddrs = serverinfo->aipListenAddrs; r->ServerInfo->aipForwarders = ip4_array_to_dns_addr_array(mem_ctx, serverinfo->aipForwarders); r->ServerInfo->aipLogFilter = ip4_array_to_dns_addr_array(mem_ctx, serverinfo->aipLogFilter); r->ServerInfo->pwszLogFilePath = talloc_strdup(mem_ctx, serverinfo->pwszLogFilePath); r->ServerInfo->pszDomainName = talloc_strdup(mem_ctx, serverinfo->pszDomainName); r->ServerInfo->pszForestName = talloc_strdup(mem_ctx, serverinfo->pszForestName); r->ServerInfo->pszDomainDirectoryPartition = talloc_strdup(mem_ctx, serverinfo->pszDomainDirectoryPartition); r->ServerInfo->pszForestDirectoryPartition = talloc_strdup(mem_ctx, serverinfo->pszForestDirectoryPartition); r->ServerInfo->dwLogLevel = serverinfo->dwLogLevel; r->ServerInfo->dwDebugLevel = serverinfo->dwDebugLevel; r->ServerInfo->dwForwardTimeout = serverinfo->dwForwardTimeout; r->ServerInfo->dwRpcProtocol = serverinfo->dwRpcProtocol; r->ServerInfo->dwNameCheckFlag = serverinfo->dwNameCheckFlag; r->ServerInfo->cAddressAnswerLimit = serverinfo->cAddressAnswerLimit; r->ServerInfo->dwRecursionRetry = serverinfo->dwRecursionRetry; r->ServerInfo->dwRecursionTimeout = serverinfo->dwRecursionTimeout; r->ServerInfo->dwMaxCacheTtl = serverinfo->dwMaxCacheTtl; r->ServerInfo->dwDsPollingInterval = serverinfo->dwDsPollingInterval; r->ServerInfo->dwLocalNetPriorityNetMask = serverinfo->dwLocalNetPriorityNetMask; r->ServerInfo->dwScavengingInterval = serverinfo->dwScavengingInterval; r->ServerInfo->dwDefaultRefreshInterval = serverinfo->dwDefaultRefreshInterval; r->ServerInfo->dwDefaultNoRefreshInterval = serverinfo->dwDefaultNoRefreshInterval; r->ServerInfo->dwLastScavengeTime = serverinfo->dwLastScavengeTime; r->ServerInfo->dwEventLogLevel = serverinfo->dwEventLogLevel; r->ServerInfo->dwLogFileMaxSize = serverinfo->dwLogFileMaxSize; r->ServerInfo->dwDsForestVersion = serverinfo->dwDsForestVersion; r->ServerInfo->dwDsDomainVersion = serverinfo->dwDsDomainVersion; r->ServerInfo->dwDsDsaVersion = serverinfo->dwDsDsaVersion; r->ServerInfo->fReadOnlyDC = serverinfo->fReadOnlyDC; r->ServerInfo->fAutoReverseZones = serverinfo->fAutoReverseZones; r->ServerInfo->fAutoCacheUpdate = serverinfo->fAutoCacheUpdate; r->ServerInfo->fRecurseAfterForwarding = serverinfo->fRecurseAfterForwarding; r->ServerInfo->fForwardDelegations = serverinfo->fForwardDelegations; r->ServerInfo->fNoRecursion = serverinfo->fNoRecursion; r->ServerInfo->fSecureResponses = serverinfo->fSecureResponses; r->ServerInfo->fRoundRobin = serverinfo->fRoundRobin; r->ServerInfo->fLocalNetPriority = serverinfo->fLocalNetPriority; r->ServerInfo->fBindSecondaries = 
serverinfo->fBindSecondaries; r->ServerInfo->fWriteAuthorityNs = serverinfo->fWriteAuthorityNs; r->ServerInfo->fStrictFileParsing = serverinfo->fStrictFileParsing; r->ServerInfo->fLooseWildcarding = serverinfo->fLooseWildcarding; r->ServerInfo->fDefaultAgingState = serverinfo->fDefaultAgingState; } return WERR_OK; } is_integer = 0; answer_integer = 0; if (strcasecmp(operation, "AddressAnswerLimit") == 0) { answer_integer = serverinfo->cAddressAnswerLimit; is_integer = 1; } else if (strcasecmp(operation, "AdminConfigured") == 0) { answer_integer = serverinfo->fAdminConfigured; is_integer = 1; } else if (strcasecmp(operation, "AllowCNAMEAtNS") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "AllowUpdate") == 0) { answer_integer = serverinfo->fAllowUpdate; is_integer = 1; } else if (strcasecmp(operation, "AutoCacheUpdate") == 0) { answer_integer = serverinfo->fAutoCacheUpdate; is_integer = 1; } else if (strcasecmp(operation, "AutoConfigFileZones") == 0) { answer_integer = 1; is_integer = 1; } else if (strcasecmp(operation, "BindSecondaries") == 0) { answer_integer = serverinfo->fBindSecondaries; is_integer = 1; } else if (strcasecmp(operation, "BootMethod") == 0) { answer_integer = serverinfo->fBootMethod; is_integer = 1; } else if (strcasecmp(operation, "DebugLevel") == 0) { answer_integer = serverinfo->dwDebugLevel; is_integer = 1; } else if (strcasecmp(operation, "DefaultAgingState") == 0) { answer_integer = serverinfo->fDefaultAgingState; is_integer = 1; } else if (strcasecmp(operation, "DefaultNoRefreshInterval") == 0) { answer_integer = serverinfo->dwDefaultNoRefreshInterval; is_integer = 1; } else if (strcasecmp(operation, "DefaultRefreshInterval") == 0) { answer_integer = serverinfo->dwDefaultRefreshInterval; is_integer = 1; } else if (strcasecmp(operation, "DeleteOutsideGlue") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "DisjointNets") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "DsLazyUpdateInterval") == 0) { answer_integer = 3; /* seconds */ is_integer = 1; } else if (strcasecmp(operation, "DsPollingInterval") == 0) { answer_integer = serverinfo->dwDsPollingInterval; is_integer = 1; } else if (strcasecmp(operation, "DsTombstoneInterval") == 0) { answer_integer = 0x00127500; /* 14 days */ is_integer = 1; } else if (strcasecmp(operation, "EnableRegistryBoot") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "EventLogLevel") == 0) { answer_integer = serverinfo->dwEventLogLevel; is_integer = 1; } else if (strcasecmp(operation, "ForceSoaSerial") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "ForceSaoRetry") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "ForceSoaRefresh") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "ForceSoaMinimumTtl") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "ForwardDelegations") == 0) { answer_integer = 1; is_integer = 1; } else if (strcasecmp(operation, "ForwardingTimeout") == 0) { answer_integer = serverinfo->dwForwardTimeout; is_integer = 1; } else if (strcasecmp(operation, "IsSlave") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "LocalNetPriority") == 0) { answer_integer = serverinfo->fLocalNetPriority; is_integer = 1; } else if (strcasecmp(operation, "LogFileMaxSize") == 0) { answer_integer = serverinfo->dwLogFileMaxSize; is_integer = 1; } else if (strcasecmp(operation, "LogLevel") 
== 0) { answer_integer = serverinfo->dwLogLevel; is_integer = 1; } else if (strcasecmp(operation, "LooseWildcarding") == 0) { answer_integer = serverinfo->fLooseWildcarding; is_integer = 1; } else if (strcasecmp(operation, "MaxCacheTtl") == 0) { answer_integer = serverinfo->dwMaxCacheTtl; is_integer = 1; } else if (strcasecmp(operation, "MaxNegativeCacheTtl") == 0) { answer_integer = 0x00000384; /* 15 minutes */ is_integer = 1; } else if (strcasecmp(operation, "NameCheckFlag") == 0) { answer_integer = serverinfo->dwNameCheckFlag; is_integer = 1; } else if (strcasecmp(operation, "NoRecursion") == 0) { answer_integer = serverinfo->fNoRecursion; is_integer = 1; } else if (strcasecmp(operation, "NoUpdateDelegations") == 0) { answer_integer = 1; is_integer = 1; } else if (strcasecmp(operation, "PublishAutonet") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "QuietRecvFaultInterval") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "QuietRecvLogInterval") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "RecursionRetry") == 0) { answer_integer = serverinfo->dwRecursionRetry; is_integer = 1; } else if (strcasecmp(operation, "RecursionTimeout") == 0) { answer_integer = serverinfo->dwRecursionTimeout; is_integer = 1; } else if (strcasecmp(operation, "ReloadException") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "RoundRobin") == 0) { answer_integer = serverinfo->fRoundRobin; is_integer = 1; } else if (strcasecmp(operation, "RpcProtocol") == 0) { answer_integer = serverinfo->dwRpcProtocol; is_integer = 1; } else if (strcasecmp(operation, "SecureResponses") == 0) { answer_integer = serverinfo->fSecureResponses; is_integer = 1; } else if (strcasecmp(operation, "SendPort") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "ScavengingInterval") == 0) { answer_integer = serverinfo->dwScavengingInterval; is_integer = 1; } else if (strcasecmp(operation, "SocketPoolSize") == 0) { answer_integer = 0x000009C4; is_integer = 1; } else if (strcasecmp(operation, "StrictFileParsing") == 0) { answer_integer = serverinfo->fStrictFileParsing; is_integer = 1; } else if (strcasecmp(operation, "SyncDnsZoneSerial") == 0) { answer_integer = 2; /* ZONE_SERIAL_SYNC_XFER */ is_integer = 1; } else if (strcasecmp(operation, "UpdateOptions") == 0) { answer_integer = 0x0000030F; /* DNS_DEFAULT_UPDATE_OPTIONS */ is_integer = 1; } else if (strcasecmp(operation, "UseSystemEvengLog") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "Version") == 0) { answer_integer = serverinfo->dwVersion; is_integer = 1; } else if (strcasecmp(operation, "XfrConnectTimeout") == 0) { answer_integer = 0x0000001E; is_integer = 1; } else if (strcasecmp(operation, "WriteAuthorityNs") == 0) { answer_integer = serverinfo->fWriteAuthorityNs; is_integer = 1; } else if (strcasecmp(operation, "AdditionalRecursionTimeout") == 0) { answer_integer = 0x00000004; is_integer = 1; } else if (strcasecmp(operation, "AppendMsZoneTransferFlag") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "AutoCreateDelegations") == 0) { answer_integer = 0; /* DNS_ACD_DONT_CREATE */ is_integer = 1; } else if (strcasecmp(operation, "BreakOnAscFailure") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "CacheEmptyAuthResponses") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "DirectoryPartitionAutoEnlistInterval") == 0) { 
answer_integer = 0x00015180; /* 1 day */ is_integer = 1; } else if (strcasecmp(operation, "DisableAutoReverseZones") == 0) { answer_integer = ~serverinfo->fAutoReverseZones; is_integer = 1; } else if (strcasecmp(operation, "EDnsCacheTimeout") == 0) { answer_integer = 0x00000384; /* 15 minutes */ is_integer = 1; } else if (strcasecmp(operation, "EnableDirectoryPartitions") == 0) { answer_integer = serverinfo->fDsAvailable; is_integer = 1; } else if (strcasecmp(operation, "EnableDnsSec") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "EnableEDnsProbes") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "EnableEDnsReception") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "EnableIPv6") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "EnableIQueryResponseGeneration") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "EnableSendErrorSuppression") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "EnableUpdateForwarding") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "EnableWinsR") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "ForceDsaBehaviorVersion") == 0) { answer_integer = serverinfo->dwDsDsaVersion; is_integer = 1; } else if (strcasecmp(operation, "ForceDomainBehaviorVersion") == 0) { answer_integer = serverinfo->dwDsDsaVersion; is_integer = 1; } else if (strcasecmp(operation, "ForceForestBehaviorVersion") == 0) { answer_integer = serverinfo->dwDsDsaVersion; is_integer = 1; } else if (strcasecmp(operation, "HeapDebug") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "LameDelegationTtl") == 0) { answer_integer = 0; /* seconds */ is_integer = 1; } else if (strcasecmp(operation, "LocalNetPriorityNetMask") == 0) { answer_integer = serverinfo->dwLocalNetPriorityNetMask; is_integer = 1; } else if (strcasecmp(operation, "MaxCacheSize") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "MaxResourceRecordsInNonSecureUpdate") == 0) { answer_integer = 0x0000001E; is_integer = 1; } else if (strcasecmp(operation, "OperationsLogLevel") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "OperationsLogLevel2") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "MaximumUdpPacketSize") == 0) { answer_integer = 0x00004000; /* maximum possible */ is_integer = 1; } else if (strcasecmp(operation, "RecurseToInternetRootMask") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "SelfTest") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "SilentlyIgnoreCNameUpdateConflicts") == 0) { answer_integer = 1; is_integer = 1; } else if (strcasecmp(operation, "TcpReceivePacketSize") == 0) { answer_integer = 0x00010000; is_integer = 1; } else if (strcasecmp(operation, "XfrThrottleMultiplier") == 0) { answer_integer = 0x0000000A; is_integer = 1; } else if (strcasecmp(operation, "AllowMsdcsLookupRetry") == 0) { answer_integer = 1; is_integer = 1; } else if (strcasecmp(operation, "AllowReadOnlyZoneTransfer") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "DsBackGroundLoadPaused") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "DsMinimumBackgroundLoadThreads") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "DsRemoteReplicationDelay") == 0) { 
answer_integer = 0x0000001E; /* 30 seconds */ is_integer = 1; } else if (strcasecmp(operation, "EnableDuplicateQuerySuppresion") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "EnableGlobalNamesSupport") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "EnableVersionQuery") == 0) { answer_integer = 1; /* DNS_VERSION_QUERY_FULL */ is_integer = 1; } else if (strcasecmp(operation, "EnableRsoForRodc") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "ForceRODCMode") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "GlobalNamesAlwaysQuerySrv") == 0) { answer_integer = 1; is_integer = 1; } else if (strcasecmp(operation, "GlobalNamesBlockUpdates") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "GlobalNamesEnableEDnsProbes") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "GlobalNamesPreferAAAA") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "GlobalNamesQueryOrder") == 0) { answer_integer = 1; is_integer = 1; } else if (strcasecmp(operation, "GlobalNamesSendTimeout") == 0) { answer_integer = 3; /* seconds */ is_integer = 1; } else if (strcasecmp(operation, "GlobalNamesServerQueryInterval") == 0) { answer_integer = 0x00005460; /* 6 hours */ is_integer = 1; } else if (strcasecmp(operation, "RemoteIPv4RankBoost") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "RemoteIPv6RankBoost") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "MaximumRodcRsoAttemptsPerCycle") == 0) { answer_integer = 0x00000064; is_integer = 1; } else if (strcasecmp(operation, "MaximumRodcRsoQueueLength") == 0) { answer_integer = 0x0000012C; is_integer = 1; } else if (strcasecmp(operation, "EnableGlobalQueryBlockList") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "OpenACLOnProxyUpdates") == 0) { answer_integer = 0; is_integer = 1; } else if (strcasecmp(operation, "CacheLockingPercent") == 0) { answer_integer = 0x00000064; is_integer = 1; } if (is_integer == 1) { *typeid = DNSSRV_TYPEID_DWORD; r->Dword = answer_integer; return WERR_OK; } is_addresses = 0; if (strcasecmp(operation, "Forwarders") == 0) { if (client_version == DNS_CLIENT_VERSION_LONGHORN) { answer_addrarray = ip4_array_to_dns_addr_array(mem_ctx, serverinfo->aipForwarders); } else { answer_iparray = ip4_array_copy(mem_ctx, serverinfo->aipForwarders); } is_addresses = 1; } else if (strcasecmp(operation, "ListenAddresses") == 0) { if (client_version == DNS_CLIENT_VERSION_LONGHORN) { answer_addrarray = serverinfo->aipListenAddrs; } else { answer_iparray = dns_addr_array_to_ip4_array(mem_ctx, serverinfo->aipListenAddrs); } is_addresses = 1; } else if (strcasecmp(operation, "BreakOnReceiveFrom") == 0) { if (client_version == DNS_CLIENT_VERSION_LONGHORN) { answer_addrarray = NULL; } else { answer_iparray = NULL; } is_addresses = 1; } else if (strcasecmp(operation, "BreakOnUpdateFrom") == 0) { if (client_version == DNS_CLIENT_VERSION_LONGHORN) { answer_addrarray = NULL; } else { answer_iparray = NULL; } is_addresses = 1; } else if (strcasecmp(operation, "LogIPFilterList") == 0) { if (client_version == DNS_CLIENT_VERSION_LONGHORN) { answer_addrarray = ip4_array_to_dns_addr_array(mem_ctx, serverinfo->aipLogFilter); } else { answer_iparray = ip4_array_copy(mem_ctx, serverinfo->aipLogFilter); } is_addresses = 1; } if (is_addresses == 1) { if (client_version == DNS_CLIENT_VERSION_LONGHORN) { 
*typeid = DNSSRV_TYPEID_ADDRARRAY; r->AddrArray = answer_addrarray; } else { *typeid = DNSSRV_TYPEID_IPARRAY; r->IpArray = answer_iparray; } return WERR_OK; } is_string = is_wstring = 0; if (strcasecmp(operation, "DomainDirectoryPartitionBaseName") == 0) { answer_string = talloc_strdup(mem_ctx, "DomainDnsZones"); if (! answer_string) { return WERR_OUTOFMEMORY; } is_string = 1; } else if (strcasecmp(operation, "ForestDirectoryPartitionBaseName") == 0) { answer_string = talloc_strdup(mem_ctx, "ForestDnsZones"); if (! answer_string) { return WERR_OUTOFMEMORY; } is_string = 1; } else if (strcasecmp(operation, "LogFilePath") == 0) { answer_string = talloc_strdup(mem_ctx, serverinfo->pwszLogFilePath); is_wstring = 1; } else if (strcasecmp(operation, "ServerLevelPluginDll") == 0) { answer_string = NULL; is_wstring = 1; } else if (strcasecmp(operation, "DsBackgroundPauseName") == 0) { answer_string = NULL; is_string = 1; } else if (strcasecmp(operation, "DsNotRoundRobinTypes") == 0) { answer_string = NULL; is_string = 1; } if (is_string == 1) { *typeid = DNSSRV_TYPEID_LPSTR; r->String = answer_string; return WERR_OK; } else if (is_wstring == 1) { *typeid = DNSSRV_TYPEID_LPWSTR; r->WideString = answer_string; return WERR_OK; } is_stringlist = 0; if (strcasecmp(operation, "GlobalQueryBlockList") == 0) { answer_stringlist = NULL; is_stringlist = 1; } else if (strcasecmp(operation, "SocketPoolExcludedPortRanges") == 0) { answer_stringlist = NULL; is_stringlist = 1; } if (is_stringlist == 1) { *typeid = DNSSRV_TYPEID_UTF8_STRING_LIST; r->Utf8StringList = answer_stringlist; return WERR_OK; } DEBUG(0,("dnsserver: Invalid server operation %s", operation)); return WERR_DNS_ERROR_INVALID_PROPERTY; }
0
[]
samba
4cbf95e731b39b2dbfec02f33fd6b195d0b0f7a8
249,301,536,284,720,060,000,000,000,000,000,000,000
628
CVE-2020-14383: s4/dns: Ensure variable initialization with NULL. Based on patches from Francis Brosnan Blázquez <[email protected]> and Jeremy Allison <[email protected]> BUG: https://bugzilla.samba.org/show_bug.cgi?id=14472 BUG: https://bugzilla.samba.org/show_bug.cgi?id=12795 Signed-off-by: Douglas Bagnall <[email protected]> Reviewed-by: Jeremy Allison <[email protected]> (based on commit 7afe449e7201be92bed8e53cbb37b74af720ef4e)
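The vulnerability amounts to locals that a long strcasecmp() ladder may leave unset before they are consumed; the fix initializes them at declaration. A compact sketch of the pattern:

#include <stddef.h>
#include <strings.h>

/* Sketch of the CVE-2020-14383 fix: answer locals start as NULL so an
   unmatched operation returns NULL instead of stack garbage. */
const char *query_string_property(const char *operation)
{
  const char *answer_string = NULL;   /* the fix: start from NULL */

  if (strcasecmp(operation, "DomainDirectoryPartitionBaseName") == 0)
    answer_string = "DomainDnsZones";
  else if (strcasecmp(operation, "ForestDirectoryPartitionBaseName") == 0)
    answer_string = "ForestDnsZones";

  return answer_string;               /* NULL for unknown properties */
}

int main(void)
{
  return query_string_property("NoSuchProperty") == NULL ? 0 : 1;
}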
void clear_pending(ConnectionRef con) { Mutex::Locker l(lock); for (list<uint64_t>::iterator it = conn_sent[con].begin(); it != conn_sent[con].end(); ++it) sent.erase(*it); conn_sent.erase(con); }
0
[ "CWE-287", "CWE-284" ]
ceph
5ead97120e07054d80623dada90a5cc764c28468
7,364,896,654,945,257,000,000,000,000,000,000,000
8
auth/cephx: add authorizer challenge Allow the accepting side of a connection to reject an initial authorizer with a random challenge. The connecting side then has to respond with an updated authorizer proving they are able to decrypt the service's challenge and that the new authorizer was produced for this specific connection instance. The accepting side requires this challenge and response unconditionally if the client side advertises they have the feature bit. Servers wishing to require this improved level of authentication simply have to require the appropriate feature. Signed-off-by: Sage Weil <[email protected]> (cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b) # Conflicts: # src/auth/Auth.h # src/auth/cephx/CephxProtocol.cc # src/auth/cephx/CephxProtocol.h # src/auth/none/AuthNoneProtocol.h # src/msg/Dispatcher.h # src/msg/async/AsyncConnection.cc - const_iterator - ::decode vs decode - AsyncConnection ctor arg noise - get_random_bytes(), not cct->random()
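The protocol change is a standard replay-prevention step: the server injects a random challenge that the client must fold into a fresh authorizer, binding the proof to this connection attempt. A schematic sketch (the XOR "proof" is a stand-in for real cryptography, and none of this reflects the CephX wire format):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint64_t make_challenge(void)
{
  return ((uint64_t)rand() << 32) | (uint64_t)rand();
}

/* Stand-in for encrypting the challenge under the shared secret. */
static uint64_t prove(uint64_t secret, uint64_t challenge)
{
  return secret ^ challenge;
}

int main(void)
{
  uint64_t secret = 0xfeedface;                 /* shared key (illustrative) */
  uint64_t challenge = make_challenge();        /* server -> client */
  uint64_t response = prove(secret, challenge); /* client -> server */
  puts(response == prove(secret, challenge) ? "accept" : "reject");
  return 0;
}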
Status GetMatchingPaths(FileSystem* fs, Env* env, const string& pattern, std::vector<string>* results) { // Check that `fs`, `env` and `results` are non-null. if (fs == nullptr || env == nullptr || results == nullptr) { return Status(tensorflow::error::INVALID_ARGUMENT, "Filesystem calls GetMatchingPaths with nullptr arguments"); } // By design, we don't match anything on empty pattern results->clear(); if (pattern.empty()) { return Status::OK(); } // The pattern can contain globbing characters at multiple levels, e.g.: // // foo/ba?/baz/f*r // // To match the full pattern, we must match every prefix subpattern and then // operate on the children for each match. Thus, we separate all subpatterns // in the `dirs` vector below. std::vector<std::string> dirs = AllDirectoryPrefixes(pattern); // We can have patterns that have several parents where no globbing is being // done, for example, `foo/bar/baz/*`. We don't need to expand the directories // which don't contain the globbing characters. int matching_index = GetFirstGlobbingEntry(dirs); // If we don't have globbing characters in the pattern then it specifies a // path in the filesystem. We add it to the result set if it exists. if (matching_index == dirs.size()) { if (fs->FileExists(pattern).ok()) { results->emplace_back(pattern); } return Status::OK(); } // To expand the globbing, we do a BFS from `dirs[matching_index-1]`. // At every step, we work on a pair `{dir, ix}` such that `dir` is a real // directory, `ix < dirs.size() - 1` and `dirs[ix+1]` is a globbing pattern. // To expand the pattern, we select from all the children of `dir` only those // that match against `dirs[ix+1]`. // If there are more entries in `dirs` after `dirs[ix+1]` this mean we have // more patterns to match. So, we add to the queue only those children that // are also directories, paired with `ix+1`. // If there are no more entries in `dirs`, we return all children as part of // the answer. // Since we can get into a combinatorial explosion issue (e.g., pattern // `/*/*/*`), we process the queue in parallel. Each parallel processing takes // elements from `expand_queue` and adds them to `next_expand_queue`, after // which we swap these two queues (similar to double buffering algorithms). // PRECONDITION: `IsGlobbingPattern(dirs[0]) == false` // PRECONDITION: `matching_index > 0` // INVARIANT: If `{d, ix}` is in queue, then `d` and `dirs[ix]` are at the // same level in the filesystem tree. // INVARIANT: If `{d, _}` is in queue, then `IsGlobbingPattern(d) == false`. // INVARIANT: If `{d, _}` is in queue, then `d` is a real directory. // INVARIANT: If `{_, ix}` is in queue, then `ix < dirs.size() - 1`. // INVARIANT: If `{_, ix}` is in queue, `IsGlobbingPattern(dirs[ix + 1])`. std::deque<std::pair<string, int>> expand_queue; std::deque<std::pair<string, int>> next_expand_queue; expand_queue.emplace_back(dirs[matching_index - 1], matching_index - 1); // Adding to `result` or `new_expand_queue` need to be protected by mutexes // since there are multiple threads writing to these. mutex result_mutex; mutex queue_mutex; while (!expand_queue.empty()) { next_expand_queue.clear(); // The work item for every item in `expand_queue`. // pattern, we process them in parallel. auto handle_level = [&fs, &results, &dirs, &expand_queue, &next_expand_queue, &result_mutex, &queue_mutex](int i) { // See invariants above, all of these are valid accesses. 
const auto& queue_item = expand_queue.at(i); const std::string& parent = queue_item.first; const int index = queue_item.second + 1; const std::string& match_pattern = dirs[index]; // Get all children of `parent`. If this fails, return early. std::vector<std::string> children; Status s = fs->GetChildren(parent, &children); if (s.code() == tensorflow::error::PERMISSION_DENIED) { return; } // Also return early if we don't have any children if (children.empty()) { return; } // Since we can get extremely many children here and on some filesystems // `IsDirectory` is expensive, we process the children in parallel. // We also check that children match the pattern in parallel, for speedup. // We store the status of the match and `IsDirectory` in // `children_status` array, one element for each children. std::vector<Status> children_status(children.size()); auto handle_children = [&fs, &match_pattern, &parent, &children, &children_status](int j) { const std::string path = io::JoinPath(parent, children[j]); if (!fs->Match(path, match_pattern)) { children_status[j] = Status(tensorflow::error::CANCELLED, "Operation not needed"); } else { children_status[j] = fs->IsDirectory(path); } }; ForEach(0, children.size(), handle_children); // At this point, pairing `children` with `children_status` will tell us // if a children: // * does not match the pattern // * matches the pattern and is a directory // * matches the pattern and is not a directory // We fully ignore the first case. // If we matched the last pattern (`index == dirs.size() - 1`) then all // remaining children get added to the result. // Otherwise, only the directories get added to the next queue. for (size_t j = 0; j < children.size(); j++) { if (children_status[j].code() == tensorflow::error::CANCELLED) { continue; } const std::string path = io::JoinPath(parent, children[j]); if (index == dirs.size() - 1) { mutex_lock l(result_mutex); results->emplace_back(path); } else if (children_status[j].ok()) { mutex_lock l(queue_mutex); next_expand_queue.emplace_back(path, index); } } }; ForEach(0, expand_queue.size(), handle_level); // After evaluating one level, swap the "buffers" std::swap(expand_queue, next_expand_queue); } return Status::OK(); }
0
[ "CWE-125" ]
tensorflow
8b5b9dc96666a3a5d27fad7179ff215e3b74b67c
336,337,959,098,913,650,000,000,000,000,000,000,000
144
Completely rewrite `GetMatchingPaths`. The current parallel implementation is too complex (lambda inside lambda, two levels of parallelism) and has a read outside of bounds issue. The new implementation cleans up artifacts from the previous implementations that were left in the code as it evolves. We add multiple helper functions, and document invariants and preconditions as well as every major step. This way, we fix the security issue and a potential new one which was not caught before PiperOrigin-RevId: 346146220 Change-Id: Iec0f44673f43349797bf9944dffe9b2f779137d8
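The "double buffering" of queues described in the rewritten code generalizes beyond this function: process one BFS level from the current queue, collect matches for the next level in a second queue, then swap. A compact sketch of that loop shape (fixed arrays stand in for the deques):

#include <stdio.h>

int main(void)
{
  int cur[8]  = {1};
  int next[8];
  int cur_n = 1, next_n;

  for (int level = 0; level < 3; level++) {
    next_n = 0;
    for (int i = 0; i < cur_n; i++) {          /* expand every entry */
      next[next_n++] = cur[i] * 2;             /* fake "children"   */
      next[next_n++] = cur[i] * 2 + 1;
    }
    for (int i = 0; i < next_n; i++)           /* swap the buffers  */
      cur[i] = next[i];
    cur_n = next_n;
  }
  printf("nodes at depth 3: %d\n", cur_n);     /* 8 */
  return 0;
}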
ZEND_VM_HELPER(zend_fetch_static_prop_helper, ANY, ANY, int type) { USE_OPLINE zval *prop; SAVE_OPLINE(); if (UNEXPECTED(zend_fetch_static_property_address(&prop, NULL, opline->extended_value & ~ZEND_FETCH_OBJ_FLAGS, type, opline->extended_value & ZEND_FETCH_OBJ_FLAGS OPLINE_CC EXECUTE_DATA_CC) != SUCCESS)) { ZEND_ASSERT(EG(exception) || (type == BP_VAR_IS)); prop = &EG(uninitialized_zval); } if (type == BP_VAR_R || type == BP_VAR_IS) { ZVAL_COPY_DEREF(EX_VAR(opline->result.var), prop); } else { ZVAL_INDIRECT(EX_VAR(opline->result.var), prop); } ZEND_VM_NEXT_OPCODE_CHECK_EXCEPTION(); }
0
[ "CWE-787" ]
php-src
f1ce8d5f5839cb2069ea37ff424fb96b8cd6932d
169,729,605,314,922,770,000,000,000,000,000,000,000
19
Fix #73122: Integer Overflow when concatenating strings We must avoid integer overflows in memory allocations, so we introduce an additional check in the VM, and bail out in the rare case of an overflow. Since the recent fix for bug #74960 still doesn't catch all possible overflows, we fix that right away.
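The guard amounts to checking the addition before allocating, rather than letting the allocation size wrap. A minimal sketch of the overflow test (generic C, not the Zend macro itself):

#include <stdint.h>
#include <stdio.h>

/* Reject a concatenation whose combined length cannot be represented,
   instead of letting malloc(len1 + len2) silently wrap. */
static int concat_len(size_t len1, size_t len2, size_t *out)
{
  if (len1 > SIZE_MAX - len2)   /* len1 + len2 would overflow */
    return -1;
  *out = len1 + len2;
  return 0;
}

int main(void)
{
  size_t n;
  printf("%d\n", concat_len(SIZE_MAX - 1, 2, &n));  /* -1: bail out */
  printf("%d\n", concat_len(10, 20, &n));           /*  0: n == 30  */
  return 0;
}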
getname_kernel(const char * filename) { struct filename *result; int len = strlen(filename) + 1; result = __getname(); if (unlikely(!result)) return ERR_PTR(-ENOMEM); if (len <= EMBEDDED_NAME_MAX) { result->name = (char *)result->iname; } else if (len <= PATH_MAX) { const size_t size = offsetof(struct filename, iname[1]); struct filename *tmp; tmp = kmalloc(size, GFP_KERNEL); if (unlikely(!tmp)) { __putname(result); return ERR_PTR(-ENOMEM); } tmp->name = (char *)result; result = tmp; } else { __putname(result); return ERR_PTR(-ENAMETOOLONG); } memcpy((char *)result->name, filename, len); result->uptr = NULL; result->aname = NULL; result->refcnt = 1; audit_getname(result); return result; }
0
[ "CWE-416", "CWE-284" ]
linux
d0cb50185ae942b03c4327be322055d622dc79f6
111,715,560,362,180,390,000,000,000,000,000,000,000
34
do_last(): fetch directory ->i_mode and ->i_uid before it's too late may_create_in_sticky() call is done when we already have dropped the reference to dir. Fixes: 30aba6656f61e (namei: allow restricted O_CREAT of FIFOs and regular files) Signed-off-by: Al Viro <[email protected]>
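The bug class here is reading fields through a reference after that reference may already have been dropped; the fix hoists the reads. A schematic sketch of the reordering (illustrative structs, not VFS code):

#include <stdio.h>

struct inode { unsigned mode; unsigned uid; };

int main(void)
{
  struct inode dir = { 01777, 1000 };
  struct inode *ref = &dir;

  unsigned mode = ref->mode;   /* fetched while the reference is held */
  unsigned uid  = ref->uid;
  ref = NULL;                  /* models dropping the dir reference   */

  /* may_create_in_sticky()-style checks now use only the snapshot */
  printf("sticky=%d uid=%u\n", (mode & 01000) != 0, uid);
  return 0;
}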
TEST_P(SslIntegrationTest, RouterHeaderOnlyRequestAndResponse) { ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { return makeSslClientConnection({}); }; testRouterHeaderOnlyRequestAndResponse(&creator); checkStats(); }
0
[ "CWE-400" ]
envoy
0e49a495826ea9e29134c1bd54fdeb31a034f40c
127,068,150,251,135,330,000,000,000,000,000,000,000
7
http/2: add stats and stream flush timeout (#139) This commit adds a new stream flush timeout to guard against a remote server that does not open window once an entire stream has been buffered for flushing. Additional stats have also been added to better understand the codecs view of active streams as well as amount of data buffered. Signed-off-by: Matt Klein <[email protected]>
static void _gdImageFilledVRectangle (gdImagePtr im, int x1, int y1, int x2, int y2, int color) { int x, y; if (x1 == x2 && y1 == y2) { gdImageSetPixel(im, x1, y1, color); return; } if (x1 > x2) { x = x1; x1 = x2; x2 = x; } if (y1 > y2) { y = y1; y1 = y2; y2 = y; } if (x1 < 0) { x1 = 0; } if (x2 >= gdImageSX(im)) { x2 = gdImageSX(im) - 1; } if (y1 < 0) { y1 = 0; } if (y2 >= gdImageSY(im)) { y2 = gdImageSY(im) - 1; } for (y = y1; (y <= y2); y++) { for (x = x1; (x <= x2); x++) { gdImageSetPixel (im, x, y, color); } } }
0
[ "CWE-20" ]
libgd
1846f48e5fcdde996e7c27a4bbac5d0aef183e4b
122,877,112,046,292,050,000,000,000,000,000,000,000
44
Fix #340: System frozen. gdImageCreate() doesn't check for oversized images and as such is prone to DoS vulnerabilities. We fix that by applying the same overflow check that is already in place for gdImageCreateTrueColor(). CVE-2016-9317
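The applied guard is a multiplication-overflow check before the pixel-buffer allocation. A sketch (overflow2 is the name gd uses internally; the body below is a generic reconstruction, not the library's exact code):

#include <limits.h>
#include <stdio.h>

/* Refuse sx*sy buffers whose element count overflows int, mirroring
   the check gdImageCreateTrueColor() already performed. */
static int overflow2(int a, int b)
{
  return a <= 0 || b <= 0 || a > INT_MAX / b;
}

int main(void)
{
  printf("%d\n", overflow2(65536, 65536));  /* 1: would overflow */
  printf("%d\n", overflow2(640, 480));      /* 0: fine           */
  return 0;
}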
ConnStateData::notes() { if (!theNotes) theNotes = new NotePairs; return theNotes; }
0
[ "CWE-116" ]
squid
7024fb734a59409889e53df2257b3fc817809fb4
194,642,148,063,063,150,000,000,000,000,000,000,000
6
Handle more Range requests (#790) Also removed some effectively unused code.
int SSL_read(SSL *s,void *buf,int num) { if (s->handshake_func == 0) { SSLerr(SSL_F_SSL_READ, SSL_R_UNINITIALIZED); return -1; } if (s->shutdown & SSL_RECEIVED_SHUTDOWN) { s->rwstate=SSL_NOTHING; return(0); } return(s->method->ssl_read(s,buf,num)); }
0
[]
openssl
ee2ffc279417f15fef3b1073c7dc81a908991516
180,322,880,982,963,440,000,000,000,000,000,000,000
15
Add Next Protocol Negotiation.
static inline void sk_stream_moderate_sndbuf(struct sock *sk) { if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) { sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1); sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF); } }
0
[ "CWE-400" ]
linux-2.6
c377411f2494a931ff7facdbb3a6839b1266bcf6
223,752,641,741,173,030,000,000,000,000,000,000,000
7
net: sk_add_backlog() take rmem_alloc into account The current socket backlog limit is not enough to really stop DDOS attacks, because a user thread spends a lot of time processing a full backlog each round, and a user might spin madly on the socket lock. We should add backlog size and receive_queue size (aka rmem_alloc) to pace writers, and let the user run without being slowed down too much. Introduce a sk_rcvqueues_full() helper, to avoid taking the socket lock in stress situations. Under huge stress from a multiqueue/RPS enabled NIC, a single flow udp receiver can now process ~200.000 pps (instead of ~100 pps before the patch) on an 8 core machine. Signed-off-by: Eric Dumazet <[email protected]> Signed-off-by: David S. Miller <[email protected]>
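The new helper gates the backlog on backlog length plus rmem_alloc. A simplified sketch with plain integers standing in for struct sock (the real helper takes the socket and reads these fields):

#include <stdbool.h>
#include <stdio.h>

/* Sketch of sk_rcvqueues_full(): account for bytes already queued to
   the receive queue (rmem_alloc) as well as the backlog, so a flood
   cannot park unbounded data behind a busy socket lock. */
static bool rcvqueues_full(unsigned backlog_len, unsigned rmem_alloc,
                           unsigned rcvbuf)
{
  return backlog_len + rmem_alloc > rcvbuf;
}

int main(void)
{
  printf("%d\n", rcvqueues_full(60000, 80000, 131072));  /* 1: drop   */
  printf("%d\n", rcvqueues_full(10000, 20000, 131072));  /* 0: accept */
  return 0;
}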
static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring) { struct tpacket_kbdq_core *pkc; if (tx_ring) BUG(); pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc; prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired); }
0
[ "CWE-20", "CWE-269" ]
linux
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
90,144,678,087,458,060,000,000,000,000,000,000,000
10
net: rework recvmsg handler msg_name and msg_namelen logic This patch now always passes msg->msg_namelen as 0. recvmsg handlers must set msg_namelen to the proper size <= sizeof(struct sockaddr_storage) to return msg_name to the user. This prevents numerous uninitialized memory leaks we had in the recvmsg handlers and makes it harder for new code to accidentally leak uninitialized memory. Optimize for the case recvfrom is called with NULL as address. We don't need to copy the address at all, so set it to NULL before invoking the recvmsg handler. We can do so, because all the recvmsg handlers must cope with the case a plain read() is called on them. read() also sets msg_name to NULL. Also document these changes in include/linux/net.h as suggested by David Miller. Changes since RFC: Set msg->msg_name = NULL if user specified a NULL in msg_name but had a non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't affect sendto as it would bail out earlier while trying to copy-in the address. It also more naturally reflects the logic by the callers of verify_iovec. With this change in place I could remove " if (!uaddr || msg_sys->msg_namelen == 0) msg->msg_name = NULL ". This change does not alter the user visible error logic as we ignore msg_namelen as long as msg_name is NULL. Also remove two unnecessary curly brackets in ___sys_recvmsg and change comments to netdev style. Cc: David Miller <[email protected]> Suggested-by: Eric Dumazet <[email protected]> Signed-off-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: David S. Miller <[email protected]>
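The contract after that rework: a handler starts from msg_namelen == 0 and only raises it when it actually writes msg_name, so uninitialized stack bytes can never be copied back to userland. A reduced sketch (a toy struct, not the kernel's msghdr):

#include <stdio.h>
#include <string.h>

struct msghdr_lite { char name[16]; int namelen; };

static void recv_handler(struct msghdr_lite *msg, int have_peer)
{
  msg->namelen = 0;                      /* the enforced default */
  if (have_peer) {
    memcpy(msg->name, "peer", 5);        /* fully initialized ... */
    msg->namelen = 5;                    /* ... then sized        */
  }
}

int main(void)
{
  struct msghdr_lite m;
  recv_handler(&m, 0);
  printf("namelen=%d\n", m.namelen);     /* 0: nothing leaks */
  return 0;
}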
static void __exit packet_exit(void) { unregister_netdevice_notifier(&packet_netdev_notifier); unregister_pernet_subsys(&packet_net_ops); sock_unregister(PF_PACKET); proto_unregister(&packet_proto); }
0
[ "CWE-909" ]
linux-2.6
67286640f638f5ad41a946b9a3dc75327950248f
242,404,079,203,349,430,000,000,000,000,000,000,000
7
net: packet: fix information leak to userland packet_getname_spkt() doesn't initialize all members of sa_data field of sockaddr struct if strlen(dev->name) < 13. This structure is then copied to userland. It leads to leaking of contents of kernel stack memory. We have to fully fill sa_data with strncpy() instead of strlcpy(). The same with packet_getname(): it doesn't initialize sll_pkttype field of sockaddr_ll. Set it to zero. Signed-off-by: Vasiliy Kulikov <[email protected]> Signed-off-by: David S. Miller <[email protected]>
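The leak pattern described here is worth seeing in isolation. A minimal sketch, with the buffer and name assumed: strlcpy() stops at the terminating NUL and leaves the tail of the fixed-size field holding stale stack bytes, while strncpy() zero-pads the remainder before the struct is copied to userland.

char sa_data[14];                              /* copied to userland whole */

strlcpy(sa_data, dev_name, sizeof(sa_data));   /* leaky: tail left stale   */
strncpy(sa_data, dev_name, sizeof(sa_data));   /* fixed: tail zero-padded  */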
static int find_and_clear_dirty_height(struct VncState *vs, int y, int last_x, int x, int height) { int h; for (h = 1; h < (height - y); h++) { if (!test_bit(last_x, vs->dirty[y + h])) { break; } bitmap_clear(vs->dirty[y + h], last_x, x - last_x); } return h; }
0
[ "CWE-125" ]
qemu
bea60dd7679364493a0d7f5b54316c767cf894ef
279,212,404,541,612,860,000,000,000,000,000,000,000
14
ui/vnc: fix potential memory corruption issues this patch makes the VNC server work correctly if the server surface and the guest surface have different sizes. Basically the server surface is adjusted to not exceed VNC_MAX_WIDTH x VNC_MAX_HEIGHT and additionally the width is rounded up to multiple of VNC_DIRTY_PIXELS_PER_BIT. If we have a resolution whose width is not dividable by VNC_DIRTY_PIXELS_PER_BIT we now get a small black bar on the right of the screen. If the surface is too big to fit the limits only the upper left area is shown. On top of that this fixes 2 memory corruption issues: The first was actually discovered during playing around with a Windows 7 vServer. During resolution change in Windows 7 it happens sometimes that Windows changes to an intermediate resolution where server_stride % cmp_bytes != 0 (in vnc_refresh_server_surface). This happens only if width % VNC_DIRTY_PIXELS_PER_BIT != 0. The second is a theoretical issue, but is maybe exploitable by the guest. If for some reason the guest surface size is bigger than VNC_MAX_WIDTH x VNC_MAX_HEIGHT we end up in severe corruption since this limit is nowhere enforced. Signed-off-by: Peter Lieven <[email protected]> Signed-off-by: Gerd Hoffmann <[email protected]>
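The width adjustment the message describes is plain alignment arithmetic. A sketch, assuming (as the name suggests) that VNC_DIRTY_PIXELS_PER_BIT is a power of two:

/* round the server surface width up to a whole multiple of
 * VNC_DIRTY_PIXELS_PER_BIT, then clamp to the hard limit */
width = (width + VNC_DIRTY_PIXELS_PER_BIT - 1) & ~(VNC_DIRTY_PIXELS_PER_BIT - 1);
if (width > VNC_MAX_WIDTH)
    width = VNC_MAX_WIDTH;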
TfLiteStatus NotEqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor1, &input1)); const TfLiteTensor* input2; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor2, &input2)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteBool: Comparison<bool, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteFloat32: Comparison<float, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::NotEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::NotEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteString: ComparisonString(reference_ops::StringRefNotEqualFn, input1, input2, output, requires_broadcast); break; default: context->ReportError( context, "Does not support type %d, requires bool|float|int|uint8|string", input1->type); return kTfLiteError; } return kTfLiteOk; }
0
[ "CWE-125", "CWE-787" ]
tensorflow
1970c2158b1ffa416d159d03c3370b9a462aee35
216,887,393,596,596,130,000,000,000,000,000,000,000
49
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
static void sixel_advance(sixel_output_t *context, int nwrite) { if ((context->pos += nwrite) >= SIXEL_OUTPUT_PACKET_SIZE) { WriteBlob(context->image,SIXEL_OUTPUT_PACKET_SIZE,context->buffer); memmove(context->buffer, context->buffer + SIXEL_OUTPUT_PACKET_SIZE, (context->pos -= SIXEL_OUTPUT_PACKET_SIZE)); } }
0
[ "CWE-399", "CWE-401" ]
ImageMagick
748a03651e5b138bcaf160d15133de2f4b1b89ce
245,659,084,026,229,170,000,000,000,000,000,000,000
9
https://github.com/ImageMagick/ImageMagick/issues/1452
ulonglong timer_now(void) { return my_micro_time() / 1000; }
0
[ "CWE-284", "CWE-295" ]
mysql-server
3bd5589e1a5a93f9c224badf983cd65c45215390
320,717,958,158,807,400,000,000,000,000,000,000,000
4
WL#6791 : Redefine client --ssl option to imply enforced encryption # Changed the meaning of the --ssl=1 option of all client binaries to mean force ssl, not try ssl and fail over to unencrypted # Added a new MYSQL_OPT_SSL_ENFORCE mysql_options() option to specify that an ssl connection is required. # Added a new macro SSL_SET_OPTIONS() to the client SSL handling headers that sets all the relevant SSL options at once. # Revamped all of the current native clients to use the new macro # Removed some Windows line endings. # Added proper handling of the new option into the ssl helper headers. # If SSL is mandatory assume that the media is secure enough for the sha256 plugin to do unencrypted password exchange even before establishing a connection. # Set the default ssl cipher to DHE-RSA-AES256-SHA if none is specified. # updated test cases that require a non-default cipher to spawn a mysql command line tool binary since mysqltest has no support for specifying ciphers. # updated the replication slave connection code to always enforce SSL if any of the SSL config options is present. # test cases added and updated. # added a mysql_get_option() API to return mysql_options() values. Used the new API inside the sha256 plugin. # Fixed compilation warnings because of unused variables. # Fixed test failures (mysql_ssl and bug13115401) # Fixed whitespace issues. # Fully implemented the mysql_get_option() function. # Added a test case for mysql_get_option() # fixed some trailing whitespace issues # fixed some uint/int warnings in mysql_client_test.c # removed shared memory option from non-windows get_options tests # moved MYSQL_OPT_LOCAL_INFILE to the uint options
xmlFAParseCharGroup(xmlRegParserCtxtPtr ctxt) { int n = ctxt->neg; while ((CUR != ']') && (ctxt->error == 0)) { if (CUR == '^') { int neg = ctxt->neg; NEXT; ctxt->neg = !ctxt->neg; xmlFAParsePosCharGroup(ctxt); ctxt->neg = neg; } else if ((CUR == '-') && (NXT(1) == '[')) { int neg = ctxt->neg; ctxt->neg = 2; NEXT; /* eat the '-' */ NEXT; /* eat the '[' */ xmlFAParseCharGroup(ctxt); if (CUR == ']') { NEXT; } else { ERROR("charClassExpr: ']' expected"); break; } ctxt->neg = neg; break; } else if (CUR != ']') { xmlFAParsePosCharGroup(ctxt); } } ctxt->neg = n; }
0
[ "CWE-119" ]
libxml2
cbb271655cadeb8dbb258a64701d9a3a0c4835b4
15,145,369,310,050,955,000,000,000,000,000,000,000
30
Bug 757711: heap-buffer-overflow in xmlFAParsePosCharGroup <https://bugzilla.gnome.org/show_bug.cgi?id=757711> * xmlregexp.c: (xmlFAParseCharRange): Only advance to the next character if there is no error. Advancing to the next character in case of an error while parsing regexp leads to an out of bounds access.
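The fix is a one-line ordering change; a sketch of the idea using the parser's CUR/NEXT macros visible in the function above (the exact patched site inside xmlFAParseCharRange() is assumed from the message):

/* advance the cursor only when no error was raised; advancing after
 * an error can step past the end of the expression buffer */
if (ctxt->error == 0)
    NEXT;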
wc_any_to_ucs(wc_wchar_t cc) { int f; wc_uint16 *map = NULL; wc_uint32 map_size = 0x80; wc_map *map2; f = WC_CCS_INDEX(cc.ccs); switch (WC_CCS_TYPE(cc.ccs)) { case WC_CCS_A_CS94: if (cc.ccs == WC_CCS_US_ASCII) return cc.code; if (f < WC_F_ISO_BASE || f > WC_F_CS94_END) return WC_C_UCS4_ERROR; map = cs94_ucs_map[f - WC_F_ISO_BASE]; cc.code &= 0x7f; break; case WC_CCS_A_CS94W: if (cc.ccs == WC_CCS_GB_2312 && WcOption.use_gb12345_map) { cc.ccs = WC_CCS_GB_12345; return wc_any_to_ucs(cc); } else if (cc.ccs == WC_CCS_JIS_X_0213_1) { map2 = wc_map_search((wc_uint16)(cc.code & 0x7f7f), jisx02131_ucs_p2_map, N_jisx02131_ucs_p2_map); if (map2) return map2->code2 | WC_C_UCS4_PLANE2; } else if (cc.ccs == WC_CCS_JIS_X_0213_2) { map2 = wc_map_search((wc_uint16)(cc.code & 0x7f7f), jisx02132_ucs_p2_map, N_jisx02132_ucs_p2_map); if (map2) return map2->code2 | WC_C_UCS4_PLANE2; } if (f < WC_F_ISO_BASE || f > WC_F_CS94W_END) return 0; map = cs94w_ucs_map[f - WC_F_ISO_BASE]; map_size = cs94w_ucs_map_size[f - WC_F_ISO_BASE]; cc.code = WC_CS94W_N(cc.code); break; case WC_CCS_A_CS96: if (f < WC_F_ISO_BASE || f > WC_F_CS96_END) return WC_C_UCS4_ERROR; map = cs96_ucs_map[f - WC_F_ISO_BASE]; cc.code &= 0x7f; break; case WC_CCS_A_CS96W: if (f < WC_F_ISO_BASE || f > WC_F_CS96W_END) return WC_C_UCS4_ERROR; map = cs96w_ucs_map[f - WC_F_ISO_BASE]; map_size = cs96w_ucs_map_size[f - WC_F_ISO_BASE]; cc.code = WC_CS96W_N(cc.code); break; case WC_CCS_A_CS942: if (f < WC_F_ISO_BASE || f > WC_F_CS942_END) return WC_C_UCS4_ERROR; map = cs942_ucs_map[f - WC_F_ISO_BASE]; cc.code &= 0x7f; break; case WC_CCS_A_PCS: if (f < WC_F_PCS_BASE || f > WC_F_PCS_END) return WC_C_UCS4_ERROR; switch (cc.ccs) { case WC_CCS_CP1258_2: map2 = wc_map_search((wc_uint16)cc.code, cp12582_ucs_map, N_cp12582_ucs_map); if (map2) return map2->code2; return WC_C_UCS4_ERROR; case WC_CCS_TCVN_5712_3: return wc_any_to_ucs(wc_tcvn57123_to_tcvn5712(cc)); case WC_CCS_GBK_80: return WC_C_UCS2_EURO; } map = pcs_ucs_map[f - WC_F_PCS_BASE]; map_size = pcs_ucs_map_size[f - WC_F_PCS_BASE]; cc.code &= 0x7f; break; case WC_CCS_A_PCSW: if (f < WC_F_PCS_BASE || f > WC_F_PCSW_END) return WC_C_UCS4_ERROR; map = pcsw_ucs_map[f - WC_F_PCS_BASE]; map_size = pcsw_ucs_map_size[f - WC_F_PCS_BASE]; switch (cc.ccs) { case WC_CCS_BIG5: cc.code = WC_BIG5_N(cc.code); break; case WC_CCS_BIG5_2: cc.code = WC_CS94W_N(cc.code) + WC_C_BIG5_2_BASE; break; case WC_CCS_HKSCS_1: case WC_CCS_HKSCS_2: cc = wc_cs128w_to_hkscs(cc); case WC_CCS_HKSCS: map2 = wc_map_search((wc_uint16)cc.code, hkscs_ucs_p2_map, N_hkscs_ucs_p2_map); if (map2) return map2->code2 | WC_C_UCS4_PLANE2; cc.code = wc_hkscs_to_N(cc.code); break; case WC_CCS_JOHAB: return wc_any_to_ucs(wc_johab_to_cs128w(cc)); case WC_CCS_JOHAB_1: return WC_CS94x128_N(cc.code) + WC_C_UCS2_HANGUL; case WC_CCS_JOHAB_2: cc.code = WC_CS128W_N(cc.code); cc.code = WC_N_JOHAB2(cc.code); map2 = wc_map_search((wc_uint16)cc.code, johab2_ucs_map, N_johab2_ucs_map); if (map2) return map2->code2; return WC_C_UCS4_ERROR; case WC_CCS_JOHAB_3: if ((cc.code & 0x7f7f) < 0x2121) return WC_C_UCS4_ERROR; case WC_CCS_SJIS_EXT: return wc_any_to_ucs(wc_sjis_ext_to_cs94w(cc)); case WC_CCS_SJIS_EXT_1: cc.code = wc_sjis_ext1_to_N(cc.code); if (cc.code == WC_C_SJIS_ERROR) return WC_C_UCS4_ERROR; break; case WC_CCS_SJIS_EXT_2: cc.code = wc_sjis_ext2_to_N(cc.code); if (cc.code == WC_C_SJIS_ERROR) return WC_C_UCS4_ERROR; break; case WC_CCS_GBK_1: case WC_CCS_GBK_2: cc = wc_cs128w_to_gbk(cc); case WC_CCS_GBK: cc.code = wc_gbk_to_N(cc.code); break; case WC_CCS_GBK_EXT: case WC_CCS_GBK_EXT_1: case WC_CCS_GBK_EXT_2: return wc_gb18030_to_ucs(cc); case WC_CCS_UHC_1: case WC_CCS_UHC_2: cc = wc_cs128w_to_uhc(cc); case WC_CCS_UHC: if (cc.code > WC_C_UHC_END) return WC_C_UCS4_ERROR; cc.code = wc_uhc_to_N(cc.code); break; default: cc.code = WC_CS94W_N(cc.code); break; } break; case WC_CCS_A_WCS16: switch (WC_CCS_SET(cc.ccs)) { case WC_CCS_UCS2: return cc.code; } return WC_C_UCS4_ERROR; case WC_CCS_A_WCS32: switch (WC_CCS_SET(cc.ccs)) { case WC_CCS_UCS4: return cc.code; case WC_CCS_UCS_TAG: return wc_ucs_tag_to_ucs(cc.code); case WC_CCS_GB18030: return wc_gb18030_to_ucs(cc); } return WC_C_UCS4_ERROR; case WC_CCS_A_UNKNOWN: if (cc.ccs == WC_CCS_C1) return (cc.code | 0x80); default: return WC_C_UCS4_ERROR; } if (map == NULL) return WC_C_UCS4_ERROR; if (map_size == 0 || cc.code > map_size - 1) return WC_C_UCS4_ERROR; cc.code = map[cc.code]; return cc.code ? cc.code : WC_C_UCS4_ERROR; }
0
[ "CWE-119" ]
w3m
716bc126638393c733399d11d3228edb82877faa
59,358,180,654,881,260,000,000,000,000,000,000,000
177
Prevent global-buffer-overflow in wc_any_to_ucs() Bug-Debian: https://github.com/tats/w3m/issues/43
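The guard at the bottom of wc_any_to_ucs() shows why the order of its two tests matters: map_size is an unsigned wc_uint32, so map_size - 1 wraps to a huge value when map_size == 0. Reduced to a sketch:

/* wrong: with map_size == 0, (map_size - 1) underflows to 0xffffffff,
 * every code passes the check, and map[] is indexed out of bounds */
if (cc.code > map_size - 1) { /* ... */ }

/* right: reject the empty table before computing map_size - 1 */
if (map_size == 0 || cc.code > map_size - 1)
    return WC_C_UCS4_ERROR;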
ZEND_API void zend_generator_close(zend_generator *generator, zend_bool finished_execution) /* {{{ */ { if (EXPECTED(generator->execute_data)) { zend_execute_data *execute_data = generator->execute_data; if (EX_CALL_INFO() & ZEND_CALL_HAS_SYMBOL_TABLE) { zend_clean_and_cache_symbol_table(execute_data->symbol_table); } /* always free the CV's, in the symtable are only not-free'd IS_INDIRECT's */ zend_free_compiled_variables(execute_data); if (EX_CALL_INFO() & ZEND_CALL_RELEASE_THIS) { OBJ_RELEASE(Z_OBJ(execute_data->This)); } /* A fatal error / die occurred during the generator execution. * Trying to clean up the stack may not be safe in this case. */ if (UNEXPECTED(CG(unclean_shutdown))) { generator->execute_data = NULL; return; } zend_vm_stack_free_extra_args(generator->execute_data); /* Some cleanups are only necessary if the generator was closed * before it could finish execution (reach a return statement). */ if (UNEXPECTED(!finished_execution)) { zend_generator_cleanup_unfinished_execution(generator, 0); } /* Free closure object */ if (EX_CALL_INFO() & ZEND_CALL_CLOSURE) { OBJ_RELEASE((zend_object *) EX(func)->common.prototype); } /* Free GC buffer. GC for closed generators doesn't need an allocated buffer */ if (generator->gc_buffer) { efree(generator->gc_buffer); generator->gc_buffer = NULL; } efree(generator->execute_data); generator->execute_data = NULL; } }
0
[]
php-src
83e2b9e2202da6cc25bdaac67a58022b90be88e7
57,591,406,541,438,520,000,000,000,000,000,000,000
45
Fixed bug #76946
static int verify_stack(unsigned long sp) { if (sp < PAGE_OFFSET || (sp > (unsigned long)high_memory && high_memory != NULL)) return -EFAULT; return 0; }
0
[ "CWE-284", "CWE-264" ]
linux
a4780adeefd042482f624f5e0d577bf9cdcbb760
213,844,285,129,440,620,000,000,000,000,000,000,000
8
ARM: 7735/2: Preserve the user r/w register TPIDRURW on context switch and fork Since commit 6a1c53124aa1 the user writeable TLS register was zeroed to prevent it from being used as a covert channel between two tasks. There are more and more applications coming to Windows RT, Wine could support them, but mostly they expect to have the thread environment block (TEB) in TPIDRURW. This patch preserves that register per thread instead of clearing it. Unlike the TPIDRURO, which is already switched, the TPIDRURW can be updated from userspace so needs careful treatment in the case that we modify TPIDRURW and call fork(). To avoid this we must always read TPIDRURW in copy_thread. Signed-off-by: André Hentschel <[email protected]> Signed-off-by: Will Deacon <[email protected]> Signed-off-by: Jonathan Austin <[email protected]> Signed-off-by: Russell King <[email protected]>
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) { put_page(spd->pages[i]); }
0
[ "CWE-703", "CWE-125" ]
linux
8605330aac5a5785630aec8f64378a54891937cc
184,467,390,935,991,900,000,000,000,000,000,000,000
4
tcp: fix SCM_TIMESTAMPING_OPT_STATS for normal skbs __sock_recv_timestamp can be called for both normal skbs (for receive timestamps) and for skbs on the error queue (for transmit timestamps). Commit 1c885808e456 (tcp: SOF_TIMESTAMPING_OPT_STATS option for SO_TIMESTAMPING) assumes any skb passed to __sock_recv_timestamp are from the error queue, containing OPT_STATS in the content of the skb. This results in accessing invalid memory or generating junk data. To fix this, set skb->pkt_type to PACKET_OUTGOING for packets on the error queue. This is safe because on the receive path on local sockets skb->pkt_type is never set to PACKET_OUTGOING. With that, copy OPT_STATS from a packet, only if its pkt_type is PACKET_OUTGOING. Fixes: 1c885808e456 ("tcp: SOF_TIMESTAMPING_OPT_STATS option for SO_TIMESTAMPING") Reported-by: JongHwan Kim <[email protected]> Signed-off-by: Soheil Hassas Yeganeh <[email protected]> Signed-off-by: Eric Dumazet <[email protected]> Signed-off-by: Willem de Bruijn <[email protected]> Signed-off-by: David S. Miller <[email protected]>
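The gating described above, reduced to a sketch (the cmsg plumbing is elided; the names come from the message itself):

/* tx path: mark error-queue skbs before queueing them */
skb->pkt_type = PACKET_OUTGOING;

/* rx path: only interpret the payload as OPT_STATS for skbs that
 * really came from the error queue; receive-path skbs on local
 * sockets never carry PACKET_OUTGOING */
if (skb->pkt_type == PACKET_OUTGOING) {
    /* ... put SCM_TIMESTAMPING_OPT_STATS into the cmsg ... */
}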
EIGEN_STRONG_INLINE QInt32 operator-(const QInt16 a, const QInt32 b) { return QInt32(static_cast<int32_t>(a.value) - b.value); }
0
[ "CWE-908", "CWE-787" ]
tensorflow
ace0c15a22f7f054abcc1f53eabbcb0a1239a9e2
330,935,712,980,802,450,000,000,000,000,000,000,000
3
Default initialize fixed point Eigen types. In certain cases, tensors are filled with default values of the type. But, for these fixed point types, these values were uninitialized. Thus, we would have uninitialized memory access bugs, some of which were caught by MSAN. PiperOrigin-RevId: 344101137 Change-Id: I14555fda74dca3b5f1582da9008901937e3f14e2
void controller::reload_urls_file() { urlcfg->reload(); std::vector<std::shared_ptr<rss_feed>> new_feeds; unsigned int i = 0; for (auto url : urlcfg->get_urls()) { bool found = false; for (auto feed : feeds) { if (url == feed->rssurl()) { found = true; feed->set_tags(urlcfg->get_tags(url)); feed->set_order(i); new_feeds.push_back(feed); break; } } if (!found) { try { bool ignore_disp = (cfg.get_configvalue("ignore-mode") == "display"); std::shared_ptr<rss_feed> new_feed = rsscache->internalize_rssfeed(url, ignore_disp ? &ign : nullptr); new_feed->set_tags(urlcfg->get_tags(url)); new_feed->set_order(i); new_feeds.push_back(new_feed); } catch(const dbexception& e) { LOG(level::ERROR, "controller::reload_urls_file: caught exception: %s", e.what()); throw; } } i++; } v->set_tags(urlcfg->get_alltags()); { std::lock_guard<std::mutex> feedslock(feeds_mutex); feeds = new_feeds; } sort_feeds(); update_feedlist(); }
0
[ "CWE-943", "CWE-787" ]
newsbeuter
96e9506ae9e252c548665152d1b8968297128307
11,591,879,367,346,108,000,000,000,000,000,000,000
42
Sanitize inputs to bookmark-cmd (#591) Newsbeuter didn't properly shell-escape the arguments passed to bookmarking command, which allows a remote attacker to perform remote code execution by crafting an RSS item whose title and/or URL contain something interpretable by the shell (most notably subshell invocations.) This has been reported by Jeriko One <[email protected]>, complete with PoC and a patch. This vulnerability was assigned CVE-2017-12904.
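The general shape of the fix is to shell-escape each untrusted argument before it reaches /bin/sh. A self-contained C sketch of such a quoting helper; this illustrates the technique and is not the project's actual (C++) patch:

#include <stdlib.h>
#include <string.h>

/* Wrap s in single quotes, rewriting each embedded ' as '\'' so the
 * shell treats feed-controlled titles and URLs as literal data. */
static char *shell_quote(const char *s)
{
    size_t n = strlen(s), i, j = 0;
    char *out = malloc(4 * n + 3);   /* worst case: every char is a quote */

    if (!out)
        return NULL;
    out[j++] = '\'';
    for (i = 0; i < n; i++) {
        if (s[i] == '\'') {
            memcpy(out + j, "'\\''", 4);
            j += 4;
        } else {
            out[j++] = s[i];
        }
    }
    out[j++] = '\'';
    out[j] = '\0';
    return out;
}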
//! Load gif file, using ImageMagick or GraphicsMagick's external tools. /** \param filename Filename to read data from. **/ CImgList<T>& load_gif_external(const char *const filename) { if (!filename) throw CImgArgumentException(_cimglist_instance "load_gif_external(): Specified filename is (null).", cimglist_instance); std::fclose(cimg::fopen(filename,"rb")); // Check if file exists. if (!_load_gif_external(filename,false)) if (!_load_gif_external(filename,true)) try { assign(CImg<T>().load_other(filename)); } catch (CImgException&) { assign(); } if (is_empty()) throw CImgIOException(_cimglist_instance
0
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
160,301,249,085,232,000,000,000,000,000,000,000,000
15
Fix other issues in 'CImg<T>::load_bmp()'.
static void php_wddx_process_data(void *user_data, const XML_Char *s, int len) { st_entry *ent; wddx_stack *stack = (wddx_stack *)user_data; TSRMLS_FETCH(); if (!wddx_stack_is_empty(stack) && !stack->done) { wddx_stack_top(stack, (void**)&ent); switch (Z_TYPE_P(ent)) { case ST_STRING: if (Z_STRLEN_P(ent->data) == 0) { STR_FREE(Z_STRVAL_P(ent->data)); Z_STRVAL_P(ent->data) = estrndup(s, len); Z_STRLEN_P(ent->data) = len; } else { Z_STRVAL_P(ent->data) = erealloc(Z_STRVAL_P(ent->data), Z_STRLEN_P(ent->data) + len + 1); memcpy(Z_STRVAL_P(ent->data) + Z_STRLEN_P(ent->data), s, len); Z_STRLEN_P(ent->data) += len; Z_STRVAL_P(ent->data)[Z_STRLEN_P(ent->data)] = '\0'; } break; case ST_BINARY: if (Z_STRLEN_P(ent->data) == 0) { STR_FREE(Z_STRVAL_P(ent->data)); Z_STRVAL_P(ent->data) = estrndup(s, len + 1); } else { Z_STRVAL_P(ent->data) = erealloc(Z_STRVAL_P(ent->data), Z_STRLEN_P(ent->data) + len + 1); memcpy(Z_STRVAL_P(ent->data) + Z_STRLEN_P(ent->data), s, len); } Z_STRLEN_P(ent->data) += len; Z_STRVAL_P(ent->data)[Z_STRLEN_P(ent->data)] = '\0'; break; case ST_NUMBER: Z_TYPE_P(ent->data) = IS_STRING; Z_STRLEN_P(ent->data) = len; Z_STRVAL_P(ent->data) = estrndup(s, len); convert_scalar_to_number(ent->data TSRMLS_CC); break; case ST_BOOLEAN: if (!strcmp(s, "true")) { Z_LVAL_P(ent->data) = 1; } else if (!strcmp(s, "false")) { Z_LVAL_P(ent->data) = 0; } else { stack->top--; zval_ptr_dtor(&ent->data); if (ent->varname) efree(ent->varname); efree(ent); } break; case ST_DATETIME: { char *tmp; tmp = emalloc(len + 1); memcpy(tmp, s, len); tmp[len] = '\0'; Z_LVAL_P(ent->data) = php_parse_date(tmp, NULL); /* date out of range < 1969 or > 2038 */ if (Z_LVAL_P(ent->data) == -1) { Z_TYPE_P(ent->data) = IS_STRING; Z_STRLEN_P(ent->data) = len; Z_STRVAL_P(ent->data) = estrndup(s, len); } efree(tmp); } break; default: break; } } }
1
[ "CWE-119" ]
php-src
b1bd4119bcafab6f9a8f84d92cd65eec3afeface
102,684,035,101,466,200,000,000,000,000,000,000,000
78
Fixed bug #71587 - Use-After-Free / Double-Free in WDDX Deserialize
Status buildCredentials(BSONObjBuilder* builder, const auth::CreateOrUpdateUserArgs& args) { if (!args.hasPassword) { // Must be external user. builder->append("external", true); return Status::OK(); } bool buildSCRAMSHA1 = false, buildSCRAMSHA256 = false; if (args.mechanisms.empty()) { buildSCRAMSHA1 = sequenceContains(saslGlobalParams.authenticationMechanisms, "SCRAM-SHA-1"); buildSCRAMSHA256 = sequenceContains(saslGlobalParams.authenticationMechanisms, "SCRAM-SHA-256"); } else { for (const auto& mech : args.mechanisms) { if (mech == "SCRAM-SHA-1") { buildSCRAMSHA1 = true; } else if (mech == "SCRAM-SHA-256") { buildSCRAMSHA256 = true; } else { return {ErrorCodes::BadValue, str::stream() << "Unknown auth mechanism '" << mech << "'"}; } if (!sequenceContains(saslGlobalParams.authenticationMechanisms, mech)) { return {ErrorCodes::BadValue, str::stream() << mech << " not supported in authMechanisms"}; } } } if (buildSCRAMSHA1) { // Add SCRAM-SHA-1 credentials. std::string hashedPwd; if (args.digestPassword) { hashedPwd = createPasswordDigest(args.userName.getUser(), args.password); } else { hashedPwd = args.password; } auto sha1Cred = scram::Secrets<SHA1Block>::generateCredentials( hashedPwd, saslGlobalParams.scramSHA1IterationCount.load()); builder->append("SCRAM-SHA-1", sha1Cred); } if (buildSCRAMSHA256) { // FCV check is deferred till this point so that the suitability checks can be performed // regardless. const auto fcv = serverGlobalParams.featureCompatibility.getVersion(); if (fcv < ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo40) { buildSCRAMSHA256 = false; } } if (buildSCRAMSHA256) { if (!args.digestPassword) { return {ErrorCodes::BadValue, "Use of SCRAM-SHA-256 requires undigested passwords"}; } const auto swPwd = saslPrep(args.password); if (!swPwd.isOK()) { return swPwd.getStatus(); } auto sha256Cred = scram::Secrets<SHA256Block>::generateCredentials( swPwd.getValue(), saslGlobalParams.scramSHA256IterationCount.load()); builder->append("SCRAM-SHA-256", sha256Cred); } return Status::OK(); }
0
[ "CWE-613" ]
mongo
6dfb92b1299de04677d0bd2230e89a52eb01003c
223,339,671,733,135,300,000,000,000,000,000,000,000
67
SERVER-38984 Validate unique User ID on UserCache hit (cherry picked from commit e55d6e2292e5dbe2f97153251d8193d1cc89f5d7)
bool Animation::operator==(const Animation &other) const { return this->channels == other.channels && this->extensions == other.extensions && this->extras == other.extras && this->name == other.name && this->samplers == other.samplers; }
0
[ "CWE-20" ]
tinygltf
52ff00a38447f06a17eab1caa2cf0730a119c751
300,751,785,727,685,300,000,000,000,000,000,000,000
5
Do not expand file path since it's not necessary for glTF asset path (URI) and for security reasons (`wordexp`).
static unsigned gen_handle(struct tcf_proto *tp, unsigned salt) { struct rsvp_head *data = tp->root; int i = 0xFFFF; while (i-- > 0) { u32 h; if ((data->hgenerator += 0x10000) == 0) data->hgenerator = 0x10000; h = data->hgenerator|salt; if (rsvp_get(tp, h) == 0) return h; } return 0; }
0
[ "CWE-200" ]
linux-2.6
8a47077a0b5aa2649751c46e7a27884e6686ccbf
170,101,137,156,179,410,000,000,000,000,000,000,000
15
[NETLINK]: Missing padding fields in dumped structures Plug holes with padding fields and initialized them to zero. Signed-off-by: Patrick McHardy <[email protected]> Signed-off-by: David S. Miller <[email protected]>
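The bug class here is implicit structure padding copied out to userspace. A minimal sketch with a hypothetical struct (not one from the patch):

#include <stdint.h>
#include <string.h>

struct dumped {
    uint8_t  proto;     /* compiler inserts 3 bytes of padding here */
    uint32_t handle;
};

static void fill(struct dumped *d)
{
    /* zero the whole struct first so the padding holes cannot carry
     * stale kernel bytes out through the netlink dump */
    memset(d, 0, sizeof(*d));
    d->proto  = 1;
    d->handle = 0x10000;
}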
static void gfar_set_mac_for_addr(struct net_device *dev, int num, const u8 *addr) { struct gfar_private *priv = netdev_priv(dev); struct gfar __iomem *regs = priv->gfargrp[0].regs; u32 tempval; u32 __iomem *macptr = &regs->macstnaddr1; macptr += num*2; /* For a station address of 0x12345678ABCD in transmission * order (BE), MACnADDR1 is set to 0xCDAB7856 and * MACnADDR2 is set to 0x34120000. */ tempval = (addr[5] << 24) | (addr[4] << 16) | (addr[3] << 8) | addr[2]; gfar_write(macptr, tempval); tempval = (addr[1] << 24) | (addr[0] << 16); gfar_write(macptr+1, tempval); }
0
[]
linux
d8861bab48b6c1fc3cdbcab8ff9d1eaea43afe7f
102,045,871,381,247,820,000,000,000,000,000,000,000
23
gianfar: fix jumbo packets+napi+rx overrun crash When using jumbo packets and overrunning rx queue with napi enabled, the following sequence is observed in gfar_add_rx_frag: | lstatus | | skb | t | lstatus, size, flags | first | len, data_len, *ptr | ---+--------------------------------------+-------+-----------------------+ 13 | 18002348, 9032, INTERRUPT LAST | 0 | 9600, 8000, f554c12e | 12 | 10000640, 1600, INTERRUPT | 0 | 8000, 6400, f554c12e | 11 | 10000640, 1600, INTERRUPT | 0 | 6400, 4800, f554c12e | 10 | 10000640, 1600, INTERRUPT | 0 | 4800, 3200, f554c12e | 09 | 10000640, 1600, INTERRUPT | 0 | 3200, 1600, f554c12e | 08 | 14000640, 1600, INTERRUPT FIRST | 0 | 1600, 0, f554c12e | 07 | 14000640, 1600, INTERRUPT FIRST | 1 | 0, 0, f554c12e | 06 | 1c000080, 128, INTERRUPT LAST FIRST | 1 | 0, 0, abf3bd6e | 05 | 18002348, 9032, INTERRUPT LAST | 0 | 8000, 6400, c5a57780 | 04 | 10000640, 1600, INTERRUPT | 0 | 6400, 4800, c5a57780 | 03 | 10000640, 1600, INTERRUPT | 0 | 4800, 3200, c5a57780 | 02 | 10000640, 1600, INTERRUPT | 0 | 3200, 1600, c5a57780 | 01 | 10000640, 1600, INTERRUPT | 0 | 1600, 0, c5a57780 | 00 | 14000640, 1600, INTERRUPT FIRST | 1 | 0, 0, c5a57780 | So at t=7 a new packets is started but not finished, probably due to rx overrun - but rx overrun is not indicated in the flags. Instead a new packets starts at t=8. This results in skb->len to exceed size for the LAST fragment at t=13 and thus a negative fragment size added to the skb. This then crashes: kernel BUG at include/linux/skbuff.h:2277! Oops: Exception in kernel mode, sig: 5 [#1] ... NIP [c04689f4] skb_pull+0x2c/0x48 LR [c03f62ac] gfar_clean_rx_ring+0x2e4/0x844 Call Trace: [ec4bfd38] [c06a84c4] _raw_spin_unlock_irqrestore+0x60/0x7c (unreliable) [ec4bfda8] [c03f6a44] gfar_poll_rx_sq+0x48/0xe4 [ec4bfdc8] [c048d504] __napi_poll+0x54/0x26c [ec4bfdf8] [c048d908] net_rx_action+0x138/0x2c0 [ec4bfe68] [c06a8f34] __do_softirq+0x3a4/0x4fc [ec4bfed8] [c0040150] run_ksoftirqd+0x58/0x70 [ec4bfee8] [c0066ecc] smpboot_thread_fn+0x184/0x1cc [ec4bff08] [c0062718] kthread+0x140/0x144 [ec4bff38] [c0012350] ret_from_kernel_thread+0x14/0x1c This patch fixes this by checking for computed LAST fragment size, so a negative sized fragment is never added. In order to prevent the newer rx frame from getting corrupted, the FIRST flag is checked to discard the incomplete older frame. Signed-off-by: Michael Braun <[email protected]> Signed-off-by: David S. Miller <[email protected]>
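Reduced to a sketch, the guard the message describes looks like the following; the names are illustrative, loosely following the description of gfar_add_rx_frag():

int size = lstatus_size - skb->len;   /* fragment size per descriptor */

/* an rx overrun can produce a LAST descriptor whose computed fragment
 * size is negative; adding it would corrupt the skb, so drop instead */
if (size < 0)
    return false;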
static bool needs_preempt_context(struct drm_i915_private *i915) { return HAS_LOGICAL_RING_PREEMPTION(i915); }
0
[ "CWE-416" ]
linux
7dc40713618c884bf07c030d1ab1f47a9dc1f310
250,983,897,934,429,100,000,000,000,000,000,000,000
4
drm/i915: Introduce a mutex for file_priv->context_idr Define a mutex for the exclusive use of interacting with the per-file context-idr, that was previously guarded by struct_mutex. This allows us to reduce the coverage of struct_mutex, with a view to removing the last bits coordinating GEM context later. (In the short term, we avoid taking struct_mutex while using the extended constructor functions, preventing some nasty recursion.) v2: s/context_lock/context_idr_lock/ Signed-off-by: Chris Wilson <[email protected]> Cc: Tvrtko Ursulin <[email protected]> Reviewed-by: Tvrtko Ursulin <[email protected]> Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
static inline void __pipe_unlock(struct pipe_inode_info *pipe) { mutex_unlock(&pipe->mutex); }
0
[ "CWE-17" ]
linux
f0d1bec9d58d4c038d0ac958c9af82be6eb18045
85,256,338,212,680,860,000,000,000,000,000,000,000
4
new helper: copy_page_from_iter() parallel to copy_page_to_iter(). pipe_write() switched to it (and became ->write_iter()). Signed-off-by: Al Viro <[email protected]>
virtual absl::optional<int> checkHeaderNameForUnderscores(absl::string_view /* header_name */) { return absl::nullopt; }
0
[ "CWE-400" ]
envoy
0e49a495826ea9e29134c1bd54fdeb31a034f40c
96,731,260,838,229,540,000,000,000,000,000,000,000
3
http/2: add stats and stream flush timeout (#139) This commit adds a new stream flush timeout to guard against a remote server that does not open window once an entire stream has been buffered for flushing. Additional stats have also been added to better understand the codecs view of active streams as well as amount of data buffered. Signed-off-by: Matt Klein <[email protected]>
delete_first_msg(void) { struct msg_hist *p; if (msg_hist_len <= 0) return FAIL; p = first_msg_hist; first_msg_hist = p->next; if (first_msg_hist == NULL) last_msg_hist = NULL; // history is empty vim_free(p->msg); vim_free(p); --msg_hist_len; return OK; }
0
[ "CWE-416" ]
vim
9f1a39a5d1cd7989ada2d1cb32f97d84360e050f
132,798,593,641,334,060,000,000,000,000,000,000,000
15
patch 8.2.4040: keeping track of allocated lines is too complicated Problem: Keeping track of allocated lines in user functions is too complicated. Solution: Instead of freeing individual lines keep them all until the end.
static int r_bin_dwarf_expand_die(RBinDwarfDIE* die) { RBinDwarfAttrValue *tmp = NULL; if (!die || die->capacity == 0) { return -EINVAL; } if (die->capacity != die->length) { return -EINVAL; } tmp = (RBinDwarfAttrValue*)realloc (die->attr_values, die->capacity * 2 * sizeof (RBinDwarfAttrValue)); if (!tmp) { return -ENOMEM; } memset ((ut8*)tmp + die->capacity, 0, die->capacity); die->attr_values = tmp; die->capacity *= 2; return 0; }
0
[ "CWE-119", "CWE-125" ]
radare2
d37d2b858ac47f2f108034be0bcecadaddfbc8b3
49,962,107,548,861,560,000,000,000,000,000,000,000
18
Fix #10465 - Avoid string on low addresses (workaround) for corrupted dwarf (#10478)
lquery_out(PG_FUNCTION_ARGS) { lquery *in = PG_GETARG_LQUERY(0); char *buf, *ptr; int i, j, totallen = 1; lquery_level *curqlevel; lquery_variant *curtlevel; curqlevel = LQUERY_FIRST(in); for (i = 0; i < in->numlevel; i++) { totallen++; if (curqlevel->numvar) totallen += 1 + (curqlevel->numvar * 4) + curqlevel->totallen; else totallen += 2 * 11 + 4; curqlevel = LQL_NEXT(curqlevel); } ptr = buf = (char *) palloc(totallen); curqlevel = LQUERY_FIRST(in); for (i = 0; i < in->numlevel; i++) { if (i != 0) { *ptr = '.'; ptr++; } if (curqlevel->numvar) { if (curqlevel->flag & LQL_NOT) { *ptr = '!'; ptr++; } curtlevel = LQL_FIRST(curqlevel); for (j = 0; j < curqlevel->numvar; j++) { if (j != 0) { *ptr = '|'; ptr++; } memcpy(ptr, curtlevel->name, curtlevel->len); ptr += curtlevel->len; if ((curtlevel->flag & LVAR_SUBLEXEME)) { *ptr = '%'; ptr++; } if ((curtlevel->flag & LVAR_INCASE)) { *ptr = '@'; ptr++; } if ((curtlevel->flag & LVAR_ANYEND)) { *ptr = '*'; ptr++; } curtlevel = LVAR_NEXT(curtlevel); } } else { if (curqlevel->low == curqlevel->high) { sprintf(ptr, "*{%d}", curqlevel->low); } else if (curqlevel->low == 0) { if (curqlevel->high == 0xffff) { *ptr = '*'; *(ptr + 1) = '\0'; } else sprintf(ptr, "*{,%d}", curqlevel->high); } else if (curqlevel->high == 0xffff) { sprintf(ptr, "*{%d,}", curqlevel->low); } else sprintf(ptr, "*{%d,%d}", curqlevel->low, curqlevel->high); ptr = strchr(ptr, '\0'); } curqlevel = LQL_NEXT(curqlevel); } *ptr = '\0'; PG_FREE_IF_COPY(in, 0); PG_RETURN_POINTER(buf); }
0
[ "CWE-703", "CWE-189" ]
postgres
31400a673325147e1205326008e32135a78b4d8a
10,057,431,745,202,907,000,000,000,000,000,000,000
99
Predict integer overflow to avoid buffer overruns. Several functions, mostly type input functions, calculated an allocation size such that the calculation wrapped to a small positive value when arguments implied a sufficiently-large requirement. Writes past the end of the inadvertent small allocation followed shortly thereafter. Coverity identified the path_in() vulnerability; code inspection led to the rest. In passing, add check_stack_depth() to prevent stack overflow in related functions. Back-patch to 8.4 (all supported versions). The non-comment hstore changes touch code that did not exist in 8.4, so that part stops at 9.0. Noah Misch and Heikki Linnakangas, reviewed by Tom Lane. Security: CVE-2014-0064
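The fix pattern is to detect the wrap before allocating. A standalone sketch shaped after lquery_out()'s totallen computation; the helper name is hypothetical:

#include <stdint.h>
#include <limits.h>

/* Compute 1 + numvar*4 + varlen, failing instead of wrapping the way
 * plain int arithmetic would for attacker-chosen inputs. */
static int calc_alloc_size(int numvar, int varlen, int *out)
{
    uint64_t need;

    if (numvar < 0 || varlen < 0)
        return -1;
    need = 1 + (uint64_t)numvar * 4 + (uint64_t)varlen;
    if (need > INT_MAX)
        return -1;        /* would have wrapped to a small positive */
    *out = (int)need;
    return 0;
}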
static inline void ahash_request_complete(struct ahash_request *req, int err) { req->base.complete(&req->base, err); }
0
[ "CWE-835" ]
linux
ef0579b64e93188710d48667cb5e014926af9f1b
259,282,722,504,487,100,000,000,000,000,000,000,000
4
crypto: ahash - Fix EINPROGRESS notification callback The ahash API modifies the request's callback function in order to clean up after itself in some corner cases (unaligned final and missing finup). When the request is complete ahash will restore the original callback and everything is fine. However, when the request gets an EBUSY on a full queue, an EINPROGRESS callback is made while the request is still ongoing. In this case the ahash API will incorrectly call its own callback. This patch fixes the problem by creating a temporary request object on the stack which is used to relay EINPROGRESS back to the original completion function. This patch also adds code to preserve the original flags value. Fixes: ab6bf4e5e5e4 ("crypto: hash - Fix the pointer voodoo in...") Cc: <[email protected]> Reported-by: Sabrina Dubroca <[email protected]> Tested-by: Sabrina Dubroca <[email protected]> Signed-off-by: Herbert Xu <[email protected]>
ut64 Elf_(r_bin_elf_get_section_offset)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); if (!section) return UT64_MAX; return section->offset; }
0
[ "CWE-125" ]
radare2
c6d0076c924891ad9948a62d89d0bcdaf965f0cd
269,576,554,429,609,400,000,000,000,000,000,000,000
5
Fix #8731 - Crash in ELF parser with negative 32bit number
static void edge_unthrottle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct edgeport_port *edge_port = usb_get_serial_port_data(port); int status; if (edge_port == NULL) return; /* if we are implementing XON/XOFF, send the start character */ if (I_IXOFF(tty)) { unsigned char start_char = START_CHAR(tty); status = edge_write(tty, port, &start_char, 1); if (status <= 0) { dev_err(&port->dev, "%s - failed to write start character, %d\n", __func__, status); } } /* if we are implementing RTS/CTS, restart reads */ /* are the Edgeport will assert the RTS line */ if (C_CRTSCTS(tty)) { status = restart_read(edge_port); if (status) dev_err(&port->dev, "%s - read bulk usb_submit_urb failed: %d\n", __func__, status); } }
0
[ "CWE-284", "CWE-264" ]
linux
1ee0a224bc9aad1de496c795f96bc6ba2c394811
140,981,126,526,286,330,000,000,000,000,000,000,000
28
USB: io_ti: Fix NULL dereference in chase_port() The tty is NULL when the port is hanging up. chase_port() needs to check for this. This patch is intended for stable series. The behavior was observed and tested in Linux 3.2 and 3.7.1. Johan Hovold submitted a more elaborate patch for the mainline kernel. [ 56.277883] usb 1-1: edge_bulk_in_callback - nonzero read bulk status received: -84 [ 56.278811] usb 1-1: USB disconnect, device number 3 [ 56.278856] usb 1-1: edge_bulk_in_callback - stopping read! [ 56.279562] BUG: unable to handle kernel NULL pointer dereference at 00000000000001c8 [ 56.280536] IP: [<ffffffff8144e62a>] _raw_spin_lock_irqsave+0x19/0x35 [ 56.281212] PGD 1dc1b067 PUD 1e0f7067 PMD 0 [ 56.282085] Oops: 0002 [#1] SMP [ 56.282744] Modules linked in: [ 56.283512] CPU 1 [ 56.283512] Pid: 25, comm: khubd Not tainted 3.7.1 #1 innotek GmbH VirtualBox/VirtualBox [ 56.283512] RIP: 0010:[<ffffffff8144e62a>] [<ffffffff8144e62a>] _raw_spin_lock_irqsave+0x19/0x35 [ 56.283512] RSP: 0018:ffff88001fa99ab0 EFLAGS: 00010046 [ 56.283512] RAX: 0000000000000046 RBX: 00000000000001c8 RCX: 0000000000640064 [ 56.283512] RDX: 0000000000010000 RSI: ffff88001fa99b20 RDI: 00000000000001c8 [ 56.283512] RBP: ffff88001fa99b20 R08: 0000000000000000 R09: 0000000000000000 [ 56.283512] R10: 0000000000000000 R11: ffffffff812fcb4c R12: ffff88001ddf53c0 [ 56.283512] R13: 0000000000000000 R14: 00000000000001c8 R15: ffff88001e19b9f4 [ 56.283512] FS: 0000000000000000(0000) GS:ffff88001fd00000(0000) knlGS:0000000000000000 [ 56.283512] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b [ 56.283512] CR2: 00000000000001c8 CR3: 000000001dc51000 CR4: 00000000000006e0 [ 56.283512] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 56.283512] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 [ 56.283512] Process khubd (pid: 25, threadinfo ffff88001fa98000, task ffff88001fa94f80) [ 56.283512] Stack: [ 56.283512] 0000000000000046 00000000000001c8 ffffffff810578ec ffffffff812fcb4c [ 56.283512] ffff88001e19b980 0000000000002710 ffffffff812ffe81 0000000000000001 [ 56.283512] ffff88001fa94f80 0000000000000202 ffffffff00000001 0000000000000296 [ 56.283512] Call Trace: [ 56.283512] [<ffffffff810578ec>] ? add_wait_queue+0x12/0x3c [ 56.283512] [<ffffffff812fcb4c>] ? usb_serial_port_work+0x28/0x28 [ 56.283512] [<ffffffff812ffe81>] ? chase_port+0x84/0x2d6 [ 56.283512] [<ffffffff81063f27>] ? try_to_wake_up+0x199/0x199 [ 56.283512] [<ffffffff81263a5c>] ? tty_ldisc_hangup+0x222/0x298 [ 56.283512] [<ffffffff81300171>] ? edge_close+0x64/0x129 [ 56.283512] [<ffffffff810612f7>] ? __wake_up+0x35/0x46 [ 56.283512] [<ffffffff8106135b>] ? should_resched+0x5/0x23 [ 56.283512] [<ffffffff81264916>] ? tty_port_shutdown+0x39/0x44 [ 56.283512] [<ffffffff812fcb4c>] ? usb_serial_port_work+0x28/0x28 [ 56.283512] [<ffffffff8125d38c>] ? __tty_hangup+0x307/0x351 [ 56.283512] [<ffffffff812e6ddc>] ? usb_hcd_flush_endpoint+0xde/0xed [ 56.283512] [<ffffffff8144e625>] ? _raw_spin_lock_irqsave+0x14/0x35 [ 56.283512] [<ffffffff812fd361>] ? usb_serial_disconnect+0x57/0xc2 [ 56.283512] [<ffffffff812ea99b>] ? usb_unbind_interface+0x5c/0x131 [ 56.283512] [<ffffffff8128d738>] ? __device_release_driver+0x7f/0xd5 [ 56.283512] [<ffffffff8128d9cd>] ? device_release_driver+0x1a/0x25 [ 56.283512] [<ffffffff8128d393>] ? bus_remove_device+0xd2/0xe7 [ 56.283512] [<ffffffff8128b7a3>] ? device_del+0x119/0x167 [ 56.283512] [<ffffffff812e8d9d>] ? usb_disable_device+0x6a/0x180 [ 56.283512] [<ffffffff812e2ae0>] ? usb_disconnect+0x81/0xe6 [ 56.283512] [<ffffffff812e4435>] ? hub_thread+0x577/0xe82 [ 56.283512] [<ffffffff8144daa7>] ? __schedule+0x490/0x4be [ 56.283512] [<ffffffff8105798f>] ? abort_exclusive_wait+0x79/0x79 [ 56.283512] [<ffffffff812e3ebe>] ? usb_remote_wakeup+0x2f/0x2f [ 56.283512] [<ffffffff812e3ebe>] ? usb_remote_wakeup+0x2f/0x2f [ 56.283512] [<ffffffff810570b4>] ? kthread+0x81/0x89 [ 56.283512] [<ffffffff81057033>] ? __kthread_parkme+0x5c/0x5c [ 56.283512] [<ffffffff8145387c>] ? ret_from_fork+0x7c/0xb0 [ 56.283512] [<ffffffff81057033>] ? __kthread_parkme+0x5c/0x5c [ 56.283512] Code: 8b 7c 24 08 e8 17 0b c3 ff 48 8b 04 24 48 83 c4 10 c3 53 48 89 fb 41 50 e8 e0 0a c3 ff 48 89 04 24 e8 e7 0a c3 ff ba 00 00 01 00 <f0> 0f c1 13 48 8b 04 24 89 d1 c1 ea 10 66 39 d1 74 07 f3 90 66 [ 56.283512] RIP [<ffffffff8144e62a>] _raw_spin_lock_irqsave+0x19/0x35 [ 56.283512] RSP <ffff88001fa99ab0> [ 56.283512] CR2: 00000000000001c8 [ 56.283512] ---[ end trace 49714df27e1679ce ]--- Signed-off-by: Wolfgang Frisch <[email protected]> Cc: Johan Hovold <[email protected]> Cc: stable <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
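The stable fix referred to above amounts to an early NULL check in chase_port(). A sketch; tty_port_tty_get()/tty_kref_put() are the mainline idiom and are assumed here rather than quoted from the patch:

/* during hangup the port has no tty attached; the oops above is the
 * spinlock dereference inside the NULL tty */
struct tty_struct *tty = tty_port_tty_get(&port->port);

if (!tty)
    return;
/* ... wait for the port to drain ... */
tty_kref_put(tty);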
R_API ut8 *r_bin_java_cp_get_idx_bytes(RBinJavaObj *bin, ut16 idx, ut32 *out_sz) { RBinJavaCPTypeObj *cp_obj = r_bin_java_get_item_from_bin_cp_list (bin, idx); if (!cp_obj || !out_sz) { return NULL; } if (out_sz) { *out_sz = 0; } switch (cp_obj->tag) { case R_BIN_JAVA_CP_INTEGER: case R_BIN_JAVA_CP_FLOAT: return r_bin_java_cp_get_4bytes (cp_obj->tag, out_sz, cp_obj->info.cp_integer.bytes.raw, 5); case R_BIN_JAVA_CP_LONG: case R_BIN_JAVA_CP_DOUBLE: return r_bin_java_cp_get_4bytes (cp_obj->tag, out_sz, cp_obj->info.cp_long.bytes.raw, 9); case R_BIN_JAVA_CP_UTF8: // eprintf ("Getting idx: %d = %p (3+0x%"PFMT64x")\n", idx, cp_obj, cp_obj->info.cp_utf8.length); if (cp_obj->info.cp_utf8.length > 0) { return r_bin_java_cp_get_utf8 (cp_obj->tag, out_sz, cp_obj->info.cp_utf8.bytes, cp_obj->info.cp_utf8.length); } } return NULL; }
0
[ "CWE-119", "CWE-788" ]
radare2
6c4428f018d385fc80a33ecddcb37becea685dd5
12,354,823,805,894,273,000,000,000,000,000,000,000
24
Improve boundary checks to fix oobread segfaults ##crash * Reported by Cen Zhang via huntr.dev * Reproducer: bins/fuzzed/javaoob-havoc.class
static int table_do_fn_check_lengths(void *r_, const char *key, const char *value) { request_rec *r = r_; if (value == NULL || r->server->limit_req_fieldsize >= strlen(value) ) return 1; r->status = HTTP_BAD_REQUEST; apr_table_setn(r->notes, "error-notes", apr_pstrcat(r->pool, "Size of a request header field " "after merging exceeds server limit.<br />" "\n<pre>\n", ap_escape_html(r->pool, key), "</pre>\n", NULL)); ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00560) "Request header " "exceeds LimitRequestFieldSize after merging: %s", key); return 0; }
0
[ "CWE-703" ]
httpd
be0f5335e3e73eb63253b050fdc23f252f5c8ae3
193,647,843,161,062,470,000,000,000,000,000,000,000
18
*) SECURITY: CVE-2015-0253 (cve.mitre.org) core: Fix a crash introduced in with ErrorDocument 400 pointing to a local URL-path with the INCLUDES filter active, introduced in 2.4.11. PR 57531. [Yann Ylavic] Submitted By: ylavic Committed By: covener git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1664205 13f79535-47bb-0310-9956-ffa450edef68
void ext4_dirty_inode(struct inode *inode) { handle_t *handle; handle = ext4_journal_start(inode, 2); if (IS_ERR(handle)) goto out; ext4_mark_inode_dirty(handle, inode); ext4_journal_stop(handle); out: return; }
0
[ "CWE-703" ]
linux
744692dc059845b2a3022119871846e74d4f6e11
187,567,033,159,412,260,000,000,000,000,000,000,000
14
ext4: use ext4_get_block_write in buffer write Allocate uninitialized extent before ext4 buffer write and convert the extent to initialized after io completes. The purpose is to make sure an extent can only be marked initialized after it has been written with new data so we can safely drop the i_mutex lock in ext4 DIO read without exposing stale data. This helps to improve multi-thread DIO read performance on high-speed disks. Skip the nobh and data=journal mount cases to make things simple for now. Signed-off-by: Jiaying Zhang <[email protected]> Signed-off-by: "Theodore Ts'o" <[email protected]>
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask) { /* * Huge pages are NOT write protected when we start dirty logging in * initially-all-set mode; must write protect them here so that they * are split to 4K on the first write. * * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn * of memslot has no such restriction, so the range can cross two large * pages. */ if (kvm_dirty_log_manual_protect_and_init_set(kvm)) { gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask); gfn_t end = slot->base_gfn + gfn_offset + __fls(mask); if (READ_ONCE(eager_page_split)) kvm_mmu_try_split_huge_pages(kvm, slot, start, end, PG_LEVEL_4K); kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M); /* Cross two large pages? */ if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) != ALIGN(end << PAGE_SHIFT, PMD_SIZE)) kvm_mmu_slot_gfn_write_protect(kvm, slot, end, PG_LEVEL_2M); } /* Now handle 4K PTEs. */ if (kvm_x86_ops.cpu_dirty_log_size) kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask); else kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask); }
0
[ "CWE-476" ]
linux
9f46c187e2e680ecd9de7983e4d081c3391acc76
160,965,462,667,174,600,000,000,000,000,000,000,000
35
KVM: x86/mmu: fix NULL pointer dereference on guest INVPCID With shadow paging enabled, the INVPCID instruction results in a call to kvm_mmu_invpcid_gva. If INVPCID is executed with CR0.PG=0, the invlpg callback is not set and the result is a NULL pointer dereference. Fix it trivially by checking for mmu->invlpg before every call. There are other possibilities: - check for CR0.PG, because KVM (like all Intel processors after P5) flushes guest TLB on CR0.PG changes so that INVPCID/INVLPG are a nop with paging disabled - check for EFER.LMA, because KVM syncs and flushes when switching MMU contexts outside of 64-bit mode All of these are tricky, go for the simple solution. This is CVE-2022-1789. Reported-by: Yongkang Jia <[email protected]> Cc: [email protected] Signed-off-by: Paolo Bonzini <[email protected]>
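The "trivial" fix the message describes is a guard on the optional callback; a sketch with the argument list abridged:

/* with CR0.PG=0 the shadow-paging invlpg hook was never installed,
 * so guard every indirect call through it */
if (mmu->invlpg)
    mmu->invlpg(vcpu, gva, root_hpa);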
Field *Field::new_key_field(MEM_ROOT *root, TABLE *new_table, uchar *new_ptr, uint32 length, uchar *new_null_ptr, uint new_null_bit) { Field *tmp; if ((tmp= make_new_field(root, new_table, table == new_table))) { tmp->ptr= new_ptr; tmp->null_ptr= new_null_ptr; tmp->null_bit= new_null_bit; } return tmp; }
0
[ "CWE-416", "CWE-703" ]
server
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
133,152,752,778,428,050,000,000,000,000,000,000,000
13
MDEV-24176 Server crashes after insert in the table with virtual column generated using date_format() and if() vcol_info->expr is allocated on expr_arena at parsing stage. Since expr item is allocated on expr_arena all its containee items must be allocated on expr_arena too. Otherwise fix_session_expr() will encounter prematurely freed item. When table is reopened from cache vcol_info contains stale expression. We refresh expression via TABLE::vcol_fix_exprs() but first we must prepare a proper context (Vcol_expr_context) which meets some requirements: 1. As noted above expr update must be done on expr_arena as there may be new items created. It was a bug in fix_session_expr_for_read() and was just not reproduced because of no second refix. Now refix is done for more cases so it does reproduce. Tests affected: vcol.binlog 2. Also name resolution context must be narrowed to the single table. Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes 3. sql_mode must be clean and not fail expr update. sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc must not affect vcol expression update. If the table was created successfully any further evaluation must not fail. Tests affected: main.func_like Reviewed by: Sergei Golubchik <[email protected]>
Colour* colour() { return colour_; }
0
[ "CWE-20" ]
libvpx
f00890eecdf8365ea125ac16769a83aa6b68792d
332,394,686,896,294,500,000,000,000,000,000,000,000
1
update libwebm to libwebm-1.0.0.27-352-g6ab9fcf https://chromium.googlesource.com/webm/libwebm/+log/af81f26..6ab9fcf Change-Id: I9d56e1fbaba9b96404b4fbabefddc1a85b79c25d
void loadServerConfig(char *filename, char *options) { sds config = sdsempty(); char buf[CONFIG_MAX_LINE+1]; /* Load the file content */ if (filename) { FILE *fp; if (filename[0] == '-' && filename[1] == '\0') { fp = stdin; } else { if ((fp = fopen(filename,"r")) == NULL) { serverLog(LL_WARNING, "Fatal error, can't open config file '%s'", filename); exit(1); } } while(fgets(buf,CONFIG_MAX_LINE+1,fp) != NULL) config = sdscat(config,buf); if (fp != stdin) fclose(fp); } /* Append the additional options */ if (options) { config = sdscat(config,"\n"); config = sdscat(config,options); } loadServerConfigFromString(config); sdsfree(config); }
0
[ "CWE-119", "CWE-787" ]
redis
6d9f8e2462fc2c426d48c941edeb78e5df7d2977
229,262,902,582,077,680,000,000,000,000,000,000,000
29
Security: CONFIG SET client-output-buffer-limit overflow fixed. This commit fixes a vulnerability reported by Cory Duplantis of Cisco Talos, see TALOS-2016-0206 for reference. CONFIG SET client-output-buffer-limit accepts as client class "master" which is actually only used to implement CLIENT KILL. The "master" class has ID 3. What happens is that the global structure: server.client_obuf_limits[class] is accessed with class = 3. However it is a 3-element array, so writing the 4th element means writing up to 24 bytes of memory *after* the end of the array, since the structure is defined as: typedef struct clientBufferLimitsConfig { unsigned long long hard_limit_bytes; unsigned long long soft_limit_bytes; time_t soft_limit_seconds; } clientBufferLimitsConfig; EVALUATION OF IMPACT: Checking what's past the boundaries of the array in the global 'server' structure, we find AOF state fields: clientBufferLimitsConfig client_obuf_limits[CLIENT_TYPE_OBUF_COUNT]; /* AOF persistence */ int aof_state; /* AOF_(ON|OFF|WAIT_REWRITE) */ int aof_fsync; /* Kind of fsync() policy */ char *aof_filename; /* Name of the AOF file */ int aof_no_fsync_on_rewrite; /* Don't fsync if a rewrite is in prog. */ int aof_rewrite_perc; /* Rewrite AOF if % growth is > M and... */ off_t aof_rewrite_min_size; /* the AOF file is at least N bytes. */ off_t aof_rewrite_base_size; /* AOF size on latest startup or rewrite. */ off_t aof_current_size; /* AOF current size. */ Writing to most of these fields should be harmless and only cause problems in Redis persistence that should not escalate to security problems. However unfortunately writing to "aof_filename" could potentially be a security issue depending on the access pattern. Searching for "aof.filename" accesses in the source code returns many different usages of the field, including using it as input for open(), logging to the Redis log file or syslog, and calling the rename() syscall. It looks possible that attacks could lead at least to information disclosure of the state and data inside Redis. However note that the attacker must already have access to the server. But, worse than that, it looks possible that being able to change the AOF filename can be used to mount more powerful attacks: like overwriting random files with AOF data (easily a potential security issue as demonstrated here: http://antirez.com/news/96), or even more subtle attacks where the AOF filename is changed to a path where a malicious AOF file is loaded in order to exploit other potential issues when the AOF parser is fed with untrusted input (no such issue is currently known). The fix checks the places where the 'master' class is specified in order to access configuration data structures, and returns an error in these cases. WHO IS AT RISK? The "master" client class was introduced in Redis on Jul 28 2015. Every Redis instance released before this date is not vulnerable, while all the releases after this date are. Notably: Redis 3.0.x is NOT vulnerable. Redis 3.2.x IS vulnerable. Redis unstable is vulnerable. In order for the instance to be at risk, at least one of the following conditions must be true: 1. The attacker can access Redis remotely and is able to send the CONFIG SET command (often banned in managed Redis instances). 2. The attacker is able to control the "redis.conf" file and can wait for or trigger a server restart. The problem was fixed on 26th September 2016 in all the affected releases.
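The shape of the fix is a validity check on the class index before it is used to index server.client_obuf_limits[]. A sketch; the helper and constant names are assumptions based on the message, not quotes from the patch:

int class = getClientTypeByName(class_name);   /* "master" maps to 3 */

/* client_obuf_limits[] has CLIENT_TYPE_OBUF_COUNT (3) entries, so the
 * "master" class must be rejected here: it exists for CLIENT KILL only */
if (class == -1 || class >= CLIENT_TYPE_OBUF_COUNT)
    return C_ERR;                     /* refuse instead of writing OOB */

server.client_obuf_limits[class] = limits;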
void perf_event_wakeup(struct perf_event *event) { ring_buffer_wakeup(event); if (event->pending_kill) { kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); event->pending_kill = 0; } }
0
[ "CWE-416", "CWE-362" ]
linux
12ca6ad2e3a896256f086497a7c7406a547ee373
218,848,324,919,489,830,000,000,000,000,000,000,000
9
perf: Fix race in swevent hash There's a race on CPU unplug where we free the swevent hash array while it can still have events on. This will result in a use-after-free which is BAD. Simply do not free the hash array on unplug. This leaves the thing around and no use-after-free takes place. When the last swevent dies, we do a for_each_possible_cpu() iteration anyway to clean these up, at which time we'll free it, so no leakage will occur. Reported-by: Sasha Levin <[email protected]> Tested-by: Sasha Levin <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Cc: Arnaldo Carvalho de Melo <[email protected]> Cc: Frederic Weisbecker <[email protected]> Cc: Jiri Olsa <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Stephane Eranian <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: Vince Weaver <[email protected]> Signed-off-by: Ingo Molnar <[email protected]>
void CWebServer::Cmd_SendNotification(WebEmSession & session, const request& req, Json::Value &root) { std::string subject = request::findValue(&req, "subject"); std::string body = request::findValue(&req, "body"); std::string subsystem = request::findValue(&req, "subsystem"); if ( (subject.empty()) || (body.empty()) ) return; if (subsystem.empty()) subsystem = NOTIFYALL; //Add to queue if (m_notifications.SendMessage(0, std::string(""), subsystem, subject, body, std::string(""), 1, std::string(""), false)) { root["status"] = "OK"; } root["title"] = "SendNotification"; }
0
[ "CWE-89" ]
domoticz
ee70db46f81afa582c96b887b73bcd2a86feda00
150,422,528,159,140,270,000,000,000,000,000,000,000
17
Fixed possible SQL Injection Vulnerability (Thanks to Fabio Carretto!)
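The standard remedy for this bug class is a parameterized statement instead of string-pasted SQL. A sketch against SQLite's C API (Domoticz sits on SQLite, but the table and column names here are hypothetical):

#include <sqlite3.h>

/* subject/body are bound as data, so they can no longer terminate
 * the statement or splice their own SQL into it */
static int insert_notification(sqlite3 *db, const char *subject,
                               const char *body)
{
    sqlite3_stmt *stmt;
    int rc = sqlite3_prepare_v2(db,
        "INSERT INTO Notifications (Subject, Body) VALUES (?1, ?2)",
        -1, &stmt, NULL);

    if (rc != SQLITE_OK)
        return rc;
    sqlite3_bind_text(stmt, 1, subject, -1, SQLITE_TRANSIENT);
    sqlite3_bind_text(stmt, 2, body, -1, SQLITE_TRANSIENT);
    rc = sqlite3_step(stmt);
    sqlite3_finalize(stmt);
    return rc == SQLITE_DONE ? SQLITE_OK : rc;
}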
fill_record(THD *thd, TABLE *table_arg, List<Item> &fields, List<Item> &values, bool ignore_errors, bool update) { List_iterator_fast<Item> f(fields),v(values); Item *value, *fld; Item_field *field; Field *rfield; TABLE *table; bool only_unvers_fields= update && table_arg->versioned(); bool save_abort_on_warning= thd->abort_on_warning; bool save_no_errors= thd->no_errors; DBUG_ENTER("fill_record"); thd->no_errors= ignore_errors; /* Reset the table->auto_increment_field_not_null as it is valid for only one row. */ if (fields.elements) table_arg->auto_increment_field_not_null= FALSE; while ((fld= f++)) { if (!(field= fld->field_for_view_update())) { my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), fld->name.str); goto err; } value=v++; DBUG_ASSERT(value); rfield= field->field; table= rfield->table; if (table->next_number_field && rfield->field_index == table->next_number_field->field_index) table->auto_increment_field_not_null= TRUE; const bool skip_sys_field= rfield->vers_sys_field(); // TODO: && !thd->vers_modify_history() [MDEV-16546] if ((rfield->vcol_info || skip_sys_field) && !value->vcol_assignment_allowed_value() && table->s->table_category != TABLE_CATEGORY_TEMPORARY) { push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WARNING_NON_DEFAULT_VALUE_FOR_GENERATED_COLUMN, ER_THD(thd, ER_WARNING_NON_DEFAULT_VALUE_FOR_GENERATED_COLUMN), rfield->field_name.str, table->s->table_name.str); } if (only_unvers_fields && !rfield->vers_update_unversioned()) only_unvers_fields= false; if (rfield->stored_in_db()) { if (!skip_sys_field && unlikely(value->save_in_field(rfield, 0) < 0) && !ignore_errors) { my_message(ER_UNKNOWN_ERROR, ER_THD(thd, ER_UNKNOWN_ERROR), MYF(0)); goto err; } /* In sql MODE_SIMULTANEOUS_ASSIGNMENT, move field pointer on value stored in record[1] which contains row before update (see MDEV-13417) */ if (update && thd->variables.sql_mode & MODE_SIMULTANEOUS_ASSIGNMENT) rfield->move_field_offset((my_ptrdiff_t) (table->record[1] - table->record[0])); } rfield->set_has_explicit_value(); } if (update && thd->variables.sql_mode & MODE_SIMULTANEOUS_ASSIGNMENT) { // restore fields pointers on record[0] f.rewind(); while ((fld= f++)) { rfield= fld->field_for_view_update()->field; if (rfield->stored_in_db()) { table= rfield->table; rfield->move_field_offset((my_ptrdiff_t) (table->record[0] - table->record[1])); } } } if (update) table_arg->evaluate_update_default_function(); else if (table_arg->default_field && table_arg->update_default_fields(ignore_errors)) goto err; if (table_arg->versioned() && !only_unvers_fields) table_arg->vers_update_fields(); /* Update virtual fields */ if (table_arg->vfield && table_arg->update_virtual_fields(table_arg->file, VCOL_UPDATE_FOR_WRITE)) goto err; thd->abort_on_warning= save_abort_on_warning; thd->no_errors= save_no_errors; DBUG_RETURN(thd->is_error()); err: DBUG_PRINT("error",("got error")); thd->abort_on_warning= save_abort_on_warning; thd->no_errors= save_no_errors; if (fields.elements) table_arg->auto_increment_field_not_null= FALSE; DBUG_RETURN(TRUE); }
0
[ "CWE-416" ]
server
0beed9b5e933f0ff79b3bb346524f7a451d14e38
75,904,534,340,429,490,000,000,000,000,000,000,000
108
MDEV-28097 use-after-free when WHERE has subquery with an outer reference in HAVING when resolving WHERE and ON clauses, do not look in SELECT list/aliases.
static int compat_rawv6_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg) { switch (cmd) { case SIOCOUTQ: case SIOCINQ: return -ENOIOCTLCMD; default: #ifdef CONFIG_IPV6_MROUTE return ip6mr_compat_ioctl(sk, cmd, compat_ptr(arg)); #else return -ENOIOCTLCMD; #endif } }
0
[ "CWE-20" ]
net
bceaa90240b6019ed73b49965eac7d167610be69
76,222,585,402,839,270,000,000,000,000,000,000,000
14
inet: prevent leakage of uninitialized memory to user in recv syscalls Only update *addr_len when we actually fill in sockaddr, otherwise we can return uninitialized memory from the stack to the caller in the recvfrom, recvmmsg and recvmsg syscalls. Drop the (addr_len == NULL) checks because we only get called with a valid addr_len pointer either from sock_common_recvmsg or inet_recvmsg. If a blocking read waits on a socket which is concurrently shut down we now return zero and set msg_msgnamelen to 0. Reported-by: mpb <[email protected]> Suggested-by: Eric Dumazet <[email protected]> Signed-off-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: David S. Miller <[email protected]>
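The commit message above describes a simple discipline: write *addr_len only on the code path that actually fills the sockaddr. A minimal sketch of that shape, with hypothetical names (this is not the kernel's code):

    #include <string.h>
    #include <netinet/in.h>
    #include <sys/socket.h>

    /* *addr_len is updated only after msg_name has been filled, so an
     * early return can never leak stack garbage through the out-param. */
    static void fill_source_addr(struct msghdr *msg, int *addr_len,
                                 const struct sockaddr_in6 *src)
    {
        if (!msg->msg_name)
            return;                   /* caller's *addr_len stays untouched */
        memcpy(msg->msg_name, src, sizeof(*src));
        *addr_len = sizeof(*src);     /* set strictly after the copy */
    }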
ldns_rr_list2str(const ldns_rr_list *list) { return ldns_rr_list2str_fmt(ldns_output_format_default, list); }
0
[ "CWE-415" ]
ldns
070b4595981f48a21cc6b4f5047fdc2d09d3da91
277,636,280,360,992,580,000,000,000,000,000,000,000
4
CAA and URI
static SVG_Element *svg_parse_element(GF_SVG_Parser *parser, const char *name, const char *name_space, const GF_XMLAttribute *attributes, u32 nb_attributes, SVG_NodeStack *parent, Bool *has_ns) { GF_FieldInfo info; u32 tag, i, count, ns, xmlns; Bool needs_init, has_id; SVG_Element *elt = NULL; const char *node_name = NULL; const char *ev_event, *ev_observer; SVG_DeferredAnimation *anim = NULL; char *ID = NULL; GF_LOG(GF_LOG_DEBUG, GF_LOG_PARSER, ("[SVG Parsing] Parsing node %s\n", name)); *has_ns = GF_FALSE; svg_check_namespace(parser, attributes, nb_attributes, has_ns); for (i=0; i<nb_attributes; i++) { GF_XMLAttribute *att = (GF_XMLAttribute *)&attributes[i]; if (!att->value || !strlen(att->value)) continue; /* FIXME: This should be changed to reflect that xml:id has precedence over id if both are specified with different values */ if (!stricmp(att->name, "id") || !stricmp(att->name, "xml:id")) { if (!ID) ID = att->value; } } /* CHECK: overriding the element namespace with the parent one, if given ??? This is wrong ??*/ xmlns = parser->current_ns; if (name_space) { xmlns = gf_sg_get_namespace_code(parser->load->scene_graph, (char *) name_space); if (!xmlns) { GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[SVG Parsing] line %d - XMLNS prefix %s not defined - skipping\n", gf_xml_sax_get_line(parser->sax_parser), name_space)); return NULL; } } /* Translates the node type (called name) from a String into a unique numeric identifier in GPAC */ tag = xmlns ? gf_xml_get_element_tag(name, xmlns) : TAG_UndefinedNode; if (tag == TAG_UndefinedNode) { #ifdef SKIP_UNKNOWN_NODES GF_LOG(GF_LOG_DEBUG, GF_LOG_PARSER, ("[SVG Parsing] line %d - Unknown element %s - skipping\n", gf_xml_sax_get_line(parser->sax_parser), name)); return NULL; #else tag = TAG_DOMFullNode; #endif } /* If this element has an ID, we look in the list of elements already created in advance (in case of reference) to see if it is there, in which case we will reuse it*/ has_id = GF_FALSE; count = gf_list_count(parser->peeked_nodes); if (count && ID) { for (i=0; i<count; i++) { GF_Node *n = (GF_Node *)gf_list_get(parser->peeked_nodes, i); const char *n_id = gf_node_get_name(n); if (n_id && !strcmp(n_id, ID)) { gf_list_rem(parser->peeked_nodes, i); has_id = GF_TRUE; elt = (SVG_Element*)n; break; } } } /* If the element was found in the list of elements already created, we do not need to create it, we reuse it. Otherwise, we create it based on the tag */ if (!has_id) { /* Creates a node in the current scene graph */ elt = (SVG_Element*)gf_node_new(parser->load->scene_graph, tag); if (!elt) { parser->last_error = GF_SG_UNKNOWN_NODE; return NULL; } /* CHECK: Why isn't this code in the gf_node_new call ?? */ if (tag == TAG_DOMFullNode) { GF_DOMFullNode *d = (GF_DOMFullNode *)elt; d->name = gf_strdup(name); d->ns = xmlns; if (ID) gf_svg_parse_element_id((GF_Node *)d, ID, GF_FALSE); } } /* We indicate that the element is used by its parent (reference counting for safe deleting) */ gf_node_register((GF_Node *)elt, (parent ? (GF_Node *)parent->node : NULL)); /* We attach this element as the last child of its parent */ if (parent && elt) gf_node_list_add_child_last( & parent->node->children, (GF_Node*)elt, & parent->last_child); /* By default, all elements will need initialization for rendering, except some that will explicitly set it to 0 */ needs_init = GF_TRUE; if (gf_svg_is_animation_tag(tag)) { GF_SAFEALLOC(anim, SVG_DeferredAnimation); if (!anim) { parser->last_error = GF_OUT_OF_MEM; return NULL; } /*default anim target is parent node*/ anim->animation_elt = elt; if (!parent) { if (parser->command) { anim->target = anim->anim_parent = (SVG_Element*) parser->command->node; } } else { anim->target = anim->anim_parent = parent->node; } } else if (gf_svg_is_timing_tag(tag)) { /* warning: we use the SVG_DeferredAnimation structure for some timing nodes which are not animations, but we put the parse stage at 1 (timing) see svg_parse_animation. */ GF_SAFEALLOC(anim, SVG_DeferredAnimation); if (!anim) { parser->last_error = GF_OUT_OF_MEM; return NULL; } /*default anim target is parent node*/ anim->animation_elt = elt; if (!parent) { if (parser->command) { anim->target = anim->anim_parent = (SVG_Element*) parser->command->node; } } else { anim->target = anim->anim_parent = parent->node; } anim->resolve_stage = 1; } else if ((tag == TAG_SVG_script) || (tag==TAG_SVG_handler)) { /* Scripts and handlers don't render and have no initialization phase */ needs_init = GF_FALSE; } ev_event = ev_observer = NULL; #ifdef SKIP_ATTS nb_attributes = 0; #endif /*set the root of the SVG tree BEFORE processing events in order to have it setup for script init (e.g. load events, including in root svg)*/ if ((tag == TAG_SVG_svg) && !parser->has_root) { svg_init_root_element(parser, elt); } /*parse all att*/ for (i=0; i<nb_attributes; i++) { GF_XMLAttribute *att = (GF_XMLAttribute *)&attributes[i]; char *att_name = NULL; if (!att->value || !strlen(att->value)) continue; /* first determine in which namespace is the attribute and store the result in ns, then shift the char buffer to point to the local name of the attribute*/ ns = xmlns; att_name = strchr(att->name, ':'); if (att_name) { if (!strncmp(att->name, "xmlns", 5)) { ns = gf_sg_get_namespace_code(parser->load->scene_graph, att_name+1); att_name = att->name; } else { att_name[0] = 0; ns = gf_sg_get_namespace_code(parser->load->scene_graph, att->name); att_name[0] = ':'; att_name++; } } else { att_name = att->name; } /* Begin of special cases of attributes */ /* CHECK: Shouldn't namespaces be checked here ? */ if (!stricmp(att_name, "style")) { gf_svg_parse_style((GF_Node *)elt, att->value); continue; } /* Some attributes of the animation elements cannot be parsed (into typed values) until the type of value is known, we defer the parsing and store them temporarily as strings */ if (anim) { if (!stricmp(att_name, "to")) { anim->to = gf_strdup(att->value); continue; } if (!stricmp(att_name, "from")) { anim->from = gf_strdup(att->value); continue; } if (!stricmp(att_name, "by")) { anim->by = gf_strdup(att->value); continue; } if (!stricmp(att_name, "values")) { anim->values = gf_strdup(att->value); continue; } if ((tag == TAG_SVG_animateTransform) && !stricmp(att_name, "type")) { anim->type = gf_strdup(att->value); continue; } } /* Special case for xlink:href attributes */ if ((ns == GF_XMLNS_XLINK) && !stricmp(att_name, "href") ) { if (gf_svg_is_animation_tag(tag)) { /* For xlink:href in animation elements, we try to locate the target of the xlink:href to determine the type of values to be animated */ assert(anim); anim->target_id = gf_strdup(att->value); /*The target may be NULL, if it has not yet been parsed, we will try to resolve it later on */ anim->target = (SVG_Element *) gf_sg_find_node_by_name(parser->load->scene_graph, anim->target_id + 1); continue; } else { /* For xlink:href attribute on elements other than animation elements, we create the attribute, parse it and try to do some special processing on it */ XMLRI *iri = NULL; if (gf_node_get_attribute_by_tag((GF_Node *)elt, TAG_XLINK_ATT_href, GF_TRUE, GF_FALSE, &info)==GF_OK) { gf_svg_parse_attribute((GF_Node *)elt, &info, att->value, 0); iri = (XMLRI *)info.far_ptr; /* extract streamID ref or data URL and store as file */ svg_post_process_href(parser, (GF_Node *)elt, iri); continue; } } } /* For the XML Event handler element, we need to defer the parsing of some attributes */ if ((tag == TAG_SVG_handler) && (ns == GF_XMLNS_XMLEV)) { if (!stricmp(att_name, "event") ) { ev_event = att->value; continue; } if (!stricmp(att_name, "observer") ) { ev_observer = att->value; continue; } } /*laser specific stuff*/ if (ns == GF_XMLNS_LASER) { /* CHECK: we should probably check the namespace of the attribute here */ if (!stricmp(att_name, "scale") ) { if (gf_node_get_attribute_by_tag((GF_Node *)elt, TAG_SVG_ATT_transform, GF_TRUE, GF_TRUE, &info)==GF_OK) { SVG_Point pt; SVG_Transform *mat = (SVG_Transform *)info.far_ptr; svg_parse_point(&pt, att->value); gf_mx2d_add_scale(&mat->mat, pt.x, pt.y); continue; } } if (!stricmp(att_name, "translation") ) { if (gf_node_get_attribute_by_tag((GF_Node *)elt, TAG_SVG_ATT_transform, GF_TRUE, GF_TRUE, &info)==GF_OK) { SVG_Point pt; SVG_Transform *mat = (SVG_Transform *)info.far_ptr; svg_parse_point(&pt, att->value); gf_mx2d_add_translation(&mat->mat, pt.x, pt.y); continue; } } } /* For all attributes of the form 'on...', like 'onclick' we create a listener for the event on the current element, we connect the listener to a handler that contains the code in the 'on...' attribute. */ /* CHECK: we should probably check the namespace of the attribute and of the element here */ if (!strncmp(att_name, "on", 2)) { u32 evtType = gf_dom_event_type_by_name(att_name + 2); if (evtType != GF_EVENT_UNKNOWN) { SVG_handlerElement *handler = gf_dom_listener_build((GF_Node *) elt, evtType, 0); gf_dom_add_text_node((GF_Node *)handler, gf_strdup(att->value) ); gf_node_init((GF_Node *)handler); continue; } svg_report(parser, GF_OK, "Skipping unknown event handler %s on node %s", att->name, name); } /* end of special cases of attributes */ /* General attribute creation and parsing */ if (gf_node_get_attribute_by_name((GF_Node *)elt, att_name, ns, GF_TRUE, GF_FALSE, &info)==GF_OK) { #ifndef SKIP_ATTS_PARSING GF_Err e = gf_svg_parse_attribute((GF_Node *)elt, &info, att->value, 0); if (e) { svg_report(parser, e, "Error parsing attribute %s on node %s", att->name, name); continue; } if (info.fieldType == SVG_ID_datatype) { /*"when both 'id' and 'xml:id' are specified on the same element but with different values, the SVGElement::id field must return either of the values but should give precedence to the 'xml:id' attribute."*/ if (!node_name || (info.fieldIndex == TAG_XML_ATT_id)) { node_name = *(SVG_ID *)info.far_ptr; /* Check if ID start with a digit, which is not a valid ID for a node according to XML (see http://www.w3.org/TR/xml/#id) */ if (isdigit(node_name[0])) { svg_report(parser, GF_BAD_PARAM, "Invalid value %s for node %s %s", node_name, name, att->name); node_name = NULL; } } } else { switch (info.fieldIndex) { case TAG_SVG_ATT_syncMaster: case TAG_SVG_ATT_focusHighlight: case TAG_SVG_ATT_initialVisibility: case TAG_SVG_ATT_fullscreen: case TAG_SVG_ATT_requiredFonts: /*switch LASeR Configuration to v2 because these attributes are not part of v1*/ svg_lsr_set_v2(parser); break; } } #endif continue; } /* all other attributes (??? failed to be created) should fall in this category */ svg_report(parser, GF_OK, "Skipping attribute %s on node %s", att->name, name); } /* When a handler element specifies the event attribute, an implicit listener is defined */ if (ev_event) { GF_Node *node = (GF_Node *)elt; SVG_Element *listener; u32 type; listener = (SVG_Element *) gf_node_new(node->sgprivate->scenegraph, TAG_SVG_listener); /*We don't want to insert the implicit listener in the DOM. However remember the listener at the handler level in case the handler gets destroyed*/ gf_node_set_private(node, (GF_Node*)listener ); gf_node_register((GF_Node*)listener, NULL); /* this listener listens to the given type of event */ type = gf_dom_event_type_by_name(ev_event); gf_node_get_attribute_by_tag(node, TAG_XMLEV_ATT_event, GF_TRUE, GF_FALSE, &info); ((XMLEV_Event *)info.far_ptr)->type = type; gf_node_get_attribute_by_tag((GF_Node *)listener, TAG_XMLEV_ATT_event, GF_TRUE, GF_FALSE, &info); ((XMLEV_Event *)info.far_ptr)->type = type; gf_node_get_attribute_by_tag((GF_Node *)listener, TAG_XMLEV_ATT_handler, GF_TRUE, GF_FALSE, &info); ((XMLRI *)info.far_ptr)->target = node; if (ev_observer) { /* An observer was specified, so it needs to be used */ gf_node_get_attribute_by_tag((GF_Node *)listener, TAG_XMLEV_ATT_observer, GF_TRUE, GF_FALSE, &info); gf_svg_parse_attribute((GF_Node *)elt, &info, (char*)ev_observer, 0); } else { /* No observer specified, this listener listens with the parent of the handler as the event target */ gf_node_get_attribute_by_tag((GF_Node *)listener, TAG_XMLEV_ATT_target, GF_TRUE, GF_FALSE, &info); ((XMLRI *)info.far_ptr)->target = parent->node; } /* if the target was found (already parsed), we are fine, otherwise we need to try to find it again, we place the listener in the deferred listener list */ if ( ((XMLRI *)info.far_ptr)->target) gf_node_dom_listener_add(((XMLRI *)info.far_ptr)->target, (GF_Node *) listener); else gf_list_add(parser->deferred_listeners, listener); } if (!node_name && ID) node_name = ID; /* if the new element has an id, we try to resolve deferred references (including animations, href and listeners (just above)*/ if (node_name) { if (!has_id) { /* if the element was already created before this call, we don't need to get a numerical id, we have it already */ gf_svg_parse_element_id((GF_Node *)elt, node_name, parser->command_depth ? GF_TRUE : GF_FALSE); } svg_resolved_refs(parser, parser->load->scene_graph, node_name); } /* if the new element is an animation, now that all specified attributes have been found, we can start parsing them */ if (anim) { /*FIXME - we need to parse from/to/values but not initialize the stack*/ // if (parser->load->flags & GF_SM_LOAD_FOR_PLAYBACK) { needs_init = GF_FALSE; if (svg_parse_animation(parser, parser->load->scene_graph, anim, NULL, 0)) { svg_delete_deferred_anim(anim, NULL); } else { gf_list_add(parser->deferred_animations, anim); } // } else { // svg_delete_deferred_anim(anim, NULL); // } } #ifndef SKIP_INIT if (needs_init) { /* For elements that need it, we initialize the rendering stack */ gf_node_init((GF_Node *)elt); } #endif if (parent && elt) { /*mark parent element as dirty (new child added) and invalidate parent graph for progressive rendering*/ gf_node_dirty_set((GF_Node *)parent->node, GF_SG_CHILD_DIRTY, GF_TRUE); /*request scene redraw*/ if (parser->load->scene_graph->NodeCallback) { parser->load->scene_graph->NodeCallback(parser->load->scene_graph->userpriv, GF_SG_CALLBACK_MODIFIED, NULL, NULL); } } /*If we are in playback mode, we register (reference counting for safe deleting) the listener element with the element that uses it */ if ((parser->load->flags & GF_SM_LOAD_FOR_PLAYBACK) && elt && (tag==TAG_SVG_listener)) { Bool post_pone = GF_FALSE; SVG_Element *par = NULL; SVG_Element *listener = (SVG_Element *)elt; if (gf_node_get_attribute_by_tag((GF_Node *)listener, TAG_XMLEV_ATT_observer, GF_FALSE, GF_FALSE, &info) == GF_OK) { XMLRI *observer = (XMLRI *)info.far_ptr; if (observer->type == XMLRI_ELEMENTID) { if (!observer->target) post_pone = GF_TRUE; else par = (SVG_Element *)observer->target; } } if (gf_node_get_attribute_by_tag((GF_Node *)listener, TAG_XMLEV_ATT_target, GF_FALSE, GF_FALSE, &info) == GF_OK) { XMLRI *target = (XMLRI *)info.far_ptr; if (!par && (target->type == XMLRI_ELEMENTID)) { if (!target->target) post_pone = GF_TRUE; else par = (SVG_Element *)target->target; } } /*check handler, create it if not specified*/ if (parent && (gf_node_get_attribute_by_tag((GF_Node *)listener, TAG_XMLEV_ATT_handler, GF_TRUE, GF_FALSE, &info) == GF_OK)) { XMLRI *handler = (XMLRI *)info.far_ptr; if (!handler->target) { if (!handler->string) handler->target = parent->node; } } /*if event is a key event, register it with root*/ if (!par && gf_node_get_attribute_by_tag((GF_Node *)listener, TAG_XMLEV_ATT_event, GF_FALSE, GF_FALSE, &info) == GF_OK) { XMLEV_Event *ev = (XMLEV_Event *)info.far_ptr; if ((ev->type>=GF_EVENT_KEYUP) && (ev->type<=GF_EVENT_TEXTINPUT)) par = (SVG_Element*) listener->sgprivate->scenegraph->RootNode; } if (post_pone) { gf_list_add(parser->deferred_listeners, listener); } else { if (!par && parent) par = parent->node; gf_node_dom_listener_add((GF_Node *)par, (GF_Node *) listener); } } return elt; }
0
[ "CWE-787" ]
gpac
ea1eca00fd92fa17f0e25ac25652622924a9a6a0
153,623,684,625,074,010,000,000,000,000,000,000,000
439
fixed #2138
static void clear_tables(JOIN *join) { /* must clear only the non-const tables, as const tables are not re-calculated. */ for (uint i= 0 ; i < join->table_count ; i++) { if (!(join->table[i]->map & join->const_table_map)) mark_as_null_row(join->table[i]); // All fields are NULL } }
0
[ "CWE-89" ]
server
5ba77222e9fe7af8ff403816b5338b18b342053c
338,639,917,636,372,080,000,000,000,000,000,000,000
12
MDEV-21028 Server crashes in Query_arena::set_query_arena upon SELECT from view if the view has algorithm=temptable it is not updatable, so DEFAULT() for its fields is meaningless, and thus it's NULL or 0/'' for NOT NULL columns.
static int eaptls_initiate(void *type_arg, EAP_HANDLER *handler) { int status; tls_session_t *ssn; eap_tls_t *inst; VALUE_PAIR *vp; int client_cert = TRUE; int verify_mode = 0; REQUEST *request = handler->request; inst = (eap_tls_t *)type_arg; handler->tls = TRUE; handler->finished = FALSE; /* * Manually flush the sessions every so often. If HALF * of the session lifetime has passed since we last * flushed, then flush it again. * * FIXME: Also do it every N sessions? */ if (inst->conf.session_cache_enable && ((inst->conf.session_last_flushed + (inst->conf.session_timeout * 1800)) <= request->timestamp)) { RDEBUG2("Flushing SSL sessions (of #%ld)", SSL_CTX_sess_number(inst->ctx)); SSL_CTX_flush_sessions(inst->ctx, request->timestamp); inst->conf.session_last_flushed = request->timestamp; } /* * If we're TTLS or PEAP, then do NOT require a client * certificate. * * FIXME: This should be more configurable. */ if (handler->eap_type != PW_EAP_TLS) { vp = pairfind(handler->request->config_items, PW_EAP_TLS_REQUIRE_CLIENT_CERT); if (!vp) { client_cert = FALSE; } else { client_cert = vp->vp_integer; } } /* * Every new session is started only from EAP-TLS-START. * Before Sending EAP-TLS-START, open a new SSL session. * Create all the required data structures & store them * in Opaque. So that we can use these data structures * when we get the response */ ssn = eaptls_new_session(inst->ctx, client_cert); if (!ssn) { return 0; } /* * Verify the peer certificate, if asked. */ if (client_cert) { RDEBUG2("Requiring client certificate"); verify_mode = SSL_VERIFY_PEER; verify_mode |= SSL_VERIFY_FAIL_IF_NO_PEER_CERT; verify_mode |= SSL_VERIFY_CLIENT_ONCE; } SSL_set_verify(ssn->ssl, verify_mode, cbtls_verify); /* * Create a structure for all the items required to be * verified for each client and set that as opaque data * structure. * * NOTE: If we want to set each item separately then * this index should be global. */ SSL_set_ex_data(ssn->ssl, 0, (void *)handler); SSL_set_ex_data(ssn->ssl, 1, (void *)&(inst->conf)); #ifdef HAVE_OPENSSL_OCSP_H SSL_set_ex_data(ssn->ssl, 2, (void *)inst->store); #endif ssn->length_flag = inst->conf.include_length; /* * We use default fragment size, unless the Framed-MTU * tells us it's too big. Note that we do NOT account * for the EAP-TLS headers if conf->fragment_size is * large, because that config item looks to be confusing. * * i.e. it should REALLY be called MTU, and the code here * should figure out what that means for TLS fragment size. * asking the administrator to know the internal details * of EAP-TLS in order to calculate fragment sizes is * just too much. */ ssn->offset = inst->conf.fragment_size; vp = pairfind(handler->request->packet->vps, PW_FRAMED_MTU); if (vp && ((vp->vp_integer - 14) < ssn->offset)) { /* * Discount the Framed-MTU by: * 4 : EAPOL header * 4 : EAP header (code + id + length) * 1 : EAP type == EAP-TLS * 1 : EAP-TLS Flags * 4 : EAP-TLS Message length * (even if conf->include_length == 0, * just to be lazy). * --- * 14 */ ssn->offset = vp->vp_integer - 14; } handler->opaque = ((void *)ssn); handler->free_opaque = session_free; RDEBUG2("Initiate"); /* * Set up type-specific information. */ switch (handler->eap_type) { case PW_EAP_TLS: default: ssn->prf_label = "client EAP encryption"; break; case PW_EAP_TTLS: ssn->prf_label = "ttls keying material"; break; /* * PEAP-specific breakage. */ case PW_EAP_PEAP: /* * As it is a poorly designed protocol, PEAP uses * bits in the TLS header to indicate PEAP * version numbers. For now, we only support * PEAP version 0, so it doesn't matter too much. * However, if we support later versions of PEAP, * we will need this flag to indicate which * version we're currently dealing with. */ ssn->peap_flag = 0x00; /* * PEAP version 0 requires 'include_length = no', * so rather than hoping the user figures it out, * we force it here. */ ssn->length_flag = 0; ssn->prf_label = "client EAP encryption"; break; } if (inst->conf.session_cache_enable) { ssn->allow_session_resumption = 1; /* otherwise it's zero */ } /* * TLS session initialization is over. Now handle TLS * related handshaking or application data. */ status = eaptls_start(handler->eap_ds, ssn->peap_flag); RDEBUG2("Start returned %d", status); if (status == 0) return 0; /* * The next stage to process the packet. */ handler->stage = AUTHENTICATE; return 1; }
0
[ "CWE-295" ]
freeradius-server
5e698b407dcac2bc45cf03484bac4398109d25c3
75,692,422,881,160,880,000,000,000,000,000,000,000
180
Set X509_V_FLAG_CRL_CHECK_ALL
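The one-line summary refers to OpenSSL's verification flags: X509_V_FLAG_CRL_CHECK alone checks revocation only for the leaf certificate, while adding X509_V_FLAG_CRL_CHECK_ALL extends the check to every certificate in the chain. A hedged sketch of enabling both on a verification store; CRL loading and the rest of the store setup are assumed to happen elsewhere:

    #include <openssl/x509_vfy.h>

    /* Check CRLs for every certificate in the chain, not just the peer's. */
    static void enable_full_crl_checking(X509_STORE *store)
    {
        X509_STORE_set_flags(store,
                             X509_V_FLAG_CRL_CHECK | X509_V_FLAG_CRL_CHECK_ALL);
    }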
run_sigchld_trap (nchild) int nchild; { char *trap_command; int i; /* Turn off the trap list during the call to parse_and_execute () to avoid potentially infinite recursive calls. Preserve the values of last_command_exit_value, last_made_pid, and the_pipeline around the execution of the trap commands. */ trap_command = savestring (trap_list[SIGCHLD]); begin_unwind_frame ("SIGCHLD trap"); unwind_protect_int (last_command_exit_value); unwind_protect_int (last_command_exit_signal); unwind_protect_var (last_made_pid); unwind_protect_int (interrupt_immediately); unwind_protect_int (jobs_list_frozen); unwind_protect_pointer (the_pipeline); unwind_protect_pointer (subst_assign_varlist); unwind_protect_pointer (this_shell_builtin); /* We have to add the commands this way because they will be run in reverse order of adding. We don't want maybe_set_sigchld_trap () to reference freed memory. */ add_unwind_protect (xfree, trap_command); add_unwind_protect (maybe_set_sigchld_trap, trap_command); subst_assign_varlist = (WORD_LIST *)NULL; the_pipeline = (PROCESS *)NULL; running_trap = SIGCHLD + 1; set_impossible_sigchld_trap (); jobs_list_frozen = 1; for (i = 0; i < nchild; i++) { #if 0 interrupt_immediately = 1; #endif parse_and_execute (savestring (trap_command), "trap", SEVAL_NOHIST|SEVAL_RESETLINE); } run_unwind_frame ("SIGCHLD trap"); running_trap = 0; }
1
[]
bash
955543877583837c85470f7fb8a97b7aa8d45e6c
250,530,469,649,440,000,000,000,000,000,000,000,000
46
bash-4.4-rc2 release
static ssize_t nr_addr_filters_show(struct device *dev, struct device_attribute *attr, char *page) { struct pmu *pmu = dev_get_drvdata(dev); return snprintf(page, PAGE_SIZE - 1, "%d\n", pmu->nr_addr_filters); }
0
[ "CWE-362", "CWE-125" ]
linux
321027c1fe77f892f4ea07846aeae08cefbbb290
34,025,716,666,320,146,000,000,000,000,000,000,000
8
perf/core: Fix concurrent sys_perf_event_open() vs. 'move_group' race Di Shen reported a race between two concurrent sys_perf_event_open() calls where both try and move the same pre-existing software group into a hardware context. The problem is exactly that described in commit: f63a8daa5812 ("perf: Fix event->ctx locking") ... where, while we wait for a ctx->mutex acquisition, the event->ctx relation can have changed under us. That very same commit failed to recognise sys_perf_event_context() as an external access vector to the events and thereby didn't apply the established locking rules correctly. So while one sys_perf_event_open() call is stuck waiting on mutex_lock_double(), the other (which owns said locks) moves the group about. So by the time the former sys_perf_event_open() acquires the locks, the context we've acquired is stale (and possibly dead). Apply the established locking rules as per perf_event_ctx_lock_nested() to the mutex_lock_double() for the 'move_group' case. This obviously means we need to validate state after we acquire the locks. Reported-by: Di Shen (Keen Lab) Tested-by: John Dias <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Cc: Alexander Shishkin <[email protected]> Cc: Arnaldo Carvalho de Melo <[email protected]> Cc: Arnaldo Carvalho de Melo <[email protected]> Cc: Jiri Olsa <[email protected]> Cc: Kees Cook <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Min Chong <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Stephane Eranian <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: Vince Weaver <[email protected]> Fixes: f63a8daa5812 ("perf: Fix event->ctx locking") Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
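The fix described above follows a common shape: after sleeping on a lock, re-validate that the relationship you locked against still holds, and back out if it changed while you waited. A generic userspace sketch of that pattern, with illustrative types that are not the kernel's:

    #include <pthread.h>

    struct ctx   { pthread_mutex_t lock; };
    struct event { struct ctx *ctx; };

    /* Lock the context we believe the event lives in, then confirm the
     * event did not migrate while we were blocked on the mutex. */
    static int lock_event_ctx(struct event *ev, struct ctx *expected)
    {
        pthread_mutex_lock(&expected->lock);
        if (ev->ctx != expected) {                /* raced: group was moved */
            pthread_mutex_unlock(&expected->lock);
            return -1;                            /* caller re-reads ev->ctx and retries */
        }
        return 0;                                 /* state validated under the lock */
    }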
gdImagePtr gdImageCreate (int sx, int sy) { int i; gdImagePtr im; if (overflow2(sx, sy)) { return NULL; } if (overflow2(sizeof(unsigned char *), sy)) { return NULL; } if (overflow2(sizeof(unsigned char *), sx)) { return NULL; } im = (gdImage *) gdCalloc(1, sizeof(gdImage)); /* Row-major ever since gd 1.3 */ im->pixels = (unsigned char **) gdMalloc(sizeof(unsigned char *) * sy); im->AA_opacity = (unsigned char **) gdMalloc(sizeof(unsigned char *) * sy); im->polyInts = 0; im->polyAllocated = 0; im->brush = 0; im->tile = 0; im->style = 0; for (i = 0; i < sy; i++) { /* Row-major ever since gd 1.3 */ im->pixels[i] = (unsigned char *) gdCalloc(sx, sizeof(unsigned char)); im->AA_opacity[i] = (unsigned char *) gdCalloc(sx, sizeof(unsigned char)); } im->sx = sx; im->sy = sy; im->colorsTotal = 0; im->transparent = (-1); im->interlace = 0; im->thick = 1; im->AA = 0; im->AA_polygon = 0; for (i = 0; i < gdMaxColors; i++) { im->open[i] = 1; im->red[i] = 0; im->green[i] = 0; im->blue[i] = 0; } im->trueColor = 0; im->tpixels = 0; im->cx1 = 0; im->cy1 = 0; im->cx2 = im->sx - 1; im->cy2 = im->sy - 1; im->interpolation = NULL; im->interpolation_id = GD_BILINEAR_FIXED; return im; }
0
[ "CWE-190" ]
php-src
c395c6e5d7e8df37a21265ff76e48fe75ceb5ae6
21,404,625,845,625,904,000,000,000,000,000,000,000
56
Fixed bug #72446 - Integer Overflow in gdImagePaletteToTrueColor() resulting in heap overflow
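gdImageCreate above already guards its allocations with overflow2() before multiplying dimensions; the referenced bug is the same class of error in gdImagePaletteToTrueColor(). A small sketch of the guard itself, written in the same spirit as gd's check rather than copied from gd's source:

    #include <limits.h>
    #include <stdlib.h>

    /* Reject any sx * sy product that would wrap a signed int. */
    static int mul_would_overflow(int a, int b)
    {
        return a <= 0 || b <= 0 || a > INT_MAX / b;
    }

    static void *alloc_pixels(int sx, int sy, size_t elem)
    {
        if (mul_would_overflow(sx, sy))
            return NULL;                /* refuse instead of wrapping */
        return calloc((size_t)sx * (size_t)sy, elem);
    }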
void MainWindow::on_actionLayoutRemove_triggered() { // Setup the dialog. ListSelectionDialog dialog(Settings.layouts(), this); dialog.setWindowModality(QmlApplication::dialogModality()); dialog.setWindowTitle(tr("Remove Layout")); // Show the dialog. if (dialog.exec() == QDialog::Accepted) { foreach(const QString& layout, dialog.selection()) { // Update the configuration. if (Settings.removeLayout(layout)) Settings.sync(); // Locate the menu item. foreach (QAction* action, ui->menuLayout->actions()) { if (action->text() == layout) { // Remove the menu item. delete action; break; } } } // If no more custom layouts. if (Settings.layouts().size() == 0) { // Remove the Remove action and separator. ui->menuLayout->removeAction(ui->actionLayoutRemove); bool isSecondSeparator = false; foreach (QAction* action, ui->menuLayout->actions()) { if (action->isSeparator()) { if (isSecondSeparator) { delete action; break; } else { isSecondSeparator = true; } } } } } }
0
[ "CWE-89", "CWE-327", "CWE-295" ]
shotcut
f008adc039642307f6ee3378d378cdb842e52c1d
314,937,902,334,631,050,000,000,000,000,000,000,000
40
fix upgrade check is not using TLS correctly
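The shotcut fix concerns CWE-295: the upgrade check must verify the server's certificate, or an on-path attacker can substitute the response. The project itself uses Qt networking; as a language-neutral illustration of the same requirement, here is a libcurl sketch with both peer and hostname verification enforced (the URL is a placeholder):

    #include <curl/curl.h>

    /* Fetch the upgrade manifest over TLS with certificate checks on. */
    static CURLcode fetch_update_info(const char *url)
    {
        CURL *curl = curl_easy_init();
        CURLcode rc;
        if (!curl)
            return CURLE_FAILED_INIT;
        curl_easy_setopt(curl, CURLOPT_URL, url);
        curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 1L);  /* validate the chain */
        curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 2L);  /* match the hostname */
        rc = curl_easy_perform(curl);
        curl_easy_cleanup(curl);
        return rc;
    }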
Item *get_copy(THD *thd, MEM_ROOT *mem_root) { return get_item_copy<Item_direct_view_ref>(thd, mem_root, this); }
0
[ "CWE-617" ]
server
2e7891080667c59ac80f788eef4d59d447595772
254,686,870,084,318,440,000,000,000,000,000,000,000
2
MDEV-25635 Assertion failure when pushing from HAVING into WHERE of view This bug could manifest itself after pushing a where condition over a mergeable derived table / view / CTE DT into a grouping view / derived table / CTE V whose item list contained set functions with constant arguments such as MIN(2), SUM(1) etc. In such cases the field references used in the condition pushed into the view V that correspond to set functions are wrapped into Item_direct_view_ref wrappers. Due to a wrong implementation of the virtual method const_item() for the class Item_direct_view_ref the wrapped set functions with constant arguments could be erroneously taken for constant items. This could lead to a wrong result set returned by the main select query in 10.2. In 10.4 where a possibility of pushing condition from HAVING into WHERE had been added this could cause a crash. Approved by Sergey Petrunya <[email protected]>
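The message explains that the wrapper class answered const_item() itself instead of consulting the item it wraps. One way to picture the corrected shape, as a deliberately simplified C sketch (the real code is MariaDB C++ and differs in detail):

    /* A reference wrapper must delegate "am I constant?" to the wrapped
     * item; answering on its behalf is what misclassified MIN(2), SUM(1). */
    struct item {
        int (*is_const)(const struct item *self);
    };

    struct ref_wrapper {
        struct item base;
        struct item *wrapped;
    };

    static int ref_is_const(const struct item *self)
    {
        const struct ref_wrapper *w = (const struct ref_wrapper *)self;
        return w->wrapped->is_const(w->wrapped);   /* delegate, don't assume */
    }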
ar6000_rx(void *Context, struct htc_packet *pPacket) { struct ar6_softc *ar = (struct ar6_softc *)Context; struct sk_buff *skb = (struct sk_buff *)pPacket->pPktContext; int minHdrLen; u8 containsDot11Hdr = 0; int status = pPacket->Status; HTC_ENDPOINT_ID ept = pPacket->Endpoint; A_ASSERT((status) || (pPacket->pBuffer == (A_NETBUF_DATA(skb) + HTC_HEADER_LEN))); AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_RX,("ar6000_rx ar=0x%lx eid=%d, skb=0x%lx, data=0x%lx, len=0x%x status:%d", (unsigned long)ar, ept, (unsigned long)skb, (unsigned long)pPacket->pBuffer, pPacket->ActualLength, status)); if (status) { if (status != A_ECANCELED) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("RX ERR (%d) \n",status)); } } /* take lock to protect buffer counts * and adaptive power throughput state */ AR6000_SPIN_LOCK(&ar->arLock, 0); if (!status) { AR6000_STAT_INC(ar, rx_packets); ar->arNetStats.rx_bytes += pPacket->ActualLength; #ifdef ADAPTIVE_POWER_THROUGHPUT_CONTROL aptcTR.bytesReceived += a_netbuf_to_len(skb); applyAPTCHeuristics(ar); #endif /* ADAPTIVE_POWER_THROUGHPUT_CONTROL */ A_NETBUF_PUT(skb, pPacket->ActualLength + HTC_HEADER_LEN); A_NETBUF_PULL(skb, HTC_HEADER_LEN); #ifdef DEBUG if (debugdriver >= 2) { ar6000_dump_skb(skb); } #endif /* DEBUG */ } AR6000_SPIN_UNLOCK(&ar->arLock, 0); skb->dev = ar->arNetDev; if (status) { AR6000_STAT_INC(ar, rx_errors); A_NETBUF_FREE(skb); } else if (ar->arWmiEnabled == true) { if (ept == ar->arControlEp) { /* * this is a wmi control msg */ #ifdef CONFIG_PM ar6000_check_wow_status(ar, skb, true); #endif /* CONFIG_PM */ wmi_control_rx(ar->arWmi, skb); } else { WMI_DATA_HDR *dhdr = (WMI_DATA_HDR *)A_NETBUF_DATA(skb); bool is_amsdu; u8 tid; /* * This check can be removed if after a while we do not * see the warning. For now we leave it to ensure * we drop these frames accordingly in case the * target generates them for some reason. These * were used for an internal PAL but that's not * used or supported anymore. These frames should * not come up from the target. */ if (WARN_ON(WMI_DATA_HDR_GET_DATA_TYPE(dhdr) == WMI_DATA_HDR_DATA_TYPE_ACL)) { AR6000_STAT_INC(ar, rx_errors); A_NETBUF_FREE(skb); return; } #ifdef CONFIG_PM ar6000_check_wow_status(ar, NULL, false); #endif /* CONFIG_PM */ /* * this is a wmi data packet */ // NWF if (processDot11Hdr) { minHdrLen = sizeof(WMI_DATA_HDR) + sizeof(struct ieee80211_frame) + sizeof(ATH_LLC_SNAP_HDR); } else { minHdrLen = sizeof (WMI_DATA_HDR) + sizeof(ATH_MAC_HDR) + sizeof(ATH_LLC_SNAP_HDR); } /* In the case of AP mode we may receive NULL data frames * that do not have LLC hdr. They are 16 bytes in size. * Allow these frames in the AP mode. * ACL data frames don't follow ethernet frame bounds for * min length */ if (ar->arNetworkType != AP_NETWORK && ((pPacket->ActualLength < minHdrLen) || (pPacket->ActualLength > AR6000_MAX_RX_MESSAGE_SIZE))) { /* * packet is too short or too long */ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("TOO SHORT or TOO LONG\n")); AR6000_STAT_INC(ar, rx_errors); AR6000_STAT_INC(ar, rx_length_errors); A_NETBUF_FREE(skb); } else { u16 seq_no; u8 meta_type; #if 0 /* Access RSSI values here */ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("RSSI %d\n", ((WMI_DATA_HDR *) A_NETBUF_DATA(skb))->rssi)); #endif /* Get the Power save state of the STA */ if (ar->arNetworkType == AP_NETWORK) { sta_t *conn = NULL; u8 psState=0,prevPsState; ATH_MAC_HDR *datap=NULL; u16 offset; meta_type = WMI_DATA_HDR_GET_META(dhdr); psState = (((WMI_DATA_HDR *)A_NETBUF_DATA(skb))->info >> WMI_DATA_HDR_PS_SHIFT) & WMI_DATA_HDR_PS_MASK; offset = sizeof(WMI_DATA_HDR); switch (meta_type) { case 0: break; case WMI_META_VERSION_1: offset += sizeof(WMI_RX_META_V1); break; case WMI_META_VERSION_2: offset += sizeof(WMI_RX_META_V2); break; default: break; } datap = (ATH_MAC_HDR *)(A_NETBUF_DATA(skb)+offset); conn = ieee80211_find_conn(ar, datap->srcMac); if (conn) { /* if there is a change in PS state of the STA, * take appropriate steps. * 1. If Sleep-->Awake, flush the psq for the STA * Clear the PVB for the STA. * 2. If Awake-->Sleep, Starting queueing frames * the STA. */ prevPsState = STA_IS_PWR_SLEEP(conn); if (psState) { STA_SET_PWR_SLEEP(conn); } else { STA_CLR_PWR_SLEEP(conn); } if (prevPsState ^ STA_IS_PWR_SLEEP(conn)) { if (!STA_IS_PWR_SLEEP(conn)) { A_MUTEX_LOCK(&conn->psqLock); while (!A_NETBUF_QUEUE_EMPTY(&conn->psq)) { struct sk_buff *skb=NULL; skb = A_NETBUF_DEQUEUE(&conn->psq); A_MUTEX_UNLOCK(&conn->psqLock); ar6000_data_tx(skb,ar->arNetDev); A_MUTEX_LOCK(&conn->psqLock); } A_MUTEX_UNLOCK(&conn->psqLock); /* Clear the PVB for this STA */ wmi_set_pvb_cmd(ar->arWmi, conn->aid, 0); } } } else { /* This frame is from a STA that is not associated*/ A_ASSERT(false); } /* Drop NULL data frames here */ if((pPacket->ActualLength < minHdrLen) || (pPacket->ActualLength > AR6000_MAX_RX_MESSAGE_SIZE)) { A_NETBUF_FREE(skb); goto rx_done; } } is_amsdu = WMI_DATA_HDR_IS_AMSDU(dhdr) ? true : false; tid = WMI_DATA_HDR_GET_UP(dhdr); seq_no = WMI_DATA_HDR_GET_SEQNO(dhdr); meta_type = WMI_DATA_HDR_GET_META(dhdr); containsDot11Hdr = WMI_DATA_HDR_GET_DOT11(dhdr); wmi_data_hdr_remove(ar->arWmi, skb); switch (meta_type) { case WMI_META_VERSION_1: { WMI_RX_META_V1 *pMeta = (WMI_RX_META_V1 *)A_NETBUF_DATA(skb); A_PRINTF("META %d %d %d %d %x\n", pMeta->status, pMeta->rix, pMeta->rssi, pMeta->channel, pMeta->flags); A_NETBUF_PULL((void*)skb, sizeof(WMI_RX_META_V1)); break; } case WMI_META_VERSION_2: { WMI_RX_META_V2 *pMeta = (WMI_RX_META_V2 *)A_NETBUF_DATA(skb); if(pMeta->csumFlags & 0x1){ skb->ip_summed=CHECKSUM_COMPLETE; skb->csum=(pMeta->csum); } A_NETBUF_PULL((void*)skb, sizeof(WMI_RX_META_V2)); break; } default: break; } A_ASSERT(status == 0); /* NWF: print the 802.11 hdr bytes */ if(containsDot11Hdr) { status = wmi_dot11_hdr_remove(ar->arWmi,skb); } else if(!is_amsdu) { status = wmi_dot3_2_dix(skb); } if (status) { /* Drop frames that could not be processed (lack of memory, etc.) */ A_NETBUF_FREE(skb); goto rx_done; } if ((ar->arNetDev->flags & IFF_UP) == IFF_UP) { if (ar->arNetworkType == AP_NETWORK) { struct sk_buff *skb1 = NULL; ATH_MAC_HDR *datap; datap = (ATH_MAC_HDR *)A_NETBUF_DATA(skb); if (IEEE80211_IS_MULTICAST(datap->dstMac)) { /* Bcast/Mcast frames should be sent to the OS * stack as well as on the air. */ skb1 = skb_copy(skb,GFP_ATOMIC); } else { /* Search for a connected STA with dstMac as * the Mac address. If found send the frame to * it on the air else send the frame up the * stack */ sta_t *conn = NULL; conn = ieee80211_find_conn(ar, datap->dstMac); if (conn && ar->intra_bss) { skb1 = skb; skb = NULL; } else if(conn && !ar->intra_bss) { A_NETBUF_FREE(skb); skb = NULL; } } if (skb1) { ar6000_data_tx(skb1, ar->arNetDev); } } } aggr_process_recv_frm(ar->aggr_cntxt, tid, seq_no, is_amsdu, (void **)&skb); ar6000_deliver_frames_to_nw_stack((void *) ar->arNetDev, (void *)skb); } } } else { if (EPPING_ALIGNMENT_PAD > 0) { A_NETBUF_PULL(skb, EPPING_ALIGNMENT_PAD); } ar6000_deliver_frames_to_nw_stack((void *)ar->arNetDev, (void *)skb); } rx_done: return; }
0
[ "CWE-703", "CWE-264" ]
linux
550fd08c2cebad61c548def135f67aba284c6162
142,386,016,796,346,590,000,000,000,000,000,000,000
289
net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared After the last patch, we are left in a state in which only drivers calling ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real hardware call ether_setup for their net_devices and don't hold any state in their skbs. There are a handful of drivers that violate this assumption of course, and need to be fixed up. This patch identifies those drivers, and marks them as not being able to support the safe transmission of skbs by clearing the IFF_TX_SKB_SHARING flag in priv_flags Signed-off-by: Neil Horman <[email protected]> CC: Karsten Keil <[email protected]> CC: "David S. Miller" <[email protected]> CC: Jay Vosburgh <[email protected]> CC: Andy Gospodarek <[email protected]> CC: Patrick McHardy <[email protected]> CC: Krzysztof Halasa <[email protected]> CC: "John W. Linville" <[email protected]> CC: Greg Kroah-Hartman <[email protected]> CC: Marcel Holtmann <[email protected]> CC: Johannes Berg <[email protected]> Signed-off-by: David S. Miller <[email protected]>
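The audit described above results in a one-line change per affected driver. A kernel-style sketch of the pattern (a skeleton, not a complete driver): any device that keeps private state in its skbs clears the flag right after ether_setup() sets the defaults.

    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>

    static void my_netdev_setup(struct net_device *dev)
    {
        ether_setup(dev);                         /* sets IFF_TX_SKB_SHARING */
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;   /* this driver holds skb state */
    }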
int sx_sasl_init(sx_env_t env, sx_plugin_t p, va_list args) { const char *appname; sx_sasl_callback_t cb; void *cbarg; _sx_sasl_t ctx; int ret, i; _sx_debug(ZONE, "initialising sasl plugin"); appname = va_arg(args, const char *); if(appname == NULL) { _sx_debug(ZONE, "appname was NULL, failing"); return 1; } cb = va_arg(args, sx_sasl_callback_t); cbarg = va_arg(args, void *); ctx = (_sx_sasl_t) calloc(1, sizeof(struct _sx_sasl_st)); ctx->appname = strdup(appname); ctx->cb = cb; ctx->cbarg = cbarg; for (i = 0; i < SX_CONN_EXTERNAL_ID_MAX_COUNT; i++) ctx->ext_id[i] = NULL; ret = gsasl_init(&ctx->gsasl_ctx); if(ret != GSASL_OK) { _sx_debug(ZONE, "couldn't initialize libgsasl (%d): %s", ret, gsasl_strerror (ret)); free(ctx); return 1; } gsasl_callback_set (ctx->gsasl_ctx, &_sx_sasl_gsasl_callback); _sx_debug(ZONE, "sasl context initialised"); p->private = (void *) ctx; p->unload = _sx_sasl_unload; p->wio = _sx_sasl_wio; p->rio = _sx_sasl_rio; p->stream = _sx_sasl_stream; p->features = _sx_sasl_features; p->process = _sx_sasl_process; p->free = _sx_sasl_free; return 0; }
0
[ "CWE-287", "CWE-703" ]
jabberd2
8416ae54ecefa670534f27a31db71d048b9c7f16
125,452,413,031,757,060,000,000,000,000,000,000,000
51
Fixed offered SASL mechanism check
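The jabberd2 record is tagged CWE-287 (improper authentication); the title suggests the server must confirm that the mechanism a client selects was actually offered. A minimal allowlist check in C, as a sketch of the idea rather than jabberd2's implementation:

    #include <string.h>
    #include <stddef.h>

    /* Accept only a mechanism that appears in the server's offered list. */
    static int mech_is_offered(const char *mech,
                               const char *const *offered, size_t n)
    {
        size_t i;
        for (i = 0; i < n; i++) {
            if (strcmp(mech, offered[i]) == 0)
                return 1;
        }
        return 0;   /* unknown mechanism: reject before starting SASL */
    }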
int _gnutls13_handshake_client(gnutls_session_t session) { int ret = 0; switch (STATE) { case STATE99: case STATE100: #ifdef TLS13_APPENDIX_D4 /* We send it before keys are generated. That works because CCS * is always being cached and queued and not being sent directly */ ret = _gnutls_send_change_cipher_spec(session, AGAIN(STATE100)); STATE = STATE100; IMED_RET("send change cipher spec", ret, 0); #endif FALLTHROUGH; case STATE101: /* Note that we check IN_FLIGHT, not ACCEPTED * here. This is because the client sends early data * speculatively. */ if (session->internals.hsk_flags & HSK_EARLY_DATA_IN_FLIGHT) { ret = _tls13_write_connection_state_init(session, STAGE_EARLY); if (ret == 0) { _gnutls_epoch_bump(session); ret = _gnutls_epoch_dup(session, EPOCH_WRITE_CURRENT); } STATE = STATE101; IMED_RET_FATAL("set early traffic keys", ret, 0); } FALLTHROUGH; case STATE102: ret = _gnutls13_send_early_data(session); STATE = STATE102; IMED_RET("send early data", ret, 0); FALLTHROUGH; case STATE103: STATE = STATE103; ret = generate_hs_traffic_keys(session); /* Note that we check IN_FLIGHT, not ACCEPTED * here. This is because the client sends early data * speculatively. */ IMED_RET_FATAL("generate hs traffic keys", ret, 0); if (session->internals.hsk_flags & HSK_EARLY_DATA_IN_FLIGHT) ret = _tls13_read_connection_state_init(session, STAGE_HS); else ret = _tls13_connection_state_init(session, STAGE_HS); IMED_RET_FATAL("set hs traffic keys", ret, 0); FALLTHROUGH; case STATE104: ret = _gnutls13_recv_encrypted_extensions(session); STATE = STATE104; IMED_RET("recv encrypted extensions", ret, 0); FALLTHROUGH; case STATE105: ret = _gnutls13_recv_certificate_request(session); STATE = STATE105; IMED_RET("recv certificate request", ret, 0); FALLTHROUGH; case STATE106: ret = _gnutls13_recv_certificate(session); STATE = STATE106; IMED_RET("recv certificate", ret, 0); FALLTHROUGH; case STATE107: ret = _gnutls13_recv_certificate_verify(session); STATE = STATE107; IMED_RET("recv server certificate verify", ret, 0); FALLTHROUGH; case STATE108: ret = _gnutls_run_verify_callback(session, GNUTLS_CLIENT); STATE = STATE108; if (ret < 0) return gnutls_assert_val(ret); FALLTHROUGH; case STATE109: ret = _gnutls13_recv_finished(session); STATE = STATE109; IMED_RET("recv finished", ret, 0); FALLTHROUGH; case STATE110: ret = _gnutls13_send_end_of_early_data(session, AGAIN(STATE110)); STATE = STATE110; IMED_RET("send end of early data", ret, 0); /* Note that we check IN_FLIGHT, not ACCEPTED * here. This is because the client sends early data * speculatively. */ if (session->internals.hsk_flags & HSK_EARLY_DATA_IN_FLIGHT) { session->internals.hsk_flags &= ~HSK_EARLY_DATA_IN_FLIGHT; ret = _tls13_write_connection_state_init(session, STAGE_HS); IMED_RET_FATAL("set hs traffic key after sending early data", ret, 0); } FALLTHROUGH; case STATE111: ret = _gnutls13_send_certificate(session, AGAIN(STATE111)); STATE = STATE111; IMED_RET("send certificate", ret, 0); FALLTHROUGH; case STATE112: ret = _gnutls13_send_certificate_verify(session, AGAIN(STATE112)); STATE = STATE112; IMED_RET("send certificate verify", ret, 0); FALLTHROUGH; case STATE113: ret = _gnutls13_send_finished(session, AGAIN(STATE113)); STATE = STATE113; IMED_RET("send finished", ret, 0); FALLTHROUGH; case STATE114: STATE = STATE114; ret = generate_ap_traffic_keys(session); IMED_RET_FATAL("generate app keys", ret, 0); ret = generate_rms_keys(session); IMED_RET_FATAL("generate rms keys", ret, 0); /* set traffic keys */ ret = _tls13_connection_state_init(session, STAGE_APP); IMED_RET_FATAL("set app keys", ret, 0); STATE = STATE0; break; default: return gnutls_assert_val(GNUTLS_E_INTERNAL_ERROR); } /* no lock of post_negotiation_lock is required here as this is not run * after handshake */ session->internals.recv_state = RECV_STATE_0; session->internals.initial_negotiation_completed = 1; SAVE_TRANSCRIPT; if (session->internals.resumed != RESUME_FALSE) _gnutls_set_resumed_parameters(session); return 0; }
0
[ "CWE-824" ]
gnutls
96e07075e8f105b13e76b11e493d5aa2dd937226
206,215,370,797,422,500,000,000,000,000,000,000,000
139
handshake: add missing initialization of local variable Resolves: #704 Signed-off-by: Daiki Ueno <[email protected]> Signed-off-by: Nikos Mavrogiannopoulos <[email protected]>
static int avr_custom_spm_page_erase(RAnalEsil *esil) { CPU_MODEL *cpu; ut8 c; ut64 addr, page_size_bits, i; // sanity check if (!esil || !esil->anal || !esil->anal->reg) { return false; } // get target address if (!__esil_pop_argument(esil, &addr)) { return false; } // get details about current MCU and fix input address cpu = get_cpu_model (esil->anal->cpu); page_size_bits = const_get_value (const_by_name (cpu, CPU_CONST_PARAM, "page_size")); // align base address to page_size_bits addr &= ~(MASK (page_size_bits)); // perform erase //eprintf ("SPM_PAGE_ERASE %ld bytes @ 0x%08" PFMT64x ".\n", page_size, addr); c = 0xff; for (i = 0; i < (1ULL << page_size_bits); i++) { r_anal_esil_mem_write ( esil, (addr + i) & CPU_PC_MASK (cpu), &c, 1); } return true; }
0
[ "CWE-125" ]
radare2
041e53cab7ca33481ae45ecd65ad596976d78e68
139,725,794,612,183,800,000,000,000,000,000,000,000
32
Fix crash in anal.avr
Section* Binary::add_section(const SegmentCommand& segment, const Section& section) { const auto it_segment = std::find_if( std::begin(segments_), std::end(segments_), [&segment] (const SegmentCommand* s) { return segment == *s; }); if (it_segment == std::end(segments_)) { LIEF_ERR("Unable to find segment: '{}'", segment.name()); return nullptr; } SegmentCommand* target_segment = *it_segment; span<const uint8_t> content_ref = section.content(); Section::content_t content = {std::begin(content_ref), std::end(content_ref)}; const size_t sec_size = is64_ ? sizeof(details::section_64) : sizeof(details::section_32); const size_t data_size = content.size(); const int32_t needed_size = align(sec_size + data_size, page_size()); if (available_command_space_ < needed_size) { shift(needed_size); available_command_space_ += needed_size; return add_section(segment, section); } if (!extend(*target_segment, sec_size)) { LIEF_ERR("Unable to extend segment '{}' by 0x{:x}", segment.name(), sec_size); return nullptr; } available_command_space_ -= needed_size; auto new_section = std::make_unique<Section>(section); // Compute offset, virtual address etc for the new section // ======================================================= // Section raw data will be located just after commands table if (section.offset() == 0) { uint64_t new_offset = is64_ ? sizeof(details::mach_header_64) : sizeof(details::mach_header); new_offset += header().sizeof_cmds(); new_offset += available_command_space_; new_section->offset(new_offset); } if (section.size() == 0) { new_section->size(data_size); } if (section.virtual_address() == 0) { new_section->virtual_address(target_segment->virtual_address() + new_section->offset()); } new_section->segment_ = target_segment; target_segment->numberof_sections(target_segment->numberof_sections() + 1); // Copy the new section in the cache sections_.push_back(new_section.get()); // Copy data to segment const size_t relative_offset = new_section->offset() - target_segment->file_offset(); std::move(std::begin(content), std::end(content), std::begin(target_segment->data_) + relative_offset); target_segment->sections_.push_back(std::move(new_section)); return target_segment->sections_.back().get(); }
0
[ "CWE-703" ]
LIEF
7acf0bc4224081d4f425fcc8b2e361b95291d878
44,183,895,631,288,960,000,000,000,000,000,000,000
70
Resolve #764
IOBuf IOBuf::cloneCoalescedAsValue() const { const std::size_t newHeadroom = headroom(); const std::size_t newTailroom = prev()->tailroom(); return cloneCoalescedAsValueWithHeadroomTailroom(newHeadroom, newTailroom); }
0
[ "CWE-787" ]
folly
4f304af1411e68851bdd00ef6140e9de4616f7d3
157,314,337,617,903,000,000,000,000,000,000,000,000
5
[folly] Add additional overflow checks to IOBuf - CVE-2021-24036 Summary: As per title CVE-2021-24036 Reviewed By: jan Differential Revision: D27938605 fbshipit-source-id: 7481c54ae6fbb7b67b15b3631d5357c2f7043f9c
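The folly change adds overflow checks to IOBuf's size arithmetic (headroom + data length + tailroom). The essence, as a C sketch rather than folly's C++:

    #include <stddef.h>
    #include <stdint.h>

    /* Checked addition for buffer geometry: fail instead of wrapping. */
    static int checked_add(size_t a, size_t b, size_t *out)
    {
        if (a > SIZE_MAX - b)
            return -1;          /* sum would overflow size_t */
        *out = a + b;
        return 0;
    }

Cloning with caller-derived headroom and tailroom, as cloneCoalescedAsValue above does, is exactly the kind of computation where an unchecked sum can shrink an allocation and turn a later copy into an out-of-bounds write.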
XMLRPC_VALUE XMLRPC_RequestSetError (XMLRPC_REQUEST request, XMLRPC_VALUE error) { if (request && error) { if (request->error) { XMLRPC_CleanupValue (request->error); } request->error = XMLRPC_CopyValue (error); return request->error; } return NULL; }
0
[ "CWE-119" ]
php-src
88412772d295ebf7dd34409534507dc9bcac726e
307,704,951,996,269,960,000,000,000,000,000,000,000
10
Fix bug #68027 - fix date parsing in XMLRPC lib
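Bug #68027 was a date-parsing flaw in the bundled XMLRPC library (the record is tagged CWE-119). As a generic illustration of bounds-conscious date parsing, fixed sscanf field widths keep the parser from consuming more characters than each field allows, and the return value confirms every field matched; this is a sketch, not the library's actual fix:

    #include <stdio.h>

    /* Parse a compact ISO-8601 date such as "19980717"; returns 0 on success. */
    static int parse_iso8601_date(const char *s, int *y, int *mo, int *d)
    {
        return (sscanf(s, "%4d%2d%2d", y, mo, d) == 3) ? 0 : -1;
    }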
static int kvm_x86_dev_get_attr(struct kvm_device_attr *attr) { u64 __user *uaddr = kvm_get_attr_addr(attr); if (attr->group) return -ENXIO; if (IS_ERR(uaddr)) return PTR_ERR(uaddr); switch (attr->attr) { case KVM_X86_XCOMP_GUEST_SUPP: if (put_user(supported_xcr0, uaddr)) return -EFAULT; return 0; default: return -ENXIO; break; } }
0
[ "CWE-459" ]
linux
683412ccf61294d727ead4a73d97397396e69a6b
6,915,412,508,659,703,000,000,000,000,000,000,000
20
KVM: SEV: add cache flush to solve SEV cache incoherency issues Flush the CPU caches when memory is reclaimed from an SEV guest (where reclaim also includes it being unmapped from KVM's memslots). Due to lack of coherency for SEV encrypted memory, failure to flush results in silent data corruption if userspace is malicious/broken and doesn't ensure SEV guest memory is properly pinned and unpinned. Cache coherency is not enforced across the VM boundary in SEV (AMD APM vol.2 Section 15.34.7). Confidential cachelines generated by confidential VM guests have to be explicitly flushed on the host side. If a memory page containing dirty confidential cachelines was released by VM and reallocated to another user, the cachelines may corrupt the new user at a later time. KVM takes a shortcut by assuming all confidential memory remains pinned until the end of VM lifetime. Therefore, KVM does not flush cache at mmu_notifier invalidation events. Because of this incorrect assumption and the lack of cache flushing, malicious userspace can crash the host kernel: creating a malicious VM and continuously allocating/releasing unpinned confidential memory pages while the VM is running. Add cache flush operations to mmu_notifier operations to ensure that any physical memory leaving the guest VM gets flushed. In particular, hook mmu_notifier_invalidate_range_start and mmu_notifier_release events and flush cache accordingly. The hooks run after releasing the mmu lock to avoid contention with other vCPUs. Cc: [email protected] Suggested-by: Sean Christopherson <[email protected]> Reported-by: Mingwei Zhang <[email protected]> Signed-off-by: Mingwei Zhang <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
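The message describes hooking the mmu_notifier release and invalidation events so confidential cachelines are flushed before pages leave the guest. An illustrative-only skeleton of such a hook; the struct layout and the flush helper are hypothetical, not the actual KVM symbols:

    /* Sketch: flush guest-owned cachelines when the notifier reports that
     * the backing memory is going away (kernel-style pseudo-driver code). */
    static void my_guest_mmu_release(struct mmu_notifier *mn,
                                     struct mm_struct *mm)
    {
        struct my_guest *g = container_of(mn, struct my_guest, mn);
        my_flush_guest_caches(g);   /* hypothetical wbinvd-based flush */
    }

The ordering noted in the message matters: the flush runs after the mmu lock is released so it does not serialize the other vCPUs.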
add_rotation_items_for_output (GsdXrandrManager *manager, GnomeOutputInfo *output) { int num_rotations; GnomeRRRotation rotations; get_allowed_rotations_for_output (manager, output, &num_rotations, &rotations); if (num_rotations == 1) add_unsupported_rotation_item (manager); else add_items_for_rotations (manager, output, rotations); }
0
[]
gnome-settings-daemon
be513b3c7d80d0b7013d79ce46d7eeca929705cc
54,112,876,698,762,690,000,000,000,000,000,000,000
12
Implement autoconfiguration of the outputs This is similar in spirit to 'xrandr --auto', but we disfavor selecting clone modes. Instead, we lay out the outputs left-to-right. Signed-off-by: Federico Mena Quintero <[email protected]>
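The auto-configuration policy in the message, preferring side-by-side placement over clone modes, reduces to accumulating x offsets across enabled outputs. A toy sketch under assumed types (not the gnome-settings-daemon structures):

    struct output_cfg { int x, y, width; };

    /* Place each output immediately to the right of the previous one,
     * so no two outputs overlap (i.e., no implicit clone mode). */
    static void layout_left_to_right(struct output_cfg *outs, int n)
    {
        int i, x = 0;
        for (i = 0; i < n; i++) {
            outs[i].x = x;
            outs[i].y = 0;
            x += outs[i].width;
        }
    }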
DEFINE_RUN_ONCE_STATIC_ALT(ossl_init_no_register_atexit, ossl_init_register_atexit) { #ifdef OPENSSL_INIT_DEBUG fprintf(stderr, "OPENSSL_INIT: ossl_init_no_register_atexit ok!\n"); #endif /* Do nothing in this case */ return 1; }
0
[ "CWE-330" ]
openssl
1b0fe00e2704b5e20334a16d3c9099d1ba2ef1be
148,612,842,470,450,510,000,000,000,000,000,000,000
9
drbg: ensure fork-safety without using a pthread_atfork handler When the new OpenSSL CSPRNG was introduced in version 1.1.1, it was announced in the release notes that it would be fork-safe, which the old CSPRNG hadn't been. The fork-safety was implemented using a fork count, which was incremented by a pthread_atfork handler. Initially, this handler was enabled by default. Unfortunately, the default behaviour had to be changed for other reasons in commit b5319bdbd095, so the new OpenSSL CSPRNG failed to keep its promise. This commit restores the fork-safety using a different approach. It replaces the fork count by a fork id, which coincides with the process id on UNIX-like operating systems and is zero on other operating systems. It is used to detect when an automatic reseed after a fork is necessary. To prevent a future regression, it also adds a test to verify that the child reseeds after fork. CVE-2019-1549 Reviewed-by: Paul Dale <[email protected]> Reviewed-by: Matt Caswell <[email protected]> (Merged from https://github.com/openssl/openssl/pull/9802)
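The mechanism in this message, a fork id that coincides with the PID on UNIX-like systems, makes the reseed decision self-contained in the DRBG instead of relying on an atfork handler. A sketch of the check, with an illustrative struct and a reseed routine assumed to exist elsewhere:

    #include <unistd.h>

    struct drbg_state {
        pid_t fork_id;          /* PID recorded at last (re)seed */
        /* ... entropy state ... */
    };

    void drbg_reseed(struct drbg_state *d);   /* assumed, defined elsewhere */

    /* Call on every generate request: a child sees a PID mismatch exactly
     * once after fork() and reseeds before producing any output. */
    static void drbg_check_fork(struct drbg_state *d)
    {
        pid_t pid = getpid();
        if (d->fork_id != pid) {
            drbg_reseed(d);
            d->fork_id = pid;
        }
    }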