Dataset schema (column, dtype, observed range as rendered by the dataset viewer):

  func        string    lengths 0 - 484k
  target      int64     0 - 1
  cwe         list      lengths 0 - 4
  project     string    799 classes
  commit_id   string    lengths 40 - 40
  hash        float64   1,215,700,430,453,689,100,000,000 - 340,281,914,521,452,260,000,000,000,000
  size        int64     1 - 24k
  message     string    lengths 0 - 13.3k
off_t PackLinuxElf64::pack3(OutputFile *fo, Filter &ft) { off_t flen = super::pack3(fo, ft); // loader follows compressed PT_LOADs // NOTE: PackLinuxElf::pack3 adjusted xct_off for the extra page unsigned v_hole = sz_pack2 + lsize; set_te64(&elfout.phdr[C_TEXT].p_filesz, v_hole); set_te64(&elfout.phdr[C_TEXT].p_memsz, v_hole); // Then compressed gaps (including debuginfo.) for (unsigned k = 0; k < e_phnum; ++k) { Extent x; x.size = find_LOAD_gap(phdri, k, e_phnum); if (x.size) { x.offset = get_te64(&phdri[k].p_offset) + get_te64(&phdri[k].p_filesz); packExtent(x, nullptr, fo); } } // write block end marker (uncompressed size 0) b_info hdr; memset(&hdr, 0, sizeof(hdr)); set_le32(&hdr.sz_cpr, UPX_MAGIC_LE32); fo->write(&hdr, sizeof(hdr)); flen = fpad4(fo); set_te64(&elfout.phdr[C_TEXT].p_filesz, sz_pack2 + lsize); set_te64(&elfout.phdr[C_TEXT].p_memsz, sz_pack2 + lsize); if (0==xct_off) { // not shared library set_te64(&elfout.phdr[C_BASE].p_align, ((upx_uint64_t)0) - page_mask); elfout.phdr[C_BASE].p_paddr = elfout.phdr[C_BASE].p_vaddr; elfout.phdr[C_BASE].p_offset = 0; upx_uint64_t abrk = getbrk(phdri, e_phnum); // vbase handles ET_EXEC. FIXME: pre-linking? upx_uint64_t const vbase = get_te64(&elfout.phdr[C_BASE].p_vaddr); set_te64(&elfout.phdr[C_BASE].p_filesz, 0x1000); // Linux kernel SIGSEGV if (0==.p_filesz) set_te64(&elfout.phdr[C_BASE].p_memsz, abrk - vbase); set_te32(&elfout.phdr[C_BASE].p_flags, Elf32_Phdr::PF_W|Elf32_Phdr::PF_R); set_te64(&elfout.phdr[C_TEXT].p_vaddr, abrk= (page_mask & (~page_mask + abrk))); elfout.phdr[C_TEXT].p_paddr = elfout.phdr[C_TEXT].p_vaddr; set_te64(&elfout.ehdr.e_entry, abrk + get_te64(&elfout.ehdr.e_entry) - vbase); } if (0!=xct_off) { // shared library upx_uint64_t word = load_va + sz_pack2; set_te64(&file_image[user_init_off], word); // set the hook Elf64_Phdr *phdr = (Elf64_Phdr *)lowmem.subref( "bad e_phoff", e_phoff, e_phnum * sizeof(Elf64_Phdr)); unsigned off = fo->st_size(); so_slide = 0; for (unsigned j = 0; j < e_phnum; ++j, ++phdr) { upx_uint64_t const len = get_te64(&phdr->p_filesz); upx_uint64_t const ioff = get_te64(&phdri[j].p_offset); upx_uint64_t align= get_te64(&phdr->p_align); unsigned const type = get_te32(&phdr->p_type); if (Elf64_Phdr::PT_INTERP==type) { // Rotate to highest position, so it can be lopped // by decrementing e_phnum. memcpy((unsigned char *)ibuf, phdr, sizeof(*phdr)); // extract memmove(phdr, 1+phdr, (e_phnum - (1+ j))*sizeof(*phdr)); // overlapping memcpy(&phdr[e_phnum - (1+ j)], (unsigned char *)ibuf, sizeof(*phdr)); // to top --phdr; --e_phnum; set_te16(&ehdri.e_phnum, e_phnum); set_te16(&((Elf64_Ehdr *)(unsigned char *)lowmem)->e_phnum, e_phnum); continue; } if (PT_LOAD64 == type) { if ((xct_off - ioff) < len) { // Change length of compressed PT_LOAD. set_te64(&phdr->p_filesz, sz_pack2 + lsize - ioff); set_te64(&phdr->p_memsz, sz_pack2 + lsize - ioff); if (user_init_off < xct_off) { // MIPS puts PT_DYNAMIC here // Allow for DT_INIT in a new [stolen] slot unsigned off2 = user_init_off - sizeof(word); fo->seek(off2, SEEK_SET); fo->rewrite(&file_image[off2], 2*sizeof(word)); } } else if (xct_off < ioff) { // Slide subsequent PT_LOAD. // AMD64 chip supports page sizes of 4KiB, 2MiB, and 1GiB; // the operating system chooses one. .p_align typically // is a forward-looking 2MiB. In 2009 Linux chooses 4KiB. // We choose 4KiB to waste less space. If Linux chooses // 2MiB later, then our output will not run. 
if ((1u<<12) < align && Elf64_Ehdr::EM_X86_64 ==e_machine ) { align = 1u<<12; set_te64(&phdr->p_align, align); } off += (align-1) & (ioff - off); set_te64(&phdr->p_offset, off); so_slide = off - ioff; fo->seek( off, SEEK_SET); fo->write(&file_image[ioff], len); off += len; } continue; // all done with this PT_LOAD } if (xct_off < ioff) set_te64(&phdr->p_offset, so_slide + ioff); } // end each Phdr if (opt->o_unix.android_shlib) { // Update {DYNAMIC}.sh_offset by so_slide. Elf64_Shdr *shdr = (Elf64_Shdr *)lowmem.subref( "bad e_shoff", xct_off - asl_delta, e_shnum * sizeof(Elf64_Shdr)); for (unsigned j = 0; j < e_shnum; ++shdr, ++j) { unsigned sh_type = get_te32(&shdr->sh_type); if (Elf64_Shdr::SHT_DYNAMIC == sh_type) { upx_uint64_t offset = get_te64(&shdr->sh_offset); set_te64(&shdr->sh_offset, so_slide + offset); fo->seek((j * sizeof(Elf64_Shdr)) + xct_off - asl_delta, SEEK_SET); fo->rewrite(shdr, sizeof(*shdr)); fo->seek(0, SEEK_END); } if (Elf64_Shdr::SHT_RELA == sh_type && n_jmp_slot && !strcmp(".rela.plt", get_te32(&shdr->sh_name) + shstrtab)) { upx_uint64_t f_off = elf_get_offset_from_address(plt_off); fo->seek(so_slide + f_off, SEEK_SET); // FIXME: assumes PT_LOAD[1] fo->rewrite(&file_image[f_off], n_jmp_slot * 8); } } } else { // !opt->o_unix.android_shlib) ehdri.e_shnum = 0; ehdri.e_shoff = 0; ehdri.e_shstrndx = 0; } } return flen; }
target: 0
cwe: [ "CWE-476", "CWE-415" ]
project: upx
commit_id: 90279abdfcd235172eab99651043051188938dcc
hash: 187,952,962,006,738,400,000,000,000,000,000,000,000
size: 130
message:
PackLinuxElf::canUnpack must checkEhdr() for ELF input https://github.com/upx/upx/issues/485 modified: p_lx_elf.cpp
func:
static int lg_event(struct hid_device *hdev, struct hid_field *field,
        struct hid_usage *usage, __s32 value)
{
        struct lg_drv_data *drv_data = hid_get_drvdata(hdev);

        if ((drv_data->quirks & LG_INVERT_HWHEEL) && usage->code == REL_HWHEEL) {
                input_event(field->hidinput->input, usage->type, usage->code,
                            -value);
                return 1;
        }
        if (drv_data->quirks & LG_FF4) {
                return lg4ff_adjust_input_event(hdev, field, usage, value,
                                                drv_data);
        }

        return 0;
}
target: 0
cwe: [ "CWE-119", "CWE-787" ]
project: linux
commit_id: 4ab25786c87eb20857bbb715c3ae34ec8fd6a214
hash: 229,297,138,689,968,200,000,000,000,000,000,000,000
size: 16
message:
HID: fix a couple of off-by-ones There are a few very theoretical off-by-one bugs in report descriptor size checking when performing a pre-parsing fixup. Fix those. Cc: [email protected] Reported-by: Ben Hawkes <[email protected]> Reviewed-by: Benjamin Tissoires <[email protected]> Signed-off-by: Jiri Kosina <[email protected]>
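The message above describes off-by-one errors in report-descriptor size checks. A minimal sketch of that bug class (the indices and byte values are hypothetical, not the actual kernel diff): a fixup that touches rdesc[N] must require *rsize >= N + 1.

#include <stdint.h>

/* Hypothetical fixup illustrating the off-by-one: the highest index
 * touched is 90, so the guard must require at least 91 valid bytes.
 * The buggy form, `*rsize >= 90`, still allows rdesc[90] to sit one
 * byte past the end of the descriptor. */
static uint8_t *example_report_fixup(uint8_t *rdesc, unsigned int *rsize)
{
    if (*rsize >= 91 && rdesc[88] == 0x2b && rdesc[90] == 0x2c)
        rdesc[90] = 0x2e;
    return rdesc;
}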
func:
static inline enum zone_type page_zonenum(const struct page *page)
{
        return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}
target: 0
cwe: [ "CWE-119" ]
project: linux
commit_id: 1be7107fbe18eed3e319a6c3e83c78254b693acb
hash: 273,717,057,005,045,580,000,000,000,000,000,000,000
size: 4
message:
mm: larger stack guard gap, between vmas

Stack guard page is a useful feature to reduce a risk of stack smashing into a different mapping. We have been using a single page gap which is sufficient to prevent having stack adjacent to a different mapping. But this seems to be insufficient in the light of the stack usage in userspace. E.g. glibc uses as large as 64kB alloca() in many commonly used functions. Others use constructs like gid_t buffer[NGROUPS_MAX] which is 256kB or stack strings with MAX_ARG_STRLEN.

This will become especially dangerous for suid binaries and the default no limit for the stack size limit because those applications can be tricked to consume a large portion of the stack and a single glibc call could jump over the guard page. These attacks are not theoretical, unfortunately.

Make those attacks less probable by increasing the stack guard gap to 1MB (on systems with 4k pages; but make it depend on the page size because systems with larger base pages might cap stack allocations in the PAGE_SIZE units) which should cover larger alloca() and VLA stack allocations. It is obviously not a full fix because the problem is somehow inherent, but it should reduce attack space a lot.

One could argue that the gap size should be configurable from userspace, but that can be done later when somebody finds that the new 1MB is wrong for some special case applications. For now, add a kernel command line option (stack_guard_gap) to specify the stack gap size (in page units).

Implementation wise, first delete all the old code for stack guard page: because although we could get away with accounting one extra page in a stack vma, accounting a larger gap can break userspace - case in point, a program run with "ulimit -S -v 20000" failed when the 1MB gap was counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK and strict non-overcommit mode.

Instead of keeping gap inside the stack vma, maintain the stack guard gap as a gap between vmas: using vm_start_gap() in place of vm_start (or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few places which need to respect the gap - mainly arch_get_unmapped_area(), and the vma tree's subtree_gap support for that.

Original-patch-by: Oleg Nesterov <[email protected]> Original-patch-by: Michal Hocko <[email protected]> Signed-off-by: Hugh Dickins <[email protected]> Acked-by: Michal Hocko <[email protected]> Tested-by: Helge Deller <[email protected]> # parisc Signed-off-by: Linus Torvalds <[email protected]>
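The message describes replacing vm_start with vm_start_gap() at the few places that must respect the gap. A sketch of such a helper, assuming a global stack_guard_gap in bytes; this mirrors the commit's approach but is written from memory, not copied from the verified diff:

unsigned long stack_guard_gap = 256UL << PAGE_SHIFT;    /* 1MB with 4kB pages */

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
        unsigned long vm_start = vma->vm_start;

        if (vma->vm_flags & VM_GROWSDOWN) {
                vm_start -= stack_guard_gap;
                if (vm_start > vma->vm_start)   /* subtraction wrapped */
                        vm_start = 0;
        }
        return vm_start;
}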
func:
static inline int mailimf_wsp_parse(const char * message, size_t length,
                                    size_t * indx)
{
  size_t cur_token;

  cur_token = * indx;

  if (cur_token >= length)
    return MAILIMF_ERROR_PARSE;

  if ((message[cur_token] != ' ') && (message[cur_token] != '\t'))
    return MAILIMF_ERROR_PARSE;

  cur_token ++;
  * indx = cur_token;

  return MAILIMF_NO_ERROR;
}
target: 0
cwe: [ "CWE-476" ]
project: libetpan
commit_id: 1fe8fbc032ccda1db9af66d93016b49c16c1f22d
hash: 8,035,015,483,215,344,000,000,000,000,000,000,000
size: 18
message:
Fixed crash #274
func:
static void fbo_close(struct tcmu_device *dev)
{
        struct fbo_state *state = tcmu_get_dev_private(dev);

        close(state->fd);
        free(state);
}
target: 0
cwe: [ "CWE-200" ]
project: tcmu-runner
commit_id: 8cf8208775022301adaa59c240bb7f93742d1329
hash: 328,366,693,115,509,000,000,000,000,000,000,000,000
size: 7
message:
removed all check_config callback implementations to avoid security issues see github issue #194. qcow.c contained an information leak, could test for existence of any file in the system. file_example.c and file_optical.c allow also to test for existence of any file, plus to temporarily create empty new files anywhere in the file system. This also involves a race condition: if a file didn't exist in the first place, but would be created in-between by some other process, then the file would be deleted by the check_config implementation.
gst_asf_demux_get_gst_tag_from_tag_name (const gchar * name_utf8) { const struct { const gchar *asf_name; const gchar *gst_name; } tags[] = { { "WM/Genre", GST_TAG_GENRE}, { "WM/AlbumTitle", GST_TAG_ALBUM}, { "WM/AlbumArtist", GST_TAG_ARTIST}, { "WM/Picture", GST_TAG_IMAGE}, { "WM/Track", GST_TAG_TRACK_NUMBER}, { "WM/TrackNumber", GST_TAG_TRACK_NUMBER}, { "WM/Year", GST_TAG_DATE_TIME} /* { "WM/Composer", GST_TAG_COMPOSER } */ }; gsize out; guint i; if (name_utf8 == NULL) { GST_WARNING ("Failed to convert name to UTF8, skipping"); return NULL; } out = strlen (name_utf8); for (i = 0; i < G_N_ELEMENTS (tags); ++i) { if (strncmp (tags[i].asf_name, name_utf8, out) == 0) { GST_LOG ("map tagname '%s' -> '%s'", name_utf8, tags[i].gst_name); return tags[i].gst_name; } } return NULL; }
target: 0
cwe: [ "CWE-125", "CWE-787" ]
project: gst-plugins-ugly
commit_id: d21017b52a585f145e8d62781bcc1c5fefc7ee37
hash: 309,388,332,738,213,520,000,000,000,000,000,000,000
size: 36
message:
asfdemux: Check that we have enough data available before parsing bool/uint extended content descriptors https://bugzilla.gnome.org/show_bug.cgi?id=777955
static void bind_fetch(int row_count) { MYSQL_STMT *stmt; int rc, i, count= row_count; int32 data[10]; int8 i8_data; int16 i16_data; int32 i32_data; longlong i64_data; float f_data; double d_data; char s_data[10]; ulong length[10]; MYSQL_BIND my_bind[7]; my_bool is_null[7]; stmt= mysql_simple_prepare(mysql, "INSERT INTO test_bind_fetch VALUES " "(?, ?, ?, ?, ?, ?, ?)"); check_stmt(stmt); verify_param_count(stmt, 7); /* Always memset all members of bind parameter */ memset(my_bind, 0, sizeof(my_bind)); for (i= 0; i < (int) array_elements(my_bind); i++) { my_bind[i].buffer_type= MYSQL_TYPE_LONG; my_bind[i].buffer= (void *) &data[i]; } rc= mysql_stmt_bind_param(stmt, my_bind); check_execute(stmt, rc); while (count--) { rc= 10+count; for (i= 0; i < (int) array_elements(my_bind); i++) { data[i]= rc+i; rc+= 12; } rc= mysql_stmt_execute(stmt); check_execute(stmt, rc); } rc= mysql_commit(mysql); myquery(rc); mysql_stmt_close(stmt); rc= my_stmt_result("SELECT * FROM test_bind_fetch"); DIE_UNLESS(row_count == rc); stmt= mysql_simple_prepare(mysql, "SELECT * FROM test_bind_fetch"); check_stmt(stmt); for (i= 0; i < (int) array_elements(my_bind); i++) { my_bind[i].buffer= (void *) &data[i]; my_bind[i].length= &length[i]; my_bind[i].is_null= &is_null[i]; } my_bind[0].buffer_type= MYSQL_TYPE_TINY; my_bind[0].buffer= (void *)&i8_data; my_bind[1].buffer_type= MYSQL_TYPE_SHORT; my_bind[1].buffer= (void *)&i16_data; my_bind[2].buffer_type= MYSQL_TYPE_LONG; my_bind[2].buffer= (void *)&i32_data; my_bind[3].buffer_type= MYSQL_TYPE_LONGLONG; my_bind[3].buffer= (void *)&i64_data; my_bind[4].buffer_type= MYSQL_TYPE_FLOAT; my_bind[4].buffer= (void *)&f_data; my_bind[5].buffer_type= MYSQL_TYPE_DOUBLE; my_bind[5].buffer= (void *)&d_data; my_bind[6].buffer_type= MYSQL_TYPE_STRING; my_bind[6].buffer= (void *)&s_data; my_bind[6].buffer_length= sizeof(s_data); rc= mysql_stmt_bind_result(stmt, my_bind); check_execute(stmt, rc); rc= mysql_stmt_execute(stmt); check_execute(stmt, rc); rc= mysql_stmt_store_result(stmt); check_execute(stmt, rc); while (row_count--) { rc= mysql_stmt_fetch(stmt); check_execute(stmt, rc); if (!opt_silent) { fprintf(stdout, "\n"); fprintf(stdout, "\n tiny : %ld(%lu)", (ulong) i8_data, length[0]); fprintf(stdout, "\n short : %ld(%lu)", (ulong) i16_data, length[1]); fprintf(stdout, "\n int : %ld(%lu)", (ulong) i32_data, length[2]); fprintf(stdout, "\n longlong : %ld(%lu)", (ulong) i64_data, length[3]); fprintf(stdout, "\n float : %f(%lu)", f_data, length[4]); fprintf(stdout, "\n double : %g(%lu)", d_data, length[5]); fprintf(stdout, "\n char : %s(%lu)", s_data, length[6]); } rc= 10+row_count; /* TINY */ DIE_UNLESS((int) i8_data == rc); DIE_UNLESS(length[0] == 1); rc+= 13; /* SHORT */ DIE_UNLESS((int) i16_data == rc); DIE_UNLESS(length[1] == 2); rc+= 13; /* LONG */ DIE_UNLESS((int) i32_data == rc); DIE_UNLESS(length[2] == 4); rc+= 13; /* LONGLONG */ DIE_UNLESS((int) i64_data == rc); DIE_UNLESS(length[3] == 8); rc+= 13; /* FLOAT */ DIE_UNLESS((int)f_data == rc); DIE_UNLESS(length[4] == 4); rc+= 13; /* DOUBLE */ DIE_UNLESS((int)d_data == rc); DIE_UNLESS(length[5] == 8); rc+= 13; /* CHAR */ { char buff[20]; long len= sprintf(buff, "%d", rc); DIE_UNLESS(strcmp(s_data, buff) == 0); DIE_UNLESS(length[6] == (ulong) len); } } rc= mysql_stmt_fetch(stmt); DIE_UNLESS(rc == MYSQL_NO_DATA); mysql_stmt_close(stmt); }
target: 0
cwe: [ "CWE-284", "CWE-295" ]
project: mysql-server
commit_id: 3bd5589e1a5a93f9c224badf983cd65c45215390
hash: 175,010,706,201,107,730,000,000,000,000,000,000,000
size: 155
message:
WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries to mean force ssl, not try ssl and fail over to unencrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options() option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client SSL handling headers that sets all the relevant SSL options at once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper headers.
# If SSL is mandatory assume that the media is secure enough for the sha256 plugin to do unencrypted password exchange even before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is specified.
# updated test cases that require a non-default cipher to spawn a mysql command line tool binary since mysqltest has no support for specifying ciphers.
# updated the replication slave connection code to always enforce SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options() values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options
func:
static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
                                  struct hw_perf_event *event)
{
        int idx;
        unsigned long evtype = event->config_base & ARMV8_EVTYPE_EVENT;

        /* Always place a cycle counter into the cycle counter. */
        if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
                if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
                        return -EAGAIN;

                return ARMV8_IDX_CYCLE_COUNTER;
        }

        /*
         * For anything other than a cycle counter, try and use
         * the events counters
         */
        for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
                if (!test_and_set_bit(idx, cpuc->used_mask))
                        return idx;
        }

        /* The counters are all in use. */
        return -EAGAIN;
}
target: 0
cwe: [ "CWE-284", "CWE-264" ]
project: linux
commit_id: 8fff105e13041e49b82f92eef034f363a6b1c071
hash: 286,338,483,208,071,500,000,000,000,000,000,000,000
size: 26
message:
arm64: perf: reject groups spanning multiple HW PMUs The perf core implicitly rejects events spanning multiple HW PMUs, as in these cases the event->ctx will differ. However this validation is performed after pmu::event_init() is called in perf_init_event(), and thus pmu::event_init() may be called with a group leader from a different HW PMU. The ARM64 PMU driver does not take this fact into account, and when validating groups assumes that it can call to_arm_pmu(event->pmu) for any HW event. When the event in question is from another HW PMU this is wrong, and results in dereferencing garbage. This patch updates the ARM64 PMU driver to first test for and reject events from other PMUs, moving the to_arm_pmu and related logic after this test. Fixes a crash triggered by perf_fuzzer on Linux-4.0-rc2, with a CCI PMU present: Bad mode in Synchronous Abort handler detected, code 0x86000006 -- IABT (current EL) CPU: 0 PID: 1371 Comm: perf_fuzzer Not tainted 3.19.0+ #249 Hardware name: V2F-1XV7 Cortex-A53x2 SMM (DT) task: ffffffc07c73a280 ti: ffffffc07b0a0000 task.ti: ffffffc07b0a0000 PC is at 0x0 LR is at validate_event+0x90/0xa8 pc : [<0000000000000000>] lr : [<ffffffc000090228>] pstate: 00000145 sp : ffffffc07b0a3ba0 [< (null)>] (null) [<ffffffc0000907d8>] armpmu_event_init+0x174/0x3cc [<ffffffc00015d870>] perf_try_init_event+0x34/0x70 [<ffffffc000164094>] perf_init_event+0xe0/0x10c [<ffffffc000164348>] perf_event_alloc+0x288/0x358 [<ffffffc000164c5c>] SyS_perf_event_open+0x464/0x98c Code: bad PC value Also cleans up the code to use the arm_pmu only when we know that we are dealing with an arm pmu event. Cc: Will Deacon <[email protected]> Acked-by: Mark Rutland <[email protected]> Acked-by: Peter Ziljstra (Intel) <[email protected]> Signed-off-by: Suzuki K. Poulose <[email protected]> Signed-off-by: Will Deacon <[email protected]>
} void dump_hevc_track_info(GF_ISOFile *file, u32 trackNum, GF_HEVCConfig *hevccfg #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) , HEVCState *hevc_state #endif /*GPAC_DISABLE_AV_PARSERS && defined(GPAC_DISABLE_HEVC)*/ ) { #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) u32 idx; #endif u32 k; Bool non_hevc_base_layer=GF_FALSE; fprintf(stderr, "\t%s Info:", hevccfg->is_lhvc ? "LHVC" : "HEVC"); if (!hevccfg->is_lhvc) fprintf(stderr, " Profile %s @ Level %g - Chroma Format %s\n", gf_hevc_get_profile_name(hevccfg->profile_idc), ((Double)hevccfg->level_idc) / 30.0, gf_avc_hevc_get_chroma_format_name(hevccfg->chromaFormat)); fprintf(stderr, "\n"); fprintf(stderr, "\tNAL Unit length bits: %d", 8*hevccfg->nal_unit_size); if (!hevccfg->is_lhvc) fprintf(stderr, " - general profile compatibility 0x%08X\n", hevccfg->general_profile_compatibility_flags); fprintf(stderr, "\n"); fprintf(stderr, "\tParameter Sets: "); for (k=0; k<gf_list_count(hevccfg->param_array); k++) { GF_HEVCParamArray *ar=gf_list_get(hevccfg->param_array, k); if (ar->type==GF_HEVC_NALU_SEQ_PARAM) { fprintf(stderr, "%d SPS ", gf_list_count(ar->nalus)); } if (ar->type==GF_HEVC_NALU_PIC_PARAM) { fprintf(stderr, "%d PPS ", gf_list_count(ar->nalus)); } if (ar->type==GF_HEVC_NALU_VID_PARAM) { fprintf(stderr, "%d VPS ", gf_list_count(ar->nalus)); #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) for (idx=0; idx<gf_list_count(ar->nalus); idx++) { GF_AVCConfigSlot *vps = gf_list_get(ar->nalus, idx); s32 ps_idx=gf_media_hevc_read_vps(vps->data, vps->size, hevc_state); if (hevccfg->is_lhvc && (ps_idx>=0)) { non_hevc_base_layer = ! hevc_state->vps[ps_idx].base_layer_internal_flag; } } #endif } } fprintf(stderr, "\n"); #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) for (k=0; k<gf_list_count(hevccfg->param_array); k++) { GF_HEVCParamArray *ar=gf_list_get(hevccfg->param_array, k); u32 width, height; s32 par_n, par_d; if (ar->type !=GF_HEVC_NALU_SEQ_PARAM) continue; for (idx=0; idx<gf_list_count(ar->nalus); idx++) { GF_Err e; GF_AVCConfigSlot *sps = gf_list_get(ar->nalus, idx); par_n = par_d = -1; e = gf_hevc_get_sps_info_with_state(hevc_state, sps->data, sps->size, NULL, &width, &height, &par_n, &par_d); if (e==GF_OK) { fprintf(stderr, "\tSPS resolution %dx%d", width, height); if ((par_n>0) && (par_d>0)) { u32 tw, th; gf_isom_get_track_layout_info(file, trackNum, &tw, &th, NULL, NULL, NULL); fprintf(stderr, " - Pixel Aspect Ratio %d:%d - Indicated track size %d x %d", par_n, par_d, tw, th); } fprintf(stderr, "\n"); } else { fprintf(stderr, "\nFailed to read SPS: %s\n\n", gf_error_to_string((e) )); } } } #endif if (!hevccfg->is_lhvc) fprintf(stderr, "\tBit Depth luma %d - Chroma %d - %d temporal layers\n", hevccfg->luma_bit_depth, hevccfg->chroma_bit_depth, hevccfg->numTemporalLayers); else fprintf(stderr, "\t%d temporal layers\n", hevccfg->numTemporalLayers); if (hevccfg->is_lhvc) { fprintf(stderr, "\t%sHEVC base layer - Complete representation %d\n", non_hevc_base_layer ? "Non-" : "", hevccfg->complete_representation); } for (k=0; k<gf_list_count(hevccfg->param_array); k++) { GF_HEVCParamArray *ar=gf_list_get(hevccfg->param_array, k); if (ar->type==GF_HEVC_NALU_SEQ_PARAM) print_config_hash(ar->nalus, "SPS"); else if (ar->type==GF_HEVC_NALU_PIC_PARAM) print_config_hash(ar->nalus, "PPS"); else if (ar->type==GF_HEVC_NALU_VID_PARAM) print_config_hash(ar->nalus, "VPS");
target: 0
cwe: [ "CWE-476" ]
project: gpac
commit_id: ce01bd15f711d4575b7424b54b3a395ec64c1784
hash: 174,340,170,124,960,200,000,000,000,000,000,000,000
size: 86
message:
fixed #1566
pdf14_push_transparency_group(pdf14_ctx *ctx, gs_int_rect *rect, bool isolated, bool knockout, byte alpha, byte shape, gs_blend_mode_t blend_mode, bool idle, uint mask_id, int numcomps, bool cm_back_drop, cmm_profile_t *group_profile, cmm_profile_t *tos_profile, gs_gstate *pgs, gx_device *dev) { pdf14_buf *tos = ctx->stack; pdf14_buf *buf, *backdrop; bool has_shape, has_tags; if_debug1m('v', ctx->memory, "[v]pdf14_push_transparency_group, idle = %d\n", idle); /* We are going to use the shape in the knockout computation. If previous buffer has a shape or if this is a knockout then we will have a shape here */ has_shape = tos->has_shape || tos->knockout; // has_shape = false; /* If previous buffer has tags, then add tags here */ has_tags = tos->has_tags; /* If the group is NOT isolated we add in the alpha_g plane. This enables recompositing to be performed ala art_pdf_recomposite_group_8 so that the backdrop is only included one time in the computation. */ /* Order of buffer data is color data, followed by alpha channel, followed by shape (if present), then alpha_g (if present), then tags (if present) */ buf = pdf14_buf_new(rect, has_tags, !isolated, has_shape, idle, numcomps + 1, tos->num_spots, ctx->memory); if_debug4m('v', ctx->memory, "[v]base buf: %d x %d, %d color channels, %d planes\n", buf->rect.q.x, buf->rect.q.y, buf->n_chan, buf->n_planes); if (buf == NULL) return_error(gs_error_VMerror); buf->isolated = isolated; buf->knockout = knockout; buf->alpha = alpha; buf->shape = shape; buf->blend_mode = blend_mode; buf->mask_id = mask_id; buf->mask_stack = ctx->mask_stack; /* Save because the group rendering may set up another (nested) mask. */ ctx->mask_stack = NULL; /* Clean the mask field for rendering this group. See pdf14_pop_transparency_group how to handle it. */ buf->saved = tos; ctx->stack = buf; if (buf->data == NULL) return 0; if (idle) return 0; backdrop = pdf14_find_backdrop_buf(ctx); if (backdrop == NULL) { memset(buf->data, 0, buf->planestride * (buf->n_chan + (buf->has_shape ? 1 : 0) + (buf->has_alpha_g ? 1 : 0) + (buf->has_tags ? 1 : 0))); } else { if (!buf->knockout) { if (!cm_back_drop) { pdf14_preserve_backdrop(buf, tos, false); } else { /* We must have an non-isolated group with a mismatch in color spaces. In this case, we can't just copy the buffer but must CM it */ pdf14_preserve_backdrop_cm(buf, group_profile, tos, tos_profile, ctx->memory, pgs, dev, false); } } } /* If knockout, we have to maintain a copy of the backdrop in case we are drawing nonisolated groups on top of the knockout group. 
*/ if (buf->knockout) { buf->backdrop = gs_alloc_bytes(ctx->memory, buf->planestride * buf->n_chan, "pdf14_push_transparency_group"); if (buf->backdrop == NULL) { gs_free_object(ctx->memory, buf->backdrop, "pdf14_push_transparency_group"); return gs_throw(gs_error_VMerror, "Knockout backdrop allocation failed"); } if (buf->isolated) { /* We will have opaque backdrop for non-isolated compositing */ memset(buf->backdrop, 0, buf->planestride * buf->n_chan); } else { /* Save knockout backdrop for non-isolated compositing */ /* Note that we need to drill down through the non-isolated groups in our stack and make sure that we are not embedded in another knockout group */ pdf14_buf *check = tos; pdf14_buf *child = NULL; /* Needed so we can get profile */ cmm_profile_t *prev_knockout_profile; while (check != NULL) { if (check->isolated) break; if (check->knockout) { break; } child = check; check = check->saved; } /* Here we need to grab a back drop from a knockout parent group and potentially worry about color differences. */ if (check == NULL) { prev_knockout_profile = tos_profile; check = tos; } else { if (child == NULL) { prev_knockout_profile = tos_profile; } else { prev_knockout_profile = child->parent_color_info_procs->icc_profile; } } if (!cm_back_drop) { pdf14_preserve_backdrop(buf, check, false); } else { /* We must have an non-isolated group with a mismatch in color spaces. In this case, we can't just copy the buffer but must CM it */ pdf14_preserve_backdrop_cm(buf, group_profile, check, prev_knockout_profile, ctx->memory, pgs, dev, false); } memcpy(buf->backdrop, buf->data, buf->planestride * buf->n_chan); } #if RAW_DUMP /* Dump the current buffer to see what we have. */ dump_raw_buffer(ctx->stack->rect.q.y-ctx->stack->rect.p.y, ctx->stack->rowstride, buf->n_chan, ctx->stack->planestride, ctx->stack->rowstride, "KnockoutBackDrop", buf->backdrop); global_index++; #endif } else { buf->backdrop = NULL; } #if RAW_DUMP /* Dump the current buffer to see what we have. */ dump_raw_buffer(ctx->stack->rect.q.y-ctx->stack->rect.p.y, ctx->stack->rowstride, ctx->stack->n_planes, ctx->stack->planestride, ctx->stack->rowstride, "TransGroupPush", ctx->stack->data); global_index++; #endif return 0; }
target: 0
cwe: [ "CWE-416" ]
project: ghostpdl
commit_id: 90fd0c7ca3efc1ddff64a86f4104b13b3ac969eb
hash: 200,857,153,756,096,760,000,000,000,000,000,000,000
size: 142
message:
Bug 697456. Dont create new ctx when pdf14 device reenabled This bug had yet another weird case where the user created a file that pushed the pdf14 device twice. We were in that case, creating a new ctx and blowing away the original one with out proper clean up. To avoid, only create a new one when we need it.
func:
reset_stats(
        sockaddr_u *srcadr,
        endpt *inter,
        struct req_pkt *inpkt
        )
{
        struct reset_flags *rflags;
        u_long flags;
        struct reset_entry *rent;

        if (INFO_NITEMS(inpkt->err_nitems) > 1) {
                msyslog(LOG_ERR, "reset_stats: err_nitems > 1");
                req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
                return;
        }

        rflags = (struct reset_flags *)&inpkt->u;
        flags = ntohl(rflags->flags);

        if (flags & ~RESET_ALLFLAGS) {
                msyslog(LOG_ERR, "reset_stats: reset leaves %#lx",
                        flags & ~RESET_ALLFLAGS);
                req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
                return;
        }

        for (rent = reset_entries; rent->flag != 0; rent++) {
                if (flags & rent->flag)
                        (*rent->handler)();
        }
        req_ack(srcadr, inter, inpkt, INFO_OKAY);
}
target: 0
cwe: [ "CWE-190" ]
project: ntp
commit_id: c04c3d3d940dfe1a53132925c4f51aef017d2e0f
hash: 229,145,460,750,319,980,000,000,000,000,000,000,000
size: 32
message:
[TALOS-CAN-0052] crash by loop counter underrun.
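The fixed function above gained the INFO_NITEMS() guard. A generic, self-contained sketch of the underlying class (hypothetical names, not the ntpd code): a per-item loop driven by a count read straight from the request packet.

#include <stddef.h>

/* The claimed count is attacker-controlled; without a bound derived from
 * the real packet length, the walk runs past the end of the buffer. */
static unsigned long sum_items(const unsigned char *pkt, size_t pkt_len,
                               unsigned claimed_items, size_t item_size)
{
    unsigned long total = 0;
    const unsigned char *p = pkt;
    while (claimed_items-- > 0) {   /* never checked against pkt_len */
        total += *p;                /* out-of-bounds read once past the end */
        p += item_size;
    }
    return total;
}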
func:
mrb_mruby_fiber_gem_init(mrb_state* mrb)
{
  struct RClass *c;

  c = mrb_define_class(mrb, "Fiber", mrb->object_class);
  MRB_SET_INSTANCE_TT(c, MRB_TT_FIBER);

  mrb_define_method(mrb, c, "initialize", fiber_init, MRB_ARGS_NONE()|MRB_ARGS_BLOCK());
  mrb_define_method(mrb, c, "resume", fiber_resume, MRB_ARGS_ANY());
  mrb_define_method(mrb, c, "transfer", fiber_transfer, MRB_ARGS_ANY());
  mrb_define_method(mrb, c, "alive?", fiber_alive_p, MRB_ARGS_NONE());
  mrb_define_method(mrb, c, "==", fiber_eq, MRB_ARGS_REQ(1));

  mrb_define_class_method(mrb, c, "yield", fiber_yield, MRB_ARGS_ANY());
  mrb_define_class_method(mrb, c, "current", fiber_current, MRB_ARGS_NONE());

  mrb_define_class(mrb, "FiberError", mrb->eStandardError_class);
}
target: 0
cwe: [ "CWE-476", "CWE-703" ]
project: mruby
commit_id: da48e7dbb20024c198493b8724adae1b842083aa
hash: 99,558,038,778,858,300,000,000,000,000,000,000,000
size: 18
message:
fiber.c: should pack 15+ arguments in an array.
func:
static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
{
        u32 epnum;

        for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
                struct dwc3_ep *dep;
                int ret;

                dep = dwc->eps[epnum];
                if (!dep)
                        continue;

                if (!(dep->flags & DWC3_EP_STALL))
                        continue;

                dep->flags &= ~DWC3_EP_STALL;

                ret = dwc3_send_clear_stall_ep_cmd(dep);
                WARN_ON_ONCE(ret);
        }
}
target: 0
cwe: [ "CWE-703", "CWE-667", "CWE-189" ]
project: linux
commit_id: c91815b596245fd7da349ecc43c8def670d2269e
hash: 187,916,564,533,942,850,000,000,000,000,000,000,000
size: 21
message:
usb: dwc3: gadget: never call ->complete() from ->ep_queue() This is a requirement which has always existed but, somehow, wasn't reflected in the documentation and problems weren't found until now when Tuba Yavuz found a possible deadlock happening between dwc3 and f_hid. She described the situation as follows: spin_lock_irqsave(&hidg->write_spinlock, flags); // first acquire /* we our function has been disabled by host */ if (!hidg->req) { free_ep_req(hidg->in_ep, hidg->req); goto try_again; } [...] status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC); => [...] => usb_gadget_giveback_request => f_hidg_req_complete => spin_lock_irqsave(&hidg->write_spinlock, flags); // second acquire Note that this happens because dwc3 would call ->complete() on a failed usb_ep_queue() due to failed Start Transfer command. This is, anyway, a theoretical situation because dwc3 currently uses "No Response Update Transfer" command for Bulk and Interrupt endpoints. It's still good to make this case impossible to happen even if the "No Reponse Update Transfer" command is changed. Reported-by: Tuba Yavuz <[email protected]> Signed-off-by: Felipe Balbi <[email protected]> Cc: stable <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
func:
static void __io_put_task(struct task_struct *task, int nr)
{
        struct io_uring_task *tctx = task->io_uring;

        percpu_counter_sub(&tctx->inflight, nr);
        if (unlikely(atomic_read(&tctx->in_idle)))
                wake_up(&tctx->wait);
        put_task_struct_many(task, nr);
}
target: 0
cwe: [ "CWE-416" ]
project: linux
commit_id: 9cae36a094e7e9d6e5fe8b6dcd4642138b3eb0c7
hash: 299,634,527,495,837,700,000,000,000,000,000,000,000
size: 9
message:
io_uring: reinstate the inflight tracking After some debugging, it was realized that we really do still need the old inflight tracking for any file type that has io_uring_fops assigned. If we don't, then trivial circular references will mean that we never get the ctx cleaned up and hence it'll leak. Just bring back the inflight tracking, which then also means we can eliminate the conditional dropping of the file when task_work is queued. Fixes: d5361233e9ab ("io_uring: drop the old style inflight file tracking") Signed-off-by: Jens Axboe <[email protected]>
func:
void IOBuf::freeInternalBuf(void* /* buf */, void* userData) noexcept {
  auto storage = static_cast<HeapStorage*>(userData);
  releaseStorage(storage, kDataInUse);
}
target: 0
cwe: [ "CWE-787" ]
project: folly
commit_id: 4f304af1411e68851bdd00ef6140e9de4616f7d3
hash: 213,578,673,032,912,460,000,000,000,000,000,000,000
size: 4
message:
[folly] Add additional overflow checks to IOBuf - CVE-2021-24036 Summary: As per title CVE-2021-24036 Reviewed By: jan Differential Revision: D27938605 fbshipit-source-id: 7481c54ae6fbb7b67b15b3631d5357c2f7043f9c
func:
void __fastcall TCustomDialog::AddImage(const UnicodeString & ImageName)
{
  TImage * Image = new TImage(this);
  Image->Name = L"Image";
  Image->Parent = GetDefaultParent();
  LoadDialogImage(Image, ImageName);
  Image->SetBounds(FIndent, FPos + ScaleByTextHeight(this, 3), Image->Picture->Width, Image->Picture->Height);
  FIndent += Image->Width + ScaleByTextHeight(this, 12);
}
target: 0
cwe: [ "CWE-787" ]
project: winscp
commit_id: faa96e8144e6925a380f94a97aa382c9427f688d
hash: 15,173,976,393,599,840,000,000,000,000,000,000,000
size: 9
message:
Bug 1943: Prevent loading session settings that can lead to remote code execution from handled URLs https://winscp.net/tracker/1943 (cherry picked from commit ec584f5189a856cd79509f754722a6898045c5e0) Source commit: 0f4be408b3f01132b00682da72d925d6c4ee649b
func:
paste_from_archive_list_ready_cb (GObject      *source_object,
                                  GAsyncResult *result,
                                  gpointer      user_data)
{
        FrWindow *window = user_data;
        GError   *error = NULL;

        if (! fr_archive_operation_finish (FR_ARCHIVE (source_object), result, &error)) {
                _paste_from_archive_operation_completed (window, FR_ACTION_PASTING_FILES, error);
                g_error_free (error);
                return;
        }

        fr_archive_action_started (window->priv->copy_from_archive, FR_ACTION_EXTRACTING_FILES);
        fr_archive_extract (window->priv->copy_from_archive,
                            window->priv->clipboard_data->files,
                            window->priv->clipboard_data->tmp_dir,
                            NULL,
                            FALSE,
                            TRUE,
                            FALSE,
                            window->priv->clipboard_data->password,
                            window->priv->cancellable,
                            paste_from_archive_extract_ready_cb,
                            window);
}
target: 0
cwe: [ "CWE-22" ]
project: file-roller
commit_id: b147281293a8307808475e102a14857055f81631
hash: 284,241,320,613,075,640,000,000,000,000,000,000,000
size: 26
message:
libarchive: sanitize filenames before extracting
int hllSparseToDense(robj *o) { sds sparse = o->ptr, dense; struct hllhdr *hdr, *oldhdr = (struct hllhdr*)sparse; int idx = 0, runlen, regval; uint8_t *p = (uint8_t*)sparse, *end = p+sdslen(sparse); /* If the representation is already the right one return ASAP. */ hdr = (struct hllhdr*) sparse; if (hdr->encoding == HLL_DENSE) return C_OK; /* Create a string of the right size filled with zero bytes. * Note that the cached cardinality is set to 0 as a side effect * that is exactly the cardinality of an empty HLL. */ dense = sdsnewlen(NULL,HLL_DENSE_SIZE); hdr = (struct hllhdr*) dense; *hdr = *oldhdr; /* This will copy the magic and cached cardinality. */ hdr->encoding = HLL_DENSE; /* Now read the sparse representation and set non-zero registers * accordingly. */ p += HLL_HDR_SIZE; while(p < end) { if (HLL_SPARSE_IS_ZERO(p)) { runlen = HLL_SPARSE_ZERO_LEN(p); idx += runlen; p++; } else if (HLL_SPARSE_IS_XZERO(p)) { runlen = HLL_SPARSE_XZERO_LEN(p); idx += runlen; p += 2; } else { runlen = HLL_SPARSE_VAL_LEN(p); regval = HLL_SPARSE_VAL_VALUE(p); while(runlen--) { HLL_DENSE_SET_REGISTER(hdr->registers,idx,regval); idx++; } p++; } } /* If the sparse representation was valid, we expect to find idx * set to HLL_REGISTERS. */ if (idx != HLL_REGISTERS) { sdsfree(dense); return C_ERR; } /* Free the old representation and set the new one. */ sdsfree(o->ptr); o->ptr = dense; return C_OK; }
target: 1
cwe: [ "CWE-787" ]
project: redis
commit_id: 9f13b2bd4967334b1701c6eccdf53760cb13f79e
hash: 61,941,181,118,489,350,000,000,000,000,000,000,000
size: 53
message:
Fix hyperloglog corruption
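This row is labeled vulnerable (target: 1): in hllSparseToDense above, idx is checked against HLL_REGISTERS only after the dense writes have already happened, so a corrupted VAL run can write past hdr->registers first. A hedged sketch of a pre-write guard in the VAL branch, in the spirit of the fix rather than a verified copy of it; breaking out leaves idx != HLL_REGISTERS, so the existing post-loop check then returns C_ERR:

        } else {
            runlen = HLL_SPARSE_VAL_LEN(p);
            regval = HLL_SPARSE_VAL_VALUE(p);
            if ((runlen + idx) > HLL_REGISTERS) break; /* would overflow the dense array */
            while (runlen--) {
                HLL_DENSE_SET_REGISTER(hdr->registers, idx, regval);
                idx++;
            }
            p++;
        }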
func:
crypt_status_info crypt_status(struct crypt_device *cd, const char *name)
{
        int r;

        if (!name)
                return CRYPT_INVALID;

        if (!cd)
                dm_backend_init(cd);

        r = dm_status_device(cd, name);

        if (!cd)
                dm_backend_exit(cd);

        if (r < 0 && r != -ENODEV)
                return CRYPT_INVALID;

        if (r == 0)
                return CRYPT_ACTIVE;

        if (r > 0)
                return CRYPT_BUSY;

        return CRYPT_INACTIVE;
}
target: 0
cwe: [ "CWE-345" ]
project: cryptsetup
commit_id: 0113ac2d889c5322659ad0596d4cfc6da53e356c
hash: 144,325,626,837,206,830,000,000,000,000,000,000,000
size: 26
message:
Fix CVE-2021-4122 - LUKS2 reencryption crash recovery attack Fix possible attacks against data confidentiality through LUKS2 online reencryption extension crash recovery. An attacker can modify on-disk metadata to simulate decryption in progress with crashed (unfinished) reencryption step and persistently decrypt part of the LUKS device. This attack requires repeated physical access to the LUKS device but no knowledge of user passphrases. The decryption step is performed after a valid user activates the device with a correct passphrase and modified metadata. There are no visible warnings for the user that such recovery happened (except using the luksDump command). The attack can also be reversed afterward (simulating crashed encryption from a plaintext) with possible modification of revealed plaintext. The problem was caused by reusing a mechanism designed for actual reencryption operation without reassessing the security impact for new encryption and decryption operations. While the reencryption requires calculating and verifying both key digests, no digest was needed to initiate decryption recovery if the destination is plaintext (no encryption key). Also, some metadata (like encryption cipher) is not protected, and an attacker could change it. Note that LUKS2 protects visible metadata only when a random change occurs. It does not protect against intentional modification but such modification must not cause a violation of data confidentiality. The fix introduces additional digest protection of reencryption metadata. The digest is calculated from known keys and critical reencryption metadata. Now an attacker cannot create correct metadata digest without knowledge of a passphrase for used keyslots. For more details, see LUKS2 On-Disk Format Specification version 1.1.0.
_XimClose( Xim im) { CARD32 buf32[BUFSIZE/4]; CARD8 *buf = (CARD8 *)buf32; CARD16 *buf_s = (CARD16 *)&buf[XIM_HEADER_SIZE]; INT16 len; CARD32 reply32[BUFSIZE/4]; char *reply = (char *)reply32; XPointer preply; int buf_size; int ret_code; if (!IS_SERVER_CONNECTED(im)) return True; buf_s[0] = im->private.proto.imid; /* imid */ buf_s[1] = 0; /* unused */ len = sizeof(CARD16) /* sizeof imid */ + sizeof(CARD16); /* sizeof unused */ _XimSetHeader((XPointer)buf, XIM_CLOSE, 0, &len); if (!(_XimWrite(im, len, (XPointer)buf))) return False; _XimFlush(im); buf_size = BUFSIZE; ret_code = _XimRead(im, &len, (XPointer)reply, buf_size, _XimCloseCheck, 0); if(ret_code == XIM_TRUE) { preply = reply; } else if(ret_code == XIM_OVERFLOW) { if(len <= 0) { preply = reply; } else { buf_size = len; preply = Xmalloc(buf_size); ret_code = _XimRead(im, &len, preply, buf_size, _XimCloseCheck, 0); if(ret_code != XIM_TRUE) { Xfree(preply); return False; } } } else return False; buf_s = (CARD16 *)((char *)preply + XIM_HEADER_SIZE); if (*((CARD8 *)preply) == XIM_ERROR) { _XimProcError(im, 0, (XPointer)&buf_s[3]); if(reply != preply) Xfree(preply); return False; } if(reply != preply) Xfree(preply); return True; }
target: 0
cwe: [ "CWE-190" ]
project: libx11
commit_id: 1a566c9e00e5f35c1f9e7f3d741a02e5170852b2
hash: 271,519,531,237,690,370,000,000,000,000,000,000,000
size: 56
message:
Zero out buffers in functions It looks like uninitialized stack or heap memory can leak out via padding bytes. Signed-off-by: Matthieu Herrb <[email protected]> Reviewed-by: Matthieu Herrb <[email protected]>
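A minimal sketch of the fix class in the message, mirroring the declarations in _XimClose above (illustrative, not the verified libX11 diff): zero the request and reply buffers before filling them, so struct padding cannot carry stale stack bytes onto the wire.

    CARD32 buf32[BUFSIZE/4];
    CARD32 reply32[BUFSIZE/4];

    memset(buf32, 0, sizeof(buf32));     /* previously left uninitialized */
    memset(reply32, 0, sizeof(reply32)); /* padding bytes now zero on the wire */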
get_clkbug_info( sockaddr_u *srcadr, endpt *inter, struct req_pkt *inpkt ) { register int i; register struct info_clkbug *ic; register u_int32 *clkaddr; register int items; struct refclockbug bug; sockaddr_u addr; ZERO_SOCK(&addr); AF(&addr) = AF_INET; #ifdef ISC_PLATFORM_HAVESALEN addr.sa.sa_len = SOCKLEN(&addr); #endif SET_PORT(&addr, NTP_PORT); items = INFO_NITEMS(inpkt->err_nitems); clkaddr = (u_int32 *)&inpkt->u; ic = (struct info_clkbug *)prepare_pkt(srcadr, inter, inpkt, sizeof(struct info_clkbug)); while (items-- > 0) { NSRCADR(&addr) = *clkaddr++; if (!ISREFCLOCKADR(&addr) || NULL == findexistingpeer(&addr, NULL, NULL, -1, 0)) { req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA); return; } ZERO(bug); refclock_buginfo(&addr, &bug); if (bug.nvalues == 0 && bug.ntimes == 0) { req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA); return; } ic->clockadr = NSRCADR(&addr); i = bug.nvalues; if (i > NUMCBUGVALUES) i = NUMCBUGVALUES; ic->nvalues = (u_char)i; ic->svalues = htons((u_short) (bug.svalues & ((1<<i)-1))); while (--i >= 0) ic->values[i] = htonl(bug.values[i]); i = bug.ntimes; if (i > NUMCBUGTIMES) i = NUMCBUGTIMES; ic->ntimes = (u_char)i; ic->stimes = htonl(bug.stimes); while (--i >= 0) { HTONL_FP(&bug.times[i], &ic->times[i]); } ic = (struct info_clkbug *)more_pkt(); } flush_pkt(); }
target: 1
cwe: [ "CWE-476" ]
project: ntp
commit_id: 8a0c765f3c47633fa262356b0818788d1cf249b1
hash: 329,243,862,618,845,260,000,000,000,000,000,000,000
size: 62
message:
[Bug 2939] reslist NULL pointer dereference [Bug 2940] Stack exhaustion in recursive traversal of restriction list -- these two were fixed together --
func:
void fxInt16Setter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian)
{
        txS2 value = (txS2)slot->value.integer;
#ifdef mxMisalignedSettersCrash
        value = EXPORT(S16);
        c_memcpy(data->value.arrayBuffer.address + offset, &value, sizeof(txS2));
#else
        *((txS2*)(data->value.arrayBuffer.address + offset)) = EXPORT(S16);
#endif
        mxMeterOne();
}
target: 0
cwe: [ "CWE-125" ]
project: moddable
commit_id: 135aa9a4a6a9b49b60aa730ebc3bcc6247d75c45
hash: 38,128,581,890,705,310,000,000,000,000,000,000,000
size: 11
message:
XS: #896
static void analyze_sbs(struct mddev *mddev) { int i; struct md_rdev *rdev, *freshest, *tmp; char b[BDEVNAME_SIZE]; freshest = NULL; rdev_for_each_safe(rdev, tmp, mddev) switch (super_types[mddev->major_version]. load_super(rdev, freshest, mddev->minor_version)) { case 1: freshest = rdev; break; case 0: break; default: printk( KERN_ERR \ "md: fatal superblock inconsistency in %s" " -- removing from array\n", bdevname(rdev->bdev,b)); md_kick_rdev_from_array(rdev); } super_types[mddev->major_version]. validate_super(mddev, freshest); i = 0; rdev_for_each_safe(rdev, tmp, mddev) { if (mddev->max_disks && (rdev->desc_nr >= mddev->max_disks || i > mddev->max_disks)) { printk(KERN_WARNING "md: %s: %s: only %d devices permitted\n", mdname(mddev), bdevname(rdev->bdev, b), mddev->max_disks); md_kick_rdev_from_array(rdev); continue; } if (rdev != freshest) { if (super_types[mddev->major_version]. validate_super(mddev, rdev)) { printk(KERN_WARNING "md: kicking non-fresh %s" " from array!\n", bdevname(rdev->bdev,b)); md_kick_rdev_from_array(rdev); continue; } /* No device should have a Candidate flag * when reading devices */ if (test_bit(Candidate, &rdev->flags)) { pr_info("md: kicking Cluster Candidate %s from array!\n", bdevname(rdev->bdev, b)); md_kick_rdev_from_array(rdev); } } if (mddev->level == LEVEL_MULTIPATH) { rdev->desc_nr = i++; rdev->raid_disk = rdev->desc_nr; set_bit(In_sync, &rdev->flags); } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) { rdev->raid_disk = -1; clear_bit(In_sync, &rdev->flags); } } }
target: 0
cwe: [ "CWE-200" ]
project: linux
commit_id: b6878d9e03043695dbf3fa1caa6dfc09db225b16
hash: 286,390,739,771,931,900,000,000,000,000,000,000,000
size: 66
message:
md: use kzalloc() when bitmap is disabled In drivers/md/md.c get_bitmap_file() uses kmalloc() for creating a mdu_bitmap_file_t called "file". 5769 file = kmalloc(sizeof(*file), GFP_NOIO); 5770 if (!file) 5771 return -ENOMEM; This structure is copied to user space at the end of the function. 5786 if (err == 0 && 5787 copy_to_user(arg, file, sizeof(*file))) 5788 err = -EFAULT But if bitmap is disabled only the first byte of "file" is initialized with zero, so it's possible to read some bytes (up to 4095) of kernel space memory from user space. This is an information leak. 5775 /* bitmap disabled, zero the first byte and copy out */ 5776 if (!mddev->bitmap_info.file) 5777 file->pathname[0] = '\0'; Signed-off-by: Benjamin Randazzo <[email protected]> Signed-off-by: NeilBrown <[email protected]>
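The message quotes the offending kmalloc() lines and names the fix in its subject; sketched as the corresponding one-line change:

        file = kzalloc(sizeof(*file), GFP_NOIO);  /* was kmalloc(): zero-fill stops the leak */
        if (!file)
                return -ENOMEM;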
func:
point_send(PG_FUNCTION_ARGS)
{
        Point      *pt = PG_GETARG_POINT_P(0);
        StringInfoData buf;

        pq_begintypsend(&buf);
        pq_sendfloat8(&buf, pt->x);
        pq_sendfloat8(&buf, pt->y);
        PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
}
target: 0
cwe: [ "CWE-703", "CWE-189" ]
project: postgres
commit_id: 31400a673325147e1205326008e32135a78b4d8a
hash: 213,734,606,258,790,260,000,000,000,000,000,000,000
size: 10
message:
Predict integer overflow to avoid buffer overruns. Several functions, mostly type input functions, calculated an allocation size such that the calculation wrapped to a small positive value when arguments implied a sufficiently-large requirement. Writes past the end of the inadvertent small allocation followed shortly thereafter. Coverity identified the path_in() vulnerability; code inspection led to the rest. In passing, add check_stack_depth() to prevent stack overflow in related functions. Back-patch to 8.4 (all supported versions). The non-comment hstore changes touch code that did not exist in 8.4, so that part stops at 9.0. Noah Misch and Heikki Linnakangas, reviewed by Tom Lane. Security: CVE-2014-0064
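The message describes allocation sizes that wrap to a small positive value before the allocation. A self-contained sketch of the overflow-check pattern (illustrative, not the PostgreSQL patch itself):

#include <stdint.h>
#include <stdlib.h>

/* Computing header + n * elemsize can wrap; the allocation then succeeds
 * but is far smaller than the writes that follow. Checking the
 * multiplication against the maximum first prevents the wrap. */
static void *alloc_array(size_t header, size_t n, size_t elemsize)
{
    if (elemsize != 0 && n > (SIZE_MAX - header) / elemsize)
        return NULL;                       /* would overflow: refuse */
    return malloc(header + n * elemsize);  /* known not to wrap */
}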
func:
int SSL_CTX_get_ex_new_index(long argl, void *argp, CRYPTO_EX_new *new_func,
                             CRYPTO_EX_dup *dup_func, CRYPTO_EX_free *free_func)
{
        return CRYPTO_get_ex_new_index(CRYPTO_EX_INDEX_SSL_CTX, argl, argp,
                                       new_func, dup_func, free_func);
}
target: 0
cwe: []
project: openssl
commit_id: ee2ffc279417f15fef3b1073c7dc81a908991516
hash: 157,526,572,049,821,860,000,000,000,000,000,000,000
size: 6
message:
Add Next Protocol Negotiation.
func:
download_requested_dialog_response_cb (GtkDialog *dialog,
                                       int response_id,
                                       WebKitDownload *download)
{
        if (response_id == GTK_RESPONSE_ACCEPT) {
                DownloaderView *dview;
                char *uri;

                uri = gtk_file_chooser_get_uri (GTK_FILE_CHOOSER (dialog));
                g_object_set_data (G_OBJECT (download), "user-destination-uri", uri);

                dview = EPHY_DOWNLOADER_VIEW (ephy_embed_shell_get_downloader_view (embed_shell));
                downloader_view_add_download (dview, download);
        } else {
                webkit_download_cancel (download);
                ephy_file_delete_uri (webkit_download_get_destination_uri (download));
        }

        gtk_widget_destroy (GTK_WIDGET (dialog));

        /* User provided us with a destination or cancelled, unfreeze. */
        g_object_thaw_notify (G_OBJECT (download));
        g_object_unref (download);
}
target: 0
cwe: []
project: epiphany
commit_id: 3e0f7dea754381c5ad11a06ccc62eb153382b498
hash: 231,840,181,564,892,400,000,000,000,000,000,000,000
size: 23
message:
Report broken certs through the padlock icon This uses a new feature in libsoup that reports through a SoupMessageFlag whether the message is talking to a server that has a trusted server certificate. Bug #600663
func:
ecma_string_trim_helper (const lit_utf8_byte_t **utf8_str_p, /**< [in, out] current string position */
                         lit_utf8_size_t *utf8_str_size) /**< [in, out] size of the given string */
{
  const lit_utf8_byte_t *end_p = *utf8_str_p + *utf8_str_size;
  const lit_utf8_byte_t *start_p = *utf8_str_p;

  const lit_utf8_byte_t *new_start_p = ecma_string_trim_front (start_p, end_p);
  const lit_utf8_byte_t *new_end_p = ecma_string_trim_back (new_start_p, end_p);

  *utf8_str_size = (lit_utf8_size_t) (new_end_p - new_start_p);
  *utf8_str_p = new_start_p;
} /* ecma_string_trim_helper */
target: 0
cwe: [ "CWE-416" ]
project: jerryscript
commit_id: 3bcd48f72d4af01d1304b754ef19fe1a02c96049
hash: 162,599,967,399,551,300,000,000,000,000,000,000,000
size: 12
message:
Improve parse_identifier (#4691) Ascii string length is no longer computed during string allocation. JerryScript-DCO-1.0-Signed-off-by: Daniel Batiz [email protected]
func:
static void mptsas_diag_write(void *opaque, hwaddr addr,
                              uint64_t val, unsigned size)
{
    MPTSASState *s = opaque;
    trace_mptsas_diag_write(s, addr, val);
}
target: 0
cwe: [ "CWE-416" ]
project: qemu
commit_id: 3791642c8d60029adf9b00bcb4e34d7d8a1aea4d
hash: 173,694,982,961,599,300,000,000,000,000,000,000,000
size: 6
message:
mptsas: Remove unused MPTSASState 'pending' field (CVE-2021-3392) While processing SCSI i/o requests in mptsas_process_scsi_io_request(), the Megaraid emulator appends new MPTSASRequest object 'req' to the 's->pending' queue. In case of an error, this same object gets dequeued in mptsas_free_request() only if SCSIRequest object 'req->sreq' is initialised. This may lead to a use-after-free issue. Since s->pending is actually not used, simply remove it from MPTSASState. Cc: [email protected] Signed-off-by: Michael Tokarev <[email protected]> Reviewed-by: Philippe Mathieu-Daudé <[email protected]> Signed-off-by: Philippe Mathieu-Daudé <[email protected]> Reported-by: Cheolwoo Myung <[email protected]> Message-id: [email protected] Message-Id: <[email protected]> Suggested-by: Paolo Bonzini <[email protected]> Reported-by: Cheolwoo Myung <[email protected]> BugLink: https://bugs.launchpad.net/qemu/+bug/1914236 (CVE-2021-3392) Fixes: e351b826112 ("hw: Add support for LSI SAS1068 (mptsas) device") [PMD: Reworded description, added more tags] Signed-off-by: Philippe Mathieu-Daudé <[email protected]> Reviewed-by: Peter Maydell <[email protected]> Signed-off-by: Peter Maydell <[email protected]>
func:
int enc_untrusted_pipe2(int pipefd[2], int flags) {
  if (flags & ~(O_CLOEXEC | O_DIRECT | O_NONBLOCK)) {
    errno = EINVAL;
    return -1;
  }
  return EnsureInitializedAndDispatchSyscall(
      asylo::system_call::kSYS_pipe2, pipefd, TokLinuxFileStatusFlag(flags));
}
target: 0
cwe: [ "CWE-125" ]
project: asylo
commit_id: b1d120a2c7d7446d2cc58d517e20a1b184b82200
hash: 28,767,290,455,608,028,000,000,000,000,000,000,000
size: 9
message:
Check for return size in enc_untrusted_read Check return size does not exceed requested. The returned result and content still cannot be trusted, but it's expected behavior when not using a secure file system. PiperOrigin-RevId: 333827386 Change-Id: I0bdec0aec9356ea333dc8c647eba5d2772875f29
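The message describes the companion fix in enc_untrusted_read. A minimal sketch of the check, with hypothetical helper names (host_read stands in for the actual dispatch into the untrusted host):

#include <errno.h>
#include <sys/types.h>

extern ssize_t host_read(int fd, void *buf, size_t count); /* hypothetical */

static ssize_t checked_read(int fd, void *buf, size_t count)
{
    ssize_t ret = host_read(fd, buf, count);  /* untrusted return value */
    if (ret > (ssize_t)count) {
        errno = EIO;   /* host claimed more bytes than requested: reject */
        return -1;
    }
    return ret;
}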
func:
u32 parse_dashlive(char *arg, char *arg_val, u32 opt)
{
        dash_mode = opt ? GF_DASH_DYNAMIC_DEBUG : GF_DASH_DYNAMIC;
        dash_live = 1;
        if (arg[10] == '=') {
                dash_ctx_file = arg + 11;
        }
        dash_duration = atof(arg_val);
        return 0;
}
target: 0
cwe: [ "CWE-476" ]
project: gpac
commit_id: 87afe070cd6866df7fe80f11b26ef75161de85e0
hash: 5,364,245,157,305,020,000,000,000,000,000,000,000
size: 10
message:
fixed #1734
func:
file_truncate (struct rw *rw, int64_t size)
{
  struct rw_file *rwf = (struct rw_file *) rw;

  /* If the destination is an ordinary file then the original file
   * size doesn't matter.  Truncate it to the source size.  But
   * truncate it to zero first so the file is completely empty and
   * sparse.
   */
  if (rwf->is_block)
    return;

  if (ftruncate (rwf->fd, 0) == -1 ||
      ftruncate (rwf->fd, size) == -1) {
    fprintf (stderr, "%s: truncate: %m\n", rw->name);
    exit (EXIT_FAILURE);
  }
  rwf->rw.size = size;

  /* We can assume the destination is zero. */
  destination_is_zero = true;
}
target: 0
cwe: [ "CWE-252" ]
project: libnbd
commit_id: 8d444b41d09a700c7ee6f9182a649f3f2d325abb
hash: 129,365,804,079,674,730,000,000,000,000,000,000,000
size: 22
message:
copy: CVE-2022-0485: Fail nbdcopy if NBD read or write fails nbdcopy has a nasty bug when performing multi-threaded copies using asynchronous nbd calls - it was blindly treating the completion of an asynchronous command as successful, rather than checking the *error parameter. This can result in the silent creation of a corrupted image in two different ways: when a read fails, we blindly wrote garbage to the destination; when a write fails, we did not flag that the destination was not written. Since nbdcopy already calls exit() on a synchronous read or write failure to a file, doing the same for an asynchronous op to an NBD server is the simplest solution. A nicer solution, but more invasive to code and thus not done here, might be to allow up to N retries of the transaction (in case the read or write failure was transient), or even having a mode where as much data is copied as possible (portions of the copy that failed would be logged on stderr, and nbdcopy would still fail with a non-zero exit status, but this would copy more than just stopping at the first error, as can be done with rsync or ddrescue). Note that since we rely on auto-retiring and do NOT call nbd_aio_command_completed, our completion callbacks must always return 1 (if they do not exit() first), even when acting on *error, so as not leave the command allocated until nbd_close. As such, there is no sane way to return an error to a manual caller of the callback, and therefore we can drop dead code that calls perror() and exit() if the callback "failed". It is also worth documenting the contract on when we must manually call the callback during the asynch_zero callback, so that we do not leak or double-free the command; thankfully, all the existing code paths were correct. The added testsuite script demonstrates several scenarios, some of which fail without the rest of this patch in place, and others which showcase ways in which sparse images can bypass errors. Once backports are complete, a followup patch on the main branch will edit docs/libnbd-security.pod with the mailing list announcement of the stable branch commit ids and release versions that incorporate this fix. Reported-by: Nir Soffer <[email protected]> Fixes: bc896eec4d ("copy: Implement multi-conn, multiple threads, multiple requests in flight.", v1.5.6) Fixes: https://bugzilla.redhat.com/2046194 Message-Id: <[email protected]> Acked-by: Richard W.M. Jones <[email protected]> Acked-by: Nir Soffer <[email protected]> [eblake: fix error message per Nir, tweak requires lines in unit test per Rich] Reviewed-by: Laszlo Ersek <[email protected]>
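The message explains that an asynchronous completion callback must inspect *error instead of assuming success, and must return 1 so the auto-retired command is freed exactly once. A minimal sketch of such a callback; libnbd completion callbacks have the int (*)(void *user_data, int *error) shape, and the failure handling here follows the message rather than the exact nbdcopy diff:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int read_completed(void *user_data, int *error)
{
    if (*error) {
        fprintf(stderr, "nbdcopy: async read failed: %s\n", strerror(*error));
        exit(EXIT_FAILURE);   /* match the synchronous read/write behaviour */
    }
    return 1;                 /* auto-retire: command freed exactly once */
}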
func:
static inline unsigned long _task_util_est(struct task_struct *p)
{
        struct util_est ue = READ_ONCE(p->se.avg.util_est);

        return (max(ue.ewma, ue.enqueued) | UTIL_AVG_UNCHANGED);
}
target: 0
cwe: [ "CWE-400", "CWE-703", "CWE-835" ]
project: linux
commit_id: c40f7d74c741a907cfaeb73a7697081881c497d0
hash: 33,007,127,943,833,740,000,000,000,000,000,000,000
size: 6
message:
sched/fair: Fix infinite loop in update_blocked_averages() by reverting a9e7f6544b9c Zhipeng Xie, Xie XiuQi and Sargun Dhillon reported lockups in the scheduler under high loads, starting at around the v4.18 time frame, and Zhipeng Xie tracked it down to bugs in the rq->leaf_cfs_rq_list manipulation. Do a (manual) revert of: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path") It turns out that the list_del_leaf_cfs_rq() introduced by this commit is a surprising property that was not considered in followup commits such as: 9c2791f936ef ("sched/fair: Fix hierarchical order in rq->leaf_cfs_rq_list") As Vincent Guittot explains: "I think that there is a bigger problem with commit a9e7f6544b9c and cfs_rq throttling: Let take the example of the following topology TG2 --> TG1 --> root: 1) The 1st time a task is enqueued, we will add TG2 cfs_rq then TG1 cfs_rq to leaf_cfs_rq_list and we are sure to do the whole branch in one path because it has never been used and can't be throttled so tmp_alone_branch will point to leaf_cfs_rq_list at the end. 2) Then TG1 is throttled 3) and we add TG3 as a new child of TG1. 4) The 1st enqueue of a task on TG3 will add TG3 cfs_rq just before TG1 cfs_rq and tmp_alone_branch will stay on rq->leaf_cfs_rq_list. With commit a9e7f6544b9c, we can del a cfs_rq from rq->leaf_cfs_rq_list. So if the load of TG1 cfs_rq becomes NULL before step 2) above, TG1 cfs_rq is removed from the list. Then at step 4), TG3 cfs_rq is added at the beginning of rq->leaf_cfs_rq_list but tmp_alone_branch still points to TG3 cfs_rq because its throttled parent can't be enqueued when the lock is released. tmp_alone_branch doesn't point to rq->leaf_cfs_rq_list whereas it should. So if TG3 cfs_rq is removed or destroyed before tmp_alone_branch points on another TG cfs_rq, the next TG cfs_rq that will be added, will be linked outside rq->leaf_cfs_rq_list - which is bad. In addition, we can break the ordering of the cfs_rq in rq->leaf_cfs_rq_list but this ordering is used to update and propagate the update from leaf down to root." Instead of trying to work through all these cases and trying to reproduce the very high loads that produced the lockup to begin with, simplify the code temporarily by reverting a9e7f6544b9c - which change was clearly not thought through completely. This (hopefully) gives us a kernel that doesn't lock up so people can continue to enjoy their holidays without worrying about regressions. ;-) [ mingo: Wrote changelog, fixed weird spelling in code comment while at it. ] Analyzed-by: Xie XiuQi <[email protected]> Analyzed-by: Vincent Guittot <[email protected]> Reported-by: Zhipeng Xie <[email protected]> Reported-by: Sargun Dhillon <[email protected]> Reported-by: Xie XiuQi <[email protected]> Tested-by: Zhipeng Xie <[email protected]> Tested-by: Sargun Dhillon <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> Acked-by: Vincent Guittot <[email protected]> Cc: <[email protected]> # v4.13+ Cc: Bin Li <[email protected]> Cc: Mike Galbraith <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Tejun Heo <[email protected]> Cc: Thomas Gleixner <[email protected]> Fixes: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path") Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
static int handle_incoming_queue(struct netfront_queue *queue, struct sk_buff_head *rxq) { struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats); int packets_dropped = 0; struct sk_buff *skb; while ((skb = __skb_dequeue(rxq)) != NULL) { int pull_to = NETFRONT_SKB_CB(skb)->pull_to; if (pull_to > skb_headlen(skb)) __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); /* Ethernet work: Delayed to here as it peeks the header. */ skb->protocol = eth_type_trans(skb, queue->info->netdev); skb_reset_network_header(skb); if (checksum_setup(queue->info->netdev, skb)) { kfree_skb(skb); packets_dropped++; queue->info->netdev->stats.rx_errors++; continue; } u64_stats_update_begin(&rx_stats->syncp); rx_stats->packets++; rx_stats->bytes += skb->len; u64_stats_update_end(&rx_stats->syncp); /* Pass it up. */ napi_gro_receive(&queue->napi, skb); } return packets_dropped; }
0
[]
linux
f63c2c2032c2e3caad9add3b82cc6e91c376fd26
40,491,705,797,475,110,000,000,000,000,000,000,000
35
xen-netfront: restore __skb_queue_tail() positioning in xennet_get_responses() The commit referenced below moved the invocation past the "next" label, without any explanation. In fact this allows misbehaving backends undue control over the domain the frontend runs in, as earlier detected errors require the skb to not be freed (it may be retained for later processing via xennet_move_rx_slot(), or it may simply be unsafe to have it freed). This is CVE-2022-33743 / XSA-405. Fixes: 6c5aa6fc4def ("xen networking: add basic XDP support for xen-netfront") Signed-off-by: Jan Beulich <[email protected]> Reviewed-by: Juergen Gross <[email protected]> Signed-off-by: Juergen Gross <[email protected]>
int select_compression_request_header(struct session *s, struct buffer *req) { struct http_txn *txn = &s->txn; struct http_msg *msg = &txn->req; struct hdr_ctx ctx; struct comp_algo *comp_algo = NULL; struct comp_algo *comp_algo_back = NULL; /* Disable compression for older user agents announcing themselves as "Mozilla/4" * unless they are known good (MSIE 6 with XP SP2, or MSIE 7 and later). * See http://zoompf.com/2012/02/lose-the-wait-http-compression for more details. */ ctx.idx = 0; if (http_find_header2("User-Agent", 10, req->p, &txn->hdr_idx, &ctx) && ctx.vlen >= 9 && memcmp(ctx.line + ctx.val, "Mozilla/4", 9) == 0 && (ctx.vlen < 31 || memcmp(ctx.line + ctx.val + 25, "MSIE ", 5) != 0 || ctx.line[ctx.val + 30] < '6' || (ctx.line[ctx.val + 30] == '6' && (ctx.vlen < 54 || memcmp(ctx.line + 51, "SV1", 3) != 0)))) { s->comp_algo = NULL; return 0; } /* search for the algo in the backend in priority or the frontend */ if ((s->be->comp && (comp_algo_back = s->be->comp->algos)) || (s->fe->comp && (comp_algo_back = s->fe->comp->algos))) { ctx.idx = 0; while (http_find_header2("Accept-Encoding", 15, req->p, &txn->hdr_idx, &ctx)) { for (comp_algo = comp_algo_back; comp_algo; comp_algo = comp_algo->next) { if (word_match(ctx.line + ctx.val, ctx.vlen, comp_algo->name, comp_algo->name_len)) { s->comp_algo = comp_algo; /* remove all occurrences of the header when "compression offload" is set */ if ((s->be->comp && s->be->comp->offload) || (s->fe->comp && s->fe->comp->offload)) { http_remove_header2(msg, &txn->hdr_idx, &ctx); ctx.idx = 0; while (http_find_header2("Accept-Encoding", 15, req->p, &txn->hdr_idx, &ctx)) { http_remove_header2(msg, &txn->hdr_idx, &ctx); } } return 1; } } } } /* identity is implicit does not require headers */ if ((s->be->comp && (comp_algo_back = s->be->comp->algos)) || (s->fe->comp && (comp_algo_back = s->fe->comp->algos))) { for (comp_algo = comp_algo_back; comp_algo; comp_algo = comp_algo->next) { if (comp_algo->add_data == identity_add_data) { s->comp_algo = comp_algo; return 1; } } } s->comp_algo = NULL; return 0; }
0
[]
haproxy
aae75e3279c6c9bd136413a72dafdcd4986bb89a
194,782,781,172,889,740,000,000,000,000,000,000,000
62
BUG/CRITICAL: using HTTP information in tcp-request content may crash the process During normal HTTP request processing, request buffers are realigned if there are less than global.maxrewrite bytes available after them, in order to leave enough room for rewriting headers after the request. This is done in http_wait_for_request(). However, if some HTTP inspection happens during a "tcp-request content" rule, this realignment is not performed. In theory this is not a problem because empty buffers are always aligned and TCP inspection happens at the beginning of a connection. But with HTTP keep-alive, it also happens at the beginning of each subsequent request. So if a second request was pipelined by the client before the first one had a chance to be forwarded, the second request will not be realigned. Then, http_wait_for_request() will not perform such a realignment either because the request was already parsed and marked as such. The consequence of this is that the rewrite of a sufficient number of such pipelined, unaligned requests may leave less room past the request being processed than the configured reserve, which can lead to a buffer overflow if request processing appends some data past the end of the buffer. A number of conditions are required for the bug to be triggered : - HTTP keep-alive must be enabled ; - HTTP inspection in TCP rules must be used ; - some request appending rules are needed (reqadd, x-forwarded-for) - since empty buffers are always realigned, the client must pipeline enough requests so that the buffer always contains something till the point where there is no more room for rewriting. While such a configuration is quite unlikely to be met (which is confirmed by the bug's lifetime), a few people do use these features together for very specific usages. And more importantly, writing such a configuration and the request to attack it is trivial. A quick workaround consists in forcing keep-alive off by adding "option httpclose" or "option forceclose" in the frontend. Alternatively, disabling HTTP-based TCP inspection rules is enough if the application supports it. At first glance, this bug does not look like it could lead to remote code execution, as the overflowing part is controlled by the configuration and not by the user. But some deeper analysis should be performed to confirm this. And anyway, corrupting the process' memory and crashing it is quite trivial. Special thanks go to Yves Lafon from the W3C who reported this bug and deployed significant efforts to collect the relevant data needed to understand it in less than one week. CVE-2013-1912 was assigned to this issue. Note that 1.4 is also affected so the fix must be backported.
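A minimal sketch of the realignment rule this message describes, using made-up names (struct buf, ensure_rewrite_room) rather than HAProxy's real buffer API: every path that parses a request, including "tcp-request content" inspection, must check the spare room left after the data and realign before header rewriting can run past the end of the buffer.

```
/* Hypothetical model, not HAProxy code: keep at least "maxrewrite"
 * spare bytes after the pending request, realigning if needed. */
#include <stddef.h>
#include <string.h>

struct buf {
    char   data[8192];
    size_t off;   /* start of the pending request within data[] */
    size_t len;   /* length of the pending request */
};

static void ensure_rewrite_room(struct buf *b, size_t maxrewrite)
{
    size_t room_after = sizeof(b->data) - (b->off + b->len);

    if (room_after < maxrewrite && b->off > 0) {
        memmove(b->data, b->data + b->off, b->len); /* slide to front */
        b->off = 0;
    }
}
```

The bug was exactly that pipelined keep-alive requests reached header rewriting without this step ever running for them.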
int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, struct inode **delegated_inode) { struct inode *inode = old_dentry->d_inode; unsigned max_links = dir->i_sb->s_max_links; int error; if (!inode) return -ENOENT; error = may_create(dir, new_dentry); if (error) return error; if (dir->i_sb != inode->i_sb) return -EXDEV; /* * A link to an append-only or immutable file cannot be created. */ if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) return -EPERM; if (!dir->i_op->link) return -EPERM; if (S_ISDIR(inode->i_mode)) return -EPERM; error = security_inode_link(old_dentry, dir, new_dentry); if (error) return error; mutex_lock(&inode->i_mutex); /* Make sure we don't allow creating hardlink to an unlinked file */ if (inode->i_nlink == 0 && !(inode->i_state & I_LINKABLE)) error = -ENOENT; else if (max_links && inode->i_nlink >= max_links) error = -EMLINK; else { error = try_break_deleg(inode, delegated_inode); if (!error) error = dir->i_op->link(old_dentry, dir, new_dentry); } if (!error && (inode->i_state & I_LINKABLE)) { spin_lock(&inode->i_lock); inode->i_state &= ~I_LINKABLE; spin_unlock(&inode->i_lock); } mutex_unlock(&inode->i_mutex); if (!error) fsnotify_link(dir, inode, new_dentry); return error; }
0
[ "CWE-416" ]
linux
f15133df088ecadd141ea1907f2c96df67c729f0
207,295,833,490,218,240,000,000,000,000,000,000,000
52
path_openat(): fix double fput() path_openat() jumps to the wrong place after do_tmpfile() - it has already done path_cleanup() (as part of path_lookupat() called by do_tmpfile()), so doing that again can lead to double fput(). Cc: [email protected] # v3.11+ Signed-off-by: Al Viro <[email protected]>
int sock_create_kern(int family, int type, int protocol, struct socket **res) { return __sock_create(&init_net, family, type, protocol, res, 1); }
0
[ "CWE-264" ]
net
4de930efc23b92ddf88ce91c405ee645fe6e27ea
283,776,425,088,374,060,000,000,000,000,000,000,000
4
net: validate the range we feed to iov_iter_init() in sys_sendto/sys_recvfrom Cc: [email protected] # v3.19 Signed-off-by: Al Viro <[email protected]> Signed-off-by: David S. Miller <[email protected]>
int xml_complete(modsec_rec *msr, char **error_msg) { if (error_msg == NULL) return -1; *error_msg = NULL; /* Only if we have a context, meaning we've done some work. */ if (msr->xml->parsing_ctx != NULL) { /* This is how we signalise the end of parsing to libxml. */ xmlParseChunk(msr->xml->parsing_ctx, NULL, 0, 1); /* Preserve the results for our reference. */ msr->xml->well_formed = msr->xml->parsing_ctx->wellFormed; msr->xml->doc = msr->xml->parsing_ctx->myDoc; /* Clean up everything else. */ xmlFreeParserCtxt(msr->xml->parsing_ctx); msr->xml->parsing_ctx = NULL; msr_log(msr, 4, "XML: Parsing complete (well_formed %u).", msr->xml->well_formed); if (msr->xml->well_formed != 1) { *error_msg = apr_psprintf(msr->mp, "XML: Failed parsing document."); return -1; } } return 1; }
0
[ "CWE-20", "CWE-611" ]
ModSecurity
d4d80b38aa85eccb26e3c61b04d16e8ca5de76fe
193,729,920,373,243,200,000,000,000,000,000,000,000
26
Added SecXmlExternalEntity
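This one-line message introduces the SecXmlExternalEntity directive. As a hedged sketch of the underlying idea (parse_body and its flag are invented for this example; xmlReadMemory and the XML_PARSE_* options are standard libxml2 API), external entity and DTD loading stay disabled unless the administrator explicitly opts in:

```
/* Sketch only: gate libxml2's entity-expansion options behind an
 * explicit configuration flag, off by default. */
#include <libxml/parser.h>

xmlDocPtr parse_body(const char *buf, int len, int allow_external)
{
    int opts = XML_PARSE_NONET;        /* never fetch DTDs/entities online */

    if (allow_external)                /* e.g. SecXmlExternalEntity On */
        opts |= XML_PARSE_NOENT | XML_PARSE_DTDLOAD;
    return xmlReadMemory(buf, len, "request-body.xml", NULL, opts);
}
```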
getfs_by_devdir (const char *dev, const char *dir) { struct mntentchn *mc, *mc0; mc0 = fstab_head(); for (mc = mc0->nxt; mc && mc != mc0; mc = mc->nxt) { int ok = 1; /* dir */ if (!streq(mc->m.mnt_dir, dir)) { char *dr = canonicalize(mc->m.mnt_dir); ok = streq(dr, dir); my_free(dr); } /* spec */ if (ok && !streq(mc->m.mnt_fsname, dev)) { const char *fs = mc->m.mnt_fsname; if (strncmp (fs, "LABEL=", 6) == 0) { ok = has_label(dev, fs + 6); } else if (strncmp (fs, "UUID=", 5) == 0) { ok = has_uuid(dev, fs + 5); } else { fs = canonicalize_spec(mc->m.mnt_fsname); ok = streq(fs, dev); my_free(fs); } } if (ok) return mc; } return NULL; }
0
[ "CWE-399" ]
util-linux
4b39b6aefd5dd8ac68a92adc650dc13d5d54d704
41,039,945,588,463,075,000,000,000,000,000,000,000
35
mount: use fflush() and temporary file for mtab updates (CVE-2011-1089) http://thread.gmane.org/gmane.comp.security.oss.general/4374 Changes: - force mount(8) to use /etc/mtab.tmp file every time. The original code used the tmp file for remount/move operations only. - call and check fflush() return code for the tmp file Note mount(8) blocks all signals when writing to mtab, so it's not affected by SIGXFSZ and the mtab lock file is always removed. This patch does not fix the same issue in umount(8) and libmount. Signed-off-by: Karel Zak <[email protected]>
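The pattern this message describes translates to ordinary stdio: write the replacement into a temporary file, check the fflush()/fclose() results, and only then rename() over the original so readers never see a truncated file. A self-contained sketch (paths and error policy are illustrative, not the actual mount(8) code):

```
#include <stdio.h>

static int update_mtab(const char *contents)
{
    FILE *f = fopen("/etc/mtab.tmp", "w");

    if (!f)
        return -1;
    if (fputs(contents, f) == EOF || fflush(f) != 0) {
        fclose(f);                 /* write/flush failed, e.g. ENOSPC */
        remove("/etc/mtab.tmp");
        return -1;
    }
    if (fclose(f) != 0) {
        remove("/etc/mtab.tmp");
        return -1;
    }
    return rename("/etc/mtab.tmp", "/etc/mtab"); /* atomic replacement */
}
```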
static void hns_rcb_set_port_timeout( struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout) { if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) { dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG, timeout * HNS_RCB_CLK_FREQ_MHZ); } else if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) { if (timeout > HNS_RCB_DEF_GAP_TIME_USECS) dsaf_write_dev(rcb_common, RCB_PORT_INT_GAPTIME_REG + port_idx * 4, HNS_RCB_DEF_GAP_TIME_USECS); else dsaf_write_dev(rcb_common, RCB_PORT_INT_GAPTIME_REG + port_idx * 4, timeout); dsaf_write_dev(rcb_common, RCB_PORT_CFG_OVERTIME_REG + port_idx * 4, timeout); } else { dsaf_write_dev(rcb_common, RCB_PORT_CFG_OVERTIME_REG + port_idx * 4, timeout); } }
0
[ "CWE-119", "CWE-703" ]
linux
412b65d15a7f8a93794653968308fc100f2aa87c
938,362,013,755,591,800,000,000,000,000,000,000
25
net: hns: fix ethtool_get_strings overflow in hns driver hns_get_sset_count() returns HNS_NET_STATS_CNT and the data space allocated is not enough for ethtool_get_strings(), which will cause random memory corruption. When SLAB and DEBUG_SLAB are both enabled, memory corruptions like the following can be observed without this patch: [ 43.115200] Slab corruption (Not tainted): Acpi-ParseExt start=ffff801fb0b69030, len=80 [ 43.115206] Redzone: 0x9f911029d006462/0x5f78745f31657070. [ 43.115208] Last user: [<5f7272655f746b70>](0x5f7272655f746b70) [ 43.115214] 010: 70 70 65 31 5f 74 78 5f 70 6b 74 00 6b 6b 6b 6b ppe1_tx_pkt.kkkk [ 43.115217] 030: 70 70 65 31 5f 74 78 5f 70 6b 74 5f 6f 6b 00 6b ppe1_tx_pkt_ok.k [ 43.115218] Next obj: start=ffff801fb0b69098, len=80 [ 43.115220] Redzone: 0x706d655f6f666966/0x9f911029d74e35b. [ 43.115229] Last user: [<ffff0000084b11b0>](acpi_os_release_object+0x28/0x38) [ 43.115231] 000: 74 79 00 6b 6b 6b 6b 6b 70 70 65 31 5f 74 78 5f ty.kkkkkppe1_tx_ [ 43.115232] 010: 70 6b 74 5f 65 72 72 5f 63 73 75 6d 5f 66 61 69 pkt_err_csum_fai Signed-off-by: Timmy Li <[email protected]> Signed-off-by: David S. Miller <[email protected]>
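The contract the fix restores, in sketch form (names and the count are placeholders, not the hns driver's): whatever get_sset_count() advertises is exactly how many ETH_GSTRING_LEN-sized slots the caller allocates, so get_strings() must never emit more.

```
#include <stdio.h>

#define GSTRING_LEN 32                /* stand-in for ETH_GSTRING_LEN */

static int stats_count(void) { return 4; }  /* sizes the caller's buffer */

static void stats_strings(char *buf)
{
    int n = stats_count();            /* bound writes by the same count */

    for (int i = 0; i < n; i++)
        snprintf(buf + i * GSTRING_LEN, GSTRING_LEN, "rx_stat_%d", i);
}
```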
*/ PHPAPI void php_print_info_htmlhead(TSRMLS_D) { php_info_print("<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"DTD/xhtml1-transitional.dtd\">\n"); php_info_print("<html xmlns=\"http://www.w3.org/1999/xhtml\">"); php_info_print("<head>\n"); php_info_print_style(TSRMLS_C); php_info_print("<title>phpinfo()</title>"); php_info_print("<meta name=\"ROBOTS\" content=\"NOINDEX,NOFOLLOW,NOARCHIVE\" />"); php_info_print("</head>\n"); php_info_print("<body><div class=\"center\">\n");
0
[ "CWE-200" ]
php-src
3804c0d00fa6e629173fb1c8c61f8f88d5fe39b9
234,113,185,692,034,430,000,000,000,000,000,000,000
11
Fix bug #67498 - phpinfo() Type Confusion Information Leak Vulnerability
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg) { char buffer[256]; /* We only trust the superuser with rebooting the system. */ if (!capable(CAP_SYS_BOOT)) return -EPERM; /* For safety, we require "magic" arguments. */ if (magic1 != LINUX_REBOOT_MAGIC1 || (magic2 != LINUX_REBOOT_MAGIC2 && magic2 != LINUX_REBOOT_MAGIC2A && magic2 != LINUX_REBOOT_MAGIC2B && magic2 != LINUX_REBOOT_MAGIC2C)) return -EINVAL; /* Instead of trying to make the power_off code look like * halt when pm_power_off is not set do it the easy way. */ if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off) cmd = LINUX_REBOOT_CMD_HALT; lock_kernel(); switch (cmd) { case LINUX_REBOOT_CMD_RESTART: kernel_restart(NULL); break; case LINUX_REBOOT_CMD_CAD_ON: C_A_D = 1; break; case LINUX_REBOOT_CMD_CAD_OFF: C_A_D = 0; break; case LINUX_REBOOT_CMD_HALT: kernel_halt(); unlock_kernel(); do_exit(0); break; case LINUX_REBOOT_CMD_POWER_OFF: kernel_power_off(); unlock_kernel(); do_exit(0); break; case LINUX_REBOOT_CMD_RESTART2: if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) { unlock_kernel(); return -EFAULT; } buffer[sizeof(buffer) - 1] = '\0'; kernel_restart(buffer); break; case LINUX_REBOOT_CMD_KEXEC: kernel_kexec(); unlock_kernel(); return -EINVAL; #ifdef CONFIG_SOFTWARE_SUSPEND case LINUX_REBOOT_CMD_SW_SUSPEND: { int ret = pm_suspend(PM_SUSPEND_DISK); unlock_kernel(); return ret; } #endif default: unlock_kernel(); return -EINVAL; } unlock_kernel(); return 0; }
0
[ "CWE-20" ]
linux-2.6
9926e4c74300c4b31dee007298c6475d33369df0
173,489,383,013,473,050,000,000,000,000,000,000,000
79
CPU time limit patch / setrlimit(RLIMIT_CPU, 0) cheat fix As discovered here today, the change in Kernel 2.6.17 intended to inhibit users from setting RLIMIT_CPU to 0 (as that is equivalent to unlimited) by "cheating" and setting it to 1 in such a case, does not make a difference, as the check is done in the wrong place (too late), and only applies to the profiling code. On all systems I checked running kernels above 2.6.17, no matter what the hard and soft CPU time limits were before, a user could escape them by issuing in the shell (sh/bash/zsh) "ulimit -t 0", and then the user's process was not ever killed. Attached is a trivial patch to fix that. Simply moving the check to a slightly earlier location (specifically, before the line that actually assigns the limit - *old_rlim = new_rlim), does the trick. Do note that at least the zsh (but not ash, dash, or bash) shell has the problem of "caching" the limits set by the ulimit command, so when running zsh the fix will not immediately be evident - after entering "ulimit -t 0", "ulimit -a" will show "-t: cpu time (seconds) 0", even though the actual limit as returned by getrlimit(...) will be 1. It can be verified by opening a subshell (which will not have the values of the parent shell in cache) and checking in it, or just by running a CPU intensive command like "echo '65536^1048576' | bc" and verifying that it dumps core after one second. Regardless of whether that is a misfeature in the shell, perhaps it would be better to return -EINVAL from setrlimit in such a case instead of cheating and setting to 1, as that does not really reflect the actual state of the process anymore. I do not however know what the ground for that decision was in the original 2.6.17 change, and whether there would be any "backward" compatibility issues, so I preferred not to touch that right now. Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
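The essence of the fix is pure ordering, shown here as a toy userspace model (the types and the clamp-to-1 rule follow the message, not the kernel sources): adjust the requested value before the assignment that publishes it.

```
#include <stdio.h>

struct rlim { unsigned long cur, max; };

static void set_cpu_limit(struct rlim *old_rlim, struct rlim new_rlim)
{
    if (new_rlim.cur == 0)   /* 0 would mean "unlimited" further down,  */
        new_rlim.cur = 1;    /* so clamp it BEFORE committing the value */
    *old_rlim = new_rlim;    /* the buggy code clamped after this line  */
}

int main(void)
{
    struct rlim lim = { 10, 10 };

    set_cpu_limit(&lim, (struct rlim){ 0, 10 });
    printf("cur=%lu\n", lim.cur);    /* prints 1, not 0 */
    return 0;
}
```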
\param user_path Specified path, or \c 0 to get the path currently used. \param reinit_path Force path to be recalculated (may take some time). \return Path containing the \c ffmpeg binary. **/ inline const char *ffmpeg_path(const char *const user_path, const bool reinit_path) { static CImg<char> s_path; cimg::mutex(7); if (reinit_path) s_path.assign(); if (user_path) { if (!s_path) s_path.assign(1024); std::strncpy(s_path,user_path,1023); } else if (!s_path) { s_path.assign(1024); bool path_found = false; std::FILE *file = 0; #if cimg_OS==2 if (!path_found) { std::strcpy(s_path,".\\ffmpeg.exe"); if ((file=std_fopen(s_path,"r"))!=0) { cimg::fclose(file); path_found = true; } } if (!path_found) std::strcpy(s_path,"ffmpeg.exe"); #else if (!path_found) { std::strcpy(s_path,"./ffmpeg"); if ((file=std_fopen(s_path,"r"))!=0) { cimg::fclose(file); path_found = true; } } if (!path_found) std::strcpy(s_path,"ffmpeg"); #endif winformat_string(s_path);
0
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
250,150,337,487,254,300,000,000,000,000,000,000,000
29
Fix other issues in 'CImg<T>::load_bmp()'.
static void vhost_flush_work(struct vhost_work *work) { struct vhost_flush_struct *s; s = container_of(work, struct vhost_flush_struct, work); complete(&s->wait_event); }
0
[ "CWE-120" ]
linux
060423bfdee3f8bc6e2c1bac97de24d5415e2bc4
98,542,426,921,188,180,000,000,000,000,000,000,000
7
vhost: make sure log_num < in_num The code assumes log_num < in_num everywhere, and that is true as long as in_num is incremented by descriptor iov count, and log_num by 1. However this breaks if there's a zero sized descriptor. As a result, if a malicious guest creates a vring desc with desc.len = 0, it may cause the host kernel to crash by overflowing the log array. This bug can be triggered during the VM migration. There's no need to log when desc.len = 0, so just don't increment log_num in this case. Fixes: 3a4d5c94e959 ("vhost_net: a kernel-level virtio server") Cc: [email protected] Reviewed-by: Lidong Chen <[email protected]> Signed-off-by: ruippan <[email protected]> Signed-off-by: yongduan <[email protected]> Acked-by: Michael S. Tsirkin <[email protected]> Reviewed-by: Tyler Hicks <[email protected]> Signed-off-by: Michael S. Tsirkin <[email protected]>
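The invariant in sketch form (simplified stand-in structs, not the vhost ones): log_num may only grow when a descriptor actually contributes loggable bytes, so a zero-length descriptor can no longer push it past in_num.

```
#include <stddef.h>

struct desc    { unsigned long addr; unsigned int len; };
struct log_ent { unsigned long addr; unsigned int len; };

static void log_writable(const struct desc *d, size_t n,
                         struct log_ent *log, unsigned int *log_num)
{
    for (size_t i = 0; i < n; i++) {
        if (d[i].len == 0)
            continue;               /* nothing to log, nothing to count */
        log[*log_num].addr = d[i].addr;
        log[*log_num].len  = d[i].len;
        (*log_num)++;
    }
}
```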
u32 svm_msrpm_offset(u32 msr) { u32 offset; int i; for (i = 0; i < NUM_MSR_MAPS; i++) { if (msr < msrpm_ranges[i] || msr >= msrpm_ranges[i] + MSRS_IN_RANGE) continue; offset = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */ offset += (i * MSRS_RANGE_SIZE); /* add range offset */ /* Now we have the u8 offset - but need the u32 offset */ return offset / 4; } /* MSR not in any range */ return MSR_INVALID; }
0
[ "CWE-835" ]
linux
e72436bc3a5206f95bb384e741154166ddb3202e
51,727,428,998,082,320,000,000,000,000,000,000,000
20
KVM: SVM: avoid infinite loop on NPF from bad address When a nested page fault is taken from an address that does not have a memslot associated to it, kvm_mmu_do_page_fault returns RET_PF_EMULATE (via mmu_set_spte) and kvm_mmu_page_fault then invokes svm_need_emulation_on_page_fault. The default answer there is to return false, but in this case this just causes the page fault to be retried ad libitum. Since this is not a fast path, and the only other case where it is taken is an erratum, just stick a kvm_vcpu_gfn_to_memslot check in there to detect the common case where the erratum is not happening. This fixes an infinite loop in the new set_memory_region_test. Signed-off-by: Paolo Bonzini <[email protected]>
static void h2_wake_some_streams(struct h2c *h2c, int last, uint32_t flags) { struct eb32_node *node; struct h2s *h2s; if (h2c->st0 >= H2_CS_ERROR || h2c->conn->flags & CO_FL_ERROR) flags |= CS_FL_ERR_PENDING; if (conn_xprt_read0_pending(h2c->conn)) flags |= CS_FL_REOS; node = eb32_lookup_ge(&h2c->streams_by_id, last + 1); while (node) { h2s = container_of(node, struct h2s, by_id); if (h2s->id <= last) break; node = eb32_next(node); if (!h2s->cs) { /* this stream was already orphaned */ h2s_destroy(h2s); continue; } h2s->cs->flags |= flags; if ((flags & CS_FL_ERR_PENDING) && (h2s->cs->flags & CS_FL_EOS)) h2s->cs->flags |= CS_FL_ERROR; h2s_alert(h2s); if (flags & CS_FL_ERR_PENDING && h2s->st < H2_SS_ERROR) h2s->st = H2_SS_ERROR; else if (flags & CS_FL_REOS && h2s->st == H2_SS_OPEN) h2s->st = H2_SS_HREM; else if (flags & CS_FL_REOS && h2s->st == H2_SS_HLOC) h2s_close(h2s); } }
0
[ "CWE-125" ]
haproxy
a01f45e3ced23c799f6e78b5efdbd32198a75354
80,917,759,821,681,600,000,000,000,000,000,000,000
38
BUG/CRITICAL: mux-h2: re-check the frame length when PRIORITY is used Tim Düsterhus reported a possible crash in the H2 HEADERS frame decoder when the PRIORITY flag is present. A check is missing to ensure the 5 extra bytes needed with this flag are actually part of the frame. As per RFC7540#4.2, let's return a connection error with code FRAME_SIZE_ERROR. Many thanks to Tim for responsibly reporting this issue with a working config and reproducer. This issue was assigned CVE-2018-20615. This fix must be backported to 1.9 and 1.8.
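The missing check in sketch form; the flag and error constants below come from RFC 7540 (PRIORITY on a HEADERS frame is 0x20, FRAME_SIZE_ERROR is 0x6), while the function itself is illustrative rather than HAProxy's decoder:

```
#include <stdint.h>

#define H2_F_HEADERS_PRIORITY   0x20
#define H2_ERR_FRAME_SIZE_ERROR 0x06

static int check_headers_frame(uint8_t flags, uint32_t frame_len)
{
    if (flags & H2_F_HEADERS_PRIORITY) {
        /* 4-byte stream dependency + 1-byte weight must fit */
        if (frame_len < 5)
            return -H2_ERR_FRAME_SIZE_ERROR;  /* connection error */
    }
    return 0;                                 /* safe to read on */
}
```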
gpk_build_pin_apdu(sc_card_t *card, sc_apdu_t *apdu, struct sc_pin_cmd_data *data) { static u8 sbuf[8]; int r; if (data->pin_type != SC_AC_CHV) return SC_ERROR_INVALID_ARGUMENTS; /* XXX deal with secure messaging here */ memset(apdu, 0, sizeof(*apdu)); apdu->cse = SC_APDU_CASE_3_SHORT; data->flags |= SC_PIN_CMD_NEED_PADDING; switch (data->cmd) { case SC_PIN_CMD_VERIFY: /* Copy PIN to buffer and pad */ data->pin1.encoding = SC_PIN_ENCODING_ASCII; data->pin1.pad_length = 8; data->pin1.pad_char = 0x00; data->pin1.offset = 5; r = sc_build_pin(sbuf, 8, &data->pin1, 1); if (r < 0) return r; apdu->cla = 0x00; apdu->ins = 0x20; apdu->p1 = 0x00; break; case SC_PIN_CMD_CHANGE: case SC_PIN_CMD_UNBLOCK: /* Copy PINs to buffer, BCD-encoded, and pad */ data->pin1.encoding = SC_PIN_ENCODING_BCD; data->pin1.pad_length = 8; data->pin1.pad_char = 0x00; data->pin1.offset = 5; data->pin2.encoding = SC_PIN_ENCODING_BCD; data->pin2.pad_length = 8; data->pin2.pad_char = 0x00; data->pin2.offset = 5 + 4; if ((r = sc_build_pin(sbuf, 4, &data->pin1, 1)) < 0 || (r = sc_build_pin(sbuf + 4, 4, &data->pin2, 1)) < 0) return r; apdu->cla = 0x80; apdu->ins = 0x24; apdu->p1 = (data->cmd == SC_PIN_CMD_CHANGE)? 0x00 : 0x01; break; default: return SC_ERROR_NOT_SUPPORTED; } apdu->p2 = data->pin_reference & 7; apdu->lc = 8; apdu->datalen = 8; apdu->data = sbuf; return 0; }
0
[ "CWE-125" ]
OpenSC
8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
274,378,166,892,329,870,000,000,000,000,000,000,000
59
fixed out of bounds reads Thanks to Eric Sesterhenn from X41 D-SEC GmbH for reporting and suggesting security fixes.
crypt_pw_enc(const char *pwd) { return crypt_pw_enc_by_hash(pwd, CRYPT_UNIX); }
0
[ "CWE-284" ]
389-ds-base
aeb90eb0c41fc48541d983f323c627b2e6c328c7
173,759,437,276,223,900,000,000,000,000,000,000,000
4
Issue 4817 - BUG - locked crypt accounts on import may allow all passwords (#4819) Bug Description: Due to mishandling of short dbpwd hashes, the crypt_r algorithm was misused and was only comparing salts in some cases, rather than checking the actual content of the password. Fix Description: Stricter checks on dbpwd lengths to ensure that content passed to crypt_r has at least 2 salt bytes and 1 hash byte, as well as stricter checks on ct_memcmp to ensure that compared values are the same length, rather than potentially allowing overruns/short comparisons. fixes: https://github.com/389ds/389-ds-base/issues/4817 Author: William Brown <[email protected]> Review by: @mreynolds389
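A sketch of the two guards the message names, assuming glibc's crypt_r(); pw_matches is hypothetical, and plain memcmp stands in for the project's constant-time ct_memcmp helper:

```
#define _GNU_SOURCE
#include <string.h>
#include <crypt.h>

static int pw_matches(const char *input, const char *stored)
{
    struct crypt_data data = {0};
    char *hashed;

    if (!stored || strlen(stored) < 3)   /* 2 salt bytes + 1 hash byte */
        return 0;
    hashed = crypt_r(input, stored, &data);
    if (!hashed || strlen(hashed) != strlen(stored))
        return 0;                        /* never compare unequal lengths */
    return memcmp(hashed, stored, strlen(stored)) == 0;
}
```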
contentProcessor(XML_Parser parser, const char *start, const char *end, const char **endPtr) { enum XML_Error result = doContent( parser, 0, parser->m_encoding, start, end, endPtr, (XML_Bool)! parser->m_parsingStatus.finalBuffer, XML_ACCOUNT_DIRECT); if (result == XML_ERROR_NONE) { if (! storeRawNames(parser)) return XML_ERROR_NO_MEMORY; } return result; }
0
[ "CWE-400", "CWE-703" ]
libexpat
9b4ce651b26557f16103c3a366c91934ecd439ab
310,846,433,060,961,230,000,000,000,000,000,000,000
11
Prevent stack exhaustion in build_model It is possible to trigger stack exhaustion in build_model function if depth of nested children in DTD element is large enough. This happens because build_node is a recursively called function within build_model. The code has been adjusted to run iteratively. It uses the already allocated heap space as temporary stack (growing from top to bottom). Output is identical to recursive version. No new fields in data structures were added, i.e. it keeps full API and ABI compatibility. Instead the numchildren variable is used to temporarily keep the index of items (uint vs int). Documentation and readability improvements kindly added by Sebastian. Proof of Concept: 1. Compile poc binary which parses XML file line by line ``` cat > poc.c << EOF #include <err.h> #include <expat.h> #include <stdio.h> XML_Parser parser; static void XMLCALL dummy_element_decl_handler(void *userData, const XML_Char *name, XML_Content *model) { XML_FreeContentModel(parser, model); } int main(int argc, char *argv[]) { FILE *fp; char *p = NULL; size_t s = 0; ssize_t l; if (argc != 2) errx(1, "usage: poc poc.xml"); if ((parser = XML_ParserCreate(NULL)) == NULL) errx(1, "XML_ParserCreate"); XML_SetElementDeclHandler(parser, dummy_element_decl_handler); if ((fp = fopen(argv[1], "r")) == NULL) err(1, "fopen"); while ((l = getline(&p, &s, fp)) > 0) if (XML_Parse(parser, p, (int)l, XML_FALSE) != XML_STATUS_OK) errx(1, "XML_Parse"); XML_ParserFree(parser); free(p); fclose(fp); return 0; } EOF cc -std=c11 -D_POSIX_C_SOURCE=200809L -lexpat -o poc poc.c ``` 2. Create XML file with a lot of nested groups in DTD element ``` cat > poc.xml.zst.b64 << EOF KLUv/aQkACAAPAEA+DwhRE9DVFlQRSB1d3UgWwo8IUVMRU1FTlQgdXd1CigBAHv/58AJAgAQKAIA ECgCABAoAgAQKAIAECgCABAoAgAQKHwAAChvd28KKQIA2/8gV24XBAIAECkCABApAgAQKQIAECkC ABApAgAQKQIAEClVAAAgPl0+CgEA4A4I2VwwnQ== EOF base64 -d poc.xml.zst.b64 | zstd -d > poc.xml ``` 3. Run Proof of Concept ``` ./poc poc.xml ``` Co-authored-by: Sebastian Pipping <[email protected]>
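The conversion technique itself, shown on a generic tree rather than expat's content model (expat reuses already-allocated heap space as its stack; this sketch simply grows one with realloc): replace the recursive walk with an explicit stack so input-controlled nesting depth cannot exhaust the C stack.

```
#include <stdlib.h>

struct node { struct node **children; size_t numchildren; };

static size_t count_nodes(struct node *root)
{
    size_t n = 0, top = 0, cap = 16;
    struct node **stack = malloc(cap * sizeof(*stack));

    if (!stack || !root) { free(stack); return 0; }
    stack[top++] = root;
    while (top > 0) {
        struct node *cur = stack[--top];
        n++;
        for (size_t i = 0; i < cur->numchildren; i++) {
            if (top == cap) {                      /* grow, don't recurse */
                struct node **ns = realloc(stack, 2 * cap * sizeof(*ns));
                if (!ns) { free(stack); return 0; }
                stack = ns;
                cap *= 2;
            }
            stack[top++] = cur->children[i];
        }
    }
    free(stack);
    return n;
}
```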
int tls1_enc(SSL *s, int send) { SSL3_RECORD *rec; EVP_CIPHER_CTX *ds; unsigned long l; int bs,i,j,k,pad=0,ret,mac_size=0; int n; const EVP_CIPHER *enc; if (send) { if (EVP_MD_CTX_md(s->write_hash)) { n=EVP_MD_CTX_size(s->write_hash); OPENSSL_assert(n >= 0); } ds=s->enc_write_ctx; rec= &(s->s3->wrec); if (s->enc_write_ctx == NULL) enc=NULL; else enc=EVP_CIPHER_CTX_cipher(s->enc_write_ctx); } else { if (EVP_MD_CTX_md(s->read_hash)) { n=EVP_MD_CTX_size(s->read_hash); OPENSSL_assert(n >= 0); } ds=s->enc_read_ctx; rec= &(s->s3->rrec); if (s->enc_read_ctx == NULL) enc=NULL; else enc=EVP_CIPHER_CTX_cipher(s->enc_read_ctx); } #ifdef KSSL_DEBUG printf("tls1_enc(%d)\n", send); #endif /* KSSL_DEBUG */ if ((s->session == NULL) || (ds == NULL) || (enc == NULL)) { memmove(rec->data,rec->input,rec->length); rec->input=rec->data; ret = 1; } else { l=rec->length; bs=EVP_CIPHER_block_size(ds->cipher); if ((bs != 1) && send) { i=bs-((int)l%bs); /* Add weird padding of upto 256 bytes */ /* we need to add 'i' padding bytes of value j */ j=i-1; if (s->options & SSL_OP_TLS_BLOCK_PADDING_BUG) { if (s->s3->flags & TLS1_FLAGS_TLS_PADDING_BUG) j++; } for (k=(int)l; k<(int)(l+i); k++) rec->input[k]=j; l+=i; rec->length+=i; } #ifdef KSSL_DEBUG { unsigned long ui; printf("EVP_Cipher(ds=%p,rec->data=%p,rec->input=%p,l=%ld) ==>\n", ds,rec->data,rec->input,l); printf("\tEVP_CIPHER_CTX: %d buf_len, %d key_len [%d %d], %d iv_len\n", ds->buf_len, ds->cipher->key_len, DES_KEY_SZ, DES_SCHEDULE_SZ, ds->cipher->iv_len); printf("\t\tIV: "); for (i=0; i<ds->cipher->iv_len; i++) printf("%02X", ds->iv[i]); printf("\n"); printf("\trec->input="); for (ui=0; ui<l; ui++) printf(" %02x", rec->input[ui]); printf("\n"); } #endif /* KSSL_DEBUG */ if (!send) { if (l == 0 || l%bs != 0) { SSLerr(SSL_F_TLS1_ENC,SSL_R_BLOCK_CIPHER_PAD_IS_WRONG); ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_DECRYPTION_FAILED); return 0; } } EVP_Cipher(ds,rec->data,rec->input,l); #ifdef KSSL_DEBUG { unsigned long i; printf("\trec->data="); for (i=0; i<l; i++) printf(" %02x", rec->data[i]); printf("\n"); } #endif /* KSSL_DEBUG */ rec->orig_len = rec->length; ret = 1; if (EVP_MD_CTX_md(s->read_hash) != NULL) mac_size = EVP_MD_CTX_size(s->read_hash); if ((bs != 1) && !send) ret = tls1_cbc_remove_padding(s, rec, bs, mac_size); if (pad && !send) rec->length -= pad; } return ret; }
1
[ "CWE-310" ]
openssl
610dfc3ef4c4019394534023115226f4ed0e7204
12,924,756,489,357,260,000,000,000,000,000,000,000
123
Don't crash when processing a zero-length, TLS >= 1.1 record. The previous CBC patch was bugged in that there was a path through enc() in s3_pkt.c/d1_pkt.c which didn't set orig_len. orig_len would be left at the previous value which could suggest that the packet was a sufficient length when it wasn't. (cherry picked from commit 6cb19b7681f600b2f165e4adc57547b097b475fd)
doit (void) { char *badutf8 = strdup ("\x7e\x64\x61\x72\x10\x2f\x2f\xf9\x2b\x71" "\x60\x79\x7b\x2e\x63\x75\x2b\x61\x65\x72" "\x75\x65\x56\x66\x7f\x62\xc5\x76\xe5\x00"); char *s = NULL; int rc; rc = idna_to_ascii_8z (badutf8, &s, 0); free (badutf8); if (rc != IDNA_ICONV_ERROR) fail ("rc %d\n", rc); idn_free (s); }
0
[]
libidn
2e97c2796581c27213962c77f5a8571a598f9a2e
78,624,231,428,163,810,000,000,000,000,000,000,000
15
libidn: stringprep_utf8_to_ucs4 now rejects invalid UTF-8. CVE-2015-2059
int snd_timer_open(struct snd_timer_instance **ti, char *owner, struct snd_timer_id *tid, unsigned int slave_id) { struct snd_timer *timer; struct snd_timer_instance *timeri = NULL; struct device *card_dev_to_put = NULL; int err; mutex_lock(&register_mutex); if (tid->dev_class == SNDRV_TIMER_CLASS_SLAVE) { /* open a slave instance */ if (tid->dev_sclass <= SNDRV_TIMER_SCLASS_NONE || tid->dev_sclass > SNDRV_TIMER_SCLASS_OSS_SEQUENCER) { pr_debug("ALSA: timer: invalid slave class %i\n", tid->dev_sclass); err = -EINVAL; goto unlock; } timeri = snd_timer_instance_new(owner, NULL); if (!timeri) { err = -ENOMEM; goto unlock; } timeri->slave_class = tid->dev_sclass; timeri->slave_id = tid->device; timeri->flags |= SNDRV_TIMER_IFLG_SLAVE; list_add_tail(&timeri->open_list, &snd_timer_slave_list); err = snd_timer_check_slave(timeri); if (err < 0) { snd_timer_close_locked(timeri, &card_dev_to_put); timeri = NULL; } goto unlock; } /* open a master instance */ timer = snd_timer_find(tid); #ifdef CONFIG_MODULES if (!timer) { mutex_unlock(&register_mutex); snd_timer_request(tid); mutex_lock(&register_mutex); timer = snd_timer_find(tid); } #endif if (!timer) { err = -ENODEV; goto unlock; } if (!list_empty(&timer->open_list_head)) { timeri = list_entry(timer->open_list_head.next, struct snd_timer_instance, open_list); if (timeri->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) { err = -EBUSY; timeri = NULL; goto unlock; } } if (timer->num_instances >= timer->max_instances) { err = -EBUSY; goto unlock; } timeri = snd_timer_instance_new(owner, timer); if (!timeri) { err = -ENOMEM; goto unlock; } /* take a card refcount for safe disconnection */ if (timer->card) get_device(&timer->card->card_dev); timeri->slave_class = tid->dev_sclass; timeri->slave_id = slave_id; if (list_empty(&timer->open_list_head) && timer->hw.open) { err = timer->hw.open(timer); if (err) { kfree(timeri->owner); kfree(timeri); timeri = NULL; if (timer->card) card_dev_to_put = &timer->card->card_dev; module_put(timer->module); goto unlock; } } list_add_tail(&timeri->open_list, &timer->open_list_head); timer->num_instances++; err = snd_timer_check_master(timeri); if (err < 0) { snd_timer_close_locked(timeri, &card_dev_to_put); timeri = NULL; } unlock: mutex_unlock(&register_mutex); /* put_device() is called after unlock for avoiding deadlock */ if (card_dev_to_put) put_device(card_dev_to_put); *ti = timeri; return err; }
1
[ "CWE-416", "CWE-703" ]
linux
e7af6307a8a54f0b873960b32b6a644f2d0fbd97
251,259,451,354,317,680,000,000,000,000,000,000,000
104
ALSA: timer: Fix incorrectly assigned timer instance The clean up commit 41672c0c24a6 ("ALSA: timer: Simplify error path in snd_timer_open()") unified the error handling code paths with the standard goto, but it introduced a subtle bug: the timer instance is stored in snd_timer_open() incorrectly even if it returns an error. This may eventually lead to UAF, as spotted by fuzzer. The culprit is the snd_timer_open() code checks the SNDRV_TIMER_IFLG_EXCLUSIVE flag with the common variable timeri. This variable is supposed to be the newly created instance, but we (ab-)used it for a temporary check before the actual creation of a timer instance. After that point, there is another check for the max number of instances, and it bails out if over the threshold. Before the refactoring above, it worked fine because the code returned directly from that point. After the refactoring, however, it jumps to the unified error path that stores the timeri variable in return -- even if it returns an error. Unfortunately this stored value is kept in the caller side (snd_timer_user_tselect()) in tu->timeri. This causes inconsistency later, as if the timer was successfully assigned. In this patch, we fix it by not re-using timeri variable but a temporary variable for testing the exclusive connection, so timeri remains NULL at that point. Fixes: 41672c0c24a6 ("ALSA: timer: Simplify error path in snd_timer_open()") Reported-and-tested-by: Tristan Madani <[email protected]> Cc: <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Takashi Iwai <[email protected]>
static zend_long firebird_handle_doer(pdo_dbh_t *dbh, const char *sql, size_t sql_len) /* {{{ */ { pdo_firebird_db_handle *H = (pdo_firebird_db_handle *)dbh->driver_data; isc_stmt_handle stmt = PDO_FIREBIRD_HANDLE_INITIALIZER; static char const info_count[] = { isc_info_sql_records }; char result[64]; int ret = 0; XSQLDA in_sqlda, out_sqlda; /* TODO no placeholders in exec() for now */ in_sqlda.version = out_sqlda.version = PDO_FB_SQLDA_VERSION; in_sqlda.sqld = out_sqlda.sqld = 0; out_sqlda.sqln = 1; /* allocate and prepare statement */ if (!firebird_alloc_prepare_stmt(dbh, sql, sql_len, &out_sqlda, &stmt, 0)) { return -1; } /* execute the statement */ if (isc_dsql_execute2(H->isc_status, &H->tr, &stmt, PDO_FB_SQLDA_VERSION, &in_sqlda, &out_sqlda)) { RECORD_ERROR(dbh); ret = -1; goto free_statement; } /* find out how many rows were affected */ if (isc_dsql_sql_info(H->isc_status, &stmt, sizeof(info_count), const_cast(info_count), sizeof(result), result)) { RECORD_ERROR(dbh); ret = -1; goto free_statement; } if (result[0] == isc_info_sql_records) { unsigned i = 3, result_size = isc_vax_integer(&result[1],2); if (result_size > sizeof(result)) { ret = -1; goto free_statement; } while (result[i] != isc_info_end && i < result_size) { short len = (short)isc_vax_integer(&result[i+1],2); /* bail out on bad len */ if (len != 1 && len != 2 && len != 4) { ret = -1; goto free_statement; } if (result[i] != isc_info_req_select_count) { ret += isc_vax_integer(&result[i+3],len); } i += len+3; } } /* commit if we're in auto_commit mode */ if (dbh->auto_commit && isc_commit_retaining(H->isc_status, &H->tr)) { RECORD_ERROR(dbh); } free_statement: if (isc_dsql_free_statement(H->isc_status, &stmt, DSQL_drop)) { RECORD_ERROR(dbh); } return ret; }
0
[ "CWE-787" ]
php-src
08da7c73726f7b86b67d6f0ff87c73c585a7834a
144,911,972,521,096,340,000,000,000,000,000,000,000
68
Fix #76449: SIGSEGV in firebird_handle_doer We need to verify that the `result_size` is not larger than our buffer, and also should make sure that the `len` which is passed to `isc_vax_integer()` has a permissible value; otherwise we bail out.
display_dollar(colnr_T col) { colnr_T save_col; if (!redrawing()) return; cursor_off(); save_col = curwin->w_cursor.col; curwin->w_cursor.col = col; if (has_mbyte) { char_u *p; // If on the last byte of a multi-byte move to the first byte. p = ml_get_curline(); curwin->w_cursor.col -= (*mb_head_off)(p, p + col); } curs_columns(FALSE); // recompute w_wrow and w_wcol if (curwin->w_wcol < curwin->w_width) { edit_putchar('$', FALSE); dollar_vcol = curwin->w_virtcol; } curwin->w_cursor.col = save_col; }
1
[ "CWE-126", "CWE-787" ]
vim
e98c88c44c308edaea5994b8ad4363e65030968c
265,859,281,386,315,900,000,000,000,000,000,000,000
26
patch 9.0.0218: reading before the start of the line Problem: Reading before the start of the line. Solution: When displaying "$" check the column is not negative.
xmlXPtrGetIndex(xmlNodePtr cur) { int i; if ((cur == NULL) || (cur->type == XML_NAMESPACE_DECL)) return(-1); for (i = 1;cur != NULL;cur = cur->prev) { if ((cur->type == XML_ELEMENT_NODE) || (cur->type == XML_DOCUMENT_NODE) || (cur->type == XML_HTML_DOCUMENT_NODE)) { i++; } } return(i); }
0
[ "CWE-416" ]
libxml2
9ab01a277d71f54d3143c2cf333c5c2e9aaedd9e
104,459,908,739,806,360,000,000,000,000,000,000,000
13
Fix XPointer paths beginning with range-to The old code would invoke the broken xmlXPtrRangeToFunction. range-to isn't really a function but a special kind of location step. Remove this function and always handle range-to in the XPath code. The old xmlXPtrRangeToFunction could also be abused to trigger a use-after-free error with the potential for remote code execution. Found with afl-fuzz. Fixes CVE-2016-5131.
void set_reserved_vector(const unsigned int arg) { unsigned int siz = _cimg_mp_size(arg); int *ptr = memtype.data(arg + 1); while (siz-->0) *(ptr++) = -1; }
0
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
158,208,362,648,627,340,000,000,000,000,000,000,000
5
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file do not exceed the file size.
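The check in portable form (a sketch with invented names, not CImg's member code): compute the pixel-data size in 64-bit arithmetic with an explicit overflow guard and refuse anything the file cannot possibly contain.

```
#include <stdint.h>

static int dims_fit_file(uint32_t w, uint32_t h, uint32_t bpp,
                         uint64_t file_size, uint64_t header_size)
{
    uint64_t px = (uint64_t)w * h;              /* cannot overflow u64 */

    if (bpp == 0 || px > UINT64_MAX / bpp)
        return 0;                               /* size would overflow */
    if (file_size < header_size)
        return 0;
    return px * bpp <= file_size - header_size; /* data must fit file  */
}
```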
int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from) { if (copy_from_user(to, from, sizeof(struct kernel_siginfo))) return -EFAULT; return post_copy_siginfo_from_user(to, from); }
0
[ "CWE-190" ]
linux
d1e7fd6462ca9fc76650fbe6ca800e35b24267da
124,625,986,354,863,300,000,000,000,000,000,000,000
6
signal: Extend exec_id to 64bits Replace the 32bit exec_id with a 64bit exec_id to make it impossible to wrap the exec_id counter. With care an attacker can cause exec_id wrap and send arbitrary signals to a newly exec'd parent. This bypasses the signal sending checks if the parent changes their credentials during exec. The severity of this problem can be seen in that, in my limited testing of a 32bit exec_id, it can take as little as 19s to exec 65536 times. Which means that it can take as little as 14 days to wrap a 32bit exec_id. Adam Zabrocki has succeeded wrapping the self_exec_id in 7 days. Even my slower timing is in the uptime of a typical server. Which means self_exec_id is simply a speed bump today, and if exec gets noticeably faster self_exec_id won't even be a speed bump. Extending self_exec_id to 64bits introduces a problem on 32bit architectures where reading self_exec_id is no longer atomic and can take two read instructions. Which means that it is possible to hit a window where the read value of exec_id does not match the written value. So with very lucky timing after this change this still remains exploitable. I have updated the update of exec_id on exec to use WRITE_ONCE and the read of exec_id in do_notify_parent to use READ_ONCE to make it clear that there is no locking between these two locations. Link: https://lore.kernel.org/kernel-hardening/[email protected] Fixes: 2.3.23pre2 Cc: [email protected] Signed-off-by: "Eric W. Biederman" <[email protected]>
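A userspace analogue of the two points above, using C11 atomics (the kernel itself used a plain 64-bit field with WRITE_ONCE/READ_ONCE, which the message notes can still tear on 32-bit; _Atomic models the tear-free variant):

```
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t exec_id;   /* 64 bits: wrapping is impractical */

static uint64_t bump_exec_id(void)          /* one new id per exec */
{
    return atomic_fetch_add_explicit(&exec_id, 1,
                                     memory_order_relaxed) + 1;
}

static uint64_t read_exec_id(void)          /* never a torn read */
{
    return atomic_load_explicit(&exec_id, memory_order_relaxed);
}
```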
SpoolssRouterReplyPrinter_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { /* Parse packet */ offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; }
0
[ "CWE-399" ]
wireshark
b4d16b4495b732888e12baf5b8a7e9bf2665e22b
17,576,365,486,811,965,000,000,000,000,000,000,000
10
SPOOLSS: Try to avoid an infinite loop. Use tvb_reported_length_remaining in dissect_spoolss_uint16uni. Make sure our offset always increments in dissect_spoolss_keybuffer. Change-Id: I7017c9685bb2fa27161d80a03b8fca4ef630e793 Reviewed-on: https://code.wireshark.org/review/14687 Reviewed-by: Gerald Combs <[email protected]> Petri-Dish: Gerald Combs <[email protected]> Tested-by: Petri Dish Buildbot <[email protected]> Reviewed-by: Michael Mann <[email protected]>
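The loop guard in isolation (a generic TLV-style walk, not Wireshark's dissector): make the offset strictly increase on every iteration, so a zero or truncated length field from the wire ends the loop instead of spinning forever.

```
#include <stddef.h>

static void parse_elements(const unsigned char *buf, size_t len)
{
    size_t off = 0;

    while (off + 2 <= len) {
        size_t elem = (size_t)buf[off] | ((size_t)buf[off + 1] << 8);

        if (elem == 0 || off + 2 + elem > len)
            break;               /* no forward progress, or truncated */
        /* ... dissect buf[off + 2 .. off + 2 + elem) here ... */
        off += 2 + elem;         /* guaranteed to move forward */
    }
}
```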
void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) { Manager *m; bool unexpected; assert(u); assert(os < _UNIT_ACTIVE_STATE_MAX); assert(ns < _UNIT_ACTIVE_STATE_MAX); /* Note that this is called for all low-level state changes, * even if they might map to the same high-level * UnitActiveState! That means that ns == os is OK an expected * behavior here. For example: if a mount point is remounted * this function will be called too! */ m = u->manager; if (m->n_reloading <= 0) { dual_timestamp ts; dual_timestamp_get(&ts); if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns)) u->inactive_exit_timestamp = ts; else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns)) u->inactive_enter_timestamp = ts; if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns)) u->active_enter_timestamp = ts; else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns)) u->active_exit_timestamp = ts; } if (UNIT_IS_INACTIVE_OR_FAILED(ns)) unit_destroy_cgroup(u); /* Note that this doesn't apply to RemainAfterExit services exiting * sucessfully, since there's no change of state in that case. Which is * why it is handled in service_set_state() */ if (UNIT_IS_INACTIVE_OR_FAILED(os) != UNIT_IS_INACTIVE_OR_FAILED(ns)) { ExecContext *ec = unit_get_exec_context(u); if (ec && exec_context_may_touch_console(ec)) { if (UNIT_IS_INACTIVE_OR_FAILED(ns)) { m->n_on_console --; if (m->n_on_console == 0) /* unset no_console_output flag, since the console is free */ m->no_console_output = false; } else m->n_on_console ++; } } if (u->job) { unexpected = false; if (u->job->state == JOB_WAITING) /* So we reached a different state for this * job. Let's see if we can run it now if it * failed previously due to EAGAIN. */ job_add_to_run_queue(u->job); /* Let's check whether this state change constitutes a * finished job, or maybe contradicts a running job and * hence needs to invalidate jobs. */ switch (u->job->type) { case JOB_START: case JOB_VERIFY_ACTIVE: if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) job_finish_and_invalidate(u->job, JOB_DONE, true); else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) { unexpected = true; if (UNIT_IS_INACTIVE_OR_FAILED(ns)) job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true); } break; case JOB_RELOAD: case JOB_RELOAD_OR_START: if (u->job->state == JOB_RUNNING) { if (ns == UNIT_ACTIVE) job_finish_and_invalidate(u->job, reload_success ? JOB_DONE : JOB_FAILED, true); else if (ns != UNIT_ACTIVATING && ns != UNIT_RELOADING) { unexpected = true; if (UNIT_IS_INACTIVE_OR_FAILED(ns)) job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true); } } break; case JOB_STOP: case JOB_RESTART: case JOB_TRY_RESTART: if (UNIT_IS_INACTIVE_OR_FAILED(ns)) job_finish_and_invalidate(u->job, JOB_DONE, true); else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) { unexpected = true; job_finish_and_invalidate(u->job, JOB_FAILED, true); } break; default: assert_not_reached("Job type unknown"); } } else unexpected = true; if (m->n_reloading <= 0) { /* If this state change happened without being * requested by a job, then let's retroactively start * or stop dependencies. We skip that step when * deserializing, since we don't want to create any * additional jobs just because something is already * activated. 
*/ if (unexpected) { if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns)) retroactively_start_dependencies(u); else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns)) retroactively_stop_dependencies(u); } /* stop unneeded units regardless if going down was expected or not */ if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns)) check_unneeded_dependencies(u); if (ns != os && ns == UNIT_FAILED) { log_notice_unit(u->id, "Unit %s entered failed state.", u->id); unit_start_on_failure(u); } } /* Some names are special */ if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) { if (unit_has_name(u, SPECIAL_DBUS_SERVICE)) /* The bus just might have become available, * hence try to connect to it, if we aren't * yet connected. */ bus_init(m, true); if (u->type == UNIT_SERVICE && !UNIT_IS_ACTIVE_OR_RELOADING(os) && m->n_reloading <= 0) { /* Write audit record if we have just finished starting up */ manager_send_unit_audit(m, u, AUDIT_SERVICE_START, true); u->in_audit = true; } if (!UNIT_IS_ACTIVE_OR_RELOADING(os)) manager_send_unit_plymouth(m, u); } else { /* We don't care about D-Bus here, since we'll get an * asynchronous notification for it anyway. */ if (u->type == UNIT_SERVICE && UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os) && m->n_reloading <= 0) { /* Hmm, if there was no start record written * write it now, so that we always have a nice * pair */ if (!u->in_audit) { manager_send_unit_audit(m, u, AUDIT_SERVICE_START, ns == UNIT_INACTIVE); if (ns == UNIT_INACTIVE) manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, true); } else /* Write audit record if we have just finished shutting down */ manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, ns == UNIT_INACTIVE); u->in_audit = false; } } manager_recheck_journal(m); unit_trigger_notify(u); /* Maybe we finished startup and are now ready for being * stopped because unneeded? */ if (u->manager->n_reloading <= 0) unit_check_unneeded(u); unit_add_to_dbus_queue(u); unit_add_to_gc_queue(u); }
0
[]
systemd
5ba6985b6c8ef85a8bcfeb1b65239c863436e75b
177,327,248,126,312,050,000,000,000,000,000,000,000
202
core: allow PIDs to be watched by two units at the same time In some cases it is interesting to map a PID to two units at the same time. For example, when a user logs in via a getty, which is re-execed to /sbin/login, that binary will be explicitly referenced as main pid of the getty service, as well as implicitly referenced as part of the session scope.
static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct sk_buff *skb) { return false; }
0
[ "CWE-400", "CWE-401" ]
linux
3f93616951138a598d930dcaec40f2bfd9ce43bb
114,628,340,653,968,060,000,000,000,000,000,000,000
6
rtlwifi: prevent memory leak in rtl_usb_probe In rtl_usb_probe, if the allocation for usb_data fails, the allocated hw should be released. In addition, the allocated rtlpriv->usb_data should be released on the error handling path. Signed-off-by: Navid Emamdoost <[email protected]> Signed-off-by: Kalle Valo <[email protected]>
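The general shape of the fix, reduced to plain malloc/free (illustrative names, not the rtlwifi structures): a later allocation failing must unwind every earlier one before returning.

```
#include <stdlib.h>

struct hw { void *usb_data; };

static int probe(struct hw **out)
{
    struct hw *hw = malloc(sizeof(*hw));

    if (!hw)
        return -1;
    hw->usb_data = calloc(32, 64);
    if (!hw->usb_data) {
        free(hw);        /* the leak: the old error path skipped this */
        return -1;
    }
    *out = hw;
    return 0;
}
```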
static int kvm_set_memslot(struct kvm *kvm, struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change) { struct kvm_memory_slot *invalid_slot; int r; /* * Released in kvm_swap_active_memslots. * * Must be held from before the current memslots are copied until * after the new memslots are installed with rcu_assign_pointer, * then released before the synchronize srcu in kvm_swap_active_memslots. * * When modifying memslots outside of the slots_lock, must be held * before reading the pointer to the current memslots until after all * changes to those memslots are complete. * * These rules ensure that installing new memslots does not lose * changes made to the previous memslots. */ mutex_lock(&kvm->slots_arch_lock); /* * Invalidate the old slot if it's being deleted or moved. This is * done prior to actually deleting/moving the memslot to allow vCPUs to * continue running by ensuring there are no mappings or shadow pages * for the memslot when it is deleted/moved. Without pre-invalidation * (and without a lock), a window would exist between effecting the * delete/move and committing the changes in arch code where KVM or a * guest could access a non-existent memslot. * * Modifications are done on a temporary, unreachable slot. The old * slot needs to be preserved in case a later step fails and the * invalidation needs to be reverted. */ if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT); if (!invalid_slot) { mutex_unlock(&kvm->slots_arch_lock); return -ENOMEM; } kvm_invalidate_memslot(kvm, old, invalid_slot); } r = kvm_prepare_memory_region(kvm, old, new, change); if (r) { /* * For DELETE/MOVE, revert the above INVALID change. No * modifications required since the original slot was preserved * in the inactive slots. Changing the active memslots also * release slots_arch_lock. */ if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { kvm_activate_memslot(kvm, invalid_slot, old); kfree(invalid_slot); } else { mutex_unlock(&kvm->slots_arch_lock); } return r; } /* * For DELETE and MOVE, the working slot is now active as the INVALID * version of the old slot. MOVE is particularly special as it reuses * the old slot and returns a copy of the old slot (in working_slot). * For CREATE, there is no old slot. For DELETE and FLAGS_ONLY, the * old slot is detached but otherwise preserved. */ if (change == KVM_MR_CREATE) kvm_create_memslot(kvm, new); else if (change == KVM_MR_DELETE) kvm_delete_memslot(kvm, old, invalid_slot); else if (change == KVM_MR_MOVE) kvm_move_memslot(kvm, old, new, invalid_slot); else if (change == KVM_MR_FLAGS_ONLY) kvm_update_flags_memslot(kvm, old, new); else BUG(); /* Free the temporary INVALID slot used for DELETE and MOVE. */ if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) kfree(invalid_slot); /* * No need to refresh new->arch, changes after dropping slots_arch_lock * will directly hit the final, active memsot. Architectures are * responsible for knowing that new->arch may be stale. */ kvm_commit_memory_region(kvm, old, new, change); return 0;
0
[ "CWE-476" ]
linux
5593473a1e6c743764b08e3b6071cb43b5cfa6c4
294,304,306,722,988,600,000,000,000,000,000,000,000
94
KVM: avoid NULL pointer dereference in kvm_dirty_ring_push kvm_vcpu_release() will call kvm_dirty_ring_free(), freeing ring->dirty_gfns and setting it to NULL. Afterwards, it calls kvm_arch_vcpu_destroy(). However, if closing the file descriptor races with KVM_RUN in such a way that vcpu->arch.st.preempted == 0, the following call stack leads to a NULL pointer dereference in kvm_dirty_ring_push(): mark_page_dirty_in_slot+0x192/0x270 arch/x86/kvm/../../../virt/kvm/kvm_main.c:3171 kvm_steal_time_set_preempted arch/x86/kvm/x86.c:4600 [inline] kvm_arch_vcpu_put+0x34e/0x5b0 arch/x86/kvm/x86.c:4618 vcpu_put+0x1b/0x70 arch/x86/kvm/../../../virt/kvm/kvm_main.c:211 vmx_free_vcpu+0xcb/0x130 arch/x86/kvm/vmx/vmx.c:6985 kvm_arch_vcpu_destroy+0x76/0x290 arch/x86/kvm/x86.c:11219 kvm_vcpu_destroy arch/x86/kvm/../../../virt/kvm/kvm_main.c:441 [inline] The fix is to release the dirty page ring after kvm_arch_vcpu_destroy has run. Reported-by: Qiuhao Li <[email protected]> Reported-by: Gaoning Pan <[email protected]> Reported-by: Yongkang Jia <[email protected]> Cc: [email protected] Signed-off-by: Paolo Bonzini <[email protected]>
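The ordering fix, modeled with simplified stand-ins (not KVM's types): anything the destroy step may still dereference is freed only after that step has run.

```
#include <stdlib.h>

struct vcpu { void *dirty_gfns; };

static void arch_vcpu_destroy(struct vcpu *v)
{
    (void)v;   /* may still touch v->dirty_gfns, e.g. push a last entry */
}

static void vcpu_release(struct vcpu *v)
{
    arch_vcpu_destroy(v);   /* 1: run the code that may use the ring */
    free(v->dirty_gfns);    /* 2: only now free it                   */
    v->dirty_gfns = NULL;   /* (the buggy order swapped 1 and 2)     */
    free(v);
}
```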
static int create_auto_midi_quirk(struct snd_usb_audio *chip, struct usb_interface *iface, struct usb_driver *driver) { struct usb_host_interface *alts; struct usb_interface_descriptor *altsd; struct usb_endpoint_descriptor *epd; int err; alts = &iface->altsetting[0]; altsd = get_iface_desc(alts); /* must have at least one bulk/interrupt endpoint for streaming */ if (altsd->bNumEndpoints < 1) return -ENODEV; epd = get_endpoint(alts, 0); if (!usb_endpoint_xfer_bulk(epd) && !usb_endpoint_xfer_int(epd)) return -ENODEV; switch (USB_ID_VENDOR(chip->usb_id)) { case 0x0499: /* Yamaha */ err = create_yamaha_midi_quirk(chip, iface, driver, alts); if (err != -ENODEV) return err; break; case 0x0582: /* Roland */ err = create_roland_midi_quirk(chip, iface, driver, alts); if (err != -ENODEV) return err; break; } return create_std_midi_quirk(chip, iface, driver, alts); }
0
[]
sound
0f886ca12765d20124bd06291c82951fd49a33be
99,634,432,750,079,140,000,000,000,000,000,000,000
35
ALSA: usb-audio: Fix NULL dereference in create_fixed_stream_quirk() create_fixed_stream_quirk() may cause a NULL-pointer dereference by accessing the non-existing endpoint when a USB device with a malformed USB descriptor is used. This patch avoids it simply by adding a sanity check of bNumEndpoints before the accesses. Bugzilla: https://bugzilla.suse.com/show_bug.cgi?id=971125 Cc: <[email protected]> Signed-off-by: Takashi Iwai <[email protected]>
cmsToneCurve* _cmsBuildKToneCurve(cmsContext ContextID, cmsUInt32Number nPoints, cmsUInt32Number nProfiles, const cmsUInt32Number Intents[], const cmsHPROFILE hProfiles[], const cmsBool BPC[], const cmsFloat64Number AdaptationStates[], cmsUInt32Number dwFlags) { cmsToneCurve *in, *out, *KTone; // Make sure CMYK -> CMYK if (cmsGetColorSpace(hProfiles[0]) != cmsSigCmykData || cmsGetColorSpace(hProfiles[nProfiles-1])!= cmsSigCmykData) return NULL; // Make sure last is an output profile if (cmsGetDeviceClass(hProfiles[nProfiles - 1]) != cmsSigOutputClass) return NULL; // Create individual curves. BPC works also as each K to L* is // computed as a BPC to zero black point in case of L* in = ComputeKToLstar(ContextID, nPoints, nProfiles - 1, Intents, hProfiles, BPC, AdaptationStates, dwFlags); if (in == NULL) return NULL; out = ComputeKToLstar(ContextID, nPoints, 1, Intents + (nProfiles - 1), hProfiles + (nProfiles - 1), BPC + (nProfiles - 1), AdaptationStates + (nProfiles - 1), dwFlags); if (out == NULL) { cmsFreeToneCurve(in); return NULL; } // Build the relationship. This effectively limits the maximum accuracy to 16 bits, but // since this is used on black-preserving LUTs, we are not loosing accuracy in any case KTone = cmsJoinToneCurve(ContextID, in, out, nPoints); // Get rid of components cmsFreeToneCurve(in); cmsFreeToneCurve(out); // Something went wrong... if (KTone == NULL) return NULL; // Make sure it is monotonic if (!cmsIsToneCurveMonotonic(KTone)) { cmsFreeToneCurve(KTone); return NULL; } return KTone; }
0
[]
Little-CMS
41d222df1bc6188131a8f46c32eab0a4d4cdf1b6
333,691,934,699,905,660,000,000,000,000,000,000,000
53
Memory squeezing fix: lcms2 cmsPipeline construction When creating a new pipeline, lcms would often try to allocate a stage and pass it to cmsPipelineInsertStage without checking whether the allocation succeeded. cmsPipelineInsertStage would then assert (or crash) if it had not. The fix here is to change cmsPipelineInsertStage to check and return an error value. All calling code is then checked to test this return value and cope.
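The API change in sketch form (illustrative types; the real function is cmsPipelineInsertStage): the insert routine reports failure instead of asserting, and every caller checks the result.

```
#include <stdbool.h>
#include <stddef.h>

struct stage    { struct stage *next; };
struct pipeline { struct stage *head; };

static bool pipeline_insert_stage(struct pipeline *p, struct stage *s)
{
    if (p == NULL || s == NULL)   /* e.g. the stage allocation failed */
        return false;             /* previously: assert or crash      */
    s->next = p->head;
    p->head = s;
    return true;
}
```

Every call site then takes the form "if (!pipeline_insert_stage(p, alloc_stage())) goto cleanup;", which is exactly the caller-side audit the message describes.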
void gpp_read_box(GF_BitStream *bs, GF_BoxRecord *rec) { rec->top = gf_bs_read_u16(bs); rec->left = gf_bs_read_u16(bs); rec->bottom = gf_bs_read_u16(bs); rec->right = gf_bs_read_u16(bs); }
0
[ "CWE-476" ]
gpac
d527325a9b72218612455a534a508f9e1753f76e
202,820,718,924,572,170,000,000,000,000,000,000,000
7
fixed #1768
init_class_tab(void) { int i; static int done = FALSE; if (done) return; for (i = 0; i < 256; ++i) { if (i >= '0' && i <= '7') class_tab[i] = RI_DIGIT + RI_HEX + RI_OCTAL + RI_WORD; else if (i >= '8' && i <= '9') class_tab[i] = RI_DIGIT + RI_HEX + RI_WORD; else if (i >= 'a' && i <= 'f') class_tab[i] = RI_HEX + RI_WORD + RI_HEAD + RI_ALPHA + RI_LOWER; else if (i >= 'g' && i <= 'z') class_tab[i] = RI_WORD + RI_HEAD + RI_ALPHA + RI_LOWER; else if (i >= 'A' && i <= 'F') class_tab[i] = RI_HEX + RI_WORD + RI_HEAD + RI_ALPHA + RI_UPPER; else if (i >= 'G' && i <= 'Z') class_tab[i] = RI_WORD + RI_HEAD + RI_ALPHA + RI_UPPER; else if (i == '_') class_tab[i] = RI_WORD + RI_HEAD; else class_tab[i] = 0; } class_tab[' '] |= RI_WHITE; class_tab['\t'] |= RI_WHITE; done = TRUE; }
0
[ "CWE-416" ]
vim
32acf1f1a72ebb9d8942b9c9d80023bf1bb668ea
178,126,752,784,473,370,000,000,000,000,000,000,000
31
patch 9.0.0047: using freed memory with recursive substitute Problem: Using freed memory with recursive substitute. Solution: Always make a copy for reg_prev_sub.
irc_nick_realloc_prefixes (struct t_irc_server *server, int old_length, int new_length) { struct t_irc_channel *ptr_channel; struct t_irc_nick *ptr_nick; char *new_prefixes; for (ptr_channel = server->channels; ptr_channel; ptr_channel = ptr_channel->next_channel) { for (ptr_nick = ptr_channel->nicks; ptr_nick; ptr_nick = ptr_nick->next_nick) { if (ptr_nick->prefixes) { new_prefixes = realloc (ptr_nick->prefixes, new_length + 1); if (new_prefixes) { ptr_nick->prefixes = new_prefixes; if (new_length > old_length) { memset (ptr_nick->prefixes + old_length, ' ', new_length - old_length); } ptr_nick->prefixes[new_length] = '\0'; } } else { ptr_nick->prefixes = malloc (new_length + 1); if (ptr_nick->prefixes) { memset (ptr_nick->prefixes, ' ', new_length); ptr_nick->prefixes[new_length] = '\0'; } } } } }
0
[ "CWE-120", "CWE-787" ]
weechat
40ccacb4330a64802b1f1e28ed9a6b6d3ca9197f
242,063,702,986,238,340,000,000,000,000,000,000,000
40
irc: fix crash when a new message 005 is received with longer nick prefixes Thanks to Stuart Nevans Locke for reporting the issue.
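The underlying pattern is a grow-and-pad realloc that must keep the old buffer valid on failure. A generic C sketch of that pattern (not the WeeChat API; the function name is invented):

#include <stdlib.h>
#include <string.h>

/* Grow a space-padded, NUL-terminated prefix buffer from old_length
 * to new_length, keeping the old pointer intact on realloc failure. */
static int prefixes_grow(char **prefixes, int old_length, int new_length)
{
    char *grown = realloc(*prefixes, (size_t)new_length + 1);
    if (grown == NULL)
        return -1;                         /* old buffer is still valid */
    if (new_length > old_length)
        memset(grown + old_length, ' ', (size_t)(new_length - old_length));
    grown[new_length] = '\0';
    *prefixes = grown;
    return 0;
}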
static void task_new_fair(struct rq *rq, struct task_struct *p) { struct cfs_rq *cfs_rq = task_cfs_rq(p); struct sched_entity *se = &p->se, *curr = cfs_rq->curr; int this_cpu = smp_processor_id(); sched_info_queued(p); update_curr(cfs_rq); place_entity(cfs_rq, se, 1); /* 'curr' will be NULL if the child belongs to a different group */ if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) && curr && curr->vruntime < se->vruntime) { /* * Upon rescheduling, sched_class::put_prev_task() will place * 'current' within the tree based on its new key value. */ swap(curr->vruntime, se->vruntime); } enqueue_task_fair(rq, p, 0); resched_task(rq->curr); }
0
[]
linux-2.6
6a6029b8cefe0ca7e82f27f3904dbedba3de4e06
20,302,769,891,696,013,000,000,000,000,000,000,000
24
sched: simplify sched_slice() Use the existing calc_delta_mine() calculation for sched_slice(). This saves a divide and simplifies the code because we share it with the other /cfs_rq->load users. It also improves code size: text data bss dec hex filename 42659 2740 144 45543 b1e7 sched.o.before 42093 2740 144 44977 afb1 sched.o.after Signed-off-by: Ingo Molnar <[email protected]> Signed-off-by: Peter Zijlstra <[email protected]>
void xgroupCommand(client *c) { const char *help[] = { "CREATE <key> <groupname> <id or $> [opt] -- Create a new consumer group.", " option MKSTREAM: create the empty stream if it does not exist.", "SETID <key> <groupname> <id or $> -- Set the current group ID.", "DESTROY <key> <groupname> -- Remove the specified group.", "DELCONSUMER <key> <groupname> <consumer> -- Remove the specified consumer.", "HELP -- Prints this help.", NULL }; stream *s = NULL; sds grpname = NULL; streamCG *cg = NULL; char *opt = c->argv[1]->ptr; /* Subcommand name. */ int mkstream = 0; robj *o; /* CREATE has an MKSTREAM option that creates the stream if it * does not exist. */ if (c->argc == 6 && !strcasecmp(opt,"CREATE")) { if (strcasecmp(c->argv[5]->ptr,"MKSTREAM")) { addReplySubcommandSyntaxError(c); return; } mkstream = 1; grpname = c->argv[3]->ptr; } /* Everything but the "HELP" option requires a key and group name. */ if (c->argc >= 4) { o = lookupKeyWrite(c->db,c->argv[2]); if (o) { if (checkType(c,o,OBJ_STREAM)) return; s = o->ptr; } grpname = c->argv[3]->ptr; } /* Check for missing key/group. */ if (c->argc >= 4 && !mkstream) { /* At this point key must exist, or there is an error. */ if (s == NULL) { addReplyError(c, "The XGROUP subcommand requires the key to exist. " "Note that for CREATE you may want to use the MKSTREAM " "option to create an empty stream automatically."); return; } /* Certain subcommands require the group to exist. */ if ((cg = streamLookupCG(s,grpname)) == NULL && (!strcasecmp(opt,"SETID") || !strcasecmp(opt,"DELCONSUMER"))) { addReplyErrorFormat(c, "-NOGROUP No such consumer group '%s' " "for key name '%s'", (char*)grpname, (char*)c->argv[2]->ptr); return; } } /* Dispatch the different subcommands. */ if (!strcasecmp(opt,"CREATE") && (c->argc == 5 || c->argc == 6)) { streamID id; if (!strcmp(c->argv[4]->ptr,"$")) { if (s) { id = s->last_id; } else { id.ms = 0; id.seq = 0; } } else if (streamParseStrictIDOrReply(c,c->argv[4],&id,0) != C_OK) { return; } /* Handle the MKSTREAM option now that the command can no longer fail. */ if (s == NULL) { serverAssert(mkstream); o = createStreamObject(); dbAdd(c->db,c->argv[2],o); s = o->ptr; signalModifiedKey(c,c->db,c->argv[2]); } streamCG *cg = streamCreateCG(s,grpname,sdslen(grpname),&id); if (cg) { addReply(c,shared.ok); server.dirty++; notifyKeyspaceEvent(NOTIFY_STREAM,"xgroup-create", c->argv[2],c->db->id); } else { addReplySds(c, sdsnew("-BUSYGROUP Consumer Group name already exists\r\n")); } } else if (!strcasecmp(opt,"SETID") && c->argc == 5) { streamID id; if (!strcmp(c->argv[4]->ptr,"$")) { id = s->last_id; } else if (streamParseIDOrReply(c,c->argv[4],&id,0) != C_OK) { return; } cg->last_id = id; addReply(c,shared.ok); server.dirty++; notifyKeyspaceEvent(NOTIFY_STREAM,"xgroup-setid",c->argv[2],c->db->id); } else if (!strcasecmp(opt,"DESTROY") && c->argc == 4) { if (cg) { raxRemove(s->cgroups,(unsigned char*)grpname,sdslen(grpname),NULL); streamFreeCG(cg); addReply(c,shared.cone); server.dirty++; notifyKeyspaceEvent(NOTIFY_STREAM,"xgroup-destroy", c->argv[2],c->db->id); /* We want to unblock any XREADGROUP consumers with -NOGROUP. */ signalKeyAsReady(c->db,c->argv[2]); } else { addReply(c,shared.czero); } } else if (!strcasecmp(opt,"DELCONSUMER") && c->argc == 5) { /* Delete the consumer and returns the number of pending messages * that were yet associated with such a consumer. */ long long pending = streamDelConsumer(cg,c->argv[4]->ptr); addReplyLongLong(c,pending); server.dirty++; notifyKeyspaceEvent(NOTIFY_STREAM,"xgroup-delconsumer", c->argv[2],c->db->id); } else if (c->argc == 2 && !strcasecmp(opt,"HELP")) { addReplyHelp(c, help); } else { addReplySubcommandSyntaxError(c); } }
0
[ "CWE-190" ]
redis
f6a40570fa63d5afdd596c78083d754081d80ae3
64,543,542,043,160,790,000,000,000,000,000,000,000
132
Fix ziplist and listpack overflows and truncations (CVE-2021-32627, CVE-2021-32628) - fix possible heap corruption in ziplist and listpack resulting from trying to allocate more than the maximum size of 4GB. - prevent ziplist (hash and zset) from reaching a size above 1GB; it will be converted to HT encoding, as that's not a useful size. - prevent listpack (stream) from reaching a size above 1GB. - XADD will start a new listpack if the new record may cause the previous listpack to grow over 1GB. - XADD will respond with an error if a single stream record is over 1GB. - List type (ziplist in quicklist) was truncating strings that were over 4GB; now it'll respond with an error.
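The common thread of these fixes is checking an addition against the encoding's cap before it can wrap. A minimal C sketch, with an illustrative 1GB cap standing in for the real ziplist/listpack limits:

#include <stdlib.h>

#define ENCODING_MAX ((size_t)1 << 30)   /* illustrative 1GB cap */

/* Refuse the append instead of letting cur_size + add_size wrap or
 * exceed the encoding's cap; the caller falls back, e.g. converts
 * the value to a non-compact encoding or returns an error. */
static void *grow_checked(void *buf, size_t cur_size, size_t add_size)
{
    if (cur_size > ENCODING_MAX || add_size > ENCODING_MAX - cur_size)
        return NULL;
    return realloc(buf, cur_size + add_size);
}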
static void initMemArray(Mem *p, int N, sqlite3 *db, u16 flags){ while( (N--)>0 ){ p->db = db; p->flags = flags; p->szMalloc = 0; #ifdef SQLITE_DEBUG p->pScopyFrom = 0; #endif p++; } }
0
[ "CWE-755" ]
sqlite
8654186b0236d556aa85528c2573ee0b6ab71be3
321,610,772,949,466,240,000,000,000,000,000,000,000
11
When an error occurs while rewriting the parser tree for window functions in the sqlite3WindowRewrite() routine, make sure that pParse->nErr is set, and make sure that this shuts down any subsequent code generation that might depend on the transformations that were implemented. This fixes a problem discovered by the Yongheng and Rui fuzzer. FossilOrigin-Name: e2bddcd4c55ba3cbe0130332679ff4b048630d0ced9a8899982edb5a3569ba7f
static int _waiter_init (uint32_t jobid) { if (!waiters) waiters = list_create((ListDelF) _waiter_destroy); /* * Exit this thread if another thread is waiting on job */ if (list_find_first (waiters, (ListFindF) _find_waiter, &jobid)) return SLURM_ERROR; else list_append(waiters, _waiter_create(jobid)); return (SLURM_SUCCESS); }
0
[ "CWE-20" ]
slurm
df545955e4f119974c278bff0c47155257d5afc7
48,627,884,222,997,565,000,000,000,000,000,000,000
15
Validate gid and user_name values provided to slurmd up front. Do not defer until later, and do not potentially miss out on proper validation of the user_name field which can lead to improper authentication handling. CVE-2018-10995.
int message_parser_read_more(struct message_parser_ctx *ctx, struct message_block *block_r, bool *full_r) { int ret; if (ctx->skip > 0) { i_stream_skip(ctx->input, ctx->skip); ctx->skip = 0; } *full_r = FALSE; ret = i_stream_read_bytes(ctx->input, &block_r->data, &block_r->size, ctx->want_count + 1); if (ret <= 0) { switch (ret) { case 0: if (!ctx->input->eof) { i_assert(!ctx->input->blocking); return 0; } break; case -1: i_assert(ctx->input->eof || ctx->input->stream_errno != 0); ctx->eof = TRUE; if (block_r->size != 0) { /* EOF, but we still have some data. return it. */ return 1; } return -1; case -2: *full_r = TRUE; break; default: i_unreached(); } } if (!*full_r) { /* reset number of wanted characters if we actually got them */ ctx->want_count = 1; } return 1; }
0
[ "CWE-20" ]
core
fb97a1cddbda4019e327fa736972a1c7433fedaa
267,464,397,074,224,600,000,000,000,000,000,000,000
45
lib-mail: message-parser - Fix assert-crash when enforcing MIME part limit The limit could have been exceeded with message/rfc822 parts.
* %NULL on failure */ struct iscsi_bus_flash_conn * iscsi_create_flashnode_conn(struct Scsi_Host *shost, struct iscsi_bus_flash_session *fnode_sess, struct iscsi_transport *transport, int dd_size) { struct iscsi_bus_flash_conn *fnode_conn; int err; fnode_conn = kzalloc(sizeof(*fnode_conn) + dd_size, GFP_KERNEL); if (!fnode_conn) return NULL; fnode_conn->transport = transport; fnode_conn->dev.type = &iscsi_flashnode_conn_dev_type; fnode_conn->dev.bus = &iscsi_flashnode_bus; fnode_conn->dev.parent = &fnode_sess->dev; dev_set_name(&fnode_conn->dev, "flashnode_conn-%u:%u:0", shost->host_no, fnode_sess->target_id); err = device_register(&fnode_conn->dev); if (err) goto free_fnode_conn; if (dd_size) fnode_conn->dd_data = &fnode_conn[1]; return fnode_conn; free_fnode_conn:
0
[ "CWE-787" ]
linux
ec98ea7070e94cc25a422ec97d1421e28d97b7ee
127,345,786,112,221,450,000,000,000,000,000,000,000
32
scsi: iscsi: Ensure sysfs attributes are limited to PAGE_SIZE As the iSCSI parameters are exported back through sysfs, it should be enforcing that they never are more than PAGE_SIZE (which should be more than enough) before accepting updates through netlink. Change all iSCSI sysfs attributes to use sysfs_emit(). Cc: [email protected] Reported-by: Adam Nichols <[email protected]> Reviewed-by: Lee Duncan <[email protected]> Reviewed-by: Greg Kroah-Hartman <[email protected]> Reviewed-by: Mike Christie <[email protected]> Signed-off-by: Chris Leech <[email protected]> Signed-off-by: Martin K. Petersen <[email protected]>
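sysfs hands a show() callback exactly one page, so formatting must be bounded by PAGE_SIZE; sysfs_emit() enforces this inside the kernel. A userspace analogue of the same discipline (the PAGE_SIZE value and function name here are illustrative, not the kernel helper itself):

#include <stdio.h>

#define PAGE_SIZE 4096

/* Format into the page-sized buffer with an explicit bound instead
 * of sprintf(), refusing output that would not fit. */
static int show_param(char *buf, const char *value)
{
    int n = snprintf(buf, PAGE_SIZE, "%s\n", value);
    if (n < 0 || n >= PAGE_SIZE)   /* would have overflowed/truncated */
        return -1;
    return n;                      /* bytes emitted, like sysfs_emit() */
}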
Bool rfbSendRectEncodingRaw(rfbClientPtr cl, int x, int y, int w, int h) { rfbFramebufferUpdateRectHeader rect; int nlines; int bytesPerLine = w * (cl->format.bitsPerPixel / 8); char *fbptr = (cl->fb + (rfbFB.paddedWidthInBytes * y) + (x * (rfbFB.bitsPerPixel / 8))); /* Flush the buffer to guarantee correct alignment for translateFn(). */ if (ublen > 0) { if (!rfbSendUpdateBuf(cl)) return FALSE; } rect.r.x = Swap16IfLE(x); rect.r.y = Swap16IfLE(y); rect.r.w = Swap16IfLE(w); rect.r.h = Swap16IfLE(h); rect.encoding = Swap32IfLE(rfbEncodingRaw); memcpy(&updateBuf[ublen], (char *)&rect, sz_rfbFramebufferUpdateRectHeader); ublen += sz_rfbFramebufferUpdateRectHeader; cl->rfbRectanglesSent[rfbEncodingRaw]++; cl->rfbBytesSent[rfbEncodingRaw] += sz_rfbFramebufferUpdateRectHeader + bytesPerLine * h; nlines = (UPDATE_BUF_SIZE - ublen) / bytesPerLine; while (TRUE) { if (nlines > h) nlines = h; (*cl->translateFn) (cl->translateLookupTable, &rfbServerFormat, &cl->format, fbptr, &updateBuf[ublen], rfbFB.paddedWidthInBytes, w, nlines); ublen += nlines * bytesPerLine; h -= nlines; if (h == 0) /* rect fitted in buffer, do next one */ return TRUE; /* buffer full - flush partial rect and do another nlines */ if (!rfbSendUpdateBuf(cl)) return FALSE; fbptr += (rfbFB.paddedWidthInBytes * nlines); nlines = (UPDATE_BUF_SIZE - ublen) / bytesPerLine; if (nlines == 0) { rfbLog("rfbSendRectEncodingRaw: send buffer too small for %d bytes per line\n", bytesPerLine); rfbCloseClient(cl); return FALSE; } } }
0
[ "CWE-787" ]
turbovnc
cea98166008301e614e0d36776bf9435a536136e
85,852,924,783,758,190,000,000,000,000,000,000,000
59
Server: Fix two issues identified by ASan 1. If the TLSPlain and X509Plain security types were both disabled, then rfbOptPamAuth() would overflow the name field in the secTypes structure when testing the "none" security type, since the name of that security type has less than five characters. This issue was innocuous, since the overflow was fully contained within the secTypes structure, but the ASan error caused Xvnc to abort, which made it difficult to detect other errors. 2. If an ill-behaved RFB client sent the TurboVNC Server a fence message with more than 64 bytes, then the TurboVNC Server would try to read that message and subsequently overflow the stack before it detected that the payload was too large. This could never have occurred with any of the VNC viewers that currently support the RFB flow control extensions (TigerVNC and TurboVNC, namely.) This issue was also innocuous, since the stack overflow affected two variables (newScreens and errMsg) that were never accessed before the function returned.
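Issue 2 is the classic announced-length-versus-fixed-stack-buffer bug: validate the length before the copy. A generic C sketch of the guard, not the actual TurboVNC code:

#include <stdint.h>
#include <string.h>

#define MAX_FENCE_LEN 64

/* Reject an over-long payload before copying into the fixed buffer;
 * a real server would close the connection instead of overflowing. */
static int read_fence_payload(const uint8_t *wire, uint32_t announced_len,
                              uint8_t out[MAX_FENCE_LEN])
{
    if (announced_len > MAX_FENCE_LEN)
        return -1;
    memcpy(out, wire, announced_len);
    return (int)announced_len;
}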
TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePathBad) { initialize(); Buffer::OwnedImpl buffer("GET * HTTP/1.1\r\nHost: bah\r\n\r\n"); expect400(Protocol::Http11, true, buffer, "http1.invalid_url"); }
0
[ "CWE-770" ]
envoy
7ca28ff7d46454ae930e193d97b7d08156b1ba59
241,887,986,914,794,100,000,000,000,000,000,000,000
6
[http1] Include request URL in request header size computation, and reject partial headers that exceed configured limits (#145) Signed-off-by: antonio <[email protected]>
void DeallocateRaw(void* ptr) override { if (ptr != memory_region_->data()) { LOG(ERROR) << "Deallocating not allocated region for readonly memory region"; } if (delete_on_deallocate_) { delete this; } }
0
[ "CWE-125" ]
tensorflow
1cb6bb6c2a6019417c9adaf9e6843ba75ee2580b
57,661,419,415,885,650,000,000,000,000,000,000,000
9
Add error checking to ImmutableConst OP that strings are not yet supported. PiperOrigin-RevId: 401065359 Change-Id: I9dd2bd2a2c36f22f4a05153daf6ebdc4613469d2
static int rtreeOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ int rc = SQLITE_NOMEM; Rtree *pRtree = (Rtree *)pVTab; RtreeCursor *pCsr; pCsr = (RtreeCursor *)sqlite3_malloc64(sizeof(RtreeCursor)); if( pCsr ){ memset(pCsr, 0, sizeof(RtreeCursor)); pCsr->base.pVtab = pVTab; rc = SQLITE_OK; pRtree->nCursor++; } *ppCursor = (sqlite3_vtab_cursor *)pCsr; return rc; }
0
[ "CWE-125" ]
sqlite
e41fd72acc7a06ce5a6a7d28154db1ffe8ba37a8
88,459,725,844,604,610,000,000,000,000,000,000,000
16
Enhance the rtreenode() function of rtree (used for testing) so that it uses the newer sqlite3_str object for better performance and improved error reporting. FossilOrigin-Name: 90acdbfce9c088582d5165589f7eac462b00062bbfffacdcc786eb9cf3ea5377
private uint64_t file_strncmp16(const char *a, const char *b, size_t len, uint32_t flags) { /* * XXX - The 16-bit string compare probably needs to be done * differently, especially if the flags are to be supported. * At the moment, I am unsure. */ flags = 0;
0
[ "CWE-20" ]
php-src
74555e7c26b2c61bb8e67b7d6a6f4d2b8eb3a5f3
239,352,830,117,474,630,000,000,000,000,000,000,000
10
Fixed bug #64830 mimetype detection segfaults on mp3 file
static int fuse_flush(struct file *file, fl_owner_t id) { struct inode *inode = file_inode(file); struct fuse_mount *fm = get_fuse_mount(inode); struct fuse_file *ff = file->private_data; struct fuse_flush_in inarg; FUSE_ARGS(args); int err; if (is_bad_inode(inode)) return -EIO; err = write_inode_now(inode, 1); if (err) return err; inode_lock(inode); fuse_sync_writes(inode); inode_unlock(inode); err = filemap_check_errors(file->f_mapping); if (err) return err; err = 0; if (fm->fc->no_flush) goto inval_attr_out; memset(&inarg, 0, sizeof(inarg)); inarg.fh = ff->fh; inarg.lock_owner = fuse_lock_owner_id(fm->fc, id); args.opcode = FUSE_FLUSH; args.nodeid = get_node_id(inode); args.in_numargs = 1; args.in_args[0].size = sizeof(inarg); args.in_args[0].value = &inarg; args.force = true; err = fuse_simple_request(fm, &args); if (err == -ENOSYS) { fm->fc->no_flush = 1; err = 0; } inval_attr_out: /* * In memory i_blocks is not maintained by fuse, if writeback cache is * enabled, i_blocks from cached attr may not be accurate. */ if (!err && fm->fc->writeback_cache) fuse_invalidate_attr(inode); return err; }
1
[ "CWE-459" ]
linux
5d069dbe8aaf2a197142558b6fb2978189ba3454
125,913,326,447,398,000,000,000,000,000,000,000,000
53
fuse: fix bad inode Jan Kara's analysis of the syzbot report (edited): The reproducer opens a directory on FUSE filesystem, it then attaches dnotify mark to the open directory. After that a fuse_do_getattr() call finds that attributes returned by the server are inconsistent, and calls make_bad_inode() which, among other things does: inode->i_mode = S_IFREG; This then confuses dnotify which doesn't tear down its structures properly and eventually crashes. Avoid calling make_bad_inode() on a live inode: switch to a private flag on the fuse inode. Also add the test to ops which the bad_inode_ops would have caught. This bug goes back to the initial merge of fuse in 2.6.14... Reported-by: [email protected] Signed-off-by: Miklos Szeredi <[email protected]> Tested-by: Jan Kara <[email protected]> Cc: <[email protected]>
int git_delta_index_init( git_delta_index **out, const void *buf, size_t bufsize) { unsigned int i, hsize, hmask, entries, prev_val, *hash_count; const unsigned char *data, *buffer = buf; struct git_delta_index *index; struct index_entry *entry, **hash; void *mem; unsigned long memsize; *out = NULL; if (!buf || !bufsize) return 0; /* Determine index hash size. Note that indexing skips the first byte to allow for optimizing the rabin polynomial initialization in create_delta(). */ entries = (unsigned int)(bufsize - 1) / RABIN_WINDOW; if (bufsize >= 0xffffffffUL) { /* * Current delta format can't encode offsets into * reference buffer with more than 32 bits. */ entries = 0xfffffffeU / RABIN_WINDOW; } hsize = entries / 4; for (i = 4; i < 31 && (1u << i) < hsize; i++); hsize = 1 << i; hmask = hsize - 1; if (lookup_index_alloc(&mem, &memsize, entries, hsize) < 0) return -1; index = mem; mem = index->hash; hash = mem; mem = hash + hsize; entry = mem; index->memsize = memsize; index->src_buf = buf; index->src_size = bufsize; index->hash_mask = hmask; memset(hash, 0, hsize * sizeof(*hash)); /* allocate an array to count hash entries */ hash_count = git__calloc(hsize, sizeof(*hash_count)); if (!hash_count) { git__free(index); return -1; } /* then populate the index */ prev_val = ~0; for (data = buffer + entries * RABIN_WINDOW - RABIN_WINDOW; data >= buffer; data -= RABIN_WINDOW) { unsigned int val = 0; for (i = 1; i <= RABIN_WINDOW; i++) val = ((val << 8) | data[i]) ^ T[val >> RABIN_SHIFT]; if (val == prev_val) { /* keep the lowest of consecutive identical blocks */ entry[-1].ptr = data + RABIN_WINDOW; } else { prev_val = val; i = val & hmask; entry->ptr = data + RABIN_WINDOW; entry->val = val; entry->next = hash[i]; hash[i] = entry++; hash_count[i]++; } } /* * Determine a limit on the number of entries in the same hash * bucket. This guard us against patological data sets causing * really bad hash distribution with most entries in the same hash * bucket that would bring us to O(m*n) computing costs (m and n * corresponding to reference and target buffer sizes). * * Make sure none of the hash buckets has more entries than * we're willing to test. Otherwise we cull the entry list * uniformly to still preserve a good repartition across * the reference buffer. */ for (i = 0; i < hsize; i++) { if (hash_count[i] < HASH_LIMIT) continue; entry = hash[i]; do { struct index_entry *keep = entry; int skip = hash_count[i] / HASH_LIMIT / 2; do { entry = entry->next; } while(--skip && entry); keep->next = entry; } while (entry); } git__free(hash_count); *out = index; return 0; }
0
[ "CWE-190", "CWE-125" ]
libgit2
3f461902dc1072acb8b7607ee65d0a0458ffac2a
288,130,658,058,151,800,000,000,000,000,000,000,000
106
delta: fix sign-extension of big left-shift Our delta code was originally adapted from JGit, which itself adapted it from git itself. Due to this heritage, we inherited a bug from git.git in how we compute the delta offset, which was fixed upstream in 48fb7deb5 (Fix big left-shifts of unsigned char, 2009-06-17). As explained by Linus: Shifting 'unsigned char' or 'unsigned short' left can result in sign extension errors, since the C integer promotion rules means that the unsigned char/short will get implicitly promoted to a signed 'int' due to the shift (or due to other operations). This normally doesn't matter, but if you shift things up sufficiently, it will now set the sign bit in 'int', and a subsequent cast to a bigger type (eg 'long' or 'unsigned long') will now sign-extend the value despite the original expression being unsigned. One example of this would be something like unsigned long size; unsigned char c; size += c << 24; where despite all the variables being unsigned, 'c << 24' ends up being a signed entity, and will get sign-extended when then doing the addition in an 'unsigned long' type. Since git uses 'unsigned char' pointers extensively, we actually have this bug in a couple of places. In our delta code, we inherited such a bogus shift when computing the offset at which the delta base is to be found. Due to the sign extension we can end up with an offset where all the bits are set. This can allow an arbitrary memory read, as the addition in `base_len < off + len` can now overflow if `off` has all its bits set. Fix the issue by casting the result of `*delta++ << 24UL` to an unsigned integer again. Add a test with a crafted delta that would actually succeed with an out-of-bounds read in case where the cast wouldn't exist. Reported-by: Riccardo Schirone <[email protected]> Test-provided-by: Riccardo Schirone <[email protected]>
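The hazard Linus describes is easy to reproduce in a few lines: the unsigned char promotes to signed int before the shift, and the widened result sign-extends (shifting into the sign bit is formally undefined, which is part of the problem). A self-contained demonstration of the bug and the one-cast fix:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned char c = 0x80;
    uint64_t bad = 0, good = 0;

    bad  += c << 24;             /* int promotion: typically becomes
                                    0xffffffff80000000 after widening
                                    (and is formally undefined) */
    good += (uint64_t)c << 24;   /* fix: shift in an unsigned type */

    printf("bad=%016llx good=%016llx\n",
           (unsigned long long)bad, (unsigned long long)good);
    return 0;
}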
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real) { struct i915_mm_struct *mm; /* Protected by dev_priv->mm_lock */ hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real) if (mm->mm == real) return mm; return NULL; }
0
[ "CWE-362" ]
linux
17839856fd588f4ab6b789f482ed3ffd7c403e1f
96,061,711,801,043,900,000,000,000,000,000,000,000
11
gup: document and work around "COW can break either way" issue Doing a "get_user_pages()" on a copy-on-write page for reading can be ambiguous: the page can be COW'ed at any time afterwards, and the direction of a COW event isn't defined. Yes, whoever writes to it will generally do the COW, but if the thread that did the get_user_pages() unmapped the page before the write (and that could happen due to memory pressure in addition to any outright action), the writer could also just take over the old page instead. End result: the get_user_pages() call might result in a page pointer that is no longer associated with the original VM, and is associated with - and controlled by - another VM having taken it over instead. So when doing a get_user_pages() on a COW mapping, the only really safe thing to do would be to break the COW when getting the page, even when only getting it for reading. At the same time, some users simply don't even care. For example, the perf code wants to look up the page not because it cares about the page, but because the code simply wants to look up the physical address of the access for informational purposes, and doesn't really care about races when a page might be unmapped and remapped elsewhere. This adds logic to force a COW event by setting FOLL_WRITE on any copy-on-write mapping when FOLL_GET (or FOLL_PIN) is used to get a page pointer as a result. The current semantics end up being: - __get_user_pages_fast(): no change. If you don't ask for a write, you won't break COW. You'd better know what you're doing. - get_user_pages_fast(): the fast-case "look it up in the page tables without anything getting mmap_sem" now refuses to follow a read-only page, since it might need COW breaking. Which happens in the slow path - the fast path doesn't know if the memory might be COW or not. - get_user_pages() (including the slow-path fallback for gup_fast()): for a COW mapping, turn on FOLL_WRITE for FOLL_GET/FOLL_PIN, with very similar semantics to FOLL_FORCE. If it turns out that we want finer granularity (ie "only break COW when it might actually matter" - things like the zero page are special and don't need to be broken) we might need to push these semantics deeper into the lookup fault path. So if people care enough, it's possible that we might end up adding a new internal FOLL_BREAK_COW flag to go with the internal FOLL_COW flag we already have for tracking "I had a COW". Alternatively, if it turns out that different callers might want to explicitly control the forced COW break behavior, we might even want to make such a flag visible to the users of get_user_pages() instead of using the above default semantics. But for now, this is mostly commentary on the issue (this commit message being a lot bigger than the patch, and that patch in turn is almost all comments), with that minimal "enable COW breaking early" logic using the existing FOLL_WRITE behavior. [ It might be worth noting that we've always had this ambiguity, and it could arguably be seen as a user-space issue. You only get private COW mappings that could break either way in situations where user space is doing cooperative things (ie fork() before an execve() etc), but it _is_ surprising and very subtle, and fork() is supposed to give you independent address spaces. So let's treat this as a kernel issue and make the semantics of get_user_pages() easier to understand. Note that obviously a true shared mapping will still get a page that can change under us, so this does _not_ mean that get_user_pages() somehow returns any "stable" page ] Reported-by: Jann Horn <[email protected]> Tested-by: Christoph Hellwig <[email protected]> Acked-by: Oleg Nesterov <[email protected]> Acked-by: Kirill Shutemov <[email protected]> Acked-by: Jan Kara <[email protected]> Cc: Andrea Arcangeli <[email protected]> Cc: Matthew Wilcox <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
TEST_P(PerRouteIntegrationTest, PerRouteConfigOK) { // A config with a requirement_map const std::string filter_conf = R"( providers: example_provider: issuer: https://example.com audiences: - example_service requirement_map: abc: provider_name: "example_provider" )"; // Per-route config with correct requirement_name PerRouteConfig per_route; per_route.set_requirement_name("abc"); setup(filter_conf, per_route); codec_client_ = makeHttpConnection(lookupPort("http")); // So the request with a JWT token is OK. auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, {":authority", "host"}, {"Authorization", "Bearer " + std::string(GoodToken)}, }); waitForNextUpstreamRequest(); upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); ASSERT_TRUE(response->waitForEndStream()); ASSERT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); // A request with missing token is rejected. auto response1 = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, {":authority", "host"}, }); ASSERT_TRUE(response1->waitForEndStream()); ASSERT_TRUE(response1->complete()); EXPECT_EQ("401", response1->headers().getStatusValue()); }
0
[ "CWE-476", "CWE-703" ]
envoy
9371333230b1a6e1be2eccf4868771e11af6253a
11,325,188,768,667,374,000,000,000,000,000,000,000
48
CVE-2021-43824 jwt_authn: fixed the crash when a CONNECT request is sent to the JWT filter configured with regex match. Signed-off-by: Yan Avlasov <[email protected]>
static int MP4_ReadBox_sample_soun( stream_t *p_stream, MP4_Box_t *p_box ) { p_box->i_handler = ATOM_soun; MP4_READBOX_ENTER( MP4_Box_data_sample_soun_t ); p_box->data.p_sample_soun->p_qt_description = NULL; /* Sanity check needed because the "wave" box does also contain an * "mp4a" box that we don't understand. */ if( i_read < 28 ) { i_read -= 30; MP4_READBOX_EXIT( 1 ); } for( unsigned i = 0; i < 6 ; i++ ) { MP4_GET1BYTE( p_box->data.p_sample_soun->i_reserved1[i] ); } MP4_GET2BYTES( p_box->data.p_sample_soun->i_data_reference_index ); /* * XXX hack -> produce a copy of the nearly complete chunk */ p_box->data.p_sample_soun->i_qt_description = 0; p_box->data.p_sample_soun->p_qt_description = NULL; if( i_read > 0 ) { p_box->data.p_sample_soun->p_qt_description = malloc( i_read ); if( p_box->data.p_sample_soun->p_qt_description ) { p_box->data.p_sample_soun->i_qt_description = i_read; memcpy( p_box->data.p_sample_soun->p_qt_description, p_peek, i_read ); } } MP4_GET2BYTES( p_box->data.p_sample_soun->i_qt_version ); MP4_GET2BYTES( p_box->data.p_sample_soun->i_qt_revision_level ); MP4_GET4BYTES( p_box->data.p_sample_soun->i_qt_vendor ); MP4_GET2BYTES( p_box->data.p_sample_soun->i_channelcount ); MP4_GET2BYTES( p_box->data.p_sample_soun->i_samplesize ); MP4_GET2BYTES( p_box->data.p_sample_soun->i_compressionid ); MP4_GET2BYTES( p_box->data.p_sample_soun->i_reserved3 ); MP4_GET2BYTES( p_box->data.p_sample_soun->i_sampleratehi ); MP4_GET2BYTES( p_box->data.p_sample_soun->i_sampleratelo ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"soun\" stsd qt_version %"PRIu16" compid=%"PRIx16, p_box->data.p_sample_soun->i_qt_version, p_box->data.p_sample_soun->i_compressionid ); #endif if( p_box->data.p_sample_soun->i_qt_version == 1 && i_read >= 16 ) { /* SoundDescriptionV1 */ MP4_GET4BYTES( p_box->data.p_sample_soun->i_sample_per_packet ); MP4_GET4BYTES( p_box->data.p_sample_soun->i_bytes_per_packet ); MP4_GET4BYTES( p_box->data.p_sample_soun->i_bytes_per_frame ); MP4_GET4BYTES( p_box->data.p_sample_soun->i_bytes_per_sample ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"soun\" V1 sample/packet=%d bytes/packet=%d " "bytes/frame=%d bytes/sample=%d", p_box->data.p_sample_soun->i_sample_per_packet, p_box->data.p_sample_soun->i_bytes_per_packet, p_box->data.p_sample_soun->i_bytes_per_frame, p_box->data.p_sample_soun->i_bytes_per_sample ); #endif stream_Seek( p_stream, p_box->i_pos + mp4_box_headersize( p_box ) + 44 ); } else if( p_box->data.p_sample_soun->i_qt_version == 2 && i_read >= 36 ) { /* SoundDescriptionV2 */ double f_sample_rate; int64_t i_dummy64; uint32_t i_channel, i_extoffset, i_dummy32; /* Checks */ if ( p_box->data.p_sample_soun->i_channelcount != 0x3 || p_box->data.p_sample_soun->i_samplesize != 0x0010 || p_box->data.p_sample_soun->i_compressionid != 0xFFFE || p_box->data.p_sample_soun->i_reserved3 != 0x0 || p_box->data.p_sample_soun->i_sampleratehi != 0x1 ||//65536 p_box->data.p_sample_soun->i_sampleratelo != 0x0 ) //remainder { msg_Err( p_stream, "invalid stsd V2 box defaults" ); MP4_READBOX_EXIT( 0 ); } /* !Checks */ MP4_GET4BYTES( i_extoffset ); /* offset to stsd extentions */ MP4_GET8BYTES( i_dummy64 ); memcpy( &f_sample_rate, &i_dummy64, 8 ); msg_Dbg( p_stream, "read box: %f Hz", f_sample_rate ); p_box->data.p_sample_soun->i_sampleratehi = (int)f_sample_rate % BLOCK16x16; p_box->data.p_sample_soun->i_sampleratelo = f_sample_rate / BLOCK16x16; MP4_GET4BYTES( i_channel ); p_box->data.p_sample_soun->i_channelcount = i_channel; MP4_GET4BYTES( i_dummy32 ); if ( i_dummy32 != 0x7F000000 ) { msg_Err( p_stream, "invalid stsd V2 box" ); MP4_READBOX_EXIT( 0 ); } MP4_GET4BYTES( p_box->data.p_sample_soun->i_constbitsperchannel ); MP4_GET4BYTES( p_box->data.p_sample_soun->i_formatflags ); MP4_GET4BYTES( p_box->data.p_sample_soun->i_constbytesperaudiopacket ); MP4_GET4BYTES( p_box->data.p_sample_soun->i_constLPCMframesperaudiopacket ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"soun\" V2 rate=%f bitsperchannel=%u " "flags=%u bytesperpacket=%u lpcmframesperpacket=%u", f_sample_rate, p_box->data.p_sample_soun->i_constbitsperchannel, p_box->data.p_sample_soun->i_formatflags, p_box->data.p_sample_soun->i_constbytesperaudiopacket, p_box->data.p_sample_soun->i_constLPCMframesperaudiopacket ); #endif if ( i_extoffset < p_box->i_size ) stream_Seek( p_stream, p_box->i_pos + i_extoffset ); else stream_Seek( p_stream, p_box->i_pos + p_box->i_size ); } else { p_box->data.p_sample_soun->i_sample_per_packet = 0; p_box->data.p_sample_soun->i_bytes_per_packet = 0; p_box->data.p_sample_soun->i_bytes_per_frame = 0; p_box->data.p_sample_soun->i_bytes_per_sample = 0; #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"soun\" V0 or qt1/2 (rest=%"PRId64")", i_read ); #endif stream_Seek( p_stream, p_box->i_pos + mp4_box_headersize( p_box ) + 28 ); } if( p_box->i_type == ATOM_drms ) { msg_Warn( p_stream, "DRM protected streams are not supported." ); MP4_READBOX_EXIT( 0 ); } if( p_box->i_type == ATOM_samr || p_box->i_type == ATOM_sawb ) { /* Ignore channelcount for AMR (3gpp AMRSpecificBox) */ p_box->data.p_sample_soun->i_channelcount = 1; } /* Loads extensions */ MP4_ReadBoxContainerRaw( p_stream, p_box ); /* esds/wave/... */ #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"soun\" in stsd channel %d " "sample size %d sample rate %f", p_box->data.p_sample_soun->i_channelcount, p_box->data.p_sample_soun->i_samplesize, (float)p_box->data.p_sample_soun->i_sampleratehi + (float)p_box->data.p_sample_soun->i_sampleratelo / BLOCK16x16 ); #endif MP4_READBOX_EXIT( 1 ); }
0
[ "CWE-120", "CWE-191", "CWE-787" ]
vlc
2e7c7091a61aa5d07e7997b393d821e91f593c39
84,128,509,204,929,890,000,000,000,000,000,000,000
171
demux: mp4: fix buffer overflow in parsing of string boxes. We ensure that p_box->i_size is never smaller than 8 to avoid an integer underflow in the third argument of the subsequent call to memcpy. We also make sure no truncation occurs when passing values derived from the 64 bit integer p_box->i_size to arguments of malloc and memcpy that may be 32 bit integers on 32 bit platforms. Signed-off-by: Jean-Baptiste Kempf <[email protected]>
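A minimal sketch of the two guards the message describes: prove the box covers its 8-byte header before subtracting (no underflow), and bound the result before it feeds malloc/memcpy (no 32-bit truncation). Names are illustrative, not the VLC patch:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Copy a box payload only after proving i_size covers the 8-byte
 * header, so i_size - 8 cannot wrap to a huge unsigned value. */
static char *copy_box_payload(const char *p_peek, uint64_t i_size)
{
    if (i_size < 8)                 /* underflow guard */
        return NULL;
    uint64_t i_read = i_size - 8;
    if (i_read > SIZE_MAX - 1)      /* no truncation into malloc/memcpy */
        return NULL;
    char *copy = malloc((size_t)i_read + 1);
    if (copy == NULL)
        return NULL;
    memcpy(copy, p_peek, (size_t)i_read);
    copy[i_read] = '\0';
    return copy;
}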
void bat_socket_init(void) { memset(socket_client_hash, 0, sizeof(socket_client_hash)); }
0
[ "CWE-119" ]
linux-2.6
b5a1eeef04cc7859f34dec9b72ea1b28e4aba07c
281,800,091,747,055,200,000,000,000,000,000,000,000
4
batman-adv: Only write requested number of byte to user buffer Don't write more than the requested number of bytes of an batman-adv icmp packet to the userspace buffer. Otherwise unrelated userspace memory might get overridden by the kernel. Signed-off-by: Sven Eckelmann <[email protected]> Signed-off-by: Marek Lindner <[email protected]>
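The fix boils down to clamping the copy length to the smaller of the packet held and the buffer the reader asked for. A userspace analogue in plain C (the kernel code copies to a user buffer instead of using memcpy):

#include <stddef.h>
#include <string.h>

/* Return the number of bytes copied: the packet length capped by the
 * destination size requested by the reader, never more. */
static size_t copy_packet(void *dst, size_t dst_len,
                          const void *pkt, size_t pkt_len)
{
    size_t n = pkt_len < dst_len ? pkt_len : dst_len;
    memcpy(dst, pkt, n);
    return n;
}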
void init_idle(struct task_struct *idle, int cpu) { struct rq *rq = cpu_rq(cpu); unsigned long flags; raw_spin_lock_irqsave(&idle->pi_lock, flags); raw_spin_lock(&rq->lock); __sched_fork(0, idle); idle->state = TASK_RUNNING; idle->se.exec_start = sched_clock(); kasan_unpoison_task_stack(idle); #ifdef CONFIG_SMP /* * Its possible that init_idle() gets called multiple times on a task, * in that case do_set_cpus_allowed() will not do the right thing. * * And since this is boot we can forgo the serialization. */ set_cpus_allowed_common(idle, cpumask_of(cpu)); #endif /* * We're having a chicken and egg problem, even though we are * holding rq->lock, the cpu isn't yet set to this cpu so the * lockdep check in task_group() will fail. * * Similar case to sched_fork(). / Alternatively we could * use task_rq_lock() here and obtain the other rq->lock. * * Silence PROVE_RCU */ rcu_read_lock(); __set_task_cpu(idle, cpu); rcu_read_unlock(); rq->curr = rq->idle = idle; idle->on_rq = TASK_ON_RQ_QUEUED; #ifdef CONFIG_SMP idle->on_cpu = 1; #endif raw_spin_unlock(&rq->lock); raw_spin_unlock_irqrestore(&idle->pi_lock, flags); /* Set the preempt count _outside_ the spinlocks! */ init_idle_preempt_count(idle, cpu); /* * The idle tasks have their own, simple scheduling class: */ idle->sched_class = &idle_sched_class; ftrace_graph_init_idle_task(idle, cpu); vtime_init_idle(idle, cpu); #ifdef CONFIG_SMP sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); #endif }
0
[ "CWE-119" ]
linux
29d6455178a09e1dc340380c582b13356227e8df
42,986,810,940,898,590,000,000,000,000,000,000,000
58
sched: panic on corrupted stack end Until now, hitting this BUG_ON caused a recursive oops (because oops handling involves do_exit(), which calls into the scheduler, which in turn raises an oops), which caused stuff below the stack to be overwritten until a panic happened (e.g. via an oops in interrupt context, caused by the overwritten CPU index in the thread_info). Just panic directly. Signed-off-by: Jann Horn <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
int X509_print_fp(FILE *fp, X509 *x) { return X509_print_ex_fp(fp, x, XN_FLAG_COMPAT, X509_FLAG_COMPAT); }
0
[ "CWE-125" ]
openssl
d9d838ddc0ed083fb4c26dd067e71aad7c65ad16
133,522,003,145,174,240,000,000,000,000,000,000,000
4
Fix a read buffer overrun in X509_aux_print(). The ASN1_STRING_get0_data(3) manual explitely cautions the reader that the data is not necessarily NUL-terminated, and the function X509_alias_set1(3) does not sanitize the data passed into it in any way either, so we must assume the return value from X509_alias_get0(3) is merely a byte array and not necessarily a string in the sense of the C language. I found this bug while writing manual pages for X509_print_ex(3) and related functions. Theo Buehler <[email protected]> checked my patch to fix the same bug in LibreSSL, see http://cvsweb.openbsd.org/src/lib/libcrypto/asn1/t_x509a.c#rev1.9 As an aside, note that the function still produces incomplete and misleading results when the data contains a NUL byte in the middle and that error handling is consistently absent throughout, even though the function provides an "int" return value obviously intended to be 1 for success and 0 for failure, and even though this function is called by another function that also wants to return 1 for success and 0 for failure and even does so in many of its code paths, though not in others. But let's stay focussed. Many things would be nice to have in the wide wild world, but a buffer overflow must not be allowed to remain in our backyard. CLA: trivial Reviewed-by: Tim Hudson <[email protected]> Reviewed-by: Paul Dale <[email protected]> Reviewed-by: Tomas Mraz <[email protected]> (Merged from https://github.com/openssl/openssl/pull/16108) (cherry picked from commit c5dc9ab965f2a69bca964c709e648158f3e4cd67)
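The safe pattern is to treat the alias as a counted byte array and bound the output by its stored length. This sketch uses the documented ASN1_STRING accessors, though it is not the actual patch:

#include <stdio.h>
#include <openssl/asn1.h>

/* Print an ASN1_STRING that may not be NUL-terminated: bound the
 * output by the stored length instead of running until a NUL. */
static void print_alias(FILE *fp, const ASN1_STRING *alias)
{
    const unsigned char *data = ASN1_STRING_get0_data(alias);
    int len = ASN1_STRING_length(alias);
    if (data != NULL && len > 0)
        fprintf(fp, "%.*s\n", len, (const char *)data);
}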
unsigned long __init absent_pages_in_range(unsigned long start_pfn, unsigned long end_pfn) { return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); }
0
[]
linux
400e22499dd92613821374c8c6c88c7225359980
297,924,267,904,322,580,000,000,000,000,000,000,000
5
mm: don't warn about allocations which stall for too long Commit 63f53dea0c98 ("mm: warn about allocations which stall for too long") was a great step for reducing possibility of silent hang up problem caused by memory allocation stalls. But this commit reverts it, for it is possible to trigger OOM lockup and/or soft lockups when many threads concurrently called warn_alloc() (in order to warn about memory allocation stalls) due to current implementation of printk(), and it is difficult to obtain useful information due to limitation of synchronous warning approach. Current printk() implementation flushes all pending logs using the context of a thread which called console_unlock(). printk() should be able to flush all pending logs eventually unless somebody continues appending to printk() buffer. Since warn_alloc() started appending to printk() buffer while waiting for oom_kill_process() to make forward progress when oom_kill_process() is processing pending logs, it became possible for warn_alloc() to force oom_kill_process() loop inside printk(). As a result, warn_alloc() significantly increased possibility of preventing oom_kill_process() from making forward progress. ---------- Pseudo code start ---------- Before warn_alloc() was introduced: retry: if (mutex_trylock(&oom_lock)) { while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here. mutex_unlock(&oom_lock) } goto retry; After warn_alloc() was introduced: retry: if (mutex_trylock(&oom_lock)) { while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here. mutex_unlock(&oom_lock) } else if (waited_for_10seconds()) { atomic_inc(&printk_pending_logs); } goto retry; ---------- Pseudo code end ---------- Although waited_for_10seconds() becomes true once per 10 seconds, unbounded number of threads can call waited_for_10seconds() at the same time. Also, since threads doing waited_for_10seconds() keep doing almost busy loop, the thread doing print_one_log() can use little CPU resource. Therefore, this situation can be simplified like ---------- Pseudo code start ---------- retry: if (mutex_trylock(&oom_lock)) { while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here. mutex_unlock(&oom_lock) } else { atomic_inc(&printk_pending_logs); } goto retry; ---------- Pseudo code end ---------- when printk() is called faster than print_one_log() can process a log. One of possible mitigation would be to introduce a new lock in order to make sure that no other series of printk() (either oom_kill_process() or warn_alloc()) can append to printk() buffer when one series of printk() (either oom_kill_process() or warn_alloc()) is already in progress. Such serialization will also help obtaining kernel messages in readable form. ---------- Pseudo code start ---------- retry: if (mutex_trylock(&oom_lock)) { mutex_lock(&oom_printk_lock); while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here. mutex_unlock(&oom_printk_lock); mutex_unlock(&oom_lock) } else { if (mutex_trylock(&oom_printk_lock)) { atomic_inc(&printk_pending_logs); mutex_unlock(&oom_printk_lock); } } goto retry; ---------- Pseudo code end ---------- But this commit does not go that direction, for we don't want to introduce a new lock dependency, and we unlikely be able to obtain useful information even if we serialized oom_kill_process() and warn_alloc(). Synchronous approach is prone to unexpected results (e.g. too late [1], too frequent [2], overlooked [3]). As far as I know, warn_alloc() never helped with providing information other than "something is going wrong". I want to consider asynchronous approach which can obtain information during stalls with possibly relevant threads (e.g. the owner of oom_lock and kswapd-like threads) and serve as a trigger for actions (e.g. turn on/off tracepoints, ask libvirt daemon to take a memory dump of stalling KVM guest for diagnostic purpose). This commit temporarily loses ability to report e.g. OOM lockup due to unable to invoke the OOM killer due to !__GFP_FS allocation request. But asynchronous approach will be able to detect such situation and emit warning. Thus, let's remove warn_alloc(). [1] https://bugzilla.kernel.org/show_bug.cgi?id=192981 [2] http://lkml.kernel.org/r/CAM_iQpWuPVGc2ky8M-9yukECtS+zKjiDasNymX7rMcBjBFyM_A@mail.gmail.com [3] commit db73ee0d46379922 ("mm, vmscan: do not loop on too_many_isolated for ever")) Link: http://lkml.kernel.org/r/1509017339-4802-1-git-send-email-penguin-kernel@I-love.SAKURA.ne.jp Signed-off-by: Tetsuo Handa <[email protected]> Reported-by: Cong Wang <[email protected]> Reported-by: yuwang.yuwang <[email protected]> Reported-by: Johannes Weiner <[email protected]> Acked-by: Michal Hocko <[email protected]> Acked-by: Johannes Weiner <[email protected]> Cc: Vlastimil Babka <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Dave Hansen <[email protected]> Cc: Sergey Senozhatsky <[email protected]> Cc: Petr Mladek <[email protected]> Cc: Steven Rostedt <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static zval **php_zip_get_property_ptr_ptr(zval *object, zval *member TSRMLS_DC) /* {{{ */ { ze_zip_object *obj; zval tmp_member; zval **retval = NULL; zip_prop_handler *hnd; zend_object_handlers *std_hnd; int ret; if (member->type != IS_STRING) { tmp_member = *member; zval_copy_ctor(&tmp_member); convert_to_string(&tmp_member); member = &tmp_member; } ret = FAILURE; obj = (ze_zip_object *)zend_objects_get_address(object TSRMLS_CC); if (obj->prop_handler != NULL) { ret = zend_hash_find(obj->prop_handler, Z_STRVAL_P(member), Z_STRLEN_P(member)+1, (void **) &hnd); } if (ret == FAILURE) { std_hnd = zend_get_std_object_handlers(); retval = std_hnd->get_property_ptr_ptr(object, member TSRMLS_CC); } if (member == &tmp_member) { zval_dtor(member); } return retval; }
0
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
121,997,068,859,138,600,000,000,000,000,000,000,000
35
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
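The check the patch alludes to reduces to one memchr over the counted string. A minimal C sketch of that check (the function name is invented):

#include <string.h>

/* A counted buffer holds a valid C path only if it contains no
 * embedded NUL byte; an embedded '\0' would silently truncate the
 * path seen by the underlying filesystem calls. */
static int path_is_clean(const char *path, size_t path_len)
{
    return memchr(path, '\0', path_len) == NULL;
}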
static PHP_FUNCTION(session_name) { char *name = NULL; int name_len; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|s", &name, &name_len) == FAILURE) { return; } RETVAL_STRING(PS(session_name), 1); if (name) { zend_alter_ini_entry("session.name", sizeof("session.name"), name, name_len, PHP_INI_USER, PHP_INI_STAGE_RUNTIME); } }
0
[ "CWE-264" ]
php-src
25e8fcc88fa20dc9d4c47184471003f436927cde
72,300,881,532,835,100,000,000,000,000,000,000,000
15
Strict session
static int vfswrap_statvfs(struct vfs_handle_struct *handle, const char *path, vfs_statvfs_struct *statbuf) { return sys_statvfs(path, statbuf); }
0
[ "CWE-665" ]
samba
30e724cbff1ecd90e5a676831902d1e41ec1b347
130,460,745,405,610,300,000,000,000,000,000,000,000
4
FSCTL_GET_SHADOW_COPY_DATA: Initialize output array to zero Otherwise num_volumes and the end marker can return uninitialized data to the client. Signed-off-by: Christof Schmitt <[email protected]> Reviewed-by: Jeremy Allison <[email protected]> Reviewed-by: Simo Sorce <[email protected]>
enum row_type ha_maria::get_row_type() const { switch (file->s->data_file_type) { case STATIC_RECORD: return ROW_TYPE_FIXED; case DYNAMIC_RECORD: return ROW_TYPE_DYNAMIC; case BLOCK_RECORD: return ROW_TYPE_PAGE; case COMPRESSED_RECORD: return ROW_TYPE_COMPRESSED; default: return ROW_TYPE_NOT_USED; } }
0
[ "CWE-400" ]
server
9e39d0ae44595dbd1570805d97c9c874778a6be8
288,839,827,937,121,430,000,000,000,000,000,000,000
10
MDEV-25787 Bug report: crash on SELECT DISTINCT thousands_blob_fields fix a debug assert to account for not opened temp tables
sync_cookie_isvalid(Sync_Cookie *testcookie, Sync_Cookie *refcookie) { /* client and server info must match */ if (testcookie == NULL || refcookie == NULL) { return 0; } if ((testcookie->openldap_compat != refcookie->openldap_compat || strcmp(testcookie->cookie_client_signature, refcookie->cookie_client_signature) || testcookie->cookie_change_info == -1 || testcookie->cookie_change_info > refcookie->cookie_change_info)) { return 0; } if (refcookie->openldap_compat) { if (testcookie->cookie_server_signature != NULL || refcookie->cookie_server_signature != NULL) { return 0; } } else { if (strcmp(testcookie->cookie_server_signature, refcookie->cookie_server_signature)) { return 0; } } /* could add an additional check if the requested state in client cookie is still * available. Accept any state request for now. */ return 1; }
0
[ "CWE-476" ]
389-ds-base
d7eef2fcfbab2ef8aa6ee0bf60f0a9b16ede66e0
235,058,665,187,629,700,000,000,000,000,000,000,000
28
Issue 4711 - SIGSEGV with sync_repl (#4738) Bug description: sync_repl sends back entries identified with a unique identifier that is 'nsuniqueid'. If 'nsuniqueid' is missing, then it may crash. Fix description: Check that a nsuniqueid is available, else return OP_ERR. relates: https://github.com/389ds/389-ds-base/issues/4711 Reviewed by: Pierre Rogier, James Chapman, William Brown (Thanks!) Platforms tested: F33
void GC_deinit(void) { # ifdef THREADS if (GC_is_initialized) { DeleteCriticalSection(&GC_write_cs); } # endif }
0
[ "CWE-119" ]
bdwgc
7292c02fac2066d39dd1bcc37d1a7054fd1e32ee
83,480,140,125,016,100,000,000,000,000,000,000,000
8
Fix malloc routines to prevent size value wrap-around See issue #135 on Github. * allchblk.c (GC_allochblk, GC_allochblk_nth): Use OBJ_SZ_TO_BLOCKS_CHECKED instead of OBJ_SZ_TO_BLOCKS. * malloc.c (GC_alloc_large): Likewise. * alloc.c (GC_expand_hp_inner): Type of "bytes" local variable changed from word to size_t; cast ROUNDUP_PAGESIZE argument to size_t; prevent overflow when computing GC_heapsize+bytes > GC_max_heapsize. * dbg_mlc.c (GC_debug_malloc, GC_debug_malloc_ignore_off_page, GC_debug_malloc_atomic_ignore_off_page, GC_debug_generic_malloc, GC_debug_generic_malloc_inner, GC_debug_generic_malloc_inner_ignore_off_page, GC_debug_malloc_stubborn, GC_debug_malloc_atomic, GC_debug_malloc_uncollectable, GC_debug_malloc_atomic_uncollectable): Use SIZET_SAT_ADD (instead of "+" operator) to add extra bytes to lb value. * fnlz_mlc.c (GC_finalized_malloc): Likewise. * gcj_mlc.c (GC_debug_gcj_malloc): Likewise. * include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES, ADD_SLOP, ROUNDUP_PAGESIZE): Likewise. * include/private/gcconfig.h (GET_MEM): Likewise. * mallocx.c (GC_malloc_many, GC_memalign): Likewise. * os_dep.c (GC_wince_get_mem, GC_win32_get_mem): Likewise. * typd_mlc.c (GC_malloc_explicitly_typed, GC_malloc_explicitly_typed_ignore_off_page, GC_calloc_explicitly_typed): Likewise. * headers.c (GC_scratch_alloc): Change type of bytes_to_get from word to size_t (because ROUNDUP_PAGESIZE_IF_MMAP result type changed). * include/private/gc_priv.h: Include limits.h (unless SIZE_MAX already defined). * include/private/gc_priv.h (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Move from malloc.c file. * include/private/gc_priv.h (SIZET_SAT_ADD): New macro (defined before include gcconfig.h). * include/private/gc_priv.h (EXTRA_BYTES, GC_page_size): Change type to size_t. * os_dep.c (GC_page_size): Likewise. * include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES, ADD_SLOP, ROUNDUP_PAGESIZE): Add comment about the argument. * include/private/gcconfig.h (GET_MEM): Likewise. * include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES, ADD_SLOP, OBJ_SZ_TO_BLOCKS, ROUNDUP_PAGESIZE, ROUNDUP_PAGESIZE_IF_MMAP): Rename argument to "lb". * include/private/gc_priv.h (OBJ_SZ_TO_BLOCKS_CHECKED): New macro. * include/private/gcconfig.h (GC_win32_get_mem, GC_wince_get_mem, GC_unix_get_mem): Change argument type from word to int. * os_dep.c (GC_unix_mmap_get_mem, GC_unix_get_mem, GC_unix_sbrk_get_mem, GC_wince_get_mem, GC_win32_get_mem): Likewise. * malloc.c (GC_alloc_large_and_clear): Call OBJ_SZ_TO_BLOCKS only if no value wrap around is guaranteed. * malloc.c (GC_generic_malloc): Do not check for lb_rounded < lb case (because ROUNDED_UP_GRANULES and GRANULES_TO_BYTES guarantees no value wrap around). * mallocx.c (GC_generic_malloc_ignore_off_page): Likewise. * misc.c (GC_init_size_map): Change "i" local variable type from int to size_t. * os_dep.c (GC_write_fault_handler, catch_exception_raise): Likewise. * misc.c (GC_envfile_init): Cast len to size_t when passed to ROUNDUP_PAGESIZE_IF_MMAP. * os_dep.c (GC_setpagesize): Cast GC_sysinfo.dwPageSize and GETPAGESIZE() to size_t (when setting GC_page_size). * os_dep.c (GC_unix_mmap_get_mem, GC_unmap_start, GC_remove_protection): Expand ROUNDUP_PAGESIZE macro but without value wrap-around checking (the argument is of word type). * os_dep.c (GC_unix_mmap_get_mem): Replace -GC_page_size with ~GC_page_size+1 (because GC_page_size is unsigned); remove redundant cast to size_t. * os_dep.c (GC_unix_sbrk_get_mem): Add explicit cast of GC_page_size to SBRK_ARG_T. * os_dep.c (GC_wince_get_mem): Change type of res_bytes local variable to size_t. * typd_mlc.c: Do not include limits.h. * typd_mlc.c (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Remove (as defined in gc_priv.h now).
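The heart of the change is the saturating add, so that lb plus any debug/header extra can never wrap. A self-contained rendering of the SIZET_SAT_ADD idea named in the log above (my sketch of the macro, not a verbatim copy):

#include <limits.h>
#include <stddef.h>

#ifndef SIZE_MAX
# define SIZE_MAX ((size_t)-1)   /* mirrors the gc_priv.h fallback */
#endif

/* Saturating size_t addition: an impossible request becomes SIZE_MAX,
 * which the allocation path then rejects cleanly instead of wrapping
 * around to a small value and under-allocating. */
#define SIZET_SAT_ADD(a, b) \
    ((a) < SIZE_MAX - (b) ? (a) + (b) : SIZE_MAX)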
replace_interleave(PyStringObject *self, const char *to_s, Py_ssize_t to_len, Py_ssize_t maxcount) { char *self_s, *result_s; Py_ssize_t self_len, result_len; Py_ssize_t count, i; PyStringObject *result; self_len = PyString_GET_SIZE(self); /* 1 at the end plus 1 after every character; count = min(maxcount, self_len + 1) */ if (maxcount <= self_len) { count = maxcount; } else { /* Can't overflow: self_len + 1 <= maxcount <= PY_SSIZE_T_MAX. */ count = self_len + 1; } /* Check for overflow */ /* result_len = count * to_len + self_len; */ assert(count > 0); if (to_len > (PY_SSIZE_T_MAX - self_len) / count) { PyErr_SetString(PyExc_OverflowError, "replace string is too long"); return NULL; } result_len = count * to_len + self_len; if (! (result = (PyStringObject *) PyString_FromStringAndSize(NULL, result_len)) ) return NULL; self_s = PyString_AS_STRING(self); result_s = PyString_AS_STRING(result); /* TODO: special case single character, which doesn't need memcpy */ /* Lay the first one down (guaranteed this will occur) */ Py_MEMCPY(result_s, to_s, to_len); result_s += to_len; count -= 1; for (i=0; i<count; i++) { *result_s++ = *self_s++; Py_MEMCPY(result_s, to_s, to_len); result_s += to_len; } /* Copy the rest of the original string */ Py_MEMCPY(result_s, self_s, self_len-i); return result; }
0
[ "CWE-190" ]
cpython
c3c9db89273fabc62ea1b48389d9a3000c1c03ae
253,206,030,867,790,430,000,000,000,000,000,000,000
55
[2.7] bpo-30657: Check & prevent integer overflow in PyString_DecodeEscape (#2174)
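The overflow guard in replace_interleave() generalizes to any count*per+base sizing computation: prove it fits before allocating. A standalone C sketch of the same check:

#include <stddef.h>
#include <stdint.h>

/* Return 0 and store count*per + base if it fits in size_t,
 * -1 if the multiplication or addition would overflow. */
static int checked_total(size_t count, size_t per, size_t base,
                         size_t *total)
{
    if (count != 0 && per > (SIZE_MAX - base) / count)
        return -1;
    *total = count * per + base;
    return 0;
}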
usm_lookup_auth_type(const char *str) { return usm_lookup_alg_type(str, usm_auth_type ); }
0
[ "CWE-415" ]
net-snmp
5f881d3bf24599b90d67a45cae7a3eb099cd71c9
226,917,884,336,528,870,000,000,000,000,000,000,000
4
libsnmp, USM: Introduce a reference count in struct usmStateReference This patch fixes https://sourceforge.net/p/net-snmp/bugs/2956/.
do_query_fs_info (GVfsBackend *backend, GVfsJobQueryFsInfo *job, const char *filename, GFileInfo *info, GFileAttributeMatcher *attribute_matcher) { SoupMessage *msg; Multistatus ms; xmlNodeIter iter; gboolean res; GError *error; g_file_info_set_attribute_string (info, G_FILE_ATTRIBUTE_FILESYSTEM_TYPE, "webdav"); if (! (g_file_attribute_matcher_matches (attribute_matcher, G_FILE_ATTRIBUTE_FILESYSTEM_SIZE) || g_file_attribute_matcher_matches (attribute_matcher, G_FILE_ATTRIBUTE_FILESYSTEM_USED) || g_file_attribute_matcher_matches (attribute_matcher, G_FILE_ATTRIBUTE_FILESYSTEM_FREE))) { g_vfs_job_succeeded (G_VFS_JOB (job)); return; } msg = propfind_request_new (backend, filename, 0, fs_info_propnames); if (msg == NULL) { g_vfs_job_failed (G_VFS_JOB (job), G_IO_ERROR, G_IO_ERROR_FAILED, _("Could not create request")); return; } g_vfs_backend_dav_send_message (backend, msg); error = NULL; res = multistatus_parse (msg, &ms, &error); if (res == FALSE) { g_vfs_job_failed_from_error (G_VFS_JOB (job), error); g_error_free (error); g_object_unref (msg); return; } res = FALSE; multistatus_get_response_iter (&ms, &iter); while (xml_node_iter_next (&iter)) { MsResponse response; if (! multistatus_get_response (&iter, &response)) continue; if (response.is_target) { ms_response_to_fs_info (&response, info); res = TRUE; } ms_response_clear (&response); } multistatus_free (&ms); g_object_unref (msg); if (res) g_vfs_job_succeeded (G_VFS_JOB (job)); else g_vfs_job_failed (G_VFS_JOB (job), G_IO_ERROR, G_IO_ERROR_FAILED, _("Response invalid")); }
0
[]
gvfs
f81ff2108ab3b6e370f20dcadd8708d23f499184
232,843,501,204,057,970,000,000,000,000,000,000,000
81
dav: don't unescape the uri twice path_equal tries to unescape path before comparing. Unfortunately this function is used also for already unescaped paths. Therefore unescaping can fail. This commit reverts changes which was done in commit 50af53d and unescape just uris, which aren't unescaped yet. https://bugzilla.gnome.org/show_bug.cgi?id=743298
static inline struct sock *unix_find_socket_byname(struct net *net, struct sockaddr_un *sunname, int len, int type, unsigned int hash) { struct sock *s; spin_lock(&unix_table_lock); s = __unix_find_socket_byname(net, sunname, len, type, hash); if (s) sock_hold(s); spin_unlock(&unix_table_lock); return s; }
0
[ "CWE-287", "CWE-284" ]
linux
e0e3cea46d31d23dc40df0a49a7a2c04fe8edfea
128,809,843,114,673,510,000,000,000,000,000,000,000
14
af_netlink: force credentials passing [CVE-2012-3520] Pablo Neira Ayuso discovered that avahi and potentially NetworkManager accept spoofed Netlink messages because of a kernel bug. The kernel passes all-zero SCM_CREDENTIALS ancillary data to the receiver if the sender did not provide such data, instead of not including any such data at all or including the correct data from the peer (as it is the case with AF_UNIX). This bug was introduced in commit 16e572626961 (af_unix: dont send SCM_CREDENTIALS by default) This patch forces passing credentials for netlink, as before the regression. Another fix would be to not add SCM_CREDENTIALS in netlink messages if not provided by the sender, but it might break some programs. With help from Florian Weimer & Petr Matousek This issue is designated as CVE-2012-3520 Signed-off-by: Eric Dumazet <[email protected]> Cc: Petr Matousek <[email protected]> Cc: Florian Weimer <[email protected]> Cc: Pablo Neira Ayuso <[email protected]> Signed-off-by: David S. Miller <[email protected]>
Status FusedBatchNormGradShape(shape_inference::InferenceContext* c) { string data_format_str; TF_RETURN_IF_ERROR(c->GetAttr("data_format", &data_format_str)); TensorFormat data_format; if (!FormatFromString(data_format_str, &data_format)) { return errors::InvalidArgument("Invalid data format string: ", data_format_str); } const int rank = (data_format_str == "NDHWC" || data_format_str == "NCDHW") ? 5 : 4; ShapeHandle y_backprop; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), rank, &y_backprop)); ShapeHandle x; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), rank, &x)); bool is_training; TF_RETURN_IF_ERROR(c->GetAttr("is_training", &is_training)); int channel_dim_index = GetTensorFeatureDimIndex(rank, data_format); DimensionHandle channel_dim = c->Dim(y_backprop, channel_dim_index); TF_RETURN_IF_ERROR( c->Merge(channel_dim, c->Dim(x, channel_dim_index), &channel_dim)); // covers scale, mean (reserve_space_1), variance (reserve_space_2) for (int i = 2; i < 5; ++i) { ShapeHandle vec; TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 1, &vec)); TF_RETURN_IF_ERROR(c->Merge(channel_dim, c->Dim(vec, 0), &channel_dim)); } ShapeHandle x_backprop; TF_RETURN_IF_ERROR( c->ReplaceDim(y_backprop, channel_dim_index, channel_dim, &x_backprop)); c->set_output(0, x_backprop); c->set_output(1, c->Vector(channel_dim)); c->set_output(2, c->Vector(channel_dim)); c->set_output(3, c->Vector(0)); c->set_output(4, c->Vector(0)); return Status::OK(); }
0
[ "CWE-369" ]
tensorflow
8a793b5d7f59e37ac7f3cd0954a750a2fe76bad4
250,915,972,027,392,830,000,000,000,000,000,000,000
40
Prevent division by 0 in common shape functions. PiperOrigin-RevId: 387712197 Change-Id: Id25c7460e35b68aeeeac23b9a88e455b443ee149
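A minimal sketch of the guard this change adds, written in plain C rather than TensorFlow's shape-inference API; split_dim is a hypothetical helper.

    #include <errno.h>

    /* Hypothetical shape helper: validate the divisor before dividing, so a
     * zero-sized (possibly attacker-controlled) dimension yields an error
     * instead of a SIGFPE crash. */
    static int split_dim(long long total, long long parts, long long *out)
    {
        if (parts == 0)
            return -EINVAL;     /* reject the shape instead of dividing by 0 */
        *out = total / parts;
        return 0;
    }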
update_line (old, new, current_line, omax, nmax, inv_botlin) register char *old, *new; int current_line, omax, nmax, inv_botlin; { register char *ofd, *ols, *oe, *nfd, *nls, *ne; int temp, lendiff, wsatend, od, nd, twidth, o_cpos; int current_invis_chars; int col_lendiff, col_temp; int bytes_to_insert; int mb_cur_max = MB_CUR_MAX; #if defined (HANDLE_MULTIBYTE) mbstate_t ps_new, ps_old; int new_offset, old_offset; #endif /* If we're at the right edge of a terminal that supports xn, we're ready to wrap around, so do so. This fixes problems with knowing the exact cursor position and cut-and-paste with certain terminal emulators. In this calculation, TEMP is the physical screen position of the cursor. */ if (mb_cur_max > 1 && rl_byte_oriented == 0) temp = _rl_last_c_pos; else temp = _rl_last_c_pos - WRAP_OFFSET (_rl_last_v_pos, visible_wrap_offset); if (temp == _rl_screenwidth && _rl_term_autowrap && !_rl_horizontal_scroll_mode && _rl_last_v_pos == current_line - 1) { #if defined (HANDLE_MULTIBYTE) if (mb_cur_max > 1 && rl_byte_oriented == 0) { wchar_t wc; mbstate_t ps; int tempwidth, bytes; size_t ret; /* This fixes only double-column characters, but if the wrapped character consumes more than three columns, spaces will be inserted in the string buffer. */ if (current_line < line_state_visible->wbsize && line_state_visible->wrapped_line[current_line] > 0) _rl_clear_to_eol (line_state_visible->wrapped_line[current_line]); memset (&ps, 0, sizeof (mbstate_t)); ret = mbrtowc (&wc, new, mb_cur_max, &ps); if (MB_INVALIDCH (ret)) { tempwidth = 1; ret = 1; } else if (MB_NULLWCH (ret)) tempwidth = 0; else tempwidth = WCWIDTH (wc); if (tempwidth > 0) { int count, i; bytes = ret; for (count = 0; count < bytes; count++) putc (new[count], rl_outstream); _rl_last_c_pos = tempwidth; _rl_last_v_pos++; memset (&ps, 0, sizeof (mbstate_t)); ret = mbrtowc (&wc, old, mb_cur_max, &ps); if (ret != 0 && bytes != 0) { if (MB_INVALIDCH (ret)) ret = 1; memmove (old+bytes, old+ret, strlen (old+ret)); memcpy (old, new, bytes); /* Fix up indices if we copy data from one line to another */ omax += bytes - ret; for (i = current_line+1; i <= inv_botlin+1; i++) vis_lbreaks[i] += bytes - ret; } } else { putc (' ', rl_outstream); _rl_last_c_pos = 1; _rl_last_v_pos++; if (old[0] && new[0]) old[0] = new[0]; } } else #endif { if (new[0]) putc (new[0], rl_outstream); else putc (' ', rl_outstream); _rl_last_c_pos = 1; _rl_last_v_pos++; if (old[0] && new[0]) old[0] = new[0]; } } /* Find first difference. */ #if defined (HANDLE_MULTIBYTE) if (mb_cur_max > 1 && rl_byte_oriented == 0) { /* See if the old line is a subset of the new line, so that the only change is adding characters. */ temp = (omax < nmax) ? 
omax : nmax; if (memcmp (old, new, temp) == 0) /* adding at the end */ { new_offset = old_offset = temp; ofd = old + temp; nfd = new + temp; } else { memset (&ps_new, 0, sizeof(mbstate_t)); memset (&ps_old, 0, sizeof(mbstate_t)); if (omax == nmax && STREQN (new, old, omax)) { old_offset = omax; new_offset = nmax; ofd = old + omax; nfd = new + nmax; } else { new_offset = old_offset = 0; for (ofd = old, nfd = new; (ofd - old < omax) && *ofd && _rl_compare_chars(old, old_offset, &ps_old, new, new_offset, &ps_new); ) { old_offset = _rl_find_next_mbchar (old, old_offset, 1, MB_FIND_ANY); new_offset = _rl_find_next_mbchar (new, new_offset, 1, MB_FIND_ANY); ofd = old + old_offset; nfd = new + new_offset; } } } } else #endif for (ofd = old, nfd = new; (ofd - old < omax) && *ofd && (*ofd == *nfd); ofd++, nfd++) ; /* Move to the end of the screen line. ND and OD are used to keep track of the distance between ne and new and oe and old, respectively, to move a subtraction out of each loop. */ for (od = ofd - old, oe = ofd; od < omax && *oe; oe++, od++); for (nd = nfd - new, ne = nfd; nd < nmax && *ne; ne++, nd++); /* If no difference, continue to next line. */ if (ofd == oe && nfd == ne) return; #if defined (HANDLE_MULTIBYTE) if (mb_cur_max > 1 && rl_byte_oriented == 0 && _rl_utf8locale) { wchar_t wc; mbstate_t ps = { 0 }; int t; /* If the first character in the difference is a zero-width character, assume it's a combining character and back one up so the two base characters no longer compare equivalently. */ t = mbrtowc (&wc, ofd, mb_cur_max, &ps); if (t > 0 && UNICODE_COMBINING_CHAR (wc) && WCWIDTH (wc) == 0) { old_offset = _rl_find_prev_mbchar (old, ofd - old, MB_FIND_ANY); new_offset = _rl_find_prev_mbchar (new, nfd - new, MB_FIND_ANY); ofd = old + old_offset; /* equal by definition */ nfd = new + new_offset; } } #endif wsatend = 1; /* flag for trailing whitespace */ #if defined (HANDLE_MULTIBYTE) if (mb_cur_max > 1 && rl_byte_oriented == 0) { ols = old + _rl_find_prev_mbchar (old, oe - old, MB_FIND_ANY); nls = new + _rl_find_prev_mbchar (new, ne - new, MB_FIND_ANY); while ((ols > ofd) && (nls > nfd)) { memset (&ps_old, 0, sizeof (mbstate_t)); memset (&ps_new, 0, sizeof (mbstate_t)); #if 0 /* On advice from [email protected] */ _rl_adjust_point (old, ols - old, &ps_old); _rl_adjust_point (new, nls - new, &ps_new); #endif if (_rl_compare_chars (old, ols - old, &ps_old, new, nls - new, &ps_new) == 0) break; if (*ols == ' ') wsatend = 0; ols = old + _rl_find_prev_mbchar (old, ols - old, MB_FIND_ANY); nls = new + _rl_find_prev_mbchar (new, nls - new, MB_FIND_ANY); } } else { #endif /* HANDLE_MULTIBYTE */ ols = oe - 1; /* find last same */ nls = ne - 1; while ((ols > ofd) && (nls > nfd) && (*ols == *nls)) { if (*ols != ' ') wsatend = 0; ols--; nls--; } #if defined (HANDLE_MULTIBYTE) } #endif if (wsatend) { ols = oe; nls = ne; } #if defined (HANDLE_MULTIBYTE) /* This may not work for stateful encoding, but who cares? To handle stateful encoding properly, we have to scan each string from the beginning and compare. */ else if (_rl_compare_chars (ols, 0, NULL, nls, 0, NULL) == 0) #else else if (*ols != *nls) #endif { if (*ols) /* don't step past the NUL */ { if (mb_cur_max > 1 && rl_byte_oriented == 0) ols = old + _rl_find_next_mbchar (old, ols - old, 1, MB_FIND_ANY); else ols++; } if (*nls) { if (mb_cur_max > 1 && rl_byte_oriented == 0) nls = new + _rl_find_next_mbchar (new, nls - new, 1, MB_FIND_ANY); else nls++; } } /* count of invisible characters in the current invisible line. 
*/ current_invis_chars = W_OFFSET (current_line, wrap_offset); if (_rl_last_v_pos != current_line) { _rl_move_vert (current_line); /* We have moved up to a new screen line. This line may or may not have invisible characters on it, but we do our best to recalculate visible_wrap_offset based on what we know. */ if (current_line == 0) visible_wrap_offset = prompt_invis_chars_first_line; /* XXX */ if ((mb_cur_max == 1 || rl_byte_oriented) && current_line == 0 && visible_wrap_offset) _rl_last_c_pos += visible_wrap_offset; } /* If this is the first line and there are invisible characters in the prompt string, and the prompt string has not changed, and the current cursor position is before the last invisible character in the prompt, and the index of the character to move to is past the end of the prompt string, then redraw the entire prompt string. We can only do this reliably if the terminal supports a `cr' capability. This can also happen if the prompt string has changed, and the first difference in the line is in the middle of the prompt string, after a sequence of invisible characters (worst case) and before the end of the prompt. In this case, we have to redraw the entire prompt string so that the entire sequence of invisible characters is drawn. We need to handle the worst case, when the difference is after (or in the middle of) a sequence of invisible characters that changes the text color and before the sequence that restores the text color to normal. Then we have to make sure that the lines still differ -- if they don't, we can return immediately. This is not an efficiency hack -- there is a problem with redrawing portions of the prompt string if they contain terminal escape sequences (like drawing the `unbold' sequence without a corresponding `bold') that manifests itself on certain terminals. */ lendiff = local_prompt_len; if (lendiff > nmax) lendiff = nmax; od = ofd - old; /* index of first difference in visible line */ nd = nfd - new; /* nd, od are buffer indexes */ if (current_line == 0 && !_rl_horizontal_scroll_mode && _rl_term_cr && lendiff > prompt_visible_length && _rl_last_c_pos > 0 && (((od > 0 || nd > 0) && (od <= prompt_last_invisible || nd <= prompt_last_invisible)) || ((od >= lendiff) && _rl_last_c_pos < PROMPT_ENDING_INDEX))) { #if defined (__MSDOS__) putc ('\r', rl_outstream); #else tputs (_rl_term_cr, 1, _rl_output_character_function); #endif if (modmark) _rl_output_some_chars ("*", 1); _rl_output_some_chars (local_prompt, lendiff); if (mb_cur_max > 1 && rl_byte_oriented == 0) { /* We take wrap_offset into account here so we can pass correct information to _rl_move_cursor_relative. */ _rl_last_c_pos = _rl_col_width (local_prompt, 0, lendiff, 1) - wrap_offset + modmark; cpos_adjusted = 1; } else _rl_last_c_pos = lendiff + modmark; /* Now if we have printed the prompt string because the first difference was within the prompt, see if we need to recompute where the lines differ. Check whether where we are now is past the last place where the old and new lines are the same and short-circuit now if we are. */ if ((od <= prompt_last_invisible || nd <= prompt_last_invisible) && omax == nmax && lendiff > (ols-old) && lendiff > (nls-new)) return; /* XXX - we need to fix up our calculations if we are now past the old ofd/nfd and the prompt length (or line length) has changed. We punt on the problem and do a dumb update. 
We'd like to be able to just output the prompt from the beginning of the line up to the first difference, but you don't know the number of invisible characters in that case. This needs a lot of work to be efficient. */ if ((od <= prompt_last_invisible || nd <= prompt_last_invisible)) { nfd = new + lendiff; /* number of characters we output above */ nd = lendiff; /* Do a dumb update and return */ temp = ne - nfd; if (temp > 0) { _rl_output_some_chars (nfd, temp); if (mb_cur_max > 1 && rl_byte_oriented == 0) _rl_last_c_pos += _rl_col_width (new, nd, ne - new, 1); else _rl_last_c_pos += temp; } if (nmax < omax) goto clear_rest_of_line; /* XXX */ else return; } } o_cpos = _rl_last_c_pos; /* When this function returns, _rl_last_c_pos is correct, and an absolute cursor position in multibyte mode, but a buffer index when not in a multibyte locale. */ _rl_move_cursor_relative (od, old); #if defined (HANDLE_MULTIBYTE) /* We need to indicate that the cursor position is correct in the presence of invisible characters in the prompt string. Let's see if setting this when we make sure we're at the end of the drawn prompt string works. */ if (current_line == 0 && mb_cur_max > 1 && rl_byte_oriented == 0 && (_rl_last_c_pos > 0 || o_cpos > 0) && _rl_last_c_pos == prompt_physical_chars) cpos_adjusted = 1; #endif /* if (len (new) > len (old)) lendiff == difference in buffer (bytes) col_lendiff == difference on screen (columns) When not using multibyte characters, these are equal */ lendiff = (nls - nfd) - (ols - ofd); if (mb_cur_max > 1 && rl_byte_oriented == 0) col_lendiff = _rl_col_width (new, nfd - new, nls - new, 1) - _rl_col_width (old, ofd - old, ols - old, 1); else col_lendiff = lendiff; /* If we are changing the number of invisible characters in a line, and the spot of first difference is before the end of the invisible chars, lendiff needs to be adjusted. */ if (current_line == 0 && /* !_rl_horizontal_scroll_mode && */ current_invis_chars != visible_wrap_offset) { if (mb_cur_max > 1 && rl_byte_oriented == 0) { lendiff += visible_wrap_offset - current_invis_chars; col_lendiff += visible_wrap_offset - current_invis_chars; } else { lendiff += visible_wrap_offset - current_invis_chars; col_lendiff = lendiff; } } /* We use temp as a count of the number of bytes from the first difference to the end of the new line. col_temp is the corresponding number of screen columns. A `dumb' update moves to the spot of first difference and writes TEMP bytes. */ /* Insert (diff (len (old), len (new)) ch. */ temp = ne - nfd; if (mb_cur_max > 1 && rl_byte_oriented == 0) col_temp = _rl_col_width (new, nfd - new, ne - new, 1); else col_temp = temp; /* how many bytes from the new line buffer to write to the display */ bytes_to_insert = nls - nfd; /* col_lendiff > 0 if we are adding characters to the line */ if (col_lendiff > 0) /* XXX - was lendiff */ { /* Non-zero if we're increasing the number of lines. */ int gl = current_line >= _rl_vis_botlin && inv_botlin > _rl_vis_botlin; /* If col_lendiff is > 0, implying that the new string takes up more screen real estate than the old, but lendiff is < 0, meaning that it takes fewer bytes, we need to just output the characters starting from the first difference. These will overwrite what is on the display, so there's no reason to do a smart update. This can really only happen in a multibyte environment. 
*/ if (lendiff < 0) { _rl_output_some_chars (nfd, temp); _rl_last_c_pos += col_temp; /* XXX - was _rl_col_width (nfd, 0, temp, 1); */ /* If nfd begins before any invisible characters in the prompt, adjust _rl_last_c_pos to account for wrap_offset and set cpos_adjusted to let the caller know. */ if (current_line == 0 && displaying_prompt_first_line && wrap_offset && ((nfd - new) <= prompt_last_invisible)) { _rl_last_c_pos -= wrap_offset; cpos_adjusted = 1; } return; } /* Sometimes it is cheaper to print the characters rather than use the terminal's capabilities. If we're growing the number of lines, make sure we actually cause the new line to wrap around on auto-wrapping terminals. */ else if (_rl_terminal_can_insert && ((2 * col_temp) >= col_lendiff || _rl_term_IC) && (!_rl_term_autowrap || !gl)) { /* If lendiff > prompt_visible_length and _rl_last_c_pos == 0 and _rl_horizontal_scroll_mode == 1, inserting the characters with _rl_term_IC or _rl_term_ic will screw up the screen because of the invisible characters. We need to just draw them. */ /* The same thing happens if we're trying to draw before the last invisible character in the prompt string or we're increasing the number of invisible characters in the line and we're not drawing the entire prompt string. */ if (*ols && ((_rl_horizontal_scroll_mode && _rl_last_c_pos == 0 && lendiff > prompt_visible_length && current_invis_chars > 0) == 0) && (((mb_cur_max > 1 && rl_byte_oriented == 0) && current_line == 0 && wrap_offset && ((nfd - new) <= prompt_last_invisible) && (col_lendiff < prompt_visible_length)) == 0) && (visible_wrap_offset >= current_invis_chars)) { open_some_spaces (col_lendiff); _rl_output_some_chars (nfd, bytes_to_insert); if (mb_cur_max > 1 && rl_byte_oriented == 0) _rl_last_c_pos += _rl_col_width (nfd, 0, bytes_to_insert, 1); else _rl_last_c_pos += bytes_to_insert; } else if ((mb_cur_max == 1 || rl_byte_oriented != 0) && *ols == 0 && lendiff > 0) { /* At the end of a line the characters do not have to be "inserted". They can just be placed on the screen. */ _rl_output_some_chars (nfd, temp); _rl_last_c_pos += col_temp; return; } else /* just write from first difference to end of new line */ { _rl_output_some_chars (nfd, temp); _rl_last_c_pos += col_temp; /* If nfd begins before the last invisible character in the prompt, adjust _rl_last_c_pos to account for wrap_offset and set cpos_adjusted to let the caller know. */ if ((mb_cur_max > 1 && rl_byte_oriented == 0) && current_line == 0 && displaying_prompt_first_line && wrap_offset && ((nfd - new) <= prompt_last_invisible)) { _rl_last_c_pos -= wrap_offset; cpos_adjusted = 1; } return; } if (bytes_to_insert > lendiff) { /* If nfd begins before the last invisible character in the prompt, adjust _rl_last_c_pos to account for wrap_offset and set cpos_adjusted to let the caller know. */ if ((mb_cur_max > 1 && rl_byte_oriented == 0) && current_line == 0 && displaying_prompt_first_line && wrap_offset && ((nfd - new) <= prompt_last_invisible)) { _rl_last_c_pos -= wrap_offset; cpos_adjusted = 1; } } } else { /* cannot insert chars, write to EOL */ _rl_output_some_chars (nfd, temp); _rl_last_c_pos += col_temp; /* If we're in a multibyte locale and were before the last invisible char in the current line (which implies we just output some invisible characters) we need to adjust _rl_last_c_pos, since it represents a physical character position. */ /* The current_line*rl_screenwidth+prompt_invis_chars_first_line is a crude attempt to compute how far into the new line buffer we are. 
It doesn't work well in the face of multibyte characters and needs to be rethought. XXX */ if ((mb_cur_max > 1 && rl_byte_oriented == 0) && current_line == prompt_last_screen_line && wrap_offset && displaying_prompt_first_line && wrap_offset != prompt_invis_chars_first_line && ((nfd-new) < (prompt_last_invisible-(current_line*_rl_screenwidth+prompt_invis_chars_first_line)))) { _rl_last_c_pos -= wrap_offset - prompt_invis_chars_first_line; cpos_adjusted = 1; } } } else /* Delete characters from line. */ { /* If possible and inexpensive to use terminal deletion, then do so. */ if (_rl_term_dc && (2 * col_temp) >= -col_lendiff) { /* If all we're doing is erasing the invisible characters in the prompt string, don't bother. It screws up the assumptions about what's on the screen. */ if (_rl_horizontal_scroll_mode && _rl_last_c_pos == 0 && displaying_prompt_first_line && -lendiff == visible_wrap_offset) col_lendiff = 0; /* If we have moved lmargin and we're shrinking the line, we've already moved the cursor to the first character of the new line, so deleting -col_lendiff characters will mess up the cursor position calculation */ if (_rl_horizontal_scroll_mode && displaying_prompt_first_line == 0 && col_lendiff && _rl_last_c_pos < -col_lendiff) col_lendiff = 0; if (col_lendiff) delete_chars (-col_lendiff); /* delete (diff) characters */ /* Copy (new) chars to screen from first diff to last match, overwriting what is there. */ if (bytes_to_insert > 0) { /* If nfd begins at the prompt, or before the invisible characters in the prompt, we need to adjust _rl_last_c_pos in a multibyte locale to account for the wrap offset and set cpos_adjusted accordingly. */ _rl_output_some_chars (nfd, bytes_to_insert); if (mb_cur_max > 1 && rl_byte_oriented == 0) { _rl_last_c_pos += _rl_col_width (nfd, 0, bytes_to_insert, 1); if (current_line == 0 && wrap_offset && displaying_prompt_first_line && _rl_last_c_pos > wrap_offset && ((nfd - new) <= prompt_last_invisible)) { _rl_last_c_pos -= wrap_offset; cpos_adjusted = 1; } } else _rl_last_c_pos += bytes_to_insert; /* XXX - we only want to do this if we are at the end of the line so we move there with _rl_move_cursor_relative */ if (_rl_horizontal_scroll_mode && ((oe-old) > (ne-new))) { _rl_move_cursor_relative (ne-new, new); goto clear_rest_of_line; } } } /* Otherwise, print over the existing material. */ else { if (temp > 0) { /* If nfd begins at the prompt, or before the invisible characters in the prompt, we need to adjust _rl_last_c_pos in a multibyte locale to account for the wrap offset and set cpos_adjusted accordingly. */ _rl_output_some_chars (nfd, temp); _rl_last_c_pos += col_temp; /* XXX */ if (mb_cur_max > 1 && rl_byte_oriented == 0) { if (current_line == 0 && wrap_offset && displaying_prompt_first_line && _rl_last_c_pos > wrap_offset && ((nfd - new) <= prompt_last_invisible)) { _rl_last_c_pos -= wrap_offset; cpos_adjusted = 1; } } } clear_rest_of_line: lendiff = (oe - old) - (ne - new); if (mb_cur_max > 1 && rl_byte_oriented == 0) col_lendiff = _rl_col_width (old, 0, oe - old, 1) - _rl_col_width (new, 0, ne - new, 1); else col_lendiff = lendiff; /* If we've already printed over the entire width of the screen, including the old material, then col_lendiff doesn't matter and space_to_eol will insert too many spaces. 
XXX - maybe we should adjust col_lendiff based on the difference between _rl_last_c_pos and _rl_screenwidth */ if (col_lendiff && ((mb_cur_max == 1 || rl_byte_oriented) || (_rl_last_c_pos < _rl_screenwidth))) { if (_rl_term_autowrap && current_line < inv_botlin) space_to_eol (col_lendiff); else _rl_clear_to_eol (col_lendiff); } } } }
0
[]
bash
955543877583837c85470f7fb8a97b7aa8d45e6c
219,417,445,363,121,500,000,000,000,000,000,000,000
632
bash-4.4-rc2 release
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm, struct kvm_coalesced_mmio_zone *zone) { int ret; struct kvm_coalesced_mmio_dev *dev; if (zone->pio != 1 && zone->pio != 0) return -EINVAL; dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL_ACCOUNT); if (!dev) return -ENOMEM; kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops); dev->kvm = kvm; dev->zone = *zone; mutex_lock(&kvm->slots_lock); ret = kvm_io_bus_register_dev(kvm, zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, zone->addr, zone->size, &dev->dev); if (ret < 0) goto out_free_dev; list_add_tail(&dev->list, &kvm->coalesced_zones); mutex_unlock(&kvm->slots_lock); return 0; out_free_dev: mutex_unlock(&kvm->slots_lock); kfree(dev); return ret; }
0
[ "CWE-787" ]
kvm
b60fe990c6b07ef6d4df67bc0530c7c90a62623a
320,830,746,629,620,600,000,000,000,000,000,000,000
35
KVM: coalesced_mmio: add bounds checking The first/last indexes are typically shared with a user app. The app can change the 'last' index that the kernel uses to store the next result. This change sanity checks the index before using it for writing to a potentially arbitrary address. This fixes CVE-2019-14821. Cc: [email protected] Fixes: 5f94c1741bdc ("KVM: Add coalesced MMIO support (common part)") Signed-off-by: Matt Delco <[email protected]> Signed-off-by: Jim Mattson <[email protected]> Reported-by: [email protected] [Use READ_ONCE. - Paolo] Signed-off-by: Paolo Bonzini <[email protected]>
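A hedged C sketch of the pattern described above (not the actual KVM patch): snapshot the index shared with userspace exactly once, then bounds-check it before using it as a write offset; in the kernel the single read would use READ_ONCE, approximated here with a volatile read.

    /* Hypothetical ring writer: ring->last lives in memory shared with a
     * user process, which may rewrite it at any time. */
    struct mmio_slot { unsigned int value; };
    struct mmio_ring {
        unsigned int last;                  /* shared with, and writable by, userspace */
        struct mmio_slot slots[64];
    };

    static int ring_push(struct mmio_ring *ring, unsigned int value)
    {
        unsigned int insert = *(volatile unsigned int *)&ring->last; /* snapshot once */
        if (insert >= 64)                   /* reject an out-of-range shared index */
            return -1;
        ring->slots[insert].value = value;  /* offset is now known to be in bounds */
        ring->last = (insert + 1) % 64;
        return 0;
    }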
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk, const struct in6_addr *addr) { return NULL; }
0
[ "CWE-416", "CWE-284", "CWE-264" ]
linux
45f6fad84cc305103b28d73482b344d7f5b76f39
66,028,485,355,124,000,000,000,000,000,000,000,000
5
ipv6: add complete rcu protection around np->opt This patch addresses multiple problems: UDP/RAW sendmsg() needs to get a stable struct ipv6_txoptions while the socket is not locked, since other threads can change np->opt concurrently. Dmitry posted a syzkaller (http://github.com/google/syzkaller) program demonstrating a use-after-free. Starting with TCP/DCCP lockless listeners, tcp_v6_syn_recv_sock() and dccp_v6_request_recv_sock() also need to use RCU protection to dereference np->opt once (before calling ipv6_dup_options()). This patch adds full RCU protection to np->opt. Reported-by: Dmitry Vyukov <[email protected]> Signed-off-by: Eric Dumazet <[email protected]> Acked-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: David S. Miller <[email protected]>
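A kernel-style sketch of the read-side pattern this patch introduces; it uses the real primitives rcu_read_lock(), rcu_dereference() and ipv6_dup_options(), but txopt_get_copy is a hypothetical name and the fragment is not compilable outside a kernel tree.

    /* Readers dereference np->opt exactly once under rcu_read_lock() and
     * duplicate it, so a writer swapping the pointer concurrently cannot
     * cause a use-after-free. */
    static struct ipv6_txoptions *txopt_get_copy(struct ipv6_pinfo *np, struct sock *sk)
    {
        struct ipv6_txoptions *opt;

        rcu_read_lock();
        opt = rcu_dereference(np->opt);      /* single read of the shared pointer */
        if (opt)
            opt = ipv6_dup_options(sk, opt); /* private copy, usable after unlock */
        rcu_read_unlock();
        return opt;
    }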
size_t olm_unpickle_pk_decryption( OlmPkDecryption * decryption, void const * key, size_t key_length, void *pickled, size_t pickled_length, void *pubkey, size_t pubkey_length ) { OlmPkDecryption & object = *decryption; if (pubkey != NULL && pubkey_length < olm_pk_key_length()) { object.last_error = OlmErrorCode::OLM_OUTPUT_BUFFER_TOO_SMALL; return std::size_t(-1); } std::uint8_t * const pos = reinterpret_cast<std::uint8_t *>(pickled); std::size_t raw_length = _olm_enc_input( reinterpret_cast<std::uint8_t const *>(key), key_length, pos, pickled_length, &object.last_error ); if (raw_length == std::size_t(-1)) { return std::size_t(-1); } std::uint8_t * const end = pos + raw_length; /* On success unpickle will return (pos + raw_length). If unpickling * terminates too soon then it will return a pointer before * (pos + raw_length). On error unpickle will return (pos + raw_length + 1). */ if (end != unpickle(pos, end + 1, object)) { if (object.last_error == OlmErrorCode::OLM_SUCCESS) { object.last_error = OlmErrorCode::OLM_CORRUPTED_PICKLE; } return std::size_t(-1); } if (pubkey != NULL) { olm::encode_base64( (const uint8_t *)object.key_pair.public_key.public_key, CURVE25519_KEY_LENGTH, (uint8_t *)pubkey ); } return pickled_length; }
0
[ "CWE-787" ]
olm
ccc0d122ee1b4d5e5ca4ec1432086be17d5f901b
312,045,742,178,222,820,000,000,000,000,000,000,000
39
olm_pk_decrypt: Ensure inputs are of correct length.
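A minimal sketch of the rule this commit enforces, assuming a fixed 32-byte key for illustration: verify the caller's output buffer is large enough before writing a fixed-length result into it; copy_key and KEY_LEN are illustrative names.

    #include <stddef.h>
    #include <string.h>

    #define KEY_LEN 32  /* e.g. a curve25519 public key */

    /* Hypothetical decoder: fail early if 'out' cannot hold the result,
     * instead of writing KEY_LEN bytes past a short buffer. */
    static int copy_key(unsigned char *out, size_t out_len, const unsigned char *key)
    {
        if (out_len < KEY_LEN)
            return -1;          /* OUTPUT_BUFFER_TOO_SMALL */
        memcpy(out, key, KEY_LEN);
        return 0;
    }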