Dataset schema:
  func        string   (length 0 to 484k)     function source code
  target      int64    (0 to 1)               vulnerability label
  cwe         list     (length 0 to 4)        CWE identifiers
  project     string   (799 distinct values)  source project
  commit_id   string   (length 40)            commit SHA-1
  hash        float64  (1,215,700,430,453,689,100,000,000 to 340,281,914,521,452,260,000,000,000,000)
  size        int64    (1 to 24k)             function size
  message     string   (length 0 to 13.3k)    commit message
int sa_get_record_timestamp_struct(uint64_t l_flags, struct record_header *record_hdr, struct tm *rectime) { struct tm *ltm; int rc = 0; /* * Fill generic rectime structure in local time. * Done so that we have some default values. */ ltm = localtime_r((const time_t *) &(record_hdr->ust_time), rectime); if (!PRINT_LOCAL_TIME(l_flags) && !PRINT_TRUE_TIME(l_flags)) { /* * Get time in UTC * (the user doesn't want local time nor time of file's creator). */ ltm = gmtime_r((const time_t *) &(record_hdr->ust_time), rectime); } if (!ltm) { rc = 1; } if (PRINT_TRUE_TIME(l_flags)) { /* Time of file's creator */ rectime->tm_hour = record_hdr->hour; rectime->tm_min = record_hdr->minute; rectime->tm_sec = record_hdr->second; } return rc; }
0
[ "CWE-415" ]
sysstat
a5c8abd4a481ee6e27a3acf00e6d9b0f023e20ed
143,499,772,908,278,040,000,000,000,000,000,000,000
33
Fix #242: Double free in check_file_actlst() Avoid freeing buffer() twice. Signed-off-by: Sebastien GODARD <[email protected]>
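A minimal C sketch of the double-free guard this commit describes: free the buffer once and clear the pointer so any later free is a no-op (names are hypothetical, not the sysstat code).

    #include <stdlib.h>

    static void check_file_demo(void)
    {
        char *buffer = malloc(1024);
        if (!buffer)
            return;
        /* ... parse the activity list into buffer ... */
        free(buffer);
        buffer = NULL;   /* a second free(buffer) is now harmless */
    }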
static void iommu_set_root_entry(struct intel_iommu *iommu) { u64 addr; u32 sts; unsigned long flag; addr = virt_to_phys(iommu->root_entry); if (ecs_enabled(iommu)) addr |= DMA_RTADDR_RTT; raw_spin_lock_irqsave(&iommu->register_lock, flag); dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr); writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); /* Make sure hardware complete it */ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_RTPS), sts); raw_spin_unlock_irqrestore(&iommu->register_lock, flag); }
0
[]
linux
fb58fdcd295b914ece1d829b24df00a17a9624bc
260,580,530,492,371,940,000,000,000,000,000,000,000
21
iommu/vt-d: Do not enable ATS for untrusted devices Currently Linux automatically enables ATS (Address Translation Service) for any device that supports it (and IOMMU is turned on). ATS is used to accelerate DMA access as the device can cache translations locally so there is no need to do full translation on IOMMU side. However, as pointed out in [1] ATS can be used to bypass IOMMU based security completely by simply sending PCIe read/write transaction with AT (Address Translation) field set to "translated". To mitigate this modify the Intel IOMMU code so that it does not enable ATS for any device that is marked as being untrusted. In case this turns out to cause performance issues we may selectively allow ATS based on user decision but currently use big hammer and disable it completely to be on the safe side. [1] https://www.repository.cam.ac.uk/handle/1810/274352 Signed-off-by: Mika Westerberg <[email protected]> Reviewed-by: Ashok Raj <[email protected]> Reviewed-by: Joerg Roedel <[email protected]> Acked-by: Joerg Roedel <[email protected]>
xfs_agfl_read_verify( struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_mount; xfs_failaddr_t fa; /* * There is no verification of non-crc AGFLs because mkfs does not * initialise the AGFL to zero or NULL. Hence the only valid part of the * AGFL is what the AGF says is active. We can't get to the AGF, so we * can't verify just those entries are valid. */ if (!xfs_sb_version_hascrc(&mp->m_sb)) return; if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF)) xfs_verifier_error(bp, -EFSBADCRC, __this_address); else { fa = xfs_agfl_verify(bp); if (fa) xfs_verifier_error(bp, -EFSCORRUPTED, fa); } }
0
[ "CWE-400", "CWE-703", "CWE-835" ]
linux
d0c7feaf87678371c2c09b3709400be416b2dc62
83,603,769,948,588,520,000,000,000,000,000,000,000
23
xfs: add agf freeblocks verify in xfs_agf_verify We recently used fuzz(hydra) to test XFS and automatically generate tmp.img(XFS v5 format, but some metadata is wrong) xfs_repair information(just one AG): agf_freeblks 0, counted 3224 in ag 0 agf_longest 536874136, counted 3224 in ag 0 sb_fdblocks 613, counted 3228 Test as follows: mount tmp.img tmpdir cp file1M tmpdir sync In 4.19-stable, sync will get stuck; the reason is: xfs_mountfs xfs_check_summary_counts if ((!xfs_sb_version_haslazysbcount(&mp->m_sb) || XFS_LAST_UNMOUNT_WAS_CLEAN(mp)) && !xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS)) return 0; -->just return, incore sb_fdblocks is still 613 xfs_initialize_perag_data cp file1M tmpdir -->ok(write file to pagecache) sync -->stuck(write pagecache to disk) xfs_map_blocks xfs_iomap_write_allocate while (count_fsb != 0) { nimaps = 0; while (nimaps == 0) { --> endless loop nimaps = 1; xfs_bmapi_write(..., &nimaps) --> nimaps becomes 0 again xfs_bmapi_write xfs_bmap_alloc xfs_bmap_btalloc xfs_alloc_vextent xfs_alloc_fix_freelist xfs_alloc_space_available -->fail(agf_freeblks is 0) In linux-next, sync does not get stuck, because commit c2b3164320b5 ("xfs: use the latest extent at writeback delalloc conversion time") removed the above while loop; dmesg is as follows: [ 55.250114] XFS (loop0): page discard on page ffffea0008bc7380, inode 0x1b0c, offset 0. Users do not know why this page is discarded; the better solution is: 1. Like xfs_repair, make sure sb_fdblocks is equal to counted (xfs_initialize_perag_data did this, which is not called at this mount) 2. Add agf verify; if it fails, tell users to repair This patch uses the second solution. Signed-off-by: Zheng Bin <[email protected]> Signed-off-by: Ren Xudong <[email protected]> Reviewed-by: Darrick J. Wong <[email protected]> Signed-off-by: Darrick J. Wong <[email protected]>
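A hedged sketch of the kind of plausibility check the patch adds to the AGF verifier: reject on-disk counters that cannot be valid so the user is told to run repair (field and bound names are illustrative, not the real xfs_agf_verify code).

    /* An AGF is corrupt if its free-block counters are impossible. */
    static int agf_counters_plausible(unsigned int agf_freeblks,
                                      unsigned int agf_longest,
                                      unsigned int ag_length)
    {
        if (agf_freeblks > ag_length)
            return 0;   /* more free blocks than the AG holds */
        if (agf_longest > agf_freeblks)
            return 0;   /* longest free extent exceeds the total */
        return 1;
    }

Note how this catches the fuzzed image above, where agf_longest (536874136) vastly exceeds agf_freeblks (0).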
static Image *ReadTEXTImage(const ImageInfo *image_info, ExceptionInfo *exception) { char filename[MaxTextExtent], geometry[MaxTextExtent], *p, text[MaxTextExtent]; DrawInfo *draw_info; Image *image, *texture; MagickBooleanType status; PointInfo delta; RectangleInfo page; ssize_t offset; TypeMetric metrics; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } (void) memset(text,0,sizeof(text)); (void) ReadBlobString(image,text); /* Set the page geometry. */ delta.x=DefaultResolution; delta.y=DefaultResolution; if ((image->x_resolution == 0.0) || (image->y_resolution == 0.0)) { GeometryInfo geometry_info; MagickStatusType flags; flags=ParseGeometry(PSDensityGeometry,&geometry_info); image->x_resolution=geometry_info.rho; image->y_resolution=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->y_resolution=image->x_resolution; } page.width=612; page.height=792; page.x=43; page.y=43; if (image_info->page != (char *) NULL) (void) ParseAbsoluteGeometry(image_info->page,&page); /* Initialize Image structure. */ image->columns=(size_t) floor((((double) page.width*image->x_resolution)/ delta.x)+0.5); image->rows=(size_t) floor((((double) page.height*image->y_resolution)/ delta.y)+0.5); status=SetImageExtent(image,image->columns,image->rows); if (status != MagickFalse) status=ResetImagePixels(image,&image->exception); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } image->page.x=0; image->page.y=0; texture=(Image *) NULL; if (image_info->texture != (char *) NULL) { ImageInfo *read_info; read_info=CloneImageInfo(image_info); SetImageInfoBlob(read_info,(void *) NULL,0); (void) CopyMagickString(read_info->filename,image_info->texture, MaxTextExtent); texture=ReadImage(read_info,exception); read_info=DestroyImageInfo(read_info); } /* Annotate the text image. */ (void) SetImageBackgroundColor(image); draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL); (void) CloneString(&draw_info->text,image_info->filename); (void) FormatLocaleString(geometry,MaxTextExtent,"%gx%g%+g%+g",(double) image->columns,(double) image->rows,(double) page.x,(double) page.y); (void) CloneString(&draw_info->geometry,geometry); status=GetTypeMetrics(image,draw_info,&metrics); if (status == MagickFalse) { draw_info=DestroyDrawInfo(draw_info); ThrowReaderException(TypeError,"UnableToGetTypeMetrics"); } page.y=(ssize_t) ceil((double) page.y+metrics.ascent-0.5); (void) FormatLocaleString(geometry,MaxTextExtent,"%gx%g%+g%+g",(double) image->columns,(double) image->rows,(double) page.x,(double) page.y); (void) CloneString(&draw_info->geometry,geometry); (void) CopyMagickString(filename,image_info->filename,MaxTextExtent); if (*draw_info->text != '\0') *draw_info->text='\0'; p=text; for (offset=2*page.y; p != (char *) NULL; ) { /* Annotate image with text. */ (void) ConcatenateString(&draw_info->text,text); (void) ConcatenateString(&draw_info->text,"\n"); offset+=(ssize_t) (metrics.ascent-metrics.descent); if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) offset, image->rows); if (status == MagickFalse) break; } p=ReadBlobString(image,text); if ((offset < (ssize_t) image->rows) && (p != (char *) NULL)) continue; if (texture != (Image *) NULL) { MagickProgressMonitor progress_monitor; progress_monitor=SetImageProgressMonitor(image, (MagickProgressMonitor) NULL,image->client_data); (void) TextureImage(image,texture); (void) SetImageProgressMonitor(image,progress_monitor, image->client_data); } (void) AnnotateImage(image,draw_info); if (p == (char *) NULL) break; /* Page is full-- allocate next image structure. */ *draw_info->text='\0'; offset=2*page.y; AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } image->next->columns=image->columns; image->next->rows=image->rows; image=SyncNextImageInList(image); (void) CopyMagickString(image->filename,filename,MaxTextExtent); (void) SetImageBackgroundColor(image); status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; } if (texture != (Image *) NULL) { MagickProgressMonitor progress_monitor; progress_monitor=SetImageProgressMonitor(image, (MagickProgressMonitor) NULL,image->client_data); (void) TextureImage(image,texture); (void) SetImageProgressMonitor(image,progress_monitor,image->client_data); } (void) AnnotateImage(image,draw_info); if (texture != (Image *) NULL) texture=DestroyImage(texture); draw_info=DestroyDrawInfo(draw_info); (void) CloseBlob(image); return(GetFirstImageInList(image)); }
0
[ "CWE-401" ]
ImageMagick6
210474b2fac6a661bfa7ed563213920e93e76395
108,891,002,703,829,930,000,000,000,000,000,000,000
198
Fix an ultra-rare potential memory leak
xps_draw_radial_gradient(xps_document *doc, const fz_matrix *ctm, const fz_rect *area, struct stop *stops, int count, fz_xml *root, int spread) { float x0, y0, r0; float x1, y1, r1; float xrad = 1; float yrad = 1; float invscale; int i, ma = 1; fz_matrix local_ctm = *ctm; fz_matrix inv; fz_rect local_area = *area; char *center_att = fz_xml_att(root, "Center"); char *origin_att = fz_xml_att(root, "GradientOrigin"); char *radius_x_att = fz_xml_att(root, "RadiusX"); char *radius_y_att = fz_xml_att(root, "RadiusY"); x0 = y0 = 0.0; x1 = y1 = 1.0; xrad = 1.0; yrad = 1.0; if (origin_att) xps_parse_point(origin_att, &x0, &y0); if (center_att) xps_parse_point(center_att, &x1, &y1); if (radius_x_att) xrad = fz_atof(radius_x_att); if (radius_y_att) yrad = fz_atof(radius_y_att); xrad = fz_max(0.01f, xrad); yrad = fz_max(0.01f, yrad); /* scale the ctm to make ellipses */ if (fz_abs(xrad) > FLT_EPSILON) { fz_pre_scale(&local_ctm, 1, yrad/xrad); } if (yrad != 0.0) { invscale = xrad / yrad; y0 = y0 * invscale; y1 = y1 * invscale; } r0 = 0; r1 = xrad; fz_transform_rect(&local_area, fz_invert_matrix(&inv, &local_ctm)); ma = fz_maxi(ma, ceilf(hypotf(local_area.x0 - x0, local_area.y0 - y0) / xrad)); ma = fz_maxi(ma, ceilf(hypotf(local_area.x1 - x0, local_area.y0 - y0) / xrad)); ma = fz_maxi(ma, ceilf(hypotf(local_area.x0 - x0, local_area.y1 - y0) / xrad)); ma = fz_maxi(ma, ceilf(hypotf(local_area.x1 - x0, local_area.y1 - y0) / xrad)); if (spread == SPREAD_REPEAT) { for (i = ma - 1; i >= 0; i--) xps_draw_one_radial_gradient(doc, &local_ctm, stops, count, 0, x0, y0, r0 + i * xrad, x1, y1, r1 + i * xrad); } else if (spread == SPREAD_REFLECT) { if ((ma % 2) != 0) ma++; for (i = ma - 2; i >= 0; i -= 2) { xps_draw_one_radial_gradient(doc, &local_ctm, stops, count, 0, x0, y0, r0 + i * xrad, x1, y1, r1 + i * xrad); xps_draw_one_radial_gradient(doc, &local_ctm, stops, count, 0, x0, y0, r0 + (i + 2) * xrad, x1, y1, r1 + i * xrad); } } else { xps_draw_one_radial_gradient(doc, &local_ctm, stops, count, 1, x0, y0, r0, x1, y1, r1); } }
0
[ "CWE-119" ]
mupdf
60dabde18d7fe12b19da8b509bdfee9cc886aafc
26,995,002,086,141,220,000,000,000,000,000,000,000
78
Bug 694957: fix stack buffer overflow in xps_parse_color xps_parse_color happily reads more than FZ_MAX_COLORS values out of a ContextColor array which overflows the passed-in samples array. Limiting the number of allowed samples to FZ_MAX_COLORS and making sure to use that constant for all callers fixes the problem. Thanks to Jean-Jamil Khalifé for reporting and investigating the issue and providing a sample exploit file.
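The clamping described above, in miniature: never write more color samples than the fixed destination array holds. FZ_MAX_COLORS is the constant named in the commit; the surrounding code is illustrative.

    #define FZ_MAX_COLORS 32   /* illustrative value for the sketch */

    static void parse_color_samples(const float *src, int n,
                                    float samples[FZ_MAX_COLORS])
    {
        int i;
        if (n > FZ_MAX_COLORS)
            n = FZ_MAX_COLORS;   /* clamp before filling the fixed buffer */
        for (i = 0; i < n; i++)
            samples[i] = src[i];
    }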
int http_skip_chunk_crlf(struct buffer *buf, struct http_msg *msg) { char *ptr; int bytes; /* NB: we'll check data availabilty at the end. It's not a * problem because whatever we match first will be checked * against the correct length. */ bytes = 1; ptr = buf->lr; if (*ptr == '\r') { bytes++; ptr++; if (ptr >= buf->data + buf->size) ptr = buf->data; } if (bytes > buf->l - buf->send_max) return 0; if (*ptr != '\n') { msg->err_pos = ptr - buf->data; return -1; } ptr++; if (ptr >= buf->data + buf->size) ptr = buf->data; buf->lr = ptr; /* prepare the CRLF to be forwarded. msg->som may be before data but we don't care */ msg->sov = ptr - buf->data; msg->som = msg->sov - bytes; msg->msg_state = HTTP_MSG_CHUNK_SIZE; return 1; }
0
[]
haproxy-1.4
dc80672211e085c211f1fc47e15cfe57ab587d38
189,455,040,529,961,630,000,000,000,000,000,000,000
36
BUG/CRITICAL: using HTTP information in tcp-request content may crash the process During normal HTTP request processing, request buffers are realigned if there are less than global.maxrewrite bytes available after them, in order to leave enough room for rewriting headers after the request. This is done in http_wait_for_request(). However, if some HTTP inspection happens during a "tcp-request content" rule, this realignment is not performed. In theory this is not a problem because empty buffers are always aligned and TCP inspection happens at the beginning of a connection. But with HTTP keep-alive, it also happens at the beginning of each subsequent request. So if a second request was pipelined by the client before the first one had a chance to be forwarded, the second request will not be realigned. Then, http_wait_for_request() will not perform such a realignment either because the request was already parsed and marked as such. The consequence of this is that the rewrite of a sufficient number of such pipelined, unaligned requests may leave less room past the request being processed than the configured reserve, which can lead to a buffer overflow if request processing appends some data past the end of the buffer. A number of conditions are required for the bug to be triggered : - HTTP keep-alive must be enabled ; - HTTP inspection in TCP rules must be used ; - some request appending rules are needed (reqadd, x-forwarded-for) - since empty buffers are always realigned, the client must pipeline enough requests so that the buffer always contains something till the point where there is no more room for rewriting. While such a configuration is quite unlikely to be met (which is confirmed by the bug's lifetime), a few people do use these features together for very specific usages. And more importantly, writing such a configuration and the request to attack it is trivial. A quick workaround consists in forcing keep-alive off by adding "option httpclose" or "option forceclose" in the frontend. Alternatively, disabling HTTP-based TCP inspection rules is enough if the application supports it. At first glance, this bug does not look like it could lead to remote code execution, as the overflowing part is controlled by the configuration and not by the user. But some deeper analysis should be performed to confirm this. And anyway, corrupting the process' memory and crashing it is quite trivial. Special thanks go to Yves Lafon from the W3C who reported this bug and deployed significant efforts to collect the relevant data needed to understand it in less than one week. CVE-2013-1912 was assigned to this issue. Note that 1.4 is also affected so the fix must be backported. (cherry picked from commit aae75e3279c6c9bd136413a72dafdcd4986bb89a)
void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr) { struct ib_uevent_object *uobj = container_of(event->element.wq->uobject, struct ib_uevent_object, uobject); ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle, event->event, &uobj->event_list, &uobj->events_reported); }
0
[ "CWE-362", "CWE-703", "CWE-667" ]
linux
04f5866e41fb70690e28397487d8bd8eea7d712a
79,111,025,769,984,340,000,000,000,000,000,000,000
9
coredump: fix race condition between mmget_not_zero()/get_task_mm() and core dumping The core dumping code has always run without holding the mmap_sem for writing, even though that is the only way to ensure that the entire vma layout will not change from under it. Only using some signal serialization on the processes belonging to the mm is not nearly enough. This was pointed out earlier. For example in Hugh's post from Jul 2017: https://lkml.kernel.org/r/[email protected] "Not strictly relevant here, but a related note: I was very surprised to discover, only quite recently, how handle_mm_fault() may be called without down_read(mmap_sem) - when core dumping. That seems a misguided optimization to me, which would also be nice to correct" In particular, because growsdown and growsup can move the vm_start/vm_end, the various loops the core dump does around the vma will not be consistent if page faults can happen concurrently. Pretty much all users calling mmget_not_zero()/get_task_mm() and then taking the mmap_sem had the potential to introduce unexpected side effects in the core dumping code. Adding mmap_sem for writing around the ->core_dump invocation is a viable long term fix, but it requires removing all copy user and page faults and to replace them with get_dump_page() for all binary formats which is not suitable as a short term fix. For the time being this solution manually covers the places that can confuse the core dump either by altering the vma layout or the vma flags while it runs. Once ->core_dump runs under mmap_sem for writing the function mmget_still_valid() can be dropped. Allowing mmap_sem protected sections to run in parallel with the coredump provides some minor parallelism advantage to the swapoff code (which seems to be safe enough by never mangling any vma field and can keep doing swapins in parallel to the core dumping) and to some other corner case. In order to facilitate the backporting I added "Fixes: 86039bd3b4e6" however the side effect of this same race condition in /proc/pid/mem should be reproducible since before 2.6.12-rc2 so I couldn't add any other "Fixes:" because there's no hash beyond the git genesis commit. Because find_extend_vma() is the only location outside of the process context that could modify the "mm" structures under mmap_sem for reading, by adding the mmget_still_valid() check to it, all other cases that take the mmap_sem for reading don't need the new check after mmget_not_zero()/get_task_mm(). The expand_stack() in page fault context also doesn't need the new check, because all tasks under core dumping are frozen. Link: http://lkml.kernel.org/r/[email protected] Fixes: 86039bd3b4e6 ("userfaultfd: add new syscall to provide memory externalization") Signed-off-by: Andrea Arcangeli <[email protected]> Reported-by: Jann Horn <[email protected]> Suggested-by: Oleg Nesterov <[email protected]> Acked-by: Peter Xu <[email protected]> Reviewed-by: Mike Rapoport <[email protected]> Reviewed-by: Oleg Nesterov <[email protected]> Reviewed-by: Jann Horn <[email protected]> Acked-by: Jason Gunthorpe <[email protected]> Acked-by: Michal Hocko <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
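A rough sketch of the pattern the commit introduces: after taking the mmap semaphore for reading, bail out if a core dump is in flight. The struct and field here are stand-ins for the real mmget_still_valid() helper.

    struct demo_mm {
        void *core_state;   /* non-NULL while a core dump is running */
    };

    /* Readers of the vma layout must not proceed during a core dump. */
    static inline int mm_still_valid(struct demo_mm *mm)
    {
        return mm->core_state == NULL;
    }
    /* usage: down_read(mmap_sem); if (!mm_still_valid(mm)) goto unlock; */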
Status FillCollectiveParams(CollectiveParams* col_params, CollectiveType collective_type, const Tensor& group_size, const Tensor& group_key, const Tensor& instance_key) { if (group_size.dims() > 0) { return errors::Internal("Unexpected dimensions on input group_size, got ", group_size.shape().DebugString()); } if (group_key.dims() > 0) { return errors::Internal("Unexpected dimensions on input group_key, got ", group_key.shape().DebugString()); } if (instance_key.dims() > 0) { return errors::Internal( "Unexpected dimensions on input instance_key, got ", instance_key.shape().DebugString()); } col_params->name = name_; col_params->group.device_type = device_type_; col_params->group.group_size = group_size.unaligned_flat<int32>()(0); if (col_params->group.group_size <= 0) { return errors::InvalidArgument( "group_size must be positive integer but got ", col_params->group.group_size); } col_params->group.group_key = group_key.unaligned_flat<int32>()(0); col_params->instance.type = collective_type; col_params->instance.instance_key = instance_key.unaligned_flat<int32>()(0); col_params->instance.data_type = data_type_; col_params->instance.impl_details.communication_hint = communication_hint_; col_params->instance.impl_details.timeout_seconds = timeout_seconds_; return Status::OK(); }
1
[ "CWE-416" ]
tensorflow
ca38dab9d3ee66c5de06f11af9a4b1200da5ef75
53,889,960,801,950,790,000,000,000,000,000,000,000
33
Fix undefined behavior in CollectiveReduceV2 and others We should not call done after it's moved. PiperOrigin-RevId: 400838185 Change-Id: Ifc979740054b8f8c6f4d50acc89472fe60c4fdb1
void restore_tm_state(struct pt_regs *regs) { unsigned long msr_diff; clear_thread_flag(TIF_RESTORE_TM); if (!MSR_TM_ACTIVE(regs->msr)) return; msr_diff = current->thread.ckpt_regs.msr & ~regs->msr; msr_diff &= MSR_FP | MSR_VEC | MSR_VSX; if (msr_diff & MSR_FP) { fp_enable(); load_fp_state(&current->thread.fp_state); regs->msr |= current->thread.fpexc_mode; } if (msr_diff & MSR_VEC) { vec_enable(); load_vr_state(&current->thread.vr_state); } regs->msr |= msr_diff; }
0
[ "CWE-284" ]
linux
7f821fc9c77a9b01fe7b1d6e72717b33d8d64142
88,888,525,961,487,030,000,000,000,000,000,000,000
21
powerpc/tm: Check for already reclaimed tasks Currently we can hit a scenario where we'll tm_reclaim() twice. This results in a TM bad thing exception because the second reclaim occurs when not in suspend mode. The scenario in which this can happen is the following. We attempt to deliver a signal to userspace. To do this we need to obtain the stack pointer to write the signal context. To get this stack pointer we must tm_reclaim() in case we need to use the checkpointed stack pointer (see get_tm_stackpointer()). Normally we'd then return directly to userspace to deliver the signal without going through __switch_to(). Unfortunately, if at this point we get an error (such as a bad userspace stack pointer), we need to exit the process. The exit will result in a __switch_to(). __switch_to() will attempt to save the process state which results in another tm_reclaim(). This tm_reclaim() now causes a TM Bad Thing exception as this state has already been saved and the processor is no longer in TM suspend mode. Whee! This patch checks the state of the MSR to ensure we are TM suspended before we attempt the tm_reclaim(). If we've already saved the state away, we should no longer be in TM suspend mode. This has the additional advantage of checking for a potential TM Bad Thing exception. Found using syscall fuzzer. Fixes: fb09692e71f1 ("powerpc: Add reclaim and recheckpoint functions for context switching transactional memory processes") Cc: [email protected] # v3.9+ Signed-off-by: Michael Neuling <[email protected]> Signed-off-by: Michael Ellerman <[email protected]>
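The check described in the message, sketched: only reclaim if the MSR still says the task is TM-suspended. The macro and helpers are assumptions for illustration, not the exact powerpc code.

    #define MSR_TS_S 0x2ULL                        /* illustrative suspend bit */
    #define MSR_TM_SUSPENDED(msr) (((msr) & MSR_TS_S) != 0)

    extern unsigned long long read_msr(void);      /* hypothetical accessor */
    extern void tm_reclaim_state(void);            /* hypothetical reclaim */

    static void maybe_tm_reclaim(void)
    {
        /* Already reclaimed: no longer suspended, a second reclaim traps. */
        if (!MSR_TM_SUSPENDED(read_msr()))
            return;
        tm_reclaim_state();
    }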
deep_count_start (NautilusDirectory *directory, NautilusFile *file, gboolean *doing_io) { GFile *location; DeepCountState *state; if (directory->details->deep_count_in_progress != NULL) { *doing_io = TRUE; return; } if (!is_needy (file, lacks_deep_count, REQUEST_DEEP_COUNT)) { return; } *doing_io = TRUE; if (!nautilus_file_is_directory (file)) { file->details->deep_counts_status = NAUTILUS_REQUEST_DONE; nautilus_directory_async_state_changed (directory); return; } if (!async_job_start (directory, "deep count")) { return; } /* Start counting. */ file->details->deep_counts_status = NAUTILUS_REQUEST_IN_PROGRESS; file->details->deep_directory_count = 0; file->details->deep_file_count = 0; file->details->deep_unreadable_count = 0; file->details->deep_size = 0; directory->details->deep_count_file = file; state = g_new0 (DeepCountState, 1); state->directory = directory; state->cancellable = g_cancellable_new (); state->seen_deep_count_inodes = g_array_new (FALSE, TRUE, sizeof (guint64)); directory->details->deep_count_in_progress = state; location = nautilus_file_get_location (file); deep_count_load (state, location); g_object_unref (location); }
0
[]
nautilus
7632a3e13874a2c5e8988428ca913620a25df983
29,918,755,031,605,714,000,000,000,000,000,000,000
49
Check for trusted desktop file launchers. 2009-02-24 Alexander Larsson <[email protected]> * libnautilus-private/nautilus-directory-async.c: Check for trusted desktop file launchers. * libnautilus-private/nautilus-file-private.h: * libnautilus-private/nautilus-file.c: * libnautilus-private/nautilus-file.h: Add nautilus_file_is_trusted_link. Allow unsetting of custom display name. * libnautilus-private/nautilus-mime-actions.c: Display dialog when trying to launch a non-trusted desktop file. svn path=/trunk/; revision=15003
hstore_each(PG_FUNCTION_ARGS) { FuncCallContext *funcctx; HStore *hs; int i; if (SRF_IS_FIRSTCALL()) { hs = PG_GETARG_HS(0); funcctx = SRF_FIRSTCALL_INIT(); setup_firstcall(funcctx, hs, fcinfo); } funcctx = SRF_PERCALL_SETUP(); hs = (HStore *) funcctx->user_fctx; i = funcctx->call_cntr; if (i < HS_COUNT(hs)) { HEntry *entries = ARRPTR(hs); char *ptr = STRPTR(hs); Datum res, dvalues[2]; bool nulls[2] = {false, false}; text *item; HeapTuple tuple; item = cstring_to_text_with_len(HS_KEY(entries, ptr, i), HS_KEYLEN(entries, i)); dvalues[0] = PointerGetDatum(item); if (HS_VALISNULL(entries, i)) { dvalues[1] = (Datum) 0; nulls[1] = true; } else { item = cstring_to_text_with_len(HS_VAL(entries, ptr, i), HS_VALLEN(entries, i)); dvalues[1] = PointerGetDatum(item); } tuple = heap_form_tuple(funcctx->tuple_desc, dvalues, nulls); res = HeapTupleGetDatum(tuple); SRF_RETURN_NEXT(funcctx, PointerGetDatum(res)); } SRF_RETURN_DONE(funcctx); }
0
[ "CWE-703", "CWE-189" ]
postgres
31400a673325147e1205326008e32135a78b4d8a
248,043,825,676,231,000,000,000,000,000,000,000,000
51
Predict integer overflow to avoid buffer overruns. Several functions, mostly type input functions, calculated an allocation size such that the calculation wrapped to a small positive value when arguments implied a sufficiently-large requirement. Writes past the end of the inadvertent small allocation followed shortly thereafter. Coverity identified the path_in() vulnerability; code inspection led to the rest. In passing, add check_stack_depth() to prevent stack overflow in related functions. Back-patch to 8.4 (all supported versions). The non-comment hstore changes touch code that did not exist in 8.4, so that part stops at 9.0. Noah Misch and Heikki Linnakangas, reviewed by Tom Lane. Security: CVE-2014-0064
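Predicting the overflow in C, as a minimal sketch: validate the multiplication before computing an allocation size, so a huge request cannot wrap to a small one (illustrative, not the PostgreSQL code).

    #include <stdint.h>
    #include <stdlib.h>

    static void *alloc_array(size_t nelem, size_t elsize)
    {
        /* Refuse any request whose product would wrap around. */
        if (elsize != 0 && nelem > SIZE_MAX / elsize)
            return NULL;
        return malloc(nelem * elsize);
    }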
notationDeclDebug(void *ctx ATTRIBUTE_UNUSED, const xmlChar *name, const xmlChar *publicId, const xmlChar *systemId) { callbacks++; if (quiet) return; fprintf(SAXdebug, "SAX.notationDecl(%s, %s, %s)\n", (char *) name, (char *) publicId, (char *) systemId); }
0
[ "CWE-125" ]
libxml2
a820dbeac29d330bae4be05d9ecd939ad6b4aa33
117,934,056,072,746,520,000,000,000,000,000,000,000
9
Bug 758605: Heap-based buffer overread in xmlDictAddString <https://bugzilla.gnome.org/show_bug.cgi?id=758605> Reviewed by David Kilzer. * HTMLparser.c: (htmlParseName): Add bounds check. (htmlParseNameComplex): Ditto. * result/HTML/758605.html: Added. * result/HTML/758605.html.err: Added. * result/HTML/758605.html.sax: Added. * runtest.c: (pushParseTest): The input for the new test case was so small (4 bytes) that htmlParseChunk() was never called after htmlCreatePushParserCtxt(), thereby creating a false positive test failure. Fixed by using a do-while loop so we always call htmlParseChunk() at least once. * test/HTML/758605.html: Added.
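The bounds check named in the ChangeLog, reduced to a sketch: stop consuming name characters at the end of the buffer instead of reading past it (hypothetical helper, not the htmlParseName code).

    /* Measure a name without reading beyond end. */
    static int parse_name_len(const unsigned char *cur,
                              const unsigned char *end)
    {
        int len = 0;
        while (cur + len < end &&   /* the added bounds check */
               ((cur[len] >= 'a' && cur[len] <= 'z') || cur[len] == '_'))
            len++;
        return len;
    }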
remote_filter_free (RemoteFilter *remote_filter) { g_free (remote_filter->checksum); g_object_unref (remote_filter->path); if (remote_filter->allow) g_regex_unref (remote_filter->allow); if (remote_filter->deny) g_regex_unref (remote_filter->deny); g_free (remote_filter); }
0
[ "CWE-74" ]
flatpak
fb473cad801c6b61706353256cab32330557374a
66,189,740,078,220,240,000,000,000,000,000,000,000
11
dir: Pass environment via bwrap --setenv when running apply_extra This means we can systematically pass the environment variables through bwrap(1), even if it is setuid and thus is filtering out security-sensitive environment variables. bwrap ends up being run with an empty environment instead. As with the previous commit, this regressed while fixing CVE-2021-21261. Fixes: 6d1773d2 "run: Convert all environment variables into bwrap arguments" Signed-off-by: Simon McVittie <[email protected]>
int rtnl_af_register(struct rtnl_af_ops *ops) { int err; rtnl_lock(); err = __rtnl_af_register(ops); rtnl_unlock(); return err; }
0
[ "CWE-399" ]
linux-2.6
84d73cd3fb142bf1298a8c13fd4ca50fd2432372
329,404,797,623,998,870,000,000,000,000,000,000,000
9
rtnl: fix info leak on RTM_GETLINK request for VF devices Initialize the mac address buffer with 0 as the driver specific function will probably not fill the whole buffer. In fact, all in-kernel drivers fill only ETH_ALEN of the MAX_ADDR_LEN bytes, i.e. 6 of the 32 possible bytes. Therefore we currently leak 26 bytes of stack memory to userland via the netlink interface. Signed-off-by: Mathias Krause <[email protected]> Signed-off-by: David S. Miller <[email protected]>
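The fix pattern in a few lines: zero the whole buffer before a callee fills only part of it, so the remaining bytes cannot leak stack memory to userland. The sizes match the commit message; the function itself is illustrative.

    #include <string.h>

    #define MAX_ADDR_LEN 32
    #define ETH_ALEN      6

    static void fill_vf_mac(unsigned char out[MAX_ADDR_LEN],
                            const unsigned char hw[ETH_ALEN])
    {
        memset(out, 0, MAX_ADDR_LEN);   /* the fix: no uninitialized bytes */
        memcpy(out, hw, ETH_ALEN);      /* drivers fill only 6 of 32 bytes */
    }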
u32 gf_media_nalu_is_start_code(GF_BitStream *bs) { u8 s1, s2, s3, s4; Bool is_sc = 0; u64 pos = gf_bs_get_position(bs); s1 = gf_bs_read_int(bs, 8); s2 = gf_bs_read_int(bs, 8); if (!s1 && !s2) { s3 = gf_bs_read_int(bs, 8); if (s3==0x01) is_sc = 3; else if (!s3) { s4 = gf_bs_read_int(bs, 8); if (s4==0x01) is_sc = 4; } } gf_bs_seek(bs, pos+is_sc); return is_sc; }
0
[ "CWE-119", "CWE-787" ]
gpac
90dc7f853d31b0a4e9441cba97feccf36d8b69a4
236,792,231,667,398,800,000,000,000,000,000,000,000
18
fix some exploitable overflows (#994, #997)
finish_input_bmp(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) { /* no work */ }
0
[ "CWE-369" ]
libjpeg-turbo
43e84cff1bb2bd8293066f6ac4eb0df61ddddbc6
197,714,846,886,192,500,000,000,000,000,000,000,000
4
tjLoadImage(): Fix FPE triggered by malformed BMP In rdbmp.c, it is necessary to guard against 32-bit overflow/wraparound when allocating the row buffer, because since BMP files have 32-bit width and height fields, the value of biWidth can be up to 4294967295. Specifically, if biWidth is 1073741824 and cinfo->input_components = 4, then the samplesperrow argument in alloc_sarray() would wrap around to 0, and a division by zero error would occur at line 458 in jmemmgr.c. If biWidth is set to a higher value, then samplesperrow would wrap around to a small number, which would likely cause a buffer overflow (this has not been tested or verified.)
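A hedged sketch of the guard the commit describes for rdbmp.c: reject a width for which width times components would wrap the 32-bit samples-per-row computation (types illustrative).

    #include <stdint.h>

    static int row_size_ok(uint32_t biWidth, uint32_t components)
    {
        /* biWidth can be up to 4294967295, so check before multiplying. */
        return components != 0 && biWidth <= UINT32_MAX / components;
    }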
int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags) { int ret = 0; WARN_ON_ONCE(dev->interrupt == NULL); if (dev->interrupt) { mutex_lock(&dev->interrupt_mutex); if (++dev->interrupt_count == 1) ret = usb_submit_urb(dev->interrupt, mem_flags); dev_dbg(&dev->udev->dev, "incremented interrupt URB count to %d\n", dev->interrupt_count); mutex_unlock(&dev->interrupt_mutex); } return ret; }
0
[ "CWE-703" ]
linux
1666984c8625b3db19a9abc298931d35ab7bc64b
72,731,494,048,624,150,000,000,000,000,000,000,000
17
usbnet: cleanup after bind() in probe() In case bind() works but a later error forces bailing out in probe(), work may have been queued and a timer may have been scheduled on the error paths. They must be killed. This fixes an error case related to the double free reported in http://www.spinics.net/lists/netdev/msg367669.html and needs to go on top of Linus' fix to cdc-ncm. Signed-off-by: Oliver Neukum <[email protected]> Signed-off-by: David S. Miller <[email protected]>
handle_server_data_anonymous_mech (DBusAuth *auth, const DBusString *data) { if (_dbus_string_get_length (data) > 0) { /* Client is allowed to send "trace" data, the only defined * meaning is that if it contains '@' it is an email address, * and otherwise it is anything else, and it's supposed to be * UTF-8 */ if (!_dbus_string_validate_utf8 (data, 0, _dbus_string_get_length (data))) { _dbus_verbose ("%s: Received invalid UTF-8 trace data from ANONYMOUS client\n", DBUS_AUTH_NAME (auth)); return send_rejected (auth); } _dbus_verbose ("%s: ANONYMOUS client sent trace string: '%s'\n", DBUS_AUTH_NAME (auth), _dbus_string_get_const_data (data)); } /* We want to be anonymous (clear in case some other protocol got midway through I guess) */ _dbus_credentials_clear (auth->desired_identity); /* Copy process ID from the socket credentials */ if (!_dbus_credentials_add_credential (auth->authorized_identity, DBUS_CREDENTIAL_UNIX_PROCESS_ID, auth->credentials)) return FALSE; /* Anonymous is always allowed */ if (!send_ok (auth)) return FALSE; _dbus_verbose ("%s: authenticated client as anonymous\n", DBUS_AUTH_NAME (auth)); return TRUE; }
0
[ "CWE-59" ]
dbus
47b1a4c41004bf494b87370987b222c934b19016
283,209,633,783,270,480,000,000,000,000,000,000,000
41
auth: Reject DBUS_COOKIE_SHA1 for users other than the server owner The DBUS_COOKIE_SHA1 authentication mechanism aims to prove ownership of a shared home directory by having the server write a secret "cookie" into a .dbus-keyrings subdirectory of the desired identity's home directory with 0700 permissions, and having the client prove that it can read the cookie. This never actually worked for non-malicious clients in the case where server uid != client uid (unless the server and client both have privileges, such as Linux CAP_DAC_OVERRIDE or traditional Unix uid 0) because an unprivileged server would fail to write out the cookie, and an unprivileged client would be unable to read the resulting file owned by the server. Additionally, since dbus 1.7.10 we have checked that ~/.dbus-keyrings is owned by the uid of the server (a side-effect of a check added to harden our use of XDG_RUNTIME_DIR), further ruling out successful use by a non-malicious client with a uid differing from the server's. Joe Vennix of Apple Information Security discovered that the implementation of DBUS_COOKIE_SHA1 was susceptible to a symbolic link attack: a malicious client with write access to its own home directory could manipulate a ~/.dbus-keyrings symlink to cause the DBusServer to read and write in unintended locations. In the worst case this could result in the DBusServer reusing a cookie that is known to the malicious client, and treating that cookie as evidence that a subsequent client connection came from an attacker-chosen uid, allowing authentication bypass. This is mitigated by the fact that by default, the well-known system dbus-daemon (since 2003) and the well-known session dbus-daemon (in stable releases since dbus 1.10.0 in 2015) only accept the EXTERNAL authentication mechanism, and as a result will reject DBUS_COOKIE_SHA1 at an early stage, before manipulating cookies. As a result, this vulnerability only applies to: * system or session dbus-daemons with non-standard configuration * third-party dbus-daemon invocations such as at-spi2-core (although in practice at-spi2-core also only accepts EXTERNAL by default) * third-party uses of DBusServer such as the one in Upstart Avoiding symlink attacks in a portable way is difficult, because APIs like openat() and Linux /proc/self/fd are not universally available. However, because DBUS_COOKIE_SHA1 already doesn't work in practice for a non-matching uid, we can solve this vulnerability in an easier way without regressions, by rejecting it early (before looking at ~/.dbus-keyrings) whenever the requested identity doesn't match the identity of the process hosting the DBusServer. Signed-off-by: Simon McVittie <[email protected]> Closes: https://gitlab.freedesktop.org/dbus/dbus/issues/269 Closes: CVE-2019-12749
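The early rejection in miniature: refuse DBUS_COOKIE_SHA1 whenever the requested identity differs from the uid that owns the server, before any cookie handling starts (illustrative, not the libdbus code).

    /* Allow the cookie mechanism only for the server's own uid. */
    static int cookie_sha1_allowed(unsigned long server_uid,
                                   unsigned long requested_uid)
    {
        return server_uid == requested_uid;
    }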
GF_Box *lsrc_New() { ISOM_DECL_BOX_ALLOC(GF_LASERConfigurationBox, GF_ISOM_BOX_TYPE_LSRC); return (GF_Box *)tmp; }
0
[ "CWE-400", "CWE-401" ]
gpac
d2371b4b204f0a3c0af51ad4e9b491144dd1225c
37,304,567,322,129,055,000,000,000,000,000,000,000
5
prevent dref memleak on invalid input (#1183)
static void printResult(cl_ulong4 seed, cl_ulong round, result r, cl_uchar score, const std::chrono::time_point<std::chrono::steady_clock> & timeStart, const Mode & mode) { // Time delta const auto seconds = std::chrono::duration_cast<std::chrono::seconds>(std::chrono::steady_clock::now() - timeStart).count(); // Format private key cl_ulong carry = 0; cl_ulong4 seedRes; seedRes.s[0] = seed.s[0] + round; carry = seedRes.s[0] < round; seedRes.s[1] = seed.s[1] + carry; carry = !seedRes.s[1]; seedRes.s[2] = seed.s[2] + carry; carry = !seedRes.s[2]; seedRes.s[3] = seed.s[3] + carry + r.foundId; std::ostringstream ss; ss << std::hex << std::setfill('0'); ss << std::setw(16) << seedRes.s[3] << std::setw(16) << seedRes.s[2] << std::setw(16) << seedRes.s[1] << std::setw(16) << seedRes.s[0]; const std::string strPrivate = ss.str(); // Format public key const std::string strPublic = toHex(r.foundHash, 20); // Print const std::string strVT100ClearLine = "\33[2K\r"; std::cout << strVT100ClearLine << " Time: " << std::setw(5) << seconds << "s Score: " << std::setw(2) << (int) score << " Private: 0x" << strPrivate << ' '; std::cout << mode.transformName(); std::cout << ": 0x" << strPublic << std::endl; }
0
[ "CWE-703" ]
profanity
69ff010c14ff80ec14246772db6a245aa59e6689
162,656,428,741,407,100,000,000,000,000,000,000,000
28
[FIX] private key seed.
void stop_dispatching(JSObject *self) { MOZ_ASSERT(is_dispatching(self)); JS::SetReservedSlot(self, Slots::Dispatch, JS::FalseValue()); }
0
[ "CWE-94" ]
js-compute-runtime
65524ffc962644e9fc39f4b368a326b6253912a9
162,423,531,228,841,920,000,000,000,000,000,000,000
4
use random_get instead of arc4random, as arc4random does not work correctly with wizer: wizer causes the seed in arc4random to be the same between executions, which is not random
static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset, int len, unsigned long sum) { int start = skb_headlen(skb); struct sk_buff *frag_iter; int i, copy; /* checksum stuff in header space */ if ((copy = start - offset) > 0) { if (copy > len) copy = len; sum = atalk_sum_partial(skb->data + offset, copy, sum); if ((len -= copy) == 0) return sum; offset += copy; } /* checksum stuff in frags */ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int end; const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; WARN_ON(start > offset + len); end = start + skb_frag_size(frag); if ((copy = end - offset) > 0) { u8 *vaddr; if (copy > len) copy = len; vaddr = kmap_atomic(skb_frag_page(frag)); sum = atalk_sum_partial(vaddr + skb_frag_off(frag) + offset - start, copy, sum); kunmap_atomic(vaddr); if (!(len -= copy)) return sum; offset += copy; } start = end; } skb_walk_frags(skb, frag_iter) { int end; WARN_ON(start > offset + len); end = start + frag_iter->len; if ((copy = end - offset) > 0) { if (copy > len) copy = len; sum = atalk_sum_skb(frag_iter, offset - start, copy, sum); if ((len -= copy) == 0) return sum; offset += copy; } start = end; } BUG_ON(len > 0); return sum; }
0
[ "CWE-276" ]
linux
6cc03e8aa36c51f3b26a0d21a3c4ce2809c842ac
209,322,408,835,058,400,000,000,000,000,000,000,000
64
appletalk: enforce CAP_NET_RAW for raw sockets When creating a raw AF_APPLETALK socket, CAP_NET_RAW needs to be checked first. Signed-off-by: Ori Nimron <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]> Signed-off-by: David S. Miller <[email protected]>
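The enforcement pattern, sketched: check the capability in the protocol's create hook before a raw socket comes into existence. The constants mirror kernel names but are defined here only to keep the sketch self-contained.

    #define SOCK_RAW     3
    #define EPERM        1
    #define CAP_NET_RAW 13

    extern int capable(int cap);   /* kernel helper, declared for the sketch */

    static int atalk_create_check(int sock_type)
    {
        if (sock_type == SOCK_RAW && !capable(CAP_NET_RAW))
            return -EPERM;   /* raw sockets require CAP_NET_RAW */
        return 0;
    }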
static inline bool nested_exit_on_init(struct vcpu_svm *svm) { return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT); }
0
[ "CWE-862" ]
kvm
0f923e07124df069ba68d8bb12324398f4b6b709
231,946,343,071,023,440,000,000,000,000,000,000,000
4
KVM: nSVM: avoid picking up unsupported bits from L2 in int_ctl (CVE-2021-3653) * Invert the mask of bits that we pick from L2 in nested_vmcb02_prepare_control * Invert and explicitly use VIRQ related bits bitmask in svm_clear_vintr This fixes a security issue that allowed a malicious L1 to run L2 with AVIC enabled, which allowed the L2 to exploit the uninitialized and enabled AVIC to read/write the host physical memory at some offsets. Fixes: 3d6368ef580a ("KVM: SVM: Add VMRUN handler") Signed-off-by: Maxim Levitsky <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
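A sketch of the masking idea from the commit: build the merged interrupt-control field from an explicit bitmask of the bits L2 may supply, so unsupported bits can never leak through (bit values illustrative).

    #define V_IRQ_MASK    0x00000100u       /* illustrative bit values */
    #define L2_OWNED_BITS (V_IRQ_MASK)      /* bits we accept from L2 */

    static unsigned int merge_int_ctl(unsigned int from_l2,
                                      unsigned int from_host)
    {
        /* Anything outside L2_OWNED_BITS comes from the host state. */
        return (from_l2 & L2_OWNED_BITS) | (from_host & ~L2_OWNED_BITS);
    }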
Status TrySimplify(NodeDef* reduction_node, string* simplified_node_name) override { if (IsInPreserveSet(*reduction_node)) return Status::OK(); // Input 0 (data) of the reduction node must be a tf.gather() on the 0th // axis. NodeDef* gather_node = nullptr; TF_RETURN_IF_ERROR(GetInputNode(reduction_node->input(0), &gather_node)); if (!IsGather(*gather_node) || IsInPreserveSet(*gather_node) || gather_node->device() != reduction_node->device()) return Status::OK(); if (gather_node->op() == "GatherV2" && !IsAxis0(*gather_node, 2)) return Status::OK(); // Input 1 (indices) of the gather node must be a tf.unique() on the 0th // axis. NodeDef* unique_node = nullptr; TF_RETURN_IF_ERROR(GetInputNode(gather_node->input(1), &unique_node)); if (!IsUnique(*unique_node) || IsInPreserveSet(*unique_node) || unique_node->device() != gather_node->device()) return Status::OK(); if (unique_node->op() == "UniqueV2" && !IsAxis0(*unique_node, 1)) return Status::OK(); DataType unique_element_type; TF_RETURN_IF_ERROR(GetNodeAttr(*unique_node, "T", &unique_element_type)); // Input 1 (indices) of the reduction node must be output 1 of the unique // node. const TensorId idx_tensor = ParseTensorName(reduction_node->input(1)); if (idx_tensor != TensorId(unique_node->name(), 1)) return Status::OK(); // Input 0 (data) of the reduction node becomes input 1 (params) of the // gather node. reduction_node->set_input(0, gather_node->input(0)); ctx().node_map->UpdateInput(reduction_node->name(), reduction_node->input(0), gather_node->input(0)); // Input 1 (indices) of the reduction node becomes input 0 (x) of the unique // node. reduction_node->set_input(1, unique_node->input(0)); ctx().node_map->UpdateInput(reduction_node->name(), reduction_node->input(1), unique_node->input(0)); SetDataTypeToAttr(unique_element_type, "Tidx", reduction_node); *simplified_node_name = reduction_node->name(); return Status::OK(); }
0
[ "CWE-476" ]
tensorflow
e6340f0665d53716ef3197ada88936c2a5f7a2d3
27,369,739,750,341,950,000,000,000,000,000,000,000
50
Handle a special grappler case resulting in crash. It might happen that a malformed input could be used to trick Grappler into trying to optimize a node with no inputs. This, in turn, would produce a null pointer dereference and a segfault. PiperOrigin-RevId: 369242852 Change-Id: I2e5cbe7aec243d34a6d60220ac8ac9b16f136f6b
static const char *am_set_setenv_slot(cmd_parms *cmd, void *struct_ptr, const char *newName, const char *oldName) { am_dir_cfg_rec *d = (am_dir_cfg_rec *)struct_ptr; /* Configure as prefixed attribute name */ am_envattr_conf_t *envattr_conf = (am_envattr_conf_t *)apr_palloc(cmd->pool, sizeof(am_envattr_conf_t)); envattr_conf->name = newName; envattr_conf->prefixed = 1; apr_hash_set(d->envattr, oldName, APR_HASH_KEY_STRING, envattr_conf); return NULL; }
0
[ "CWE-601" ]
mod_auth_mellon
9d28908e28ef70a12196c215503fb0075e1fd7f3
304,885,602,723,487,400,000,000,000,000,000,000,000
13
Add MellonRedirectDomains option. Limit the domains that we will redirect to after login / logout to a set of trusted domains. By default we only allow redirects to the current domain. This change breaks backwards compatibility with any site that relies on redirects to separate domains. Fixes #35
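A strictly illustrative sketch of the MellonRedirectDomains idea: only follow a post-login or post-logout redirect whose host appears on a trusted list (not mod_auth_mellon code).

    #include <string.h>

    /* Return 1 if host exactly matches a trusted domain. */
    static int redirect_host_trusted(const char *host,
                                     const char *const *trusted, int n)
    {
        int i;
        for (i = 0; i < n; i++)
            if (strcmp(host, trusted[i]) == 0)
                return 1;
        return 0;   /* refuse the open redirect */
    }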
static const char *set_access_name(cmd_parms *cmd, void *dummy, const char *arg) { void *sconf = cmd->server->module_config; core_server_config *conf = ap_get_core_module_config(sconf); const char *err = ap_check_cmd_context(cmd, NOT_IN_DIR_LOC_FILE); if (err != NULL) { return err; } conf->access_name = apr_pstrdup(cmd->pool, arg); return NULL; }
0
[ "CWE-416", "CWE-284" ]
httpd
4cc27823899e070268b906ca677ee838d07cf67a
42,337,004,762,618,180,000,000,000,000,000,000,000
14
core: Disallow Methods' registration at run time (.htaccess), they may be used only if registered at init time (httpd.conf). Calling ap_method_register() in children processes is not the right scope since it won't be shared for all requests. git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1807655 13f79535-47bb-0310-9956-ffa450edef68
void Server::addLink(Channel *c, Channel *l) { c->link(l); if (c->bTemporary || l->bTemporary) return; TransactionHolder th; QSqlQuery &query = *th.qsqQuery; SQLPREP("INSERT INTO `%1channel_links` (`server_id`, `channel_id`, `link_id`) VALUES (?,?,?)"); query.addBindValue(iServerNum); query.addBindValue(c->iId); query.addBindValue(l->iId); SQLEXEC(); query.addBindValue(iServerNum); query.addBindValue(l->iId); query.addBindValue(c->iId); SQLEXEC(); }
0
[ "CWE-20" ]
mumble
6b33dda344f89e5a039b7d79eb43925040654242
136,508,939,705,847,150,000,000,000,000,000,000,000
19
Don't crash on long usernames
static bool tight_can_send_png_rect(VncState *vs, int w, int h) { if (vs->tight->type != VNC_ENCODING_TIGHT_PNG) { return false; } if (surface_bytes_per_pixel(vs->vd->ds) == 1 || vs->client_pf.bytes_per_pixel == 1) { return false; } return true; }
0
[ "CWE-401" ]
qemu
6bf21f3d83e95bcc4ba35a7a07cc6655e8b010b0
284,289,414,415,156,200,000,000,000,000,000,000,000
13
vnc: fix memory leak when vnc disconnect Currently when qemu receives a vnc connect, it creates a 'VncState' to represent this connection. In 'vnc_worker_thread_loop' it creates a local 'VncState'. The connection 'VncState' and local 'VncState' exchange data in 'vnc_async_encoding_start' and 'vnc_async_encoding_end'. In 'zrle_compress_data' it calls 'deflateInit2' to allocate the libz library opaque data. The 'VncState' used in 'zrle_compress_data' is the local 'VncState'. In 'vnc_zrle_clear' it calls 'deflateEnd' to free the libz library opaque data. The 'VncState' used in 'vnc_zrle_clear' is the connection 'VncState'. In the current implementation there will be a memory leak when the vnc disconnects. Following is the asan output backtrace: Direct leak of 29760 byte(s) in 5 object(s) allocated from: 0 0xffffa67ef3c3 in __interceptor_calloc (/lib64/libasan.so.4+0xd33c3) 1 0xffffa65071cb in g_malloc0 (/lib64/libglib-2.0.so.0+0x571cb) 2 0xffffa5e968f7 in deflateInit2_ (/lib64/libz.so.1+0x78f7) 3 0xaaaacec58613 in zrle_compress_data ui/vnc-enc-zrle.c:87 4 0xaaaacec58613 in zrle_send_framebuffer_update ui/vnc-enc-zrle.c:344 5 0xaaaacec34e77 in vnc_send_framebuffer_update ui/vnc.c:919 6 0xaaaacec5e023 in vnc_worker_thread_loop ui/vnc-jobs.c:271 7 0xaaaacec5e5e7 in vnc_worker_thread ui/vnc-jobs.c:340 8 0xaaaacee4d3c3 in qemu_thread_start util/qemu-thread-posix.c:502 9 0xffffa544e8bb in start_thread (/lib64/libpthread.so.0+0x78bb) 10 0xffffa53965cb in thread_start (/lib64/libc.so.6+0xd55cb) This is because the opaque allocated in 'deflateInit2' is not freed in 'deflateEnd'. The reason is that 'deflateEnd' calls 'deflateStateCheck', and the latter will check whether 's->strm != strm' (libz's data structure). This check will be true, so 'deflateEnd' just returns 'Z_STREAM_ERROR' and does not free the data allocated in 'deflateInit2'. The reason this happens is that the 'VncState' contains the whole 'VncZrle', so when calling 'deflateInit2', the 's->strm' will be the local address. So 's->strm != strm' will be true. To fix this issue, we need to make the 'zrle' of 'VncState' a pointer. Then the connection 'VncState' and local 'VncState' exchange mechanism will work as expected. The 'tight' of 'VncState' has the same issue, let's also turn it into a pointer. Reported-by: Ying Fang <[email protected]> Signed-off-by: Li Qiang <[email protected]> Message-id: [email protected] Signed-off-by: Gerd Hoffmann <[email protected]>
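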
static inline int i40e_setup_hw_channel(struct i40e_pf *pf, struct i40e_vsi *vsi, struct i40e_channel *ch, u16 uplink_seid, u8 type) { int ret; ch->initialized = false; ch->base_queue = vsi->next_base_queue; ch->type = type; /* Proceed with creation of channel (VMDq2) VSI */ ret = i40e_add_channel(pf, uplink_seid, ch); if (ret) { dev_info(&pf->pdev->dev, "failed to add_channel using uplink_seid %u\n", uplink_seid); return ret; } /* Mark the successful creation of channel */ ch->initialized = true; /* Reconfigure TX queues using QTX_CTL register */ ret = i40e_channel_config_tx_ring(pf, vsi, ch); if (ret) { dev_info(&pf->pdev->dev, "failed to configure TX rings for channel %u\n", ch->seid); return ret; } /* update 'next_base_queue' */ vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs; dev_dbg(&pf->pdev->dev, "Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n", ch->seid, ch->vsi_number, ch->stat_counter_idx, ch->num_queue_pairs, vsi->next_base_queue); return ret; }
0
[ "CWE-400", "CWE-401" ]
linux
27d461333459d282ffa4a2bdb6b215a59d493a8f
207,011,958,623,867,320,000,000,000,000,000,000,000
41
i40e: prevent memory leak in i40e_setup_macvlans In i40e_setup_macvlans, if i40e_setup_channel fails, the allocated memory for ch should be released. Signed-off-by: Navid Emamdoost <[email protected]> Tested-by: Andrew Bowers <[email protected]> Signed-off-by: Jeff Kirsher <[email protected]>
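The leak fix named in the commit, as a generic C sketch: when a setup step fails, release the allocation made for it before returning (helper names hypothetical).

    #include <stdlib.h>

    struct channel { int base_queue; };

    extern int  setup_channel(struct channel *ch);     /* may fail */
    extern void register_channel(struct channel *ch);  /* keeps ch alive */

    static int setup_macvlan_demo(void)
    {
        struct channel *ch = calloc(1, sizeof(*ch));
        if (!ch)
            return -1;
        if (setup_channel(ch)) {
            free(ch);        /* the fix: do not leak ch on failure */
            return -1;
        }
        register_channel(ch);
        return 0;
    }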
static void exec_command_done(ExecCommand *c) { assert(c); c->path = mfree(c->path); c->argv = strv_free(c->argv); }
0
[ "CWE-269" ]
systemd
f69567cbe26d09eac9d387c0be0fc32c65a83ada
117,653,389,487,846,100,000,000,000,000,000,000,000
6
core: expose SUID/SGID restriction as new unit setting RestrictSUIDSGID=
void WebContents::DownloadURL(const GURL& url) { auto* browser_context = web_contents()->GetBrowserContext(); auto* download_manager = content::BrowserContext::GetDownloadManager(browser_context); std::unique_ptr<download::DownloadUrlParameters> download_params( content::DownloadRequestUtils::CreateDownloadForWebContentsMainFrame( web_contents(), url, MISSING_TRAFFIC_ANNOTATION)); download_manager->DownloadUrl(std::move(download_params)); }
0
[ "CWE-284", "CWE-693" ]
electron
18613925610ba319da7f497b6deed85ad712c59b
275,298,459,711,571,800,000,000,000,000,000,000,000
9
refactor: wire will-navigate up to a navigation throttle instead of OpenURL (#25108) * refactor: wire will-navigate up to a navigation throttle instead of OpenURL (#25065) * refactor: wire will-navigate up to a navigation throttle instead of OpenURL * spec: add test for x-site _top navigation * chore: old code be old
rad_cvt_int(const void *data) { u_int32_t value; memcpy(&value, data, sizeof value); return ntohl(value); }
0
[ "CWE-119", "CWE-787" ]
php-radius
13c149b051f82b709e8d7cc32111e84b49d57234
278,848,233,905,468,460,000,000,000,000,000,000,000
7
Fix a security issue in radius_get_vendor_attr(). The underlying rad_get_vendor_attr() function assumed that it would always be given valid VSA data. Indeed, the buffer length wasn't even passed in; the assumption was that the length field within the VSA structure would be valid. This could result in denial of service by providing a length that would be beyond the memory limit, or potential arbitrary memory access by providing a length greater than the actual data given. rad_get_vendor_attr() has been changed to require the raw data length be provided, and this is then used to check that the VSA is valid. Conflicts: radlib_vs.h
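A hedged sketch of the validation the fix adds: check the VSA's embedded length against the real buffer length before trusting it (the layout below is a simplified stand-in for the RADIUS vendor attribute format).

    #include <stddef.h>

    /* 4-byte vendor id, then type and len; len covers type+len+data. */
    struct vsa_hdr { unsigned char vendor[4], type, len; };

    static int vsa_valid(const unsigned char *buf, size_t buflen)
    {
        const struct vsa_hdr *v = (const struct vsa_hdr *)buf;
        if (buflen < sizeof(*v))
            return 0;                            /* too short for a header */
        return v->len >= 2 && v->len <= buflen - 4;
    }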
int bnx2x_setup_leading(struct bnx2x *bp) { if (IS_PF(bp)) return bnx2x_setup_queue(bp, &bp->fp[0], true); else /* VF */ return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true); }
0
[ "CWE-20" ]
linux
8914a595110a6eca69a5e275b323f5d09e18f4f9
16,644,354,521,103,460,000,000,000,000,000,000,000
7
bnx2x: disable GSO where gso_size is too big for hardware If a bnx2x card is passed a GSO packet with a gso_size larger than ~9700 bytes, it will cause a firmware error that will bring the card down: bnx2x: [bnx2x_attn_int_deasserted3:4323(enP24p1s0f0)]MC assert! bnx2x: [bnx2x_mc_assert:720(enP24p1s0f0)]XSTORM_ASSERT_LIST_INDEX 0x2 bnx2x: [bnx2x_mc_assert:736(enP24p1s0f0)]XSTORM_ASSERT_INDEX 0x0 = 0x00000000 0x25e43e47 0x00463e01 0x00010052 bnx2x: [bnx2x_mc_assert:750(enP24p1s0f0)]Chip Revision: everest3, FW Version: 7_13_1 ... (dump of values continues) ... Detect when the mac length of a GSO packet is greater than the maximum packet size (9700 bytes) and disable GSO. Signed-off-by: Daniel Axtens <[email protected]> Reviewed-by: Eric Dumazet <[email protected]> Signed-off-by: David S. Miller <[email protected]>
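The mitigation in one predicate: refuse hardware GSO when the resulting segments would exceed what the firmware handles. The 9700-byte threshold comes from the commit message; everything else is illustrative.

    #include <stdbool.h>

    #define HW_MAX_PACKET 9700   /* firmware limit per the commit message */

    static bool gso_size_ok(unsigned int mac_header_len,
                            unsigned int gso_size)
    {
        return mac_header_len + gso_size <= HW_MAX_PACKET;
    }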
int read_wvc_block (WavpackContext *wpc) { WavpackStream *wps = wpc->streams [wpc->current_stream]; int64_t bcount, file2pos; WavpackHeader orig_wphdr; WavpackHeader wphdr; int compare_result; while (1) { file2pos = wpc->reader->get_pos (wpc->wvc_in); bcount = read_next_header (wpc->reader, wpc->wvc_in, &wphdr); if (bcount == (uint32_t) -1) { wps->wvc_skip = TRUE; wpc->crc_errors++; return FALSE; } memcpy (&orig_wphdr, &wphdr, 32); // save original header for verify step if (wpc->open_flags & OPEN_STREAMING) SET_BLOCK_INDEX (wphdr, wps->sample_index = 0); else SET_BLOCK_INDEX (wphdr, GET_BLOCK_INDEX (wphdr) - wpc->initial_index); if (wphdr.flags & INITIAL_BLOCK) wpc->file2pos = file2pos + bcount; compare_result = match_wvc_header (&wps->wphdr, &wphdr); if (!compare_result) { wps->block2buff = malloc (wphdr.ckSize + 8); if (!wps->block2buff) return FALSE; if (wpc->reader->read_bytes (wpc->wvc_in, wps->block2buff + 32, wphdr.ckSize - 24) != wphdr.ckSize - 24) { free (wps->block2buff); wps->block2buff = NULL; wps->wvc_skip = TRUE; wpc->crc_errors++; return FALSE; } memcpy (wps->block2buff, &orig_wphdr, 32); // don't use corrupt blocks if (!WavpackVerifySingleBlock (wps->block2buff, !(wpc->open_flags & OPEN_NO_CHECKSUM))) { free (wps->block2buff); wps->block2buff = NULL; wps->wvc_skip = TRUE; wpc->crc_errors++; return TRUE; } wps->wvc_skip = FALSE; memcpy (wps->block2buff, &wphdr, 32); memcpy (&wps->wphdr, &wphdr, 32); return TRUE; } else if (compare_result == -1) { wps->wvc_skip = TRUE; wpc->reader->set_pos_rel (wpc->wvc_in, -32, SEEK_CUR); wpc->crc_errors++; return TRUE; } } }
0
[ "CWE-125" ]
WavPack
4bc05fc490b66ef2d45b1de26abf1455b486b0dc
106,973,865,621,355,460,000,000,000,000,000,000,000
68
fixes for 4 fuzz failures posted to SourceForge mailing list
static inline int bond_slave_override(struct bonding *bond, struct sk_buff *skb) { int i, res = 1; struct slave *slave = NULL; struct slave *check_slave; if (!skb->queue_mapping) return 1; /* Find out if any slaves have the same mapping as this skb. */ bond_for_each_slave(bond, check_slave, i) { if (check_slave->queue_id == skb->queue_mapping) { slave = check_slave; break; } } /* If the slave isn't UP, use default transmit policy. */ if (slave && slave->queue_id && IS_UP(slave->dev) && (slave->link == BOND_LINK_UP)) { res = bond_dev_queue_xmit(bond, skb, slave->dev); } return res; }
0
[ "CWE-703", "CWE-264" ]
linux
550fd08c2cebad61c548def135f67aba284c6162
206,868,879,236,461,420,000,000,000,000,000,000,000
26
net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared After the last patch, we are left in a state in which only drivers calling ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real hardware call ether_setup for their net_devices and don't hold any state in their skbs). There are a handful of drivers that violate this assumption of course, and need to be fixed up. This patch identifies those drivers, and marks them as not being able to support the safe transmission of skbs by clearing the IFF_TX_SKB_SHARING flag in priv_flags. Signed-off-by: Neil Horman <[email protected]> CC: Karsten Keil <[email protected]> CC: "David S. Miller" <[email protected]> CC: Jay Vosburgh <[email protected]> CC: Andy Gospodarek <[email protected]> CC: Patrick McHardy <[email protected]> CC: Krzysztof Halasa <[email protected]> CC: "John W. Linville" <[email protected]> CC: Greg Kroah-Hartman <[email protected]> CC: Marcel Holtmann <[email protected]> CC: Johannes Berg <[email protected]> Signed-off-by: David S. Miller <[email protected]>
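What each identified driver had to do, sketched: clear the sharing flag that ether_setup() set, because the driver keeps state in its skbs (flag value and struct are illustrative).

    #define IFF_TX_SKB_SHARING 0x10000   /* illustrative bit value */

    struct net_device_demo { unsigned int priv_flags; };

    static void driver_setup(struct net_device_demo *dev)
    {
        /* This driver cannot transmit shared skbs safely: opt out. */
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
    }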
static int tree_entry_interesting(struct tree_desc *desc, const char *base, int baselen, struct diff_options *opt) { const char *path; const unsigned char *sha1; unsigned mode; int i; int pathlen; int never_interesting = -1; if (!opt->nr_paths) return 1; sha1 = tree_entry_extract(desc, &path, &mode); pathlen = tree_entry_len(path, sha1); for (i = 0; i < opt->nr_paths; i++) { const char *match = opt->paths[i]; int matchlen = opt->pathlens[i]; int m = -1; /* signals that we haven't called strncmp() */ if (baselen >= matchlen) { /* If it doesn't match, move along... */ if (strncmp(base, match, matchlen)) continue; /* * The base is a subdirectory of a path which * was specified, so all of them are interesting. */ return 2; } /* Does the base match? */ if (strncmp(base, match, baselen)) continue; match += baselen; matchlen -= baselen; if (never_interesting) { /* * We have not seen any match that sorts later * than the current path. */ /* * Does match sort strictly earlier than path * with their common parts? */ m = strncmp(match, path, (matchlen < pathlen) ? matchlen : pathlen); if (m < 0) continue; /* * If we come here even once, that means there is at * least one pathspec that would sort equal to or * later than the path we are currently looking at. * In other words, if we have never reached this point * after iterating all pathspecs, it means all * pathspecs are either outside of base, or inside the * base but sorts strictly earlier than the current * one. In either case, they will never match the * subsequent entries. In such a case, we initialized * the variable to -1 and that is what will be * returned, allowing the caller to terminate early. */ never_interesting = 0; } if (pathlen > matchlen) continue; if (matchlen > pathlen) { if (match[pathlen] != '/') continue; if (!S_ISDIR(mode)) continue; } if (m == -1) /* * we cheated and did not do strncmp(), so we do * that here. */ m = strncmp(match, path, pathlen); /* * If common part matched earlier then it is a hit, * because we rejected the case where path is not a * leading directory and is shorter than match. */ if (!m) return 1; } return never_interesting; /* No matches */ }
0
[ "CWE-119" ]
git
fd55a19eb1d49ae54008d932a65f79cd6fda45c9
69,693,860,731,494,760,000,000,000,000,000,000,000
98
Fix buffer overflow in git diff If PATH_MAX on your system is smaller than a path stored, it may cause buffer overflow and stack corruption in diff_addremove() and diff_change() functions when running git-diff Signed-off-by: Dmitry Potapov <[email protected]> Signed-off-by: Junio C Hamano <[email protected]>
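A minimal sketch of the pattern the fix message implies: never assume a combined path fits in a fixed PATH_MAX buffer; size the destination from the component lengths instead. join_path() is illustrative, not git's actual change.

#include <stdlib.h>
#include <string.h>

char *join_path(const char *base, size_t baselen, const char *name)
{
	size_t namelen = strlen(name);
	char *out = malloc(baselen + namelen + 2);	/* '/' plus NUL */

	if (!out)
		return NULL;
	memcpy(out, base, baselen);
	out[baselen] = '/';
	memcpy(out + baselen + 1, name, namelen + 1);	/* copies the NUL too */
	return out;
}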
static int __net_init sctp_ctrlsock_init(struct net *net) { int status; /* Initialize the control inode/socket for handling OOTB packets. */ status = sctp_ctl_sock_init(net); if (status) pr_err("Failed to initialize the SCTP control sock\n"); return status; }
0
[ "CWE-119", "CWE-787" ]
linux
8e2d61e0aed2b7c4ecb35844fe07e0b2b762dee4
81,472,219,333,576,450,000,000,000,000,000,000,000
11
sctp: fix race on protocol/netns initialization Consider the sctp module is unloaded and is being requested because a user is creating an sctp socket. During initialization, sctp will add the new protocol type and then initialize pernet subsys: status = sctp_v4_protosw_init(); if (status) goto err_protosw_init; status = sctp_v6_protosw_init(); if (status) goto err_v6_protosw_init; status = register_pernet_subsys(&sctp_net_ops); The problem is that after those calls to sctp_v{4,6}_protosw_init(), it is possible for userspace to create SCTP sockets as if the module is already fully loaded. If that happens, one of the possible effects is that we will have readers for net->sctp.local_addr_list earlier than expected and sctp_net_init() does not take precautions while dealing with that list, leading to a potential panic but not limited to that, as sctp_sock_init() will copy a bunch of blank/partially initialized values from net->sctp. The race happens like this: CPU 0 | CPU 1 socket() | __sock_create | socket() inet_create | __sock_create list_for_each_entry_rcu( | answer, &inetsw[sock->type], | list) { | inet_create /* no hits */ | if (unlikely(err)) { | ... | request_module() | /* socket creation is blocked | * the module is fully loaded | */ | sctp_init | sctp_v4_protosw_init | inet_register_protosw | list_add_rcu(&p->list, | last_perm); | | list_for_each_entry_rcu( | answer, &inetsw[sock->type], sctp_v6_protosw_init | list) { | /* hit, so assumes protocol | * is already loaded | */ | /* socket creation continues | * before netns is initialized | */ register_pernet_subsys | Simply inverting the initialization order between register_pernet_subsys() and sctp_v4_protosw_init() is not possible because register_pernet_subsys() will create a control sctp socket, so the protocol must be already visible by then. Deferring the socket creation to a work-queue is not good, especially because we lose the ability to handle its errors. So, as suggested by Vlad, the fix is to split netns initialization in two moments: defaults and control socket, so that the defaults are already loaded by when we register the protocol, while control socket initialization is kept at the same moment it is today. Fixes: 4db67e808640 ("sctp: Make the address lists per network namespace") Signed-off-by: Vlad Yasevich <[email protected]> Signed-off-by: Marcelo Ricardo Leitner <[email protected]> Signed-off-by: David S. Miller <[email protected]>
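A conceptual sketch of the two-phase ordering the message describes: pernet defaults are registered before the protocol becomes visible to socket(), while the control socket (which itself needs the protocol) is created afterwards. Every function below is an illustrative stand-in, not the kernel's.

/* stand-ins for the real registration steps */
static int  register_defaults(void)   { return 0; }  /* net->sctp defaults */
static int  register_protosw(void)    { return 0; }  /* protocol becomes visible */
static int  register_ctrlsock(void)   { return 0; }  /* needs the protocol */
static void unregister_protosw(void)  { }
static void unregister_defaults(void) { }

static int proto_init(void)
{
	int status;

	status = register_defaults();	/* phase 1: usable before visibility */
	if (status)
		return status;
	status = register_protosw();
	if (status)
		goto err_protosw;
	status = register_ctrlsock();	/* phase 2: after visibility */
	if (status)
		goto err_ctrlsock;
	return 0;

err_ctrlsock:
	unregister_protosw();
err_protosw:
	unregister_defaults();
	return status;
}

int main(void) { return proto_init(); }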
int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key, struct btrfs_path *p, u64 time_seq) { struct btrfs_fs_info *fs_info = root->fs_info; struct extent_buffer *b; int slot; int ret; int err; int level; int lowest_unlock = 1; u8 lowest_level = 0; lowest_level = p->lowest_level; WARN_ON(p->nodes[0] != NULL); if (p->search_commit_root) { BUG_ON(time_seq); return btrfs_search_slot(NULL, root, key, p, 0, 0); } again: b = get_old_root(root, time_seq); if (!b) { ret = -EIO; goto done; } level = btrfs_header_level(b); p->locks[level] = BTRFS_READ_LOCK; while (b) { int dec = 0; level = btrfs_header_level(b); p->nodes[level] = b; /* * we have a lock on b and as long as we aren't changing * the tree, there is no way to for the items in b to change. * It is safe to drop the lock on our parent before we * go through the expensive btree search on b. */ btrfs_unlock_up_safe(p, level + 1); ret = btrfs_bin_search(b, key, &slot); if (ret < 0) goto done; if (level == 0) { p->slots[level] = slot; unlock_up(p, level, lowest_unlock, 0, NULL); goto done; } if (ret && slot > 0) { dec = 1; slot--; } p->slots[level] = slot; unlock_up(p, level, lowest_unlock, 0, NULL); if (level == lowest_level) { if (dec) p->slots[level]++; goto done; } err = read_block_for_search(root, p, &b, level, slot, key); if (err == -EAGAIN) goto again; if (err) { ret = err; goto done; } level = btrfs_header_level(b); btrfs_tree_read_lock(b); b = tree_mod_log_rewind(fs_info, p, b, time_seq); if (!b) { ret = -ENOMEM; goto done; } p->locks[level] = BTRFS_READ_LOCK; p->nodes[level] = b; } ret = 1; done: if (ret < 0) btrfs_release_path(p); return ret; }
0
[ "CWE-362" ]
linux
dbcc7d57bffc0c8cac9dac11bec548597d59a6a5
8,869,117,548,569,977,000,000,000,000,000,000,000
91
btrfs: fix race when cloning extent buffer during rewind of an old root While resolving backreferences, as part of a logical ino ioctl call or fiemap, we can end up hitting a BUG_ON() when replaying tree mod log operations of a root, triggering a stack trace like the following: ------------[ cut here ]------------ kernel BUG at fs/btrfs/ctree.c:1210! invalid opcode: 0000 [#1] SMP KASAN PTI CPU: 1 PID: 19054 Comm: crawl_335 Tainted: G W 5.11.0-2d11c0084b02-misc-next+ #89 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014 RIP: 0010:__tree_mod_log_rewind+0x3b1/0x3c0 Code: 05 48 8d 74 10 (...) RSP: 0018:ffffc90001eb70b8 EFLAGS: 00010297 RAX: 0000000000000000 RBX: ffff88812344e400 RCX: ffffffffb28933b6 RDX: 0000000000000007 RSI: dffffc0000000000 RDI: ffff88812344e42c RBP: ffffc90001eb7108 R08: 1ffff11020b60a20 R09: ffffed1020b60a20 R10: ffff888105b050f9 R11: ffffed1020b60a1f R12: 00000000000000ee R13: ffff8880195520c0 R14: ffff8881bc958500 R15: ffff88812344e42c FS: 00007fd1955e8700(0000) GS:ffff8881f5600000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007efdb7928718 CR3: 000000010103a006 CR4: 0000000000170ee0 Call Trace: btrfs_search_old_slot+0x265/0x10d0 ? lock_acquired+0xbb/0x600 ? btrfs_search_slot+0x1090/0x1090 ? free_extent_buffer.part.61+0xd7/0x140 ? free_extent_buffer+0x13/0x20 resolve_indirect_refs+0x3e9/0xfc0 ? lock_downgrade+0x3d0/0x3d0 ? __kasan_check_read+0x11/0x20 ? add_prelim_ref.part.11+0x150/0x150 ? lock_downgrade+0x3d0/0x3d0 ? __kasan_check_read+0x11/0x20 ? lock_acquired+0xbb/0x600 ? __kasan_check_write+0x14/0x20 ? do_raw_spin_unlock+0xa8/0x140 ? rb_insert_color+0x30/0x360 ? prelim_ref_insert+0x12d/0x430 find_parent_nodes+0x5c3/0x1830 ? resolve_indirect_refs+0xfc0/0xfc0 ? lock_release+0xc8/0x620 ? fs_reclaim_acquire+0x67/0xf0 ? lock_acquire+0xc7/0x510 ? lock_downgrade+0x3d0/0x3d0 ? lockdep_hardirqs_on_prepare+0x160/0x210 ? lock_release+0xc8/0x620 ? fs_reclaim_acquire+0x67/0xf0 ? lock_acquire+0xc7/0x510 ? poison_range+0x38/0x40 ? unpoison_range+0x14/0x40 ? trace_hardirqs_on+0x55/0x120 btrfs_find_all_roots_safe+0x142/0x1e0 ? find_parent_nodes+0x1830/0x1830 ? btrfs_inode_flags_to_xflags+0x50/0x50 iterate_extent_inodes+0x20e/0x580 ? tree_backref_for_extent+0x230/0x230 ? lock_downgrade+0x3d0/0x3d0 ? read_extent_buffer+0xdd/0x110 ? lock_downgrade+0x3d0/0x3d0 ? __kasan_check_read+0x11/0x20 ? lock_acquired+0xbb/0x600 ? __kasan_check_write+0x14/0x20 ? _raw_spin_unlock+0x22/0x30 ? __kasan_check_write+0x14/0x20 iterate_inodes_from_logical+0x129/0x170 ? iterate_inodes_from_logical+0x129/0x170 ? btrfs_inode_flags_to_xflags+0x50/0x50 ? iterate_extent_inodes+0x580/0x580 ? __vmalloc_node+0x92/0xb0 ? init_data_container+0x34/0xb0 ? init_data_container+0x34/0xb0 ? kvmalloc_node+0x60/0x80 btrfs_ioctl_logical_to_ino+0x158/0x230 btrfs_ioctl+0x205e/0x4040 ? __might_sleep+0x71/0xe0 ? btrfs_ioctl_get_supported_features+0x30/0x30 ? getrusage+0x4b6/0x9c0 ? __kasan_check_read+0x11/0x20 ? lock_release+0xc8/0x620 ? __might_fault+0x64/0xd0 ? lock_acquire+0xc7/0x510 ? lock_downgrade+0x3d0/0x3d0 ? lockdep_hardirqs_on_prepare+0x210/0x210 ? lockdep_hardirqs_on_prepare+0x210/0x210 ? __kasan_check_read+0x11/0x20 ? do_vfs_ioctl+0xfc/0x9d0 ? ioctl_file_clone+0xe0/0xe0 ? lock_downgrade+0x3d0/0x3d0 ? lockdep_hardirqs_on_prepare+0x210/0x210 ? __kasan_check_read+0x11/0x20 ? lock_release+0xc8/0x620 ? __task_pid_nr_ns+0xd3/0x250 ? lock_acquire+0xc7/0x510 ? __fget_files+0x160/0x230 ? 
__fget_light+0xf2/0x110 __x64_sys_ioctl+0xc3/0x100 do_syscall_64+0x37/0x80 entry_SYSCALL_64_after_hwframe+0x44/0xa9 RIP: 0033:0x7fd1976e2427 Code: 00 00 90 48 8b 05 (...) RSP: 002b:00007fd1955e5cf8 EFLAGS: 00000246 ORIG_RAX: 0000000000000010 RAX: ffffffffffffffda RBX: 00007fd1955e5f40 RCX: 00007fd1976e2427 RDX: 00007fd1955e5f48 RSI: 00000000c038943b RDI: 0000000000000004 RBP: 0000000001000000 R08: 0000000000000000 R09: 00007fd1955e6120 R10: 0000557835366b00 R11: 0000000000000246 R12: 0000000000000004 R13: 00007fd1955e5f48 R14: 00007fd1955e5f40 R15: 00007fd1955e5ef8 Modules linked in: ---[ end trace ec8931a1c36e57be ]--- (gdb) l *(__tree_mod_log_rewind+0x3b1) 0xffffffff81893521 is in __tree_mod_log_rewind (fs/btrfs/ctree.c:1210). 1205 * the modification. as we're going backwards, we do the 1206 * opposite of each operation here. 1207 */ 1208 switch (tm->op) { 1209 case MOD_LOG_KEY_REMOVE_WHILE_FREEING: 1210 BUG_ON(tm->slot < n); 1211 fallthrough; 1212 case MOD_LOG_KEY_REMOVE_WHILE_MOVING: 1213 case MOD_LOG_KEY_REMOVE: 1214 btrfs_set_node_key(eb, &tm->key, tm->slot); Here's what happens to hit that BUG_ON(): 1) We have one tree mod log user (through fiemap or the logical ino ioctl), with a sequence number of 1, so we have fs_info->tree_mod_seq == 1; 2) Another task is at ctree.c:balance_level() and we have eb X currently as the root of the tree, and we promote its single child, eb Y, as the new root. Then, at ctree.c:balance_level(), we call: tree_mod_log_insert_root(eb X, eb Y, 1); 3) At tree_mod_log_insert_root() we create tree mod log elements for each slot of eb X, of operation type MOD_LOG_KEY_REMOVE_WHILE_FREEING each with a ->logical pointing to ebX->start. These are placed in an array named tm_list. Lets assume there are N elements (N pointers in eb X); 4) Then, still at tree_mod_log_insert_root(), we create a tree mod log element of operation type MOD_LOG_ROOT_REPLACE, ->logical set to ebY->start, ->old_root.logical set to ebX->start, ->old_root.level set to the level of eb X and ->generation set to the generation of eb X; 5) Then tree_mod_log_insert_root() calls tree_mod_log_free_eb() with tm_list as argument. After that, tree_mod_log_free_eb() calls __tree_mod_log_insert() for each member of tm_list in reverse order, from highest slot in eb X, slot N - 1, to slot 0 of eb X; 6) __tree_mod_log_insert() sets the sequence number of each given tree mod log operation - it increments fs_info->tree_mod_seq and sets fs_info->tree_mod_seq as the sequence number of the given tree mod log operation. This means that for the tm_list created at tree_mod_log_insert_root(), the element corresponding to slot 0 of eb X has the highest sequence number (1 + N), and the element corresponding to the last slot has the lowest sequence number (2); 7) Then, after inserting tm_list's elements into the tree mod log rbtree, the MOD_LOG_ROOT_REPLACE element is inserted, which gets the highest sequence number, which is N + 2; 8) Back to ctree.c:balance_level(), we free eb X by calling btrfs_free_tree_block() on it. 
Because eb X was created in the current transaction, has no other references and writeback did not happen for it, we add it back to the free space cache/tree; 9) Later some other task T allocates the metadata extent from eb X, since it is marked as free space in the space cache/tree, and uses it as a node for some other btree; 10) The tree mod log user task calls btrfs_search_old_slot(), which calls get_old_root(), and finally that calls __tree_mod_log_oldest_root() with time_seq == 1 and eb_root == eb Y; 11) First iteration of the while loop finds the tree mod log element with sequence number N + 2, for the logical address of eb Y and of type MOD_LOG_ROOT_REPLACE; 12) Because the operation type is MOD_LOG_ROOT_REPLACE, we don't break out of the loop, and set root_logical to point to tm->old_root.logical which corresponds to the logical address of eb X; 13) On the next iteration of the while loop, the call to tree_mod_log_search_oldest() returns the smallest tree mod log element for the logical address of eb X, which has a sequence number of 2, an operation type of MOD_LOG_KEY_REMOVE_WHILE_FREEING and corresponds to the old slot N - 1 of eb X (eb X had N items in it before being freed); 14) We then break out of the while loop and return the tree mod log operation of type MOD_LOG_ROOT_REPLACE (eb Y), and not the one for slot N - 1 of eb X, to get_old_root(); 15) At get_old_root(), we process the MOD_LOG_ROOT_REPLACE operation and set "logical" to the logical address of eb X, which was the old root. We then call tree_mod_log_search() passing it the logical address of eb X and time_seq == 1; 16) Then before calling tree_mod_log_search(), task T adds a key to eb X, which results in adding a tree mod log operation of type MOD_LOG_KEY_ADD to the tree mod log - this is done at ctree.c:insert_ptr() - but after adding the tree mod log operation and before updating the number of items in eb X from 0 to 1... 17) The task at get_old_root() calls tree_mod_log_search() and gets the tree mod log operation of type MOD_LOG_KEY_ADD just added by task T. Then it enters the following if branch: if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) { (...) } (...) Calls read_tree_block() for eb X, which gets a reference on eb X but does not lock it - task T has it locked. Then it clones eb X while it has nritems set to 0 in its header, before task T sets nritems to 1 in eb X's header. From hereupon we use the clone of eb X which no other task has access to; 18) Then we call __tree_mod_log_rewind(), passing it the MOD_LOG_KEY_ADD mod log operation we just got from tree_mod_log_search() in the previous step and the cloned version of eb X; 19) At __tree_mod_log_rewind(), we set the local variable "n" to the number of items set in eb X's clone, which is 0. Then we enter the while loop, and in its first iteration we process the MOD_LOG_KEY_ADD operation, which just decrements "n" from 0 to (u32)-1, since "n" is declared with a type of u32. At the end of this iteration we call rb_next() to find the next tree mod log operation for eb X, that gives us the mod log operation of type MOD_LOG_KEY_REMOVE_WHILE_FREEING, for slot 0, with a sequence number of N + 1 (steps 3 to 6); 20) Then we go back to the top of the while loop and trigger the following BUG_ON(): (...) switch (tm->op) { case MOD_LOG_KEY_REMOVE_WHILE_FREEING: BUG_ON(tm->slot < n); fallthrough; (...) Because "n" has a value of (u32)-1 (4294967295) and tm->slot is 0. 
Fix this by taking a read lock on the extent buffer before cloning it at ctree.c:get_old_root(). This should be done regardless of the extent buffer having been freed and reused, as a concurrent task might be modifying it (while holding a write lock on it). Reported-by: Zygo Blaxell <[email protected]> Link: https://lore.kernel.org/linux-btrfs/[email protected]/ Fixes: 834328a8493079 ("Btrfs: tree mod log's old roots could still be part of the tree") CC: [email protected] # 4.4+ Signed-off-by: Filipe Manana <[email protected]> Signed-off-by: David Sterba <[email protected]>
fix_assignment_words (words) WORD_LIST *words; { WORD_LIST *w, *wcmd; struct builtin *b; int assoc; if (words == 0) return; b = 0; assoc = 0; wcmd = words; for (w = words; w; w = w->next) if (w->word->flags & W_ASSIGNMENT) { if (b == 0) { /* Posix (post-2008) says that `command' doesn't change whether or not the builtin it shadows is a `declaration command', even though it removes other special builtin properties. In Posix mode, we skip over one or more instances of `command' and deal with the next word as the assignment builtin. */ while (posixly_correct && wcmd && wcmd->word && wcmd->word->word && STREQ (wcmd->word->word, "command")) wcmd = wcmd->next; b = builtin_address_internal (wcmd->word->word, 0); if (b == 0 || (b->flags & ASSIGNMENT_BUILTIN) == 0) return; else if (b && (b->flags & ASSIGNMENT_BUILTIN)) wcmd->word->flags |= W_ASSNBLTIN; } w->word->flags |= (W_NOSPLIT|W_NOGLOB|W_TILDEEXP|W_ASSIGNARG); #if defined (ARRAY_VARS) if (assoc) w->word->flags |= W_ASSIGNASSOC; #endif } #if defined (ARRAY_VARS) /* Note that we saw an associative array option to a builtin that takes assignment statements. This is a bit of a kludge. */ else if (w->word->word[0] == '-' && strchr (w->word->word, 'A')) { if (b == 0) { while (posixly_correct && wcmd && wcmd->word && wcmd->word->word && STREQ (wcmd->word->word, "command")) wcmd = wcmd->next; b = builtin_address_internal (wcmd->word->word, 0); if (b == 0 || (b->flags & ASSIGNMENT_BUILTIN) == 0) return; else if (b && (b->flags & ASSIGNMENT_BUILTIN)) wcmd->word->flags |= W_ASSNBLTIN; } if (wcmd->word->flags & W_ASSNBLTIN) assoc = 1; } #endif }
0
[]
bash
863d31ae775d56b785dc5b0105b6d251515d81d5
252,403,824,827,696,700,000,000,000,000,000,000,000
58
commit bash-20120224 snapshot
UnicodeString::copyFrom(const UnicodeString &src, UBool fastCopy) { // if assigning to ourselves, do nothing if(this == &src) { return *this; } // is the right side bogus? if(src.isBogus()) { setToBogus(); return *this; } // delete the current contents releaseArray(); if(src.isEmpty()) { // empty string - use the stack buffer setToEmpty(); return *this; } // fLength>0 and not an "open" src.getBuffer(minCapacity) fUnion.fFields.fLengthAndFlags = src.fUnion.fFields.fLengthAndFlags; switch(src.fUnion.fFields.fLengthAndFlags & kAllStorageFlags) { case kShortString: // short string using the stack buffer, do the same uprv_memcpy(fUnion.fStackFields.fBuffer, src.fUnion.fStackFields.fBuffer, getShortLength() * U_SIZEOF_UCHAR); break; case kLongString: // src uses a refCounted string buffer, use that buffer with refCount // src is const, use a cast - we don't actually change it ((UnicodeString &)src).addRef(); // copy all fields, share the reference-counted buffer fUnion.fFields.fArray = src.fUnion.fFields.fArray; fUnion.fFields.fCapacity = src.fUnion.fFields.fCapacity; if(!hasShortLength()) { fUnion.fFields.fLength = src.fUnion.fFields.fLength; } break; case kReadonlyAlias: if(fastCopy) { // src is a readonly alias, do the same // -> maintain the readonly alias as such fUnion.fFields.fArray = src.fUnion.fFields.fArray; fUnion.fFields.fCapacity = src.fUnion.fFields.fCapacity; if(!hasShortLength()) { fUnion.fFields.fLength = src.fUnion.fFields.fLength; } break; } // else if(!fastCopy) fall through to case kWritableAlias // -> allocate a new buffer and copy the contents U_FALLTHROUGH; case kWritableAlias: { // src is a writable alias; we make a copy of that instead int32_t srcLength = src.length(); if(allocate(srcLength)) { u_memcpy(getArrayStart(), src.getArrayStart(), srcLength); setLength(srcLength); break; } // if there is not enough memory, then fall through to setting to bogus U_FALLTHROUGH; } default: // if src is bogus, set ourselves to bogus // do not call setToBogus() here because fArray and flags are not consistent here fUnion.fFields.fLengthAndFlags = kIsBogus; fUnion.fFields.fArray = 0; fUnion.fFields.fCapacity = 0; break; } return *this; }
0
[ "CWE-190", "CWE-787" ]
icu
b7d08bc04a4296982fcef8b6b8a354a9e4e7afca
304,550,123,016,949,600,000,000,000,000,000,000,000
76
ICU-20958 Prevent SEGV_MAPERR in append. See #971
size_t parse_core(const char *s, size_t n, SemanticValues &sv, Context & /*c*/, any &dt) const override { assert(fn_); return fn_(s, n, sv, dt); }
0
[ "CWE-125" ]
cpp-peglib
b3b29ce8f3acf3a32733d930105a17d7b0ba347e
40,414,186,725,827,560,000,000,000,000,000,000,000
5
Fix #122
rtadv_init (struct zebra_vrf *zvrf) { zvrf->rtadv.sock = rtadv_make_socket (zvrf->vrf_id); }
0
[ "CWE-119", "CWE-787" ]
quagga
cfb1fae25f8c092e0d17073eaf7bd428ce1cd546
178,806,735,969,894,150,000,000,000,000,000,000,000
4
zebra: stack overrun in IPv6 RA receive code (CVE-2016-1245) The IPv6 RA code also receives ICMPv6 RS and RA messages. Unfortunately, by bad coding practice, the buffer size specified on receiving such messages mixed up 2 constants that in fact have different values. The code itself has: #define RTADV_MSG_SIZE 4096 While BUFSIZ is system-dependent, in my case (x86_64 glibc): /usr/include/_G_config.h:#define _G_BUFSIZ 8192 /usr/include/libio.h:#define _IO_BUFSIZ _G_BUFSIZ /usr/include/stdio.h:# define BUFSIZ _IO_BUFSIZ FreeBSD, OpenBSD, NetBSD and Illumos are not affected, since all of them have BUFSIZ == 1024. As the latter is passed to the kernel on recvmsg(), it's possible to overwrite 4kB of stack -- with ICMPv6 packets that can be globally sent to any of the system's addresses (using fragmentation to get to 8k). (The socket has filters installed limiting this to RS and RA packets, but does not have a filter for source address or TTL.) Issue discovered by trying to test other stuff, which randomly caused the stack to be smaller than 8kB in that code location, which then causes the kernel to report EFAULT (Bad address). Signed-off-by: David Lamparter <[email protected]> Reviewed-by: Donald Sharp <[email protected]>
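A standalone sketch of the mistake class the message describes: the length handed to recvmsg() must come from the same constant that sized the receive buffer. ra_recv() is illustrative, not quagga's actual receive path; only RTADV_MSG_SIZE comes from the message.

#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>

#define RTADV_MSG_SIZE 4096	/* quagga's own constant */

ssize_t ra_recv(int sock)
{
	unsigned char buf[RTADV_MSG_SIZE];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	/* the bug class: passing BUFSIZ (8192 on glibc) here instead of
	   sizeof(buf) lets the kernel write past the 4 kB stack buffer */
	return recvmsg(sock, &msg, 0);
}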
Field *Field::make_new_field(MEM_ROOT *root, TABLE *new_table, bool keep_type __attribute__((unused))) { Field *tmp; if (!(tmp= (Field*) memdup_root(root,(char*) this,size_of()))) return 0; if (tmp->table->maybe_null) tmp->flags&= ~NOT_NULL_FLAG; tmp->table= new_table; tmp->key_start.init(0); tmp->part_of_key.init(0); tmp->part_of_sortkey.init(0); /* TODO: it is not clear why this method needs to reset unireg_check. Try not to reset it, or explain why it needs to be reset. */ tmp->unireg_check= Field::NONE; tmp->flags&= (NOT_NULL_FLAG | BLOB_FLAG | UNSIGNED_FLAG | ZEROFILL_FLAG | BINARY_FLAG | ENUM_FLAG | SET_FLAG); tmp->reset_fields(); return tmp; }
0
[ "CWE-120" ]
server
eca207c46293bc72dd8d0d5622153fab4d3fccf1
31,956,181,316,074,860,000,000,000,000,000,000,000
23
MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size. Precision should be kept below DECIMAL_MAX_SCALE for computations. It can be bigger in Item_decimal. I'd fix this too, but it changes the existing behaviour, so it is problematic to fix.
static uint32_t fdctrl_read (void *opaque, uint32_t reg) { FDCtrl *fdctrl = opaque; uint32_t retval; reg &= 7; switch (reg) { case FD_REG_SRA: retval = fdctrl_read_statusA(fdctrl); break; case FD_REG_SRB: retval = fdctrl_read_statusB(fdctrl); break; case FD_REG_DOR: retval = fdctrl_read_dor(fdctrl); break; case FD_REG_TDR: retval = fdctrl_read_tape(fdctrl); break; case FD_REG_MSR: retval = fdctrl_read_main_status(fdctrl); break; case FD_REG_FIFO: retval = fdctrl_read_data(fdctrl); break; case FD_REG_DIR: retval = fdctrl_read_dir(fdctrl); break; default: retval = (uint32_t)(-1); break; } FLOPPY_DPRINTF("read reg%d: 0x%02x\n", reg & 7, retval); return retval; }
0
[ "CWE-119" ]
qemu
e907746266721f305d67bc0718795fedee2e824c
101,573,293,479,976,000,000,000,000,000,000,000,000
36
fdc: force the fifo access to be in bounds of the allocated buffer During processing of certain commands such as FD_CMD_READ_ID and FD_CMD_DRIVE_SPECIFICATION_COMMAND the fifo memory access could get out of bounds leading to memory corruption with values coming from the guest. Fix this by making sure that the index is always bounded by the allocated memory. This is CVE-2015-3456. Signed-off-by: Petr Matousek <[email protected]> Reviewed-by: John Snow <[email protected]> Signed-off-by: John Snow <[email protected]>
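A minimal sketch of the CVE-2015-3456 pattern: every FIFO access goes through an index reduced into the bounds of the allocated buffer, so command sequences that desynchronize data_pos can no longer read or write past it. fdc_state and FIFO_SIZE are stand-ins; the real fix bounds the index by QEMU's own sector length.

#include <stdint.h>

#define FIFO_SIZE 512	/* assumed size */

typedef struct {
	uint8_t  fifo[FIFO_SIZE];
	uint32_t data_pos;
} fdc_state;

static uint8_t fifo_read(fdc_state *s)
{
	uint32_t pos = s->data_pos++;

	pos %= FIFO_SIZE;	/* force the access in bounds */
	return s->fifo[pos];
}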
TPMT_KDF_SCHEME_Unmarshal(TPMT_KDF_SCHEME *target, BYTE **buffer, INT32 *size, BOOL allowNull) { TPM_RC rc = TPM_RC_SUCCESS; if (rc == TPM_RC_SUCCESS) { rc = TPMI_ALG_KDF_Unmarshal(&target->scheme, buffer, size, allowNull); } if (rc == TPM_RC_SUCCESS) { rc = TPMU_KDF_SCHEME_Unmarshal(&target->details, buffer, size, target->scheme); } return rc; }
0
[ "CWE-787" ]
libtpms
5cc98a62dc6f204dcf5b87c2ee83ac742a6a319b
216,508,979,686,384,170,000,000,000,000,000,000,000
12
tpm2: Restore original value if unmarshalled value was illegal Restore the original value of the memory location where data from a stream was unmarshalled and the unmarshalled value was found to be illegal. The goal is to not keep illegal values in memory. Signed-off-by: Stefan Berger <[email protected]>
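A hedged sketch of the commit's rule, applied in miniature: remember the destination before unmarshalling so it can be restored when the decoded value turns out to be illegal, rather than leaving the bad value in memory. The type, legal range and return codes below are stand-ins for the TPM ones.

#include <stdint.h>

typedef uint16_t TPM_ALG_ID;

#define ALG_FIRST 0x0001	/* assumed legal range */
#define ALG_LAST  0x0044
#define RC_VALUE  0x0084
#define RC_OK     0

int alg_unmarshal(TPM_ALG_ID *target, uint8_t **buffer, int32_t *size)
{
	TPM_ALG_ID orig = *target;	/* remember the original value */

	if (*size < 2)
		return RC_VALUE;
	*target = (TPM_ALG_ID)(((*buffer)[0] << 8) | (*buffer)[1]);
	*buffer += 2;
	*size   -= 2;

	if (*target < ALG_FIRST || *target > ALG_LAST) {
		*target = orig;		/* don't keep the illegal value in memory */
		return RC_VALUE;
	}
	return RC_OK;
}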
static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) { struct cmng_init_input input; memset(&input, 0, sizeof(struct cmng_init_input)); input.port_rate = bp->link_vars.line_speed; if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) { int vn; /* read mf conf from shmem */ if (read_cfg) bnx2x_read_mf_cfg(bp); /* vn_weight_sum and enable fairness if not 0 */ bnx2x_calc_vn_min(bp, &input); /* calculate and set min-max rate for each vn */ if (bp->port.pmf) for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) bnx2x_calc_vn_max(bp, vn, &input); /* always enable rate shaping and fairness */ input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; bnx2x_init_cmng(&input, &bp->cmng); return; } /* rate shaping and fairness are disabled */ DP(NETIF_MSG_IFUP, "rate shaping and fairness are disabled\n"); }
0
[ "CWE-20" ]
linux
8914a595110a6eca69a5e275b323f5d09e18f4f9
101,017,993,665,850,380,000,000,000,000,000,000,000
34
bnx2x: disable GSO where gso_size is too big for hardware If a bnx2x card is passed a GSO packet with a gso_size larger than ~9700 bytes, it will cause a firmware error that will bring the card down: bnx2x: [bnx2x_attn_int_deasserted3:4323(enP24p1s0f0)]MC assert! bnx2x: [bnx2x_mc_assert:720(enP24p1s0f0)]XSTORM_ASSERT_LIST_INDEX 0x2 bnx2x: [bnx2x_mc_assert:736(enP24p1s0f0)]XSTORM_ASSERT_INDEX 0x0 = 0x00000000 0x25e43e47 0x00463e01 0x00010052 bnx2x: [bnx2x_mc_assert:750(enP24p1s0f0)]Chip Revision: everest3, FW Version: 7_13_1 ... (dump of values continues) ... Detect when the mac length of a GSO packet is greater than the maximum packet size (9700 bytes) and disable GSO. Signed-off-by: Daniel Axtens <[email protected]> Reviewed-by: Eric Dumazet <[email protected]> Signed-off-by: David S. Miller <[email protected]>
RI_FKey_fk_upd_check_required(Trigger *trigger, Relation fk_rel, HeapTuple old_row, HeapTuple new_row) { const RI_ConstraintInfo *riinfo; /* * Get arguments. */ riinfo = ri_FetchConstraintInfo(trigger, fk_rel, false); switch (riinfo->confmatchtype) { case FKCONSTR_MATCH_SIMPLE: /* * If any new key value is NULL, the row must satisfy the * constraint, so no check is needed. */ if (ri_NullCheck(new_row, riinfo, false) != RI_KEYS_NONE_NULL) return false; /* * If the original row was inserted by our own transaction, we * must fire the trigger whether or not the keys are equal. This * is because our UPDATE will invalidate the INSERT so that the * INSERT RI trigger will not do anything; so we had better do the * UPDATE check. (We could skip this if we knew the INSERT * trigger already fired, but there is no easy way to know that.) */ if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(old_row->t_data))) return true; /* If all old and new key values are equal, no check is needed */ if (ri_KeysEqual(fk_rel, old_row, new_row, riinfo, false)) return false; /* Else we need to fire the trigger. */ return true; case FKCONSTR_MATCH_FULL: /* * If all new key values are NULL, the row must satisfy the * constraint, so no check is needed. On the other hand, if only * some of them are NULL, the row must fail the constraint. We * must not throw error here, because the row might get * invalidated before the constraint is to be checked, but we * should queue the event to apply the check later. */ switch (ri_NullCheck(new_row, riinfo, false)) { case RI_KEYS_ALL_NULL: return false; case RI_KEYS_SOME_NULL: return true; case RI_KEYS_NONE_NULL: break; /* continue with the check */ } /* * If the original row was inserted by our own transaction, we * must fire the trigger whether or not the keys are equal. This * is because our UPDATE will invalidate the INSERT so that the * INSERT RI trigger will not do anything; so we had better do the * UPDATE check. (We could skip this if we knew the INSERT * trigger already fired, but there is no easy way to know that.) */ if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(old_row->t_data))) return true; /* If all old and new key values are equal, no check is needed */ if (ri_KeysEqual(fk_rel, old_row, new_row, riinfo, false)) return false; /* Else we need to fire the trigger. */ return true; /* Handle MATCH PARTIAL check. */ case FKCONSTR_MATCH_PARTIAL: ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("MATCH PARTIAL not yet implemented"))); break; default: elog(ERROR, "unrecognized confmatchtype: %d", riinfo->confmatchtype); break; } /* Never reached */ return false; }
0
[ "CWE-209" ]
postgres
804b6b6db4dcfc590a468e7be390738f9f7755fb
313,698,671,624,095,900,000,000,000,000,000,000,000
93
Fix column-privilege leak in error-message paths While building error messages to return to the user, BuildIndexValueDescription, ExecBuildSlotValueDescription and ri_ReportViolation would happily include the entire key or entire row in the result returned to the user, even if the user didn't have access to view all of the columns being included. Instead, include only those columns which the user is providing or which the user has select rights on. If the user does not have any rights to view the table or any of the columns involved then no detail is provided and a NULL value is returned from BuildIndexValueDescription and ExecBuildSlotValueDescription. Note that, for key cases, the user must have access to all of the columns for the key to be shown; a partial key will not be returned. Further, in master only, do not return any data for cases where row security is enabled on the relation and row security should be applied for the user. This required a bit of refactoring and moving of things around related to RLS- note the addition of utils/misc/rls.c. Back-patch all the way, as column-level privileges are now in all supported versions. This has been assigned CVE-2014-8161, but since the issue and the patch have already been publicized on pgsql-hackers, there's no point in trying to hide this commit.
PeiStartupAllAPs ( IN CONST EFI_PEI_SERVICES **PeiServices, IN EFI_PEI_MP_SERVICES_PPI *This, IN EFI_AP_PROCEDURE Procedure, IN BOOLEAN SingleThread, IN UINTN TimeoutInMicroSeconds, IN VOID *ProcedureArgument OPTIONAL ) { return MpInitLibStartupAllAPs ( Procedure, SingleThread, NULL, TimeoutInMicroSeconds, ProcedureArgument, NULL ); }
0
[ "CWE-787" ]
edk2
0a0d5296e448fc350de1594c49b9c0deff7fad60
36,589,335,960,978,084,000,000,000,000,000,000,000
18
UefiCpuPkg/CpuMpPei: support stack guard feature This feature is the same as Stack Guard enabled in driver CpuDxe but applies to the PEI phase. Due to the specialty in PEI module dispatching, this driver is changed to do the actual initialization in the notify callback of event gEfiPeiMemoryDiscoveredPpiGuid. This lets the stack guard apply to as many PEI drivers as possible. To let Stack Guard work, some simple page table management code is introduced to set up a Guard page at the base of the stack for each processor. Cc: Eric Dong <[email protected]> Cc: Laszlo Ersek <[email protected]> Cc: Ruiyu Ni <[email protected]> Cc: Jiewen Yao <[email protected]> Cc: Star Zeng <[email protected]> Cc: "Ware, Ryan R" <[email protected]> Contributed-under: TianoCore Contribution Agreement 1.1 Signed-off-by: Jian J Wang <[email protected]> Regression-tested-by: Laszlo Ersek <[email protected]> Reviewed-by: Eric Dong <[email protected]>
connection_dirserv_finish_spooling(dir_connection_t *conn) { if (conn->zlib_state) { connection_write_to_buf_zlib("", 0, conn, 1); tor_zlib_free(conn->zlib_state); conn->zlib_state = NULL; } conn->dir_spool_src = DIR_SPOOL_NONE; return 0; }
0
[ "CWE-264" ]
tor
00fffbc1a15e2696a89c721d0c94dc333ff419ef
88,666,108,673,533,980,000,000,000,000,000,000,000
10
Don't give the Guard flag to relays without the CVE-2011-2768 fix
m4_dumpdef (struct obstack *obs, int argc, token_data **argv) { symbol *s; int i; struct dump_symbol_data data; const builtin *bp; data.obs = obs; data.base = (symbol **) obstack_base (obs); data.size = 0; if (argc == 1) { hack_all_symbols (dump_symbol, &data); } else { for (i = 1; i < argc; i++) { s = lookup_symbol (TOKEN_DATA_TEXT (argv[i]), SYMBOL_LOOKUP); if (s != NULL && SYMBOL_TYPE (s) != TOKEN_VOID) dump_symbol (s, &data); else M4ERROR ((warning_status, 0, "undefined macro `%s'", TOKEN_DATA_TEXT (argv[i]))); } } /* Make table of symbols invisible to expand_macro (). */ obstack_finish (obs); qsort (data.base, data.size, sizeof (symbol *), dumpdef_cmp); for (; data.size > 0; --data.size, data.base++) { DEBUG_PRINT1 ("%s:\t", SYMBOL_NAME (data.base[0])); switch (SYMBOL_TYPE (data.base[0])) { case TOKEN_TEXT: if (debug_level & DEBUG_TRACE_QUOTE) DEBUG_PRINT3 ("%s%s%s\n", lquote.string, SYMBOL_TEXT (data.base[0]), rquote.string); else DEBUG_PRINT1 ("%s\n", SYMBOL_TEXT (data.base[0])); break; case TOKEN_FUNC: bp = find_builtin_by_addr (SYMBOL_FUNC (data.base[0])); if (bp == NULL) { M4ERROR ((warning_status, 0, "\ INTERNAL ERROR: builtin not found in builtin table")); abort (); } DEBUG_PRINT1 ("<%s>\n", bp->name); break; default: M4ERROR ((warning_status, 0, "INTERNAL ERROR: bad token data type in m4_dumpdef ()")); abort (); break; } } }
0
[]
m4
5345bb49077bfda9fabd048e563f9e7077fe335d
268,247,597,836,390,130,000,000,000,000,000,000,000
67
Minor security fix: Quote output of mkstemp. * src/builtin.c (mkstemp_helper): Produce quoted output. * doc/m4.texinfo (Mkstemp): Update the documentation and tests. * NEWS: Document this change. Signed-off-by: Eric Blake <[email protected]> (cherry picked from commit bd9900d65eb9cd5add0f107e94b513fa267495ba)
static void neigh_proxy_process(unsigned long arg) { struct neigh_table *tbl = (struct neigh_table *)arg; long sched_next = 0; unsigned long now = jiffies; struct sk_buff *skb; spin_lock(&tbl->proxy_queue.lock); skb = tbl->proxy_queue.next; while (skb != (struct sk_buff *)&tbl->proxy_queue) { struct sk_buff *back = skb; long tdif = back->stamp.tv_usec - now; skb = skb->next; if (tdif <= 0) { struct net_device *dev = back->dev; __skb_unlink(back, &tbl->proxy_queue); if (tbl->proxy_redo && netif_running(dev)) tbl->proxy_redo(back); else kfree_skb(back); dev_put(dev); } else if (!sched_next || tdif < sched_next) sched_next = tdif; } del_timer(&tbl->proxy_timer); if (sched_next) mod_timer(&tbl->proxy_timer, jiffies + sched_next); spin_unlock(&tbl->proxy_queue.lock); }
0
[ "CWE-200" ]
linux-2.6
9ef1d4c7c7aca1cd436612b6ca785b726ffb8ed8
29,566,678,190,055,754,000,000,000,000,000,000,000
33
[NETLINK]: Missing initializations in dumped data Mostly missing initialization of padding fields of 1 or 2 bytes length, two instances of uninitialized nlmsgerr->msg of 16 bytes length. Signed-off-by: Patrick McHardy <[email protected]> Signed-off-by: David S. Miller <[email protected]>
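A user-space sketch of the leak class the message names: a struct with alignment padding that is copied out verbatim discloses uninitialized memory unless it is zeroed first. wire_msg is illustrative; the kernel patch did the equivalent for netlink dump structures.

#include <stdint.h>
#include <string.h>

struct wire_msg {
	uint8_t  family;
	/* 3 bytes of implicit padding live here */
	uint32_t ifindex;
};

void fill_msg(struct wire_msg *m, uint8_t family, uint32_t ifindex)
{
	memset(m, 0, sizeof(*m));	/* zero the padding before the copy-out */
	m->family  = family;
	m->ifindex = ifindex;
}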
const T* data(const unsigned int x, const unsigned int y=0, const unsigned int z=0, const unsigned int c=0) const { return _data + x + (ulongT)y*_width + (ulongT)z*_width*_height + (ulongT)c*_width*_height*_depth; }
0
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
227,219,577,512,646,250,000,000,000,000,000,000,000
3
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in the file do not exceed the file size.
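A hedged sketch of the check the message describes, in plain C rather than CImg's loader: dimensions decoded from a header are validated against the bytes the file can actually supply before any buffer is sized from them. check_dims() and its signature are illustrative; the caller is assumed to reseek afterwards.

#include <stdint.h>
#include <stdio.h>

/* returns 1 when the decoded dimensions fit in the bytes the file has */
int check_dims(FILE *f, uint32_t w, uint32_t h, uint32_t bpp, long data_offset)
{
	long fsize;
	uint64_t need;

	if (w == 0 || h == 0 || bpp == 0)
		return 0;
	if (fseek(f, 0, SEEK_END) != 0)
		return 0;
	fsize = ftell(f);
	if (fsize < 0 || data_offset < 0 || data_offset > fsize)
		return 0;

	need = (uint64_t)w * h;			/* cannot overflow 64 bits */
	if (need > UINT64_MAX / bpp)
		return 0;
	need *= bpp;
	return need <= (uint64_t)(fsize - data_offset);
}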
brcmf_msgbuf_process_ioctl_complete(struct brcmf_msgbuf *msgbuf, void *buf) { struct msgbuf_ioctl_resp_hdr *ioctl_resp; ioctl_resp = (struct msgbuf_ioctl_resp_hdr *)buf; msgbuf->ioctl_resp_status = (s16)le16_to_cpu(ioctl_resp->compl_hdr.status); msgbuf->ioctl_resp_ret_len = le16_to_cpu(ioctl_resp->resp_len); msgbuf->ioctl_resp_pktid = le32_to_cpu(ioctl_resp->msg.request_id); brcmf_msgbuf_ioctl_resp_wake(msgbuf); if (msgbuf->cur_ioctlrespbuf) msgbuf->cur_ioctlrespbuf--; brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf); }
0
[ "CWE-20" ]
linux
a4176ec356c73a46c07c181c6d04039fafa34a9f
172,593,698,218,985,030,000,000,000,000,000,000,000
17
brcmfmac: add subtype check for event handling in data path For USB there is no separate channel being used to pass events from firmware to the host driver and as such are passed over the data path. In order to detect mock event messages an additional check is needed on event subtype. This check is added conditionally using unlikely() keyword. Reviewed-by: Hante Meuleman <[email protected]> Reviewed-by: Pieter-Paul Giesberts <[email protected]> Reviewed-by: Franky Lin <[email protected]> Signed-off-by: Arend van Spriel <[email protected]> Signed-off-by: Kalle Valo <[email protected]>
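A simplified sketch of the filtering the commit adds: because USB carries firmware events over the data path, a packet is treated as an event only when both the ethertype and the event subtype match. The struct, host byte order and constant values below are assumptions for the sketch, not the driver's real frame layout.

#include <stdbool.h>
#include <stdint.h>

#define ETH_P_LINK_CTL              0x886c	/* assumed link-control ethertype */
#define BCMILCP_SUBTYPE_VENDOR_LONG 32769	/* assumed event subtype */

struct event_hdr {
	uint16_t ether_type;	/* host byte order for this sketch */
	uint16_t subtype;
};

static inline bool is_fw_event(const struct event_hdr *h)
{
	if (h->ether_type != ETH_P_LINK_CTL)
		return false;
	/* the added check: a data packet spoofing the ethertype
	   still fails the subtype test */
	return h->subtype == BCMILCP_SUBTYPE_VENDOR_LONG;
}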
void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, struct smb_vol *vol_info) { /* if we are reconnecting then should we check to see if * any requested capabilities changed locally e.g. via * remount but we can not do much about it here * if they have (even if we could detect it by the following) * Perhaps we could add a backpointer to array of sb from tcon * or if we change to make all sb to same share the same * sb as NFS - then we only have one backpointer to sb. * What if we wanted to mount the server share twice once with * and once without posixacls or posix paths? */ __u64 saved_cap = le64_to_cpu(tcon->fsUnixInfo.Capability); if (vol_info && vol_info->no_linux_ext) { tcon->fsUnixInfo.Capability = 0; tcon->unix_ext = 0; /* Unix Extensions disabled */ cifs_dbg(FYI, "Linux protocol extensions disabled\n"); return; } else if (vol_info) tcon->unix_ext = 1; /* Unix Extensions supported */ if (tcon->unix_ext == 0) { cifs_dbg(FYI, "Unix extensions disabled so not set on reconnect\n"); return; } if (!CIFSSMBQFSUnixInfo(xid, tcon)) { __u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability); cifs_dbg(FYI, "unix caps which server supports %lld\n", cap); /* check for reconnect case in which we do not want to change the mount behavior if we can avoid it */ if (vol_info == NULL) { /* turn off POSIX ACL and PATHNAMES if not set originally at mount time */ if ((saved_cap & CIFS_UNIX_POSIX_ACL_CAP) == 0) cap &= ~CIFS_UNIX_POSIX_ACL_CAP; if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) { if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) cifs_dbg(VFS, "POSIXPATH support change\n"); cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP; } else if ((cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) { cifs_dbg(VFS, "possible reconnect error\n"); cifs_dbg(VFS, "server disabled POSIX path support\n"); } } if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP) cifs_dbg(VFS, "per-share encryption not supported yet\n"); cap &= CIFS_UNIX_CAP_MASK; if (vol_info && vol_info->no_psx_acl) cap &= ~CIFS_UNIX_POSIX_ACL_CAP; else if (CIFS_UNIX_POSIX_ACL_CAP & cap) { cifs_dbg(FYI, "negotiated posix acl support\n"); if (cifs_sb) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIXACL; } if (vol_info && vol_info->posix_paths == 0) cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP; else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) { cifs_dbg(FYI, "negotiate posix pathnames\n"); if (cifs_sb) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS; } cifs_dbg(FYI, "Negotiate caps 0x%x\n", (int)cap); #ifdef CONFIG_CIFS_DEBUG2 if (cap & CIFS_UNIX_FCNTL_CAP) cifs_dbg(FYI, "FCNTL cap\n"); if (cap & CIFS_UNIX_EXTATTR_CAP) cifs_dbg(FYI, "EXTATTR cap\n"); if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) cifs_dbg(FYI, "POSIX path cap\n"); if (cap & CIFS_UNIX_XATTR_CAP) cifs_dbg(FYI, "XATTR cap\n"); if (cap & CIFS_UNIX_POSIX_ACL_CAP) cifs_dbg(FYI, "POSIX ACL cap\n"); if (cap & CIFS_UNIX_LARGE_READ_CAP) cifs_dbg(FYI, "very large read cap\n"); if (cap & CIFS_UNIX_LARGE_WRITE_CAP) cifs_dbg(FYI, "very large write cap\n"); if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP) cifs_dbg(FYI, "transport encryption cap\n"); if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP) cifs_dbg(FYI, "mandatory transport encryption cap\n"); #endif /* CIFS_DEBUG2 */ if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) { if (vol_info == NULL) { cifs_dbg(FYI, "resetting capabilities failed\n"); } else cifs_dbg(VFS, "Negotiating Unix capabilities with the server failed. Consider mounting with the Unix Extensions disabled if problems are found by specifying the nounix mount option.\n"); } } }
0
[ "CWE-703", "CWE-189" ]
linux
1fc29bacedeabb278080e31bb9c1ecb49f143c3b
22,709,255,992,680,950,000,000,000,000,000,000,000
99
cifs: fix off-by-one bug in build_unc_path_to_root commit 839db3d10a (cifs: fix up handling of prefixpath= option) changed the code such that the vol->prepath no longer contained a leading delimiter and then fixed up the places that accessed that field to account for that change. One spot in build_unc_path_to_root was missed, however. When doing the pointer addition on pos, that patch failed to account for the fact that we had already incremented "pos" by one when adding the length of the prepath. This caused a buffer overrun by one byte. This patch fixes the problem by correcting the handling of "pos". Cc: <[email protected]> # v3.8+ Reported-by: Marcus Moeller <[email protected]> Reported-by: Ken Fallon <[email protected]> Signed-off-by: Jeff Layton <[email protected]> Signed-off-by: Steve French <[email protected]>
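An illustrative reconstruction, not the actual cifs code, of the off-by-one class in the message: once the cursor has been advanced past the inserted delimiter, counting that byte again when adding the prefix length overruns the buffer by one.

#include <string.h>

/* dst must have room for sharelen + pplen + 2 bytes */
void build_path(char *dst, const char *share, const char *prepath)
{
	char *pos = dst;
	size_t sharelen = strlen(share);
	size_t pplen = prepath ? strlen(prepath) : 0;

	memcpy(pos, share, sharelen);
	pos += sharelen;
	if (pplen) {
		*pos = '/';	/* delimiter is no longer stored in prepath */
		pos++;		/* advance past it exactly once */
		memcpy(pos, prepath, pplen);
		pos += pplen;	/* the pre-fix code effectively moved one byte too far here */
	}
	*pos = '\0';
}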
void set_cron_uid(void) { #if defined(BSD) || defined(POSIX) if (seteuid(ROOT_UID) < OK) { perror("seteuid"); exit(ERROR_EXIT); } #else if (setuid(ROOT_UID) < OK) { perror("setuid"); exit(ERROR_EXIT); } #endif }
0
[ "CWE-476" ]
cronie
a6576769f01325303b11edc3e0cfb05ef382ce56
268,893,995,032,845,630,000,000,000,000,000,000,000
13
Fix CVE-2019-9704 and CVE-2019-9705 Users can cause a DoS of crond by loading huge crontab files. We now allow a maximum of 1000 environment variables and 1000 crontab entries. Also, the comments and whitespace between the entries and variables are now limited to 32768 characters.
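A minimal sketch of the denial-of-service mitigation the message describes: parsing stops once a single crontab exceeds a fixed entry budget. Only the 1000-entry cap comes from the message; the loop and line-size limit are illustrative.

#include <stdio.h>

#define MAX_ENTRIES 1000

int load_crontab(FILE *f)
{
	char line[1024];
	int entries = 0;

	while (fgets(line, sizeof(line), f)) {
		if (line[0] == '#' || line[0] == '\n')
			continue;
		if (++entries > MAX_ENTRIES)
			return -1;	/* refuse oversized crontabs */
		/* ... parse one entry or environment variable ... */
	}
	return entries;
}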
CImg<Tfloat> get_blur(const float sigma, const unsigned int boundary_conditions=1, const bool is_gaussian=true) const { return CImg<Tfloat>(*this,false).blur(sigma,boundary_conditions,is_gaussian); }
0
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
187,622,574,521,910,150,000,000,000,000,000,000,000
4
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in the file do not exceed the file size.
display_shell_version (count, c) int count, c; { rl_crlf (); show_shell_version (0); putc ('\r', rl_outstream); fflush (rl_outstream); rl_on_new_line (); rl_redisplay (); return 0; }
0
[ "CWE-20" ]
bash
4f747edc625815f449048579f6e65869914dd715
132,971,087,282,279,180,000,000,000,000,000,000,000
11
Bash-4.4 patch 7
static int phar_tar_octal(char *buf, uint32_t val, int len) /* {{{ */ { char *p = buf; int s = len; p += len; /* Start at the end and work backwards. */ while (s-- > 0) { *--p = (char)('0' + (val & 7)); val >>= 3; } if (val == 0) return SUCCESS; /* If it overflowed, fill field with max value. */ while (len-- > 0) *p++ = '7'; return FAILURE; }
0
[ "CWE-119" ]
php-src
e0f5d62bd6690169998474b62f92a8c5ddf0e699
96,932,303,581,064,740,000,000,000,000,000,000,000
20
Fix bug #77586 - phar_tar_writeheaders_int() buffer overflow
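A hedged usage sketch for the record's function: a value that cannot be represented in the requested number of octal digits leaves the field clamped to '7's and signals FAILURE instead of overrunning the tar header. SUCCESS/FAILURE are assumed to be Zend's 0/-1, and the prototype is repeated here only so the sketch stands alone (in php-src the function is static).

#include <stdint.h>
#include <stdio.h>

#define SUCCESS 0
#define FAILURE -1

int phar_tar_octal(char *buf, uint32_t val, int len);	/* the function above */

int main(void)
{
	char field[12] = {0};

	/* 8 octal digits top out at 8^8 - 1, so UINT32_MAX cannot fit:
	   expect rc == FAILURE and field == "77777777" */
	int rc = phar_tar_octal(field, 0xFFFFFFFFu, 8);
	printf("rc=%d field=%.8s\n", rc, field);
	return 0;
}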
static int binder_thread_read(struct binder_proc *proc, struct binder_thread *thread, binder_uintptr_t binder_buffer, size_t size, binder_size_t *consumed, int non_block) { void __user *buffer = (void __user *)(uintptr_t)binder_buffer; void __user *ptr = buffer + *consumed; void __user *end = buffer + size; int ret = 0; int wait_for_proc_work; if (*consumed == 0) { if (put_user(BR_NOOP, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); } retry: binder_inner_proc_lock(proc); wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); binder_inner_proc_unlock(proc); thread->looper |= BINDER_LOOPER_STATE_WAITING; trace_binder_wait_for_work(wait_for_proc_work, !!thread->transaction_stack, !binder_worklist_empty(proc, &thread->todo)); if (wait_for_proc_work) { if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED))) { binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n", proc->pid, thread->pid, thread->looper); wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); } binder_set_nice(proc->default_priority); } if (non_block) { if (!binder_has_work(thread, wait_for_proc_work)) ret = -EAGAIN; } else { ret = binder_wait_for_work(thread, wait_for_proc_work); } thread->looper &= ~BINDER_LOOPER_STATE_WAITING; if (ret) return ret; while (1) { uint32_t cmd; struct binder_transaction_data tr; struct binder_work *w = NULL; struct list_head *list = NULL; struct binder_transaction *t = NULL; struct binder_thread *t_from; binder_inner_proc_lock(proc); if (!binder_worklist_empty_ilocked(&thread->todo)) list = &thread->todo; else if (!binder_worklist_empty_ilocked(&proc->todo) && wait_for_proc_work) list = &proc->todo; else { binder_inner_proc_unlock(proc); /* no data added */ if (ptr - buffer == 4 && !thread->looper_need_return) goto retry; break; } if (end - ptr < sizeof(tr) + 4) { binder_inner_proc_unlock(proc); break; } w = binder_dequeue_work_head_ilocked(list); if (binder_worklist_empty_ilocked(&thread->todo)) thread->process_todo = false; switch (w->type) { case BINDER_WORK_TRANSACTION: { binder_inner_proc_unlock(proc); t = container_of(w, struct binder_transaction, work); } break; case BINDER_WORK_RETURN_ERROR: { struct binder_error *e = container_of( w, struct binder_error, work); WARN_ON(e->cmd == BR_OK); binder_inner_proc_unlock(proc); if (put_user(e->cmd, (uint32_t __user *)ptr)) return -EFAULT; cmd = e->cmd; e->cmd = BR_OK; ptr += sizeof(uint32_t); binder_stat_br(proc, thread, cmd); } break; case BINDER_WORK_TRANSACTION_COMPLETE: { binder_inner_proc_unlock(proc); cmd = BR_TRANSACTION_COMPLETE; if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); binder_stat_br(proc, thread, cmd); binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, "%d:%d BR_TRANSACTION_COMPLETE\n", proc->pid, thread->pid); kfree(w); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); } break; case BINDER_WORK_NODE: { struct binder_node *node = container_of(w, struct binder_node, work); int strong, weak; binder_uintptr_t node_ptr = node->ptr; binder_uintptr_t node_cookie = node->cookie; int node_debug_id = node->debug_id; int has_weak_ref; int has_strong_ref; void __user *orig_ptr = ptr; BUG_ON(proc != node->proc); strong = node->internal_strong_refs || node->local_strong_refs; weak = !hlist_empty(&node->refs) || node->local_weak_refs || node->tmp_refs || strong; has_strong_ref = node->has_strong_ref; has_weak_ref = node->has_weak_ref; if (weak 
&& !has_weak_ref) { node->has_weak_ref = 1; node->pending_weak_ref = 1; node->local_weak_refs++; } if (strong && !has_strong_ref) { node->has_strong_ref = 1; node->pending_strong_ref = 1; node->local_strong_refs++; } if (!strong && has_strong_ref) node->has_strong_ref = 0; if (!weak && has_weak_ref) node->has_weak_ref = 0; if (!weak && !strong) { binder_debug(BINDER_DEBUG_INTERNAL_REFS, "%d:%d node %d u%016llx c%016llx deleted\n", proc->pid, thread->pid, node_debug_id, (u64)node_ptr, (u64)node_cookie); rb_erase(&node->rb_node, &proc->nodes); binder_inner_proc_unlock(proc); binder_node_lock(node); /* * Acquire the node lock before freeing the * node to serialize with other threads that * may have been holding the node lock while * decrementing this node (avoids race where * this thread frees while the other thread * is unlocking the node after the final * decrement) */ binder_node_unlock(node); binder_free_node(node); } else binder_inner_proc_unlock(proc); if (weak && !has_weak_ref) ret = binder_put_node_cmd( proc, thread, &ptr, node_ptr, node_cookie, node_debug_id, BR_INCREFS, "BR_INCREFS"); if (!ret && strong && !has_strong_ref) ret = binder_put_node_cmd( proc, thread, &ptr, node_ptr, node_cookie, node_debug_id, BR_ACQUIRE, "BR_ACQUIRE"); if (!ret && !strong && has_strong_ref) ret = binder_put_node_cmd( proc, thread, &ptr, node_ptr, node_cookie, node_debug_id, BR_RELEASE, "BR_RELEASE"); if (!ret && !weak && has_weak_ref) ret = binder_put_node_cmd( proc, thread, &ptr, node_ptr, node_cookie, node_debug_id, BR_DECREFS, "BR_DECREFS"); if (orig_ptr == ptr) binder_debug(BINDER_DEBUG_INTERNAL_REFS, "%d:%d node %d u%016llx c%016llx state unchanged\n", proc->pid, thread->pid, node_debug_id, (u64)node_ptr, (u64)node_cookie); if (ret) return ret; } break; case BINDER_WORK_DEAD_BINDER: case BINDER_WORK_DEAD_BINDER_AND_CLEAR: case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { struct binder_ref_death *death; uint32_t cmd; binder_uintptr_t cookie; death = container_of(w, struct binder_ref_death, work); if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; else cmd = BR_DEAD_BINDER; cookie = death->cookie; binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, "%d:%d %s %016llx\n", proc->pid, thread->pid, cmd == BR_DEAD_BINDER ? 
"BR_DEAD_BINDER" : "BR_CLEAR_DEATH_NOTIFICATION_DONE", (u64)cookie); if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { binder_inner_proc_unlock(proc); kfree(death); binder_stats_deleted(BINDER_STAT_DEATH); } else { binder_enqueue_work_ilocked( w, &proc->delivered_death); binder_inner_proc_unlock(proc); } if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); if (put_user(cookie, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); binder_stat_br(proc, thread, cmd); if (cmd == BR_DEAD_BINDER) goto done; /* DEAD_BINDER notifications can cause transactions */ } break; } if (!t) continue; BUG_ON(t->buffer == NULL); if (t->buffer->target_node) { struct binder_node *target_node = t->buffer->target_node; tr.target.ptr = target_node->ptr; tr.cookie = target_node->cookie; t->saved_priority = task_nice(current); if (t->priority < target_node->min_priority && !(t->flags & TF_ONE_WAY)) binder_set_nice(t->priority); else if (!(t->flags & TF_ONE_WAY) || t->saved_priority > target_node->min_priority) binder_set_nice(target_node->min_priority); cmd = BR_TRANSACTION; } else { tr.target.ptr = 0; tr.cookie = 0; cmd = BR_REPLY; } tr.code = t->code; tr.flags = t->flags; tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid); t_from = binder_get_txn_from(t); if (t_from) { struct task_struct *sender = t_from->proc->tsk; tr.sender_pid = task_tgid_nr_ns(sender, task_active_pid_ns(current)); } else { tr.sender_pid = 0; } ret = binder_apply_fd_fixups(t); if (ret) { struct binder_buffer *buffer = t->buffer; bool oneway = !!(t->flags & TF_ONE_WAY); int tid = t->debug_id; if (t_from) binder_thread_dec_tmpref(t_from); buffer->transaction = NULL; binder_cleanup_transaction(t, "fd fixups failed", BR_FAILED_REPLY); binder_free_buf(proc, buffer); binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n", proc->pid, thread->pid, oneway ? "async " : (cmd == BR_REPLY ? "reply " : ""), tid, BR_FAILED_REPLY, ret, __LINE__); if (cmd == BR_REPLY) { cmd = BR_FAILED_REPLY; if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); binder_stat_br(proc, thread, cmd); break; } continue; } tr.data_size = t->buffer->data_size; tr.offsets_size = t->buffer->offsets_size; tr.data.ptr.buffer = (binder_uintptr_t) ((uintptr_t)t->buffer->data + binder_alloc_get_user_buffer_offset(&proc->alloc)); tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *)); if (put_user(cmd, (uint32_t __user *)ptr)) { if (t_from) binder_thread_dec_tmpref(t_from); binder_cleanup_transaction(t, "put_user failed", BR_FAILED_REPLY); return -EFAULT; } ptr += sizeof(uint32_t); if (copy_to_user(ptr, &tr, sizeof(tr))) { if (t_from) binder_thread_dec_tmpref(t_from); binder_cleanup_transaction(t, "copy_to_user failed", BR_FAILED_REPLY); return -EFAULT; } ptr += sizeof(tr); trace_binder_transaction_received(t); binder_stat_br(proc, thread, cmd); binder_debug(BINDER_DEBUG_TRANSACTION, "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n", proc->pid, thread->pid, (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : "BR_REPLY", t->debug_id, t_from ? t_from->proc->pid : 0, t_from ? 
t_from->pid : 0, cmd, t->buffer->data_size, t->buffer->offsets_size, (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets); if (t_from) binder_thread_dec_tmpref(t_from); t->buffer->allow_user_free = 1; if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { binder_inner_proc_lock(thread->proc); t->to_parent = thread->transaction_stack; t->to_thread = thread; thread->transaction_stack = t; binder_inner_proc_unlock(thread->proc); } else { binder_free_transaction(t); } break; } done: *consumed = ptr - buffer; binder_inner_proc_lock(proc); if (proc->requested_threads == 0 && list_empty(&thread->proc->waiting_threads) && proc->requested_threads_started < proc->max_threads && (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ /*spawn a new thread if we leave this out */) { proc->requested_threads++; binder_inner_proc_unlock(proc); binder_debug(BINDER_DEBUG_THREADS, "%d:%d BR_SPAWN_LOOPER\n", proc->pid, thread->pid); if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) return -EFAULT; binder_stat_br(proc, thread, BR_SPAWN_LOOPER); } else binder_inner_proc_unlock(proc); return 0; }
0
[ "CWE-416" ]
linux
7bada55ab50697861eee6bb7d60b41e68a961a9c
163,274,491,142,912,400,000,000,000,000,000,000,000
388
binder: fix race that allows malicious free of live buffer Malicious code can attempt to free buffers using the BC_FREE_BUFFER ioctl to binder. There are protections against a user freeing a buffer while in use by the kernel; however, there was a window where BC_FREE_BUFFER could be used to free a recently allocated buffer that was not completely initialized. This resulted in a use-after-free detected by KASAN with a malicious test program. This window is closed by setting the buffer's allow_user_free attribute to 0 when the buffer is allocated or when the user has previously freed it, instead of waiting for the caller to set it. The problem was that when the struct buffer was recycled, allow_user_free was stale and set to 1, allowing a free to go through. Signed-off-by: Todd Kjos <[email protected]> Acked-by: Arve Hjønnevåg <[email protected]> Cc: stable <[email protected]> # 4.14 Signed-off-by: Greg Kroah-Hartman <[email protected]>
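A standalone sketch of the lifecycle rule the fix enforces, visible above where the read path sets t->buffer->allow_user_free = 1 only at hand-off: the flag starts cleared at allocation, so a user-triggered free is rejected until the kernel has finished delivering the buffer. The types below are stand-ins, not binder's.

#include <stdbool.h>
#include <stdlib.h>

struct buffer {
	bool allow_user_free;
	/* ... payload ... */
};

struct buffer *buffer_alloc(void)
{
	/* calloc leaves allow_user_free == false, so a recycled buffer
	   can no longer be freed through the ioctl while it is live */
	return calloc(1, sizeof(struct buffer));
}

int buffer_user_free(struct buffer *b)
{
	if (!b->allow_user_free)
		return -1;	/* reject the malicious BC_FREE_BUFFER */
	b->allow_user_free = false;
	free(b);
	return 0;
}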
mrb_instance_new(mrb_state *mrb, mrb_value cv) { mrb_value obj, blk; mrb_value *argv; mrb_int argc; mrb_sym init; mrb_method_t m; mrb_get_args(mrb, "*&", &argv, &argc, &blk); obj = mrb_instance_alloc(mrb, cv); init = mrb_intern_lit(mrb, "initialize"); m = mrb_method_search(mrb, mrb_class(mrb, obj), init); if (MRB_METHOD_CFUNC_P(m)) { mrb_func_t f = MRB_METHOD_CFUNC(m); if (f != mrb_bob_init) { f(mrb, obj); } } else { mrb_funcall_with_block(mrb, obj, init, argc, argv, blk); } return obj; }
0
[ "CWE-476", "CWE-415" ]
mruby
faa4eaf6803bd11669bc324b4c34e7162286bfa3
302,791,395,787,650,400,000,000,000,000,000,000,000
24
`mrb_class_real()` did not work for `BasicObject`; fix #4037
int bdrv_parse_discard_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_UNMAP;

    if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
        /* do nothing */
    } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
        *flags |= BDRV_O_UNMAP;
    } else {
        return -1;
    }

    return 0;
}
0
[ "CWE-190" ]
qemu
8f4754ede56e3f9ea3fd7207f4a7c4453e59285b
73,936,532,243,607,740,000,000,000,000,000,000,000
14
block: Limit request size (CVE-2014-0143) Limiting the size of a single request to INT_MAX not only fixes a direct integer overflow in bdrv_check_request() (which would only trigger bad behaviour with ridiculously huge images, as in close to 2^64 bytes), but can also prevent overflows in all block drivers. Signed-off-by: Kevin Wolf <[email protected]> Reviewed-by: Max Reitz <[email protected]> Signed-off-by: Stefan Hajnoczi <[email protected]>
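A self-contained sketch of the guard this message describes: cap a single request at INT_MAX bytes and reject offset/size pairs that would wrap. The function name and return convention are illustrative, not QEMU's actual bdrv_check_request() signature:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static int check_request(int64_t offset, int64_t bytes)
{
    if (offset < 0 || bytes < 0)
        return -1;
    if (bytes > INT_MAX)
        return -1;                  /* cap single requests at INT_MAX */
    if (offset > INT64_MAX - bytes)
        return -1;                  /* offset + bytes would overflow */
    return 0;
}

int main(void)
{
    printf("%d\n", check_request(0, (int64_t)INT_MAX + 1)); /* -1 */
    printf("%d\n", check_request(512, 4096));               /* 0 */
    return 0;
}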
qemuProcessReconnectHelper(virDomainObjPtr obj, void *opaque) { virThread thread; struct qemuProcessReconnectData *src = opaque; struct qemuProcessReconnectData *data; g_autofree char *name = NULL; /* If the VM was inactive, we don't need to reconnect */ if (!obj->pid) return 0; data = g_new0(struct qemuProcessReconnectData, 1); memcpy(data, src, sizeof(*data)); data->obj = obj; data->identity = virIdentityGetCurrent(); virNWFilterReadLockFilterUpdates(); /* this lock and reference will be eventually transferred to the thread * that handles the reconnect */ virObjectLock(obj); virObjectRef(obj); name = g_strdup_printf("init-%s", obj->def->name); if (virThreadCreateFull(&thread, false, qemuProcessReconnect, name, false, data) < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("Could not create thread. QEMU initialization " "might be incomplete")); /* We can't spawn a thread and thus connect to monitor. Kill qemu. * It's safe to call qemuProcessStop without a job here since there * is no thread that could be doing anything else with the same domain * object. */ qemuProcessStop(src->driver, obj, VIR_DOMAIN_SHUTOFF_FAILED, QEMU_ASYNC_JOB_NONE, 0); qemuDomainRemoveInactiveJobLocked(src->driver, obj); virDomainObjEndAPI(&obj); virNWFilterUnlockFilterUpdates(); g_clear_object(&data->identity); VIR_FREE(data); return -1; } return 0; }
0
[ "CWE-416" ]
libvirt
1ac703a7d0789e46833f4013a3876c2e3af18ec7
288,632,650,987,677,900,000,000,000,000,000,000,000
50
qemu: Add missing lock in qemuProcessHandleMonitorEOF qemuMonitorUnregister will be called in multiple threads (e.g. threads in the rpc worker pool and the vm event thread). In some cases, it isn't protected by the monitor lock, which may lead to calling g_source_unref more than once and eventually a use-after-free problem. Add the missing lock in qemuProcessHandleMonitorEOF (which is the only place I found where the monitor lock is missing). Suggested-by: Michal Privoznik <[email protected]> Signed-off-by: Peng Liang <[email protected]> Signed-off-by: Michal Privoznik <[email protected]> Reviewed-by: Michal Privoznik <[email protected]>
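A minimal sketch of the locking pattern the message describes, reduced to the relevant lines; this is a paraphrase of the idea, not the exact libvirt patch, and processMonitorEOFEvent is an abbreviated hypothetical name:

/* Sketch: the EOF handler must hold the monitor lock before
 * unregistering, so two threads cannot both drop the GSource ref. */
static void
processMonitorEOFEvent(qemuDomainObjPrivatePtr priv)
{
    virObjectLock(priv->mon);        /* the previously missing lock */
    qemuMonitorUnregister(priv->mon);
    virObjectUnlock(priv->mon);
}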
static void ext4_put_super(struct super_block *sb) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; int i, err; ext4_unregister_li_request(sb); dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); flush_workqueue(sbi->dio_unwritten_wq); destroy_workqueue(sbi->dio_unwritten_wq); lock_super(sb); if (sb->s_dirt) ext4_commit_super(sb, 1); if (sbi->s_journal) { err = jbd2_journal_destroy(sbi->s_journal); sbi->s_journal = NULL; if (err < 0) ext4_abort(sb, "Couldn't clean up the journal"); } del_timer(&sbi->s_err_report); ext4_release_system_zone(sb); ext4_mb_release(sb); ext4_ext_release(sb); ext4_xattr_put_super(sb); if (!(sb->s_flags & MS_RDONLY)) { EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); es->s_state = cpu_to_le16(sbi->s_mount_state); ext4_commit_super(sb, 1); } if (sbi->s_proc) { remove_proc_entry(sb->s_id, ext4_proc_root); } kobject_del(&sbi->s_kobj); for (i = 0; i < sbi->s_gdb_count; i++) brelse(sbi->s_group_desc[i]); kfree(sbi->s_group_desc); if (is_vmalloc_addr(sbi->s_flex_groups)) vfree(sbi->s_flex_groups); else kfree(sbi->s_flex_groups); percpu_counter_destroy(&sbi->s_freeblocks_counter); percpu_counter_destroy(&sbi->s_freeinodes_counter); percpu_counter_destroy(&sbi->s_dirs_counter); percpu_counter_destroy(&sbi->s_dirtyblocks_counter); brelse(sbi->s_sbh); #ifdef CONFIG_QUOTA for (i = 0; i < MAXQUOTAS; i++) kfree(sbi->s_qf_names[i]); #endif /* Debugging code just in case the in-memory inode orphan list * isn't empty. The on-disk one can be non-empty if we've * detected an error and taken the fs readonly, but the * in-memory list had better be clean by this point. */ if (!list_empty(&sbi->s_orphan)) dump_orphan_list(sb, sbi); J_ASSERT(list_empty(&sbi->s_orphan)); invalidate_bdev(sb->s_bdev); if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) { /* * Invalidate the journal device's buffers. We don't want them * floating about in memory - the physical journal device may * hotswapped, and it breaks the `ro-after' testing code. */ sync_blockdev(sbi->journal_bdev); invalidate_bdev(sbi->journal_bdev); ext4_blkdev_remove(sbi); } sb->s_fs_info = NULL; /* * Now that we are completely done shutting down the * superblock, we need to actually destroy the kobject. */ unlock_super(sb); kobject_put(&sbi->s_kobj); wait_for_completion(&sbi->s_kobj_unregister); kfree(sbi->s_blockgroup_lock); kfree(sbi); }
0
[ "CWE-703" ]
linux
0449641130f5652b344ef6fa39fa019d7e94660a
7,762,363,974,658,940,000,000,000,000,000,000,000
86
ext4: init timer earlier to avoid a kernel panic in __save_error_info During mount, when we fail to open the journal inode or root inode, __save_error_info() will call mod_timer(). But actually s_err_report isn't initialized yet and the kernel oopses. The detailed information can be found at https://bugzilla.kernel.org/show_bug.cgi?id=32082. The best way would be to check whether the timer s_err_report is initialized or not, but it seems that include/linux/timer.h has no good function for checking the status of a timer, so this patch just moves the initialization of s_err_report earlier so that we can avoid the kernel panic. The corresponding del_timer is also added in the error path. Reported-by: Sami Liedes <[email protected]> Signed-off-by: Tao Ma <[email protected]> Signed-off-by: "Theodore Ts'o" <[email protected]>
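A sketch of the ordering fix, assuming the timer API of that kernel era (setup_timer/del_timer); print_daily_error_info is ext4's real callback, while load_journal_fails is a made-up stand-in for the failing mount steps:

static int fill_super_sketch(struct super_block *sb, struct ext4_sb_info *sbi)
{
    /* Initialize the timer before anything that can mod_timer() it. */
    setup_timer(&sbi->s_err_report, print_daily_error_info,
                (unsigned long)sb);

    if (load_journal_fails(sb))      /* hypothetical failing step */
        goto failed_mount;
    return 0;

failed_mount:
    del_timer(&sbi->s_err_report);   /* disarm on the error path too */
    return -EINVAL;
}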
const char *libraw_strerror(int e)
{
    enum LibRaw_errors errorcode = (LibRaw_errors)e;
    switch (errorcode)
    {
    case LIBRAW_SUCCESS:
        return "No error";
    case LIBRAW_UNSPECIFIED_ERROR:
        return "Unspecified error";
    case LIBRAW_FILE_UNSUPPORTED:
        return "Unsupported file format or not RAW file";
    case LIBRAW_REQUEST_FOR_NONEXISTENT_IMAGE:
        return "Request for nonexisting image number";
    case LIBRAW_OUT_OF_ORDER_CALL:
        return "Out of order call of libraw function";
    case LIBRAW_NO_THUMBNAIL:
        return "No thumbnail in file";
    case LIBRAW_UNSUPPORTED_THUMBNAIL:
        return "Unsupported thumbnail format";
    case LIBRAW_INPUT_CLOSED:
        return "No input stream, or input stream closed";
    case LIBRAW_UNSUFFICIENT_MEMORY:
        return "Unsufficient memory";
    case LIBRAW_DATA_ERROR:
        return "Corrupted data or unexpected EOF";
    case LIBRAW_IO_ERROR:
        return "Input/output error";
    case LIBRAW_CANCELLED_BY_CALLBACK:
        return "Cancelled by user callback";
    case LIBRAW_BAD_CROP:
        return "Bad crop box";
    default:
        return "Unknown error code";
    }
}
0
[ "CWE-119", "CWE-787" ]
LibRaw
2f912f5b33582961b1cdbd9fd828589f8b78f21d
260,030,817,327,681,500,000,000,000,000,000,000,000
35
fixed wrong data_maximum calculation; prevent out-of-buffer access in exp_bef
int oidc_handle_redirect_uri_request(request_rec *r, oidc_cfg *c, oidc_session_t *session) { if (oidc_proto_is_redirect_authorization_response(r, c)) { /* this is an authorization response from the OP using the Basic Client profile or a Hybrid flow*/ return oidc_handle_redirect_authorization_response(r, c, session); } else if (oidc_proto_is_post_authorization_response(r, c)) { /* this is an authorization response using the fragment(+POST) response_mode with the Implicit Client profile */ return oidc_handle_post_authorization_response(r, c, session); } else if (oidc_is_discovery_response(r, c)) { /* this is response from the OP discovery page */ return oidc_handle_discovery_response(r, c); } else if (oidc_util_request_has_parameter(r, "logout")) { /* handle logout */ return oidc_handle_logout(r, c, session); } else if (oidc_util_request_has_parameter(r, "jwks")) { /* handle JWKs request */ return oidc_handle_jwks(r, c); } else if (oidc_util_request_has_parameter(r, "session")) { /* handle session management request */ return oidc_handle_session_management(r, c, session); } else if (oidc_util_request_has_parameter(r, "refresh")) { /* handle refresh token request */ return oidc_handle_refresh_token_request(r, c, session); } else if (oidc_util_request_has_parameter(r, "request_uri")) { /* handle request object by reference request */ return oidc_handle_request_uri(r, c); } else if (oidc_util_request_has_parameter(r, "remove_at_cache")) { /* handle request to invalidate access token cache */ return oidc_handle_remove_at_cache(r, c); } else if ((r->args == NULL) || (apr_strnatcmp(r->args, "") == 0)) { /* this is a "bare" request to the redirect URI, indicating implicit flow using the fragment response_mode */ return oidc_proto_javascript_implicit(r, c); } /* this is not an authorization response or logout request */ /* check for "error" response */ if (oidc_util_request_has_parameter(r, "error")) { // char *error = NULL, *descr = NULL; // oidc_util_get_request_parameter(r, "error", &error); // oidc_util_get_request_parameter(r, "error_description", &descr); // // /* send user facing error to browser */ // return oidc_util_html_send_error(r, error, descr, DONE); oidc_handle_redirect_authorization_response(r, c, session); } /* something went wrong */ return oidc_util_html_send_error(r, c->error_template, "Invalid Request", apr_psprintf(r->pool, "The OpenID Connect callback URL received an invalid request: %s", r->args), HTTP_INTERNAL_SERVER_ERROR); }
1
[ "CWE-20" ]
mod_auth_openidc
612e309bfffd6f9b8ad7cdccda3019fc0865f3b4
146,101,632,360,201,360,000,000,000,000,000,000,000
74
don't echo query params on invalid requests to the redirect URI; closes #212. Thanks @LukasReschke; I'm sure there's some OWASP guideline that warns against this
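As an aside, the safer error path the fix implies looks roughly like the fragment below. It reuses oidc_util_html_send_error() from the function above; the replacement message text is mine, not the project's:

/* Sketch: report the failure without reflecting the attacker-controlled
 * query string (r->args) back into the response body. */
return oidc_util_html_send_error(r, c->error_template, "Invalid Request",
        "The OpenID Connect callback URL received an invalid request.",
        HTTP_INTERNAL_SERVER_ERROR);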
decode_bits_in_field(const guint bit_offset, const gint no_of_bits, const guint64 value)
{
    guint64 mask;
    char *str;
    int bit, str_p = 0;
    int i;
    int max_bits = MIN(64, no_of_bits);

    mask = G_GUINT64_CONSTANT(1) << (max_bits-1);

    /* Prepare the string, 256 pos for the bits and zero termination, + 64 for the spaces */
    str=(char *)wmem_alloc0(wmem_packet_scope(), 256+64);
    for(bit=0;bit<((int)(bit_offset&0x07));bit++){
        if(bit&&(!(bit%4))){
            str[str_p] = ' ';
            str_p++;
        }
        str[str_p] = '.';
        str_p++;
    }

    /* read the bits for the int */
    for(i=0;i<max_bits;i++){
        if(bit&&(!(bit%4))){
            str[str_p] = ' ';
            str_p++;
        }
        if(bit&&(!(bit%8))){
            str[str_p] = ' ';
            str_p++;
        }
        bit++;
        if((value & mask) != 0){
            str[str_p] = '1';
            str_p++;
        } else {
            str[str_p] = '0';
            str_p++;
        }
        mask = mask>>1;
    }

    for(;bit%8;bit++){
        if(bit&&(!(bit%4))){
            str[str_p] = ' ';
            str_p++;
        }
        str[str_p] = '.';
        str_p++;
    }
    return str;
}
0
[ "CWE-125" ]
wireshark
d5f2657825e63e4126ebd7d13a59f3c6e8a9e4e1
233,951,493,442,874,870,000,000,000,000,000,000,000
52
epan: Limit our bits in decode_bits_in_field. Limit the number of bits we process in decode_bits_in_field, otherwise we'll overrun our buffer. Fixes #16958.
static void rdma_umap_close(struct vm_area_struct *vma)
{
    struct ib_uverbs_file *ufile = vma->vm_file->private_data;
    struct rdma_umap_priv *priv = vma->vm_private_data;

    if (!priv)
        return;

    /*
     * The vma holds a reference on the struct file that created it, which
     * in turn means that the ib_uverbs_file is guaranteed to exist at
     * this point.
     */
    mutex_lock(&ufile->umap_lock);
    list_del(&priv->list);
    mutex_unlock(&ufile->umap_lock);
    kfree(priv);
}
0
[ "CWE-362", "CWE-703", "CWE-667" ]
linux
04f5866e41fb70690e28397487d8bd8eea7d712a
45,730,067,775,992,040,000,000,000,000,000,000,000
18
coredump: fix race condition between mmget_not_zero()/get_task_mm() and core dumping The core dumping code has always run without holding the mmap_sem for writing, despite that is the only way to ensure that the entire vma layout will not change from under it. Only using some signal serialization on the processes belonging to the mm is not nearly enough. This was pointed out earlier. For example in Hugh's post from Jul 2017: https://lkml.kernel.org/r/[email protected] "Not strictly relevant here, but a related note: I was very surprised to discover, only quite recently, how handle_mm_fault() may be called without down_read(mmap_sem) - when core dumping. That seems a misguided optimization to me, which would also be nice to correct" In particular because the growsdown and growsup can move the vm_start/vm_end the various loops the core dump does around the vma will not be consistent if page faults can happen concurrently. Pretty much all users calling mmget_not_zero()/get_task_mm() and then taking the mmap_sem had the potential to introduce unexpected side effects in the core dumping code. Adding mmap_sem for writing around the ->core_dump invocation is a viable long term fix, but it requires removing all copy user and page faults and to replace them with get_dump_page() for all binary formats which is not suitable as a short term fix. For the time being this solution manually covers the places that can confuse the core dump either by altering the vma layout or the vma flags while it runs. Once ->core_dump runs under mmap_sem for writing the function mmget_still_valid() can be dropped. Allowing mmap_sem protected sections to run in parallel with the coredump provides some minor parallelism advantage to the swapoff code (which seems to be safe enough by never mangling any vma field and can keep doing swapins in parallel to the core dumping) and to some other corner case. In order to facilitate the backporting I added "Fixes: 86039bd3b4e6" however the side effect of this same race condition in /proc/pid/mem should be reproducible since before 2.6.12-rc2 so I couldn't add any other "Fixes:" because there's no hash beyond the git genesis commit. Because find_extend_vma() is the only location outside of the process context that could modify the "mm" structures under mmap_sem for reading, by adding the mmget_still_valid() check to it, all other cases that take the mmap_sem for reading don't need the new check after mmget_not_zero()/get_task_mm(). The expand_stack() in page fault context also doesn't need the new check, because all tasks under core dumping are frozen. Link: http://lkml.kernel.org/r/[email protected] Fixes: 86039bd3b4e6 ("userfaultfd: add new syscall to provide memory externalization") Signed-off-by: Andrea Arcangeli <[email protected]> Reported-by: Jann Horn <[email protected]> Suggested-by: Oleg Nesterov <[email protected]> Acked-by: Peter Xu <[email protected]> Reviewed-by: Mike Rapoport <[email protected]> Reviewed-by: Oleg Nesterov <[email protected]> Reviewed-by: Jann Horn <[email protected]> Acked-by: Jason Gunthorpe <[email protected]> Acked-by: Michal Hocko <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
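A sketch of how the new helper is meant to be used after get_task_mm(), based on the description above; the surrounding code is illustrative, not a specific call site:

/* Sketch: writers into another task's mm must stand down while that
 * mm is being core-dumped. */
mm = get_task_mm(task);
if (!mm)
    return -ESRCH;
down_read(&mm->mmap_sem);
if (!mmget_still_valid(mm)) {        /* core dump in flight */
    up_read(&mm->mmap_sem);
    mmput(mm);
    return -EINTR;
}
/* ... safe to inspect or modify vmas here ... */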
static void append_domain(int index, const char *domain)
{
    update_domain(index, domain, true);
}
0
[ "CWE-119" ]
connman
5c281d182ecdd0a424b64f7698f32467f8f67b71
204,906,634,749,982,760,000,000,000,000,000,000,000
4
dnsproxy: Fix crash on malformed DNS response If the response query string is malformed, we might access memory past the end of the "name" variable in parse_response().
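A self-contained model of the bounds check the fix implies: never copy a DNS label without verifying it fits both the packet and the destination buffer. Names are mine, not connman's parse_response():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int read_name(const uint8_t *pkt, size_t pkt_len,
                     char *name, size_t name_len)
{
    size_t in = 0, out = 0;

    while (in < pkt_len && pkt[in] != 0) {
        uint8_t label = pkt[in++];
        if (in + label > pkt_len)        /* label runs past the packet */
            return -1;
        if (out + label + 1 >= name_len) /* would overflow "name" */
            return -1;
        memcpy(name + out, pkt + in, label);
        out += label;
        name[out++] = '.';
        in += label;
    }
    name[out] = '\0';
    return 0;
}

int main(void)
{
    const uint8_t bad[] = { 63, 'a', 'b' };  /* claims 63 bytes, has 2 */
    char name[256];
    printf("%d\n", read_name(bad, sizeof(bad), name, sizeof(name))); /* -1 */
    return 0;
}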
asmlinkage long sys_sethostname(char __user *name, int len)
{
    int errno;
    char tmp[__NEW_UTS_LEN];

    if (!capable(CAP_SYS_ADMIN))
        return -EPERM;
    if (len < 0 || len > __NEW_UTS_LEN)
        return -EINVAL;
    down_write(&uts_sem);
    errno = -EFAULT;
    if (!copy_from_user(tmp, name, len)) {
        memcpy(utsname()->nodename, tmp, len);
        utsname()->nodename[len] = 0;
        errno = 0;
    }
    up_write(&uts_sem);
    return errno;
}
0
[ "CWE-20" ]
linux-2.6
9926e4c74300c4b31dee007298c6475d33369df0
318,400,137,120,926,630,000,000,000,000,000,000,000
19
CPU time limit patch / setrlimit(RLIMIT_CPU, 0) cheat fix As discovered here today, the change in Kernel 2.6.17 intended to inhibit users from setting RLIMIT_CPU to 0 (as that is equivalent to unlimited) by "cheating" and setting it to 1 in such a case, does not make a difference, as the check is done in the wrong place (too late), and only applies to the profiling code. On all systems I checked running kernels above 2.6.17, no matter what the hard and soft CPU time limits were before, a user could escape them by issuing in the shell (sh/bash/zsh) "ulimit -t 0", and then the user's process was not ever killed. Attached is a trivial patch to fix that. Simply moving the check to a slightly earlier location (specifically, before the line that actually assigns the limit - *old_rlim = new_rlim), does the trick. Do note that at least the zsh (but not ash, dash, or bash) shell has the problem of "caching" the limits set by the ulimit command, so when running zsh the fix will not immediately be evident - after entering "ulimit -t 0", "ulimit -a" will show "-t: cpu time (seconds) 0", even though the actual limit as returned by getrlimit(...) will be 1. It can be verified by opening a subshell (which will not have the values of the parent shell in cache) and checking in it, or just by running a CPU intensive command like "echo '65536^1048576' | bc" and verifying that it dumps core after one second. Regardless of whether that is a misfeature in the shell, perhaps it would be better to return -EINVAL from setrlimit in such a case instead of cheating and setting to 1, as that does not really reflect the actual state of the process anymore. I do not however know what the ground for that decision was in the original 2.6.17 change, and whether there would be any "backward" compatibility issues, so I preferred not to touch that right now. Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
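The point of the message is ordering: adjust the incoming limit before the line that commits it. A self-contained C model of check-before-assign; the struct and function are made up:

#include <stdio.h>

struct rlimit_model { unsigned long cur, max; };

/* Model: the zero-means-unlimited special case must be handled
 * *before* '*old = new_lim', otherwise the bogus value is already
 * live when the check runs. */
static void set_cpu_limit(struct rlimit_model *old, struct rlimit_model new_lim)
{
    if (new_lim.cur == 0)
        new_lim.cur = 1;       /* adjust before committing */
    *old = new_lim;            /* commit point */
}

int main(void)
{
    struct rlimit_model lim = { 10, 10 };
    set_cpu_limit(&lim, (struct rlimit_model){ 0, 10 });
    printf("cur=%lu\n", lim.cur);    /* prints 1, not 0 */
    return 0;
}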
static void virtnet_get_channels(struct net_device *dev,
                                 struct ethtool_channels *channels)
{
    struct virtnet_info *vi = netdev_priv(dev);

    channels->combined_count = vi->curr_queue_pairs;
    channels->max_combined = vi->max_queue_pairs;
    channels->max_other = 0;
    channels->rx_count = 0;
    channels->tx_count = 0;
    channels->other_count = 0;
}
0
[ "CWE-119", "CWE-787" ]
linux
48900cb6af4282fa0fb6ff4d72a81aa3dadb5c39
61,522,803,978,429,800,000,000,000,000,000,000,000
12
virtio-net: drop NETIF_F_FRAGLIST virtio declares support for NETIF_F_FRAGLIST, but assumes that there are at most MAX_SKB_FRAGS + 2 fragments which isn't always true with a fraglist. A longer fraglist in the skb will make the call to skb_to_sgvec overflow the sg array, leading to memory corruption. Drop NETIF_F_FRAGLIST so we only get what we can handle. Cc: Michael S. Tsirkin <[email protected]> Signed-off-by: Jason Wang <[email protected]> Acked-by: Michael S. Tsirkin <[email protected]> Signed-off-by: David S. Miller <[email protected]>
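The remedy is a line or two of feature-flag hygiene at probe time; a hedged fragment of the shape it takes (the actual patch simply stops setting the flag):

/* Sketch: advertise only what the TX path can handle. A fraglist can
 * exceed MAX_SKB_FRAGS + 2 segments, so do not offer NETIF_F_FRAGLIST. */
dev->features    &= ~NETIF_F_FRAGLIST;
dev->hw_features &= ~NETIF_F_FRAGLIST;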
LZWPreDecode(TIFF* tif, uint16 s) { static const char module[] = "LZWPreDecode"; LZWCodecState *sp = DecoderState(tif); (void) s; assert(sp != NULL); if( sp->dec_codetab == NULL ) { tif->tif_setupdecode( tif ); if( sp->dec_codetab == NULL ) return (0); } /* * Check for old bit-reversed codes. */ if (tif->tif_rawcc >= 2 && tif->tif_rawdata[0] == 0 && (tif->tif_rawdata[1] & 0x1)) { #ifdef LZW_COMPAT if (!sp->dec_decode) { TIFFWarningExt(tif->tif_clientdata, module, "Old-style LZW codes, convert file"); /* * Override default decoding methods with * ones that deal with the old coding. * Otherwise the predictor versions set * above will call the compatibility routines * through the dec_decode method. */ tif->tif_decoderow = LZWDecodeCompat; tif->tif_decodestrip = LZWDecodeCompat; tif->tif_decodetile = LZWDecodeCompat; /* * If doing horizontal differencing, must * re-setup the predictor logic since we * switched the basic decoder methods... */ (*tif->tif_setupdecode)(tif); sp->dec_decode = LZWDecodeCompat; } sp->lzw_maxcode = MAXCODE(BITS_MIN); #else /* !LZW_COMPAT */ if (!sp->dec_decode) { TIFFErrorExt(tif->tif_clientdata, module, "Old-style LZW codes not supported"); sp->dec_decode = LZWDecode; } return (0); #endif/* !LZW_COMPAT */ } else { sp->lzw_maxcode = MAXCODE(BITS_MIN)-1; sp->dec_decode = LZWDecode; } sp->lzw_nbits = BITS_MIN; sp->lzw_nextbits = 0; sp->lzw_nextdata = 0; sp->dec_restart = 0; sp->dec_nbitsmask = MAXCODE(BITS_MIN); #ifdef LZW_CHECKEOS sp->dec_bitsleft = 0; #endif sp->dec_free_entp = sp->dec_codetab + CODE_FIRST; /* * Zero entries that are not yet filled in. We do * this to guard against bogus input data that causes * us to index into undefined entries. If you can * come up with a way to safely bounds-check input codes * while decoding then you can remove this operation. */ _TIFFmemset(sp->dec_free_entp, 0, (CSIZE-CODE_FIRST)*sizeof (code_t)); sp->dec_oldcodep = &sp->dec_codetab[-1]; sp->dec_maxcodep = &sp->dec_codetab[sp->dec_nbitsmask-1]; return (1); }
0
[ "CWE-787" ]
libtiff
58a898cb4459055bb488ca815c23b880c242a27d
120,855,964,163,764,520,000,000,000,000,000,000,000
76
LZWDecodeCompat(): fix potential index-out-of-bounds write. Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2780 / CVE-2018-8905 The fix consists in reusing the similar code from LZWDecode() to validate that we don't write outside of the output buffer.
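A self-contained toy of the check the message describes: before copying a decoded string into the caller's output buffer, verify it fits. This is the shape of the fix, not libtiff's actual decoder:

#include <stdio.h>
#include <string.h>

static int emit(char *op, char *op_limit, const char *s, size_t n)
{
    if ((size_t)(op_limit - op) < n)
        return -1;             /* would write past the output buffer */
    memcpy(op, s, n);
    return 0;
}

int main(void)
{
    char out[4];
    printf("%d\n", emit(out, out + sizeof(out), "abcdef", 6)); /* -1 */
    printf("%d\n", emit(out, out + sizeof(out), "abc", 3));    /* 0 */
    return 0;
}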
archive_write_disk_set_acls(struct archive *a, int fd, const char *name, struct archive_acl *abstract_acl, __LA_MODE_T mode) { int ret = ARCHIVE_OK; (void)mode; /* UNUSED */ if ((archive_acl_types(abstract_acl) & ARCHIVE_ENTRY_ACL_TYPE_POSIX1E) != 0) { /* Solaris writes POSIX.1e access and default ACLs together */ ret = set_acl(a, fd, name, abstract_acl, ARCHIVE_ENTRY_ACL_TYPE_POSIX1E, "posix1e"); /* Simultaneous POSIX.1e and NFSv4 is not supported */ return (ret); } #if ARCHIVE_ACL_SUNOS_NFS4 else if ((archive_acl_types(abstract_acl) & ARCHIVE_ENTRY_ACL_TYPE_NFS4) != 0) { ret = set_acl(a, fd, name, abstract_acl, ARCHIVE_ENTRY_ACL_TYPE_NFS4, "nfs4"); } #endif return (ret); }
1
[ "CWE-59", "CWE-61" ]
libarchive
fba4f123cc456d2b2538f811bb831483bf336bad
55,274,404,642,822,320,000,000,000,000,000,000,000
25
Fix handling of symbolic link ACLs On Linux ACLs on symbolic links are not supported. We must avoid calling acl_set_file() on symbolic links as their targets are modified instead. While here, do not try to set default ACLs on non-directories. Fixes #1565
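A sketch of the guard the message describes: detect a symlink with lstat() and skip acl_set_file(), which would otherwise follow the link and modify the target. Assumes POSIX.1e ACLs via <sys/acl.h> (link with -lacl on Linux); error handling trimmed:

#include <sys/stat.h>
#include <sys/acl.h>

static int set_acl_safely(const char *path, acl_t acl)
{
    struct stat st;

    if (lstat(path, &st) != 0)
        return -1;
    if (S_ISLNK(st.st_mode))
        return 0;              /* skip symlinks entirely */
    return acl_set_file(path, ACL_TYPE_ACCESS, acl);
}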
static void copy_buffer(int ssize, int max_sector, int max_sector_2) { int remaining; /* number of transferred 512-byte sectors */ struct bio_vec bv; char *dma_buffer; int size; struct req_iterator iter; max_sector = transfer_size(ssize, min(max_sector, max_sector_2), blk_rq_sectors(current_req)); if (current_count_sectors <= 0 && CT(raw_cmd->cmd[COMMAND]) == FD_WRITE && buffer_max > fsector_t + blk_rq_sectors(current_req)) current_count_sectors = min_t(int, buffer_max - fsector_t, blk_rq_sectors(current_req)); remaining = current_count_sectors << 9; if (remaining > blk_rq_bytes(current_req) && CT(raw_cmd->cmd[COMMAND]) == FD_WRITE) { DPRINT("in copy buffer\n"); pr_info("current_count_sectors=%ld\n", current_count_sectors); pr_info("remaining=%d\n", remaining >> 9); pr_info("current_req->nr_sectors=%u\n", blk_rq_sectors(current_req)); pr_info("current_req->current_nr_sectors=%u\n", blk_rq_cur_sectors(current_req)); pr_info("max_sector=%d\n", max_sector); pr_info("ssize=%d\n", ssize); } buffer_max = max(max_sector, buffer_max); dma_buffer = floppy_track_buffer + ((fsector_t - buffer_min) << 9); size = blk_rq_cur_bytes(current_req); rq_for_each_segment(bv, current_req, iter) { if (!remaining) break; size = bv.bv_len; SUPBOUND(size, remaining); if (dma_buffer + size > floppy_track_buffer + (max_buffer_sectors << 10) || dma_buffer < floppy_track_buffer) { DPRINT("buffer overrun in copy buffer %d\n", (int)((floppy_track_buffer - dma_buffer) >> 9)); pr_info("fsector_t=%d buffer_min=%d\n", fsector_t, buffer_min); pr_info("current_count_sectors=%ld\n", current_count_sectors); if (CT(raw_cmd->cmd[COMMAND]) == FD_READ) pr_info("read\n"); if (CT(raw_cmd->cmd[COMMAND]) == FD_WRITE) pr_info("write\n"); break; } if (CT(raw_cmd->cmd[COMMAND]) == FD_READ) memcpy_to_bvec(&bv, dma_buffer); else memcpy_from_bvec(dma_buffer, &bv); remaining -= size; dma_buffer += size; } if (remaining) { if (remaining > 0) max_sector -= remaining >> 9; DPRINT("weirdness: remaining %d\n", remaining >> 9); } }
0
[ "CWE-416" ]
linux
233087ca063686964a53c829d547c7571e3f67bf
246,866,795,395,796,470,000,000,000,000,000,000,000
72
floppy: disable FDRAWCMD by default Minh Yuan reported a concurrency use-after-free issue in the floppy code between raw_cmd_ioctl and seek_interrupt. [ It turns out this has been around, and that others have reported the KASAN splats over the years, but Minh Yuan had a reproducer for it and so gets primary credit for reporting it for this fix - Linus ] The problem is, this driver tends to break very easily and nowadays, nobody is expected to use FDRAWCMD anyway since it was used to manipulate non-standard formats. The risk of breaking the driver is higher than the risk presented by this race, and accessing the device requires privileges anyway. Let's just add a config option to completely disable this ioctl and leave it disabled by default. Distros shouldn't use it, and only those running on antique hardware might need to enable it. Link: https://lore.kernel.org/all/[email protected]/ Link: https://lore.kernel.org/lkml/CAKcFiNC=MfYVW-Jt9A3=FPJpTwCD2PL_ULNCpsCVE5s8ZeBQgQ@mail.gmail.com Link: https://lore.kernel.org/all/CAEAjamu1FRhz6StCe_55XY5s389ZP_xmCF69k987En+1z53=eg@mail.gmail.com Reported-by: Minh Yuan <[email protected]> Reported-by: [email protected] Reported-by: cruise k <[email protected]> Reported-by: Kyungtae Kim <[email protected]> Suggested-by: Linus Torvalds <[email protected]> Tested-by: Denis Efremov <[email protected]> Signed-off-by: Willy Tarreau <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
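A sketch of gating the ioctl behind a build-time option as described; CONFIG_BLK_DEV_FD_RAWCMD is the option name the patch introduces, and the switch is abbreviated (the real code and its fallback error value may differ):

switch (cmd) {
#ifdef CONFIG_BLK_DEV_FD_RAWCMD
case FDRAWCMD:
    return raw_cmd_ioctl(drive, param);   /* legacy, opt-in only */
#endif
default:
    return -EINVAL;    /* FDRAWCMD lands here when the option is off */
}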
static int io_fsync(struct io_kiocb *req, bool force_nonblock)
{
    loff_t end = req->sync.off + req->sync.len;
    int ret;

    /* fsync always requires a blocking context */
    if (force_nonblock)
        return -EAGAIN;

    ret = vfs_fsync_range(req->file, req->sync.off,
                          end > 0 ? end : LLONG_MAX,
                          req->sync.flags & IORING_FSYNC_DATASYNC);
    if (ret < 0)
        req_set_fail_links(req);
    io_req_complete(req, ret);
    return 0;
}
0
[]
linux
0f2122045b946241a9e549c2a76cea54fa58a7ff
164,614,796,642,400,890,000,000,000,000,000,000,000
17
io_uring: don't rely on weak ->files references Grab actual references to the files_struct. To avoid circular reference issues due to this, we add a per-task note that keeps track of what io_uring contexts a task has used. When the task execs or exits its assigned files, we cancel requests based on this tracking. With that, we can grab proper references to the files table, and no longer need to rely on stashing away ring_fd and ring_file to check if the ring_fd may have been closed. Cc: [email protected] # v5.5+ Reviewed-by: Pavel Begunkov <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
runAndPrintExceptions(const boost::function<void ()> &func, bool toAbort)
{
    try {
        func();
    } catch (const boost::thread_interrupted &) {
        throw;
    } catch (const tracable_exception &e) {
        P_ERROR("Exception: " << e.what() << "\n" << e.backtrace());
        if (toAbort) {
            abort();
        }
    }
}
0
[ "CWE-401" ]
passenger
94428057c602da3d6d34ef75c78091066ecac5c0
149,574,338,373,759,670,000,000,000,000,000,000,000
12
Fix a symlink-related security vulnerability. The fix in commit 34b10878 and contained a small attack time window in between two filesystem operations. This has been fixed.
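A self-contained C sketch of the standard remedy for a race between two filesystem operations on a path: open with O_NOFOLLOW and do every subsequent step on the file descriptor, so the check and the use hit the same inode. This shows the general technique, not Passenger's actual code:

#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

static int chown_no_symlink(const char *path, uid_t uid, gid_t gid)
{
    struct stat st;
    int fd = open(path, O_RDONLY | O_NOFOLLOW);   /* fails on symlinks */

    if (fd < 0)
        return -1;
    if (fstat(fd, &st) != 0 || !S_ISREG(st.st_mode)) {
        close(fd);
        return -1;
    }
    if (fchown(fd, uid, gid) != 0) {              /* same inode as fstat */
        close(fd);
        return -1;
    }
    return close(fd);
}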
xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes) { int offset; int minforkoff; /* lower limit on valid forkoff locations */ int maxforkoff; /* upper limit on valid forkoff locations */ int dsize; xfs_mount_t *mp = dp->i_mount; /* rounded down */ offset = (XFS_LITINO(mp, dp->i_d.di_version) - bytes) >> 3; switch (dp->i_d.di_format) { case XFS_DINODE_FMT_DEV: minforkoff = roundup(sizeof(xfs_dev_t), 8) >> 3; return (offset >= minforkoff) ? minforkoff : 0; case XFS_DINODE_FMT_UUID: minforkoff = roundup(sizeof(uuid_t), 8) >> 3; return (offset >= minforkoff) ? minforkoff : 0; } /* * If the requested numbers of bytes is smaller or equal to the * current attribute fork size we can always proceed. * * Note that if_bytes in the data fork might actually be larger than * the current data fork size is due to delalloc extents. In that * case either the extent count will go down when they are converted * to real extents, or the delalloc conversion will take care of the * literal area rebalancing. */ if (bytes <= XFS_IFORK_ASIZE(dp)) return dp->i_d.di_forkoff; /* * For attr2 we can try to move the forkoff if there is space in the * literal area, but for the old format we are done if there is no * space in the fixed attribute fork. */ if (!(mp->m_flags & XFS_MOUNT_ATTR2)) return 0; dsize = dp->i_df.if_bytes; switch (dp->i_d.di_format) { case XFS_DINODE_FMT_EXTENTS: /* * If there is no attr fork and the data fork is extents, * determine if creating the default attr fork will result * in the extents form migrating to btree. If so, the * minimum offset only needs to be the space required for * the btree root. */ if (!dp->i_d.di_forkoff && dp->i_df.if_bytes > xfs_default_attroffset(dp)) dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS); break; case XFS_DINODE_FMT_BTREE: /* * If we have a data btree then keep forkoff if we have one, * otherwise we are adding a new attr, so then we set * minforkoff to where the btree root can finish so we have * plenty of room for attrs */ if (dp->i_d.di_forkoff) { if (offset < dp->i_d.di_forkoff) return 0; return dp->i_d.di_forkoff; } dsize = XFS_BMAP_BROOT_SPACE(mp, dp->i_df.if_broot); break; } /* * A data fork btree root must have space for at least * MINDBTPTRS key/ptr pairs if the data fork is small or empty. */ minforkoff = MAX(dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS)); minforkoff = roundup(minforkoff, 8) >> 3; /* attr fork btree root can have at least this many key/ptr pairs */ maxforkoff = XFS_LITINO(mp, dp->i_d.di_version) - XFS_BMDR_SPACE_CALC(MINABTPTRS); maxforkoff = maxforkoff >> 3; /* rounded down */ if (offset >= maxforkoff) return maxforkoff; if (offset >= minforkoff) return offset; return 0; }
0
[ "CWE-241", "CWE-19" ]
linux
8275cdd0e7ac550dcce2b3ef6d2fb3b808c1ae59
3,378,253,631,291,178,600,000,000,000,000,000,000
90
xfs: remote attribute overwrite causes transaction overrun Commit e461fcb ("xfs: remote attribute lookups require the value length") passes the remote attribute length in the xfs_da_args structure on lookup so that CRC calculations and validity checking can be performed correctly by related code. This, unfortunately, has the side effect of changing the args->valuelen parameter in cases where it shouldn't. That is, when we replace a remote attribute, the incoming replacement stores the value and length in args->value and args->valuelen, but then the lookup which finds the existing remote attribute overwrites args->valuelen with the length of the remote attribute being replaced. Hence when we go to create the new attribute, we create it of the size of the existing remote attribute, not the size it is supposed to be. When the new attribute is much smaller than the old attribute, this results in a transaction overrun and an ASSERT() failure on a debug kernel: XFS: Assertion failed: tp->t_blk_res_used <= tp->t_blk_res, file: fs/xfs/xfs_trans.c, line: 331 Fix this by keeping the remote attribute value length separate from the attribute value length in the xfs_da_args structure. This enables us to pass the length of the remote attribute to be removed without overwriting the new attribute's length. Also, ensure that when we save remote block contexts for a later rename we zero the original state variables so that we don't confuse the state of the attribute to be removed with the state of the new attribute that we just added. [Spotted by Brian Foster.] Signed-off-by: Dave Chinner <[email protected]> Reviewed-by: Brian Foster <[email protected]> Signed-off-by: Dave Chinner <[email protected]>
static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{
    switch (to_vmx(vcpu)->exit_reason.basic) {
    case EXIT_REASON_MSR_WRITE:
        return handle_fastpath_set_msr_irqoff(vcpu);
    case EXIT_REASON_PREEMPTION_TIMER:
        return handle_fastpath_preemption_timer(vcpu);
    default:
        return EXIT_FASTPATH_NONE;
    }
}
0
[ "CWE-787" ]
linux
04c4f2ee3f68c9a4bf1653d15f1a9a435ae33f7a
48,220,851,871,317,900,000,000,000,000,000,000,000
11
KVM: VMX: Don't use vcpu->run->internal.ndata as an array index __vmx_handle_exit() uses vcpu->run->internal.ndata as an index for an array access. Since vcpu->run is (can be) mapped to a user address space with a writer permission, the 'ndata' could be updated by the user process at anytime (the user process can set it to outside the bounds of the array). So, it is not safe that __vmx_handle_exit() uses the 'ndata' that way. Fixes: 1aa561b1a4c0 ("kvm: x86: Add "last CPU" to some KVM_EXIT information") Signed-off-by: Reiji Watanabe <[email protected]> Reviewed-by: Jim Mattson <[email protected]> Message-Id: <[email protected]> Cc: [email protected] Signed-off-by: Paolo Bonzini <[email protected]>
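A self-contained sketch of the rule this message enforces: a value living in shared, user-writable memory must be bounds-checked before it is used as an array index. Names and sizes are illustrative:

#include <stdio.h>

#define NDATA_MAX 16

static int store_exit_info(unsigned long *data, unsigned int ndata,
                           unsigned long value)
{
    if (ndata >= NDATA_MAX)
        return -1;             /* refuse an out-of-bounds index */
    data[ndata] = value;
    return 0;
}

int main(void)
{
    unsigned long data[NDATA_MAX] = {0};
    printf("%d\n", store_exit_info(data, 1000, 42)); /* -1 */
    printf("%d\n", store_exit_info(data, 3, 42));    /* 0 */
    return 0;
}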
static my_bool showstat_handlerton(THD *thd, plugin_ref plugin, void *arg)
{
    enum ha_stat_type stat= *(enum ha_stat_type *) arg;
    handlerton *hton= plugin_hton(plugin);
    if (hton->state == SHOW_OPTION_YES && hton->show_status &&
        hton->show_status(hton, thd, stat_print, stat))
        return TRUE;
    return FALSE;
}
0
[ "CWE-416" ]
server
af810407f78b7f792a9bb8c47c8c532eb3b3a758
211,475,444,650,614,180,000,000,000,000,000,000,000
10
MDEV-28098 incorrect key in "dup value" error after long unique reset errkey after using it, so that it wouldn't affect the next error message in the next statement
LogicalResult matchAndRewrite(TFL::AddOp add_op, PatternRewriter &rewriter) const override { // Match Add. DenseElementsAttr added_value; Value constant_val = add_op.rhs(); if (!matchPattern(constant_val, m_Constant(&added_value))) return failure(); // Match Fully Connected. auto fc_op = dyn_cast_or_null<TFL::FullyConnectedOp>(add_op.lhs().getDefiningOp()); if (!fc_op) return failure(); // Check if the constant RHS is either 0D (scalar), or a 1D with // `{num_channels}` shape. auto constant_val_type = constant_val.getType().cast<TensorType>(); // In TFLite FullyConnect definition, bias must be a 1D tensor where // the number of elements is equal to the number of channels. // If it's not 1D or 0D (which can be broadcasted to 1D), reject the // matching. bool is_scalar_rhs = false; if (constant_val_type.getRank() == 0) { is_scalar_rhs = true; } else if (constant_val_type.getRank() != 1) { return failure(); } Value filter = fc_op.filter(); Value bias = fc_op.bias(); ElementsAttr bias_value; const bool is_none_bias = bias.getType().isa<NoneType>(); if (fc_op.fused_activation_function() != "NONE") return failure(); if (!is_none_bias && !matchPattern(bias, m_Constant(&bias_value))) return failure(); // Rewrite if (is_none_bias) { if (is_scalar_rhs) { // If the `constant_val` is scalar, we must the shape of filter // to properly broadcast the scalar to `{num_channels}` shape. // Get the number of channels if possible. auto filter_type = filter.getType().dyn_cast<RankedTensorType>(); // Filter must be a `2D` tensor with `{num_channels, num_features}` // shape. The following check is rejecting unknown rank (-1). if (filter_type == nullptr || filter_type.getRank() != 2) { return failure(); } int num_channels = filter_type.getShape()[0]; // Create a zero tensor with shape {num_channels}, and the type need to // be the same as constant_val. // This is a way to gracefully handle scalar tensor. The Add will always // be constant-folded away regardless if `constant_val` is a scalar or // not. RankedTensorType type = RankedTensorType::get( {num_channels}, constant_val_type.getElementType()); auto attr = rewriter.getZeroAttr(type); bias = rewriter.create<ConstantOp>(add_op.getLoc(), type, attr); auto none_af = rewriter.getStringAttr("NONE"); bias = rewriter.create<AddOp>(add_op.getLoc(), bias, constant_val, none_af) .output(); } else { // If there no pre-existing bias and the `constant_val` is 1D, simply // use `constant_val` as bias. bias = constant_val; } } else { auto none_af = rewriter.getStringAttr("NONE"); bias = rewriter.create<AddOp>(add_op.getLoc(), bias, constant_val, none_af) .output(); } auto fc = rewriter.create<TFL::FullyConnectedOp>( FusedLoc::get(fc_op.getContext(), {fc_op.getLoc(), add_op.getLoc()}), add_op.getType(), /*input=*/fc_op.input(), /*filter=*/filter, /*bias=*/bias, /*fused_activation_function=*/ rewriter.getStringAttr(add_op.fused_activation_function()), /*weights_format=*/rewriter.getStringAttr(fc_op.weights_format()), /*keep_num_dims=*/rewriter.getBoolAttr(fc_op.keep_num_dims())); rewriter.replaceOp(add_op, fc.output()); return success(); }
0
[ "CWE-476", "CWE-125" ]
tensorflow
d6b57f461b39fd1aa8c1b870f1b974aac3554955
327,957,790,863,345,900,000,000,000,000,000,000,000
90
Prevent nullptr dereference in MLIR TFLite dialect/optimizer. PiperOrigin-RevId: 387220762 Change-Id: Id136ef04bb3d36123b4685d316ae81a9ec924d6b
static void test_offset()
{
    assert_true_rule(
        "rule test { strings: $a = \"ssi\" condition: @a == 2 }",
        "mississippi");

    assert_true_rule(
        "rule test { strings: $a = \"ssi\" condition: @a == @a[1] }",
        "mississippi");

    assert_true_rule(
        "rule test { strings: $a = \"ssi\" condition: @a[2] == 5 }",
        "mississippi");
}
0
[ "CWE-476", "CWE-703", "CWE-125" ]
yara
3119b232c9c453c98d8fa8b6ae4e37ba18117cd4
336,628,974,652,384,830,000,000,000,000,000,000,000
14
re_lexer: Make reading escape sequences more robust (#586) * Add test for issue #503 * re_lexer: Make reading escape sequences more robust This commit fixes parsing incomplete escape sequences at the end of a regular expression and parsing things like \xxy (invalid hex digits) which before were silently turned into (char)255. Close #503 * Update re_lexer.c
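A self-contained sketch of the robust escape reading the message describes: accept a \x escape only when both hex digits are present and valid, instead of silently producing a byte such as (char)255. Not yara's lexer, just the idea:

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

/* p points just past "\x"; returns 0..255, or -1 on a truncated or
 * invalid escape (a NUL terminator fails the isxdigit test). */
static int read_hex_escape(const char *p)
{
    if (!isxdigit((unsigned char)p[0]) || !isxdigit((unsigned char)p[1]))
        return -1;
    char buf[3] = { p[0], p[1], 0 };
    return (int)strtol(buf, NULL, 16);
}

int main(void)
{
    printf("%d\n", read_hex_escape("4a"));  /* 74 */
    printf("%d\n", read_hex_escape("xy"));  /* -1: invalid digits */
    printf("%d\n", read_hex_escape("4"));   /* -1: truncated input */
    return 0;
}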
_gnutls_get_hash_block_len (gnutls_digest_algorithm_t algo)
{
  switch (algo)
    {
    case GNUTLS_DIG_MD5:
    case GNUTLS_DIG_SHA1:
    case GNUTLS_DIG_RMD160:
    case GNUTLS_DIG_SHA256:
    case GNUTLS_DIG_SHA384:
    case GNUTLS_DIG_SHA512:
    case GNUTLS_DIG_SHA224:
      return 64;
    default:
      return 0;
    }
}
0
[ "CWE-310" ]
gnutls
458c67cf98740e7b12404f6c30e0d5317d56fd30
20,956,560,055,313,003,000,000,000,000,000,000,000
16
Fixes to avoid a timing attack in TLS CBC record parsing.
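Mitigating a CBC timing attack comes down to data-independent work; a self-contained sketch of the usual building block, a comparison that inspects every byte no matter where the first mismatch is. This illustrates the principle, not GnuTLS's actual record code:

#include <stddef.h>
#include <stdio.h>

static int ct_memcmp(const unsigned char *a, const unsigned char *b, size_t n)
{
    unsigned char diff = 0;

    for (size_t i = 0; i < n; i++)
        diff |= a[i] ^ b[i];    /* no early exit: constant time in n */
    return diff != 0;
}

int main(void)
{
    unsigned char x[4] = {1, 2, 3, 4}, y[4] = {1, 2, 3, 5};
    printf("%d\n", ct_memcmp(x, y, 4));  /* 1: differ */
    printf("%d\n", ct_memcmp(x, x, 4));  /* 0: equal */
    return 0;
}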
static void sco_conn_ready(struct sco_conn *conn) { struct sock *parent; struct sock *sk = conn->sk; BT_DBG("conn %p", conn); if (sk) { lock_sock(sk); sco_sock_clear_timer(sk); sk->sk_state = BT_CONNECTED; sk->sk_state_change(sk); release_sock(sk); } else { sco_conn_lock(conn); if (!conn->hcon) { sco_conn_unlock(conn); return; } parent = sco_get_sock_listen(&conn->hcon->src); if (!parent) { sco_conn_unlock(conn); return; } lock_sock(parent); sk = sco_sock_alloc(sock_net(parent), NULL, BTPROTO_SCO, GFP_ATOMIC, 0); if (!sk) { release_sock(parent); sco_conn_unlock(conn); return; } sco_sock_init(sk, parent); bacpy(&sco_pi(sk)->src, &conn->hcon->src); bacpy(&sco_pi(sk)->dst, &conn->hcon->dst); hci_conn_hold(conn->hcon); __sco_chan_add(conn, sk, parent); if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) sk->sk_state = BT_CONNECT2; else sk->sk_state = BT_CONNECTED; /* Wake up parent */ parent->sk_data_ready(parent); release_sock(parent); sco_conn_unlock(conn); } }
0
[ "CWE-416" ]
linux
0771cbb3b97d3c1d68eecd7f00055f599954c34e
22,388,886,189,032,200,000,000,000,000,000,000,000
58
Bluetooth: SCO: Replace use of memcpy_from_msg with bt_skb_sendmsg This makes use of bt_skb_sendmsg instead of allocating a different buffer to be used with memcpy_from_msg which cause one extra copy. Signed-off-by: Luiz Augusto von Dentz <[email protected]> Signed-off-by: Marcel Holtmann <[email protected]>
QPDFWriter::writeLinearized() { // Optimize file and enqueue objects in order discardGeneration(this->m->object_to_object_stream, this->m->object_to_object_stream_no_gen); bool need_xref_stream = (! this->m->object_to_object_stream.empty()); this->m->pdf.optimize(this->m->object_to_object_stream_no_gen); std::vector<QPDFObjectHandle> part4; std::vector<QPDFObjectHandle> part6; std::vector<QPDFObjectHandle> part7; std::vector<QPDFObjectHandle> part8; std::vector<QPDFObjectHandle> part9; QPDF::Writer::getLinearizedParts( this->m->pdf, this->m->object_to_object_stream_no_gen, part4, part6, part7, part8, part9); // Object number sequence: // // second half // second half uncompressed objects // second half xref stream, if any // second half compressed objects // first half // linearization dictionary // first half xref stream, if any // part 4 uncompresesd objects // encryption dictionary, if any // hint stream // part 6 uncompressed objects // first half compressed objects // // Second half objects int second_half_uncompressed = QIntC::to_int(part7.size() + part8.size() + part9.size()); int second_half_first_obj = 1; int after_second_half = 1 + second_half_uncompressed; this->m->next_objid = after_second_half; int second_half_xref = 0; if (need_xref_stream) { second_half_xref = this->m->next_objid++; } // Assign numbers to all compressed objects in the second half. std::vector<QPDFObjectHandle>* vecs2[] = {&part7, &part8, &part9}; for (int i = 0; i < 3; ++i) { for (std::vector<QPDFObjectHandle>::iterator iter = (*vecs2[i]).begin(); iter != (*vecs2[i]).end(); ++iter) { assignCompressedObjectNumbers((*iter).getObjGen()); } } int second_half_end = this->m->next_objid - 1; int second_trailer_size = this->m->next_objid; // First half objects int first_half_start = this->m->next_objid; int lindict_id = this->m->next_objid++; int first_half_xref = 0; if (need_xref_stream) { first_half_xref = this->m->next_objid++; } int part4_first_obj = this->m->next_objid; this->m->next_objid += QIntC::to_int(part4.size()); int after_part4 = this->m->next_objid; if (this->m->encrypted) { this->m->encryption_dict_objid = this->m->next_objid++; } int hint_id = this->m->next_objid++; int part6_first_obj = this->m->next_objid; this->m->next_objid += QIntC::to_int(part6.size()); int after_part6 = this->m->next_objid; // Assign numbers to all compressed objects in the first half std::vector<QPDFObjectHandle>* vecs1[] = {&part4, &part6}; for (int i = 0; i < 2; ++i) { for (std::vector<QPDFObjectHandle>::iterator iter = (*vecs1[i]).begin(); iter != (*vecs1[i]).end(); ++iter) { assignCompressedObjectNumbers((*iter).getObjGen()); } } int first_half_end = this->m->next_objid - 1; int first_trailer_size = this->m->next_objid; int part4_end_marker = part4.back().getObjectID(); int part6_end_marker = part6.back().getObjectID(); qpdf_offset_t space_before_zero = 0; qpdf_offset_t file_size = 0; qpdf_offset_t part6_end_offset = 0; qpdf_offset_t first_half_max_obj_offset = 0; qpdf_offset_t second_xref_offset = 0; qpdf_offset_t first_xref_end = 0; qpdf_offset_t second_xref_end = 0; this->m->next_objid = part4_first_obj; enqueuePart(part4); assert(this->m->next_objid == after_part4); this->m->next_objid = part6_first_obj; enqueuePart(part6); assert(this->m->next_objid == after_part6); this->m->next_objid = second_half_first_obj; enqueuePart(part7); enqueuePart(part8); enqueuePart(part9); assert(this->m->next_objid == after_second_half); qpdf_offset_t hint_length = 0; PointerHolder<Buffer> hint_buffer; // Write file in two passes. 
Part numbers refer to PDF spec 1.4. FILE* lin_pass1_file = 0; for (int pass = 1; pass <= 2; ++pass) { if (pass == 1) { if (! this->m->lin_pass1_filename.empty()) { lin_pass1_file = QUtil::safe_fopen( this->m->lin_pass1_filename.c_str(), "wb"); pushPipeline( new Pl_StdioFile("linearization pass1", lin_pass1_file)); activatePipelineStack(); } else { pushDiscardFilter(); } if (this->m->deterministic_id) { pushMD5Pipeline(); } } // Part 1: header writeHeader(); // Part 2: linearization parameter dictionary. Save enough // space to write real dictionary. 200 characters is enough // space if all numerical values in the parameter dictionary // that contain offsets are 20 digits long plus a few extra // characters for safety. The entire linearization parameter // dictionary must appear within the first 1024 characters of // the file. qpdf_offset_t pos = this->m->pipeline->getCount(); openObject(lindict_id); writeString("<<"); if (pass == 2) { std::vector<QPDFObjectHandle> const& pages = this->m->pdf.getAllPages(); int first_page_object = this->m->obj_renumber[pages.at(0).getObjGen()]; int npages = QIntC::to_int(pages.size()); writeString(" /Linearized 1 /L "); writeString(QUtil::int_to_string(file_size + hint_length)); // Implementation note 121 states that a space is // mandatory after this open bracket. writeString(" /H [ "); writeString(QUtil::int_to_string( this->m->xref[hint_id].getOffset())); writeString(" "); writeString(QUtil::int_to_string(hint_length)); writeString(" ] /O "); writeString(QUtil::int_to_string(first_page_object)); writeString(" /E "); writeString(QUtil::int_to_string(part6_end_offset + hint_length)); writeString(" /N "); writeString(QUtil::int_to_string(npages)); writeString(" /T "); writeString(QUtil::int_to_string(space_before_zero + hint_length)); } writeString(" >>"); closeObject(lindict_id); static int const pad = 200; int spaces = QIntC::to_int(pos - this->m->pipeline->getCount() + pad); assert(spaces >= 0); writePad(spaces); writeString("\n"); // If the user supplied any additional header text, write it // here after the linearization parameter dictionary. writeString(this->m->extra_header_text); // Part 3: first page cross reference table and trailer. qpdf_offset_t first_xref_offset = this->m->pipeline->getCount(); qpdf_offset_t hint_offset = 0; if (pass == 2) { hint_offset = this->m->xref[hint_id].getOffset(); } if (need_xref_stream) { // Must pad here too. if (pass == 1) { // Set first_half_max_obj_offset to a value large // enough to force four bytes to be reserved for each // file offset. This would provide adequate space for // the xref stream as long as the last object in page // 1 starts with in the first 4 GB of the file, which // is extremely likely. In the second pass, we will // know the actual value for this, but it's okay if // it's smaller. first_half_max_obj_offset = 1 << 25; } pos = this->m->pipeline->getCount(); writeXRefStream(first_half_xref, first_half_end, first_half_max_obj_offset, t_lin_first, first_half_start, first_half_end, first_trailer_size, hint_length + second_xref_offset, hint_id, hint_offset, hint_length, (pass == 1), pass); qpdf_offset_t endpos = this->m->pipeline->getCount(); if (pass == 1) { // Pad so we have enough room for the real xref // stream. writePad(calculateXrefStreamPadding(endpos - pos)); first_xref_end = this->m->pipeline->getCount(); } else { // Pad so that the next object starts at the same // place as in pass 1. 
writePad(QIntC::to_int(first_xref_end - endpos)); if (this->m->pipeline->getCount() != first_xref_end) { throw std::logic_error( "insufficient padding for first pass xref stream"); } } writeString("\n"); } else { writeXRefTable(t_lin_first, first_half_start, first_half_end, first_trailer_size, hint_length + second_xref_offset, (pass == 1), hint_id, hint_offset, hint_length, pass); writeString("startxref\n0\n%%EOF\n"); } // Parts 4 through 9 for (std::list<QPDFObjectHandle>::iterator iter = this->m->object_queue.begin(); iter != this->m->object_queue.end(); ++iter) { QPDFObjectHandle cur_object = (*iter); if (cur_object.getObjectID() == part6_end_marker) { first_half_max_obj_offset = this->m->pipeline->getCount(); } writeObject(cur_object); if (cur_object.getObjectID() == part4_end_marker) { if (this->m->encrypted) { writeEncryptionDictionary(); } if (pass == 1) { this->m->xref[hint_id] = QPDFXRefEntry(1, this->m->pipeline->getCount(), 0); } else { // Part 5: hint stream writeBuffer(hint_buffer); } } if (cur_object.getObjectID() == part6_end_marker) { part6_end_offset = this->m->pipeline->getCount(); } } // Part 10: overflow hint stream -- not used // Part 11: main cross reference table and trailer second_xref_offset = this->m->pipeline->getCount(); if (need_xref_stream) { pos = this->m->pipeline->getCount(); space_before_zero = writeXRefStream(second_half_xref, second_half_end, second_xref_offset, t_lin_second, 0, second_half_end, second_trailer_size, 0, 0, 0, 0, (pass == 1), pass); qpdf_offset_t endpos = this->m->pipeline->getCount(); if (pass == 1) { // Pad so we have enough room for the real xref // stream. See comments for previous xref stream on // how we calculate the padding. writePad(calculateXrefStreamPadding(endpos - pos)); writeString("\n"); second_xref_end = this->m->pipeline->getCount(); } else { // Make the file size the same. qpdf_offset_t pos = this->m->pipeline->getCount(); writePad( QIntC::to_int(second_xref_end + hint_length - 1 - pos)); writeString("\n"); // If this assertion fails, maybe we didn't have // enough padding above. if (this->m->pipeline->getCount() != second_xref_end + hint_length) { throw std::logic_error( "count mismatch after xref stream;" " possible insufficient padding?"); } } } else { space_before_zero = writeXRefTable(t_lin_second, 0, second_half_end, second_trailer_size, 0, false, 0, 0, 0, pass); } writeString("startxref\n"); writeString(QUtil::int_to_string(first_xref_offset)); writeString("\n%%EOF\n"); discardGeneration(this->m->obj_renumber, this->m->obj_renumber_no_gen); if (pass == 1) { if (this->m->deterministic_id) { QTC::TC("qpdf", "QPDFWriter linearized deterministic ID", need_xref_stream ? 0 : 1); computeDeterministicIDData(); popPipelineStack(); assert(this->m->md5_pipeline == 0); } // Close first pass pipeline file_size = this->m->pipeline->getCount(); popPipelineStack(); // Save hint offset since it will be set to zero by // calling openObject. 
qpdf_offset_t hint_offset = this->m->xref[hint_id].getOffset(); // Write hint stream to a buffer pushPipeline(new Pl_Buffer("hint buffer")); activatePipelineStack(); writeHintStream(hint_id); popPipelineStack(&hint_buffer); hint_length = QIntC::to_offset(hint_buffer->getSize()); // Restore hint offset this->m->xref[hint_id] = QPDFXRefEntry(1, hint_offset, 0); if (lin_pass1_file) { // Write some debugging information fprintf(lin_pass1_file, "%% hint_offset=%s\n", QUtil::int_to_string(hint_offset).c_str()); fprintf(lin_pass1_file, "%% hint_length=%s\n", QUtil::int_to_string(hint_length).c_str()); fprintf(lin_pass1_file, "%% second_xref_offset=%s\n", QUtil::int_to_string(second_xref_offset).c_str()); fprintf(lin_pass1_file, "%% second_xref_end=%s\n", QUtil::int_to_string(second_xref_end).c_str()); fclose(lin_pass1_file); lin_pass1_file = 0; } } } }
0
[ "CWE-787" ]
qpdf
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
103,732,300,492,633,480,000,000,000,000,000,000,000
393
Fix sign and conversion warnings (major) This makes all integer type conversions that have potential data loss explicit with calls that do range checks and raise an exception. After this commit, qpdf builds with no warnings when -Wsign-conversion -Wconversion is used with gcc or clang or when -W3 -Wd4800 is used with MSVC. This significantly reduces the likelihood of potential crashes from bogus integer values. There are some parts of the code that take int when they should take size_t or an offset. Such places would make qpdf not support files with more than 2^31 of something that usually wouldn't be so large. In the event that such a file shows up and is valid, at least qpdf would raise an error in the right spot so the issue could be legitimately addressed rather than failing in some weird way because of a silent overflow condition.
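QIntC is qpdf's C++ range-checking shim; the same idea expressed in plain C, to match the other examples in this document, looks like this (names are mine):

#include <errno.h>
#include <limits.h>
#include <stddef.h>
#include <stdio.h>

/* Checked narrowing: size_t -> int with an explicit range test instead
 * of a silent truncation or sign flip. */
static int to_int_checked(size_t v, int *out)
{
    if (v > (size_t)INT_MAX) {
        errno = ERANGE;
        return -1;
    }
    *out = (int)v;
    return 0;
}

int main(void)
{
    int n;
    printf("%d\n", to_int_checked((size_t)INT_MAX + 1, &n)); /* -1 */
    if (to_int_checked(42, &n) == 0)
        printf("%d\n", n);                                   /* 42 */
    return 0;
}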
int LZ4_sizeofState() { return LZ4_STREAMSIZE; }
0
[ "CWE-20" ]
lz4
da5373197e84ee49d75b8334d4510689731d6e90
215,899,523,286,231,450,000,000,000,000,000,000,000
1
Fixed: issue 52 (reported by Ludwig Strigeus)
nautilus_file_mark_desktop_file_trusted (GFile *file,
                                         GtkWindow *parent_window,
                                         gboolean interactive,
                                         NautilusOpCallback done_callback,
                                         gpointer done_callback_data)
{
    GTask *task;
    MarkTrustedJob *job;

    job = op_job_new (MarkTrustedJob, parent_window);
    job->file = g_object_ref (file);
    job->interactive = interactive;
    job->done_callback = done_callback;
    job->done_callback_data = done_callback_data;

    task = g_task_new (NULL, NULL, mark_trusted_task_done, job);
    g_task_set_task_data (task, job, NULL);
    g_task_run_in_thread (task, mark_trusted_task_thread_func);
    g_object_unref (task);
}
1
[ "CWE-20" ]
nautilus
1630f53481f445ada0a455e9979236d31a8d3bb0
264,983,255,663,411,820,000,000,000,000,000,000,000
20
mime-actions: use file metadata for trusting desktop files Currently we only trust desktop files that have the executable bit set, and don't replace the displayed icon or the displayed name until they are trusted, which prevents a malicious desktop file from running arbitrary programs. However, the executable permission is preserved if the desktop file comes from a compressed file. To prevent this, add a metadata::trusted entry to the file once the user acknowledges the file as trusted. This metadata cannot be added to the file unless the attacker already has access to the computer. Also remove the SHEBANG "trusted" content we were putting inside the desktop file, since that adds no security given that it can come with the file itself. https://bugzilla.gnome.org/show_bug.cgi?id=777991
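A sketch of persisting the trust decision through GIO's file-attribute API, which the metadata:: namespace rides on; the attribute name follows the message, while the value string and the trimmed error handling are illustrative:

#include <gio/gio.h>

static gboolean
mark_trusted (GFile *file)
{
    g_autoptr(GError) error = NULL;

    /* Persist the user's decision as metadata rather than relying on
     * the executable bit, which archives can preserve. */
    return g_file_set_attribute_string (file,
                                        "metadata::trusted", "true",
                                        G_FILE_QUERY_INFO_NONE,
                                        NULL /* cancellable */,
                                        &error);
}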
blockconvLow(l_uint32 *data, l_int32 w, l_int32 h, l_int32 wpl, l_uint32 *dataa, l_int32 wpla, l_int32 wc, l_int32 hc) { l_int32 i, j, imax, imin, jmax, jmin; l_int32 wn, hn, fwc, fhc, wmwc, hmhc; l_float32 norm, normh, normw; l_uint32 val; l_uint32 *linemina, *linemaxa, *line; PROCNAME("blockconvLow"); wmwc = w - wc; hmhc = h - hc; if (wmwc <= 0 || hmhc <= 0) { L_ERROR("wc >= w || hc >=h\n", procName); return; } fwc = 2 * wc + 1; fhc = 2 * hc + 1; norm = 1.0 / ((l_float32)(fwc) * fhc); /*------------------------------------------------------------* * Compute, using b.c. only to set limits on the accum image * *------------------------------------------------------------*/ for (i = 0; i < h; i++) { imin = L_MAX(i - 1 - hc, 0); imax = L_MIN(i + hc, h - 1); line = data + wpl * i; linemina = dataa + wpla * imin; linemaxa = dataa + wpla * imax; for (j = 0; j < w; j++) { jmin = L_MAX(j - 1 - wc, 0); jmax = L_MIN(j + wc, w - 1); val = linemaxa[jmax] - linemaxa[jmin] + linemina[jmin] - linemina[jmax]; val = (l_uint8)(norm * val + 0.5); /* see comment above */ SET_DATA_BYTE(line, j, val); } } /*------------------------------------------------------------* * Fix normalization for boundary pixels * *------------------------------------------------------------*/ for (i = 0; i <= hc; i++) { /* first hc + 1 lines */ hn = hc + i; normh = (l_float32)fhc / (l_float32)hn; /* > 1 */ line = data + wpl * i; for (j = 0; j <= wc; j++) { wn = wc + j; normw = (l_float32)fwc / (l_float32)wn; /* > 1 */ val = GET_DATA_BYTE(line, j); val = (l_uint8)L_MIN(val * normh * normw, 255); SET_DATA_BYTE(line, j, val); } for (j = wc + 1; j < wmwc; j++) { val = GET_DATA_BYTE(line, j); val = (l_uint8)L_MIN(val * normh, 255); SET_DATA_BYTE(line, j, val); } for (j = wmwc; j < w; j++) { wn = wc + w - j; normw = (l_float32)fwc / (l_float32)wn; /* > 1 */ val = GET_DATA_BYTE(line, j); val = (l_uint8)L_MIN(val * normh * normw, 255); SET_DATA_BYTE(line, j, val); } } for (i = hmhc; i < h; i++) { /* last hc lines */ hn = hc + h - i; normh = (l_float32)fhc / (l_float32)hn; /* > 1 */ line = data + wpl * i; for (j = 0; j <= wc; j++) { wn = wc + j; normw = (l_float32)fwc / (l_float32)wn; /* > 1 */ val = GET_DATA_BYTE(line, j); val = (l_uint8)L_MIN(val * normh * normw, 255); SET_DATA_BYTE(line, j, val); } for (j = wc + 1; j < wmwc; j++) { val = GET_DATA_BYTE(line, j); val = (l_uint8)L_MIN(val * normh, 255); SET_DATA_BYTE(line, j, val); } for (j = wmwc; j < w; j++) { wn = wc + w - j; normw = (l_float32)fwc / (l_float32)wn; /* > 1 */ val = GET_DATA_BYTE(line, j); val = (l_uint8)L_MIN(val * normh * normw, 255); SET_DATA_BYTE(line, j, val); } } for (i = hc + 1; i < hmhc; i++) { /* intermediate lines */ line = data + wpl * i; for (j = 0; j <= wc; j++) { /* first wc + 1 columns */ wn = wc + j; normw = (l_float32)fwc / (l_float32)wn; /* > 1 */ val = GET_DATA_BYTE(line, j); val = (l_uint8)L_MIN(val * normw, 255); SET_DATA_BYTE(line, j, val); } for (j = wmwc; j < w; j++) { /* last wc columns */ wn = wc + w - j; normw = (l_float32)fwc / (l_float32)wn; /* > 1 */ val = GET_DATA_BYTE(line, j); val = (l_uint8)L_MIN(val * normw, 255); SET_DATA_BYTE(line, j, val); } } }
1
[]
leptonica
480f5e74c24fdc2003c42a4e15d1f24c9e6ea469
38,543,645,203,376,385,000,000,000,000,000,000,000
117
Fixed issue 21972 (oss-fuzz)

Divide by zero in pixBlockconvGray().
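The essence of the guard that addresses this class of bug — rejecting
degenerate kernel/image sizes before the 1.0 / (fwc * fhc) normalizer
and the boundary-normalization divisions are computed — can be sketched
in isolation as follows. This is a self-contained illustration, not the
leptonica sources; the function name and interface are hypothetical.

#include <stdio.h>

/* Sketch: a block convolution with half-width wc and half-height hc
 * only makes sense if the kernel fits inside the w x h image. */
static int block_conv_norm(int w, int h, int wc, int hc, float *norm_out)
{
    if (w - wc <= 0 || h - hc <= 0)
        return -1;                      /* reject before any division */
    int fwc = 2 * wc + 1;
    int fhc = 2 * hc + 1;
    *norm_out = 1.0f / ((float)fwc * (float)fhc);
    return 0;
}

int main(void)
{
    float norm;
    /* A degenerate 0x0 input (as a fuzzer can produce) is now rejected. */
    return block_conv_norm(0, 0, 1, 1) == -1 ? 0 : 1;
}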
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
    struct buffer_head *sbh = sbi->raw_super_buf;
    struct buffer_head *bh;
    int err;

    /* write back-up superblock first */
    bh = sb_getblk(sbi->sb, sbh->b_blocknr ? 0 : 1);
    if (!bh)
        return -EIO;

    lock_buffer(bh);
    memcpy(bh->b_data, sbh->b_data, sbh->b_size);
    WARN_ON(sbh->b_size != F2FS_BLKSIZE);
    set_buffer_uptodate(bh);
    set_buffer_dirty(bh);
    unlock_buffer(bh);

    /* it's rare case, we can do fua all the time */
    err = __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
    brelse(bh);

    /* if we are in recovery path, skip writing valid superblock */
    if (recover || err)
        return err;

    /* write current valid superblock */
    lock_buffer(sbh);
    set_buffer_dirty(sbh);
    unlock_buffer(sbh);

    return __sync_dirty_buffer(sbh, WRITE_FLUSH_FUA);
}
0
[ "CWE-787" ]
linux
9a59b62fd88196844cee5fff851bee2cfd7afb6e
128,881,112,590,356,250,000,000,000,000,000,000,000
33
f2fs: do more integrity verification for superblock

Do more sanity check for superblock during ->mount.

Signed-off-by: Chao Yu <[email protected]>
Signed-off-by: Jaegeuk Kim <[email protected]>
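A minimal sketch of the kind of integrity checks this message refers
to — validating raw superblock fields before trusting them at mount
time. The struct layout, field names, magic value, and limits below are
assumptions for demonstration only, not the actual f2fs definitions.

#include <stdint.h>
#include <stdbool.h>

struct sb_example {
    uint32_t magic;
    uint32_t log_blocksize;
    uint32_t segment_count;
    uint32_t secs_per_zone;
};

#define SB_MAGIC_EXAMPLE 0xF2F52010u    /* illustrative magic value */

static bool sanity_check_sb(const struct sb_example *sb)
{
    if (sb->magic != SB_MAGIC_EXAMPLE)
        return false;                   /* not our filesystem at all */
    if (sb->log_blocksize != 12)
        return false;                   /* e.g. only 4KB blocks supported */
    if (sb->segment_count == 0 || sb->secs_per_zone == 0)
        return false;                   /* would divide by zero later */
    return true;
}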
static int SavePackageDescriptionToDebugDump(const char *dump_dir_name)
{
    struct dump_dir *dd = dd_opendir(dump_dir_name, /*flags:*/ 0);
    if (!dd)
        return 1;

    char *analyzer = dd_load_text(dd, FILENAME_ANALYZER);
    if (!strcmp(analyzer, "Kerneloops"))
    {
        dd_save_text(dd, FILENAME_PACKAGE, "kernel");
        dd_save_text(dd, FILENAME_COMPONENT, "kernel");
        dd_close(dd);
        free(analyzer);
        return 0;
    }
    free(analyzer);

    char *cmdline = NULL;
    char *executable = NULL;
    char *package_short_name = NULL;
    struct pkg_envra *pkg_name = NULL;
    char *component = NULL;
    int error = 1;
    /* note: "goto ret" statements below free all the above variables,
     * but they don't dd_close(dd)
     */

    cmdline = dd_load_text_ext(dd, FILENAME_CMDLINE, DD_FAIL_QUIETLY_ENOENT);
    executable = dd_load_text(dd, FILENAME_EXECUTABLE);

    /* Close dd while we query package database. It can take some time,
     * don't want to keep dd locked longer than necessary */
    dd_close(dd);

    if (is_path_blacklisted(executable))
    {
        log("Blacklisted executable '%s'", executable);
        goto ret; /* return 1 (failure) */
    }

    pkg_name = rpm_get_package_nvr(executable, NULL);
    if (!pkg_name)
    {
        if (settings_bProcessUnpackaged)
        {
            log_info("Crash in unpackaged executable '%s', "
                     "proceeding without packaging information", executable);
            goto ret0; /* no error */
        }
        log("Executable '%s' doesn't belong to any package"
            " and ProcessUnpackaged is set to 'no'",
            executable
        );
        goto ret; /* return 1 (failure) */
    }

    /* Check well-known interpreter names */
    const char *basename = strrchr(executable, '/');
    if (basename)
        basename++;
    else
        basename = executable;

    /* if basename is known interpreter, we want to blame the running script
     * not the interpreter
     */
    if (g_list_find_custom(settings_Interpreters, basename, (GCompareFunc)g_strcmp0))
    {
        struct pkg_envra *script_pkg = get_script_name(cmdline, &executable);
        /* executable may have changed, check it again */
        if (is_path_blacklisted(executable))
        {
            log("Blacklisted executable '%s'", executable);
            goto ret; /* return 1 (failure) */
        }
        if (!script_pkg)
        {
            /* Script name is not absolute, or it doesn't
             * belong to any installed package.
             */
            if (!settings_bProcessUnpackaged)
            {
                log("Interpreter crashed, but no packaged script detected: '%s'", cmdline);
                goto ret; /* return 1 (failure) */
            }

            /* Unpackaged script, but the settings says we want to keep it.
             * BZ plugin won't allow to report this anyway, because component
             * is missing, so there is no reason to mark it as not_reportable.
             * Someone might want to use abrt to report it using ftp.
             */
            goto ret0;
        }

        free_pkg_envra(pkg_name);
        pkg_name = script_pkg;
    }

    package_short_name = xasprintf("%s", pkg_name->p_name);
    log_info("Package:'%s' short:'%s'", pkg_name->p_nvr, package_short_name);

    if (g_list_find_custom(settings_setBlackListedPkgs, package_short_name, (GCompareFunc)g_strcmp0))
    {
        log("Blacklisted package '%s'", package_short_name);
        goto ret; /* return 1 (failure) */
    }

    if (settings_bOpenGPGCheck)
    {
        if (!rpm_chk_fingerprint(package_short_name))
        {
            log("Package '%s' isn't signed with proper key", package_short_name);
            goto ret; /* return 1 (failure) */
        }
        /* We used to also check the integrity of the executable here:
         * if (!CheckHash(package_short_name.c_str(), executable)) BOOM();
         * Checking the MD5 sum requires to run prelink to "un-prelink" the
         * binaries - this is considered potential security risk so we don't
         * do it now, until we find some non-intrusive way.
         */
    }

    component = rpm_get_component(executable, NULL);

    dd = dd_opendir(dump_dir_name, /*flags:*/ 0);
    if (!dd)
        goto ret; /* return 1 (failure) */

    if (pkg_name)
    {
        dd_save_text(dd, FILENAME_PACKAGE, pkg_name->p_nvr);
        dd_save_text(dd, FILENAME_PKG_EPOCH, pkg_name->p_epoch);
        dd_save_text(dd, FILENAME_PKG_NAME, pkg_name->p_name);
        dd_save_text(dd, FILENAME_PKG_VERSION, pkg_name->p_version);
        dd_save_text(dd, FILENAME_PKG_RELEASE, pkg_name->p_release);
        dd_save_text(dd, FILENAME_PKG_ARCH, pkg_name->p_arch);
    }

    if (component)
        dd_save_text(dd, FILENAME_COMPONENT, component);

    dd_close(dd);

 ret0:
    error = 0;
 ret:
    free(cmdline);
    free(executable);
    free(package_short_name);
    free_pkg_envra(pkg_name);
    free(component);

    return error;
}
0
[ "CWE-59" ]
abrt
fdf93685d4f3fc36fe50d34a11e24662c4cb2d8c
318,365,554,164,215,800,000,000,000,000,000,000,000
154
a-a-save-package-data: turn off reading data from root directories

Making copies of files from arbitrary root directories is not secure.

Related: #1211835

Signed-off-by: Jakub Filak <[email protected]>
static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
{
    struct usb_device *hdev = hub->hdev;
    struct usb_hcd *hcd;
    int ret;
    int port1;
    int status;
    bool need_debounce_delay = false;
    unsigned delay;

    /* Continue a partial initialization */
    if (type == HUB_INIT2 || type == HUB_INIT3) {
        device_lock(hub->intfdev);

        /* Was the hub disconnected while we were waiting? */
        if (hub->disconnected) {
            device_unlock(hub->intfdev);
            kref_put(&hub->kref, hub_release);
            return;
        }
        if (type == HUB_INIT2)
            goto init2;
        goto init3;
    }
    kref_get(&hub->kref);

    /* The superspeed hub except for root hub has to use Hub Depth
     * value as an offset into the route string to locate the bits
     * it uses to determine the downstream port number. So hub driver
     * should send a set hub depth request to superspeed hub after
     * the superspeed hub is set configuration in initialization or
     * reset procedure.
     *
     * After a resume, port power should still be on.
     * For any other type of activation, turn it on.
     */
    if (type != HUB_RESUME) {
        if (hdev->parent && hub_is_superspeed(hdev)) {
            ret = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
                    HUB_SET_DEPTH, USB_RT_HUB,
                    hdev->level - 1, 0, NULL, 0,
                    USB_CTRL_SET_TIMEOUT);
            if (ret < 0)
                dev_err(hub->intfdev,
                        "set hub depth failed\n");
        }

        /* Speed up system boot by using a delayed_work for the
         * hub's initial power-up delays. This is pretty awkward
         * and the implementation looks like a home-brewed sort of
         * setjmp/longjmp, but it saves at least 100 ms for each
         * root hub (assuming usbcore is compiled into the kernel
         * rather than as a module). It adds up.
         *
         * This can't be done for HUB_RESUME or HUB_RESET_RESUME
         * because for those activation types the ports have to be
         * operational when we return. In theory this could be done
         * for HUB_POST_RESET, but it's easier not to.
         */
        if (type == HUB_INIT) {
            delay = hub_power_on_good_delay(hub);

            hub_power_on(hub, false);
            INIT_DELAYED_WORK(&hub->init_work, hub_init_func2);
            queue_delayed_work(system_power_efficient_wq,
                    &hub->init_work,
                    msecs_to_jiffies(delay));

            /* Suppress autosuspend until init is done */
            usb_autopm_get_interface_no_resume(
                    to_usb_interface(hub->intfdev));
            return;     /* Continues at init2: below */
        } else if (type == HUB_RESET_RESUME) {
            /* The internal host controller state for the hub device
             * may be gone after a host power loss on system resume.
             * Update the device's info so the HW knows it's a hub.
             */
            hcd = bus_to_hcd(hdev->bus);
            if (hcd->driver->update_hub_device) {
                ret = hcd->driver->update_hub_device(hcd, hdev,
                        &hub->tt, GFP_NOIO);
                if (ret < 0) {
                    dev_err(hub->intfdev, "Host not "
                            "accepting hub info "
                            "update.\n");
                    dev_err(hub->intfdev, "LS/FS devices "
                            "and hubs may not work "
                            "under this hub\n.");
                }
            }
            hub_power_on(hub, true);
        } else {
            hub_power_on(hub, true);
        }
    }
 init2:

    /*
     * Check each port and set hub->change_bits to let hub_wq know
     * which ports need attention.
     */
    for (port1 = 1; port1 <= hdev->maxchild; ++port1) {
        struct usb_port *port_dev = hub->ports[port1 - 1];
        struct usb_device *udev = port_dev->child;
        u16 portstatus, portchange;

        portstatus = portchange = 0;
        status = hub_port_status(hub, port1, &portstatus, &portchange);
        if (udev || (portstatus & USB_PORT_STAT_CONNECTION))
            dev_dbg(&port_dev->dev, "status %04x change %04x\n",
                    portstatus, portchange);

        /*
         * After anything other than HUB_RESUME (i.e., initialization
         * or any sort of reset), every port should be disabled.
         * Unconnected ports should likewise be disabled (paranoia),
         * and so should ports for which we have no usb_device.
         */
        if ((portstatus & USB_PORT_STAT_ENABLE) && (
                type != HUB_RESUME ||
                !(portstatus & USB_PORT_STAT_CONNECTION) ||
                !udev ||
                udev->state == USB_STATE_NOTATTACHED)) {
            /*
             * USB3 protocol ports will automatically transition
             * to Enabled state when detect an USB3.0 device attach.
             * Do not disable USB3 protocol ports, just pretend
             * power was lost
             */
            portstatus &= ~USB_PORT_STAT_ENABLE;
            if (!hub_is_superspeed(hdev))
                usb_clear_port_feature(hdev, port1,
                        USB_PORT_FEAT_ENABLE);
        }

        /* Clear status-change flags; we'll debounce later */
        if (portchange & USB_PORT_STAT_C_CONNECTION) {
            need_debounce_delay = true;
            usb_clear_port_feature(hub->hdev, port1,
                    USB_PORT_FEAT_C_CONNECTION);
        }
        if (portchange & USB_PORT_STAT_C_ENABLE) {
            need_debounce_delay = true;
            usb_clear_port_feature(hub->hdev, port1,
                    USB_PORT_FEAT_C_ENABLE);
        }
        if (portchange & USB_PORT_STAT_C_RESET) {
            need_debounce_delay = true;
            usb_clear_port_feature(hub->hdev, port1,
                    USB_PORT_FEAT_C_RESET);
        }
        if ((portchange & USB_PORT_STAT_C_BH_RESET) &&
                hub_is_superspeed(hub->hdev)) {
            need_debounce_delay = true;
            usb_clear_port_feature(hub->hdev, port1,
                    USB_PORT_FEAT_C_BH_PORT_RESET);
        }
        /* We can forget about a "removed" device when there's a
         * physical disconnect or the connect status changes.
         */
        if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
                (portchange & USB_PORT_STAT_C_CONNECTION))
            clear_bit(port1, hub->removed_bits);

        if (!udev || udev->state == USB_STATE_NOTATTACHED) {
            /* Tell hub_wq to disconnect the device or
             * check for a new connection
             */
            if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
                (portstatus & USB_PORT_STAT_OVERCURRENT))
                set_bit(port1, hub->change_bits);

        } else if (portstatus & USB_PORT_STAT_ENABLE) {
            bool port_resumed = (portstatus &
                    USB_PORT_STAT_LINK_STATE) ==
                USB_SS_PORT_LS_U0;
            /* The power session apparently survived the resume.
             * If there was an overcurrent or suspend change
             * (i.e., remote wakeup request), have hub_wq
             * take care of it. Look at the port link state
             * for USB 3.0 hubs, since they don't have a suspend
             * change bit, and they don't set the port link change
             * bit on device-initiated resume.
             */
            if (portchange || (hub_is_superspeed(hub->hdev) &&
                        port_resumed))
                set_bit(port1, hub->change_bits);

        } else if (udev->persist_enabled) {
#ifdef CONFIG_PM
            udev->reset_resume = 1;
#endif
            /* Don't set the change_bits when the device
             * was powered off.
             */
            if (test_bit(port1, hub->power_bits))
                set_bit(port1, hub->change_bits);

        } else {
            /* The power session is gone; tell hub_wq */
            usb_set_device_state(udev, USB_STATE_NOTATTACHED);
            set_bit(port1, hub->change_bits);
        }
    }

    /* If no port-status-change flags were set, we don't need any
     * debouncing. If flags were set we can try to debounce the
     * ports all at once right now, instead of letting hub_wq do them
     * one at a time later on.
     *
     * If any port-status changes do occur during this delay, hub_wq
     * will see them later and handle them normally.
     */
    if (need_debounce_delay) {
        delay = HUB_DEBOUNCE_STABLE;

        /* Don't do a long sleep inside a workqueue routine */
        if (type == HUB_INIT2) {
            INIT_DELAYED_WORK(&hub->init_work, hub_init_func3);
            queue_delayed_work(system_power_efficient_wq,
                    &hub->init_work,
                    msecs_to_jiffies(delay));
            device_unlock(hub->intfdev);
            return;     /* Continues at init3: below */
        } else {
            msleep(delay);
        }
    }
 init3:
    hub->quiescing = 0;

    status = usb_submit_urb(hub->urb, GFP_NOIO);
    if (status < 0)
        dev_err(hub->intfdev, "activate --> %d\n", status);
    if (hub->has_indicators && blinkenlights)
        queue_delayed_work(system_power_efficient_wq,
                &hub->leds, LED_CYCLE_PERIOD);

    /* Scan all ports that need attention */
    kick_hub_wq(hub);

    /* Allow autosuspend if it was suppressed */
    if (type <= HUB_INIT3)
        usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));

    if (type == HUB_INIT2 || type == HUB_INIT3)
        device_unlock(hub->intfdev);

    kref_put(&hub->kref, hub_release);
}
0
[ "CWE-703" ]
linux
e50293ef9775c5f1cf3fcc093037dd6a8c5684ea
119,488,251,864,184,680,000,000,000,000,000,000,000
250
USB: fix invalid memory access in hub_activate()

Commit 8520f38099cc ("USB: change hub initialization sleeps to
delayed_work") changed the hub_activate() routine to make part of it
run in a workqueue. However, the commit failed to take a reference to
the usb_hub structure or to lock the hub interface while doing so. As
a result, if a hub is plugged in and quickly unplugged before the work
routine can run, the routine will try to access memory that has been
deallocated. Or, if the hub is unplugged while the routine is running,
the memory may be deallocated while it is in active use.

This patch fixes the problem by taking a reference to the usb_hub at
the start of hub_activate() and releasing it at the end (when the work
is finished), and by locking the hub interface while the work routine
is running. It also adds a check at the start of the routine to see if
the hub has already been disconnected, in which case nothing should be
done.

Signed-off-by: Alan Stern <[email protected]>
Reported-by: Alexandru Cornea <[email protected]>
Tested-by: Alexandru Cornea <[email protected]>
Fixes: 8520f38099cc ("USB: change hub initialization sleeps to delayed_work")
CC: <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
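A compressed user-space model of the fix pattern this message describes:
take a reference before queuing deferred work, drop it when the work
completes, and re-check for disconnection under the lock before touching
shared state. This is illustrative only, using pthreads and a plain
counter in place of the kernel's kref/workqueue API.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct hub {
    pthread_mutex_t lock;
    int refcount;               /* stands in for struct kref */
    bool disconnected;
};

static void hub_put(struct hub *h)
{
    pthread_mutex_lock(&h->lock);
    int rc = --h->refcount;
    pthread_mutex_unlock(&h->lock);
    if (rc == 0)
        free(h);                /* the kref_put() release callback */
}

static void *activate_work(void *arg)
{
    struct hub *h = arg;
    pthread_mutex_lock(&h->lock);
    if (!h->disconnected) {
        /* ... continue the partial initialization safely ... */
    }
    pthread_mutex_unlock(&h->lock);
    hub_put(h);                 /* drop the ref taken at queue time */
    return NULL;
}

static void queue_activate(struct hub *h, pthread_t *t)
{
    pthread_mutex_lock(&h->lock);
    h->refcount++;              /* kref_get() before deferring work */
    pthread_mutex_unlock(&h->lock);
    pthread_create(t, NULL, activate_work, h);
}

The key design point mirrors the patch: the object cannot be freed while
the deferred work still holds a reference, and the disconnect flag is
only consulted under the same lock that the teardown path takes.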
dse_prev_search_results(void *vp)
{
    Slapi_PBlock *pb = (Slapi_PBlock *)vp;
    dse_search_set *ss;
    slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_SET, &ss);
    if (ss) {
        dl_get_prev(&ss->dl, &ss->current_entry);
    }
}
0
[ "CWE-200", "CWE-203" ]
389-ds-base
b6aae4d8e7c8a6ddd21646f94fef1bf7f22c3f32
132,092,816,148,783,750,000,000,000,000,000,000,000
9
Issue 4609 - CVE - info disclosure when authenticating

Description: If you bind as a user that does not exist, error 49 is
returned instead of error 32, since error 32 would disclose that the
entry does not exist. But when you bind as an entry that does not have
a userpassword set, error 48 (inappropriate auth) is returned, and this
discloses that the entry does indeed exist. Instead we should always
return error 49, even if the password is not set in the entry. This
way we do not disclose to an attacker whether the Bind DN exists.

Relates: https://github.com/389ds/389-ds-base/issues/4609

Reviewed by: tbordaz (Thanks!)
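The hardening described above collapses into a single decision rule:
every bind failure mode maps to the same error, so a bind cannot be
used as an account-existence oracle. A minimal sketch (illustrative
booleans stand in for the real entry/credential state; not the
389-ds-base code):

#include <stdbool.h>

enum ldap_err { LDAP_SUCCESS = 0, LDAP_INVALID_CREDENTIALS = 49 };

/* Sketch: never return "no such object" (32) or "inappropriate auth"
 * (48) from a bind; all failures are indistinguishable error 49. */
static enum ldap_err check_bind(bool entry_exists, bool has_password,
                                bool password_matches)
{
    if (!entry_exists || !has_password || !password_matches)
        return LDAP_INVALID_CREDENTIALS;
    return LDAP_SUCCESS;
}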
void JsonAssign(json &dest, const json &src) {
#ifdef TINYGLTF_USE_RAPIDJSON
  dest.CopyFrom(src, GetAllocator());
#else
  dest = src;
#endif
}
0
[ "CWE-20" ]
tinygltf
52ff00a38447f06a17eab1caa2cf0730a119c751
264,046,565,980,734,920,000,000,000,000,000,000,000
7
Do not expand file path since it's not necessary for the glTF asset
path (URI) and for security reasons (`wordexp`).
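The safer behavior this message points at is to join a base directory
and an asset URI verbatim, never passing attacker-controlled paths
through wordexp(3), which performs shell-style expansion. A minimal
sketch (function name and interface are hypothetical):

#include <stdio.h>

/* Sketch: plain concatenation with truncation check; no tilde, glob,
 * variable, or command expansion is ever applied to the URI. */
static int join_asset_path(char *out, size_t outsz,
                           const char *basedir, const char *uri)
{
    int n = snprintf(out, outsz, "%s/%s", basedir, uri);
    return (n < 0 || (size_t)n >= outsz) ? -1 : 0;
}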
ArgJoin<wchar_t, It> join(It first, It last, const BasicCStringRef<wchar_t>& sep) {
  return ArgJoin<wchar_t, It>(first, last, sep);
}
0
[ "CWE-134", "CWE-119", "CWE-787" ]
fmt
8cf30aa2be256eba07bb1cefb998c52326e846e7
22,680,666,980,102,050,000,000,000,000,000,000,000
3
Fix segfault on complex pointer formatting (#642)
allocateCharacterClasses(CharacterClass **characterClasses,
        TranslationTableCharacterAttributes *characterClassAttribute) {
    /* Allocate memory for predefined character classes */
    int k = 0;
    *characterClasses = NULL;
    *characterClassAttribute = 1;
    while (characterClassNames[k]) {
        widechar wname[MAXSTRING];
        int length = (int)strlen(characterClassNames[k]);
        int kk;
        for (kk = 0; kk < length; kk++) wname[kk] = (widechar)characterClassNames[k][kk];
        if (!addCharacterClass(
                    NULL, wname, length, characterClasses, characterClassAttribute)) {
            deallocateCharacterClasses(characterClasses);
            return 0;
        }
        k++;
    }
    return 1;
}
0
[ "CWE-787" ]
liblouis
fb2bfce4ed49ac4656a8f7e5b5526e4838da1dde
62,990,190,320,938,810,000,000,000,000,000,000,000
20
Fix yet another buffer overflow in the braille table parser

Reported by Henri Salo

Fixes #592
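The recurring defect class in this parser is copying a table token into
a fixed-size widechar buffer without first comparing its length against
the buffer capacity. A minimal sketch of the missing check (illustrative
and self-contained; not the liblouis patch itself):

#include <stddef.h>
#include <string.h>

#define MAXSTRING 512
typedef unsigned short widechar;

/* Sketch: reject over-long names instead of overflowing wname. */
static int copy_class_name(widechar *dst /* [MAXSTRING] */, const char *src)
{
    size_t len = strlen(src);
    if (len >= MAXSTRING)
        return 0;                 /* bounds check before the copy loop */
    for (size_t k = 0; k < len; k++)
        dst[k] = (widechar)src[k];
    return 1;
}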
relay_crypt(circuit_t *circ, cell_t *cell, cell_direction_t cell_direction,
            crypt_path_t **layer_hint, char *recognized)
{
  relay_header_t rh;

  tor_assert(circ);
  tor_assert(cell);
  tor_assert(recognized);
  tor_assert(cell_direction == CELL_DIRECTION_IN ||
             cell_direction == CELL_DIRECTION_OUT);

  if (cell_direction == CELL_DIRECTION_IN) {
    if (CIRCUIT_IS_ORIGIN(circ)) { /* We're at the beginning of the circuit.
                                    * We'll want to do layered decrypts. */
      crypt_path_t *thishop, *cpath = TO_ORIGIN_CIRCUIT(circ)->cpath;
      thishop = cpath;
      if (thishop->state != CPATH_STATE_OPEN) {
        log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
               "Relay cell before first created cell? Closing.");
        return -1;
      }
      do { /* Remember: cpath is in forward order, that is, first hop first. */
        tor_assert(thishop);

        if (relay_crypt_one_payload(thishop->b_crypto, cell->payload, 0) < 0)
          return -1;

        relay_header_unpack(&rh, cell->payload);
        if (rh.recognized == 0) {
          /* it's possibly recognized. have to check digest to be sure. */
          if (relay_digest_matches(thishop->b_digest, cell)) {
            *recognized = 1;
            *layer_hint = thishop;
            return 0;
          }
        }

        thishop = thishop->next;
      } while (thishop != cpath && thishop->state == CPATH_STATE_OPEN);
      log_fn(LOG_PROTOCOL_WARN, LD_OR,
             "Incoming cell at client not recognized. Closing.");
      return -1;
    } else {
      /* we're in the middle. Just one crypt. */
      if (relay_crypt_one_payload(TO_OR_CIRCUIT(circ)->p_crypto,
                                  cell->payload, 1) < 0)
        return -1;
//      log_fn(LOG_DEBUG,"Skipping recognized check, because we're not "
//             "the client.");
    }
  } else /* cell_direction == CELL_DIRECTION_OUT */ {
    /* we're in the middle. Just one crypt. */
    if (relay_crypt_one_payload(TO_OR_CIRCUIT(circ)->n_crypto,
                                cell->payload, 0) < 0)
      return -1;

    relay_header_unpack(&rh, cell->payload);
    if (rh.recognized == 0) {
      /* it's possibly recognized. have to check digest to be sure. */
      if (relay_digest_matches(TO_OR_CIRCUIT(circ)->n_digest, cell)) {
        *recognized = 1;
        return 0;
      }
    }
  }
  return 0;
}
0
[ "CWE-200", "CWE-617" ]
tor
56a7c5bc15e0447203a491c1ee37de9939ad1dcd
70,677,973,686,264,340,000,000,000,000,000,000,000
67
TROVE-2017-005: Fix assertion failure in connection_edge_process_relay_cell

On a hidden service rendezvous circuit, a BEGIN_DIR could be sent
(maliciously) which would trigger a tor_assert() because
connection_edge_process_relay_cell() thought that the circuit is an
or_circuit_t but is an origin circuit in reality.

Fixes #22494

Reported-by: Roger Dingledine <[email protected]>
Signed-off-by: David Goulet <[email protected]>
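The underlying bug class is a downcast of a tagged structure without
checking which variant it really is, turning a protocol violation into a
daemon-killing assertion. A minimal sketch of the defensive shape of the
fix — check the variant first and fail soft by closing the circuit
(names and types here are illustrative stand-ins, not tor's):

#include <stddef.h>

enum circ_kind { CIRC_ORIGIN, CIRC_OR };
struct circuit { enum circ_kind kind; /* ... */ };

/* Sketch: a BEGIN_DIR on an origin (client-side) circuit is a protocol
 * violation; report an error so the caller closes the circuit instead
 * of asserting after an invalid TO_OR_CIRCUIT()-style downcast. */
static int handle_begin_dir(struct circuit *c)
{
    if (c->kind == CIRC_ORIGIN)
        return -1;                  /* close circuit, don't crash */
    /* safe to treat c as the or_circuit variant from here on */
    return 0;
}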
SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
             u64 volatile_fid, u32 pid, __le64 *eof, bool is_falloc)
{
    struct smb2_file_eof_info info;
    void *data;
    unsigned int size;

    info.EndOfFile = *eof;

    data = &info;
    size = sizeof(struct smb2_file_eof_info);

    if (is_falloc)
        return send_set_info(xid, tcon, persistent_fid, volatile_fid,
            pid, FILE_ALLOCATION_INFORMATION, 1, &data, &size);
    else
        return send_set_info(xid, tcon, persistent_fid, volatile_fid,
            pid, FILE_END_OF_FILE_INFORMATION, 1, &data, &size);
}
0
[ "CWE-476" ]
linux
cabfb3680f78981d26c078a26e5c748531257ebb
8,626,957,695,534,360,000,000,000,000,000,000,000
19
CIFS: Enable encryption during session setup phase

In order to allow encryption on SMB connection we need to exchange a
session key and generate encryption and decryption keys.

Signed-off-by: Pavel Shilovsky <[email protected]>
static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
{
    struct mem_cgroup_eventfd_list *ev;

    list_for_each_entry(ev, &memcg->oom_notify, list)
        eventfd_signal(ev->eventfd, 1);
    return 0;
}
0
[ "CWE-264" ]
linux-2.6
1a5a9906d4e8d1976b701f889d8f35d54b928f25
16,266,979,438,647,930,000,000,000,000,000,000,000
8
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode

In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem hold in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.

It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables to go away from under them, during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().

Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.

Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).

The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above). All we need is to enforce that there is no way anymore that in a
code path like below, pmd_trans_huge can be false, but
pmd_none_or_clear_bad can run into a hugepmd. The overhead of a
barrier() is just a compiler tweak and should not be measurable (I only
added it for THP builds). I don't exclude different compiler versions
may have prevented the race too by caching the value of *pmd on the
stack (that hasn't been verified, but it wouldn't be impossible
considering pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are
all inlines and there's no external function called in between
pmd_trans_huge and pmd_none_or_clear_bad).

        if (pmd_trans_huge(*pmd)) {
                if (next-addr != HPAGE_PMD_SIZE) {
                        VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
                        split_huge_page_pmd(vma->vm_mm, pmd);
                } else if (zap_huge_pmd(tlb, vma, pmd, addr))
                        continue;
                /* fall through */
        }
        if (pmd_none_or_clear_bad(pmd))

Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.

The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.

====== start quote =======

mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!

At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:

mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).

The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.

    143 void pmd_clear_bad(pmd_t *pmd)
    144 {
 -> 145         pmd_ERROR(*pmd);
    146         pmd_clear(pmd);
    147 }

After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this
inconsistency.

   1381         if (mapcount != page_mapcount(page))
   1382                 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
   1383                        mapcount, page_mapcount(page));
-> 1384         BUG_ON(mapcount != page_mapcount(page));

The root cause of the problem is a race of two threads in a
multithreaded process. Thread B incurs a page fault on a virtual address
that has never been accessed (PMD entry is zero) while Thread A is
executing an madvise() system call on a virtual address within the same
2 MB (huge page) range.

           virtual address space
          .---------------------.
          |                     |
          |                     |
        .-|---------------------|
        | |                     |
        | |                     |<-- B(fault)
        | |                     |
  2 MB  | |/////////////////////|-.
  huge <  |/////////////////////|  > A(range)
  page  | |/////////////////////|-'
        | |                     |
        | |                     |
        '-|---------------------|
          |                     |
          |                     |
          '---------------------'

- Thread A is executing an madvise(..., MADV_DONTNEED) system call on
  the virtual address range "A(range)" shown in the picture.

sys_madvise
  // Acquire the semaphore in shared mode.
  down_read(&current->mm->mmap_sem)
  ...
  madvise_vma
    switch (behavior)
    case MADV_DONTNEED:
         madvise_dontneed
           zap_page_range
             unmap_vmas
               unmap_page_range
                 zap_pud_range
                   zap_pmd_range
                     //
                     // Assume that this huge page has never been accessed.
                     // I.e. content of the PMD entry is zero (not mapped).
                     //
                     if (pmd_trans_huge(*pmd)) {
                          // We don't get here due to the above assumption.
                     }
                     //
                     // Assume that Thread B incurred a page fault and
         .---------> // sneaks in here as shown below.
         |           //
         |           if (pmd_none_or_clear_bad(pmd))
         |               {
         |                 if (unlikely(pmd_bad(*pmd)))
         |                     pmd_clear_bad
         |                     {
         |                       pmd_ERROR
         |                         // Log "bad pmd ..." message here.
         |                       pmd_clear
         |                         // Clear the page's PMD entry.
         |                         // Thread B incremented the map count
         |                         // in page_add_new_anon_rmap(), but
         |                         // now the page is no longer mapped
         |                         // by a PMD entry (-> inconsistency).
         |                     }
         |               }
         |
         v

- Thread B is handling a page fault on virtual address "B(fault)" shown
  in the picture.

...
do_page_fault
  __do_page_fault
    // Acquire the semaphore in shared mode.
    down_read_trylock(&mm->mmap_sem)
    ...
    handle_mm_fault
      if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
        // We get here due to the above assumption (PMD entry is zero).
        do_huge_pmd_anonymous_page
          alloc_hugepage_vma
            // Allocate a new transparent huge page here.
          ...
          __do_huge_pmd_anonymous_page
            ...
            spin_lock(&mm->page_table_lock)
            ...
            page_add_new_anon_rmap
              // Here we increment the page's map count (starts at -1).
              atomic_set(&page->_mapcount, 0)
            set_pmd_at
              // Here we set the page's PMD entry which will be cleared
              // when Thread A calls pmd_clear_bad().
            ...
            spin_unlock(&mm->page_table_lock)

The mmap_sem does not prevent the race because both threads are
acquiring it in shared mode (down_read). Thread B holds the
page_table_lock while the page's map count and PMD table entry are
updated. However, Thread A does not synchronize on that lock.

====== end quote =======

[[email protected]: checkpatch fixes]
Reported-by: Ulrich Obergfell <[email protected]>
Signed-off-by: Andrea Arcangeli <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Dave Jones <[email protected]>
Acked-by: Larry Woodman <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: <[email protected]> [2.6.38+]
Cc: Mark Salter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
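A compressed, user-space model of the fix pattern the message describes:
snapshot the pmd into a local variable, force exactly one read with a
compiler barrier, and treat "became huge under us" as not-bad. The pmd_t
typedef and the *_example predicates below are stand-ins for
demonstration, not the kernel definitions (gcc/clang asm syntax
assumed):

typedef unsigned long pmd_t;

#define barrier() __asm__ __volatile__("" ::: "memory")

static int pmd_none_example(pmd_t v)       { return v == 0; }
static int pmd_trans_huge_example(pmd_t v) { return v & 0x1; }
static int pmd_bad_example(pmd_t v)        { return v & 0x2; }

/* Sketch of pmd_none_or_trans_huge_or_clear_bad(): all predicates run
 * against one stack-local snapshot, so a concurrent huge-page fault
 * cannot make pmd_bad() fire between the checks. */
static int pmd_none_or_trans_huge_or_clear_bad_example(pmd_t *pmd)
{
    pmd_t pmdval = *pmd;   /* single read into the local stack */
    barrier();             /* keep the compiler from re-reading *pmd */
    if (pmd_none_example(pmdval) || pmd_trans_huge_example(pmdval))
        return 1;
    if (pmd_bad_example(pmdval)) {
        /* the kernel would call pmd_clear_bad(pmd) here */
        return 1;
    }
    return 0;
}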
void nfs_commitdata_release(struct nfs_commit_data *data)
{
    put_nfs_open_context(data->context);
    nfs_commit_free(data);
}
0
[]
linux
c7559663e42f4294ffe31fe159da6b6a66b35d61
196,795,394,399,039,340,000,000,000,000,000,000,000
5
NFS: Allow nfs_updatepage to extend a write under additional circumstances

Currently nfs_updatepage allows a write to be extended to cover a full
page only if we don't have a byte range lock on the file... but if we
have a write delegation on the file or if we have the whole file locked
for writing then we should be allowed to extend the write as well.

Signed-off-by: Scott Mayhew <[email protected]>
[Trond: fix up call to nfs_have_delegation()]
Signed-off-by: Trond Myklebust <[email protected]>
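The policy change reduces to one predicate: extending the write to the
whole page is safe under a write delegation, under a whole-file write
lock, or when no byte-range lock exists at all. A minimal sketch, with
illustrative booleans standing in for the real inode and lock state:

#include <stdbool.h>

/* Sketch of the extended condition; not the nfs_updatepage() code. */
static bool can_extend_write(bool has_byte_range_lock,
                             bool has_write_delegation,
                             bool whole_file_write_lock)
{
    if (has_write_delegation)
        return true;               /* server promised us exclusivity */
    if (whole_file_write_lock)
        return true;               /* no competing writers possible */
    return !has_byte_range_lock;   /* the pre-existing rule */
}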
folder_list_update_cb (gpointer user_data)
{
    struct ScheduleUpdateData *sud = user_data;

    g_return_val_if_fail (sud != NULL, FALSE);

    if (g_cancellable_is_cancelled (sud->cancellable))
        return FALSE;

    g_return_val_if_fail (sud->ews_store != NULL, FALSE);
    g_return_val_if_fail (sud->ews_store->priv != NULL, FALSE);

    UPDATE_LOCK (sud->ews_store);

    if (sud->expected_id != sud->ews_store->priv->update_folder_list_id)
        goto exit;

    sud->ews_store->priv->update_folder_list_id = 0;

    if (!g_cancellable_is_cancelled (sud->cancellable))
        run_update_thread (sud->ews_store, TRUE, sud->cancellable);

exit:
    UPDATE_UNLOCK (sud->ews_store);

    return FALSE;
}
0
[ "CWE-295" ]
evolution-ews
915226eca9454b8b3e5adb6f2fff9698451778de
208,803,372,700,612,760,000,000,000,000,000,000,000
26
I#27 - SSL Certificates are not validated

This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too.

Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27
void xpipe(int filedes[2])
{
    if (pipe(filedes))
        perror_msg_and_die("Can't create pipe");
}
0
[ "CWE-20" ]
libreport
1951e7282043dfe1268d492aea056b554baedb75
42,050,963,615,743,680,000,000,000,000,000,000,000
5
lib: fix races in dump directory handling code

Florian Weimer <[email protected]>:

    dd_opendir() should keep a file handle (opened with O_DIRECTORY)
    and use openat() and similar functions to access files in it.

    ...

    The file system manipulation functions should guard against hard
    links (check that link count is <= 1, just as in the user coredump
    code in abrt-hook-ccpp), possibly after opening the file with
    O_PATH first to avoid side effects on open/close.

Related: #1214745

Signed-off-by: Jakub Filak <[email protected]>
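The hardened pattern quoted above can be sketched compactly: keep an
O_DIRECTORY handle to the dump directory, resolve entries relative to it
with openat() so the directory cannot be swapped mid-operation, refuse
symlinks, and reject hard-linked files. This is an illustrative sketch
of the approach, not libreport's actual dd_* implementation (which the
message also suggests could use O_PATH first):

#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

static int dd_open_item(const char *dump_dir, const char *name)
{
    int dir_fd = open(dump_dir, O_DIRECTORY | O_RDONLY | O_CLOEXEC);
    if (dir_fd < 0)
        return -1;

    /* O_NOFOLLOW: a symlink planted in the dump dir fails here */
    int fd = openat(dir_fd, name, O_RDONLY | O_NOFOLLOW | O_CLOEXEC);
    close(dir_fd);
    if (fd < 0)
        return -1;

    struct stat st;
    if (fstat(fd, &st) != 0 || !S_ISREG(st.st_mode) || st.st_nlink > 1) {
        close(fd);      /* hard-link trick: refuse link count > 1 */
        return -1;
    }
    return fd;
}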
int __init arch_ioremap_pud_supported(void)
{
    /* only 4k granule supports level 1 block mappings */
    return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
}
0
[]
linux
15122ee2c515a253b0c66a3e618bc7ebe35105eb
150,602,948,917,095,930,000,000,000,000,000,000,000
5
arm64: Enforce BBM for huge IO/VMAP mappings

ioremap_page_range doesn't honour break-before-make and attempts to put
down huge mappings (using p*d_set_huge) over the top of pre-existing
table entries. This leads to us leaking page table memory and also gives
rise to TLB conflicts and spurious aborts, which have been seen in
practice on Cortex-A75.

Until this has been resolved, refuse to put block mappings when the
existing entry is found to be present.

Fixes: 324420bf91f60 ("arm64: add support for ioremap() block mappings")
Reported-by: Hanjun Guo <[email protected]>
Reported-by: Lei Li <[email protected]>
Acked-by: Ard Biesheuvel <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
Signed-off-by: Catalin Marinas <[email protected]>
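The break-before-make rule the commit enforces reduces to a guard in the
huge-mapping installer: never overwrite a live table entry; require the
caller to unmap and invalidate TLBs first. A minimal model of that guard
(types and predicate are illustrative stand-ins, not the arm64 pgtable
code):

#include <stdbool.h>

typedef unsigned long pud_entry;

static bool pud_present_example(pud_entry e) { return e != 0; }

/* Sketch: p*d_set_huge-style helper that refuses to stomp on an
 * existing entry, avoiding TLB conflict aborts and leaked tables. */
static int pud_set_huge_example(pud_entry *pudp, pud_entry new_block)
{
    if (pud_present_example(*pudp))
        return 0;        /* caller must break (unmap + TLBI) first */
    *pudp = new_block;   /* make: install into an empty slot */
    return 1;
}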