Dataset columns (each record below lists these fields in this order):
- func: string, lengths 0 to 484k
- target: int64, values 0 to 1
- cwe: sequence, lengths 0 to 4
- project: string, 799 classes
- commit_id: string, length 40
- hash: float64, 1,215,700,430,453,689,100,000,000B to 340,281,914,521,452,260,000,000,000,000B
- size: int64, 1 to 24k
- message: string, lengths 0 to 13.3k
static inline short _swap16(short &wData) { wData = ((wData >> 8) & 0x00FF) | ((wData << 8) & 0xFF00); return wData; }
0
[ "CWE-119" ]
soundtouch
9e02d9b04fda6c1f44336ff00bb5af1e2ffc039e
117,642,661,367,914,070,000,000,000,000,000,000,000
6
Added minimum size check for WAV header block length values
int ptrace_request(struct task_struct *child, long request, unsigned long addr, unsigned long data) { bool seized = child->ptrace & PT_SEIZED; int ret = -EIO; kernel_siginfo_t siginfo, *si; void __user *datavp = (void __user *) data; unsigned long __user *datalp = datavp; unsigned long flags; switch (request) { case PTRACE_PEEKTEXT: case PTRACE_PEEKDATA: return generic_ptrace_peekdata(child, addr, data); case PTRACE_POKETEXT: case PTRACE_POKEDATA: return generic_ptrace_pokedata(child, addr, data); #ifdef PTRACE_OLDSETOPTIONS case PTRACE_OLDSETOPTIONS: #endif case PTRACE_SETOPTIONS: ret = ptrace_setoptions(child, data); break; case PTRACE_GETEVENTMSG: ret = put_user(child->ptrace_message, datalp); break; case PTRACE_PEEKSIGINFO: ret = ptrace_peek_siginfo(child, addr, data); break; case PTRACE_GETSIGINFO: ret = ptrace_getsiginfo(child, &siginfo); if (!ret) ret = copy_siginfo_to_user(datavp, &siginfo); break; case PTRACE_SETSIGINFO: ret = copy_siginfo_from_user(&siginfo, datavp); if (!ret) ret = ptrace_setsiginfo(child, &siginfo); break; case PTRACE_GETSIGMASK: { sigset_t *mask; if (addr != sizeof(sigset_t)) { ret = -EINVAL; break; } if (test_tsk_restore_sigmask(child)) mask = &child->saved_sigmask; else mask = &child->blocked; if (copy_to_user(datavp, mask, sizeof(sigset_t))) ret = -EFAULT; else ret = 0; break; } case PTRACE_SETSIGMASK: { sigset_t new_set; if (addr != sizeof(sigset_t)) { ret = -EINVAL; break; } if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) { ret = -EFAULT; break; } sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); /* * Every thread does recalc_sigpending() after resume, so * retarget_shared_pending() and recalc_sigpending() are not * called here. */ spin_lock_irq(&child->sighand->siglock); child->blocked = new_set; spin_unlock_irq(&child->sighand->siglock); clear_tsk_restore_sigmask(child); ret = 0; break; } case PTRACE_INTERRUPT: /* * Stop tracee without any side-effect on signal or job * control. At least one trap is guaranteed to happen * after this request. If @child is already trapped, the * current trap is not disturbed and another trap will * happen after the current trap is ended with PTRACE_CONT. * * The actual trap might not be PTRACE_EVENT_STOP trap but * the pending condition is cleared regardless. */ if (unlikely(!seized || !lock_task_sighand(child, &flags))) break; /* * INTERRUPT doesn't disturb existing trap sans one * exception. If ptracer issued LISTEN for the current * STOP, this INTERRUPT should clear LISTEN and re-trap * tracee into STOP. */ if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP))) ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING); unlock_task_sighand(child, &flags); ret = 0; break; case PTRACE_LISTEN: /* * Listen for events. Tracee must be in STOP. It's not * resumed per-se but is not considered to be in TRACED by * wait(2) or ptrace(2). If an async event (e.g. group * stop state change) happens, tracee will enter STOP trap * again. Alternatively, ptracer can issue INTERRUPT to * finish listening and re-trap tracee into STOP. */ if (unlikely(!seized || !lock_task_sighand(child, &flags))) break; si = child->last_siginfo; if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) { child->jobctl |= JOBCTL_LISTENING; /* * If NOTIFY is set, it means event happened between * start of this trap and now. Trigger re-trap. 
*/ if (child->jobctl & JOBCTL_TRAP_NOTIFY) ptrace_signal_wake_up(child, true); ret = 0; } unlock_task_sighand(child, &flags); break; case PTRACE_DETACH: /* detach a process that was attached. */ ret = ptrace_detach(child, data); break; #ifdef CONFIG_BINFMT_ELF_FDPIC case PTRACE_GETFDPIC: { struct mm_struct *mm = get_task_mm(child); unsigned long tmp = 0; ret = -ESRCH; if (!mm) break; switch (addr) { case PTRACE_GETFDPIC_EXEC: tmp = mm->context.exec_fdpic_loadmap; break; case PTRACE_GETFDPIC_INTERP: tmp = mm->context.interp_fdpic_loadmap; break; default: break; } mmput(mm); ret = put_user(tmp, datalp); break; } #endif #ifdef PTRACE_SINGLESTEP case PTRACE_SINGLESTEP: #endif #ifdef PTRACE_SINGLEBLOCK case PTRACE_SINGLEBLOCK: #endif #ifdef PTRACE_SYSEMU case PTRACE_SYSEMU: case PTRACE_SYSEMU_SINGLESTEP: #endif case PTRACE_SYSCALL: case PTRACE_CONT: return ptrace_resume(child, request, data); case PTRACE_KILL: if (child->exit_state) /* already dead */ return 0; return ptrace_resume(child, request, SIGKILL); #ifdef CONFIG_HAVE_ARCH_TRACEHOOK case PTRACE_GETREGSET: case PTRACE_SETREGSET: { struct iovec kiov; struct iovec __user *uiov = datavp; if (!access_ok(uiov, sizeof(*uiov))) return -EFAULT; if (__get_user(kiov.iov_base, &uiov->iov_base) || __get_user(kiov.iov_len, &uiov->iov_len)) return -EFAULT; ret = ptrace_regset(child, request, addr, &kiov); if (!ret) ret = __put_user(kiov.iov_len, &uiov->iov_len); break; } #endif case PTRACE_SECCOMP_GET_FILTER: ret = seccomp_get_filter(child, addr, datavp); break; case PTRACE_SECCOMP_GET_METADATA: ret = seccomp_get_metadata(child, addr, datavp); break; default: break; } return ret; }
0
[ "CWE-264", "CWE-269" ]
linux
6994eefb0053799d2e07cd140df6c2ea106c41ee
12,265,125,843,513,233,000,000,000,000,000,000,000
231
ptrace: Fix ->ptracer_cred handling for PTRACE_TRACEME Fix two issues: When called for PTRACE_TRACEME, ptrace_link() would obtain an RCU reference to the parent's objective credentials, then give that pointer to get_cred(). However, the object lifetime rules for things like struct cred do not permit unconditionally turning an RCU reference into a stable reference. PTRACE_TRACEME records the parent's credentials as if the parent was acting as the subject, but that's not the case. If a malicious unprivileged child uses PTRACE_TRACEME and the parent is privileged, and at a later point, the parent process becomes attacker-controlled (because it drops privileges and calls execve()), the attacker ends up with control over two processes with a privileged ptrace relationship, which can be abused to ptrace a suid binary and obtain root privileges. Fix both of these by always recording the credentials of the process that is requesting the creation of the ptrace relationship: current_cred() can't change under us, and current is the proper subject for access control. This change is theoretically userspace-visible, but I am not aware of any code that it will actually break. Fixes: 64b875f7ac8a ("ptrace: Capture the ptracer's creds not PT_PTRACE_CAP") Signed-off-by: Jann Horn <[email protected]> Acked-by: Oleg Nesterov <[email protected]> Cc: [email protected] Signed-off-by: Linus Torvalds <[email protected]>
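The following is a minimal sketch of the change this commit message describes, not standalone code: it assumes the kernel's ptrace context (struct task_struct, __ptrace_link(), current_cred()). The point is that ptrace_link() records the credentials of the task requesting the attachment instead of dereferencing the parent's credentials under RCU.

```c
/* Sketch only (kernel context assumed): record the requester's credentials.
 * Previously the parent's objective creds were fetched under RCU and then
 * get_cred()'d, which is neither a valid way to take a stable reference nor
 * the right subject for PTRACE_TRACEME. */
static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	__ptrace_link(child, new_parent, current_cred()); /* creds of the requesting task */
}
```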
return *this; } CImgList<T>& _load_gif_external(const char *const filename, const bool use_graphicsmagick=false) { CImg<charT> command(1024), filename_tmp(256), filename_tmp2(256); std::FILE *file = 0; do { cimg_snprintf(filename_tmp,filename_tmp._width,"%s%c%s", cimg::temporary_path(),cimg_file_separator,cimg::filenamerand()); if (use_graphicsmagick) cimg_snprintf(filename_tmp2,filename_tmp2._width,"%s.png.0",filename_tmp._data); else cimg_snprintf(filename_tmp2,filename_tmp2._width,"%s-0.png",filename_tmp._data); if ((file=cimg::std_fopen(filename_tmp2,"rb"))!=0) cimg::fclose(file); } while (file); if (use_graphicsmagick) cimg_snprintf(command,command._width,"%s convert \"%s\" \"%s.png\"", cimg::graphicsmagick_path(), CImg<charT>::string(filename)._system_strescape().data(), CImg<charT>::string(filename_tmp)._system_strescape().data()); else cimg_snprintf(command,command._width,"\"%s\" -coalesce \"%s\" \"%s.png\"", cimg::imagemagick_path(), CImg<charT>::string(filename)._system_strescape().data(), CImg<charT>::string(filename_tmp)._system_strescape().data()); cimg::system(command, cimg::imagemagick_path()); const unsigned int omode = cimg::exception_mode(); cimg::exception_mode(0); assign(); // Try to read a single frame gif. cimg_snprintf(filename_tmp2,filename_tmp2._width,"%s.png",filename_tmp._data); CImg<T> img; try { img.load_png(filename_tmp2); } catch (CImgException&) { } if (img) { img.move_to(*this); std::remove(filename_tmp2); } else { // Try to read animated gif unsigned int i = 0; for (bool stop_flag = false; !stop_flag; ++i) { if (use_graphicsmagick) cimg_snprintf(filename_tmp2,filename_tmp2._width,"%s.png.%u",filename_tmp._data,i); else cimg_snprintf(filename_tmp2,filename_tmp2._width,"%s-%u.png",filename_tmp._data,i); try { img.load_png(filename_tmp2); } catch (CImgException&) { stop_flag = true; } if (img) { img.move_to(*this); std::remove(filename_tmp2); } } }
0
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
162,373,642,645,266,040,000,000,000,000,000,000,000
42
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file do not exceed file size.
should_skip_readdir_error (CommonJob *common, GFile *dir) { if (common->skip_readdir_error != NULL) { return g_hash_table_lookup (common->skip_readdir_error, dir) != NULL; } return FALSE; }
0
[]
nautilus
ca2fd475297946f163c32dcea897f25da892b89d
33,976,971,007,230,023,000,000,000,000,000,000,000
8
Add nautilus_file_mark_desktop_file_trusted(), this now adds a #! line if 2009-02-24 Alexander Larsson <[email protected]> * libnautilus-private/nautilus-file-operations.c: * libnautilus-private/nautilus-file-operations.h: Add nautilus_file_mark_desktop_file_trusted(), this now adds a #! line if there is none as well as makes the file executable. * libnautilus-private/nautilus-mime-actions.c: Use nautilus_file_mark_desktop_file_trusted() instead of just setting the permissions. svn path=/trunk/; revision=15006
static int mailbox_read_header(struct mailbox *mailbox, const char *fname) { int r = 0; int flag; const char *name, *p, *tab, *eol; struct stat sbuf; const char *base = NULL; size_t len = 0; unsigned magic_size = sizeof(MAILBOX_HEADER_MAGIC) - 1; /* can't be dirty if we're reading it */ if (mailbox->header_dirty) abort(); xclose(mailbox->header_fd); if (!fname) fname = mailbox_meta_fname(mailbox, META_HEADER); mailbox->header_fd = open(fname, O_RDONLY, 0); if (mailbox->header_fd == -1) { r = IMAP_IOERROR; goto done; } if (fstat(mailbox->header_fd, &sbuf) == -1) { xclose(mailbox->header_fd); r = IMAP_IOERROR; goto done; } map_refresh(mailbox->header_fd, 1, &base, &len, sbuf.st_size, "header", mailbox_name(mailbox)); mailbox->header_file_ino = sbuf.st_ino; mailbox->header_file_crc = crc32_map(base, sbuf.st_size); /* Check magic number */ if ((unsigned) sbuf.st_size < magic_size || strncmp(base, MAILBOX_HEADER_MAGIC, magic_size)) { r = IMAP_MAILBOX_BADFORMAT; goto done; } /* Read quota data line */ p = base + sizeof(MAILBOX_HEADER_MAGIC)-1; tab = memchr(p, '\t', sbuf.st_size - (p - base)); eol = memchr(p, '\n', sbuf.st_size - (p - base)); if (!eol) { r = IMAP_MAILBOX_BADFORMAT; goto done; } xzfree(mailbox->h.quotaroot); xzfree(mailbox->h.uniqueid); /* check for DLIST mboxlist */ if (*p == '%') { r = _parse_header_data(mailbox, p, eol - p); goto done; } /* quotaroot (if present) */ if (!tab || tab > eol) { syslog(LOG_DEBUG, "mailbox '%s' has old cyrus.header", mailbox_name(mailbox)); tab = eol; } if (p < tab) { mailbox->h.quotaroot = xstrndup(p, tab - p); } /* read uniqueid (should always exist unless old format) */ if (tab < eol) { p = tab + 1; if (p == eol) { r = IMAP_MAILBOX_BADFORMAT; goto done; } tab = memchr(p, '\t', sbuf.st_size - (p - base)); if (!tab || tab > eol) tab = eol; mailbox->h.uniqueid = xstrndup(p, tab - p); } else { /* ancient cyrus.header file without a uniqueid field! */ xsyslog(LOG_ERR, "mailbox header has no uniqueid, needs reconstruct", "mboxname=<%s>", mailbox_name(mailbox)); } /* Read names of user flags */ p = eol + 1; eol = memchr(p, '\n', sbuf.st_size - (p - base)); if (!eol) { r = IMAP_MAILBOX_BADFORMAT; goto done; } name = p; /* read the names of flags */ for (flag = 0; name <= eol && flag < MAX_USER_FLAGS; flag++) { xzfree(mailbox->h.flagname[flag]); p = memchr(name, ' ', eol-name); if (!p) p = eol; if (name != p) mailbox->h.flagname[flag] = xstrndup(name, p-name); name = p+1; } /* zero out the rest */ for (; flag < MAX_USER_FLAGS; flag++) { xzfree(mailbox->h.flagname[flag]); } /* Read ACL */ p = eol + 1; eol = memchr(p, '\n', sbuf.st_size - (p - base)); if (!eol) { r = IMAP_MAILBOX_BADFORMAT; goto done; } mailbox->h.acl = xstrndup(p, eol-p); done: if (base) map_free(&base, &len); return r; }
0
[]
cyrus-imapd
1d6d15ee74e11a9bd745e80be69869e5fb8d64d6
73,364,805,767,653,155,000,000,000,000,000,000,000
124
mailbox.c/reconstruct.c: Add mailbox_mbentry_from_path()
static uint_fast32_t jpc_abstorelstepsize(jpc_fix_t absdelta, int scaleexpn) { int p; uint_fast32_t mant; uint_fast32_t expn; int n; if (absdelta < 0) { abort(); } p = jpc_firstone(absdelta) - JPC_FIX_FRACBITS; n = 11 - jpc_firstone(absdelta); mant = ((n < 0) ? (absdelta >> (-n)) : (absdelta << n)) & 0x7ff; expn = scaleexpn - p; if (scaleexpn < p) { abort(); } return JPC_QCX_EXPN(expn) | JPC_QCX_MANT(mant); }
1
[ "CWE-617" ]
jasper
e6c8d5a838b49f94616be14753aa5c89d64605b5
72,127,700,512,832,880,000,000,000,000,000,000,000
20
jpc_math: split jpc_firstone() in int/jpc_fix_t overloads Fixes CVE-2018-9055 (denial of service via a reachable assertion due to integer overflow). Based on a patch from Fridrich Strba <[email protected]>. Instead of switching to `int_fast32_t`, this patch splits jpc_firstone() into two overloads, one for `int` and one for `jpc_fix_t`. This is safer against future changes on `jpc_fix_t`. To avoid the overhead of 64 bit integer math on 32 bit CPUs, this leaves the `int` overload around. Closes https://github.com/jasper-maint/jasper/issues/9
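A hypothetical standalone sketch of the split the commit message describes; the function names and the assumption that jpc_fix_t is a 64-bit fixed-point type are mine, not taken from the JasPer sources. Keeping a wide-argument variant means a jpc_fix_t operand never has to be narrowed to int before the "find highest set bit" step, which is where the overflow that triggered the reachable abort() came from.

```c
#include <stdint.h>

/* Assumed names; JasPer may use different ones. */
static int firstone_int(int x)          /* index of the most significant set bit, -1 for 0 */
{
	int n = -1;
	while (x > 0) { x >>= 1; ++n; }
	return n;
}

static int firstone_fix(int64_t x)      /* same operation for a 64-bit fixed-point value */
{
	int n = -1;
	while (x > 0) { x >>= 1; ++n; }
	return n;
}
```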
static inline double safe_substract(ulonglong a, ulonglong b) { return (a > b)? double(a - b) : -double(b - a); }
0
[ "CWE-416", "CWE-703" ]
server
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
106,703,115,110,485,360,000,000,000,000,000,000,000
4
MDEV-24176 Server crashes after insert in the table with virtual column generated using date_format() and if() vcol_info->expr is allocated on expr_arena at parsing stage. Since expr item is allocated on expr_arena all its containee items must be allocated on expr_arena too. Otherwise fix_session_expr() will encounter prematurely freed item. When table is reopened from cache vcol_info contains stale expression. We refresh expression via TABLE::vcol_fix_exprs() but first we must prepare a proper context (Vcol_expr_context) which meets some requirements: 1. As noted above expr update must be done on expr_arena as there may be new items created. It was a bug in fix_session_expr_for_read() and was just not reproduced because of no second refix. Now refix is done for more cases so it does reproduce. Tests affected: vcol.binlog 2. Also name resolution context must be narrowed to the single table. Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes 3. sql_mode must be clean and not fail expr update. sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc must not affect vcol expression update. If the table was created successfully any further evaluation must not fail. Tests affected: main.func_like Reviewed by: Sergei Golubchik <[email protected]>
Status Conv2DShapeImpl(shape_inference::InferenceContext* c, bool supports_explicit_padding) { string data_format_str, filter_format_str; if (!c->GetAttr("data_format", &data_format_str).ok()) { data_format_str = "NHWC"; } if (!c->GetAttr("filter_format", &filter_format_str).ok()) { filter_format_str = "HWIO"; } TensorFormat data_format; if (!FormatFromString(data_format_str, &data_format)) { return errors::InvalidArgument("Invalid data format string: ", data_format_str); } FilterTensorFormat filter_format; if (!FilterFormatFromString(filter_format_str, &filter_format)) { return errors::InvalidArgument("Invalid filter format string: ", filter_format_str); } constexpr int num_spatial_dims = 2; const int rank = GetTensorDimsFromSpatialDims(num_spatial_dims, data_format); ShapeHandle conv_input_shape; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), rank, &conv_input_shape)); TF_RETURN_IF_ERROR(CheckFormatConstraintsOnShape( data_format, conv_input_shape, "conv_input", c)); // The filter rank should match the input (4 for NCHW, 5 for NCHW_VECT_C). ShapeHandle filter_shape; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), rank, &filter_shape)); TF_RETURN_IF_ERROR( CheckFormatConstraintsOnShape(data_format, filter_shape, "filter", c)); std::vector<int32> dilations; TF_RETURN_IF_ERROR(c->GetAttr("dilations", &dilations)); if (dilations.size() != 4) { return errors::InvalidArgument( "Conv2D requires the dilation attribute to contain 4 values, but got: ", dilations.size()); } std::vector<int32> strides; TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides)); // strides.size() should be 4 (NCHW) even if the input is 5 (NCHW_VECT_C). if (strides.size() != 4) { return errors::InvalidArgument("Conv2D on data format ", data_format_str, " requires the stride attribute to contain" " 4 values, but got: ", strides.size()); } const int32_t stride_rows = GetTensorDim(strides, data_format, 'H'); const int32_t stride_cols = GetTensorDim(strides, data_format, 'W'); const int32_t dilation_rows = GetTensorDim(dilations, data_format, 'H'); const int32_t dilation_cols = GetTensorDim(dilations, data_format, 'W'); DimensionHandle batch_size_dim; DimensionHandle input_depth_dim; gtl::InlinedVector<DimensionHandle, 2> input_spatial_dims(2); TF_RETURN_IF_ERROR(DimensionsFromShape( conv_input_shape, data_format, &batch_size_dim, absl::MakeSpan(input_spatial_dims), &input_depth_dim, c)); DimensionHandle output_depth_dim = c->Dim( filter_shape, GetFilterDimIndex<num_spatial_dims>(filter_format, 'O')); DimensionHandle filter_rows_dim = c->Dim( filter_shape, GetFilterDimIndex<num_spatial_dims>(filter_format, 'H')); DimensionHandle filter_cols_dim = c->Dim( filter_shape, GetFilterDimIndex<num_spatial_dims>(filter_format, 'W')); DimensionHandle filter_input_depth_dim; if (filter_format == FORMAT_OIHW_VECT_I) { TF_RETURN_IF_ERROR(c->Multiply( c->Dim(filter_shape, GetFilterDimIndex<num_spatial_dims>(filter_format, 'I')), c->Dim(filter_shape, GetFilterTensorInnerInputChannelsDimIndex(rank, filter_format)), &filter_input_depth_dim)); } else { filter_input_depth_dim = c->Dim( filter_shape, GetFilterDimIndex<num_spatial_dims>(filter_format, 'I')); } // Check that the input tensor and the filter tensor agree on the channel // count. 
if (c->ValueKnown(input_depth_dim) && c->ValueKnown(filter_input_depth_dim)) { int64_t input_depth_value = c->Value(input_depth_dim), filter_input_depth_value = c->Value(filter_input_depth_dim); if (filter_input_depth_value == 0) return errors::InvalidArgument("Depth of filter must not be 0"); if (input_depth_value % filter_input_depth_value != 0) return errors::InvalidArgument( "Depth of input (", input_depth_value, ") is not a multiple of input depth of filter (", filter_input_depth_value, ")"); if (input_depth_value != filter_input_depth_value) { int64_t num_groups = input_depth_value / filter_input_depth_value; if (c->ValueKnown(output_depth_dim)) { int64_t output_depth_value = c->Value(output_depth_dim); if (num_groups == 0) return errors::InvalidArgument("Number of groups must not be 0"); if (output_depth_value % num_groups != 0) return errors::InvalidArgument( "Depth of output (", output_depth_value, ") is not a multiple of the number of groups (", num_groups, ")"); } } } Padding padding; TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding)); std::vector<int64_t> explicit_paddings; if (supports_explicit_padding) { Status s = c->GetAttr("explicit_paddings", &explicit_paddings); // Use the default value, which is an empty list, if the attribute is not // found. Otherwise return the error to the caller. if (!s.ok() && !errors::IsNotFound(s)) { return s; } TF_RETURN_IF_ERROR(CheckValidPadding(padding, explicit_paddings, /*num_dims=*/4, data_format)); } else { CHECK(padding != Padding::EXPLICIT); // Crash ok. } DimensionHandle output_rows, output_cols; int64_t pad_rows_before = -1, pad_rows_after = -1; int64_t pad_cols_before = -1, pad_cols_after = -1; if (padding == Padding::EXPLICIT) { GetExplicitPaddingForDim(explicit_paddings, data_format, 'H', &pad_rows_before, &pad_rows_after); GetExplicitPaddingForDim(explicit_paddings, data_format, 'W', &pad_cols_before, &pad_cols_after); } TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2( c, input_spatial_dims[0], filter_rows_dim, dilation_rows, stride_rows, padding, pad_rows_before, pad_rows_after, &output_rows)); TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2( c, input_spatial_dims[1], filter_cols_dim, dilation_cols, stride_cols, padding, pad_cols_before, pad_cols_after, &output_cols)); absl::optional<DimensionHandle> vect_size; if (data_format == FORMAT_NCHW_VECT_C) { vect_size.emplace(c->Dim(conv_input_shape, GetTensorInnerFeatureDimIndex(rank, data_format))); } ShapeHandle output_shape; TF_RETURN_IF_ERROR(ShapeFromDimensions( batch_size_dim, {output_rows, output_cols}, output_depth_dim, data_format, vect_size, c, &output_shape)); c->set_output(0, output_shape); return Status::OK(); }
0
[ "CWE-125" ]
tensorflow
a0d64445116c43cf46a5666bd4eee28e7a82f244
19,379,298,955,210,933,000,000,000,000,000,000,000
156
Prevent OOB access in QuantizeV2 shape inference PiperOrigin-RevId: 400309614 Change-Id: I31412c71b05b4f21b677f7fa715a61499cbee39d
void automataUnitTest() { void *automa = ndpi_init_automa(); assert(automa); assert(ndpi_add_string_to_automa(automa, "hello") == 0); assert(ndpi_add_string_to_automa(automa, "world") == 0); ndpi_finalize_automa(automa); assert(ndpi_match_string(automa, "This is the wonderful world of nDPI") == 1); ndpi_free_automa(automa); }
0
[ "CWE-125" ]
nDPI
b7e666e465f138ae48ab81976726e67deed12701
164,340,388,917,932,200,000,000,000,000,000,000,000
10
Added fix to avoid potential heap buffer overflow in H.323 dissector Modified HTTP report information to make it closer to the HTTP field names
rsRetVal qqueueEnqObjDirectBatch(qqueue_t *pThis, batch_t *pBatch) { DEFiRet; ASSERT(pThis != NULL); /* calling the consumer is quite different here than it is from a worker thread */ /* we need to provide the consumer's return value back to the caller because in direct * mode the consumer probably has a lot to convey (which get's lost in the other modes * because they are asynchronous. But direct mode is deliberately synchronous. * rgerhards, 2008-02-12 * We use our knowledge about the batch_t structure below, but without that, we * pay a too-large performance toll... -- rgerhards, 2009-04-22 */ iRet = pThis->pConsumer(pThis->pUsr, pBatch, &pThis->bShutdownImmediate); RETiRet; }
0
[ "CWE-772" ]
rsyslog
dfa88369d4ca4290db56b843f9eabdae1bfe0fd5
123,758,166,742,462,580,000,000,000,000,000,000,000
18
bugfix: memory leak when $RepeatedMsgReduction on was used bug tracker: http://bugzilla.adiscon.com/show_bug.cgi?id=225
static Image *ReadMATImageV4(const ImageInfo *image_info,Image *image, ExceptionInfo *exception) { typedef struct { unsigned char Type[4]; unsigned int nRows; unsigned int nCols; unsigned int imagf; unsigned int nameLen; } MAT4_HDR; long ldblk; EndianType endian; Image *rotated_image; MagickBooleanType status; MAT4_HDR HDR; QuantumInfo *quantum_info; QuantumFormatType format_type; register ssize_t i; ssize_t count, y; unsigned char *pixels; unsigned int depth; status=MagickTrue; (void) SeekBlob(image,0,SEEK_SET); while (EOFBlob(image) == MagickFalse) { /* Object parser. */ ldblk=ReadBlobLSBLong(image); if (EOFBlob(image) != MagickFalse) break; if ((ldblk > 9999) || (ldblk < 0)) break; HDR.Type[3]=ldblk % 10; ldblk /= 10; /* T digit */ HDR.Type[2]=ldblk % 10; ldblk /= 10; /* P digit */ HDR.Type[1]=ldblk % 10; ldblk /= 10; /* O digit */ HDR.Type[0]=ldblk; /* M digit */ if (HDR.Type[3] != 0) break; /* Data format */ if (HDR.Type[2] != 0) break; /* Always 0 */ if (HDR.Type[0] == 0) { HDR.nRows=ReadBlobLSBLong(image); HDR.nCols=ReadBlobLSBLong(image); HDR.imagf=ReadBlobLSBLong(image); HDR.nameLen=ReadBlobLSBLong(image); endian=LSBEndian; } else { HDR.nRows=ReadBlobMSBLong(image); HDR.nCols=ReadBlobMSBLong(image); HDR.imagf=ReadBlobMSBLong(image); HDR.nameLen=ReadBlobMSBLong(image); endian=MSBEndian; } if ((HDR.imagf !=0) && (HDR.imagf !=1)) break; if (HDR.nameLen > 0xFFFF) return(DestroyImageList(image)); for (i=0; i < (ssize_t) HDR.nameLen; i++) { int byte; /* Skip matrix name. */ byte=ReadBlobByte(image); if (byte == EOF) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } } image->columns=(size_t) HDR.nRows; image->rows=(size_t) HDR.nCols; if ((image->columns == 0) || (image->rows == 0)) return(DestroyImageList(image)); if (image_info->ping != MagickFalse) { Swap(image->columns,image->rows); if(HDR.imagf==1) ldblk *= 2; SeekBlob(image, HDR.nCols*ldblk, SEEK_CUR); if ((image->columns == 0) || (image->rows == 0)) return(image->previous == (Image *) NULL ? 
DestroyImageList(image) : image); goto skip_reading_current; } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) return(DestroyImageList(image)); (void) SetImageBackgroundColor(image); (void) SetImageColorspace(image,GRAYColorspace); quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) return(DestroyImageList(image)); switch(HDR.Type[1]) { case 0: format_type=FloatingPointQuantumFormat; depth=64; break; case 1: format_type=FloatingPointQuantumFormat; depth=32; break; case 2: format_type=UnsignedQuantumFormat; depth=16; break; case 3: format_type=SignedQuantumFormat; depth=16; break; case 4: format_type=UnsignedQuantumFormat; depth=8; break; default: format_type=UnsignedQuantumFormat; depth=8; break; } image->depth=depth; if (HDR.Type[0] != 0) SetQuantumEndian(image,quantum_info,MSBEndian); status=SetQuantumFormat(image,quantum_info,format_type); status=SetQuantumDepth(image,quantum_info,depth); status=SetQuantumEndian(image,quantum_info,endian); SetQuantumScale(quantum_info,1.0); pixels=(unsigned char *) GetQuantumPixels(quantum_info); for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; count=ReadBlob(image,depth/8*image->columns,(unsigned char *) pixels); if (count == -1) break; q=QueueAuthenticPixels(image,0,image->rows-y-1,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, GrayQuantum,pixels,exception); if ((HDR.Type[1] == 2) || (HDR.Type[1] == 3)) FixSignedValues(q,image->columns); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } if (HDR.imagf == 1) for (y=0; y < (ssize_t) image->rows; y++) { /* Read complex pixels. */ count=ReadBlob(image,depth/8*image->columns,(unsigned char *) pixels); if (count == -1) break; if (HDR.Type[1] == 0) InsertComplexDoubleRow((double *) pixels,y,image,0,0); else InsertComplexFloatRow((float *) pixels,y,image,0,0); } quantum_info=DestroyQuantumInfo(quantum_info); if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } rotated_image=RotateImage(image,90.0,exception); if (rotated_image != (Image *) NULL) { rotated_image->page.x=0; rotated_image->page.y=0; rotated_image->colors = image->colors; DestroyBlob(rotated_image); rotated_image->blob=ReferenceBlob(image->blob); AppendImageToList(&image,rotated_image); DeleteImageFromList(&image); } /* Proceed to next image. */ if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; /* Allocate next image structure. */ skip_reading_current: AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) { status=MagickFalse; break; } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; } (void) CloseBlob(image); if (status == MagickFalse) return(DestroyImageList(image)); return(GetFirstImageInList(image)); }
0
[ "CWE-787" ]
ImageMagick6
db7a4be592328af06d776ce3bab24b8c6de5be20
275,042,072,900,460,460,000,000,000,000,000,000,000
243
https://github.com/ImageMagick/ImageMagick/issues/1221
static void gui_windows_remove_parent(MAIN_WINDOW_REC *window) { MAIN_WINDOW_REC *new_parent; GSList *tmp; new_parent = mainwindows->data; for (tmp = windows; tmp != NULL; tmp = tmp->next) { WINDOW_REC *rec = tmp->data; if (rec->gui_data != NULL && WINDOW_MAIN(rec) == window) gui_window_reparent(rec, new_parent); } }
0
[ "CWE-476" ]
irssi
5b5bfef03596d95079c728f65f523570dd7b03aa
130,842,376,728,015,730,000,000,000,000,000,000,000
13
check the error condition of mainwindow_create
static SQInteger array_find(HSQUIRRELVM v) { SQObject &o = stack_get(v,1); SQObjectPtr &val = stack_get(v,2); SQArray *a = _array(o); SQInteger size = a->Size(); SQObjectPtr temp; for(SQInteger n = 0; n < size; n++) { bool res = false; a->Get(n,temp); if(SQVM::IsEqual(temp,val,res) && res) { v->Push(n); return 1; } } return 0; }
0
[ "CWE-703", "CWE-787" ]
squirrel
a6413aa690e0bdfef648c68693349a7b878fe60d
7,107,704,084,307,370,000,000,000,000,000,000,000
17
fix in thread.call
static void vrend_update_frontface_state(struct vrend_context *ctx) { struct pipe_rasterizer_state *state = &ctx->sub->rs_state; int front_ccw = state->front_ccw; front_ccw ^= (ctx->sub->inverted_fbo_content ? 0 : 1); if (front_ccw) glFrontFace(GL_CCW); else glFrontFace(GL_CW); }
0
[ "CWE-787" ]
virglrenderer
cbc8d8b75be360236cada63784046688aeb6d921
142,752,912,343,918,220,000,000,000,000,000,000,000
11
vrend: check transfer bounds for negative values too and report error Closes #138 Signed-off-by: Gert Wollny <[email protected]> Reviewed-by: Emil Velikov <[email protected]>
bool ValidateBaseCode() { bool pass = true, fail; byte data[255]; for (unsigned int i=0; i<255; i++) data[i] = byte(i); const char hexEncoded[] = "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627" "28292A2B2C2D2E2F303132333435363738393A3B3C3D3E3F404142434445464748494A4B4C4D4E4F" "505152535455565758595A5B5C5D5E5F606162636465666768696A6B6C6D6E6F7071727374757677" "78797A7B7C7D7E7F808182838485868788898A8B8C8D8E8F909192939495969798999A9B9C9D9E9F" "A0A1A2A3A4A5A6A7A8A9AAABACADAEAFB0B1B2B3B4B5B6B7B8B9BABBBCBDBEBFC0C1C2C3C4C5C6C7" "C8C9CACBCCCDCECFD0D1D2D3D4D5D6D7D8D9DADBDCDDDEDFE0E1E2E3E4E5E6E7E8E9EAEBECEDEEEF" "F0F1F2F3F4F5F6F7F8F9FAFBFCFDFE"; const char base32Encoded[] = "AAASEA2EAWDAQCAJBIFS2DIQB6IBCESVCSKTNF22DEPBYHA7D2RUAIJCENUCKJTHFAWUWK3NFWZC8NBT" "GI3VIPJYG66DUQT5HS8V6R4AIFBEGTCFI3DWSUKKJPGE4VURKBIXEW4WKXMFQYC3MJPX2ZK8M7SGC2VD" "NTUYN35IPFXGY5DPP3ZZA6MUQP4HK7VZRB6ZW856RX9H9AEBSKB2JBNGS8EIVCWMTUG27D6SUGJJHFEX" "U4M3TGN4VQQJ5HW9WCS4FI7EWYVKRKFJXKX43MPQX82MDNXVYU45PP72ZG7MZRF7Z496BSQC2RCNMTYH" "3DE6XU8N3ZHN9WGT4MJ7JXQY49NPVYY55VQ77Z9A6HTQH3HF65V8T4RK7RYQ55ZR8D29F69W8Z5RR8H3" "9M7939R8"; const char base64AndHexEncoded[] = "41414543417751464267634943516F4C4441304F4478415245684D554652595847426B6147787764" "486838674953496A4A43556D4A7967704B6973734C5334764D4445794D7A51310A4E6A63344F546F" "375044302B50304242516B4E4552555A4853456C4B5330784E546B395155564A5456465657563168" "5A576C746358563566594746695932526C5A6D646F615770720A6247317562334278636E4E306458" "5A3365486C3665337839666E2B4167594B44684957476834694A696F754D6A5936506B4A47536B35" "53566C7065596D5A71626E4A32656E3643680A6F714F6B7061616E714B6D717136797472712B7773" "624B7A744C573274376935757275387662362F774D484377385446787366497963724C7A4D334F7A" "39445230745055316462580A324E6E6132397A6433742F6734654C6A354F586D352B6A7036757673" "3765377638504879382F5431397666342B6672372F50332B0A"; const char base64URLAndHexEncoded[] = "41414543417751464267634943516F4C4441304F4478415245684D554652595847426B6147787764" "486838674953496A4A43556D4A7967704B6973734C5334764D4445794D7A51314E6A63344F546F37" "5044302D50304242516B4E4552555A4853456C4B5330784E546B395155564A54564656575631685A" "576C746358563566594746695932526C5A6D646F615770726247317562334278636E4E3064585A33" "65486C3665337839666E2D4167594B44684957476834694A696F754D6A5936506B4A47536B355356" "6C7065596D5A71626E4A32656E3643686F714F6B7061616E714B6D717136797472712D7773624B7A" "744C573274376935757275387662365F774D484377385446787366497963724C7A4D334F7A394452" "3074505531646258324E6E6132397A6433745F6734654C6A354F586D352D6A703675767337653776" "38504879385F5431397666342D6672375F50332D"; std::cout << "\nBase64, Base64URL, Base32 and Base16 coding validation suite running...\n\n"; fail = !TestFilter(HexEncoder().Ref(), data, 255, (const byte *)hexEncoded, strlen(hexEncoded)); try {HexEncoder().IsolatedInitialize(g_nullNameValuePairs);} catch (const Exception&) {fail=true;} std::cout << (fail ? "FAILED:" : "passed:"); std::cout << " Hex Encoding\n"; pass = pass && !fail; fail = !TestFilter(HexDecoder().Ref(), (const byte *)hexEncoded, strlen(hexEncoded), data, 255); try {HexDecoder().IsolatedInitialize(g_nullNameValuePairs);} catch (const Exception&) {fail=true;} std::cout << (fail ? "FAILED:" : "passed:"); std::cout << " Hex Decoding\n"; pass = pass && !fail; fail = !TestFilter(Base32Encoder().Ref(), data, 255, (const byte *)base32Encoded, strlen(base32Encoded)); try {Base32Encoder().IsolatedInitialize(g_nullNameValuePairs);} catch (const Exception&) {fail=true;} std::cout << (fail ? 
"FAILED:" : "passed:"); std::cout << " Base32 Encoding\n"; pass = pass && !fail; fail = !TestFilter(Base32Decoder().Ref(), (const byte *)base32Encoded, strlen(base32Encoded), data, 255); try {Base32Decoder().IsolatedInitialize(g_nullNameValuePairs);} catch (const Exception&) {fail=true;} std::cout << (fail ? "FAILED:" : "passed:"); std::cout << " Base32 Decoding\n"; pass = pass && !fail; fail = !TestFilter(Base64Encoder(new HexEncoder).Ref(), data, 255, (const byte *)base64AndHexEncoded, strlen(base64AndHexEncoded)); try {Base64Encoder().IsolatedInitialize(g_nullNameValuePairs);} catch (const Exception&) {fail=true;} std::cout << (fail ? "FAILED:" : "passed:"); std::cout << " Base64 Encoding\n"; pass = pass && !fail; fail = !TestFilter(HexDecoder(new Base64Decoder).Ref(), (const byte *)base64AndHexEncoded, strlen(base64AndHexEncoded), data, 255); try {Base64Decoder().IsolatedInitialize(g_nullNameValuePairs);} catch (const Exception&) {fail=true;} std::cout << (fail ? "FAILED:" : "passed:"); std::cout << " Base64 Decoding\n"; pass = pass && !fail; fail = !TestFilter(Base64URLEncoder(new HexEncoder).Ref(), data, 255, (const byte *)base64URLAndHexEncoded, strlen(base64URLAndHexEncoded)); try {Base64URLEncoder().IsolatedInitialize(g_nullNameValuePairs);} catch (const Exception&) {fail=true;} std::cout << (fail ? "FAILED:" : "passed:"); std::cout << " Base64 URL Encoding\n"; pass = pass && !fail; fail = !TestFilter(HexDecoder(new Base64URLDecoder).Ref(), (const byte *)base64URLAndHexEncoded, strlen(base64URLAndHexEncoded), data, 255); try {Base64URLDecoder().IsolatedInitialize(g_nullNameValuePairs);} catch (const Exception&) {fail=true;} std::cout << (fail ? "FAILED:" : "passed:"); std::cout << " Base64 URL Decoding\n"; pass = pass && !fail; return pass; }
0
[ "CWE-190", "CWE-125" ]
cryptopp
07dbcc3d9644b18e05c1776db2a57fe04d780965
321,709,432,635,175,000,000,000,000,000,000,000,000
103
Add Inflator::BadDistanceErr exception (Issue 414) The improved validation and exception clear the Address Sanitizer and Undefined Behavior Sanitizer findings
event_nr2name(event_T event) { int i; for (i = 0; event_names[i].name != NULL; ++i) if (event_names[i].event == event) return (char_u *)event_names[i].name; return (char_u *)"Unknown"; }
0
[ "CWE-200", "CWE-668" ]
vim
5a73e0ca54c77e067c3b12ea6f35e3e8681e8cf8
31,085,162,550,169,920,000,000,000,000,000,000,000
9
patch 8.0.1263: others can read the swap file if a user is careless Problem: Others can read the swap file if a user is careless with his primary group. Solution: If the group permission allows for reading but the world permissions doesn't, make sure the group is right.
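A standalone POSIX sketch of the remedy the patch note describes; the function name and exact policy details are illustrative, not Vim's actual implementation: if the swap file is group-readable but not world-readable and its group differs from the original file's group, either fix the group or strip the group bits.

```c
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

/* Illustrative only: not the actual Vim code. */
static void fix_swapfile_group(const char *swapname, gid_t orig_gid)
{
	struct stat st;

	if (stat(swapname, &st) != 0)
		return;
	if ((st.st_mode & S_IRGRP) && !(st.st_mode & S_IROTH) && st.st_gid != orig_gid) {
		/* Try to give the swap file the original file's group ... */
		if (chown(swapname, (uid_t)-1, orig_gid) != 0)
			/* ... and if that fails, remove the group permissions. */
			(void)chmod(swapname, st.st_mode & ~(mode_t)(S_IRGRP | S_IWGRP | S_IXGRP));
	}
}
```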
static void storm_memset_func_cfg(struct bnx2x *bp, struct tstorm_eth_function_common_config *tcfg, u16 abs_fid) { size_t size = sizeof(struct tstorm_eth_function_common_config); u32 addr = BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid); __storm_memset_struct(bp, addr, size, (u32 *)tcfg); }
0
[ "CWE-20" ]
linux
8914a595110a6eca69a5e275b323f5d09e18f4f9
212,012,675,621,820,000,000,000,000,000,000,000,000
11
bnx2x: disable GSO where gso_size is too big for hardware If a bnx2x card is passed a GSO packet with a gso_size larger than ~9700 bytes, it will cause a firmware error that will bring the card down: bnx2x: [bnx2x_attn_int_deasserted3:4323(enP24p1s0f0)]MC assert! bnx2x: [bnx2x_mc_assert:720(enP24p1s0f0)]XSTORM_ASSERT_LIST_INDEX 0x2 bnx2x: [bnx2x_mc_assert:736(enP24p1s0f0)]XSTORM_ASSERT_INDEX 0x0 = 0x00000000 0x25e43e47 0x00463e01 0x00010052 bnx2x: [bnx2x_mc_assert:750(enP24p1s0f0)]Chip Revision: everest3, FW Version: 7_13_1 ... (dump of values continues) ... Detect when the mac length of a GSO packet is greater than the maximum packet size (9700 bytes) and disable GSO. Signed-off-by: Daniel Axtens <[email protected]> Reviewed-by: Eric Dumazet <[email protected]> Signed-off-by: David S. Miller <[email protected]>
inode_val_compare (const void *val1, const void *val2) { const struct inode_val *ival1 = val1; const struct inode_val *ival2 = val2; return ival1->inode == ival2->inode && ival1->major_num == ival2->major_num && ival1->minor_num == ival2->minor_num; }
0
[ "CWE-190" ]
cpio
dd96882877721703e19272fe25034560b794061b
145,127,091,348,972,280,000,000,000,000,000,000,000
8
Rewrite dynamic string support. * src/dstring.c (ds_init): Take a single argument. (ds_free): New function. (ds_resize): Take a single argument. Use x2nrealloc to expand the storage. (ds_reset,ds_append,ds_concat,ds_endswith): New function. (ds_fgetstr): Rewrite. In particular, this fixes integer overflow. * src/dstring.h (dynamic_string): Keep both the allocated length (ds_size) and index of the next free byte in the string (ds_idx). (ds_init,ds_resize): Change signature. (ds_len): New macro. (ds_free,ds_reset,ds_append,ds_concat,ds_endswith): New protos. * src/copyin.c: Use new ds_ functions. * src/copyout.c: Likewise. * src/copypass.c: Likewise. * src/util.c: Likewise.
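A minimal standalone sketch of the rewritten dynamic-string API named in the commit message (dynamic_string, ds_size, ds_idx, ds_init, ds_free, ds_append). The doubling realloc stands in for gnulib's x2nrealloc and error handling is trimmed, so treat it as an approximation rather than the actual src/dstring.c.

```c
#include <stdlib.h>

typedef struct {
	size_t ds_size;   /* allocated length */
	size_t ds_idx;    /* index of the next free byte */
	char  *ds_string;
} dynamic_string;

static void ds_init(dynamic_string *s)
{
	s->ds_size = 32;
	s->ds_idx = 0;
	s->ds_string = malloc(s->ds_size);        /* allocation checks omitted for brevity */
}

static void ds_free(dynamic_string *s)
{
	free(s->ds_string);
	s->ds_string = NULL;
	s->ds_size = s->ds_idx = 0;
}

static void ds_append(dynamic_string *s, int c)
{
	if (s->ds_idx == s->ds_size) {
		s->ds_size *= 2;                      /* geometric growth instead of a fixed step */
		s->ds_string = realloc(s->ds_string, s->ds_size);
	}
	s->ds_string[s->ds_idx++] = (char)c;
}
```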
static half cut(const double val) { return (half)val; }
0
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
330,770,419,315,365,760,000,000,000,000,000,000,000
1
Fix other issues in 'CImg<T>::load_bmp()'.
static void reclaim_consumed_buffers(struct port *port) { struct port_buffer *buf; unsigned int len; if (!port->portdev) { /* Device has been unplugged. vqs are already gone. */ return; } while ((buf = virtqueue_get_buf(port->out_vq, &len))) { free_buf(buf, false); port->outvq_full = false; } }
0
[ "CWE-119", "CWE-787" ]
linux
c4baad50297d84bde1a7ad45e50c73adae4a2192
327,255,425,201,582,980,000,000,000,000,000,000,000
14
virtio-console: avoid DMA from stack put_chars() stuffs the buffer it gets into an sg, but that buffer may be on the stack. This breaks with CONFIG_VMAP_STACK=y (for me, it manifested as printks getting turned into NUL bytes). Signed-off-by: Omar Sandoval <[email protected]> Signed-off-by: Michael S. Tsirkin <[email protected]> Reviewed-by: Amit Shah <[email protected]>
rfbClientConnFailed(rfbClientPtr cl, char *reason) { char *buf; int len = strlen(reason); rfbLog("rfbClientConnFailed(\"%s\")\n", reason); buf = (char *)malloc(8 + len); ((uint32_t *)buf)[0] = 0; ((uint32_t *)buf)[1] = Swap32IfLE(len); memcpy(buf + 8, reason, len); if (WriteExact(cl, buf, 8 + len) < 0) rfbLogPerror("rfbClientConnFailed: write"); free(buf); rfbCloseClient(cl); }
0
[ "CWE-119" ]
vino
dff52694a384fe95195f2211254026b752d63ec4
297,753,274,351,407,320,000,000,000,000,000,000,000
19
Avoid out-of-bounds memory accesses This fixes two critical security vulnerabilities that lead to an out-of-bounds memory access with a crafted client framebuffer update request packet. The dimensions of the update from the packet are checked to ensure that they are within the screen dimensions. Thanks to Kevin Chen from the Bitblaze group for the reports in bugs 641802 and 641803. The CVE identifiers for these vulnerabilities are CVE-2011-0904 and CVE-2011-0905.
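A standalone sketch of the bounds check the commit message describes; the struct and function names are hypothetical, not vino's API. It rejects a client framebuffer-update rectangle whose origin or extent falls outside the screen.

```c
#include <stdint.h>

typedef struct { uint16_t x, y, w, h; } update_req;   /* hypothetical request layout */

static int update_request_is_valid(const update_req *req,
                                   uint16_t screen_w, uint16_t screen_h)
{
	if (req->x >= screen_w || req->y >= screen_h)
		return 0;                       /* origin outside the screen */
	if (req->w > screen_w - req->x || req->h > screen_h - req->y)
		return 0;                       /* rectangle would run past the edge */
	return 1;
}
```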
make_codepage_from_charset(const char *charset) { char cs[16]; char *p; unsigned cp; int a, b; if (charset == NULL || strlen(charset) > 15) return -1; /* Copy name to uppercase. */ p = cs; while (*charset) { char c = *charset++; if (c >= 'a' && c <= 'z') c -= 'a' - 'A'; *p++ = c; } *p++ = '\0'; cp = -1; /* Look it up in the table first, so that we can easily * override CP367, which we map to 1252 instead of 367. */ a = 0; b = sizeof(charsets)/sizeof(charsets[0]); while (b > a) { int c = (b + a) / 2; int r = strcmp(charsets[c].name, cs); if (r < 0) a = c + 1; else if (r > 0) b = c; else return charsets[c].cp; } /* If it's not in the table, try to parse it. */ switch (*cs) { case 'C': if (cs[1] == 'P' && cs[2] >= '0' && cs[2] <= '9') { cp = my_atoi(cs + 2); } else if (strcmp(cs, "CP_ACP") == 0) cp = get_current_codepage(); else if (strcmp(cs, "CP_OEMCP") == 0) cp = get_current_oemcp(); break; case 'I': if (cs[1] == 'B' && cs[2] == 'M' && cs[3] >= '0' && cs[3] <= '9') { cp = my_atoi(cs + 3); } break; case 'W': if (strncmp(cs, "WINDOWS-", 8) == 0) { cp = my_atoi(cs + 8); if (cp != 874 && (cp < 1250 || cp > 1258)) cp = -1;/* This may invalid code. */ } break; } return (cp); }
0
[ "CWE-476" ]
libarchive
42a3408ac7df1e69bea9ea12b72e14f59f7400c0
1,924,500,446,331,817,400,000,000,000,000,000,000
62
archive_strncat_l(): allocate and do not convert if length == 0 This ensures e.g. that archive_mstring_copy_mbs_len_l() does not set aes_set = AES_SET_MBS with aes_mbs.s == NULL. Resolves possible null-pointer dereference reported by OSS-Fuzz. Reported-By: OSS-Fuzz issue 286
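A hypothetical sketch of the behaviour described in the commit message, with invented names and a simplified string type: for a zero-length append, make sure the destination string exists so it is never left NULL, and skip the charset conversion entirely.

```c
#include <stdlib.h>

struct str_buf { char *s; size_t len; size_t cap; };   /* simplified stand-in type */

static int strncat_in_locale(struct str_buf *as, const void *p, size_t n)
{
	if (as->s == NULL) {                /* allocate even when n == 0 ... */
		as->cap = n + 64;
		as->s = malloc(as->cap);
		if (as->s == NULL)
			return -1;
		as->s[0] = '\0';
		as->len = 0;
	}
	if (n == 0)
		return 0;                       /* ... but do not convert anything */
	/* charset conversion and append of the n source bytes would follow here */
	(void)p;
	return 0;
}
```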
GF_Err trun_Write(GF_Box *s, GF_BitStream *bs) { GF_TrunEntry *p; GF_Err e; u32 i, count; GF_TrackFragmentRunBox *ptr = (GF_TrackFragmentRunBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->sample_count); //The rest depends on the flags if (ptr->flags & GF_ISOM_TRUN_DATA_OFFSET) { gf_bs_write_u32(bs, ptr->data_offset); } if (ptr->flags & GF_ISOM_TRUN_FIRST_FLAG) { gf_bs_write_u32(bs, ptr->first_sample_flags); } if (! (ptr->flags & (GF_ISOM_TRUN_DURATION | GF_ISOM_TRUN_SIZE | GF_ISOM_TRUN_FLAGS | GF_ISOM_TRUN_CTS_OFFSET) ) ) { return GF_OK; } count = gf_list_count(ptr->entries); for (i=0; i<count; i++) { p = (GF_TrunEntry*)gf_list_get(ptr->entries, i); if (ptr->flags & GF_ISOM_TRUN_DURATION) { gf_bs_write_u32(bs, p->Duration); } if (ptr->flags & GF_ISOM_TRUN_SIZE) { gf_bs_write_u32(bs, p->size); } //SHOULDN'T BE USED IF GF_ISOM_TRUN_FIRST_FLAG IS DEFINED if (ptr->flags & GF_ISOM_TRUN_FLAGS) { gf_bs_write_u32(bs, p->flags); } if (ptr->flags & GF_ISOM_TRUN_CTS_OFFSET) { if (ptr->version==0) { gf_bs_write_u32(bs, p->CTS_Offset); } else { gf_bs_write_u32(bs, (u32) p->CTS_Offset); } } } return GF_OK;
0
[ "CWE-400", "CWE-401" ]
gpac
d2371b4b204f0a3c0af51ad4e9b491144dd1225c
154,264,481,722,027,350,000,000,000,000,000,000,000
49
prevent dref memleak on invalid input (#1183)
static inline void complete_tx(struct sdma_engine *sde, struct sdma_txreq *tx, int res) { /* protect against complete modifying */ struct iowait *wait = tx->wait; callback_t complete = tx->complete; #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER trace_hfi1_sdma_out_sn(sde, tx->sn); if (WARN_ON_ONCE(sde->head_sn != tx->sn)) dd_dev_err(sde->dd, "expected %llu got %llu\n", sde->head_sn, tx->sn); sde->head_sn++; #endif __sdma_txclean(sde->dd, tx); if (complete) (*complete)(tx, res); if (iowait_sdma_dec(wait)) iowait_drain_wakeup(wait); }
0
[ "CWE-400", "CWE-401" ]
linux
34b3be18a04ecdc610aae4c48e5d1b799d8689f6
26,975,545,866,339,376,000,000,000,000,000,000,000
21
RDMA/hfi1: Prevent memory leak in sdma_init In sdma_init if rhashtable_init fails the allocated memory for tmp_sdma_rht should be released. Fixes: 5a52a7acf7e2 ("IB/hfi1: NULL pointer dereference when freeing rhashtable") Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Navid Emamdoost <[email protected]> Acked-by: Dennis Dalessandro <[email protected]> Signed-off-by: Jason Gunthorpe <[email protected]>
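A short fragment approximating the fix the commit message describes (kernel context assumed, not standalone): when rhashtable_init() fails during sdma_init(), the just-allocated table must be freed before returning.

```c
	/* Fragment only; surrounding sdma_init() code and declarations are assumed. */
	tmp_sdma_rht = kzalloc(sizeof(*tmp_sdma_rht), GFP_KERNEL);
	if (!tmp_sdma_rht)
		return -ENOMEM;

	ret = rhashtable_init(tmp_sdma_rht, &sdma_rht_params);
	if (ret < 0) {
		kfree(tmp_sdma_rht);            /* this free was missing, leaking the table */
		return ret;
	}
```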
evutil_socket_(int domain, int type, int protocol) { evutil_socket_t r; #if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC) r = socket(domain, type, protocol); if (r >= 0) return r; else if ((type & (SOCK_NONBLOCK|SOCK_CLOEXEC)) == 0) return -1; #endif #define SOCKET_TYPE_MASK (~(EVUTIL_SOCK_NONBLOCK|EVUTIL_SOCK_CLOEXEC)) r = socket(domain, type & SOCKET_TYPE_MASK, protocol); if (r < 0) return -1; if (type & EVUTIL_SOCK_NONBLOCK) { if (evutil_fast_socket_nonblocking(r) < 0) { evutil_closesocket(r); return -1; } } if (type & EVUTIL_SOCK_CLOEXEC) { if (evutil_fast_socket_closeonexec(r) < 0) { evutil_closesocket(r); return -1; } } return r; }
0
[ "CWE-119", "CWE-787" ]
libevent
329acc18a0768c21ba22522f01a5c7f46cacc4d5
106,902,952,853,052,820,000,000,000,000,000,000,000
28
evutil_parse_sockaddr_port(): fix buffer overflow @asn-the-goblin-slayer: "Length between '[' and ']' is cast to signed 32 bit integer on line 1815. Is the length is more than 2<<31 (INT_MAX), len will hold a negative value. Consequently, it will pass the check at line 1816. Segfault happens at line 1819. Generate a resolv.conf with generate-resolv.conf, then compile and run poc.c. See entry-functions.txt for functions in tor that might be vulnerable. Please credit 'Guido Vranken' for this discovery through the Tor bug bounty program." Reproducer for gdb (https://gist.github.com/azat/be2b0d5e9417ba0dfe2c): start p (1ULL<<31)+1ULL # $1 = 2147483649 p malloc(sizeof(struct sockaddr)) # $2 = (void *) 0x646010 p malloc(sizeof(int)) # $3 = (void *) 0x646030 p malloc($1) # $4 = (void *) 0x7fff76a2a010 p memset($4, 1, $1) # $5 = 1990369296 p (char *)$4 # $6 = 0x7fff76a2a010 '\001' <repeats 200 times>... set $6[0]='[' set $6[$1]=']' p evutil_parse_sockaddr_port($4, $2, $3) # $7 = -1 Before: $ gdb bin/http-connect < gdb (gdb) $1 = 2147483649 (gdb) (gdb) $2 = (void *) 0x646010 (gdb) (gdb) $3 = (void *) 0x646030 (gdb) (gdb) $4 = (void *) 0x7fff76a2a010 (gdb) (gdb) $5 = 1990369296 (gdb) (gdb) $6 = 0x7fff76a2a010 '\001' <repeats 200 times>... (gdb) (gdb) (gdb) (gdb) Program received signal SIGSEGV, Segmentation fault. __memcpy_sse2_unaligned () at memcpy-sse2-unaligned.S:36 After: $ gdb bin/http-connect < gdb (gdb) $1 = 2147483649 (gdb) (gdb) $2 = (void *) 0x646010 (gdb) (gdb) $3 = (void *) 0x646030 (gdb) (gdb) $4 = (void *) 0x7fff76a2a010 (gdb) (gdb) $5 = 1990369296 (gdb) (gdb) $6 = 0x7fff76a2a010 '\001' <repeats 200 times>... (gdb) (gdb) (gdb) (gdb) $7 = -1 (gdb) (gdb) quit Fixes: #318
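A simplified standalone sketch of the hardened length handling the report calls for; the helper name and signature are mine, not libevent's. The byte count between '[' and ']' stays in a size_t and is checked against the destination buffer before any copy, so an oversized input can never wrap into a small or negative length and slip past the check.

```c
#include <stddef.h>
#include <string.h>

/* Illustrative helper, not the actual evutil_parse_sockaddr_port() code. */
static int copy_bracketed_host(char *out, size_t outlen,
                               const char *open_bracket, const char *close_bracket)
{
	if (close_bracket <= open_bracket + 1)
		return -1;                              /* malformed or empty "[]" */
	size_t len = (size_t)(close_bracket - open_bracket - 1);
	if (len >= outlen)
		return -1;                              /* would not fit: reject instead of truncating */
	memcpy(out, open_bracket + 1, len);
	out[len] = '\0';
	return 0;
}
```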
#endif } inline FILE* _stderr(const bool throw_exception) { #ifndef cimg_use_r cimg::unused(throw_exception); return stderr; #else if (throw_exception) { cimg::exception_mode(0); throw CImgIOException("cimg::stderr(): Reference to 'stderr' stream not allowed in R mode " "('cimg_use_r' is defined)."); }
0
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
91,606,865,418,207,590,000,000,000,000,000,000,000
13
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file do not exceed file size.
static void rtl8139_TxStatus_write(RTL8139State *s, uint32_t txRegOffset, uint32_t val) { int descriptor = txRegOffset/4; /* handle C+ transmit mode register configuration */ if (s->cplus_enabled) { DPRINTF("RTL8139C+ DTCCR write offset=0x%x val=0x%08x " "descriptor=%d\n", txRegOffset, val, descriptor); /* handle Dump Tally Counters command */ s->TxStatus[descriptor] = val; if (descriptor == 0 && (val & 0x8)) { hwaddr tc_addr = rtl8139_addr64(s->TxStatus[0] & ~0x3f, s->TxStatus[1]); /* dump tally counters to specified memory location */ RTL8139TallyCounters_dma_write(s, tc_addr); /* mark dump completed */ s->TxStatus[0] &= ~0x8; } return; } DPRINTF("TxStatus write offset=0x%x val=0x%08x descriptor=%d\n", txRegOffset, val, descriptor); /* mask only reserved bits */ val &= ~0xff00c000; /* these bits are reset on write */ val = SET_MASKED(val, 0x00c00000, s->TxStatus[descriptor]); s->TxStatus[descriptor] = val; /* attempt to start transmission */ rtl8139_transmit(s); }
0
[ "CWE-835" ]
qemu
5311fb805a4403bba024e83886fa0e7572265de4
244,833,278,875,818,480,000,000,000,000,000,000,000
41
rtl8139: switch to use qemu_receive_packet() for loopback This patch switches to use qemu_receive_packet() which can detect reentrancy and return early. This is intended to address CVE-2021-3416. Cc: Prasad J Pandit <[email protected]> Cc: [email protected] Buglink: https://bugs.launchpad.net/qemu/+bug/1910826 Reviewed-by: Philippe Mathieu-Daudé <[email protected] Signed-off-by: Alexander Bulekov <[email protected]> Signed-off-by: Jason Wang <[email protected]>
new_scope(cctx_T *cctx, scopetype_T type) { scope_T *scope = ALLOC_CLEAR_ONE(scope_T); if (scope == NULL) return NULL; scope->se_outer = cctx->ctx_scope; cctx->ctx_scope = scope; scope->se_type = type; scope->se_local_count = cctx->ctx_locals.ga_len; return scope; }
0
[ "CWE-703", "CWE-122" ]
vim
d1d8f6bacb489036d0fd479c9dd3c0102c988889
115,350,370,000,789,230,000,000,000,000,000,000,000
12
patch 9.0.0211: invalid memory access when compiling :lockvar Problem: Invalid memory access when compiling :lockvar. Solution: Don't read past the end of the line.
ar6000_transfer_bin_file(struct ar6_softc *ar, AR6K_BIN_FILE file, u32 address, bool compressed) { int status; const char *filename; const struct firmware *fw_entry; u32 fw_entry_size; switch (file) { case AR6K_OTP_FILE: if (ar->arVersion.target_ver == AR6003_REV1_VERSION) { filename = AR6003_REV1_OTP_FILE; } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) { filename = AR6003_REV2_OTP_FILE; } else if (ar->arVersion.target_ver == AR6003_REV3_VERSION) { filename = AR6003_REV3_OTP_FILE; } else { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver)); return A_ERROR; } break; case AR6K_FIRMWARE_FILE: if (ar->arVersion.target_ver == AR6003_REV1_VERSION) { filename = AR6003_REV1_FIRMWARE_FILE; } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) { filename = AR6003_REV2_FIRMWARE_FILE; } else if (ar->arVersion.target_ver == AR6003_REV3_VERSION) { filename = AR6003_REV3_FIRMWARE_FILE; } else { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver)); return A_ERROR; } if (eppingtest) { bypasswmi = true; if (ar->arVersion.target_ver == AR6003_REV1_VERSION) { filename = AR6003_REV1_EPPING_FIRMWARE_FILE; } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) { filename = AR6003_REV2_EPPING_FIRMWARE_FILE; } else if (ar->arVersion.target_ver == AR6003_REV3_VERSION) { filename = AR6003_REV3_EPPING_FIRMWARE_FILE; } else { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("eppingtest : unsupported firmware revision: %d\n", ar->arVersion.target_ver)); return A_ERROR; } compressed = false; } #ifdef CONFIG_HOST_TCMD_SUPPORT if(testmode) { if (ar->arVersion.target_ver == AR6003_REV1_VERSION) { filename = AR6003_REV1_TCMD_FIRMWARE_FILE; } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) { filename = AR6003_REV2_TCMD_FIRMWARE_FILE; } else if (ar->arVersion.target_ver == AR6003_REV3_VERSION) { filename = AR6003_REV3_TCMD_FIRMWARE_FILE; } else { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver)); return A_ERROR; } compressed = false; } #endif #ifdef HTC_RAW_INTERFACE if (!eppingtest && bypasswmi) { if (ar->arVersion.target_ver == AR6003_REV1_VERSION) { filename = AR6003_REV1_ART_FIRMWARE_FILE; } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) { filename = AR6003_REV2_ART_FIRMWARE_FILE; } else { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver)); return A_ERROR; } compressed = false; } #endif break; case AR6K_PATCH_FILE: if (ar->arVersion.target_ver == AR6003_REV1_VERSION) { filename = AR6003_REV1_PATCH_FILE; } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) { filename = AR6003_REV2_PATCH_FILE; } else if (ar->arVersion.target_ver == AR6003_REV3_VERSION) { filename = AR6003_REV3_PATCH_FILE; } else { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver)); return A_ERROR; } break; case AR6K_BOARD_DATA_FILE: if (ar->arVersion.target_ver == AR6003_REV1_VERSION) { filename = AR6003_REV1_BOARD_DATA_FILE; } else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) { filename = AR6003_REV2_BOARD_DATA_FILE; } else if (ar->arVersion.target_ver == AR6003_REV3_VERSION) { filename = AR6003_REV3_BOARD_DATA_FILE; } else { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown firmware revision: %d\n", ar->arVersion.target_ver)); return A_ERROR; } break; default: AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown file type: %d\n", file)); return A_ERROR; } if ((A_REQUEST_FIRMWARE(&fw_entry, filename, ((struct device 
*)ar->osDevInfo.pOSDevice))) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to get %s\n", filename)); return A_ENOENT; } #ifdef SOFTMAC_FILE_USED if (file==AR6K_BOARD_DATA_FILE && fw_entry->data) { ar6000_softmac_update(ar, (u8 *)fw_entry->data, fw_entry->size); } #endif fw_entry_size = fw_entry->size; /* Load extended board data for AR6003 */ if ((file==AR6K_BOARD_DATA_FILE) && (fw_entry->data)) { u32 board_ext_address; u32 board_ext_data_size; u32 board_data_size; board_ext_data_size = (((ar)->arTargetType == TARGET_TYPE_AR6002) ? AR6002_BOARD_EXT_DATA_SZ : \ (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_BOARD_EXT_DATA_SZ : 0)); board_data_size = (((ar)->arTargetType == TARGET_TYPE_AR6002) ? AR6002_BOARD_DATA_SZ : \ (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_BOARD_DATA_SZ : 0)); /* Determine where in Target RAM to write Board Data */ bmifn(BMIReadMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_board_ext_data), (u8 *)&board_ext_address, 4)); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("Board extended Data download address: 0x%x\n", board_ext_address)); /* check whether the target has allocated memory for extended board data and file contains extended board data */ if ((board_ext_address) && (fw_entry->size == (board_data_size + board_ext_data_size))) { u32 param; status = BMIWriteMemory(ar->arHifDevice, board_ext_address, (u8 *)(fw_entry->data + board_data_size), board_ext_data_size); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__)); A_RELEASE_FIRMWARE(fw_entry); return A_ERROR; } /* Record the fact that extended board Data IS initialized */ param = (board_ext_data_size << 16) | 1; bmifn(BMIWriteMemory(ar->arHifDevice, HOST_INTEREST_ITEM_ADDRESS(ar, hi_board_ext_data_config), (unsigned char *)&param, 4)); } fw_entry_size = board_data_size; } if (compressed) { status = BMIFastDownload(ar->arHifDevice, address, (u8 *)fw_entry->data, fw_entry_size); } else { status = BMIWriteMemory(ar->arHifDevice, address, (u8 *)fw_entry->data, fw_entry_size); } if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__)); A_RELEASE_FIRMWARE(fw_entry); return A_ERROR; } A_RELEASE_FIRMWARE(fw_entry); return 0; }
0
[ "CWE-703", "CWE-264" ]
linux
550fd08c2cebad61c548def135f67aba284c6162
322,537,328,274,402,200,000,000,000,000,000,000,000
175
net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared After the last patch, We are left in a state in which only drivers calling ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real hardware call ether_setup for their net_devices and don't hold any state in their skbs. There are a handful of drivers that violate this assumption of course, and need to be fixed up. This patch identifies those drivers, and marks them as not being able to support the safe transmission of skbs by clearning the IFF_TX_SKB_SHARING flag in priv_flags Signed-off-by: Neil Horman <[email protected]> CC: Karsten Keil <[email protected]> CC: "David S. Miller" <[email protected]> CC: Jay Vosburgh <[email protected]> CC: Andy Gospodarek <[email protected]> CC: Patrick McHardy <[email protected]> CC: Krzysztof Halasa <[email protected]> CC: "John W. Linville" <[email protected]> CC: Greg Kroah-Hartman <[email protected]> CC: Marcel Holtmann <[email protected]> CC: Johannes Berg <[email protected]> Signed-off-by: David S. Miller <[email protected]>
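A fragment illustrating the pattern the commit message describes (kernel context assumed; the driver and setup-function names are hypothetical): a driver that keeps state in, or retransmits from, the skbs it is handed clears IFF_TX_SKB_SHARING after ether_setup() has set it.

```c
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/* Hypothetical driver setup routine showing the flag being cleared. */
static void example_dev_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;   /* this device cannot transmit shared skbs safely */
}
```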
static int rev_same_tree_as_empty(struct rev_info *revs, struct commit *commit) { int retval; struct tree *t1 = commit->tree; if (!t1) return 0; tree_difference = REV_TREE_SAME; DIFF_OPT_CLR(&revs->pruning, HAS_CHANGES); retval = diff_tree_sha1(NULL, t1->object.sha1, "", &revs->pruning); return retval >= 0 && (tree_difference == REV_TREE_SAME); }
0
[ "CWE-119", "CWE-787" ]
git
34fa79a6cde56d6d428ab0d3160cb094ebad3305
279,520,526,191,592,900,000,000,000,000,000,000,000
14
prefer memcpy to strcpy When we already know the length of a string (e.g., because we just malloc'd to fit it), it's nicer to use memcpy than strcpy, as it makes it more obvious that we are not going to overflow the buffer (because the size we pass matches the size in the allocation). This also eliminates calls to strcpy, which make auditing the code base harder. Signed-off-by: Jeff King <[email protected]> Signed-off-by: Junio C Hamano <[email protected]>
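As a minimal illustration of the pattern this message describes (a generic sketch, not code from the git tree): when the length is already known because the buffer was just allocated to fit it, memcpy makes the bound explicit and keeps strcpy out of the picture.

#include <stdlib.h>
#include <string.h>

/* Hypothetical helper: duplicate a string whose length is already known. */
static char *dup_known(const char *src, size_t len)
{
    char *dst = malloc(len + 1);       /* allocation sized from len */

    if (!dst)
        return NULL;
    memcpy(dst, src, len);             /* size passed matches the allocation */
    dst[len] = '\0';
    return dst;
}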
static QuantizationTable *DestroyQuantizationTable(QuantizationTable *table) { assert(table != (QuantizationTable *) NULL); if (table->slot != (char *) NULL) table->slot=DestroyString(table->slot); if (table->description != (char *) NULL) table->description=DestroyString(table->description); if (table->levels != (unsigned int *) NULL) table->levels=(unsigned int *) RelinquishMagickMemory(table->levels); table=(QuantizationTable *) RelinquishMagickMemory(table); return(table); }
0
[ "CWE-416" ]
ImageMagick
39f226a9c137f547e12afde972eeba7551124493
90,250,907,886,306,300,000,000,000,000,000,000,000
12
https://github.com/ImageMagick/ImageMagick/issues/1641
xmlRegisterCharEncodingHandler(xmlCharEncodingHandlerPtr handler) { if (handlers == NULL) xmlInitCharEncodingHandlers(); if ((handler == NULL) || (handlers == NULL)) { xmlEncodingErr(XML_I18N_NO_HANDLER, "xmlRegisterCharEncodingHandler: NULL handler !\n", NULL); return; } if (nbCharEncodingHandler >= MAX_ENCODING_HANDLERS) { xmlEncodingErr(XML_I18N_EXCESS_HANDLER, "xmlRegisterCharEncodingHandler: Too many handler registered, see %s\n", "MAX_ENCODING_HANDLERS"); return; } handlers[nbCharEncodingHandler++] = handler; }
0
[ "CWE-189" ]
libxml2
69f04562f75212bfcabecd190ea8b06ace28ece2
198,415,676,516,062,200,000,000,000,000,000,000,000
16
Fix an off by one error in encoding this off by one error doesn't seems to reproduce on linux but the error is real.
hook_timer (struct t_weechat_plugin *plugin, long interval, int align_second, int max_calls, t_hook_callback_timer *callback, void *callback_data) { struct t_hook *new_hook; struct t_hook_timer *new_hook_timer; if ((interval <= 0) || !callback) return NULL; new_hook = malloc (sizeof (*new_hook)); if (!new_hook) return NULL; new_hook_timer = malloc (sizeof (*new_hook_timer)); if (!new_hook_timer) { free (new_hook); return NULL; } hook_init_data (new_hook, plugin, HOOK_TYPE_TIMER, HOOK_PRIORITY_DEFAULT, callback_data); new_hook->hook_data = new_hook_timer; new_hook_timer->callback = callback; new_hook_timer->interval = interval; new_hook_timer->align_second = align_second; new_hook_timer->remaining_calls = max_calls; hook_timer_init (new_hook); hook_add_to_list (new_hook); return new_hook; }
0
[ "CWE-20" ]
weechat
c265cad1c95b84abfd4e8d861f25926ef13b5d91
29,624,479,502,314,510,000,000,000,000,000,000,000
35
Fix verification of SSL certificates by calling gnutls verify callback (patch #7459)
flatpak_context_load_metadata (FlatpakContext *context, GKeyFile *metakey, GError **error) { gboolean remove; g_auto(GStrv) groups = NULL; gsize i; if (g_key_file_has_key (metakey, FLATPAK_METADATA_GROUP_CONTEXT, FLATPAK_METADATA_KEY_SHARED, NULL)) { g_auto(GStrv) shares = g_key_file_get_string_list (metakey, FLATPAK_METADATA_GROUP_CONTEXT, FLATPAK_METADATA_KEY_SHARED, NULL, error); if (shares == NULL) return FALSE; for (i = 0; shares[i] != NULL; i++) { FlatpakContextShares share; share = flatpak_context_share_from_string (parse_negated (shares[i], &remove), NULL); if (share == 0) g_debug ("Unknown share type %s", shares[i]); else { if (remove) flatpak_context_remove_shares (context, share); else flatpak_context_add_shares (context, share); } } } if (g_key_file_has_key (metakey, FLATPAK_METADATA_GROUP_CONTEXT, FLATPAK_METADATA_KEY_SOCKETS, NULL)) { g_auto(GStrv) sockets = g_key_file_get_string_list (metakey, FLATPAK_METADATA_GROUP_CONTEXT, FLATPAK_METADATA_KEY_SOCKETS, NULL, error); if (sockets == NULL) return FALSE; for (i = 0; sockets[i] != NULL; i++) { FlatpakContextSockets socket = flatpak_context_socket_from_string (parse_negated (sockets[i], &remove), NULL); if (socket == 0) g_debug ("Unknown socket type %s", sockets[i]); else { if (remove) flatpak_context_remove_sockets (context, socket); else flatpak_context_add_sockets (context, socket); } } } if (g_key_file_has_key (metakey, FLATPAK_METADATA_GROUP_CONTEXT, FLATPAK_METADATA_KEY_DEVICES, NULL)) { g_auto(GStrv) devices = g_key_file_get_string_list (metakey, FLATPAK_METADATA_GROUP_CONTEXT, FLATPAK_METADATA_KEY_DEVICES, NULL, error); if (devices == NULL) return FALSE; for (i = 0; devices[i] != NULL; i++) { FlatpakContextDevices device = flatpak_context_device_from_string (parse_negated (devices[i], &remove), NULL); if (device == 0) g_debug ("Unknown device type %s", devices[i]); else { if (remove) flatpak_context_remove_devices (context, device); else flatpak_context_add_devices (context, device); } } } if (g_key_file_has_key (metakey, FLATPAK_METADATA_GROUP_CONTEXT, FLATPAK_METADATA_KEY_FEATURES, NULL)) { g_auto(GStrv) features = g_key_file_get_string_list (metakey, FLATPAK_METADATA_GROUP_CONTEXT, FLATPAK_METADATA_KEY_FEATURES, NULL, error); if (features == NULL) return FALSE; for (i = 0; features[i] != NULL; i++) { FlatpakContextFeatures feature = flatpak_context_feature_from_string (parse_negated (features[i], &remove), NULL); if (feature == 0) g_debug ("Unknown feature type %s", features[i]); else { if (remove) flatpak_context_remove_features (context, feature); else flatpak_context_add_features (context, feature); } } } if (g_key_file_has_key (metakey, FLATPAK_METADATA_GROUP_CONTEXT, FLATPAK_METADATA_KEY_FILESYSTEMS, NULL)) { g_auto(GStrv) filesystems = g_key_file_get_string_list (metakey, FLATPAK_METADATA_GROUP_CONTEXT, FLATPAK_METADATA_KEY_FILESYSTEMS, NULL, error); if (filesystems == NULL) return FALSE; for (i = 0; filesystems[i] != NULL; i++) { const char *fs = parse_negated (filesystems[i], &remove); g_autofree char *filesystem = NULL; FlatpakFilesystemMode mode; if (!flatpak_context_parse_filesystem (fs, &filesystem, &mode, NULL)) g_debug ("Unknown filesystem type %s", filesystems[i]); else { if (remove) flatpak_context_take_filesystem (context, g_steal_pointer (&filesystem), FLATPAK_FILESYSTEM_MODE_NONE); else flatpak_context_take_filesystem (context, g_steal_pointer (&filesystem), mode); } } } if (g_key_file_has_key (metakey, FLATPAK_METADATA_GROUP_CONTEXT, FLATPAK_METADATA_KEY_PERSISTENT, NULL)) { g_auto(GStrv) 
persistent = g_key_file_get_string_list (metakey, FLATPAK_METADATA_GROUP_CONTEXT, FLATPAK_METADATA_KEY_PERSISTENT, NULL, error); if (persistent == NULL) return FALSE; for (i = 0; persistent[i] != NULL; i++) flatpak_context_set_persistent (context, persistent[i]); } if (g_key_file_has_group (metakey, FLATPAK_METADATA_GROUP_SESSION_BUS_POLICY)) { g_auto(GStrv) keys = NULL; gsize keys_count; keys = g_key_file_get_keys (metakey, FLATPAK_METADATA_GROUP_SESSION_BUS_POLICY, &keys_count, NULL); for (i = 0; i < keys_count; i++) { const char *key = keys[i]; g_autofree char *value = g_key_file_get_string (metakey, FLATPAK_METADATA_GROUP_SESSION_BUS_POLICY, key, NULL); FlatpakPolicy policy; if (!flatpak_verify_dbus_name (key, error)) return FALSE; policy = flatpak_policy_from_string (value, NULL); if ((int) policy != -1) flatpak_context_set_session_bus_policy (context, key, policy); } } if (g_key_file_has_group (metakey, FLATPAK_METADATA_GROUP_SYSTEM_BUS_POLICY)) { g_auto(GStrv) keys = NULL; gsize keys_count; keys = g_key_file_get_keys (metakey, FLATPAK_METADATA_GROUP_SYSTEM_BUS_POLICY, &keys_count, NULL); for (i = 0; i < keys_count; i++) { const char *key = keys[i]; g_autofree char *value = g_key_file_get_string (metakey, FLATPAK_METADATA_GROUP_SYSTEM_BUS_POLICY, key, NULL); FlatpakPolicy policy; if (!flatpak_verify_dbus_name (key, error)) return FALSE; policy = flatpak_policy_from_string (value, NULL); if ((int) policy != -1) flatpak_context_set_system_bus_policy (context, key, policy); } } if (g_key_file_has_group (metakey, FLATPAK_METADATA_GROUP_ENVIRONMENT)) { g_auto(GStrv) keys = NULL; gsize keys_count; keys = g_key_file_get_keys (metakey, FLATPAK_METADATA_GROUP_ENVIRONMENT, &keys_count, NULL); for (i = 0; i < keys_count; i++) { const char *key = keys[i]; g_autofree char *value = g_key_file_get_string (metakey, FLATPAK_METADATA_GROUP_ENVIRONMENT, key, NULL); flatpak_context_set_env_var (context, key, value); } } groups = g_key_file_get_groups (metakey, NULL); for (i = 0; groups[i] != NULL; i++) { const char *group = groups[i]; const char *subsystem; int j; if (g_str_has_prefix (group, FLATPAK_METADATA_GROUP_PREFIX_POLICY)) { g_auto(GStrv) keys = NULL; subsystem = group + strlen (FLATPAK_METADATA_GROUP_PREFIX_POLICY); keys = g_key_file_get_keys (metakey, group, NULL, NULL); for (j = 0; keys != NULL && keys[j] != NULL; j++) { const char *key = keys[j]; g_autofree char *policy_key = g_strdup_printf ("%s.%s", subsystem, key); g_auto(GStrv) values = NULL; int k; values = g_key_file_get_string_list (metakey, group, key, NULL, NULL); for (k = 0; values != NULL && values[k] != NULL; k++) flatpak_context_apply_generic_policy (context, policy_key, values[k]); } } } return TRUE; }
1
[ "CWE-74" ]
flatpak
4108e022452303093d8b90c838695a0476cb09c7
250,871,480,456,206,170,000,000,000,000,000,000,000
223
context: Add --unset-env option and a corresponding override This follows up from GHSA-4ppf-fxf6-vxg2 to fix missing functionality that I noticed while resolving that vulnerability, but is not required for fixing the vulnerability. Signed-off-by: Simon McVittie <[email protected]>
void CLASS parse_fuji(int offset) { unsigned entries, tag, len, save, c; fseek(ifp, offset, SEEK_SET); entries = get4(); if (entries > 255) return; while (entries--) { tag = get2(); len = get2(); save = ftell(ifp); if (tag == 0x100) { raw_height = get2(); raw_width = get2(); } else if (tag == 0x121) { height = get2(); if ((width = get2()) == 4284) width += 3; } else if (tag == 0x130) { fuji_layout = fgetc(ifp) >> 7; fuji_width = !(fgetc(ifp) & 8); } else if (tag == 0x131) { filters = 9; FORC(36) xtrans_abs[0][35 - c] = fgetc(ifp) & 3; } else if (tag == 0x2ff0) { FORC4 cam_mul[c ^ 1] = get2(); // IB start #ifdef LIBRAW_LIBRARY_BUILD } else if (tag == 0x9650) { short a = (short)get2(); float b = fMAX(1.0f, get2()); imgdata.makernotes.fuji.FujiExpoMidPointShift = a / b; } else if (tag == 0x2100) { FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][c ^ 1] = get2(); } else if (tag == 0x2200) { FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][c ^ 1] = get2(); } else if (tag == 0x2300) { FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][c ^ 1] = get2(); } else if (tag == 0x2301) { FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][c ^ 1] = get2(); } else if (tag == 0x2302) { FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_WW][c ^ 1] = get2(); } else if (tag == 0x2310) { FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][c ^ 1] = get2(); } else if (tag == 0x2400) { FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][c ^ 1] = get2(); #endif // IB end } else if (tag == 0xc000) { c = order; order = 0x4949; if ((tag = get4()) > 10000) tag = get4(); if (tag > 10000) tag = get4(); width = tag; height = get4(); #ifdef LIBRAW_LIBRARY_BUILD libraw_internal_data.unpacker_data.posRAFData = save; libraw_internal_data.unpacker_data.lenRAFData = (len >> 1); #endif order = c; } fseek(ifp, save + len, SEEK_SET); } height <<= fuji_layout; width >>= fuji_layout; }
0
[ "CWE-476", "CWE-119" ]
LibRaw
d7c3d2cb460be10a3ea7b32e9443a83c243b2251
241,557,489,018,302,540,000,000,000,000,000,000,000
99
Secunia SA75000 advisory: several buffer overruns
bool Column_definition::set_compressed(const char *method) { if (!method || !strcmp(method, zlib_compression_method->name)) { unireg_check= Field::TMYSQL_COMPRESSED; compression_method_ptr= zlib_compression_method; return false; } my_error(ER_UNKNOWN_COMPRESSION_METHOD, MYF(0), method); return true; }
0
[ "CWE-416", "CWE-703" ]
server
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
52,651,389,175,999,230,000,000,000,000,000,000,000
11
MDEV-24176 Server crashes after insert in the table with virtual column generated using date_format() and if() vcol_info->expr is allocated on expr_arena at parsing stage. Since expr item is allocated on expr_arena all its containee items must be allocated on expr_arena too. Otherwise fix_session_expr() will encounter prematurely freed item. When table is reopened from cache vcol_info contains stale expression. We refresh expression via TABLE::vcol_fix_exprs() but first we must prepare a proper context (Vcol_expr_context) which meets some requirements: 1. As noted above expr update must be done on expr_arena as there may be new items created. It was a bug in fix_session_expr_for_read() and was just not reproduced because of no second refix. Now refix is done for more cases so it does reproduce. Tests affected: vcol.binlog 2. Also name resolution context must be narrowed to the single table. Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes 3. sql_mode must be clean and not fail expr update. sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc must not affect vcol expression update. If the table was created successfully any further evaluation must not fail. Tests affected: main.func_like Reviewed by: Sergei Golubchik <[email protected]>
ssize_t ip_append_page(struct sock *sk, struct page *page, int offset, size_t size, int flags) { struct inet_sock *inet = inet_sk(sk); struct sk_buff *skb; struct rtable *rt; struct ip_options *opt = NULL; int hh_len; int mtu; int len; int err; unsigned int maxfraglen, fragheaderlen, fraggap; if (inet->hdrincl) return -EPERM; if (flags&MSG_PROBE) return 0; if (skb_queue_empty(&sk->sk_write_queue)) return -EINVAL; rt = (struct rtable *)inet->cork.dst; if (inet->cork.flags & IPCORK_OPT) opt = inet->cork.opt; if (!(rt->dst.dev->features&NETIF_F_SG)) return -EOPNOTSUPP; hh_len = LL_RESERVED_SPACE(rt->dst.dev); mtu = inet->cork.fragsize; fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0); maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen; if (inet->cork.length + size > 0xFFFF - fragheaderlen) { ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, mtu); return -EMSGSIZE; } if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) return -EINVAL; inet->cork.length += size; if ((size + skb->len > mtu) && (sk->sk_protocol == IPPROTO_UDP) && (rt->dst.dev->features & NETIF_F_UFO)) { skb_shinfo(skb)->gso_size = mtu - fragheaderlen; skb_shinfo(skb)->gso_type = SKB_GSO_UDP; } while (size > 0) { int i; if (skb_is_gso(skb)) len = size; else { /* Check if the remaining data fits into current packet. */ len = mtu - skb->len; if (len < size) len = maxfraglen - skb->len; } if (len <= 0) { struct sk_buff *skb_prev; int alloclen; skb_prev = skb; fraggap = skb_prev->len - maxfraglen; alloclen = fragheaderlen + hh_len + fraggap + 15; skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation); if (unlikely(!skb)) { err = -ENOBUFS; goto error; } /* * Fill in the control structures */ skb->ip_summed = CHECKSUM_NONE; skb->csum = 0; skb_reserve(skb, hh_len); /* * Find where to start putting bytes. */ skb_put(skb, fragheaderlen + fraggap); skb_reset_network_header(skb); skb->transport_header = (skb->network_header + fragheaderlen); if (fraggap) { skb->csum = skb_copy_and_csum_bits(skb_prev, maxfraglen, skb_transport_header(skb), fraggap, 0); skb_prev->csum = csum_sub(skb_prev->csum, skb->csum); pskb_trim_unique(skb_prev, maxfraglen); } /* * Put the packet on the pending queue. */ __skb_queue_tail(&sk->sk_write_queue, skb); continue; } i = skb_shinfo(skb)->nr_frags; if (len > size) len = size; if (skb_can_coalesce(skb, i, page, offset)) { skb_shinfo(skb)->frags[i-1].size += len; } else if (i < MAX_SKB_FRAGS) { get_page(page); skb_fill_page_desc(skb, i, page, offset, len); } else { err = -EMSGSIZE; goto error; } if (skb->ip_summed == CHECKSUM_NONE) { __wsum csum; csum = csum_page(page, offset, len); skb->csum = csum_block_add(skb->csum, csum, skb->len); } skb->len += len; skb->data_len += len; skb->truesize += len; atomic_add(len, &sk->sk_wmem_alloc); offset += len; size -= len; } return 0; error: inet->cork.length -= size; IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS); return err; }
0
[ "CWE-362" ]
linux-2.6
f6d8bd051c391c1c0458a30b2a7abcd939329259
311,977,164,162,068,370,000,000,000,000,000,000,000
142
inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options. The problem is that ip_make_skb() calls ip_setup_cork(), and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options) without any protection against another thread manipulating inet->opt. Another thread can change the inet->opt pointer and free the old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We can't insert an rcu_head in struct ip_options since it is included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <[email protected]> Cc: Herbert Xu <[email protected]> Signed-off-by: David S. Miller <[email protected]>
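A rough sketch of the RCU idiom this commit applies, written against the userspace RCU library (liburcu) so it stands alone; the structure and function names here are made up, and the kernel code uses the equivalent in-kernel primitives (rcu_read_lock/rcu_dereference/rcu_assign_pointer/kfree_rcu). Readers take a snapshot of the pointer inside a read-side critical section; the writer publishes a fresh copy and only frees the old one after a grace period.

#include <urcu.h>       /* liburcu; reader threads must call rcu_register_thread() */
#include <stdlib.h>

struct opts {                          /* stand-in for ip_options_rcu */
    int optlen;
};

static struct opts *current_opts;      /* RCU-protected pointer */

static int read_optlen(void)
{
    struct opts *o;
    int len = 0;

    rcu_read_lock();
    o = rcu_dereference(current_opts); /* stable snapshot for this section */
    if (o)
        len = o->optlen;
    rcu_read_unlock();
    return len;
}

static void replace_opts(struct opts *new_opts)
{
    struct opts *old = current_opts;   /* writer serialized externally */

    rcu_assign_pointer(current_opts, new_opts);
    synchronize_rcu();                 /* wait out readers of the old copy */
    free(old);
}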
PHP_FUNCTION(imagecolortransparent) { zval *IM; long COL = 0; gdImagePtr im; int argc = ZEND_NUM_ARGS(); if (zend_parse_parameters(argc TSRMLS_CC, "r|l", &IM, &COL) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); if (argc > 1) { gdImageColorTransparent(im, COL); } RETURN_LONG(gdImageGetTransparent(im)); }
0
[ "CWE-703", "CWE-189" ]
php-src
2938329ce19cb8c4197dec146c3ec887c6f61d01
278,464,989,253,954,450,000,000,000,000,000,000,000
19
Fixed bug #66356 (Heap Overflow Vulnerability in imagecrop()). Also fixed a bug where arguments were altered after some calls.
static void ctrn_write_ctso(GF_TrackFragmentRunBox *ctrn, GF_BitStream *bs, u32 ctso, u32 field_size) { if (!field_size) return; if (ctrn->ctso_multiplier) { gf_bs_write_int(bs, ctso / ctrn->ctso_multiplier, field_size); } else { gf_bs_write_int(bs, ctso, field_size); } }
0
[ "CWE-787" ]
gpac
388ecce75d05e11fc8496aa4857b91245007d26e
279,242,421,719,895,070,000,000,000,000,000,000,000
10
fixed #1587
int pneigh_delete(struct neigh_table *tbl, const void *pkey, struct net_device *dev) { struct pneigh_entry *n, **np; int key_len = tbl->key_len; u32 hash_val = *(u32 *)(pkey + key_len - 4); hash_val ^= (hash_val >> 16); hash_val ^= hash_val >> 8; hash_val ^= hash_val >> 4; hash_val &= PNEIGH_HASHMASK; write_lock_bh(&tbl->lock); for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL; np = &n->next) { if (!memcmp(n->key, pkey, key_len) && n->dev == dev) { *np = n->next; write_unlock_bh(&tbl->lock); if (tbl->pdestructor) tbl->pdestructor(n); if (n->dev) dev_put(n->dev); kfree(n); return 0; } } write_unlock_bh(&tbl->lock); return -ENOENT; }
0
[ "CWE-200" ]
linux-2.6
9ef1d4c7c7aca1cd436612b6ca785b726ffb8ed8
161,965,007,883,343,460,000,000,000,000,000,000,000
29
[NETLINK]: Missing initializations in dumped data Mostly missing initialization of padding fields of 1 or 2 bytes length, two instances of uninitialized nlmsgerr->msg of 16 bytes length. Signed-off-by: Patrick McHardy <[email protected]> Signed-off-by: David S. Miller <[email protected]>
TEST_P(MessengerTest, SyntheticInjectTest2) { g_ceph_context->_conf->set_val("ms_inject_socket_failures", "30"); g_ceph_context->_conf->set_val("ms_inject_internal_delays", "0.1"); SyntheticWorkload test_msg(8, 16, GetParam(), 100, Messenger::Policy::lossless_peer_reuse(0), Messenger::Policy::lossless_peer_reuse(0)); for (int i = 0; i < 100; ++i) { if (!(i % 10)) lderr(g_ceph_context) << "seeding connection " << i << dendl; test_msg.generate_connection(); } gen_type rng(time(NULL)); for (int i = 0; i < 1000; ++i) { if (!(i % 10)) { lderr(g_ceph_context) << "Op " << i << ": " << dendl; test_msg.print_internal_state(); } boost::uniform_int<> true_false(0, 99); int val = true_false(rng); if (val > 90) { test_msg.generate_connection(); } else if (val > 80) { test_msg.drop_connection(); } else if (val > 10) { test_msg.send_message(); } else { usleep(rand() % 500 + 100); } } test_msg.wait_for_done(); g_ceph_context->_conf->set_val("ms_inject_socket_failures", "0"); g_ceph_context->_conf->set_val("ms_inject_internal_delays", "0"); }
0
[ "CWE-287", "CWE-284" ]
ceph
5ead97120e07054d80623dada90a5cc764c28468
73,219,056,865,745,420,000,000,000,000,000,000,000
32
auth/cephx: add authorizer challenge Allow the accepting side of a connection to reject an initial authorizer with a random challenge. The connecting side then has to respond with an updated authorizer proving they are able to decrypt the service's challenge and that the new authorizer was produced for this specific connection instance. The accepting side requires this challenge and response unconditionally if the client side advertises they have the feature bit. Servers wishing to require this improved level of authentication simply have to require the appropriate feature. Signed-off-by: Sage Weil <[email protected]> (cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b) # Conflicts: # src/auth/Auth.h # src/auth/cephx/CephxProtocol.cc # src/auth/cephx/CephxProtocol.h # src/auth/none/AuthNoneProtocol.h # src/msg/Dispatcher.h # src/msg/async/AsyncConnection.cc - const_iterator - ::decode vs decode - AsyncConnection ctor arg noise - get_random_bytes(), not cct->random()
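The mechanism being cherry-picked is a conventional challenge/response round trip; the sketch below is only a generic illustration (the struct and the keyed_proof() function are placeholders, not Ceph's CephxProtocol types). The acceptor rejects the first authorizer with a random per-connection challenge, and the retried authorizer must carry a proof derived from that challenge, so an authorizer captured from another connection no longer verifies.

#include <stdint.h>
#include <stddef.h>

/* Placeholder for "MAC/encrypt the challenge with the shared service key". */
uint64_t keyed_proof(uint64_t challenge, const uint8_t *key, size_t keylen);

struct accept_state {
    uint64_t server_challenge;   /* random, generated per connection attempt */
};

/* Acceptor: reject the initial authorizer and hand back a challenge. */
static uint64_t issue_challenge(struct accept_state *st, uint64_t random_bytes)
{
    st->server_challenge = random_bytes;
    return st->server_challenge;
}

/* Acceptor: the retry must prove knowledge of this connection's challenge. */
static int verify_retry(const struct accept_state *st, uint64_t proof,
                        const uint8_t *key, size_t keylen)
{
    return proof == keyed_proof(st->server_challenge, key, keylen);
}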
static MagickOffsetType TIFFSeekCustomStream(const MagickOffsetType offset, const int whence,void *user_data) { PhotoshopProfile *profile; profile=(PhotoshopProfile *) user_data; switch (whence) { case SEEK_SET: default: { if (offset < 0) return(-1); profile->offset=offset; break; } case SEEK_CUR: { if (((offset > 0) && (profile->offset > (MAGICK_SSIZE_MAX-offset))) || ((offset < 0) && (profile->offset < (MAGICK_SSIZE_MIN-offset)))) { errno=EOVERFLOW; return(-1); } if ((profile->offset+offset) < 0) return(-1); profile->offset+=offset; break; } case SEEK_END: { if (((MagickOffsetType) profile->length+offset) < 0) return(-1); profile->offset=profile->length+offset; break; } } return(profile->offset); }
0
[ "CWE-125", "CWE-787" ]
ImageMagick
930ff0d1a9bc42925a7856e9ea53f5fc9f318bf3
179,094,529,073,299,840,000,000,000,000,000,000,000
41
eliminate heap buffer overflow vulnerability, thanks to ZhangJiaxing (@r0fm1a) from Codesafe Team of Legendsec at Qi'anxin Group
static void sctp_cmd_del_non_primary(struct sctp_association *asoc) { struct sctp_transport *t; struct list_head *pos; struct list_head *temp; list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { t = list_entry(pos, struct sctp_transport, transports); if (!sctp_cmp_addr_exact(&t->ipaddr, &asoc->peer.primary_addr)) { sctp_assoc_del_peer(asoc, &t->ipaddr); } } }
0
[]
linux
196d67593439b03088913227093e374235596e33
222,882,143,387,694,230,000,000,000,000,000,000,000
14
sctp: Add support to per-association statistics via a new SCTP_GET_ASSOC_STATS call The current SCTP stack is lacking a mechanism to have per association statistics. This is an implementation modeled after OpenSolaris' SCTP_GET_ASSOC_STATS. Userspace part will follow on lksctp if/when there is a general ACK on this. V4: - Move ipackets++ before q->immediate.func() for consistency reasons - Move sctp_max_rto() at the end of sctp_transport_update_rto() to avoid returning bogus RTO values - return asoc->rto_min when max_obs_rto value has not changed V3: - Increase ictrlchunks in sctp_assoc_bh_rcv() as well - Move ipackets++ to sctp_inq_push() - return 0 when no rto updates took place since the last call V2: - Implement partial retrieval of stat struct to cope for future expansion - Kill the rtxpackets counter as it cannot be precise anyway - Rename outseqtsns to outofseqtsns to make it clearer that these are out of sequence unexpected TSNs - Move asoc->ipackets++ under a lock to avoid potential miscounts - Fold asoc->opackets++ into the already existing asoc check - Kill unneeded (q->asoc) test when increasing rtxchunks - Do not count octrlchunks if sending failed (SCTP_XMIT_OK != 0) - Don't count SHUTDOWNs as SACKs - Move SCTP_GET_ASSOC_STATS to the private space API - Adjust the len check in sctp_getsockopt_assoc_stats() to allow for future struct growth - Move association statistics in their own struct - Update idupchunks when we send a SACK with dup TSNs - return min_rto in max_rto when RTO has not changed. Also return the transport when max_rto last changed. Signed-off: Michele Baldessari <[email protected]> Acked-by: Vlad Yasevich <[email protected]> Signed-off-by: David S. Miller <[email protected]>
void gfs2_trim_blocks(struct inode *inode) { u64 size = inode->i_size; int ret; ret = do_shrink(inode, size, size); WARN_ON(ret != 0); }
0
[ "CWE-119", "CWE-787" ]
linux
64dd153c83743af81f20924c6343652d731eeecb
163,475,901,414,896,640,000,000,000,000,000,000,000
8
GFS2: rewrite fallocate code to write blocks directly GFS2's fallocate code currently goes through the page cache. Since it's only writing to the end of the file or to holes in it, it doesn't need to, and it was causing issues on low memory environments. This patch pulls in some of Steve's block allocation work, and uses it to simply allocate the blocks for the file, and zero them out at allocation time. It provides a slight performance increase, and it dramatically simplifies the code. Signed-off-by: Benjamin Marzinski <[email protected]> Signed-off-by: Steven Whitehouse <[email protected]>
static int php_zip_ops_flush(php_stream *stream TSRMLS_DC) { if (!stream) { return 0; } return 0; }
0
[ "CWE-119" ]
php-src
81406c0c1d45f75fcc7972ed974d2597abb0b9e9
291,576,738,299,726,550,000,000,000,000,000,000,000
8
Fix for bug #72520
static int ZEND_FASTCALL ZEND_FETCH_IS_SPEC_TMP_HANDLER(ZEND_OPCODE_HANDLER_ARGS) { return zend_fetch_var_address_helper_SPEC_TMP(BP_VAR_IS, ZEND_OPCODE_HANDLER_ARGS_PASSTHRU); }
0
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
339,830,006,569,422,260,000,000,000,000,000,000,000
4
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
Status DenseCountSparseOutputShapeFn(InferenceContext *c) { auto values = c->input(0); auto weights = c->input(1); ShapeHandle output; auto num_weights = c->NumElements(weights); if (c->ValueKnown(num_weights) && c->Value(num_weights) == 0) { output = values; } else { TF_RETURN_IF_ERROR(c->Merge(weights, values, &output)); } auto rank = c->Rank(output); auto nvals = c->UnknownDim(); c->set_output(0, c->Matrix(nvals, rank)); // out.indices c->set_output(1, c->Vector(nvals)); // out.values c->set_output(2, c->Vector(rank)); // out.dense_shape return Status::OK(); }
0
[ "CWE-125" ]
tensorflow
701cfaca222a82afbeeb17496bd718baa65a67d2
115,759,854,333,683,580,000,000,000,000,000,000,000
17
Fix heap out of bounds error in tf.raw_ops.SparseCountSparseOutput shape inference when it is called with invalid inputs, and add a test for it. PiperOrigin-RevId: 405766415 Change-Id: I77d244ef35f351ef7b6f821efd959cac2c66db24
reconstructLineOffsets (OPENEXR_IMF_INTERNAL_NAMESPACE::IStream &is, LineOrder lineOrder, vector<Int64> &lineOffsets) { Int64 position = is.tellg(); try { for (unsigned int i = 0; i < lineOffsets.size(); i++) { Int64 lineOffset = is.tellg(); int y; OPENEXR_IMF_INTERNAL_NAMESPACE::Xdr::read <OPENEXR_IMF_INTERNAL_NAMESPACE::StreamIO> (is, y); int dataSize; OPENEXR_IMF_INTERNAL_NAMESPACE::Xdr::read <OPENEXR_IMF_INTERNAL_NAMESPACE::StreamIO> (is, dataSize); // check for bad values to prevent overflow if ( dataSize < 0 ) { throw IEX_NAMESPACE::IoExc("Invalid chunk size"); } Xdr::skip <StreamIO> (is, dataSize); if (lineOrder == INCREASING_Y) lineOffsets[i] = lineOffset; else lineOffsets[lineOffsets.size() - i - 1] = lineOffset; } } catch (...) //NOSONAR - suppress vulnerability reports from SonarCloud. { // // Suppress all exceptions. This functions is // called only to reconstruct the line offset // table for incomplete files, and exceptions // are likely. // } is.clear(); is.seekg (position); }
0
[ "CWE-770" ]
openexr
bc88cdb6c97fbf5bc5d11ad8ca55306da931283a
340,205,869,863,239,100,000,000,000,000,000,000,000
44
sanity check ScanlineInput bytesPerLine instead of lineOffset size (#863) Signed-off-by: Peter Hillman <[email protected]> Co-authored-by: Cary Phillips <[email protected]>
static int mptsas_process_scsi_io_request(MPTSASState *s, MPIMsgSCSIIORequest *scsi_io, hwaddr addr) { MPTSASRequest *req; MPIMsgSCSIIOReply reply; SCSIDevice *sdev; int status; mptsas_fix_scsi_io_endianness(scsi_io); trace_mptsas_process_scsi_io_request(s, scsi_io->Bus, scsi_io->TargetID, scsi_io->LUN[1], scsi_io->DataLength); status = mptsas_scsi_device_find(s, scsi_io->Bus, scsi_io->TargetID, scsi_io->LUN, &sdev); if (status) { goto bad; } req = g_new0(MPTSASRequest, 1); QTAILQ_INSERT_TAIL(&s->pending, req, next); req->scsi_io = *scsi_io; req->dev = s; status = mptsas_build_sgl(s, req, addr); if (status) { goto free_bad; } if (req->qsg.size < scsi_io->DataLength) { trace_mptsas_sgl_overflow(s, scsi_io->MsgContext, scsi_io->DataLength, req->qsg.size); status = MPI_IOCSTATUS_INVALID_SGL; goto free_bad; } req->sreq = scsi_req_new(sdev, scsi_io->MsgContext, scsi_io->LUN[1], scsi_io->CDB, req); if (req->sreq->cmd.xfer > scsi_io->DataLength) { goto overrun; } switch (scsi_io->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK) { case MPI_SCSIIO_CONTROL_NODATATRANSFER: if (req->sreq->cmd.mode != SCSI_XFER_NONE) { goto overrun; } break; case MPI_SCSIIO_CONTROL_WRITE: if (req->sreq->cmd.mode != SCSI_XFER_TO_DEV) { goto overrun; } break; case MPI_SCSIIO_CONTROL_READ: if (req->sreq->cmd.mode != SCSI_XFER_FROM_DEV) { goto overrun; } break; } if (scsi_req_enqueue(req->sreq)) { scsi_req_continue(req->sreq); } return 0; overrun: trace_mptsas_scsi_overflow(s, scsi_io->MsgContext, req->sreq->cmd.xfer, scsi_io->DataLength); status = MPI_IOCSTATUS_SCSI_DATA_OVERRUN; free_bad: mptsas_free_request(req); bad: memset(&reply, 0, sizeof(reply)); reply.TargetID = scsi_io->TargetID; reply.Bus = scsi_io->Bus; reply.MsgLength = sizeof(reply) / 4; reply.Function = scsi_io->Function; reply.CDBLength = scsi_io->CDBLength; reply.SenseBufferLength = scsi_io->SenseBufferLength; reply.MsgContext = scsi_io->MsgContext; reply.SCSIState = MPI_SCSI_STATE_NO_SCSI_STATUS; reply.IOCStatus = status; mptsas_fix_scsi_io_reply_endianness(&reply); mptsas_reply(s, (MPIDefaultReply *)&reply); return 0; }
1
[ "CWE-416" ]
qemu
3791642c8d60029adf9b00bcb4e34d7d8a1aea4d
283,982,666,294,271,800,000,000,000,000,000,000,000
91
mptsas: Remove unused MPTSASState 'pending' field (CVE-2021-3392) While processing SCSI i/o requests in mptsas_process_scsi_io_request(), the Megaraid emulator appends new MPTSASRequest object 'req' to the 's->pending' queue. In case of an error, this same object gets dequeued in mptsas_free_request() only if SCSIRequest object 'req->sreq' is initialised. This may lead to a use-after-free issue. Since s->pending is actually not used, simply remove it from MPTSASState. Cc: [email protected] Signed-off-by: Michael Tokarev <[email protected]> Reviewed-by: Philippe Mathieu-Daudé <[email protected]> Signed-off-by: Philippe Mathieu-Daudé <[email protected]> Reported-by: Cheolwoo Myung <[email protected]> Message-id: [email protected] Message-Id: <[email protected]> Suggested-by: Paolo Bonzini <[email protected]> Reported-by: Cheolwoo Myung <[email protected]> BugLink: https://bugs.launchpad.net/qemu/+bug/1914236 (CVE-2021-3392) Fixes: e351b826112 ("hw: Add support for LSI SAS1068 (mptsas) device") [PMD: Reworded description, added more tags] Signed-off-by: Philippe Mathieu-Daudé <[email protected]> Reviewed-by: Peter Maydell <[email protected]> Signed-off-by: Peter Maydell <[email protected]>
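As a generic illustration of the failure mode described here (not the QEMU code): an object is linked into a pending list as soon as it is allocated, but an early error path frees it without unlinking it, so any later walk of the list touches freed memory. Unlinking before freeing, or not linking until the request is fully set up (or, as in this commit, dropping the unused list entirely), removes the dangling reference.

#include <stdlib.h>
#include <sys/queue.h>   /* BSD TAILQ macros */

struct request {
    int id;
    TAILQ_ENTRY(request) next;
};

TAILQ_HEAD(req_list, request);

static void broken_submit(struct req_list *pending, int id, int will_fail)
{
    struct request *req = calloc(1, sizeof(*req));

    if (!req)
        return;
    TAILQ_INSERT_TAIL(pending, req, next);   /* linked before it is ready */
    if (will_fail) {
        free(req);            /* BUG: still reachable through 'pending' */
        return;
    }
    req->id = id;
}

static void fixed_submit(struct req_list *pending, int id, int will_fail)
{
    struct request *req = calloc(1, sizeof(*req));

    if (!req || will_fail) {  /* fail before any global linkage */
        free(req);
        return;
    }
    req->id = id;
    TAILQ_INSERT_TAIL(pending, req, next);
}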
static inline void register_as_ext3(void) { int err = register_filesystem(&ext3_fs_type); if (err) printk(KERN_WARNING "EXT4-fs: Unable to register as ext3 (%d)\n", err); }
0
[ "CWE-703" ]
linux
744692dc059845b2a3022119871846e74d4f6e11
62,412,992,574,985,370,000,000,000,000,000,000,000
7
ext4: use ext4_get_block_write in buffer write Allocate uninitialized extent before ext4 buffer write and convert the extent to initialized after io completes. The purpose is to make sure an extent can only be marked initialized after it has been written with new data so we can safely drop the i_mutex lock in ext4 DIO read without exposing stale data. This helps to improve multi-thread DIO read performance on high-speed disks. Skip the nobh and data=journal mount cases to make things simple for now. Signed-off-by: Jiaying Zhang <[email protected]> Signed-off-by: "Theodore Ts'o" <[email protected]>
static void vrend_pipe_resource_unref(struct pipe_resource *pres, UNUSED void *data) { struct vrend_resource *res = (struct vrend_resource *)pres; if (vrend_state.finishing || pipe_reference(&res->base.reference, NULL)) vrend_renderer_resource_destroy(res); }
0
[ "CWE-787" ]
virglrenderer
95e581fd181b213c2ed7cdc63f2abc03eaaa77ec
55,158,397,831,269,250,000,000,000,000,000,000,000
8
vrend: Add test to resource OOB write and fix it v2: Also check that no depth != 1 has been send when none is due Closes: #250 Signed-off-by: Gert Wollny <[email protected]> Reviewed-by: Chia-I Wu <[email protected]>
std::string help() const override { return "count objects in collection"; }
0
[ "CWE-20" ]
mongo
722f06f3217c029ef9c50062c8cc775966fd7ead
134,838,774,082,343,170,000,000,000,000,000,000,000
3
SERVER-38275 ban find explain with UUID
gxps_archive_input_stream_class_init (GXPSArchiveInputStreamClass *klass) { GObjectClass *object_class = G_OBJECT_CLASS (klass); GInputStreamClass *istream_class = G_INPUT_STREAM_CLASS (klass); object_class->finalize = gxps_archive_input_stream_finalize; istream_class->read_fn = gxps_archive_input_stream_read; istream_class->skip = gxps_archive_input_stream_skip; istream_class->close_fn = gxps_archive_input_stream_close; }
0
[ "CWE-125" ]
libgxps
b458226e162fe1ffe7acb4230c114a52ada5131b
66,113,534,095,057,290,000,000,000,000,000,000,000
11
gxps-archive: Ensure gxps_archive_read_entry() fills the GError in case of failure And fix the callers to not overwrite the GError.
atrim(const char *str) { size_t start; // Position of first non-space char size_t term; // Position of 0-terminator size_t size; char *result; if (!str) return NULL; start = 0; term = strlen(str); while ((start < term) && isspace(str[start])) start++; while ((term > start) && isspace(str[term - 1])) term--; size = term - start + 1; result = malloc(size); memcpy(result, str + start, size); result[size - 1] = '\0'; return result; }
0
[ "CWE-416" ]
owntone-server
246d8ae0cef27377e5dfe9ee3ad87e864d6b6266
37,580,331,786,808,927,000,000,000,000,000,000,000
27
[misc] Fix use-after-free in net_bind() Thanks to Ba Jinsheng for reporting this bug
static void __device_release_driver(struct device *dev, struct device *parent) { struct device_driver *drv; drv = dev->driver; if (drv) { while (device_links_busy(dev)) { __device_driver_unlock(dev, parent); device_links_unbind_consumers(dev); __device_driver_lock(dev, parent); /* * A concurrent invocation of the same function might * have released the driver successfully while this one * was waiting, so check for that. */ if (dev->driver != drv) return; } pm_runtime_get_sync(dev); pm_runtime_clean_up_links(dev); driver_sysfs_remove(dev); if (dev->bus) blocking_notifier_call_chain(&dev->bus->p->bus_notifier, BUS_NOTIFY_UNBIND_DRIVER, dev); pm_runtime_put_sync(dev); device_remove_file(dev, &dev_attr_state_synced); device_remove_groups(dev, drv->dev_groups); if (dev->bus && dev->bus->remove) dev->bus->remove(dev); else if (drv->remove) drv->remove(dev); device_links_driver_cleanup(dev); devres_release_all(dev); arch_teardown_dma_ops(dev); dev->driver = NULL; dev_set_drvdata(dev, NULL); if (dev->pm_domain && dev->pm_domain->dismiss) dev->pm_domain->dismiss(dev); pm_runtime_reinit(dev); dev_pm_set_driver_flags(dev, 0); klist_remove(&dev->p->knode_driver); device_pm_check_callbacks(dev); if (dev->bus) blocking_notifier_call_chain(&dev->bus->p->bus_notifier, BUS_NOTIFY_UNBOUND_DRIVER, dev); kobject_uevent(&dev->kobj, KOBJ_UNBIND); } }
0
[ "CWE-787" ]
linux
aa838896d87af561a33ecefea1caa4c15a68bc47
248,953,414,688,769,060,000,000,000,000,000,000,000
62
drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions Convert the various sprintf fmaily calls in sysfs device show functions to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety. Done with: $ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 . And cocci script: $ cat sysfs_emit_dev.cocci @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - sprintf(buf, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - snprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - scnprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; expression chr; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - strcpy(buf, chr); + sysfs_emit(buf, chr); ...> } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - sprintf(buf, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - snprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - scnprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... - len += scnprintf(buf + len, PAGE_SIZE - len, + len += sysfs_emit_at(buf, len, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; expression chr; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { ... - strcpy(buf, chr); - return strlen(buf); + return sysfs_emit(buf, chr); } Signed-off-by: Joe Perches <[email protected]> Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com Signed-off-by: Greg Kroah-Hartman <[email protected]>
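A before/after of what the cocci conversion produces for a single attribute (illustrative only; the foo attribute and read_foo() are made up): sysfs_emit() knows the sysfs buffer is a full page, so the show() callback no longer passes PAGE_SIZE by hand. This is kernel code, so it is a sketch rather than a standalone program.

/* Before: open-coded bound in a sysfs show() callback. */
static ssize_t foo_show_old(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
    return scnprintf(buf, PAGE_SIZE, "%d\n", read_foo(dev));
}

/* After: sysfs_emit() enforces the PAGE_SIZE bound internally. */
static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
    return sysfs_emit(buf, "%d\n", read_foo(dev));
}
static DEVICE_ATTR_RO(foo);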
static double mp_permutations(_cimg_math_parser& mp) { return cimg::permutations((int)_mp_arg(2),(int)_mp_arg(3),(bool)_mp_arg(4)); }
0
[ "CWE-119", "CWE-787" ]
CImg
ac8003393569aba51048c9d67e1491559877b1d1
253,142,240,063,271,400,000,000,000,000,000,000,000
3
.
static void flush_cpu_slab(void *d) { struct kmem_cache *s = d; __flush_cpu_slab(s, smp_processor_id()); }
0
[ "CWE-189" ]
linux
f8bd2258e2d520dff28c855658bd24bdafb5102d
148,917,511,392,805,060,000,000,000,000,000,000,000
6
remove div_long_long_rem x86 is currently the only arch that provides an optimized div_long_long_rem, and it has the downside that one has to be very careful that the divide doesn't overflow. The API is a little awkward, as the arguments for the unsigned divide are signed. The signed version also doesn't handle a negative divisor and produces worse code on 64bit archs. There is little incentive to keep this API alive, so this converts the few users to the new API. Signed-off-by: Roman Zippel <[email protected]> Cc: Ralf Baechle <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: john stultz <[email protected]> Cc: Christoph Lameter <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static int handle_pml_full(struct kvm_vcpu *vcpu) { unsigned long exit_qualification; trace_kvm_pml_full(vcpu->vcpu_id); exit_qualification = vmcs_readl(EXIT_QUALIFICATION); /* * PML buffer FULL happened while executing iret from NMI, * "blocked by NMI" bit has to be set before next VM entry. */ if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && cpu_has_virtual_nmis() && (exit_qualification & INTR_INFO_UNBLOCK_NMI)) vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); /* * PML buffer already flushed at beginning of VMEXIT. Nothing to do * here.., and there's no userspace involvement needed for PML. */ return 1; }
0
[ "CWE-284", "CWE-264" ]
linux
3ce424e45411cf5a13105e0386b6ecf6eeb4f66f
339,541,641,801,181,440,000,000,000,000,000,000,000
24
kvm:vmx: more complete state update on APICv on/off The function to update APICv on/off state (in particular, to deactivate it when enabling Hyper-V SynIC) is incomplete: it doesn't adjust APICv-related fields among secondary processor-based VM-execution controls. As a result, Windows 2012 guests get stuck when SynIC-based auto-EOI interrupt intersected with e.g. an IPI in the guest. In addition, the MSR intercept bitmap isn't updated every time "virtualize x2APIC mode" is toggled. This path can only be triggered by a malicious guest, because Windows didn't use x2APIC but rather their own synthetic APIC access MSRs; however a guest running in a SynIC-enabled VM could switch to x2APIC and thus obtain direct access to host APIC MSRs (CVE-2016-4440). The patch fixes those omissions. Signed-off-by: Roman Kagan <[email protected]> Reported-by: Steve Rutherford <[email protected]> Reported-by: Yang Zhang <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
static void set_output_verbosity(int level, uchar priority) { int j; if (level > MAX_VERBOSITY) level = MAX_VERBOSITY; for (j = 1; j <= level; j++) { parse_output_words(info_words, info_levels, info_verbosity[j], priority); parse_output_words(debug_words, debug_levels, debug_verbosity[j], priority); } }
0
[]
rsync
eac858085e3ac94ec0ab5061d11f52652c90a869
51,223,469,629,538,170,000,000,000,000,000,000,000
12
Add compat flag to allow proper seed checksum order. Fixes the equivalent of librsync's CVE-2014-8242 issue.
static int ca8210_register_ext_clock(struct spi_device *spi) { struct device_node *np = spi->dev.of_node; struct ca8210_priv *priv = spi_get_drvdata(spi); struct ca8210_platform_data *pdata = spi->dev.platform_data; int ret = 0; if (!np) return -EFAULT; priv->clk = clk_register_fixed_rate( &spi->dev, np->name, NULL, 0, pdata->extclockfreq ); if (IS_ERR(priv->clk)) { dev_crit(&spi->dev, "Failed to register external clk\n"); return PTR_ERR(priv->clk); } ret = of_clk_add_provider(np, of_clk_src_simple_get, priv->clk); if (ret) { clk_unregister(priv->clk); dev_crit( &spi->dev, "Failed to register external clock as clock provider\n" ); } else { dev_info(&spi->dev, "External clock set as clock provider\n"); } return ret; }
0
[ "CWE-400", "CWE-401" ]
linux
6402939ec86eaf226c8b8ae00ed983936b164908
272,507,479,640,332,180,000,000,000,000,000,000,000
35
ieee802154: ca8210: prevent memory leak In ca8210_probe the allocated pdata needs to be assigned to spi_device->dev.platform_data before calling ca8210_get_platform_data. Otherwise, when ca8210_get_platform_data fails, pdata cannot be released. Signed-off-by: Navid Emamdoost <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Stefan Schmidt <[email protected]>
void clear(Content plane = EMPTY) { *this = Nef_polyhedron_2(plane); }
0
[ "CWE-269" ]
cgal
618b409b0fbcef7cb536a4134ae3a424ef5aae45
242,571,832,715,956,060,000,000,000,000,000,000,000
2
Fix Nef_2 and Nef_S2 IO
void NumberFormatTest::Test11626_CustomizeCurrencyPluralInfo() { IcuTestErrorCode errorCode(*this, "Test11626_CustomizeCurrencyPluralInfo"); // Ticket #11626: No unit test demonstrating how to use CurrencyPluralInfo to // change formatting spelled out currencies // Use locale sr because it has interesting plural rules. Locale locale("sr"); LocalPointer<DecimalFormatSymbols> symbols(new DecimalFormatSymbols(locale, errorCode), errorCode); CurrencyPluralInfo info(locale, errorCode); if (!assertSuccess("", errorCode, true, __FILE__, __LINE__)) { return; } info.setCurrencyPluralPattern(u"one", u"0 qwerty", errorCode); info.setCurrencyPluralPattern(u"few", u"0 dvorak", errorCode); DecimalFormat df(u"#", symbols.orphan(), UNUM_CURRENCY_PLURAL, errorCode); df.setCurrencyPluralInfo(info); df.setCurrency(u"USD"); df.setMaximumFractionDigits(0); UnicodeString result; assertEquals("Plural one", u"1 qwerty", df.format(1, result, errorCode)); assertEquals("Plural few", u"3 dvorak", df.format(3, result.remove(), errorCode)); assertEquals("Plural other", u"99 америчких долара", df.format(99, result.remove(), errorCode)); info.setPluralRules(u"few: n is 1; one: n in 2..4", errorCode); df.setCurrencyPluralInfo(info); assertEquals("Plural one", u"1 dvorak", df.format(1, result.remove(), errorCode)); assertEquals("Plural few", u"3 qwerty", df.format(3, result.remove(), errorCode)); assertEquals("Plural other", u"99 америчких долара", df.format(99, result.remove(), errorCode)); }
0
[ "CWE-190" ]
icu
53d8c8f3d181d87a6aa925b449b51c4a2c922a51
195,608,823,854,520,960,000,000,000,000,000,000,000
27
ICU-20246 Fixing another integer overflow in number parsing.
static int process_packet(struct pptp_conn_t *conn) { struct pptp_header *hdr = (struct pptp_header *)conn->in_buf; switch(ntohs(hdr->ctrl_type)) { case PPTP_START_CTRL_CONN_RQST: return pptp_start_ctrl_conn_rqst(conn); case PPTP_STOP_CTRL_CONN_RQST: return pptp_stop_ctrl_conn_rqst(conn); case PPTP_STOP_CTRL_CONN_RPLY: return pptp_stop_ctrl_conn_rply(conn); case PPTP_OUT_CALL_RQST: return pptp_out_call_rqst(conn); case PPTP_ECHO_RQST: return pptp_echo_rqst(conn); case PPTP_ECHO_RPLY: return pptp_echo_rply(conn); case PPTP_CALL_CLEAR_RQST: return pptp_call_clear_rqst(conn); case PPTP_SET_LINK_INFO: if (conf_verbose) log_ppp_info2("recv [PPTP Set-Link-Info]\n"); return 0; default: log_ppp_warn("recv [PPTP Unknown (%x)]\n", ntohs(hdr->ctrl_type)); } return 0; }
0
[ "CWE-787" ]
accel-ppp
a0b8bfc4e74ff31b15ccfa6c626e3bbc591ba98f
83,293,978,135,181,760,000,000,000,000,000,000,000
28
Fix post_msg implementation bug I think the error handling code of `post_msg` is wrongly implemented due to a coding typo. `EPIPE` should also be considered, and the function should then return -1, just like `PPTP_write`: https://github.com/xebd/accel-ppp/blob/1b8711cf75a7c278d99840112bc7a396398e0205/accel-pppd/ctrl/pptp/pptp.c#L539-L570 This PR fixes #158.
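A generic sketch of the error handling this report asks for (this is not the accel-ppp post_msg body; the function shape is assumed): a partial-write loop that retries transient errors and treats EPIPE, like other hard errors, as a failed send by returning -1.

#include <errno.h>
#include <unistd.h>

/* Returns 0 on success, -1 on a hard error such as EPIPE. */
static int write_all(int fd, const void *buf, size_t len)
{
    const char *p = buf;

    while (len) {
        ssize_t n = write(fd, p, len);

        if (n < 0) {
            if (errno == EINTR || errno == EAGAIN)
                continue;          /* transient: retry */
            return -1;             /* EPIPE and friends: report failure */
        }
        p += n;
        len -= (size_t)n;
    }
    return 0;
}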
RI_FKey_restrict_del(PG_FUNCTION_ARGS) { /* * Check that this is a valid trigger call on the right time and event. */ ri_CheckTrigger(fcinfo, "RI_FKey_restrict_del", RI_TRIGTYPE_DELETE); /* * Share code with NO ACTION case. */ return ri_restrict_del((TriggerData *) fcinfo->context, false); }
0
[ "CWE-209" ]
postgres
804b6b6db4dcfc590a468e7be390738f9f7755fb
166,230,592,668,221,850,000,000,000,000,000,000,000
12
Fix column-privilege leak in error-message paths While building error messages to return to the user, BuildIndexValueDescription, ExecBuildSlotValueDescription and ri_ReportViolation would happily include the entire key or entire row in the result returned to the user, even if the user didn't have access to view all of the columns being included. Instead, include only those columns which the user is providing or which the user has select rights on. If the user does not have any rights to view the table or any of the columns involved then no detail is provided and a NULL value is returned from BuildIndexValueDescription and ExecBuildSlotValueDescription. Note that, for key cases, the user must have access to all of the columns for the key to be shown; a partial key will not be returned. Further, in master only, do not return any data for cases where row security is enabled on the relation and row security should be applied for the user. This required a bit of refactoring and moving of things around related to RLS- note the addition of utils/misc/rls.c. Back-patch all the way, as column-level privileges are now in all supported versions. This has been assigned CVE-2014-8161, but since the issue and the patch have already been publicized on pgsql-hackers, there's no point in trying to hide this commit.
gnutls_priority_mac_list(gnutls_priority_t pcache, const unsigned int **list) { if (pcache->mac.algorithms == 0) return 0; *list = pcache->mac.priority; return pcache->mac.algorithms; }
0
[ "CWE-310" ]
gnutls
21f89efad7014a5ee0debd4cd3d59e27774b29e6
236,772,490,313,698,100,000,000,000,000,000,000,000
9
handshake: add FALLBACK_SCSV priority option This allows clients to enable the TLS_FALLBACK_SCSV mechanism during the handshake, as defined in RFC7507.
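Client-side use of the new option would look roughly like the sketch below; the priority keyword name (%FALLBACK_SCSV) is taken from GnuTLS's priority-string syntax and should be checked against the version in use. Per RFC 7507 it belongs only on the artificially downgraded retry connection, never on the initial attempt.

#include <gnutls/gnutls.h>

/* 'session' is an already-initialized client gnutls_session_t being used
 * for a fallback retry at a lower protocol version. */
static int enable_fallback_scsv(gnutls_session_t session)
{
    const char *err = NULL;

    return gnutls_priority_set_direct(session, "NORMAL:%FALLBACK_SCSV", &err);
}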
static Bool gf_sm_check_for_modif(GF_SceneEngine *seng, GF_AUContext *au) { GF_Command *com; Bool modified=0; u32 i=0; /*au is marked as modified - this happens when commands are concatenated into the au*/ if (au->flags & GF_SM_AU_MODIFIED) { au->flags &= ~GF_SM_AU_MODIFIED; modified=1; } /*check each command*/ while (NULL != (com = gf_list_enum(au->commands, &i))) { u32 j=0; GF_CommandField *field; if (!com->node) continue; /*check root node (for SCENE_REPLACE) */ if (gf_node_dirty_get(com->node)) { modified=1; gf_node_dirty_reset(com->node, 1); } /*check all command fields of type SFNODE or MFNODE*/ while (NULL != (field = gf_list_enum(com->command_fields, &j))) { switch (field->fieldType) { case GF_SG_VRML_SFNODE: if (field->new_node) { if (gf_node_dirty_get(field->new_node)) { modified=1; gf_node_dirty_reset(field->new_node, 1); } } break; case GF_SG_VRML_MFNODE: if (field->field_ptr) { GF_ChildNodeItem *child; child = field->node_list; while (child) { if (gf_node_dirty_get(child->node)) { modified=1; gf_node_dirty_reset(child->node, 1); } child = child->next; } } break; } } } if (!seng->first_dims_sent) { if (au->owner->codec_id==GF_CODECID_DIMS) { GF_Node *root = gf_sg_get_root_node(seng->ctx->scene_graph); if (gf_node_dirty_get(root)) { modified=1; gf_node_dirty_reset(root, 1); } } else { } seng->first_dims_sent = 1; } return modified; }
0
[ "CWE-787" ]
gpac
ea1eca00fd92fa17f0e25ac25652622924a9a6a0
205,837,638,085,984,600,000,000,000,000,000,000,000
61
fixed #2138
ins_redraw(int ready) // not busy with something { #ifdef FEAT_CONCEAL linenr_T conceal_old_cursor_line = 0; linenr_T conceal_new_cursor_line = 0; int conceal_update_lines = FALSE; #endif if (char_avail()) return; // Trigger CursorMoved if the cursor moved. Not when the popup menu is // visible, the command might delete it. if (ready && (has_cursormovedI() # ifdef FEAT_PROP_POPUP || popup_visible # endif # if defined(FEAT_CONCEAL) || curwin->w_p_cole > 0 # endif ) && !EQUAL_POS(last_cursormoved, curwin->w_cursor) && !pum_visible()) { # ifdef FEAT_SYN_HL // Need to update the screen first, to make sure syntax // highlighting is correct after making a change (e.g., inserting // a "(". The autocommand may also require a redraw, so it's done // again below, unfortunately. if (syntax_present(curwin) && must_redraw) update_screen(0); # endif if (has_cursormovedI()) { // Make sure curswant is correct, an autocommand may call // getcurpos(). update_curswant(); ins_apply_autocmds(EVENT_CURSORMOVEDI); } #ifdef FEAT_PROP_POPUP if (popup_visible) popup_check_cursor_pos(); #endif # ifdef FEAT_CONCEAL if (curwin->w_p_cole > 0) { conceal_old_cursor_line = last_cursormoved.lnum; conceal_new_cursor_line = curwin->w_cursor.lnum; conceal_update_lines = TRUE; } # endif last_cursormoved = curwin->w_cursor; } // Trigger TextChangedI if b_changedtick_i differs. if (ready && has_textchangedI() && curbuf->b_last_changedtick_i != CHANGEDTICK(curbuf) && !pum_visible()) { aco_save_T aco; varnumber_T tick = CHANGEDTICK(curbuf); // save and restore curwin and curbuf, in case the autocmd changes them aucmd_prepbuf(&aco, curbuf); apply_autocmds(EVENT_TEXTCHANGEDI, NULL, NULL, FALSE, curbuf); aucmd_restbuf(&aco); curbuf->b_last_changedtick_i = CHANGEDTICK(curbuf); if (tick != CHANGEDTICK(curbuf)) // see ins_apply_autocmds() u_save(curwin->w_cursor.lnum, (linenr_T)(curwin->w_cursor.lnum + 1)); } // Trigger TextChangedP if b_changedtick_pum differs. When the popupmenu // closes TextChangedI will need to trigger for backwards compatibility, // thus use different b_last_changedtick* variables. if (ready && has_textchangedP() && curbuf->b_last_changedtick_pum != CHANGEDTICK(curbuf) && pum_visible()) { aco_save_T aco; varnumber_T tick = CHANGEDTICK(curbuf); // save and restore curwin and curbuf, in case the autocmd changes them aucmd_prepbuf(&aco, curbuf); apply_autocmds(EVENT_TEXTCHANGEDP, NULL, NULL, FALSE, curbuf); aucmd_restbuf(&aco); curbuf->b_last_changedtick_pum = CHANGEDTICK(curbuf); if (tick != CHANGEDTICK(curbuf)) // see ins_apply_autocmds() u_save(curwin->w_cursor.lnum, (linenr_T)(curwin->w_cursor.lnum + 1)); } if (ready) may_trigger_winscrolled(); // Trigger SafeState if nothing is pending. may_trigger_safestate(ready && !ins_compl_active() && !pum_visible()); #if defined(FEAT_CONCEAL) if ((conceal_update_lines && (conceal_old_cursor_line != conceal_new_cursor_line || conceal_cursor_line(curwin))) || need_cursor_line_redraw) { if (conceal_old_cursor_line != conceal_new_cursor_line) redrawWinline(curwin, conceal_old_cursor_line); redrawWinline(curwin, conceal_new_cursor_line == 0 ? curwin->w_cursor.lnum : conceal_new_cursor_line); curwin->w_valid &= ~VALID_CROW; need_cursor_line_redraw = FALSE; } #endif if (must_redraw) update_screen(0); else if (clear_cmdline || redraw_cmdline) showmode(); // clear cmdline and show mode showruler(FALSE); setcursor(); emsg_on_display = FALSE; // may remove error message now }
0
[ "CWE-120" ]
vim
7ce5b2b590256ce53d6af28c1d203fb3bc1d2d97
336,662,863,923,416,600,000,000,000,000,000,000,000
122
patch 8.2.4969: changing text in Visual mode may cause invalid memory access Problem: Changing text in Visual mode may cause invalid memory access. Solution: Check the Visual position after making a change.
void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn) { mutex_lock(&bfregi->lock); bfregi->count[bfregn]--; mutex_unlock(&bfregi->lock); }
0
[ "CWE-119", "CWE-787" ]
linux
0625b4ba1a5d4703c7fb01c497bd6c156908af00
178,938,029,277,116,180,000,000,000,000,000,000,000
6
IB/mlx5: Fix leaking stack memory to userspace mlx5_ib_create_qp_resp was never initialized and only the first 4 bytes were written. Fixes: 41d902cb7c32 ("RDMA/mlx5: Fix definition of mlx5_ib_create_qp_resp") Cc: <[email protected]> Acked-by: Leon Romanovsky <[email protected]> Signed-off-by: Jason Gunthorpe <[email protected]>
void RemoteFsDevice::saveProperties(const DeviceOptions &newOpts, const Details &nd) { bool connected=isConnected(); if (configured && (!connected || opts==newOpts) && (connected || details==nd)) { return; } bool isLocal=details.isLocalFile(); if (connected) { if (!configured) { details.configured=configured=true; details.save(); } if (opts.useCache!=newOpts.useCache) { if (opts.useCache) { saveCache(); } else if (opts.useCache && !newOpts.useCache) { removeCache(); } } opts=newOpts; writeOpts(settingsFileName(), opts, true); } if (!connected || isLocal) { Details newDetails=nd; Details oldDetails=details; bool newName=!oldDetails.name.isEmpty() && oldDetails.name!=newDetails.name; bool newDir=oldDetails.url.path()!=newDetails.url.path(); if (isLocal && newDir && opts.useCache) { removeCache(); } details=newDetails; details.configured=configured=true; details.save(); if (newName) { if (!details.isLocalFile()) { QString oldMount=mountPoint(oldDetails, false); if (!oldMount.isEmpty() && QDir(oldMount).exists()) { ::rmdir(QFile::encodeName(oldMount).constData()); } } setData(details.name); renamed(oldDetails.name, details.name); deviceId=createUdi(details.name); emit udiChanged(); m_itemData=details.name; setStatusMessage(QString()); } if (isLocal && newDir && scanned) { rescan(true); } } emit configurationChanged(); }
0
[ "CWE-20", "CWE-22" ]
cantata
afc4f8315d3e96574925fb530a7004cc9e6ce3d3
198,123,986,203,667,570,000,000,000,000,000,000,000
57
Remove internal Samba share mounting code; it had some privilege escalation issues and is not well tested
openssl_callback(int ok, X509_STORE_CTX * ctx) { #ifdef DEBUG if (!ok) { char buf[DN_BUF_LEN]; X509_NAME_oneline(X509_get_subject_name(ctx->current_cert), buf, sizeof(buf)); pkiDebug("cert = %s\n", buf); pkiDebug("callback function: %d (%s)\n", ctx->error, X509_verify_cert_error_string(ctx->error)); } #endif return ok; }
0
[ "CWE-476" ]
krb5
f249555301940c6df3a2cdda13b56b5674eebc2e
114,732,336,109,975,350,000,000,000,000,000,000,000
14
PKINIT null pointer deref [CVE-2013-1415] Don't dereference a null pointer when cleaning up. The KDC plugin for PKINIT can dereference a null pointer when a malformed packet causes processing to terminate early, leading to a crash of the KDC process. An attacker would need to have a valid PKINIT certificate or have observed a successful PKINIT authentication, or an unauthenticated attacker could execute the attack if anonymous PKINIT is enabled. CVSSv2 vector: AV:N/AC:M/Au:N/C:N/I:N/A:C/E:P/RL:O/RC:C This is a minimal commit for pullup; style fixes in a followup. [[email protected]: reformat and edit commit message] (cherry picked from commit c773d3c775e9b2d88bcdff5f8a8ba88d7ec4e8ed) ticket: 7570 version_fixed: 1.11.1 status: resolved
long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm *kvm = filp->private_data; void __user *argp = (void __user *)arg; struct kvm_device_attr attr; int r; switch (ioctl) { case KVM_S390_INTERRUPT: { struct kvm_s390_interrupt s390int; r = -EFAULT; if (copy_from_user(&s390int, argp, sizeof(s390int))) break; r = kvm_s390_inject_vm(kvm, &s390int); break; } case KVM_CREATE_IRQCHIP: { struct kvm_irq_routing_entry routing; r = -EINVAL; if (kvm->arch.use_irqchip) { /* Set up dummy routing. */ memset(&routing, 0, sizeof(routing)); r = kvm_set_irq_routing(kvm, &routing, 0, 0); } break; } case KVM_SET_DEVICE_ATTR: { r = -EFAULT; if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) break; r = kvm_s390_vm_set_attr(kvm, &attr); break; } case KVM_GET_DEVICE_ATTR: { r = -EFAULT; if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) break; r = kvm_s390_vm_get_attr(kvm, &attr); break; } case KVM_HAS_DEVICE_ATTR: { r = -EFAULT; if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) break; r = kvm_s390_vm_has_attr(kvm, &attr); break; } case KVM_S390_GET_SKEYS: { struct kvm_s390_skeys args; r = -EFAULT; if (copy_from_user(&args, argp, sizeof(struct kvm_s390_skeys))) break; r = kvm_s390_get_skeys(kvm, &args); break; } case KVM_S390_SET_SKEYS: { struct kvm_s390_skeys args; r = -EFAULT; if (copy_from_user(&args, argp, sizeof(struct kvm_s390_skeys))) break; r = kvm_s390_set_skeys(kvm, &args); break; } case KVM_S390_GET_CMMA_BITS: { struct kvm_s390_cmma_log args; r = -EFAULT; if (copy_from_user(&args, argp, sizeof(args))) break; mutex_lock(&kvm->slots_lock); r = kvm_s390_get_cmma_bits(kvm, &args); mutex_unlock(&kvm->slots_lock); if (!r) { r = copy_to_user(argp, &args, sizeof(args)); if (r) r = -EFAULT; } break; } case KVM_S390_SET_CMMA_BITS: { struct kvm_s390_cmma_log args; r = -EFAULT; if (copy_from_user(&args, argp, sizeof(args))) break; mutex_lock(&kvm->slots_lock); r = kvm_s390_set_cmma_bits(kvm, &args); mutex_unlock(&kvm->slots_lock); break; } case KVM_S390_PV_COMMAND: { struct kvm_pv_cmd args; /* protvirt means user sigp */ kvm->arch.user_cpu_state_ctrl = 1; r = 0; if (!is_prot_virt_host()) { r = -EINVAL; break; } if (copy_from_user(&args, argp, sizeof(args))) { r = -EFAULT; break; } if (args.flags) { r = -EINVAL; break; } mutex_lock(&kvm->lock); r = kvm_s390_handle_pv(kvm, &args); mutex_unlock(&kvm->lock); if (copy_to_user(argp, &args, sizeof(args))) { r = -EFAULT; break; } break; } default: r = -ENOTTY; } return r; }
0
[ "CWE-416" ]
linux
0774a964ef561b7170d8d1b1bfe6f88002b6d219
11,306,033,882,466,288,000,000,000,000,000,000,000
130
KVM: Fix out of range accesses to memslots Reset the LRU slot if it becomes invalid when deleting a memslot to fix an out-of-bounds/use-after-free access when searching through memslots. Explicitly check for there being no used slots in search_memslots(), and in the caller of s390's approximation variant. Fixes: 36947254e5f9 ("KVM: Dynamically size memslot array based on number of used slots") Reported-by: Qian Cai <[email protected]> Cc: Peter Xu <[email protected]> Signed-off-by: Sean Christopherson <[email protected]> Message-Id: <[email protected]> Acked-by: Christian Borntraeger <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
static void php_do_pcre_match(INTERNAL_FUNCTION_PARAMETERS, int global) /* {{{ */ { /* parameters */ char *regex; /* Regular expression */ char *subject; /* String to match against */ int regex_len; int subject_len; pcre_cache_entry *pce; /* Compiled regular expression */ zval *subpats = NULL; /* Array for subpatterns */ long flags = 0; /* Match control flags */ long start_offset = 0; /* Where the new search starts */ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "ss|zll", &regex, &regex_len, &subject, &subject_len, &subpats, &flags, &start_offset) == FAILURE) { RETURN_FALSE; } /* Compile regex or get it from cache. */ if ((pce = pcre_get_compiled_regex_cache(regex, regex_len TSRMLS_CC)) == NULL) { RETURN_FALSE; } php_pcre_match_impl(pce, subject, subject_len, return_value, subpats, global, ZEND_NUM_ARGS() >= 4, flags, start_offset TSRMLS_CC); }
1
[]
php-src
03964892c054d0c736414c10b3edc7a40318b975
77,159,530,700,824,440,000,000,000,000,000,000,000
25
Fix bug #70345 (Multiple vulnerabilities related to PCRE functions)
static int padzero(unsigned long elf_bss) { unsigned long nbyte; nbyte = ELF_PAGEOFFSET(elf_bss); if (nbyte) { nbyte = ELF_MIN_ALIGN - nbyte; if (clear_user((void __user *) elf_bss, nbyte)) return -EFAULT; } return 0; }
0
[ "CWE-284", "CWE-264" ]
linux
4e7c22d447bb6d7e37bfe39ff658486ae78e8d77
188,807,401,235,139,800,000,000,000,000,000,000,000
12
x86, mm/ASLR: Fix stack randomization on 64-bit systems The issue is that the stack for processes is not properly randomized on 64 bit architectures due to an integer overflow. The affected function is randomize_stack_top() in file "fs/binfmt_elf.c": static unsigned long randomize_stack_top(unsigned long stack_top) { unsigned int random_variable = 0; if ((current->flags & PF_RANDOMIZE) && !(current->personality & ADDR_NO_RANDOMIZE)) { random_variable = get_random_int() & STACK_RND_MASK; random_variable <<= PAGE_SHIFT; } return PAGE_ALIGN(stack_top) + random_variable; return PAGE_ALIGN(stack_top) - random_variable; } Note that, it declares the "random_variable" variable as "unsigned int". Since the result of the shifting operation between STACK_RND_MASK (which is 0x3fffff on x86_64, 22 bits) and PAGE_SHIFT (which is 12 on x86_64): random_variable <<= PAGE_SHIFT; then the two leftmost bits are dropped when storing the result in the "random_variable". This variable shall be at least 34 bits long to hold the (22+12) result. These two dropped bits have an impact on the entropy of process stack. Concretely, the total stack entropy is reduced by four: from 2^28 to 2^30 (One fourth of expected entropy). This patch restores back the entropy by correcting the types involved in the operations in the functions randomize_stack_top() and stack_maxrandom_size(). The successful fix can be tested with: $ for i in `seq 1 10`; do cat /proc/self/maps | grep stack; done 7ffeda566000-7ffeda587000 rw-p 00000000 00:00 0 [stack] 7fff5a332000-7fff5a353000 rw-p 00000000 00:00 0 [stack] 7ffcdb7a1000-7ffcdb7c2000 rw-p 00000000 00:00 0 [stack] 7ffd5e2c4000-7ffd5e2e5000 rw-p 00000000 00:00 0 [stack] ... Once corrected, the leading bytes should be between 7ffc and 7fff, rather than always being 7fff. Signed-off-by: Hector Marco-Gisbert <[email protected]> Signed-off-by: Ismael Ripoll <[email protected]> [ Rebased, fixed 80 char bugs, cleaned up commit message, added test example and CVE ] Signed-off-by: Kees Cook <[email protected]> Cc: <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Andrew Morton <[email protected]> Cc: Al Viro <[email protected]> Fixes: CVE-2015-1593 Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Borislav Petkov <[email protected]>
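The overflow quoted above can be demonstrated with a few lines of compilable C (constants follow the message; variable names echo the quoted function, but this is not kernel source). On an LP64 system, shifting the 22-bit STACK_RND_MASK left by PAGE_SHIFT produces a 34-bit value, so an unsigned int holder silently drops the two topmost bits while an unsigned long keeps them:

#include <stdio.h>

#define STACK_RND_MASK 0x3fffffUL   /* 22 bits, as on x86_64 */
#define PAGE_SHIFT 12

int main(void)
{
        unsigned int  narrow = STACK_RND_MASK;  /* buggy holder: 32 bits */
        unsigned long wide   = STACK_RND_MASK;  /* fixed holder: 64 bits */

        narrow <<= PAGE_SHIFT;  /* top two bits of the 34-bit result are lost */
        wide   <<= PAGE_SHIFT;  /* full 34-bit result preserved */

        printf("unsigned int : 0x%lx\n", (unsigned long)narrow);
        printf("unsigned long: 0x%lx\n", wide);
        return 0;
}

Running it prints 0xfffff000 versus 0x3fffff000, i.e. one quarter of the intended randomization range, matching the entropy loss described in the message.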
send_fprtime_if_not_null (ctrl_t ctrl, const char *keyword, int number, const unsigned char *stamp) { char numbuf1[50], numbuf2[50]; unsigned long value; value = buf32_to_ulong (stamp); if (!value) return; sprintf (numbuf1, "%d", number); sprintf (numbuf2, "%lu", value); send_status_info (ctrl, keyword, numbuf1, (size_t)strlen(numbuf1), numbuf2, (size_t)strlen(numbuf2), NULL, 0); }
0
[ "CWE-20" ]
gnupg
2183683bd633818dd031b090b5530951de76f392
322,918,362,359,742,500,000,000,000,000,000,000,000
15
Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>
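The helpers named above can be sketched in a few lines of portable C (names and widths follow the commit message; the exact GnuPG host2net.h implementation may differ). The point is that every byte is widened to an unsigned type before shifting, so no "<< 24" ever lands in a signed int's sign bit and no char value gets sign-extended:

#include <stdint.h>
#include <stdio.h>

/* Assumed shape of a buf32_to_u32-style helper. */
static inline uint32_t buf32_to_u32(const void *buffer)
{
        const unsigned char *p = buffer;

        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16)
             | ((uint32_t)p[2] << 8)  | (uint32_t)p[3];
}

int main(void)
{
        const unsigned char stamp[4] = { 0xff, 0x01, 0x02, 0x03 };

        /* A hand-written stamp[0] << 24 would shift 0xff into the sign bit
         * of int; the helper stays exactly 0xff010203. */
        printf("0x%08lx\n", (unsigned long)buf32_to_u32(stamp));
        return 0;
}

The send_fprtime_if_not_null() function shown just above in this listing is one caller of exactly this kind of helper (buf32_to_ulong).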
TEST_P(ProtocolIntegrationTest, ResponseWithHostHeader) { initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); auto response = codec_client_->makeHeaderOnlyRequest( Http::TestRequestHeaderMapImpl{{":method", "GET"}, {":path", "/test/long/url"}, {":scheme", "http"}, {":authority", "host"}}); waitForNextUpstreamRequest(); upstream_request_->encodeHeaders( Http::TestResponseHeaderMapImpl{{":status", "200"}, {"host", "host"}}, true); response->waitForEndStream(); EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ("host", response->headers().get(Http::LowerCaseString("host"))->value().getStringView()); }
0
[ "CWE-770" ]
envoy
7ca28ff7d46454ae930e193d97b7d08156b1ba59
223,999,121,725,359,180,000,000,000,000,000,000,000
17
[http1] Include request URL in request header size computation, and reject partial headers that exceed configured limits (#145) Signed-off-by: antonio <[email protected]>
static bool vhost_can_busy_poll(unsigned long endtime) { return likely(!need_resched() && !time_after(busy_clock(), endtime) && !signal_pending(current)); }
0
[ "CWE-787" ]
linux
42d84c8490f9f0931786f1623191fcab397c3d64
65,048,447,435,955,730,000,000,000,000,000,000,000
5
vhost: Check docket sk_family instead of call getname Doing so, we save one call to get data we already have in the struct. Also, since there is no guarantee that getname use sockaddr_ll parameter beyond its size, we add a little bit of security here. It should do not do beyond MAX_ADDR_LEN, but syzbot found that ax25_getname writes more (72 bytes, the size of full_sockaddr_ax25, versus 20 + 32 bytes of sockaddr_ll + MAX_ADDR_LEN in syzbot repro). Fixes: 3a4d5c94e9593 ("vhost_net: a kernel-level virtio server") Reported-by: [email protected] Signed-off-by: Eugenio Pérez <[email protected]> Acked-by: Michael S. Tsirkin <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static inline int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target) { return -1; }
0
[ "CWE-400", "CWE-703", "CWE-835" ]
linux
c40f7d74c741a907cfaeb73a7697081881c497d0
275,691,781,409,750,800,000,000,000,000,000,000,000
4
sched/fair: Fix infinite loop in update_blocked_averages() by reverting a9e7f6544b9c Zhipeng Xie, Xie XiuQi and Sargun Dhillon reported lockups in the scheduler under high loads, starting at around the v4.18 time frame, and Zhipeng Xie tracked it down to bugs in the rq->leaf_cfs_rq_list manipulation. Do a (manual) revert of: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path") It turns out that the list_del_leaf_cfs_rq() introduced by this commit is a surprising property that was not considered in followup commits such as: 9c2791f936ef ("sched/fair: Fix hierarchical order in rq->leaf_cfs_rq_list") As Vincent Guittot explains: "I think that there is a bigger problem with commit a9e7f6544b9c and cfs_rq throttling: Let take the example of the following topology TG2 --> TG1 --> root: 1) The 1st time a task is enqueued, we will add TG2 cfs_rq then TG1 cfs_rq to leaf_cfs_rq_list and we are sure to do the whole branch in one path because it has never been used and can't be throttled so tmp_alone_branch will point to leaf_cfs_rq_list at the end. 2) Then TG1 is throttled 3) and we add TG3 as a new child of TG1. 4) The 1st enqueue of a task on TG3 will add TG3 cfs_rq just before TG1 cfs_rq and tmp_alone_branch will stay on rq->leaf_cfs_rq_list. With commit a9e7f6544b9c, we can del a cfs_rq from rq->leaf_cfs_rq_list. So if the load of TG1 cfs_rq becomes NULL before step 2) above, TG1 cfs_rq is removed from the list. Then at step 4), TG3 cfs_rq is added at the beginning of rq->leaf_cfs_rq_list but tmp_alone_branch still points to TG3 cfs_rq because its throttled parent can't be enqueued when the lock is released. tmp_alone_branch doesn't point to rq->leaf_cfs_rq_list whereas it should. So if TG3 cfs_rq is removed or destroyed before tmp_alone_branch points on another TG cfs_rq, the next TG cfs_rq that will be added, will be linked outside rq->leaf_cfs_rq_list - which is bad. In addition, we can break the ordering of the cfs_rq in rq->leaf_cfs_rq_list but this ordering is used to update and propagate the update from leaf down to root." Instead of trying to work through all these cases and trying to reproduce the very high loads that produced the lockup to begin with, simplify the code temporarily by reverting a9e7f6544b9c - which change was clearly not thought through completely. This (hopefully) gives us a kernel that doesn't lock up so people can continue to enjoy their holidays without worrying about regressions. ;-) [ mingo: Wrote changelog, fixed weird spelling in code comment while at it. ] Analyzed-by: Xie XiuQi <[email protected]> Analyzed-by: Vincent Guittot <[email protected]> Reported-by: Zhipeng Xie <[email protected]> Reported-by: Sargun Dhillon <[email protected]> Reported-by: Xie XiuQi <[email protected]> Tested-by: Zhipeng Xie <[email protected]> Tested-by: Sargun Dhillon <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> Acked-by: Vincent Guittot <[email protected]> Cc: <[email protected]> # v4.13+ Cc: Bin Li <[email protected]> Cc: Mike Galbraith <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Tejun Heo <[email protected]> Cc: Thomas Gleixner <[email protected]> Fixes: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path") Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
void ConnectionImpl::onDispatch(const Buffer::Instance& data) { getBytesMeter().addWireBytesReceived(data.length()); }
0
[ "CWE-416" ]
envoy
fe7c69c248f4fe5a9080c7ccb35275b5218bb5ab
127,633,341,827,026,700,000,000,000,000,000,000,000
3
internal redirect: fix a lifetime bug (#785) Signed-off-by: Alyssa Wilk <[email protected]> Signed-off-by: Matt Klein <[email protected]> Signed-off-by: Pradeep Rao <[email protected]>
static int copy_rules(apr_pool_t *mp, msre_ruleset *parent_ruleset, msre_ruleset *child_ruleset, apr_array_header_t *exceptions_arr) { copy_rules_phase(mp, parent_ruleset->phase_request_headers, child_ruleset->phase_request_headers, exceptions_arr); copy_rules_phase(mp, parent_ruleset->phase_request_body, child_ruleset->phase_request_body, exceptions_arr); copy_rules_phase(mp, parent_ruleset->phase_response_headers, child_ruleset->phase_response_headers, exceptions_arr); copy_rules_phase(mp, parent_ruleset->phase_response_body, child_ruleset->phase_response_body, exceptions_arr); copy_rules_phase(mp, parent_ruleset->phase_logging, child_ruleset->phase_logging, exceptions_arr); return 1; }
0
[ "CWE-20", "CWE-611" ]
ModSecurity
d4d80b38aa85eccb26e3c61b04d16e8ca5de76fe
254,007,113,786,520,720,000,000,000,000,000,000,000
17
Added SecXmlExternalEntity
DU_ngetStatusString(Uint16 statusCode) { const char *s = NULL; switch (statusCode) { case STATUS_Success: s = "Success"; break; case STATUS_N_ClassInstanceConflict: s = "Failure: ClassInstanceConflict"; break; case STATUS_N_DuplicateInvocation: s = "Failure: DuplicateInvocation"; break; case STATUS_N_InvalidObjectInstance: s = "Failure: InvalidObjectInstance"; break; case STATUS_N_MistypedArgument: s = "Failure: MistypedArgument"; break; case STATUS_N_NoSuchSOPClass: s = "Failure: NoSuchSOPClass"; break; case STATUS_N_NoSuchObjectInstance: s = "Failure: NoSuchObjectInstance"; break; case STATUS_N_ProcessingFailure: s = "Failure: ProcessingFailure"; break; case STATUS_N_ResourceLimitation: s = "Failure: ResourceLimitation"; break; case STATUS_N_AttributeListError: s = "Warning: AttributeListError"; break; case STATUS_N_AttributeValueOutOfRange: s = "Warning: AttributeValueOutOfRange"; break; } if (s) return s; switch (statusCode & 0xf000) { /* high nibble significant */ case STATUS_FIND_Failed_UnableToProcess: /* high nibble */ s = "Failed: UnableToProcess"; break; } if (s == NULL) { sprintf(staticBuf, "Unknown Status: 0x%x", (unsigned int)statusCode); s = staticBuf; } return s; }
0
[ "CWE-476", "CWE-787" ]
dcmtk
5c14bf53fb42ceca12bbcc0016e8704b1580920d
168,304,695,417,970,430,000,000,000,000,000,000,000
54
Fixed possible NULL pointer dereference. Thanks to Jinsheng Ba <[email protected]> for the report and patch.
static int mwifiex_init_pcie(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; int ret; struct pci_dev *pdev = card->dev; pci_set_drvdata(pdev, card); ret = pci_enable_device(pdev); if (ret) goto err_enable_dev; pci_set_master(pdev); ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { pr_err("set_dma_mask(32) failed: %d\n", ret); goto err_set_dma_mask; } ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { pr_err("set_consistent_dma_mask(64) failed\n"); goto err_set_dma_mask; } ret = pci_request_region(pdev, 0, DRV_NAME); if (ret) { pr_err("req_reg(0) error\n"); goto err_req_region0; } card->pci_mmap = pci_iomap(pdev, 0, 0); if (!card->pci_mmap) { pr_err("iomap(0) error\n"); ret = -EIO; goto err_iomap0; } ret = pci_request_region(pdev, 2, DRV_NAME); if (ret) { pr_err("req_reg(2) error\n"); goto err_req_region2; } card->pci_mmap1 = pci_iomap(pdev, 2, 0); if (!card->pci_mmap1) { pr_err("iomap(2) error\n"); ret = -EIO; goto err_iomap2; } pr_notice("PCI memory map Virt0: %pK PCI memory map Virt2: %pK\n", card->pci_mmap, card->pci_mmap1); ret = mwifiex_pcie_alloc_buffers(adapter); if (ret) goto err_alloc_buffers; return 0; err_alloc_buffers: pci_iounmap(pdev, card->pci_mmap1); err_iomap2: pci_release_region(pdev, 2); err_req_region2: pci_iounmap(pdev, card->pci_mmap); err_iomap0: pci_release_region(pdev, 0); err_req_region0: err_set_dma_mask: pci_disable_device(pdev); err_enable_dev: return ret; }
0
[ "CWE-400", "CWE-200", "CWE-401" ]
linux
d10dcb615c8e29d403a24d35f8310a7a53e3050c
215,362,423,322,139,200,000,000,000,000,000,000,000
72
mwifiex: pcie: Fix memory leak in mwifiex_pcie_init_evt_ring In mwifiex_pcie_init_evt_ring, a new skb is allocated which should be released if mwifiex_map_pci_memory() fails. The release for skb and card->evtbd_ring_vbase is added. Fixes: 0732484b47b5 ("mwifiex: separate ring initialization and ring creation routines") Signed-off-by: Navid Emamdoost <[email protected]> Acked-by: Ganapathi Bhat <[email protected]> Signed-off-by: Kalle Valo <[email protected]>
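The leak and its fix reduce to a standard error-path rule: release whatever this step allocated before returning failure. A generic, compilable C sketch of that shape (placeholder names, not the mwifiex driver API):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for mwifiex_map_pci_memory(); here it always fails. */
static int map_buffer(void *buf, size_t len)
{
        (void)buf;
        (void)len;
        return -1;
}

static int init_evt_ring(void)
{
        void *skb = malloc(2048);       /* stands in for dev_alloc_skb() */

        if (!skb)
                return -1;

        if (map_buffer(skb, 2048)) {
                /* The original bug: returning here without releasing the
                 * freshly allocated buffer (and, in the driver, the event
                 * ring memory). The fix frees both before bailing out. */
                free(skb);
                return -1;
        }

        /* ... hand the mapped buffer to the event ring ... */
        free(skb);
        return 0;
}

int main(void)
{
        printf("init_evt_ring() = %d\n", init_evt_ring());
        return 0;
}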
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { flush_fp_to_thread(src); flush_altivec_to_thread(src); flush_vsx_to_thread(src); flush_spe_to_thread(src); *dst = *src; clear_task_ebb(dst); return 0; }
1
[ "CWE-20" ]
linux
621b5060e823301d0cba4cb52a7ee3491922d291
21,917,628,157,399,120,000,000,000,000,000,000,000
13
powerpc/tm: Fix crash when forking inside a transaction When we fork/clone we currently don't copy any of the TM state to the new thread. This results in a TM bad thing (program check) when the new process is switched in as the kernel does a tmrechkpt with TEXASR FS not set. Also, since R1 is from userspace, we trigger the bad kernel stack pointer detection. So we end up with something like this: Bad kernel stack pointer 0 at c0000000000404fc cpu 0x2: Vector: 700 (Program Check) at [c00000003ffefd40] pc: c0000000000404fc: restore_gprs+0xc0/0x148 lr: 0000000000000000 sp: 0 msr: 9000000100201030 current = 0xc000001dd1417c30 paca = 0xc00000000fe00800 softe: 0 irq_happened: 0x01 pid = 0, comm = swapper/2 WARNING: exception is not recoverable, can't continue The below fixes this by flushing the TM state before we copy the task_struct to the clone. To do this we go through the tmreclaim patch, which removes the checkpointed registers from the CPU and transitions the CPU out of TM suspend mode. Hence we need to call tmrechkpt after to restore the checkpointed state and the TM mode for the current task. To make this fail from userspace is simply: tbegin li r0, 2 sc <boom> Kudos to Adhemerval Zanella Neto for finding this. Signed-off-by: Michael Neuling <[email protected]> cc: Adhemerval Zanella Neto <[email protected]> cc: [email protected] Signed-off-by: Benjamin Herrenschmidt <[email protected]>
GF_Err smhd_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SoundMediaHeaderBox *ptr = (GF_SoundMediaHeaderBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u16(bs, ptr->balance); gf_bs_write_u16(bs, ptr->reserved); return GF_OK; }
0
[ "CWE-787" ]
gpac
388ecce75d05e11fc8496aa4857b91245007d26e
108,615,906,656,097,740,000,000,000,000,000,000,000
10
fixed #1587
static void ProcessRadioTxTimeout( void ) { if( MacCtx.NvmCtx->DeviceClass != CLASS_C ) { Radio.Sleep( ); } UpdateRxSlotIdleState( ); MacCtx.McpsConfirm.Status = LORAMAC_EVENT_INFO_STATUS_TX_TIMEOUT; LoRaMacConfirmQueueSetStatusCmn( LORAMAC_EVENT_INFO_STATUS_TX_TIMEOUT ); if( MacCtx.NodeAckRequested == true ) { MacCtx.AckTimeoutRetry = true; } MacCtx.MacFlags.Bits.MacDone = 1; }
0
[ "CWE-120", "CWE-787" ]
LoRaMac-node
e3063a91daa7ad8a687223efa63079f0c24568e4
264,268,408,680,859,800,000,000,000,000,000,000,000
16
Added received buffer size checks.
interface_removed (GDBusObjectManager *manager, GDBusObject *object, GDBusInterface *interface, gpointer user_data) { BluetoothClient *client = user_data; if (IS_ADAPTER1 (interface)) { adapter_removed (manager, g_dbus_object_get_object_path (object), client); } else if (IS_DEVICE1 (interface)) { device_removed (g_dbus_object_get_object_path (object), client); } }
0
[]
gnome-bluetooth
6b5086d42ea64d46277f3c93b43984f331d12f89
153,905,944,886,710,440,000,000,000,000,000,000,000
16
lib: Fix Discoverable being reset when turned off Work-around race in bluetoothd which would reset the discoverable flag if a timeout change was requested before discoverable finished being set to off: See https://bugzilla.redhat.com/show_bug.cgi?id=1602985
process_cmd_minpoll(CMD_Request *msg, char *line) { IPAddr address; int minpoll; int ok; if (read_address_integer(line, &address, &minpoll)) { UTI_IPHostToNetwork(&address, &msg->data.modify_minpoll.address); msg->data.modify_minpoll.new_minpoll = htonl(minpoll); msg->command = htons(REQ_MODIFY_MINPOLL); ok = 1; } else { ok = 0; } return ok; }
0
[ "CWE-189" ]
chrony
7712455d9aa33d0db0945effaa07e900b85987b1
307,430,926,987,553,200,000,000,000,000,000,000,000
18
Fix buffer overflow when processing crafted command packets When the length of the REQ_SUBNETS_ACCESSED, REQ_CLIENT_ACCESSES command requests and the RPY_SUBNETS_ACCESSED, RPY_CLIENT_ACCESSES, RPY_CLIENT_ACCESSES_BY_INDEX, RPY_MANUAL_LIST command replies is calculated, the number of items stored in the packet is not validated. A crafted command request/reply can be used to crash the server/client. Only clients allowed by cmdallow (by default only localhost) can crash the server. With chrony versions 1.25 and 1.26 this bug has a smaller security impact as the server requires the clients to be authenticated in order to process the subnet and client accesses commands. In 1.27 and 1.28, however, the invalid calculated length is included also in the authentication check which may cause another crash.
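A hedged sketch of the missing validation (the constants and field names below are invented, not chrony's wire format): bound the claimed item count by the protocol maximum and by what the received packet can actually hold before it ever feeds a length calculation.

#include <stdint.h>
#include <stdio.h>

#define MAX_ITEMS   8     /* illustrative per-reply limit */
#define ITEM_SIZE   16    /* illustrative per-item wire size */
#define HEADER_SIZE 12    /* illustrative reply header size */

/* Returns the reply length, or 0 if the claimed item count is bogus. */
static size_t reply_length(uint32_t n_items, size_t packet_len)
{
        if (n_items > MAX_ITEMS)
                return 0;   /* crafted packet: count exceeds protocol limit */

        size_t needed = HEADER_SIZE + (size_t)n_items * ITEM_SIZE;

        if (needed > packet_len)
                return 0;   /* count claims more data than was received */

        return needed;
}

int main(void)
{
        printf("%zu\n", reply_length(4, 512));           /* accepted: 76 */
        printf("%zu\n", reply_length(0xffffffffu, 512)); /* rejected: 0 */
        return 0;
}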
static unsigned long dev_pagemap_mapping_shift(struct page *page, struct vm_area_struct *vma) { unsigned long address = vma_address(page, vma); pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; pgd = pgd_offset(vma->vm_mm, address); if (!pgd_present(*pgd)) return 0; p4d = p4d_offset(pgd, address); if (!p4d_present(*p4d)) return 0; pud = pud_offset(p4d, address); if (!pud_present(*pud)) return 0; if (pud_devmap(*pud)) return PUD_SHIFT; pmd = pmd_offset(pud, address); if (!pmd_present(*pmd)) return 0; if (pmd_devmap(*pmd)) return PMD_SHIFT; pte = pte_offset_map(pmd, address); if (!pte_present(*pte)) return 0; if (pte_devmap(*pte)) return PAGE_SHIFT; return 0; }
0
[]
linux
46612b751c4941c5c0472ddf04027e877ae5990f
56,001,718,531,290,490,000,000,000,000,000,000,000
33
mm: hwpoison: fix thp split handing in soft_offline_in_use_page() When soft_offline_in_use_page() runs on a thp tail page after pmd is split, we trigger the following VM_BUG_ON_PAGE(): Memory failure: 0x3755ff: non anonymous thp __get_any_page: 0x3755ff: unknown zero refcount page type 2fffff80000000 Soft offlining pfn 0x34d805 at process virtual address 0x20fff000 page:ffffea000d360140 count:0 mapcount:0 mapping:0000000000000000 index:0x1 flags: 0x2fffff80000000() raw: 002fffff80000000 ffffea000d360108 ffffea000d360188 0000000000000000 raw: 0000000000000001 0000000000000000 00000000ffffffff 0000000000000000 page dumped because: VM_BUG_ON_PAGE(page_ref_count(page) == 0) ------------[ cut here ]------------ kernel BUG at ./include/linux/mm.h:519! soft_offline_in_use_page() passed refcount and page lock from tail page to head page, which is not needed because we can pass any subpage to split_huge_page(). Naoya had fixed a similar issue in c3901e722b29 ("mm: hwpoison: fix thp split handling in memory_failure()"). But he missed fixing soft offline. Link: http://lkml.kernel.org/r/[email protected] Fixes: 61f5d698cc97 ("mm: re-enable THP") Signed-off-by: zhongjiang <[email protected]> Acked-by: Naoya Horiguchi <[email protected]> Cc: Michal Hocko <[email protected]> Cc: Hugh Dickins <[email protected]> Cc: Kirill A. Shutemov <[email protected]> Cc: Andrea Arcangeli <[email protected]> Cc: <[email protected]> [4.5+] Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
MaybeLocal<Object> AddIssuerChainToObject( X509Pointer* cert, Local<Object> object, StackOfX509&& peer_certs, Environment* const env) { Local<Context> context = env->isolate()->GetCurrentContext(); cert->reset(sk_X509_delete(peer_certs.get(), 0)); for (;;) { int i; for (i = 0; i < sk_X509_num(peer_certs.get()); i++) { X509* ca = sk_X509_value(peer_certs.get(), i); if (X509_check_issued(ca, cert->get()) != X509_V_OK) continue; Local<Object> ca_info; MaybeLocal<Object> maybe_ca_info = X509ToObject(env, ca); if (!maybe_ca_info.ToLocal(&ca_info)) return MaybeLocal<Object>(); if (!Set<Object>(context, object, env->issuercert_string(), ca_info)) return MaybeLocal<Object>(); object = ca_info; // NOTE: Intentionally freeing cert that is not used anymore. // Delete cert and continue aggregating issuers. cert->reset(sk_X509_delete(peer_certs.get(), i)); break; } // Issuer not found, break out of the loop. if (i == sk_X509_num(peer_certs.get())) break; } return MaybeLocal<Object>(object); }
0
[ "CWE-295" ]
node
466e5415a2b7b3574ab5403acb87e89a94a980d1
114,049,886,610,243,420,000,000,000,000,000,000,000
35
crypto,tls: implement safe x509 GeneralName format This change introduces JSON-compatible escaping rules for strings that include X.509 GeneralName components (see RFC 5280). This non-standard format avoids ambiguities and prevents injection attacks that could previously lead to X.509 certificates being accepted even though they were not valid for the target hostname. These changes affect the format of subject alternative names and the format of authority information access. The checkServerIdentity function has been modified to safely handle the new format, eliminating the possibility of injecting subject alternative names into the verification logic. Because each subject alternative name is only encoded as a JSON string literal if necessary for security purposes, this change will only be visible in rare cases. This addresses CVE-2021-44532. CVE-ID: CVE-2021-44532 PR-URL: https://github.com/nodejs-private/node-private/pull/300 Reviewed-By: Michael Dawson <[email protected]> Reviewed-By: Rich Trott <[email protected]>
void __init sched_init_smp(void) { cpumask_var_t non_isolated_cpus; alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); alloc_cpumask_var(&fallback_doms, GFP_KERNEL); sched_init_numa(); /* * There's no userspace yet to cause hotplug operations; hence all the * cpu masks are stable and all blatant races in the below code cannot * happen. */ mutex_lock(&sched_domains_mutex); init_sched_domains(cpu_active_mask); cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); if (cpumask_empty(non_isolated_cpus)) cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); mutex_unlock(&sched_domains_mutex); hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE); hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE); init_hrtick(); /* Move init over to a non-isolated CPU */ if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0) BUG(); sched_init_granularity(); free_cpumask_var(non_isolated_cpus); init_sched_rt_class(); init_sched_dl_class(); }
0
[ "CWE-119" ]
linux
29d6455178a09e1dc340380c582b13356227e8df
76,042,382,092,231,740,000,000,000,000,000,000,000
36
sched: panic on corrupted stack end Until now, hitting this BUG_ON caused a recursive oops (because oops handling involves do_exit(), which calls into the scheduler, which in turn raises an oops), which caused stuff below the stack to be overwritten until a panic happened (e.g. via an oops in interrupt context, caused by the overwritten CPU index in the thread_info). Just panic directly. Signed-off-by: Jann Horn <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
Client::swanSong() { // get rid of our piping obligations if (requestBodySource != NULL) stopConsumingFrom(requestBodySource); #if USE_ADAPTATION cleanAdaptation(); #endif if (!doneWithServer()) closeServer(); if (!doneWithFwd) { doneWithFwd = "swanSong()"; fwd->handleUnregisteredServerEnd(); } BodyConsumer::swanSong(); #if USE_ADAPTATION Initiator::swanSong(); BodyProducer::swanSong(); #endif // paranoid: check that swanSong has been called // extra paranoid: yeah, I really mean it. they MUST pass here. assert(!requestBodySource); #if USE_ADAPTATION assert(!virginBodyDestination); assert(!adaptedBodySource); #endif }
0
[ "CWE-20" ]
squid
6c9c44d0e9cf7b72bb233360c5308aa063af3d69
83,936,584,138,361,350,000,000,000,000,000,000,000
32
Handle more partial responses (#791)
static inline Status _badValue(const char* reason, int location) { return Status(ErrorCodes::BadValue, reason, location); }
0
[ "CWE-264" ]
mongo
23344f8b7506df694f66999693ee3c00dfd6afae
118,681,332,002,757,550,000,000,000,000,000,000,000
3
SERVER-9983 Do not needlessly lock when looking up privileges for the __system@local user. Uncorrected, this can cause replica set heartbeats to stall behind operations that hold the read lock for a long time.
static void free_arg_pages(struct linux_binprm *bprm) { }
0
[ "CWE-200" ]
linux-2.6
b66c5984017533316fd1951770302649baf1aa33
40,844,954,569,806,580,000,000,000,000,000,000,000
3
exec: do not leave bprm->interp on stack If a series of scripts are executed, each triggering module loading via unprintable bytes in the script header, kernel stack contents can leak into the command line. Normally execution of binfmt_script and binfmt_misc happens recursively. However, when modules are enabled, and unprintable bytes exist in the bprm->buf, execution will restart after attempting to load matching binfmt modules. Unfortunately, the logic in binfmt_script and binfmt_misc does not expect to get restarted. They leave bprm->interp pointing to their local stack. This means on restart bprm->interp is left pointing into unused stack memory which can then be copied into the userspace argv areas. After additional study, it seems that both recursion and restart remains the desirable way to handle exec with scripts, misc, and modules. As such, we need to protect the changes to interp. This changes the logic to require allocation for any changes to the bprm->interp. To avoid adding a new kmalloc to every exec, the default value is left as-is. Only when passing through binfmt_script or binfmt_misc does an allocation take place. For a proof of concept, see DoTest.sh from: http://www.halfdog.net/Security/2012/LinuxKernelBinfmtScriptStackDataDisclosure/ Signed-off-by: Kees Cook <[email protected]> Cc: halfdog <[email protected]> Cc: P J P <[email protected]> Cc: Alexander Viro <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
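The underlying rule the patch enforces is a general one: a pointer that outlives a function must never be aimed at that function's stack buffer; take an allocated copy instead. A plain user-space C sketch of both shapes (the kernel patch routes interp changes through an allocation; strdup() stands in for that here, and none of this is the binfmt code itself):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const char *interp;      /* stands in for bprm->interp */

/* Buggy shape: after this returns, interp dangles into a dead stack frame,
 * just like bprm->interp pointing at a binfmt handler's local buffer when
 * the exec is restarted. */
static void set_interp_broken(void)
{
        char local[64];

        snprintf(local, sizeof(local), "%s", "/bin/sh");
        interp = local;         /* dangling as soon as we return */
}

/* Fixed shape: copy the string into heap memory so it survives the caller. */
static int set_interp_fixed(const char *name)
{
        char *copy = strdup(name);

        if (!copy)
                return -1;
        interp = copy;
        return 0;
}

int main(void)
{
        if (set_interp_fixed("/bin/sh") == 0)
                printf("interp = %s\n", interp);
        free((void *)interp);
        (void)set_interp_broken;        /* kept only to show the bad shape */
        return 0;
}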
void Context::initializeWriteFilterCallbacks(Network::WriteFilterCallbacks& callbacks) { network_write_filter_callbacks_ = &callbacks; }
0
[ "CWE-476" ]
envoy
8788a3cf255b647fd14e6b5e2585abaaedb28153
99,181,418,105,995,600,000,000,000,000,000,000,000
3
1.4 - Do not call into the VM unless the VM Context has been created. (#24) * Ensure that the in VM Context is created before onDone is called. Signed-off-by: John Plevyak <[email protected]> * Update as per offline discussion. Signed-off-by: John Plevyak <[email protected]> * Set in_vm_context_created_ in onNetworkNewConnection. Signed-off-by: John Plevyak <[email protected]> * Add guards to other network calls. Signed-off-by: John Plevyak <[email protected]> * Fix common/wasm tests. Signed-off-by: John Plevyak <[email protected]> * Patch tests. Signed-off-by: John Plevyak <[email protected]> * Remove unecessary file from cherry-pick. Signed-off-by: John Plevyak <[email protected]>
set_xattrs(struct archive_write_disk *a) { struct archive_entry *entry = a->entry; static int warning_done = 0; int ret = ARCHIVE_OK; int i = archive_entry_xattr_reset(entry); while (i--) { const char *name; const void *value; size_t size; archive_entry_xattr_next(entry, &name, &value, &size); if (name != NULL) { int e; int namespace; if (strncmp(name, "user.", 5) == 0) { /* "user." attributes go to user namespace */ name += 5; namespace = EXTATTR_NAMESPACE_USER; } else { /* Warn about other extended attributes. */ archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Can't restore extended attribute ``%s''", name); ret = ARCHIVE_WARN; continue; } errno = 0; #if HAVE_EXTATTR_SET_FD if (a->fd >= 0) e = extattr_set_fd(a->fd, namespace, name, value, size); else #endif /* TODO: should we use extattr_set_link() instead? */ { e = extattr_set_file(archive_entry_pathname(entry), namespace, name, value, size); } if (e != (int)size) { if (errno == ENOTSUP || errno == ENOSYS) { if (!warning_done) { warning_done = 1; archive_set_error(&a->archive, errno, "Cannot restore extended " "attributes on this file " "system"); } } else { archive_set_error(&a->archive, errno, "Failed to set extended attribute"); } ret = ARCHIVE_WARN; } } } return (ret); }
0
[ "CWE-703", "CWE-22" ]
libarchive
59357157706d47c365b2227739e17daba3607526
87,405,328,221,502,970,000,000,000,000,000,000,000
60
Add ARCHIVE_EXTRACT_SECURE_NOABSOLUTEPATHS option This fixes a directory traversal in the cpio tool.
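What a "no absolute paths" extraction policy amounts to can be shown in a short, compilable C check (illustrative only; libarchive's real logic is more elaborate, and its ".." handling lives behind the separate ARCHIVE_EXTRACT_SECURE_NODOTDOT flag): reject entry names that are absolute, and reject ones that climb out of the target directory.

#include <stdio.h>
#include <string.h>

/* Returns 1 if the archive entry path is safe to extract under the current
 * directory, 0 otherwise. */
static int entry_path_is_safe(const char *path)
{
        if (path[0] == '/')
                return 0;                       /* absolute path */

        for (const char *p = path; *p; ) {
                size_t seg = strcspn(p, "/");

                if (seg == 2 && p[0] == '.' && p[1] == '.')
                        return 0;               /* ".." path component */
                p += seg;
                while (*p == '/')
                        p++;
        }
        return 1;
}

int main(void)
{
        const char *samples[] = { "etc/passwd", "/etc/passwd", "a/../../b" };

        for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("%-12s -> %s\n", samples[i],
                       entry_path_is_safe(samples[i]) ? "extract" : "reject");
        return 0;
}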
sign (gcry_mpi_t r, gcry_mpi_t s, gcry_mpi_t input, DSA_secret_key *skey, int flags, int hashalgo) { gpg_err_code_t rc; gcry_mpi_t hash; gcry_mpi_t k; gcry_mpi_t kinv; gcry_mpi_t tmp; const void *abuf; unsigned int abits, qbits; int extraloops = 0; qbits = mpi_get_nbits (skey->q); /* Convert the INPUT into an MPI. */ rc = _gcry_dsa_normalize_hash (input, &hash, qbits); if (rc) return rc; again: /* Create the K value. */ if ((flags & PUBKEY_FLAG_RFC6979) && hashalgo) { /* Use Pornin's method for deterministic DSA. If this flag is set, it is expected that HASH is an opaque MPI with the to be signed hash. That hash is also used as h1 from 3.2.a. */ if (!mpi_is_opaque (input)) { rc = GPG_ERR_CONFLICT; goto leave; } abuf = mpi_get_opaque (input, &abits); rc = _gcry_dsa_gen_rfc6979_k (&k, skey->q, skey->x, abuf, (abits+7)/8, hashalgo, extraloops); if (rc) goto leave; } else { /* Select a random k with 0 < k < q */ k = _gcry_dsa_gen_k (skey->q, GCRY_STRONG_RANDOM); } /* r = (a^k mod p) mod q */ mpi_powm( r, skey->g, k, skey->p ); mpi_fdiv_r( r, r, skey->q ); /* kinv = k^(-1) mod q */ kinv = mpi_alloc( mpi_get_nlimbs(k) ); mpi_invm(kinv, k, skey->q ); /* s = (kinv * ( hash + x * r)) mod q */ tmp = mpi_alloc( mpi_get_nlimbs(skey->p) ); mpi_mul( tmp, skey->x, r ); mpi_add( tmp, tmp, hash ); mpi_mulm( s , kinv, tmp, skey->q ); mpi_free(k); mpi_free(kinv); mpi_free(tmp); if (!mpi_cmp_ui (r, 0)) { /* This is a highly unlikely code path. */ extraloops++; goto again; } rc = 0; leave: if (hash != input) mpi_free (hash); return rc; }
1
[ "CWE-203" ]
libgcrypt
7c2943309d14407b51c8166c4dcecb56a3628567
42,523,480,536,828,030,000,000,000,000,000,000,000
77
dsa,ecdsa: Fix use of nonce, use larger one. * cipher/dsa-common.c (_gcry_dsa_modify_k): New. * cipher/pubkey-internal.h (_gcry_dsa_modify_k): New. * cipher/dsa.c (sign): Use _gcry_dsa_modify_k. * cipher/ecc-ecdsa.c (_gcry_ecc_ecdsa_sign): Likewise. * cipher/ecc-gost.c (_gcry_ecc_gost_sign): Likewise. CVE-id: CVE-2019-13627 GnuPG-bug-id: 4626 Signed-off-by: NIIBE Yutaka <[email protected]>
TEST(RegexMatchExpression, MatchesElementExtendedOn) { BSONObj match = BSON("x" << "ab"); BSONObj notMatch = BSON("x" << "a b"); RegexMatchExpression regex("", "a b", "x"); ASSERT(regex.matchesSingleElement(match.firstElement())); ASSERT(!regex.matchesSingleElement(notMatch.firstElement())); }
0
[]
mongo
64095239f41e9f3841d8be9088347db56d35c891
30,100,801,040,499,130,000,000,000,000,000,000,000
9
SERVER-51083 Reject invalid UTF-8 from $regex match expressions
void tcp6_proc_exit(struct net *net) { tcp_proc_unregister(net, &tcp6_seq_afinfo); }
0
[ "CWE-362" ]
linux-2.6
f6d8bd051c391c1c0458a30b2a7abcd939329259
9,287,367,070,364,252,000,000,000,000,000,000,000
4
inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options Problem is ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change inet->opt pointer and free old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We cant insert an rcu_head in struct ip_options since its included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <[email protected]> Cc: Herbert Xu <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static int bus_socket_auth_verify(sd_bus *b) { assert(b); if (b->is_server) return bus_socket_auth_verify_server(b); else return bus_socket_auth_verify_client(b); }
0
[ "CWE-787" ]
systemd
6d586a13717ae057aa1b4127400c3de61cd5b9e7
37,579,987,550,853,065,000,000,000,000,000,000,000
8
sd-bus: if we receive an invalid dbus message, ignore and proceed dbus-daemon might have a slightly different idea of what a valid msg is than us (for example regarding valid msg and field sizes). Let's hence try to proceed if we can and thus drop messages rather than fail the connection if we fail to validate a message. Hopefully the differences in what is considered valid are not visible for real-life use cases, but are specific to exploit attempts only.
compress_application_type(int compression) { struct compression_decoder *d; for (d = compression_decoders; d->type != CMP_NOCOMPRESS; d++) { if (d->type == compression) return d->mime_type; } return NULL; }
0
[ "CWE-476" ]
w3m
59b91cd8e30c86f23476fa81ae005cabff49ebb6
214,429,789,837,078,360,000,000,000,000,000,000,000
10
Prevent segfault with malformed input type Bug-Debian: https://github.com/tats/w3m/issues/7
int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags) { int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags); /* * In order not to call set_task_cpu() on a blocking task we need * to rely on ttwu() to place the task on a valid ->cpus_allowed * cpu. * * Since this is common to all placement strategies, this lives here. * * [ this allows ->select_task() to simply return task_cpu(p) and * not worry about this generic constraint ] */ if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) || !cpu_online(cpu))) cpu = select_fallback_rq(task_cpu(p), p); return cpu; }
0
[ "CWE-703", "CWE-835" ]
linux
f26f9aff6aaf67e9a430d16c266f91b13a5bff64
88,370,157,070,454,530,000,000,000,000,000,000,000
20
Sched: fix skip_clock_update optimization idle_balance() drops/retakes rq->lock, leaving the previous task vulnerable to set_tsk_need_resched(). Clear it after we return from balancing instead, and in setup_thread_stack() as well, so no successfully descheduled or never scheduled task has it set. Need resched confused the skip_clock_update logic, which assumes that the next call to update_rq_clock() will come nearly immediately after being set. Make the optimization robust against the waking a sleeper before it sucessfully deschedules case by checking that the current task has not been dequeued before setting the flag, since it is that useless clock update we're trying to save, and clear unconditionally in schedule() proper instead of conditionally in put_prev_task(). Signed-off-by: Mike Galbraith <[email protected]> Reported-by: Bjoern B. Brandenburg <[email protected]> Tested-by: Yong Zhang <[email protected]> Signed-off-by: Peter Zijlstra <[email protected]> Cc: [email protected] LKML-Reference: <[email protected]> Signed-off-by: Ingo Molnar <[email protected]>