func
stringlengths 0
484k
| target
int64 0
1
| cwe
listlengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
// Lua binding: look up the dynamic-metadata entry for the filter name given
// as the second Lua argument.  Pushes a Lua table built from the filter's
// metadata fields and returns 1, or returns 0 (no values) when the filter
// has no metadata recorded.
int DynamicMetadataMapWrapper::luaGet(lua_State* state) {
  const char* requested_filter = luaL_checkstring(state, 2);
  const auto& filter_map = streamInfo().dynamicMetadata().filter_metadata();
  const auto entry = filter_map.find(requested_filter);
  if (entry != filter_map.end()) {
    // Found: materialize the protobuf struct as a Lua table on the stack.
    Filters::Common::Lua::MetadataMapHelper::createTable(state, entry->second.fields());
    return 1;
  }
  // No metadata for this filter: nothing is pushed for the Lua caller.
  return 0;
}
| 0 |
[] |
envoy
|
2c60632d41555ec8b3d9ef5246242be637a2db0f
| 103,663,324,996,254,320,000,000,000,000,000,000,000 | 11 |
http: header map security fixes for duplicate headers (#197)
Previously header matching did not match on all headers for
non-inline headers. This patch changes the default behavior to
always logically match on all headers. Multiple individual
headers will be logically concatenated with ',' similar to what
is done with inline headers. This makes the behavior effectively
consistent. This behavior can be temporary reverted by setting
the runtime value "envoy.reloadable_features.header_match_on_all_headers"
to "false".
Targeted fixes have been additionally performed on the following
extensions which make them consider all duplicate headers by default as
a comma concatenated list:
1) Any extension using CEL matching on headers.
2) The header to metadata filter.
3) The JWT filter.
4) The Lua filter.
Like primary header matching used in routing, RBAC, etc. this behavior
can be disabled by setting the runtime value
"envoy.reloadable_features.header_match_on_all_headers" to false.
Finally, the setCopy() header map API previously only set the first
header in the case of duplicate non-inline headers. setCopy() now
behaves similarly to the other set*() APIs and replaces all found
headers with a single value. This may have had security implications
in the extauth filter which uses this API. This behavior can be disabled
by setting the runtime value
"envoy.reloadable_features.http_set_copy_replace_all_headers" to false.
Fixes https://github.com/envoyproxy/envoy-setec/issues/188
Signed-off-by: Matt Klein <[email protected]>
|
// A PROXY protocol v1 header padded far past the length limit must be
// rejected with a proxy-protocol error instead of being accepted.
TEST_P(ProxyProtocolTest, V1TooLong) {
  constexpr uint8_t padding[8] = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '};
  connect(false);
  write("PROXY TCP4 1.2.3.4 2.3.4.5 100 100");
  // Pad the line with 256 bytes of spaces, 8 bytes per write.
  size_t written = 0;
  while (written < 256) {
    write(padding, sizeof(padding));
    written += sizeof(padding);
  }
  expectProxyProtoError();
}
| 0 |
[
"CWE-400"
] |
envoy
|
dfddb529e914d794ac552e906b13d71233609bf7
| 102,937,517,857,086,200,000,000,000,000,000,000,000 | 9 |
listener: Add configurable accepted connection limits (#153)
Add support for per-listener limits on accepted connections.
Signed-off-by: Tony Allen <[email protected]>
|
/* VM handler for "unset($cv)" on a compiled variable (opcode 153).
 * Marks the CV slot undefined and releases its old value if refcounted. */
ZEND_VM_HANDLER(153, ZEND_UNSET_CV, CV, UNUSED)
{
USE_OPLINE
zval *var = EX_VAR(opline->op1.var);
if (Z_REFCOUNTED_P(var)) {
/* Grab the refcounted payload before clobbering the slot, so the slot is
 * already UNDEF if the destructor below re-enters user code. */
zend_refcounted *garbage = Z_COUNTED_P(var);
ZVAL_UNDEF(var);
SAVE_OPLINE();
if (!GC_DELREF(garbage)) {
/* Last reference gone: destroy the value now. */
rc_dtor_func(garbage);
} else {
/* Still referenced elsewhere; it may be part of a cycle. */
gc_check_possible_root(garbage);
}
/* Destructor may have thrown; check for a pending exception. */
ZEND_VM_NEXT_OPCODE_CHECK_EXCEPTION();
} else {
/* Non-refcounted scalar: just mark the slot undefined. */
ZVAL_UNDEF(var);
}
ZEND_VM_NEXT_OPCODE();
}
| 0 |
[
"CWE-787"
] |
php-src
|
f1ce8d5f5839cb2069ea37ff424fb96b8cd6932d
| 234,563,647,213,233,250,000,000,000,000,000,000,000 | 21 |
Fix #73122: Integer Overflow when concatenating strings
We must avoid integer overflows in memory allocations, so we introduce
an additional check in the VM, and bail out in the rare case of an
overflow. Since the recent fix for bug #74960 still doesn't catch all
possible overflows, we fix that right away.
|
bool AuthorizationSessionImpl::isAuthorizedToChangeAsUser(const UserName& userName,
ActionType actionType) {
User* user = lookupUser(userName);
if (!user) {
return false;
}
ResourcePattern resourceSearchList[resourceSearchListCapacity];
const int resourceSearchListLength = buildResourceSearchList(
ResourcePattern::forDatabaseName(userName.getDB()), resourceSearchList);
ActionSet actions;
for (int i = 0; i < resourceSearchListLength; ++i) {
actions.addAllActionsFromSet(user->getActionsForResource(resourceSearchList[i]));
}
return actions.contains(actionType);
}
| 0 |
[
"CWE-613"
] |
mongo
|
e55d6e2292e5dbe2f97153251d8193d1cc89f5d7
| 82,974,613,886,714,090,000,000,000,000,000,000,000 | 16 |
SERVER-38984 Validate unique User ID on UserCache hit
|
/* Copy a native struct v4l2_buffer back to a 32-bit compat userspace
 * struct v4l2_buffer32.  Returns 0 on success or -EFAULT/-errno.
 *
 * NOTE(review): per the commit message attached to this record, the
 * surrounding compat-ioctl path mixed kernel-stack data with
 * compat_alloc_user_space() buffers under set_fs(KERNEL_DS), which allowed
 * userspace to tamper with the converted data -- confirm that this helper
 * is only ever called with "up" pointing at genuine user memory. */
static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up)
{
struct v4l2_plane32 __user *uplane32;
struct v4l2_plane __user *uplane;
compat_caddr_t p;
int num_planes;
int ret;
/* Validate the whole destination once, then copy the fixed header
 * fields field-by-field. */
if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
put_user(kp->index, &up->index) ||
put_user(kp->type, &up->type) ||
put_user(kp->flags, &up->flags) ||
put_user(kp->memory, &up->memory))
return -EFAULT;
if (put_user(kp->bytesused, &up->bytesused) ||
put_user(kp->field, &up->field) ||
put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
put_user(kp->timestamp.tv_usec, &up->timestamp.tv_usec) ||
copy_to_user(&up->timecode, &kp->timecode, sizeof(kp->timecode)) ||
put_user(kp->sequence, &up->sequence) ||
put_user(kp->reserved2, &up->reserved2) ||
put_user(kp->reserved, &up->reserved) ||
put_user(kp->length, &up->length))
return -EFAULT;
if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
/* Multiplanar: "length" holds the plane count and "m.planes"
 * points at the plane array to convert element-by-element. */
num_planes = kp->length;
if (num_planes == 0)
return 0;
/* kp->m.planes was stored as a user pointer by the get path;
 * the __force cast strips the address-space annotation. */
uplane = (__force struct v4l2_plane __user *)kp->m.planes;
if (get_user(p, &up->m.planes))
return -EFAULT;
uplane32 = compat_ptr(p);
while (--num_planes >= 0) {
ret = put_v4l2_plane32(uplane, uplane32, kp->memory);
if (ret)
return ret;
++uplane;
++uplane32;
}
} else {
/* Single-planar: the "m" union member to copy depends on the
 * buffer memory type. */
switch (kp->memory) {
case V4L2_MEMORY_MMAP:
case V4L2_MEMORY_OVERLAY:
if (put_user(kp->m.offset, &up->m.offset))
return -EFAULT;
break;
case V4L2_MEMORY_USERPTR:
if (put_user(kp->m.userptr, &up->m.userptr))
return -EFAULT;
break;
case V4L2_MEMORY_DMABUF:
if (put_user(kp->m.fd, &up->m.fd))
return -EFAULT;
break;
}
}
return 0;
}
| 1 |
[
"CWE-787"
] |
linux
|
a1dfb4c48cc1e64eeb7800a27c66a6f7e88d075a
| 142,818,376,030,884,630,000,000,000,000,000,000,000 | 63 |
media: v4l2-compat-ioctl32.c: refactor compat ioctl32 logic
The 32-bit compat v4l2 ioctl handling is implemented based on its 64-bit
equivalent. It converts 32-bit data structures into its 64-bit
equivalents and needs to provide the data to the 64-bit ioctl in user
space memory which is commonly allocated using
compat_alloc_user_space().
However, due to how that function is implemented, it can only be called
a single time for every syscall invocation.
Supposedly to avoid this limitation, the existing code uses a mix of
memory from the kernel stack and memory allocated through
compat_alloc_user_space().
Under normal circumstances, this would not work, because the 64-bit
ioctl expects all pointers to point to user space memory. As a
workaround, set_fs(KERNEL_DS) is called to temporarily disable this
extra safety check and allow kernel pointers. However, this might
introduce a security vulnerability: The result of the 32-bit to 64-bit
conversion is writeable by user space because the output buffer has been
allocated via compat_alloc_user_space(). A malicious user space process
could then manipulate pointers inside this output buffer, and due to the
previous set_fs(KERNEL_DS) call, functions like get_user() or put_user()
no longer prevent kernel memory access.
The new approach is to pre-calculate the total amount of user space
memory that is needed, allocate it using compat_alloc_user_space() and
then divide up the allocated memory to accommodate all data structures
that need to be converted.
An alternative approach would have been to retain the union type karg
that they allocated on the kernel stack in do_video_ioctl(), copy all
data from user space into karg and then back to user space. However, we
decided against this approach because it does not align with other
compat syscall implementations. Instead, we tried to replicate the
get_user/put_user pairs as found in other places in the kernel:
if (get_user(clipcount, &up->clipcount) ||
put_user(clipcount, &kp->clipcount)) return -EFAULT;
Notes from [email protected]:
This patch was taken from:
https://github.com/LineageOS/android_kernel_samsung_apq8084/commit/97b733953c06e4f0398ade18850f0817778255f7
Clearly nobody could be bothered to upstream this patch or at minimum
tell us :-( We only heard about this a week ago.
This patch was rebased and cleaned up. Compared to the original I
also swapped the order of the convert_in_user arguments so that they
matched copy_in_user. It was hard to review otherwise. I also replaced
the ALLOC_USER_SPACE/ALLOC_AND_GET by a normal function.
Fixes: 6b5a9492ca ("v4l: introduce string control support.")
Signed-off-by: Daniel Mentz <[email protected]>
Co-developed-by: Hans Verkuil <[email protected]>
Acked-by: Sakari Ailus <[email protected]>
Signed-off-by: Hans Verkuil <[email protected]>
Cc: <[email protected]> # for v4.15 and up
Signed-off-by: Mauro Carvalho Chehab <[email protected]>
|
/* Return the double payload of "value", or 0 when "value" is NULL or is
 * not of type xmlrpc_double. */
double XMLRPC_GetValueDouble(XMLRPC_VALUE value) {
    if (value == NULL || value->type != xmlrpc_double) {
        return 0;
    }
    return value->d;
}
| 0 |
[
"CWE-119"
] |
php-src
|
88412772d295ebf7dd34409534507dc9bcac726e
| 275,089,696,100,565,950,000,000,000,000,000,000,000 | 3 |
Fix bug #68027 - fix date parsing in XMLRPC lib
|
// No-op endianness specialization: single-byte (char) elements never need
// byte swapping, so this overload intentionally does nothing.
inline void invert_endianness(char* const, const cimg_ulong) {}
| 0 |
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
| 7,338,540,998,985,290,000,000,000,000,000,000,000 | 1 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file do not exceed file size.
|
/* Decide whether an I/O instruction exit from L2 should be forwarded to
 * the L1 hypervisor, based on vmcs12's I/O exiting controls and, when I/O
 * bitmaps are enabled, on the per-port bitmap bits covering every byte of
 * the access.  Returns 1 to forward the exit to L1, 0 to handle it in L0. */
static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
unsigned long exit_qualification;
gpa_t bitmap, last_bitmap;
unsigned int port;
int size;
u8 b;
/* Unconditional I/O exiting: every I/O instruction goes to L1. */
if (nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING))
return 1;
/* No bitmaps in use: L1 does not want this exit. */
if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
return 0;
/* Exit qualification encodes the port (bits 16+) and the access
 * size minus one (bits 0-2). */
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
port = exit_qualification >> 16;
size = (exit_qualification & 7) + 1;
last_bitmap = (gpa_t)-1;
b = -1;
/* Check one bitmap bit per byte of the access; a multi-byte access
 * exits if ANY covered port's bit is set. */
while (size > 0) {
/* Bitmap A covers ports 0x0000-0x7fff, B covers 0x8000-0xffff. */
if (port < 0x8000)
bitmap = vmcs12->io_bitmap_a;
else if (port < 0x10000)
bitmap = vmcs12->io_bitmap_b;
else
return 1;
bitmap += (port & 0x7fff) / 8;
/* Re-read the guest bitmap byte only when it changed; on read
 * failure conservatively forward the exit to L1. */
if (last_bitmap != bitmap)
if (kvm_read_guest(vcpu->kvm, bitmap, &b, 1))
return 1;
if (b & (1 << (port & 7)))
return 1;
port++;
size--;
last_bitmap = bitmap;
}
return 0;
}
| 0 |
[
"CWE-20"
] |
linux
|
bfd0a56b90005f8c8a004baf407ad90045c2b11e
| 329,583,790,775,951,700,000,000,000,000,000,000,000 | 45 |
nEPT: Nested INVEPT
If we let L1 use EPT, we should probably also support the INVEPT instruction.
In our current nested EPT implementation, when L1 changes its EPT table
for L2 (i.e., EPT12), L0 modifies the shadow EPT table (EPT02), and in
the course of this modification already calls INVEPT. But if last level
of shadow page is unsync not all L1's changes to EPT12 are intercepted,
which means roots need to be synced when L1 calls INVEPT. Global INVEPT
should not be different since roots are synced by kvm_mmu_load() each
time EPTP02 changes.
Reviewed-by: Xiao Guangrong <[email protected]>
Signed-off-by: Nadav Har'El <[email protected]>
Signed-off-by: Jun Nakajima <[email protected]>
Signed-off-by: Xinhao Xu <[email protected]>
Signed-off-by: Yang Zhang <[email protected]>
Signed-off-by: Gleb Natapov <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
/* Decode a 32-bit big-endian value from BUFFER into a host integer.
 * Bytes are read through unsigned char and the first byte is cast to
 * unsigned int before the "<< 24" shift, avoiding sign-extension UB on
 * implementations where plain char is signed.
 * NOTE(review): the return type is declared on a line above this chunk
 * (presumably "unsigned int" per host2net.h) -- confirm in context. */
buf32_to_uint (const void *buffer)
{
const unsigned char *p = buffer;
return (((unsigned int)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]);
}
| 0 |
[
"CWE-20"
] |
gnupg
|
2183683bd633818dd031b090b5530951de76f392
| 72,613,169,319,725,590,000,000,000,000,000,000,000 | 6 |
Use inline functions to convert buffer data to scalars.
* common/host2net.h (buf16_to_ulong, buf16_to_uint): New.
(buf16_to_ushort, buf16_to_u16): New.
(buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New.
--
Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to
avoid all sign extension on shift problems. Hanno Böck found a case
with an invalid read due to this problem. To fix that once and for
all almost all uses of "<< 24" and "<< 8" are changed by this patch to
use an inline function from host2net.h.
Signed-off-by: Werner Koch <[email protected]>
|
// Read-error callback: socket read failures are only logged here; no other
// state is modified.
void readErr(const AsyncSocketException& ex) noexcept override {
LOG(ERROR) << ex.what();
}
| 0 |
[
"CWE-125"
] |
folly
|
c321eb588909646c15aefde035fd3133ba32cdee
| 256,599,696,308,773,980,000,000,000,000,000,000,000 | 3 |
Handle close_notify as standard writeErr in AsyncSSLSocket.
Summary: Fixes CVE-2019-11934
Reviewed By: mingtaoy
Differential Revision: D18020613
fbshipit-source-id: db82bb250e53f0d225f1280bd67bc74abd417836
|
/* Set up a filtered proxy for the a11y (AT-SPI) D-Bus and wire it into the
 * sandbox: queries the session bus for the a11y bus address, creates a
 * proxy socket with a restrictive call filter, bind-mounts it into the
 * sandbox and exports AT_SPI_BUS_ADDRESS.  Returns FALSE when the a11y bus
 * is unavailable or proxying is disabled, TRUE on success.
 * NOTE(review): the return type is declared on a line above this chunk
 * (presumably gboolean) -- confirm in context. */
flatpak_run_add_a11y_dbus_args (FlatpakBwrap *app_bwrap,
FlatpakBwrap *proxy_arg_bwrap,
FlatpakContext *context,
FlatpakRunFlags flags)
{
g_autoptr(GDBusConnection) session_bus = NULL;
g_autofree char *a11y_address = NULL;
g_autoptr(GError) local_error = NULL;
g_autoptr(GDBusMessage) reply = NULL;
g_autoptr(GDBusMessage) msg = NULL;
g_autofree char *proxy_socket = NULL;
/* Caller asked for no a11y proxy at all. */
if ((flags & FLATPAK_RUN_FLAG_NO_A11Y_BUS_PROXY) != 0)
return FALSE;
session_bus = g_bus_get_sync (G_BUS_TYPE_SESSION, NULL, NULL);
if (session_bus == NULL)
return FALSE;
/* Ask the a11y bus launcher on the session bus for the real a11y bus
 * address (30s timeout). */
msg = g_dbus_message_new_method_call ("org.a11y.Bus", "/org/a11y/bus", "org.a11y.Bus", "GetAddress");
g_dbus_message_set_body (msg, g_variant_new ("()"));
reply =
g_dbus_connection_send_message_with_reply_sync (session_bus, msg,
G_DBUS_SEND_MESSAGE_FLAGS_NONE,
30000,
NULL,
NULL,
NULL);
if (reply)
{
if (g_dbus_message_to_gerror (reply, &local_error))
{
/* SERVICE_UNKNOWN just means no a11y bus is running; anything
 * else is worth a message. */
if (!g_error_matches (local_error, G_DBUS_ERROR, G_DBUS_ERROR_SERVICE_UNKNOWN))
g_message ("Can't find a11y bus: %s", local_error->message);
}
else
{
g_variant_get (g_dbus_message_get_body (reply),
"(s)", &a11y_address);
}
}
if (!a11y_address)
return FALSE;
proxy_socket = create_proxy_socket ("a11y-bus-proxy-XXXXXX");
if (proxy_socket == NULL)
return FALSE;
/* Fixed in-sandbox path; the proxy socket is bind-mounted there below. */
g_autofree char *sandbox_socket_path = g_strdup_printf ("/run/user/%d/at-spi-bus", getuid ());
g_autofree char *sandbox_dbus_address = g_strdup_printf ("unix:path=/run/user/%d/at-spi-bus", getuid ());
/* Proxy filter: only the registry calls an a11y client legitimately
 * needs are whitelisted. */
flatpak_bwrap_add_args (proxy_arg_bwrap,
a11y_address,
proxy_socket, "--filter", "--sloppy-names",
"--call=org.a11y.atspi.Registry=org.a11y.atspi.Socket.Embed@/org/a11y/atspi/accessible/root",
"--call=org.a11y.atspi.Registry=org.a11y.atspi.Socket.Unembed@/org/a11y/atspi/accessible/root",
"--call=org.a11y.atspi.Registry=org.a11y.atspi.Registry.GetRegisteredEvents@/org/a11y/atspi/registry",
"--call=org.a11y.atspi.Registry=org.a11y.atspi.DeviceEventController.GetKeystrokeListeners@/org/a11y/atspi/registry/deviceeventcontroller",
"--call=org.a11y.atspi.Registry=org.a11y.atspi.DeviceEventController.GetDeviceEventListeners@/org/a11y/atspi/registry/deviceeventcontroller",
"--call=org.a11y.atspi.Registry=org.a11y.atspi.DeviceEventController.NotifyListenersSync@/org/a11y/atspi/registry/deviceeventcontroller",
"--call=org.a11y.atspi.Registry=org.a11y.atspi.DeviceEventController.NotifyListenersAsync@/org/a11y/atspi/registry/deviceeventcontroller",
NULL);
if ((flags & FLATPAK_RUN_FLAG_LOG_A11Y_BUS) != 0)
flatpak_bwrap_add_args (proxy_arg_bwrap, "--log", NULL);
/* Expose the proxy socket read-only inside the sandbox and point the
 * toolkit at it. */
flatpak_bwrap_add_args (app_bwrap,
"--ro-bind", proxy_socket, sandbox_socket_path,
NULL);
flatpak_bwrap_set_env (app_bwrap, "AT_SPI_BUS_ADDRESS", sandbox_dbus_address, TRUE);
return TRUE;
}
| 0 |
[
"CWE-668"
] |
flatpak
|
cd2142888fc4c199723a0dfca1f15ea8788a5483
| 224,389,846,626,952,240,000,000,000,000,000,000,000 | 74 |
Don't expose /proc when running apply_extra
As shown by CVE-2019-5736, it is sometimes possible for the sandbox
app to access outside files using /proc/self/exe. This is not
typically an issue for flatpak as the sandbox runs as the user which
has no permissions to e.g. modify the host files.
However, when installing apps using extra-data into the system repo
we *do* actually run a sandbox as root. So, in this case we disable mounting
/proc in the sandbox, which will neuter attacks like this.
|
/* Variadic wrapper around vwarn() for callers without an interpreter
 * context; dTHX fetches the thread-local interpreter.
 * NOTE(review): the return type is declared on a line above this chunk
 * (presumably void) -- confirm in context. */
Perl_warn_nocontext(const char *pat, ...)
{
dTHX;
va_list args;
PERL_ARGS_ASSERT_WARN_NOCONTEXT;
va_start(args, pat);
vwarn(pat, &args);
va_end(args);
}
| 0 |
[
"CWE-119",
"CWE-703",
"CWE-787"
] |
perl5
|
34716e2a6ee2af96078d62b065b7785c001194be
| 332,208,413,932,167,550,000,000,000,000,000,000,000 | 9 |
Perl_my_setenv(); handle integer wrap
RT #133204
Wean this function off int/I32 and onto UV/Size_t.
Also, replace all malloc-ish calls with a wrapper that does
overflow checks,
In particular, it was doing (nlen + vlen + 2) which could wrap when
the combined length of the environment variable name and value
exceeded around 0x7fffffff.
The wrapper check function is probably overkill, but belt and braces...
NB this function has several variant parts, #ifdef'ed by platform
type; I have blindly changed the parts that aren't compiled under linux.
|
/* Deferred mailbox maintenance callback: repack the index if the mailbox
 * was flagged OPT_MAILBOX_NEEDS_REPACK, otherwise unlink expunged records
 * if flagged OPT_MAILBOX_NEEDS_UNLINK.  "rock" is the mailbox name. */
static void _delayed_cleanup(void *rock)
{
const char *mboxname = (const char *)rock;
struct mailbox *mailbox = NULL;
/* don't do the potentially expensive work of repacking mailboxes
* if we are in the middle of a shutdown */
if (in_shutdown) goto done;
/* Exclusive open: if someone else holds the mailbox, skip quietly. */
int r = mailbox_open_exclusive(mboxname, &mailbox);
if (r) goto done;
if (mailbox->i.options & OPT_MAILBOX_NEEDS_REPACK) {
/* Repack also unlinks, so clear both flags afterwards. */
mailbox_index_repack(mailbox, mailbox->i.minor_version);
// clear the flags here too so we don't try to repack again
mailbox->i.options &= ~(OPT_MAILBOX_NEEDS_REPACK|OPT_MAILBOX_NEEDS_UNLINK);
}
else if (mailbox->i.options & OPT_MAILBOX_NEEDS_UNLINK) {
mailbox_index_unlink(mailbox);
}
/* or we missed out - someone else beat us to it, all good */
done:
mailbox_close(&mailbox);
}
| 0 |
[] |
cyrus-imapd
|
1d6d15ee74e11a9bd745e80be69869e5fb8d64d6
| 110,958,649,072,041,100,000,000,000,000,000,000,000 | 25 |
mailbox.c/reconstruct.c: Add mailbox_mbentry_from_path()
|
/*
 * Compile ":lockvar" / ":unlockvar" in a :def function.  The actual
 * variable check happens at runtime: an ISN_EXEC (or ISN_LOCKUNLOCK for a
 * local dict/list member, which needs the local pushed on the stack first)
 * instruction is generated that re-executes the command text.
 * "deep" < 0 means the "!" form; otherwise it is the numeric depth.
 * Returns OK or FAIL.
 */
compile_lock_unlock(
    lval_T *lvp,
    char_u *name_end,
    exarg_T *eap,
    int deep,
    void *coookie)
{
    cctx_T *cctx = coookie;
    int cc = *name_end;
    char_u *p = lvp->ll_name;
    int ret = OK;
    size_t len;
    char_u *buf;
    isntype_T isn = ISN_EXEC;

    if (cctx->ctx_skip == SKIP_YES)
        return OK;

    // Cannot use :lockvar and :unlockvar on local variables.
    if (p[1] != ':')
    {
        char_u *end = find_name_end(p, NULL, NULL, FNE_CHECK_START);

        if (lookup_local(p, end - p, NULL, cctx) == OK)
        {
            char_u *s = p;

            if (*end != '.' && *end != '[')
            {
                emsg(_(e_cannot_lock_unlock_local_variable));
                return FAIL;
            }
            // For "d.member" put the local variable on the stack, it will be
            // passed to ex_lockvar() indirectly.
            if (compile_load(&s, end, cctx, FALSE, FALSE) == FAIL)
                return FAIL;
            isn = ISN_LOCKUNLOCK;
        }
    }

    // Checking is done at runtime.  Temporarily NUL-terminate the name so
    // it can be formatted into the command text.
    *name_end = NUL;
    // Room for the name plus the longest prefix: "unlockvar" (9), a '!' or
    // a space, up to 11 digits for "deep", a space and the NUL.  The old
    // "+ 20" margin could truncate the generated command for large "deep"
    // values.
    len = name_end - p + 30;
    buf = alloc(len);
    if (buf == NULL)
        ret = FAIL;
    else
    {
        char *cmd = eap->cmdidx == CMD_lockvar ? "lockvar" : "unlockvar";

        if (deep < 0)
            vim_snprintf((char *)buf, len, "%s! %s", cmd, p);
        else
            vim_snprintf((char *)buf, len, "%s %d %s", cmd, deep, p);
        ret = generate_EXEC_copy(cctx, isn, buf);

        vim_free(buf);
    }
    // Restore the overwritten byte on every path; previously it was only
    // restored on the success path, leaving the line truncated when
    // alloc() failed.
    *name_end = cc;
    return ret;
}
| 1 |
[
"CWE-703",
"CWE-122"
] |
vim
|
d1d8f6bacb489036d0fd479c9dd3c0102c988889
| 135,643,367,648,085,300,000,000,000,000,000,000,000 | 62 |
patch 9.0.0211: invalid memory access when compiling :lockvar
Problem: Invalid memory access when compiling :lockvar.
Solution: Don't read past the end of the line.
|
/* Trigger an L2T cache flush on the given core.
 * NOTE(review): the return type is declared on a line above this chunk
 * (presumably static void) -- confirm in context. */
v3d_flush_l2t(struct v3d_dev *v3d, int core)
{
/* While there is a busy bit (V3D_L2TCACTL_L2TFLS), we don't
* need to wait for completion before dispatching the job --
* L2T accesses will be stalled until the flush has completed.
* However, we do need to make sure we don't try to trigger a
* new flush while the L2_CLEAN queue is trying to
* synchronously clean after a job.
*/
mutex_lock(&v3d->cache_clean_lock);
V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
V3D_L2TCACTL_L2TFLS |
V3D_SET_FIELD(V3D_L2TCACTL_FLM_FLUSH, V3D_L2TCACTL_FLM));
mutex_unlock(&v3d->cache_clean_lock);
}
| 0 |
[
"CWE-400",
"CWE-401"
] |
linux
|
29cd13cfd7624726d9e6becbae9aa419ef35af7f
| 200,358,680,831,574,180,000,000,000,000,000,000,000 | 15 |
drm/v3d: Fix memory leak in v3d_submit_cl_ioctl
In the implementation of v3d_submit_cl_ioctl() there are two memory
leaks. One is when allocation for bin fails, and the other is when bin
initialization fails. If kcalloc fails to allocate memory for bin then
render->base should be put. Also, if v3d_job_init() fails to initialize
bin->base then allocated memory for bin should be released.
Fixes: a783a09ee76d ("drm/v3d: Refactor job management.")
Signed-off-by: Navid Emamdoost <[email protected]>
Reviewed-by: Eric Anholt <[email protected]>
Signed-off-by: Daniel Vetter <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
|
/* Release all global resources held by mysqldump before exit: the result
 * stream (unless it is stdout), the password, lookup tables, dynamic
 * strings and parsed defaults.  Guard flags/my_hash_inited() prevent
 * freeing structures that were never initialized. */
static void free_resources()
{
if (md_result_file && md_result_file != stdout)
my_fclose(md_result_file, MYF(0));
my_free(opt_password);
if (my_hash_inited(&ignore_table))
my_hash_free(&ignore_table);
if (extended_insert)
dynstr_free(&extended_row);
if (insert_pat_inited)
dynstr_free(&insert_pat);
if (defaults_argv)
free_defaults(defaults_argv);
/* Final MySQL client library cleanup. */
my_end(my_end_arg);
}
| 0 |
[
"CWE-295"
] |
mysql-server
|
b3e9211e48a3fb586e88b0270a175d2348935424
| 12,718,440,942,600,085,000,000,000,000,000,000,000 | 15 |
WL#9072: Backport WL#8785 to 5.5
|
/* Register an IDREF attribute value in the document's ref table.  Refs for
 * the same value are kept in a per-value list.  Returns the new xmlRef or
 * NULL on invalid arguments / allocation failure.
 * NOTE(review): the return type is declared on a line above this chunk
 * (presumably xmlRefPtr) -- confirm in context.
 * NOTE(review): per CVE-2022-23308, the streaming-mode test below
 * (ctxt->vstateNr != 0) was unreliable; when it wrongly classifies the
 * mode, ret->attr can end up dangling after the attribute is freed --
 * confirm this file carries the upstream fix for streaming detection. */
xmlAddRef(xmlValidCtxtPtr ctxt, xmlDocPtr doc, const xmlChar *value,
xmlAttrPtr attr) {
xmlRefPtr ret;
xmlRefTablePtr table;
xmlListPtr ref_list;
if (doc == NULL) {
return(NULL);
}
if (value == NULL) {
return(NULL);
}
if (attr == NULL) {
return(NULL);
}
/*
* Create the Ref table if needed.
*/
table = (xmlRefTablePtr) doc->refs;
if (table == NULL) {
doc->refs = table = xmlHashCreateDict(0, doc->dict);
}
if (table == NULL) {
xmlVErrMemory(ctxt,
"xmlAddRef: Table creation failed!\n");
return(NULL);
}
ret = (xmlRefPtr) xmlMalloc(sizeof(xmlRef));
if (ret == NULL) {
xmlVErrMemory(ctxt, "malloc failed");
return(NULL);
}
/*
* fill the structure.
*/
/* NOTE(review): xmlStrdup() results below are not checked for NULL;
 * on OOM the ref is stored with a NULL value/name. */
ret->value = xmlStrdup(value);
if ((ctxt != NULL) && (ctxt->vstateNr != 0)) {
/*
* Operating in streaming mode, attr is gonna disappear
*/
ret->name = xmlStrdup(attr->name);
ret->attr = NULL;
} else {
ret->name = NULL;
ret->attr = attr;
}
ret->lineno = xmlGetLineNo(attr->parent);
/* To add a reference :-
* References are maintained as a list of references,
* Lookup the entry, if no entry create new nodelist
* Add the owning node to the NodeList
* Return the ref
*/
if (NULL == (ref_list = xmlHashLookup(table, value))) {
/* First ref for this value: create the list and hash it in. */
if (NULL == (ref_list = xmlListCreate(xmlFreeRef, xmlDummyCompare))) {
xmlErrValid(NULL, XML_ERR_INTERNAL_ERROR,
"xmlAddRef: Reference list creation failed!\n",
NULL);
goto failed;
}
if (xmlHashAddEntry(table, value, ref_list) < 0) {
xmlListDelete(ref_list);
xmlErrValid(NULL, XML_ERR_INTERNAL_ERROR,
"xmlAddRef: Reference list insertion failed!\n",
NULL);
goto failed;
}
}
if (xmlListAppend(ref_list, ret) != 0) {
xmlErrValid(NULL, XML_ERR_INTERNAL_ERROR,
"xmlAddRef: Reference list insertion failed!\n",
NULL);
goto failed;
}
return(ret);
failed:
/* Error path: free the partially constructed ref. */
if (ret != NULL) {
if (ret->value != NULL)
xmlFree((char *)ret->value);
if (ret->name != NULL)
xmlFree((char *)ret->name);
xmlFree(ret);
}
return(NULL);
}
| 1 |
[
"CWE-416"
] |
libxml2
|
652dd12a858989b14eed4e84e453059cd3ba340e
| 52,142,536,819,327,650,000,000,000,000,000,000,000 | 90 |
[CVE-2022-23308] Use-after-free of ID and IDREF attributes
If a document is parsed with XML_PARSE_DTDVALID and without
XML_PARSE_NOENT, the value of ID attributes has to be normalized after
potentially expanding entities in xmlRemoveID. Otherwise, later calls
to xmlGetID can return a pointer to previously freed memory.
ID attributes which are empty or contain only whitespace after
entity expansion are affected in a similar way. This is fixed by
not storing such attributes in the ID table.
The test to detect streaming mode when validating against a DTD was
broken. In connection with the defects above, this could result in a
use-after-free when using the xmlReader interface with validation.
Fix detection of streaming mode to avoid similar issues. (This changes
the expected result of a test case. But as far as I can tell, using the
XML reader with XIncludes referencing the root document never worked
properly, anyway.)
All of these issues can result in denial of service. Using xmlReader
with validation could result in disclosure of memory via the error
channel, typically stderr. The security impact of xmlGetID returning
a pointer to freed memory depends on the application. The typical use
case of calling xmlGetID on an unmodified document is not affected.
|
/* Scheduler hook: tear down perf context state for "task" as it is
 * switched out in favor of "next".  Emits a switch event when any
 * consumer requested them, schedules out every per-task context, and
 * switches cgroup PMU state when cgroup events are active on this CPU. */
void __perf_event_task_sched_out(struct task_struct *task,
struct task_struct *next)
{
int ctxn;
/* Only emit context-switch records if someone is listening. */
if (atomic_read(&nr_switch_events))
perf_event_switch(task, next, false);
for_each_task_context_nr(ctxn)
perf_event_context_sched_out(task, ctxn, next);
/*
* if cgroup events exist on this CPU, then we need
* to check if we have to switch out PMU state.
* cgroup event are system-wide mode only
*/
if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
perf_cgroup_sched_out(task, next);
}
| 0 |
[
"CWE-401"
] |
tip
|
7bdb157cdebbf95a1cd94ed2e01b338714075d00
| 10,060,170,711,090,891,000,000,000,000,000,000,000 | 19 |
perf/core: Fix a memory leak in perf_event_parse_addr_filter()
As shown through runtime testing, the "filename" allocation is not
always freed in perf_event_parse_addr_filter().
There are three possible ways that this could happen:
- It could be allocated twice on subsequent iterations through the loop,
- or leaked on the success path,
- or on the failure path.
Clean up the code flow to make it obvious that 'filename' is always
freed in the reallocation path and in the two return paths as well.
We rely on the fact that kfree(NULL) is NOP and filename is initialized
with NULL.
This fixes the leak. No other side effects expected.
[ Dan Carpenter: cleaned up the code flow & added a changelog. ]
[ Ingo Molnar: updated the changelog some more. ]
Fixes: 375637bc5249 ("perf/core: Introduce address range filtering")
Signed-off-by: "kiyin(尹亮)" <[email protected]>
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Cc: "Srivatsa S. Bhat" <[email protected]>
Cc: Anthony Liguori <[email protected]>
--
kernel/events/core.c | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
|
/* Serialize a RowMarkClause node to the out-node text format; each
 * WRITE_* macro appends one labeled field to "str".
 * NOTE(review): the return type is declared on a line above this chunk
 * (presumably static void) -- confirm in context. */
_outRowMarkClause(StringInfo str, const RowMarkClause *node)
{
WRITE_NODE_TYPE("ROWMARKCLAUSE");
WRITE_UINT_FIELD(rti);
WRITE_ENUM_FIELD(strength, LockClauseStrength);
WRITE_BOOL_FIELD(noWait);
WRITE_BOOL_FIELD(pushedDown);
}
| 0 |
[
"CWE-362"
] |
postgres
|
5f173040e324f6c2eebb90d86cf1b0cdb5890f0a
| 106,962,973,134,143,000,000,000,000,000,000,000,000 | 9 |
Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062
|
/*
 * printf-style formatted write to a save stream.
 *
 * Formats into a fixed 4 KB stack buffer and forwards the result to
 * WriteStr().  Output longer than the buffer is silently truncated.
 */
void Writef(SAVESTREAM* f, const char* frm, ...)
{
    char Buffer[4096];
    va_list args;

    va_start(args, frm);
    // vsnprintf() with the full buffer size always NUL-terminates (C99),
    // which makes the previous manual Buffer[4095] = 0 unnecessary and
    // recovers the one usable byte the old size argument of 4095 wasted.
    vsnprintf(Buffer, sizeof(Buffer), frm, args);
    WriteStr(f, Buffer);
    va_end(args);
}
| 0 |
[] |
Little-CMS
|
65e2f1df3495edc984f7e0d7b7b24e29d851e240
| 236,081,948,490,576,470,000,000,000,000,000,000,000 | 12 |
Fix some warnings from static analysis
|
/*
 * Handle an incoming datagram on an NBT name socket.
 *
 * Reads one packet, parses it as an NBT name packet and dispatches it:
 * non-reply packets go to the incoming-request handler; replies are
 * matched to a pending request by transaction id, falling back to the
 * unexpected-packet handler.
 */
static void nbt_name_socket_recv(struct nbt_name_socket *nbtsock)
{
	TALLOC_CTX *tmp_ctx = talloc_new(nbtsock);
	NTSTATUS status;
	enum ndr_err_code ndr_err;
	struct socket_address *src;
	DATA_BLOB blob;
	size_t nread, dsize;
	struct nbt_name_packet *packet;
	struct nbt_name_request *req;
	status = socket_pending(nbtsock->sock, &dsize);
	if (!NT_STATUS_IS_OK(status)) {
		talloc_free(tmp_ctx);
		return;
	}
	blob = data_blob_talloc(tmp_ctx, NULL, dsize);
	/*
	 * For dsize == 0, data_blob_talloc() returns the NULL blob
	 * {NULL, 0}, so blob.data == NULL only indicates out of memory
	 * when dsize != 0.  An empty UDP datagram must still be received
	 * below to drain it from the socket; returning early here left it
	 * queued, making the event loop report the socket readable forever
	 * and spinning the server at 100% CPU (CVE-2020-14303).
	 */
	if (blob.data == NULL && dsize != 0) {
		talloc_free(tmp_ctx);
		return;
	}
	status = socket_recvfrom(nbtsock->sock, blob.data, blob.length, &nread,
				 tmp_ctx, &src);
	if (!NT_STATUS_IS_OK(status)) {
		talloc_free(tmp_ctx);
		return;
	}
	packet = talloc(tmp_ctx, struct nbt_name_packet);
	if (packet == NULL) {
		talloc_free(tmp_ctx);
		return;
	}
	/* parse the request */
	ndr_err = ndr_pull_struct_blob(&blob, packet, packet,
				       (ndr_pull_flags_fn_t)ndr_pull_nbt_name_packet);
	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
		status = ndr_map_error2ntstatus(ndr_err);
		DEBUG(2,("Failed to parse incoming NBT name packet - %s\n",
			 nt_errstr(status)));
		talloc_free(tmp_ctx);
		return;
	}
	if (DEBUGLVL(10)) {
		DEBUG(10,("Received nbt packet of length %d from %s:%d\n",
			  (int)blob.length, src->addr, src->port));
		NDR_PRINT_DEBUG(nbt_name_packet, packet);
	}
	/* if its not a reply then pass it off to the incoming request
	   handler, if any */
	if (!(packet->operation & NBT_FLAG_REPLY)) {
		if (nbtsock->incoming.handler) {
			nbtsock->incoming.handler(nbtsock, packet, src);
		}
		talloc_free(tmp_ctx);
		return;
	}
	/* find the matching request */
	req = (struct nbt_name_request *)idr_find(nbtsock->idr,
						  packet->name_trn_id);
	if (req == NULL) {
		if (nbtsock->unexpected.handler) {
			nbtsock->unexpected.handler(nbtsock, packet, src);
		} else {
			DEBUG(10,("Failed to match request for incoming name packet id 0x%04x on %p\n",
				  packet->name_trn_id, nbtsock));
		}
		talloc_free(tmp_ctx);
		return;
	}
	/* Hand ownership of the packet and source address to the request
	   before the temporary context is freed. */
	talloc_steal(req, packet);
	talloc_steal(req, src);
	talloc_free(tmp_ctx);
	nbt_name_socket_handle_response_packet(req, packet, src);
}
| 1 |
[
"CWE-834"
] |
samba
|
3cc0f1eeda5f133532dda31eef9fc1b394127e50
| 175,937,496,058,538,000,000,000,000,000,000,000,000 | 82 |
CVE-2020-14303: s4 nbt: fix busy loop on empty UDP packet
An empty UDP packet put the nbt server into a busy loop that consumes
100% of a cpu.
BUG: https://bugzilla.samba.org/show_bug.cgi?id=14417
Signed-off-by: Gary Lockyer <[email protected]>
Autobuild-User(master): Karolin Seeger <[email protected]>
Autobuild-Date(master): Thu Jul 2 10:26:24 UTC 2020 on sn-devel-184
|
/* Apply the 1-D 8-point IDCT to each of "h" rows of "in" (8 coefficients
 * per row), writing level-shifted, clipped 8-bit samples to "out" (rows
 * spaced C_WIDTH apart).  The temporaries t0..q3 are assigned by the
 * ok_jpg_idct_1d_8 macro. */
static inline void ok_jpg_idct_1d_row_8(int h, const int *in, uint8_t *out) {
static const int out_shift = 19;
int t0, t1, t2;
int p0, p1, p2, p3;
int q0, q1, q2, q3;
for (int y = 0; y < h; y++) {
// Quick check to avoid mults
if (in[1] == 0 && in[2] == 0 && in[3] == 0 && in[4] == 0 &&
in[5] == 0 && in[6] == 0 && in[7] == 0) {
/* Only the DC coefficient is set: every output sample in the row
 * is the same value, so skip the full transform. */
const int offset = 1 << (out_shift - 12 - 1);
t0 = (in[0] + offset) >> (out_shift - 12);
memset(out, ok_jpg_clip_uint8(t0 + 128), 8);
} else {
/* Full transform; p/q hold the even/odd butterfly halves, so the
 * outputs mirror around the row center (+q for 0-3, -q for 4-7). */
ok_jpg_idct_1d_8(in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7]);
out[0] = ok_jpg_clip_uint8(((p0 + q0) >> out_shift) + 128);
out[1] = ok_jpg_clip_uint8(((p1 + q1) >> out_shift) + 128);
out[2] = ok_jpg_clip_uint8(((p2 + q2) >> out_shift) + 128);
out[3] = ok_jpg_clip_uint8(((p3 + q3) >> out_shift) + 128);
out[4] = ok_jpg_clip_uint8(((p3 - q3) >> out_shift) + 128);
out[5] = ok_jpg_clip_uint8(((p2 - q2) >> out_shift) + 128);
out[6] = ok_jpg_clip_uint8(((p1 - q1) >> out_shift) + 128);
out[7] = ok_jpg_clip_uint8(((p0 - q0) >> out_shift) + 128);
}
in += 8;
out += C_WIDTH;
}
}
| 0 |
[
"CWE-787"
] |
ok-file-formats
|
a9cc1711dd4ed6a215038f1c5c03af0ef52c3211
| 312,691,349,882,511,430,000,000,000,000,000,000,000 | 29 |
ok_jpg: Fix invalid DHT (#11)
|
/*
 * Append one MIME part to the text view: either the decoded text body,
 * or a clickable "[name type (size)]" attachment line — with an inline
 * image preview for image parts when prefs_common.inline_img is set.
 * message/rfc822 parts get their embedded headers rendered instead.
 *
 * Fixes: the GdkPixbuf reference was leaked on both stop_loading
 * early-return paths, and uri->uri / uri->filename were leaked when
 * only g_free(uri) was called on the stop_loading path.
 */
static void textview_add_part(TextView *textview, MimeInfo *mimeinfo)
{
	GtkAllocation allocation;
	GtkTextView *text;
	GtkTextBuffer *buffer;
	GtkTextIter iter, start_iter;
	gchar buf[BUFFSIZE];
	GPtrArray *headers = NULL;
	const gchar *name;
	gchar *content_type;
	gint charcount;
	START_TIMING("");

	cm_return_if_fail(mimeinfo != NULL);

	text = GTK_TEXT_VIEW(textview->text);
	buffer = gtk_text_view_get_buffer(text);
	charcount = gtk_text_buffer_get_char_count(buffer);
	gtk_text_buffer_get_end_iter(buffer, &iter);

	if (textview->stop_loading) {
		return;
	}

	/* Multipart containers carry no renderable content themselves. */
	if (mimeinfo->type == MIMETYPE_MULTIPART) {
		END_TIMING();
		return;
	}

	textview->prev_quote_level = -1;

	if ((mimeinfo->type == MIMETYPE_MESSAGE) && !g_ascii_strcasecmp(mimeinfo->subtype, "rfc822")) {
		FILE *fp;

		if (mimeinfo->content == MIMECONTENT_MEM)
			fp = str_open_as_stream(mimeinfo->data.mem);
		else
			fp = claws_fopen(mimeinfo->data.filename, "rb");
		if (!fp) {
			FILE_OP_ERROR(mimeinfo->data.filename, "claws_fopen");
			END_TIMING();
			return;
		}
		if (fseek(fp, mimeinfo->offset, SEEK_SET) < 0) {
			FILE_OP_ERROR(mimeinfo->data.filename, "fseek");
			claws_fclose(fp);
			END_TIMING();
			return;
		}
		headers = textview_scan_header(textview, fp);
		if (headers) {
			if (charcount > 0)
				gtk_text_buffer_insert(buffer, &iter, "\n", 1);

			if (procmime_mimeinfo_parent(mimeinfo) == NULL)
				textview_show_tags(textview);
			textview_show_header(textview, headers);
			procheader_header_array_destroy(headers);
		}
		claws_fclose(fp);
		END_TIMING();
		return;
	}

	/* Build the "[name type (size)]" label for the attachment line. */
	name = procmime_mimeinfo_get_parameter(mimeinfo, "filename");
	content_type = procmime_get_content_type_str(mimeinfo->type,
						     mimeinfo->subtype);
	if (name == NULL)
		name = procmime_mimeinfo_get_parameter(mimeinfo, "name");
	if (name != NULL)
		g_snprintf(buf, sizeof(buf), _("[%s %s (%d bytes)]"),
			   name, content_type, mimeinfo->length);
	else
		g_snprintf(buf, sizeof(buf), _("[%s (%d bytes)]"),
			   content_type, mimeinfo->length);
	g_free(content_type);

	if (mimeinfo->disposition == DISPOSITIONTYPE_ATTACHMENT
	    || (mimeinfo->disposition == DISPOSITIONTYPE_INLINE &&
		mimeinfo->type != MIMETYPE_TEXT)) {
		gtk_text_buffer_insert(buffer, &iter, "\n", 1);
		TEXTVIEW_INSERT_LINK(buf, "sc://select_attachment", mimeinfo);
		gtk_text_buffer_insert(buffer, &iter, " \n", -1);
		if (mimeinfo->type == MIMETYPE_IMAGE &&
		    prefs_common.inline_img ) {
			GdkPixbuf *pixbuf;
			GError *error = NULL;
			ClickableText *uri;

			START_TIMING("inserting image");

			pixbuf = procmime_get_part_as_pixbuf(mimeinfo, &error);
			if (error != NULL) {
				g_warning("Can't load the image: %s\n", error->message);
				g_error_free(error);
				END_TIMING();
				return;
			}

			if (textview->stop_loading) {
				/* Fix: release the pixbuf reference that was
				 * previously leaked on this early return. */
				g_object_unref(pixbuf);
				END_TIMING();
				return;
			}

			gtk_widget_get_allocation(textview->scrolledwin, &allocation);
			pixbuf = claws_load_pixbuf_fitting(pixbuf, prefs_common.inline_img,
					prefs_common.fit_img_height, allocation.width,
					allocation.height);

			if (textview->stop_loading) {
				/* Fix: same leak as above, for the fitted pixbuf. */
				g_object_unref(pixbuf);
				END_TIMING();
				return;
			}

			uri = g_new0(ClickableText, 1);
			uri->uri = g_strdup("");
			uri->filename = g_strdup("sc://select_attachment");
			uri->data = mimeinfo;
			uri->start = gtk_text_iter_get_offset(&iter);
			gtk_text_buffer_insert_pixbuf(buffer, &iter, pixbuf);
			g_object_unref(pixbuf);
			if (textview->stop_loading) {
				/* Fix: free the strings the uri owns, not just
				 * the struct (uri->uri / uri->filename leaked). */
				g_free(uri->uri);
				g_free(uri->filename);
				g_free(uri);
				return;
			}
			uri->end = gtk_text_iter_get_offset(&iter);
			textview->uri_list =
				g_slist_prepend(textview->uri_list, uri);
			gtk_text_buffer_insert(buffer, &iter, " ", 1);
			gtk_text_buffer_get_iter_at_offset(buffer, &start_iter, uri->start);
			gtk_text_buffer_apply_tag_by_name(buffer, "link",
					&start_iter, &iter);
			END_TIMING();
			GTK_EVENTS_FLUSH();
		}
	} else if (mimeinfo->type == MIMETYPE_TEXT) {
		if (prefs_common.display_header && (charcount > 0))
			gtk_text_buffer_insert(buffer, &iter, "\n", 1);

		/* Bracket the body with marks so other code can locate it. */
		if (!gtk_text_buffer_get_mark(buffer, "body_start")) {
			gtk_text_buffer_get_end_iter(buffer, &iter);
			gtk_text_buffer_create_mark(buffer, "body_start", &iter, TRUE);
		}

		textview_write_body(textview, mimeinfo);

		if (!gtk_text_buffer_get_mark(buffer, "body_end")) {
			gtk_text_buffer_get_end_iter(buffer, &iter);
			gtk_text_buffer_create_mark(buffer, "body_end", &iter, TRUE);
		}
	}
	END_TIMING();
}
| 0 |
[
"CWE-601"
] |
claws
|
ac286a71ed78429e16c612161251b9ea90ccd431
| 68,522,932,821,365,640,000,000,000,000,000,000,000 | 156 |
harden link checker before accepting click
|
/*
 * Parse the 'errorformat' option value "efm" into a linked list of efm_T
 * structures, one entry per comma-separated part, each holding a regexp
 * program compiled from the %-directives of that part.
 * Returns the list head, or NULL on failure (allocation or pattern error)
 * or when the option contains no pattern at all.
 */
parse_efm_option(char_u *efm)
{
    efm_T	*fmt_ptr = NULL;
    efm_T	*fmt_first = NULL;
    efm_T	*fmt_last = NULL;
    char_u	*fmtstr = NULL;
    int		len;
    int		sz;

    // Each part of the format string is copied and modified from errorformat
    // to regex prog.  Only a few % characters are allowed.

    // Get some space to modify the format string into.
    sz = efm_regpat_bufsz(efm);
    if ((fmtstr = alloc_id(sz, aid_qf_efm_fmtstr)) == NULL)
	goto parse_efm_error;

    while (efm[0] != NUL)
    {
	// Allocate a new eformat structure and put it at the end of the list
	fmt_ptr = ALLOC_CLEAR_ONE_ID(efm_T, aid_qf_efm_fmtpart);
	if (fmt_ptr == NULL)
	    goto parse_efm_error;
	if (fmt_first == NULL)		// first one
	    fmt_first = fmt_ptr;
	else
	    fmt_last->next = fmt_ptr;
	fmt_last = fmt_ptr;

	// Isolate one part in the 'errorformat' option
	len = efm_option_part_len(efm);

	// Translate this part into a regexp pattern, then compile it.
	if (efm_to_regpat(efm, len, fmt_ptr, fmtstr) == FAIL)
	    goto parse_efm_error;
	if ((fmt_ptr->prog = vim_regcomp(fmtstr, RE_MAGIC + RE_STRING)) == NULL)
	    goto parse_efm_error;
	// Advance to next part
	efm = skip_to_option_part(efm + len);	// skip comma and spaces
    }

    if (fmt_first == NULL)	// nothing found
	emsg(_(e_errorformat_contains_no_pattern));
    // Note: fall through to the normal cleanup even after the error message;
    // NULL is returned in that case since fmt_first is still NULL.
    goto parse_efm_end;

parse_efm_error:
    // Error: free the partially built list; NULL is returned below.
    free_efm_list(&fmt_first);

parse_efm_end:
    vim_free(fmtstr);	// scratch buffer is no longer needed
    return fmt_first;
}
| 0 |
[
"CWE-416"
] |
vim
|
4f1b083be43f351bc107541e7b0c9655a5d2c0bb
| 4,848,993,403,496,642,000,000,000,000,000,000,000 | 53 |
patch 9.0.0322: crash when no errors and 'quickfixtextfunc' is set
Problem: Crash when no errors and 'quickfixtextfunc' is set.
Solution: Do not handle errors if there aren't any.
|
/*
 * Priority of the request that will run after @rq on @engine, or INT_MIN
 * when @rq is the last request on the engine's active list.
 */
switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
{
	if (!list_is_last(&rq->sched.link, &engine->active.requests))
		return rq_prio(list_next_entry(rq, sched.link));

	return INT_MIN;
}
| 0 |
[] |
linux
|
bc8a76a152c5f9ef3b48104154a65a68a8b76946
| 52,338,356,390,899,370,000,000,000,000,000,000,000 | 7 |
drm/i915/gen9: Clear residual context state on context switch
Intel ID: PSIRT-TA-201910-001
CVEID: CVE-2019-14615
Intel GPU Hardware prior to Gen11 does not clear EU state
during a context switch. This can result in information
leakage between contexts.
For Gen8 and Gen9, hardware provides a mechanism for
fast cleardown of the EU state, by issuing a PIPE_CONTROL
with bit 27 set. We can use this in a context batch buffer
to explicitly cleardown the state on every context switch.
As this workaround is already in place for gen8, we can borrow
the code verbatim for Gen9.
Signed-off-by: Mika Kuoppala <[email protected]>
Signed-off-by: Akeem G Abodunrin <[email protected]>
Cc: Kumar Valsan Prathap <[email protected]>
Cc: Chris Wilson <[email protected]>
Cc: Balestrieri Francesco <[email protected]>
Cc: Bloomfield Jon <[email protected]>
Cc: Dutt Sudeep <[email protected]>
|
/*
 * Allocate a zeroed kiocb for @ctx, first reserving a request slot
 * (refilling the available pool once if it is empty).  On allocation
 * failure the reserved slot is returned to the pool.  Returns NULL when
 * no slot or no memory is available.
 */
static inline struct kiocb *aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req;

	if (!get_reqs_available(ctx)) {
		user_refill_reqs_available(ctx);
		if (!get_reqs_available(ctx))
			return NULL;
	}

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL | __GFP_ZERO);
	if (unlikely(!req)) {
		/* Give the reserved slot back before failing. */
		put_reqs_available(ctx, 1);
		return NULL;
	}

	percpu_ref_get(&ctx->reqs);
	req->ki_ctx = ctx;
	return req;
}
| 0 |
[] |
linux-stable
|
c4f4b82694fe48b02f7a881a1797131a6dad1364
| 51,301,871,074,473,640,000,000,000,000,000,000,000 | 22 |
AIO: properly check iovec sizes
In Linus's tree, the iovec code has been reworked massively, but in
older kernels the AIO layer should be checking this before passing the
request on to other layers.
Many thanks to Ben Hawkes of Google Project Zero for pointing out the
issue.
Reported-by: Ben Hawkes <[email protected]>
Acked-by: Benjamin LaHaise <[email protected]>
Tested-by: Willy Tarreau <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
/*
 * Poll the LAN parameter until a preceding write is reflected by a
 * matching read-back.  Each failed attempt (no response, length
 * mismatch, or data mismatch) sleeps IPMI_LANP_TIMEOUT and consumes one
 * of the 10 retries; warnings for mismatches are only printed on the
 * final attempt.  Returns 0 on success, -1 when retries are exhausted.
 */
set_lan_param_wait(struct ipmi_intf * intf, uint8_t chan,
		   int param, uint8_t * data, int len)
{
	struct lan_param * p;
	int retries_left = 10;	/* 10 retries */

	lprintf(LOG_DEBUG, "Waiting for Set LAN Parameter to complete...");
	if (verbose > 1)
		printbuf(data, len, "SET DATA");

	while (1) {
		p = get_lan_param(intf, chan, param);
		if (!p) {
			sleep(IPMI_LANP_TIMEOUT);
			if (retries_left-- == 0)
				return -1;
			continue;
		}
		if (verbose > 1)
			printbuf(p->data, p->data_len, "READ DATA");

		if (p->data_len != len) {
			sleep(IPMI_LANP_TIMEOUT);
			if (retries_left-- == 0) {
				lprintf(LOG_WARNING, "Mismatched data lengths: %d != %d",
					p->data_len, len);
				return -1;
			}
			continue;
		}

		if (memcmp(data, p->data, len) != 0) {
			sleep(IPMI_LANP_TIMEOUT);
			if (retries_left-- == 0) {
				lprintf(LOG_WARNING, "LAN Parameter Data does not match! "
					"Write may have failed.");
				return -1;
			}
			continue;
		}

		/* Read-back matches what was written. */
		return 0;
	}
}
| 0 |
[
"CWE-120"
] |
ipmitool
|
d45572d71e70840e0d4c50bf48218492b79c1a10
| 331,480,396,033,927,150,000,000,000,000,000,000,000 | 42 |
lanp: Fix buffer overflows in get_lan_param_select
Partial fix for CVE-2020-5208, see
https://github.com/ipmitool/ipmitool/security/advisories/GHSA-g659-9qxw-p7cp
The `get_lan_param_select` function is missing a validation check on the
response’s `data_len`, which it then returns to caller functions, where
stack buffer overflow can occur.
|
/*
 * Command-line expansion source for the :mapclear argument.
 * There is exactly one candidate, "<buffer>", returned for idx 0.
 */
get_mapclear_arg(expand_T *xp UNUSED, int idx)
{
    return idx == 0 ? (char_u *)"<buffer>" : NULL;
}
| 0 |
[
"CWE-78"
] |
vim
|
8c62a08faf89663e5633dc5036cd8695c80f1075
| 127,692,227,159,825,990,000,000,000,000,000,000,000 | 6 |
patch 8.1.0881: can execute shell commands in rvim through interfaces
Problem: Can execute shell commands in rvim through interfaces.
Solution: Disable using interfaces in restricted mode. Allow for writing
file with writefile(), histadd() and a few others.
|
/*
 * Duplicate an iov_iter, deep-copying the segment array for the bvec,
 * iovec and kvec flavours.  Pipe iterators cannot be duplicated (WARN
 * and return NULL); discard and xarray iterators carry no segment
 * array, so NULL is returned for those too.
 */
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	size_t nbytes;

	*new = *old;

	if (unlikely(iov_iter_is_pipe(new))) {
		WARN_ON(1);
		return NULL;
	}
	if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new)))
		return NULL;

	if (iov_iter_is_bvec(new)) {
		nbytes = new->nr_segs * sizeof(struct bio_vec);
		new->bvec = kmemdup(new->bvec, nbytes, flags);
		return new->bvec;
	}

	/* iovec and kvec have identical layout */
	nbytes = new->nr_segs * sizeof(struct iovec);
	new->iov = kmemdup(new->iov, nbytes, flags);
	return new->iov;
}
| 0 |
[
"CWE-665",
"CWE-284"
] |
linux
|
9d2231c5d74e13b2a0546fee6737ee4446017903
| 9,695,670,227,787,770,000,000,000,000,000,000,000 | 19 |
lib/iov_iter: initialize "flags" in new pipe_buffer
The functions copy_page_to_iter_pipe() and push_pipe() can both
allocate a new pipe_buffer, but the "flags" member initializer is
missing.
Fixes: 241699cd72a8 ("new iov_iter flavour: pipe-backed")
To: Alexander Viro <[email protected]>
To: [email protected]
To: [email protected]
Cc: [email protected]
Signed-off-by: Max Kellermann <[email protected]>
Signed-off-by: Al Viro <[email protected]>
|
/*
 * Look up and invoke the function with OID functionId using the given
 * collation and four non-null Datum arguments.  A NULL result is treated
 * as an internal error, since callers of this convenience routine do not
 * expect one.
 */
OidFunctionCall4Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
					 Datum arg3, Datum arg4)
{
	FmgrInfo	flinfo;
	FunctionCallInfoData fcinfo;
	Datum		args[4];
	Datum		result;
	int			argno;

	args[0] = arg1;
	args[1] = arg2;
	args[2] = arg3;
	args[3] = arg4;

	fmgr_info(functionId, &flinfo);
	InitFunctionCallInfoData(fcinfo, &flinfo, 4, collation, NULL, NULL);

	for (argno = 0; argno < 4; argno++)
	{
		fcinfo.arg[argno] = args[argno];
		fcinfo.argnull[argno] = false;
	}

	result = FunctionCallInvoke(&fcinfo);

	/* Check for null result, since caller is clearly not expecting one */
	if (fcinfo.isnull)
		elog(ERROR, "function %u returned NULL", flinfo.fn_oid);

	return result;
}
| 0 |
[
"CWE-264"
] |
postgres
|
537cbd35c893e67a63c59bc636c3e888bd228bc7
| 274,033,923,662,218,920,000,000,000,000,000,000,000 | 28 |
Prevent privilege escalation in explicit calls to PL validators.
The primary role of PL validators is to be called implicitly during
CREATE FUNCTION, but they are also normal functions that a user can call
explicitly. Add a permissions check to each validator to ensure that a
user cannot use explicit validator calls to achieve things he could not
otherwise achieve. Back-patch to 8.4 (all supported versions).
Non-core procedural language extensions ought to make the same two-line
change to their own validators.
Andres Freund, reviewed by Tom Lane and Noah Misch.
Security: CVE-2014-0061
|
/*
 * Flush EPT translations for the context identified by @eptp, using a
 * single-context INVEPT when the CPU supports it and a global flush
 * otherwise.  A no-op when EPT is disabled.
 */
static inline void ept_sync_context(u64 eptp)
{
	if (!enable_ept)
		return;

	if (cpu_has_vmx_invept_context())
		__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
	else
		ept_sync_global();
}
| 0 |
[
"CWE-400"
] |
linux-2.6
|
9581d442b9058d3699b4be568b6e5eae38a41493
| 103,001,995,902,521,770,000,000,000,000,000,000,000 | 9 |
KVM: Fix fs/gs reload oops with invalid ldt
kvm reloads the host's fs and gs blindly, however the underlying segment
descriptors may be invalid due to the user modifying the ldt after loading
them.
Fix by using the safe accessors (loadsegment() and load_gs_index()) instead
of home grown unsafe versions.
This is CVE-2010-3698.
KVM-Stable-Tag.
Signed-off-by: Avi Kivity <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]>
|
/*
 * Queue an object (with its mode) on revs->pending for later traversal.
 * UNINTERESTING objects are rejected in no-walk mode, and commits that
 * are absorbed into an active reflog walk are not queued at all.
 */
static void add_pending_object_with_mode(struct rev_info *revs, struct object *obj, const char *name, unsigned mode)
{
	int uninteresting = obj->flags & UNINTERESTING;

	if (revs->no_walk && uninteresting)
		die("object ranges do not make sense when not walking revisions");

	if (obj->type == OBJ_COMMIT && revs->reflog_info &&
	    add_reflog_for_walk(revs->reflog_info, (struct commit *)obj, name))
		return;

	add_object_array_with_mode(obj, name, &revs->pending, mode);
}
| 0 |
[
"CWE-119"
] |
git
|
fd55a19eb1d49ae54008d932a65f79cd6fda45c9
| 120,834,399,528,139,760,000,000,000,000,000,000,000 | 10 |
Fix buffer overflow in git diff
If PATH_MAX on your system is smaller than a path stored, it may cause
buffer overflow and stack corruption in diff_addremove() and diff_change()
functions when running git-diff
Signed-off-by: Dmitry Potapov <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]>
|
/* Appends the human-readable name of 'version' to 'msg'. */
ofputil_format_version_name(struct ds *msg, enum ofp_version version)
{
    ds_put_cstr(msg, ofputil_version_to_string(version));
}
| 0 |
[
"CWE-772"
] |
ovs
|
77ad4225d125030420d897c873e4734ac708c66b
| 186,829,360,771,517,870,000,000,000,000,000,000,000 | 4 |
ofp-util: Fix memory leaks on error cases in ofputil_decode_group_mod().
Found by libFuzzer.
Reported-by: Bhargava Shastry <[email protected]>
Signed-off-by: Ben Pfaff <[email protected]>
Acked-by: Justin Pettit <[email protected]>
|
/*
 * Release every allocation owned by the image: the per-row scanline
 * tables (palette-indexed and truecolor), the polygon scratch array,
 * the line-style array, and finally the image structure itself.
 */
gdImageDestroy (gdImagePtr im)
{
  int row;

  if (im->pixels)
    {
      for (row = 0; row < im->sy; row++)
	gdFree (im->pixels[row]);
      gdFree (im->pixels);
    }
  if (im->tpixels)
    {
      for (row = 0; row < im->sy; row++)
	gdFree (im->tpixels[row]);
      gdFree (im->tpixels);
    }
  if (im->polyInts)
    gdFree (im->polyInts);
  if (im->style)
    gdFree (im->style);
  gdFree (im);
}
| 0 |
[
"CWE-119"
] |
php-src
|
feba44546c27b0158f9ac20e72040a224b918c75
| 243,741,327,058,724,840,000,000,000,000,000,000,000 | 29 |
Fixed bug #22965 (Crash in gd lib's ImageFillToBorder()).
|
/* NOTE(review): the function signature (presumably __key_link_end, taking
 * the keyring, the index key and the pending assoc_array edit) lies above
 * this fragment and is not visible here — confirm against the full file. */
__releases(&keyring_serialise_link_sem)
{
	BUG_ON(index_key->type == NULL);
	kenter("%d,%s,", keyring->serial, index_key->type->name);

	/* Keyring-into-keyring links are serialised by a global rwsem;
	 * drop it now that the link attempt is over. */
	if (index_key->type == &key_type_keyring)
		up_write(&keyring_serialise_link_sem);

	/* A still-live (not dead_leaf) edit means the link was never
	 * committed: shrink the payload reservation back by the link's
	 * quota bytes and discard the pending assoc_array edit. */
	if (edit && !edit->dead_leaf) {
		key_payload_reserve(keyring,
				    keyring->datalen - KEYQUOTA_LINK_BYTES);
		assoc_array_cancel_edit(edit);
	}
	up_write(&keyring->sem);
}
| 1 |
[
"CWE-119",
"CWE-787"
] |
linux
|
ca4da5dd1f99fe9c59f1709fb43e818b18ad20e0
| 31,557,457,625,386,580,000,000,000,000,000,000,000 | 15 |
KEYS: ensure we free the assoc array edit if edit is valid
__key_link_end is not freeing the associated array edit structure
and this leads to a 512 byte memory leak each time an identical
existing key is added with add_key().
The reason the add_key() system call returns okay is that
key_create_or_update() calls __key_link_begin() before checking to see
whether it can update a key directly rather than adding/replacing - which
it turns out it can. Thus __key_link() is not called through
__key_instantiate_and_link() and __key_link_end() must cancel the edit.
CVE-2015-1333
Signed-off-by: Colin Ian King <[email protected]>
Signed-off-by: David Howells <[email protected]>
Signed-off-by: James Morris <[email protected]>
|
/* Verify that a userspace log base is usable for this virtqueue: the
 * guest memory table must permit logging (when VHOST_F_LOG_ALL is
 * negotiated) and, if used-ring logging is enabled, the log must cover
 * the whole used ring. */
static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
			    void __user *log_base)
{
	struct vhost_memory *mp;
	/* EVENT_IDX appends a 2-byte avail_event after the used ring. */
	size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	/* The memory table is RCU-managed; holding vq->mutex pins it. */
	mp = rcu_dereference_protected(vq->dev->memory,
				       lockdep_is_held(&vq->mutex));
	return vq_memory_access_ok(log_base, mp,
			    vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
					sizeof *vq->used +
					vq->num * sizeof *vq->used->ring + s));
}
| 0 |
[] |
linux-2.6
|
bd97120fc3d1a11f3124c7c9ba1d91f51829eb85
| 216,868,168,050,489,570,000,000,000,000,000,000,000 | 14 |
vhost: fix length for cross region descriptor
If a single descriptor crosses a region, the
second chunk length should be decremented
by size translated so far, instead it includes
the full descriptor length.
Signed-off-by: Michael S. Tsirkin <[email protected]>
Acked-by: Jason Wang <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/*
 * Parse one property section of a CDF (OLE2 summary-information) stream
 * located at byte offset `offs' within `sst'.  Decoded properties are
 * appended to *info (grown with realloc; *count used entries, *maxcount
 * allocated entries).  Returns 0 on success, -1 on failure.
 * NOTE(review): on failure *info is freed but not reset to NULL, so the
 * caller is left holding a dangling pointer — confirm callers cope.
 */
cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
    uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount)
{
	const cdf_section_header_t *shp;
	cdf_section_header_t sh;
	const uint8_t *p, *q, *e;
	int16_t s16;
	int32_t s32;
	uint32_t u32;
	int64_t s64;
	uint64_t u64;
	cdf_timestamp_t tp;
	size_t i, o, o4, nelements, j;
	cdf_property_info_t *inp;

	/* Guard the later offs-based pointer arithmetic against overflow. */
	if (offs > UINT32_MAX / 4) {
		errno = EFTYPE;
		goto out;
	}
	shp = CAST(const cdf_section_header_t *, (const void *)
	    ((const char *)sst->sst_tab + offs));
	if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1)
		goto out;
	sh.sh_len = CDF_TOLE4(shp->sh_len);
#define CDF_SHLEN_LIMIT (UINT32_MAX / 8)
	if (sh.sh_len > CDF_SHLEN_LIMIT) {
		errno = EFTYPE;
		goto out;
	}
	sh.sh_properties = CDF_TOLE4(shp->sh_properties);
#define CDF_PROP_LIMIT (UINT32_MAX / (4 * sizeof(*inp)))
	if (sh.sh_properties > CDF_PROP_LIMIT)
		goto out;
	DPRINTF(("section len: %u properties %u\n", sh.sh_len,
	    sh.sh_properties));
	/* Grow (or create) the caller's property array for this section. */
	if (*maxcount) {
		/* NOTE(review): only the old *maxcount is compared against
		 * CDF_PROP_LIMIT; the sum *maxcount + sh.sh_properties is
		 * not re-checked before the realloc size computation —
		 * verify the individual limits prevent a wrap here. */
		if (*maxcount > CDF_PROP_LIMIT)
			goto out;
		*maxcount += sh.sh_properties;
		inp = CAST(cdf_property_info_t *,
		    realloc(*info, *maxcount * sizeof(*inp)));
	} else {
		*maxcount = sh.sh_properties;
		inp = CAST(cdf_property_info_t *,
		    malloc(*maxcount * sizeof(*inp)));
	}
	if (inp == NULL)
		goto out;
	*info = inp;
	inp += *count;		/* first free slot */
	*count += sh.sh_properties;
	/* p: start of the id/offset table; e: one past the section end. */
	p = CAST(const uint8_t *, (const void *)
	    ((const char *)(const void *)sst->sst_tab +
	    offs + sizeof(sh)));
	e = CAST(const uint8_t *, (const void *)
	    (((const char *)(const void *)shp) + sh.sh_len));
	if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
		goto out;
	for (i = 0; i < sh.sh_properties; i++) {
		size_t tail = (i << 1) + 1;
		if (cdf_check_stream_offset(sst, h, p, tail * sizeof(uint32_t),
		    __LINE__) == -1)
			goto out;
		size_t ofs = CDF_GETUINT32(p, tail);
		/* q: this property's type/value record. */
		q = (const uint8_t *)(const void *)
		    ((const char *)(const void *)p + ofs
		    - 2 * sizeof(uint32_t));
		/* NOTE(review): only the upper bound (q > e) is checked; an
		 * `ofs' below 2*sizeof(uint32_t) would place q before p —
		 * confirm underflow cannot occur with crafted input. */
		if (q > e) {
			DPRINTF(("Ran of the end %p > %p\n", q, e));
			goto out;
		}
		inp[i].pi_id = CDF_GETUINT32(p, i << 1);
		inp[i].pi_type = CDF_GETUINT32(q, 0);
		DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
		    i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
		if (inp[i].pi_type & CDF_VECTOR) {
			/* Vector values carry an element count first. */
			nelements = CDF_GETUINT32(q, 1);
			if (nelements == 0) {
				DPRINTF(("CDF_VECTOR with nelements == 0\n"));
				goto out;
			}
			o = 2;
		} else {
			nelements = 1;
			o = 1;
		}
		o4 = o * sizeof(uint32_t);	/* byte offset of the value */
		if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED))
			goto unknown;
		switch (inp[i].pi_type & CDF_TYPEMASK) {
		case CDF_NULL:
		case CDF_EMPTY:
			break;
		case CDF_SIGNED16:
			if (inp[i].pi_type & CDF_VECTOR)
				goto unknown;
			(void)memcpy(&s16, &q[o4], sizeof(s16));
			inp[i].pi_s16 = CDF_TOLE2(s16);
			break;
		case CDF_SIGNED32:
			if (inp[i].pi_type & CDF_VECTOR)
				goto unknown;
			(void)memcpy(&s32, &q[o4], sizeof(s32));
			inp[i].pi_s32 = CDF_TOLE4((uint32_t)s32);
			break;
		case CDF_BOOL:
		case CDF_UNSIGNED32:
			if (inp[i].pi_type & CDF_VECTOR)
				goto unknown;
			(void)memcpy(&u32, &q[o4], sizeof(u32));
			inp[i].pi_u32 = CDF_TOLE4(u32);
			break;
		case CDF_SIGNED64:
			if (inp[i].pi_type & CDF_VECTOR)
				goto unknown;
			(void)memcpy(&s64, &q[o4], sizeof(s64));
			inp[i].pi_s64 = CDF_TOLE8((uint64_t)s64);
			break;
		case CDF_UNSIGNED64:
			if (inp[i].pi_type & CDF_VECTOR)
				goto unknown;
			(void)memcpy(&u64, &q[o4], sizeof(u64));
			inp[i].pi_u64 = CDF_TOLE8((uint64_t)u64);
			break;
		case CDF_FLOAT:
			if (inp[i].pi_type & CDF_VECTOR)
				goto unknown;
			(void)memcpy(&u32, &q[o4], sizeof(u32));
			u32 = CDF_TOLE4(u32);
			/* Bit-copy to avoid strict-aliasing issues. */
			memcpy(&inp[i].pi_f, &u32, sizeof(inp[i].pi_f));
			break;
		case CDF_DOUBLE:
			if (inp[i].pi_type & CDF_VECTOR)
				goto unknown;
			(void)memcpy(&u64, &q[o4], sizeof(u64));
			u64 = CDF_TOLE8((uint64_t)u64);
			memcpy(&inp[i].pi_d, &u64, sizeof(inp[i].pi_d));
			break;
		case CDF_LENGTH32_STRING:
		case CDF_LENGTH32_WSTRING:
			if (nelements > 1) {
				/* A string vector may need more slots than
				 * were reserved; regrow the array. */
				size_t nelem = inp - *info;
				if (*maxcount > CDF_PROP_LIMIT
				    || nelements > CDF_PROP_LIMIT)
					goto out;
				*maxcount += nelements;
				inp = CAST(cdf_property_info_t *,
				    realloc(*info, *maxcount * sizeof(*inp)));
				if (inp == NULL)
					goto out;
				*info = inp;
				inp = *info + nelem;
			}
			DPRINTF(("nelements = %" SIZE_T_FORMAT "u\n",
			    nelements));
			/* Each element: 4-byte length, then the bytes,
			 * advanced in 4-byte (uint32) units.
			 * NOTE(review): the `q + o >= e' bound is checked
			 * only after the element has been read — confirm
			 * the read itself cannot run past `e'. */
			for (j = 0; j < nelements && i < sh.sh_properties;
			    j++, i++)
			{
				uint32_t l = CDF_GETUINT32(q, o);
				inp[i].pi_str.s_len = l;
				inp[i].pi_str.s_buf = (const char *)
				    (const void *)(&q[o4 + sizeof(l)]);
				DPRINTF(("l = %d, r = %" SIZE_T_FORMAT
				    "u, s = %s\n", l,
				    CDF_ROUND(l, sizeof(l)),
				    inp[i].pi_str.s_buf));
				if (l & 1)
					l++;
				o += l >> 1;
				if (q + o >= e)
					goto out;
				o4 = o * sizeof(uint32_t);
			}
			/* Outer loop's i++ compensates for the extra bump. */
			i--;
			break;
		case CDF_FILETIME:
			if (inp[i].pi_type & CDF_VECTOR)
				goto unknown;
			(void)memcpy(&tp, &q[o4], sizeof(tp));
			inp[i].pi_tp = CDF_TOLE8((uint64_t)tp);
			break;
		case CDF_CLIPBOARD:
			if (inp[i].pi_type & CDF_VECTOR)
				goto unknown;
			break;
		default:
		unknown:
			/* Unhandled types are noted and skipped, not fatal. */
			DPRINTF(("Don't know how to deal with %x\n",
			    inp[i].pi_type));
			break;
		}
	}
	return 0;
out:
	free(*info);
	return -1;
}
| 1 |
[
"CWE-703",
"CWE-189"
] |
file
|
0641e56be1af003aa02c7c6b0184466540637233
| 37,267,751,515,960,885,000,000,000,000,000,000,000 | 197 |
Prevent wrap around (Remi Collet at redhat)
|
/* Obtain `bytes' of memory from the OS for the collector's heap.       */
/* With MMAP_SUPPORTED, sbrk is tried first; after its first failure    */
/* mmap becomes the primary source, with one more sbrk attempt as a     */
/* last resort in case sbrk space has become available again.           */
ptr_t GC_unix_get_mem(size_t bytes)
{
# if defined(MMAP_SUPPORTED)
    /* By default, we try both sbrk and mmap, in that order.    */
    static GC_bool sbrk_failed = FALSE;
    ptr_t result = 0;

    if (!sbrk_failed) result = GC_unix_sbrk_get_mem(bytes);
    if (0 == result) {
        /* Remember the failure so future calls go straight to mmap.    */
        sbrk_failed = TRUE;
        result = GC_unix_mmap_get_mem(bytes);
    }
    if (0 == result) {
        /* Try sbrk again, in case sbrk memory became available. */
        result = GC_unix_sbrk_get_mem(bytes);
    }
    return result;
# else /* !MMAP_SUPPORTED */
    return GC_unix_sbrk_get_mem(bytes);
# endif
}
| 0 |
[
"CWE-119"
] |
bdwgc
|
7292c02fac2066d39dd1bcc37d1a7054fd1e32ee
| 232,541,686,956,514,940,000,000,000,000,000,000,000 | 21 |
Fix malloc routines to prevent size value wrap-around
See issue #135 on Github.
* allchblk.c (GC_allochblk, GC_allochblk_nth): Use
OBJ_SZ_TO_BLOCKS_CHECKED instead of OBJ_SZ_TO_BLOCKS.
* malloc.c (GC_alloc_large): Likewise.
* alloc.c (GC_expand_hp_inner): Type of "bytes" local variable changed
from word to size_t; cast ROUNDUP_PAGESIZE argument to size_t; prevent
overflow when computing GC_heapsize+bytes > GC_max_heapsize.
* dbg_mlc.c (GC_debug_malloc, GC_debug_malloc_ignore_off_page,
GC_debug_malloc_atomic_ignore_off_page, GC_debug_generic_malloc,
GC_debug_generic_malloc_inner,
GC_debug_generic_malloc_inner_ignore_off_page,
GC_debug_malloc_stubborn, GC_debug_malloc_atomic,
GC_debug_malloc_uncollectable, GC_debug_malloc_atomic_uncollectable):
Use SIZET_SAT_ADD (instead of "+" operator) to add extra bytes to lb
value.
* fnlz_mlc.c (GC_finalized_malloc): Likewise.
* gcj_mlc.c (GC_debug_gcj_malloc): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, ROUNDUP_PAGESIZE): Likewise.
* include/private/gcconfig.h (GET_MEM): Likewise.
* mallocx.c (GC_malloc_many, GC_memalign): Likewise.
* os_dep.c (GC_wince_get_mem, GC_win32_get_mem): Likewise.
* typd_mlc.c (GC_malloc_explicitly_typed,
GC_malloc_explicitly_typed_ignore_off_page,
GC_calloc_explicitly_typed): Likewise.
* headers.c (GC_scratch_alloc): Change type of bytes_to_get from word
to size_t (because ROUNDUP_PAGESIZE_IF_MMAP result type changed).
* include/private/gc_priv.h: Include limits.h (unless SIZE_MAX already
defined).
* include/private/gc_priv.h (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Move from
malloc.c file.
* include/private/gc_priv.h (SIZET_SAT_ADD): New macro (defined before
include gcconfig.h).
* include/private/gc_priv.h (EXTRA_BYTES, GC_page_size): Change type
to size_t.
* os_dep.c (GC_page_size): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, ROUNDUP_PAGESIZE): Add comment about the argument.
* include/private/gcconfig.h (GET_MEM): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, OBJ_SZ_TO_BLOCKS, ROUNDUP_PAGESIZE,
ROUNDUP_PAGESIZE_IF_MMAP): Rename argument to "lb".
* include/private/gc_priv.h (OBJ_SZ_TO_BLOCKS_CHECKED): New macro.
* include/private/gcconfig.h (GC_win32_get_mem, GC_wince_get_mem,
GC_unix_get_mem): Change argument type from word to int.
* os_dep.c (GC_unix_mmap_get_mem, GC_unix_get_mem,
GC_unix_sbrk_get_mem, GC_wince_get_mem, GC_win32_get_mem): Likewise.
* malloc.c (GC_alloc_large_and_clear): Call OBJ_SZ_TO_BLOCKS only
if no value wrap around is guaranteed.
* malloc.c (GC_generic_malloc): Do not check for lb_rounded < lb case
(because ROUNDED_UP_GRANULES and GRANULES_TO_BYTES guarantees no value
wrap around).
* mallocx.c (GC_generic_malloc_ignore_off_page): Likewise.
* misc.c (GC_init_size_map): Change "i" local variable type from int
to size_t.
* os_dep.c (GC_write_fault_handler, catch_exception_raise): Likewise.
* misc.c (GC_envfile_init): Cast len to size_t when passed to
ROUNDUP_PAGESIZE_IF_MMAP.
* os_dep.c (GC_setpagesize): Cast GC_sysinfo.dwPageSize and
GETPAGESIZE() to size_t (when setting GC_page_size).
* os_dep.c (GC_unix_mmap_get_mem, GC_unmap_start, GC_remove_protection):
Expand ROUNDUP_PAGESIZE macro but without value wrap-around checking
(the argument is of word type).
* os_dep.c (GC_unix_mmap_get_mem): Replace -GC_page_size with
~GC_page_size+1 (because GC_page_size is unsigned); remove redundant
cast to size_t.
* os_dep.c (GC_unix_sbrk_get_mem): Add explicit cast of GC_page_size
to SBRK_ARG_T.
* os_dep.c (GC_wince_get_mem): Change type of res_bytes local variable
to size_t.
* typd_mlc.c: Do not include limits.h.
* typd_mlc.c (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Remove (as defined in
gc_priv.h now).
|
MagickExport void XFontBrowserWidget(Display *display,XWindows *windows,
const char *action,char *reply)
{
#define BackButtonText "Back"
#define CancelButtonText "Cancel"
#define FontnameText "Name:"
#define FontPatternText "Pattern:"
#define ResetButtonText "Reset"
char
back_pattern[MaxTextExtent],
**fontlist,
**listhead,
primary_selection[MaxTextExtent],
reset_pattern[MaxTextExtent],
text[MaxTextExtent];
int
fonts,
x,
y;
int
i;
static char
glob_pattern[MaxTextExtent] = "*";
static MagickStatusType
mask = (MagickStatusType) (CWWidth | CWHeight | CWX | CWY);
Status
status;
unsigned int
height,
text_width,
visible_fonts,
width;
size_t
delay,
state;
XEvent
event;
XFontStruct
*font_info;
XTextProperty
window_name;
XWidgetInfo
action_info,
back_info,
cancel_info,
expose_info,
list_info,
mode_info,
north_info,
reply_info,
reset_info,
scroll_info,
selection_info,
slider_info,
south_info,
text_info;
XWindowChanges
window_changes;
/*
Get font list and sort in ascending order.
*/
assert(display != (Display *) NULL);
assert(windows != (XWindows *) NULL);
assert(action != (char *) NULL);
assert(reply != (char *) NULL);
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",action);
XSetCursorState(display,windows,MagickTrue);
XCheckRefreshWindows(display,windows);
(void) CopyMagickString(back_pattern,glob_pattern,MaxTextExtent);
(void) CopyMagickString(reset_pattern,"*",MaxTextExtent);
fontlist=XListFonts(display,glob_pattern,32767,&fonts);
if (fonts == 0)
{
/*
Pattern failed, obtain all the fonts.
*/
XNoticeWidget(display,windows,"Unable to obtain fonts names:",
glob_pattern);
(void) CopyMagickString(glob_pattern,"*",MaxTextExtent);
fontlist=XListFonts(display,glob_pattern,32767,&fonts);
if (fontlist == (char **) NULL)
{
XNoticeWidget(display,windows,"Unable to obtain fonts names:",
glob_pattern);
return;
}
}
/*
Sort font list in ascending order.
*/
listhead=fontlist;
fontlist=(char **) AcquireQuantumMemory((size_t) fonts,sizeof(*fontlist));
if (fontlist == (char **) NULL)
{
XNoticeWidget(display,windows,"MemoryAllocationFailed",
"UnableToViewFonts");
return;
}
for (i=0; i < fonts; i++)
fontlist[i]=listhead[i];
qsort((void *) fontlist,(size_t) fonts,sizeof(*fontlist),FontCompare);
/*
Determine Font Browser widget attributes.
*/
font_info=windows->widget.font_info;
text_width=0;
for (i=0; i < fonts; i++)
if (WidgetTextWidth(font_info,fontlist[i]) > text_width)
text_width=WidgetTextWidth(font_info,fontlist[i]);
width=WidgetTextWidth(font_info,(char *) action);
if (WidgetTextWidth(font_info,CancelButtonText) > width)
width=WidgetTextWidth(font_info,CancelButtonText);
if (WidgetTextWidth(font_info,ResetButtonText) > width)
width=WidgetTextWidth(font_info,ResetButtonText);
if (WidgetTextWidth(font_info,BackButtonText) > width)
width=WidgetTextWidth(font_info,BackButtonText);
width+=QuantumMargin;
if (WidgetTextWidth(font_info,FontPatternText) > width)
width=WidgetTextWidth(font_info,FontPatternText);
if (WidgetTextWidth(font_info,FontnameText) > width)
width=WidgetTextWidth(font_info,FontnameText);
height=(unsigned int) (font_info->ascent+font_info->descent);
/*
Position Font Browser widget.
*/
windows->widget.width=width+MagickMin((int) text_width,(int) MaxTextWidth)+
6*QuantumMargin;
windows->widget.min_width=width+MinTextWidth+4*QuantumMargin;
if (windows->widget.width < windows->widget.min_width)
windows->widget.width=windows->widget.min_width;
windows->widget.height=(unsigned int)
(((85*height) >> 2)+((13*QuantumMargin) >> 1)+4);
windows->widget.min_height=(unsigned int)
(((27*height) >> 1)+((13*QuantumMargin) >> 1)+4);
if (windows->widget.height < windows->widget.min_height)
windows->widget.height=windows->widget.min_height;
XConstrainWindowPosition(display,&windows->widget);
/*
Map Font Browser widget.
*/
(void) CopyMagickString(windows->widget.name,"Browse and Select a Font",
MaxTextExtent);
status=XStringListToTextProperty(&windows->widget.name,1,&window_name);
if (status != False)
{
XSetWMName(display,windows->widget.id,&window_name);
XSetWMIconName(display,windows->widget.id,&window_name);
(void) XFree((void *) window_name.value);
}
window_changes.width=(int) windows->widget.width;
window_changes.height=(int) windows->widget.height;
window_changes.x=windows->widget.x;
window_changes.y=windows->widget.y;
(void) XReconfigureWMWindow(display,windows->widget.id,
windows->widget.screen,mask,&window_changes);
(void) XMapRaised(display,windows->widget.id);
windows->widget.mapped=MagickFalse;
/*
Respond to X events.
*/
XGetWidgetInfo((char *) NULL,&slider_info);
XGetWidgetInfo((char *) NULL,&north_info);
XGetWidgetInfo((char *) NULL,&south_info);
XGetWidgetInfo((char *) NULL,&expose_info);
XGetWidgetInfo((char *) NULL,&selection_info);
visible_fonts=0;
delay=SuspendTime << 2;
state=UpdateConfigurationState;
do
{
if (state & UpdateConfigurationState)
{
int
id;
/*
Initialize button information.
*/
XGetWidgetInfo(CancelButtonText,&cancel_info);
cancel_info.width=width;
cancel_info.height=(unsigned int) ((3*height) >> 1);
cancel_info.x=(int)
(windows->widget.width-cancel_info.width-QuantumMargin-2);
cancel_info.y=(int)
(windows->widget.height-cancel_info.height-QuantumMargin);
XGetWidgetInfo(action,&action_info);
action_info.width=width;
action_info.height=(unsigned int) ((3*height) >> 1);
action_info.x=cancel_info.x-(cancel_info.width+(QuantumMargin >> 1)+
(action_info.bevel_width << 1));
action_info.y=cancel_info.y;
XGetWidgetInfo(BackButtonText,&back_info);
back_info.width=width;
back_info.height=(unsigned int) ((3*height) >> 1);
back_info.x=QuantumMargin;
back_info.y=((5*QuantumMargin) >> 1)+height;
XGetWidgetInfo(ResetButtonText,&reset_info);
reset_info.width=width;
reset_info.height=(unsigned int) ((3*height) >> 1);
reset_info.x=QuantumMargin;
reset_info.y=back_info.y+back_info.height+QuantumMargin;
/*
Initialize reply information.
*/
XGetWidgetInfo(reply,&reply_info);
reply_info.raised=MagickFalse;
reply_info.bevel_width--;
reply_info.width=windows->widget.width-width-((6*QuantumMargin) >> 1);
reply_info.height=height << 1;
reply_info.x=(int) (width+(QuantumMargin << 1));
reply_info.y=action_info.y-(action_info.height << 1)-QuantumMargin;
/*
Initialize mode information.
*/
XGetWidgetInfo(reply,&mode_info);
mode_info.bevel_width=0;
mode_info.width=(unsigned int)
(action_info.x-reply_info.x-QuantumMargin);
mode_info.height=action_info.height << 1;
mode_info.x=reply_info.x;
mode_info.y=action_info.y-action_info.height+action_info.bevel_width;
/*
Initialize scroll information.
*/
XGetWidgetInfo((char *) NULL,&scroll_info);
scroll_info.bevel_width--;
scroll_info.width=height;
scroll_info.height=(unsigned int)
(reply_info.y-back_info.y-(QuantumMargin >> 1));
scroll_info.x=reply_info.x+(reply_info.width-scroll_info.width);
scroll_info.y=back_info.y-reply_info.bevel_width;
scroll_info.raised=MagickFalse;
scroll_info.trough=MagickTrue;
north_info=scroll_info;
north_info.raised=MagickTrue;
north_info.width-=(north_info.bevel_width << 1);
north_info.height=north_info.width-1;
north_info.x+=north_info.bevel_width;
north_info.y+=north_info.bevel_width;
south_info=north_info;
south_info.y=scroll_info.y+scroll_info.height-scroll_info.bevel_width-
south_info.height;
id=slider_info.id;
slider_info=north_info;
slider_info.id=id;
slider_info.width-=2;
slider_info.min_y=north_info.y+north_info.height+north_info.bevel_width+
slider_info.bevel_width+2;
slider_info.height=scroll_info.height-((slider_info.min_y-
scroll_info.y+1) << 1)+4;
visible_fonts=(unsigned int) (scroll_info.height*
PerceptibleReciprocal((double) height+(height >> 3)));
if (fonts > (int) visible_fonts)
slider_info.height=(visible_fonts*slider_info.height)/fonts;
slider_info.max_y=south_info.y-south_info.bevel_width-
slider_info.bevel_width-2;
slider_info.x=scroll_info.x+slider_info.bevel_width+1;
slider_info.y=slider_info.min_y;
expose_info=scroll_info;
expose_info.y=slider_info.y;
/*
Initialize list information.
*/
XGetWidgetInfo((char *) NULL,&list_info);
list_info.raised=MagickFalse;
list_info.bevel_width--;
list_info.width=(unsigned int)
(scroll_info.x-reply_info.x-(QuantumMargin >> 1));
list_info.height=scroll_info.height;
list_info.x=reply_info.x;
list_info.y=scroll_info.y;
if (windows->widget.mapped == MagickFalse)
state|=JumpListState;
/*
Initialize text information.
*/
*text='\0';
XGetWidgetInfo(text,&text_info);
text_info.center=MagickFalse;
text_info.width=reply_info.width;
text_info.height=height;
text_info.x=list_info.x-(QuantumMargin >> 1);
text_info.y=QuantumMargin;
/*
Initialize selection information.
*/
XGetWidgetInfo((char *) NULL,&selection_info);
selection_info.center=MagickFalse;
selection_info.width=list_info.width;
selection_info.height=(unsigned int) ((9*height) >> 3);
selection_info.x=list_info.x;
state&=(~UpdateConfigurationState);
}
if (state & RedrawWidgetState)
{
/*
Redraw Font Browser window.
*/
x=QuantumMargin;
y=text_info.y+((text_info.height-height) >> 1)+font_info->ascent;
(void) XDrawString(display,windows->widget.id,
windows->widget.annotate_context,x,y,FontPatternText,
Extent(FontPatternText));
(void) CopyMagickString(text_info.text,glob_pattern,MaxTextExtent);
XDrawWidgetText(display,&windows->widget,&text_info);
XDrawBeveledButton(display,&windows->widget,&back_info);
XDrawBeveledButton(display,&windows->widget,&reset_info);
XDrawBeveledMatte(display,&windows->widget,&list_info);
XDrawBeveledMatte(display,&windows->widget,&scroll_info);
XDrawTriangleNorth(display,&windows->widget,&north_info);
XDrawBeveledButton(display,&windows->widget,&slider_info);
XDrawTriangleSouth(display,&windows->widget,&south_info);
x=QuantumMargin;
y=reply_info.y+((reply_info.height-height) >> 1)+font_info->ascent;
(void) XDrawString(display,windows->widget.id,
windows->widget.annotate_context,x,y,FontnameText,
Extent(FontnameText));
XDrawBeveledMatte(display,&windows->widget,&reply_info);
XDrawMatteText(display,&windows->widget,&reply_info);
XDrawBeveledButton(display,&windows->widget,&action_info);
XDrawBeveledButton(display,&windows->widget,&cancel_info);
XHighlightWidget(display,&windows->widget,BorderOffset,BorderOffset);
selection_info.id=(~0);
state|=RedrawActionState;
state|=RedrawListState;
state&=(~RedrawWidgetState);
}
if (state & UpdateListState)
{
char
**checklist;
int
number_fonts;
/*
Update font list.
*/
checklist=XListFonts(display,glob_pattern,32767,&number_fonts);
if (checklist == (char **) NULL)
{
if ((strchr(glob_pattern,'*') == (char *) NULL) &&
(strchr(glob_pattern,'?') == (char *) NULL))
{
/*
Might be a scaleable font-- exit.
*/
(void) CopyMagickString(reply,glob_pattern,MaxTextExtent);
(void) CopyMagickString(glob_pattern,back_pattern,MaxTextExtent);
action_info.raised=MagickFalse;
XDrawBeveledButton(display,&windows->widget,&action_info);
break;
}
(void) CopyMagickString(glob_pattern,back_pattern,MaxTextExtent);
(void) XBell(display,0);
}
else
if (number_fonts == 1)
{
/*
Reply is a single font name-- exit.
*/
(void) CopyMagickString(reply,checklist[0],MaxTextExtent);
(void) CopyMagickString(glob_pattern,back_pattern,MaxTextExtent);
(void) XFreeFontNames(checklist);
action_info.raised=MagickFalse;
XDrawBeveledButton(display,&windows->widget,&action_info);
break;
}
else
{
(void) XFreeFontNames(listhead);
fontlist=(char **) RelinquishMagickMemory(fontlist);
fontlist=checklist;
fonts=number_fonts;
}
/*
Sort font list in ascending order.
*/
listhead=fontlist;
fontlist=(char **) AcquireQuantumMemory((size_t) fonts,
sizeof(*fontlist));
if (fontlist == (char **) NULL)
{
XNoticeWidget(display,windows,"MemoryAllocationFailed",
"UnableToViewFonts");
return;
}
for (i=0; i < fonts; i++)
fontlist[i]=listhead[i];
qsort((void *) fontlist,(size_t) fonts,sizeof(*fontlist),FontCompare);
slider_info.height=
scroll_info.height-((slider_info.min_y-scroll_info.y+1) << 1)+1;
if (fonts > (int) visible_fonts)
slider_info.height=(visible_fonts*slider_info.height)/fonts;
slider_info.max_y=south_info.y-south_info.bevel_width-
slider_info.bevel_width-2;
slider_info.id=0;
slider_info.y=slider_info.min_y;
expose_info.y=slider_info.y;
selection_info.id=(~0);
list_info.id=(~0);
state|=RedrawListState;
/*
Redraw font name & reply.
*/
*reply_info.text='\0';
reply_info.cursor=reply_info.text;
(void) CopyMagickString(text_info.text,glob_pattern,MaxTextExtent);
XDrawWidgetText(display,&windows->widget,&text_info);
XDrawMatteText(display,&windows->widget,&reply_info);
XDrawBeveledMatte(display,&windows->widget,&scroll_info);
XDrawTriangleNorth(display,&windows->widget,&north_info);
XDrawBeveledButton(display,&windows->widget,&slider_info);
XDrawTriangleSouth(display,&windows->widget,&south_info);
XHighlightWidget(display,&windows->widget,BorderOffset,BorderOffset);
state&=(~UpdateListState);
}
if (state & JumpListState)
{
/*
Jump scroll to match user font.
*/
list_info.id=(~0);
for (i=0; i < fonts; i++)
if (LocaleCompare(fontlist[i],reply) >= 0)
{
list_info.id=LocaleCompare(fontlist[i],reply) == 0 ? i : ~0;
break;
}
if ((i < slider_info.id) || (i >= (int) (slider_info.id+visible_fonts)))
slider_info.id=i-(visible_fonts >> 1);
selection_info.id=(~0);
state|=RedrawListState;
state&=(~JumpListState);
}
if (state & RedrawListState)
{
/*
Determine slider id and position.
*/
if (slider_info.id >= (int) (fonts-visible_fonts))
slider_info.id=fonts-visible_fonts;
if ((slider_info.id < 0) || (fonts <= (int) visible_fonts))
slider_info.id=0;
slider_info.y=slider_info.min_y;
if (fonts > 0)
slider_info.y+=
slider_info.id*(slider_info.max_y-slider_info.min_y+1)/fonts;
if (slider_info.id != selection_info.id)
{
/*
Redraw scroll bar and file names.
*/
selection_info.id=slider_info.id;
selection_info.y=list_info.y+(height >> 3)+2;
for (i=0; i < (int) visible_fonts; i++)
{
selection_info.raised=(slider_info.id+i) != list_info.id ?
MagickTrue : MagickFalse;
selection_info.text=(char *) NULL;
if ((slider_info.id+i) < fonts)
selection_info.text=fontlist[slider_info.id+i];
XDrawWidgetText(display,&windows->widget,&selection_info);
selection_info.y+=(int) selection_info.height;
}
/*
Update slider.
*/
if (slider_info.y > expose_info.y)
{
expose_info.height=(unsigned int) slider_info.y-expose_info.y;
expose_info.y=slider_info.y-expose_info.height-
slider_info.bevel_width-1;
}
else
{
expose_info.height=(unsigned int) expose_info.y-slider_info.y;
expose_info.y=slider_info.y+slider_info.height+
slider_info.bevel_width+1;
}
XDrawTriangleNorth(display,&windows->widget,&north_info);
XDrawMatte(display,&windows->widget,&expose_info);
XDrawBeveledButton(display,&windows->widget,&slider_info);
XDrawTriangleSouth(display,&windows->widget,&south_info);
expose_info.y=slider_info.y;
}
state&=(~RedrawListState);
}
if (state & RedrawActionState)
{
XFontStruct
*save_info;
/*
Display the selected font in a drawing area.
*/
save_info=windows->widget.font_info;
font_info=XLoadQueryFont(display,reply_info.text);
if (font_info != (XFontStruct *) NULL)
{
windows->widget.font_info=font_info;
(void) XSetFont(display,windows->widget.widget_context,
font_info->fid);
}
XDrawBeveledButton(display,&windows->widget,&mode_info);
windows->widget.font_info=save_info;
if (font_info != (XFontStruct *) NULL)
{
(void) XSetFont(display,windows->widget.widget_context,
windows->widget.font_info->fid);
(void) XFreeFont(display,font_info);
}
XHighlightWidget(display,&windows->widget,BorderOffset,BorderOffset);
XDrawMatteText(display,&windows->widget,&reply_info);
state&=(~RedrawActionState);
}
/*
Wait for next event.
*/
if (north_info.raised && south_info.raised)
(void) XIfEvent(display,&event,XScreenEvent,(char *) windows);
else
{
/*
Brief delay before advancing scroll bar.
*/
XDelay(display,delay);
delay=SuspendTime;
(void) XCheckIfEvent(display,&event,XScreenEvent,(char *) windows);
if (north_info.raised == MagickFalse)
if (slider_info.id > 0)
{
/*
Move slider up.
*/
slider_info.id--;
state|=RedrawListState;
}
if (south_info.raised == MagickFalse)
if (slider_info.id < fonts)
{
/*
Move slider down.
*/
slider_info.id++;
state|=RedrawListState;
}
if (event.type != ButtonRelease)
continue;
}
switch (event.type)
{
case ButtonPress:
{
if (MatteIsActive(slider_info,event.xbutton))
{
/*
Track slider.
*/
slider_info.active=MagickTrue;
break;
}
if (MatteIsActive(north_info,event.xbutton))
if (slider_info.id > 0)
{
/*
Move slider up.
*/
north_info.raised=MagickFalse;
slider_info.id--;
state|=RedrawListState;
break;
}
if (MatteIsActive(south_info,event.xbutton))
if (slider_info.id < fonts)
{
/*
Move slider down.
*/
south_info.raised=MagickFalse;
slider_info.id++;
state|=RedrawListState;
break;
}
if (MatteIsActive(scroll_info,event.xbutton))
{
/*
Move slider.
*/
if (event.xbutton.y < slider_info.y)
slider_info.id-=(visible_fonts-1);
else
slider_info.id+=(visible_fonts-1);
state|=RedrawListState;
break;
}
if (MatteIsActive(list_info,event.xbutton))
{
int
id;
/*
User pressed list matte.
*/
id=slider_info.id+(event.xbutton.y-(list_info.y+(height >> 1))+1)/
selection_info.height;
if (id >= (int) fonts)
break;
(void) CopyMagickString(reply_info.text,fontlist[id],MaxTextExtent);
reply_info.highlight=MagickFalse;
reply_info.marker=reply_info.text;
reply_info.cursor=reply_info.text+Extent(reply_info.text);
XDrawMatteText(display,&windows->widget,&reply_info);
state|=RedrawActionState;
if (id == list_info.id)
{
(void) CopyMagickString(glob_pattern,reply_info.text,
MaxTextExtent);
state|=UpdateListState;
}
selection_info.id=(~0);
list_info.id=id;
state|=RedrawListState;
break;
}
if (MatteIsActive(back_info,event.xbutton))
{
/*
User pressed Back button.
*/
back_info.raised=MagickFalse;
XDrawBeveledButton(display,&windows->widget,&back_info);
break;
}
if (MatteIsActive(reset_info,event.xbutton))
{
/*
User pressed Reset button.
*/
reset_info.raised=MagickFalse;
XDrawBeveledButton(display,&windows->widget,&reset_info);
break;
}
if (MatteIsActive(action_info,event.xbutton))
{
/*
User pressed action button.
*/
action_info.raised=MagickFalse;
XDrawBeveledButton(display,&windows->widget,&action_info);
break;
}
if (MatteIsActive(cancel_info,event.xbutton))
{
/*
User pressed Cancel button.
*/
cancel_info.raised=MagickFalse;
XDrawBeveledButton(display,&windows->widget,&cancel_info);
break;
}
if (MatteIsActive(reply_info,event.xbutton) == MagickFalse)
break;
if (event.xbutton.button != Button2)
{
static Time
click_time;
/*
Move text cursor to position of button press.
*/
x=event.xbutton.x-reply_info.x-(QuantumMargin >> 2);
for (i=1; i <= Extent(reply_info.marker); i++)
if (XTextWidth(font_info,reply_info.marker,i) > x)
break;
reply_info.cursor=reply_info.marker+i-1;
if (event.xbutton.time > (click_time+DoubleClick))
reply_info.highlight=MagickFalse;
else
{
/*
Become the XA_PRIMARY selection owner.
*/
(void) CopyMagickString(primary_selection,reply_info.text,
MaxTextExtent);
(void) XSetSelectionOwner(display,XA_PRIMARY,windows->widget.id,
event.xbutton.time);
reply_info.highlight=XGetSelectionOwner(display,XA_PRIMARY) ==
windows->widget.id ? MagickTrue : MagickFalse;
}
XDrawMatteText(display,&windows->widget,&reply_info);
click_time=event.xbutton.time;
break;
}
/*
Request primary selection.
*/
(void) XConvertSelection(display,XA_PRIMARY,XA_STRING,XA_STRING,
windows->widget.id,event.xbutton.time);
break;
}
case ButtonRelease:
{
if (windows->widget.mapped == MagickFalse)
break;
if (north_info.raised == MagickFalse)
{
/*
User released up button.
*/
delay=SuspendTime << 2;
north_info.raised=MagickTrue;
XDrawTriangleNorth(display,&windows->widget,&north_info);
}
if (south_info.raised == MagickFalse)
{
/*
User released down button.
*/
delay=SuspendTime << 2;
south_info.raised=MagickTrue;
XDrawTriangleSouth(display,&windows->widget,&south_info);
}
if (slider_info.active)
{
/*
Stop tracking slider.
*/
slider_info.active=MagickFalse;
break;
}
if (back_info.raised == MagickFalse)
{
if (event.xbutton.window == windows->widget.id)
if (MatteIsActive(back_info,event.xbutton))
{
(void) CopyMagickString(glob_pattern,back_pattern,
MaxTextExtent);
state|=UpdateListState;
}
back_info.raised=MagickTrue;
XDrawBeveledButton(display,&windows->widget,&back_info);
}
if (reset_info.raised == MagickFalse)
{
if (event.xbutton.window == windows->widget.id)
if (MatteIsActive(reset_info,event.xbutton))
{
(void) CopyMagickString(back_pattern,glob_pattern,MaxTextExtent);
(void) CopyMagickString(glob_pattern,reset_pattern,MaxTextExtent);
state|=UpdateListState;
}
reset_info.raised=MagickTrue;
XDrawBeveledButton(display,&windows->widget,&reset_info);
}
if (action_info.raised == MagickFalse)
{
if (event.xbutton.window == windows->widget.id)
{
if (MatteIsActive(action_info,event.xbutton))
{
if (*reply_info.text == '\0')
(void) XBell(display,0);
else
state|=ExitState;
}
}
action_info.raised=MagickTrue;
XDrawBeveledButton(display,&windows->widget,&action_info);
}
if (cancel_info.raised == MagickFalse)
{
if (event.xbutton.window == windows->widget.id)
if (MatteIsActive(cancel_info,event.xbutton))
{
*reply_info.text='\0';
state|=ExitState;
}
cancel_info.raised=MagickTrue;
XDrawBeveledButton(display,&windows->widget,&cancel_info);
}
break;
}
case ClientMessage:
{
/*
If client window delete message, exit.
*/
if (event.xclient.message_type != windows->wm_protocols)
break;
if (*event.xclient.data.l == (int) windows->wm_take_focus)
{
(void) XSetInputFocus(display,event.xclient.window,RevertToParent,
(Time) event.xclient.data.l[1]);
break;
}
if (*event.xclient.data.l != (int) windows->wm_delete_window)
break;
if (event.xclient.window == windows->widget.id)
{
*reply_info.text='\0';
state|=ExitState;
break;
}
break;
}
case ConfigureNotify:
{
/*
Update widget configuration.
*/
if (event.xconfigure.window != windows->widget.id)
break;
if ((event.xconfigure.width == (int) windows->widget.width) &&
(event.xconfigure.height == (int) windows->widget.height))
break;
windows->widget.width=(unsigned int)
MagickMax(event.xconfigure.width,(int) windows->widget.min_width);
windows->widget.height=(unsigned int)
MagickMax(event.xconfigure.height,(int) windows->widget.min_height);
state|=UpdateConfigurationState;
break;
}
case EnterNotify:
{
if (event.xcrossing.window != windows->widget.id)
break;
state&=(~InactiveWidgetState);
break;
}
case Expose:
{
if (event.xexpose.window != windows->widget.id)
break;
if (event.xexpose.count != 0)
break;
state|=RedrawWidgetState;
break;
}
case KeyPress:
{
static char
command[MaxTextExtent];
static int
length;
static KeySym
key_symbol;
/*
Respond to a user key press.
*/
if (event.xkey.window != windows->widget.id)
break;
length=XLookupString((XKeyEvent *) &event.xkey,command,
(int) sizeof(command),&key_symbol,(XComposeStatus *) NULL);
*(command+length)='\0';
if (AreaIsActive(scroll_info,event.xkey))
{
/*
Move slider.
*/
switch ((int) key_symbol)
{
case XK_Home:
case XK_KP_Home:
{
slider_info.id=0;
break;
}
case XK_Up:
case XK_KP_Up:
{
slider_info.id--;
break;
}
case XK_Down:
case XK_KP_Down:
{
slider_info.id++;
break;
}
case XK_Prior:
case XK_KP_Prior:
{
slider_info.id-=visible_fonts;
break;
}
case XK_Next:
case XK_KP_Next:
{
slider_info.id+=visible_fonts;
break;
}
case XK_End:
case XK_KP_End:
{
slider_info.id=fonts;
break;
}
}
state|=RedrawListState;
break;
}
if ((key_symbol == XK_Return) || (key_symbol == XK_KP_Enter))
{
/*
Read new font or glob patterm.
*/
if (*reply_info.text == '\0')
break;
(void) CopyMagickString(back_pattern,glob_pattern,MaxTextExtent);
(void) CopyMagickString(glob_pattern,reply_info.text,MaxTextExtent);
state|=UpdateListState;
break;
}
if (key_symbol == XK_Control_L)
{
state|=ControlState;
break;
}
if (state & ControlState)
switch ((int) key_symbol)
{
case XK_u:
case XK_U:
{
/*
Erase the entire line of text.
*/
*reply_info.text='\0';
reply_info.cursor=reply_info.text;
reply_info.marker=reply_info.text;
reply_info.highlight=MagickFalse;
break;
}
default:
break;
}
XEditText(display,&reply_info,key_symbol,command,state);
XDrawMatteText(display,&windows->widget,&reply_info);
state|=JumpListState;
break;
}
case KeyRelease:
{
static char
command[MaxTextExtent];
static KeySym
key_symbol;
/*
Respond to a user key release.
*/
if (event.xkey.window != windows->widget.id)
break;
(void) XLookupString((XKeyEvent *) &event.xkey,command,
(int) sizeof(command),&key_symbol,(XComposeStatus *) NULL);
if (key_symbol == XK_Control_L)
state&=(~ControlState);
break;
}
case LeaveNotify:
{
if (event.xcrossing.window != windows->widget.id)
break;
state|=InactiveWidgetState;
break;
}
case MapNotify:
{
mask&=(~CWX);
mask&=(~CWY);
break;
}
case MotionNotify:
{
/*
Discard pending button motion events.
*/
while (XCheckMaskEvent(display,ButtonMotionMask,&event)) ;
if (slider_info.active)
{
/*
Move slider matte.
*/
slider_info.y=event.xmotion.y-
((slider_info.height+slider_info.bevel_width) >> 1)+1;
if (slider_info.y < slider_info.min_y)
slider_info.y=slider_info.min_y;
if (slider_info.y > slider_info.max_y)
slider_info.y=slider_info.max_y;
slider_info.id=0;
if (slider_info.y != slider_info.min_y)
slider_info.id=(fonts*(slider_info.y-slider_info.min_y+1))/
(slider_info.max_y-slider_info.min_y+1);
state|=RedrawListState;
break;
}
if (state & InactiveWidgetState)
break;
if (back_info.raised == MatteIsActive(back_info,event.xmotion))
{
/*
Back button status changed.
*/
back_info.raised=!back_info.raised;
XDrawBeveledButton(display,&windows->widget,&back_info);
break;
}
if (reset_info.raised == MatteIsActive(reset_info,event.xmotion))
{
/*
Reset button status changed.
*/
reset_info.raised=!reset_info.raised;
XDrawBeveledButton(display,&windows->widget,&reset_info);
break;
}
if (action_info.raised == MatteIsActive(action_info,event.xmotion))
{
/*
Action button status changed.
*/
action_info.raised=action_info.raised == MagickFalse ?
MagickTrue : MagickFalse;
XDrawBeveledButton(display,&windows->widget,&action_info);
break;
}
if (cancel_info.raised == MatteIsActive(cancel_info,event.xmotion))
{
/*
Cancel button status changed.
*/
cancel_info.raised=cancel_info.raised == MagickFalse ?
MagickTrue : MagickFalse;
XDrawBeveledButton(display,&windows->widget,&cancel_info);
break;
}
break;
}
case SelectionClear:
{
reply_info.highlight=MagickFalse;
XDrawMatteText(display,&windows->widget,&reply_info);
break;
}
case SelectionNotify:
{
Atom
type;
int
format;
unsigned char
*data;
unsigned long
after,
length;
/*
Obtain response from primary selection.
*/
if (event.xselection.property == (Atom) None)
break;
status=XGetWindowProperty(display,event.xselection.requestor,
event.xselection.property,0L,2047L,MagickTrue,XA_STRING,&type,
&format,&length,&after,&data);
if ((status != Success) || (type != XA_STRING) || (format == 32) ||
(length == 0))
break;
if ((Extent(reply_info.text)+length) >= (MaxTextExtent-1))
(void) XBell(display,0);
else
{
/*
Insert primary selection in reply text.
*/
*(data+length)='\0';
XEditText(display,&reply_info,(KeySym) XK_Insert,(char *) data,
state);
XDrawMatteText(display,&windows->widget,&reply_info);
state|=JumpListState;
state|=RedrawActionState;
}
(void) XFree((void *) data);
break;
}
case SelectionRequest:
{
XSelectionEvent
notify;
XSelectionRequestEvent
*request;
/*
Set XA_PRIMARY selection.
*/
request=(&(event.xselectionrequest));
(void) XChangeProperty(request->display,request->requestor,
request->property,request->target,8,PropModeReplace,
(unsigned char *) primary_selection,Extent(primary_selection));
notify.type=SelectionNotify;
notify.display=request->display;
notify.requestor=request->requestor;
notify.selection=request->selection;
notify.target=request->target;
notify.time=request->time;
if (request->property == None)
notify.property=request->target;
else
notify.property=request->property;
(void) XSendEvent(request->display,request->requestor,False,0,
(XEvent *) ¬ify);
}
default:
break;
}
} while ((state & ExitState) == 0);
XSetCursorState(display,windows,MagickFalse);
(void) XWithdrawWindow(display,windows->widget.id,windows->widget.screen);
XCheckRefreshWindows(display,windows);
/*
Free font list.
*/
(void) XFreeFontNames(listhead);
fontlist=(char **) RelinquishMagickMemory(fontlist);
}
| 0 |
[] |
ImageMagick6
|
ad492c7ed4cf1cf285a7f6e2ee5a1e7cf24e73b2
| 141,847,976,755,257,970,000,000,000,000,000,000,000 | 1,148 |
https://github.com/ImageMagick/ImageMagick/issues/3335
|
is_device_installed(gx_device *dev, const char *name)
{
while (dev) {
if (!strcmp(dev->dname, name)) {
return true;
}
dev = dev->child;
}
return false;
}
| 0 |
[] |
ghostpdl
|
c9b362ba908ca4b1d7c72663a33229588012d7d9
| 171,414,785,133,527,000,000,000,000,000,000,000,000 | 10 |
Bug 699670: disallow copying of the epo device
The erasepage optimisation (epo) subclass device shouldn't be allowed to be
copied because the subclass private data, child and parent pointers end up
being shared between the original device and the copy.
Add an epo_finish_copydevice which NULLs the three offending pointers, and
then communicates to the caller that copying is not allowed.
This also exposed a separate issue with the stype for subclasses devices.
Devices are, I think, unique in having two stype objects associated with them:
the usual one in the memory manager header, and the other stored in the device
structere directly. In order for the stype to be correct, we have to use the
stype for the incoming device, with the ssize of the original device (ssize
should reflect the size of the memory allocation). We correctly did so with the
stype in the device structure, but then used the prototype device's stype to
patch the memory manager stype - meaning the ssize potentially no longer
matched the allocated memory. This caused problems in the garbager where there
is an implicit assumption that the size of a single object clump (c_alone == 1)
is also the size (+ memory manager overheads) of the single object it contains.
The solution is to use the same stype instance to patch the memory manager
data as we do in the device structure (with the correct ssize).
|
TEST_F(QueryPlannerTest, TwoNestedElemMatchBounds) {
addIndex(BSON("a.d.e" << 1 << "a.b.c" << 1));
runQuery(
fromjson("{a: {$elemMatch: {d: {$elemMatch: {e: {$lte: 1}}},"
"b: {$elemMatch: {c: {$gte: 1}}}}}}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
assertSolutionExists(
"{fetch: {node: {ixscan: {pattern: {'a.d.e': 1, 'a.b.c': 1}, bounds: "
"{'a.d.e': [[-Infinity, 1, true, true]],"
"'a.b.c': [[1, Infinity, true, true]]}}}}}");
}
| 0 |
[
"CWE-834"
] |
mongo
|
94d0e046baa64d1aa1a6af97e2d19bb466cc1ff5
| 186,154,329,297,090,880,000,000,000,000,000,000,000 | 13 |
SERVER-38164 $or pushdown optimization does not correctly handle $not within an $elemMatch
|
free_autopickup_exceptions()
{
struct autopickup_exception *ape;
int pass;
for (pass = AP_LEAVE; pass <= AP_GRAB; ++pass) {
while((ape = iflags.autopickup_exceptions[pass]) != 0) {
free(ape->pattern);
iflags.autopickup_exceptions[pass] = ape->next;
free(ape);
}
}
}
| 0 |
[
"CWE-269"
] |
NetHack
|
612755bfb5c412079795c68ba392df5d93874ed8
| 37,839,690,884,799,580,000,000,000,000,000,000,000 | 13 |
escapes() revamp
Partial rewrite of escapes(), mostly changing its if-then-else
logic so that end-of-string can be checked once instead for each case.
The previous version had a bug if the input string ended with backslash
and one decimal digit (due to being lumped together with the handling
for trailing \X or \O).
|
void SslIntegrationTestBase::TearDown() {
HttpIntegrationTest::cleanupUpstreamAndDownstream();
codec_client_.reset();
context_manager_.reset();
}
| 0 |
[
"CWE-400"
] |
envoy
|
0e49a495826ea9e29134c1bd54fdeb31a034f40c
| 18,250,008,054,471,818,000,000,000,000,000,000,000 | 5 |
http/2: add stats and stream flush timeout (#139)
This commit adds a new stream flush timeout to guard against a
remote server that does not open window once an entire stream has
been buffered for flushing. Additional stats have also been added
to better understand the codecs view of active streams as well as
amount of data buffered.
Signed-off-by: Matt Klein <[email protected]>
|
static int fbo_emulate_request_sense(struct tcmu_device *dev, uint8_t *cdb,
struct iovec *iovec, size_t iov_cnt,
uint8_t *sense)
{
struct fbo_state *state = tcmu_get_dev_private(dev);
uint8_t buf[18];
if (cdb[1] & 0x01)
return tcmu_set_sense_data(sense, ILLEGAL_REQUEST,
ASC_INVALID_FIELD_IN_CDB, NULL);
/* Note that upon successful completion, Request Sense returns the
* sense data in the data buffer, not as sense data.
*/
memset(buf, 0, sizeof(buf));
buf[0] = 0x70;
buf[7] = 0xa;
if (state->flags & FBO_FORMATTING) {
buf[2] = NOT_READY;
buf[12] = 0x04; // Not Ready
buf[13] = 0x04; // Format in progress
buf[15] = 0x80;
*(uint16_t *)&buf[16] = htobe16(state->format_progress);
}
else {
buf[2] = NO_SENSE;
}
tcmu_memcpy_into_iovec(iovec, iov_cnt, buf, sizeof(buf));
return SAM_STAT_GOOD;
}
| 0 |
[
"CWE-200"
] |
tcmu-runner
|
8cf8208775022301adaa59c240bb7f93742d1329
| 61,565,263,538,578,770,000,000,000,000,000,000,000 | 33 |
removed all check_config callback implementations to avoid security issues
see github issue #194
qcow.c contained an information leak, could test for existance of any
file in the system
file_example.c and file_optical.c allow also to test for existance of
any file, plus to temporarily create empty new files anywhere in the
file system. This also involves a race condition, if a file didn't exist
in the first place, but would be created in-between by some other
process, then the file would be deleted by the check_config
implementation.
|
/* Return the mean over the rolling sample window of the requested
 * instantaneous metric. */
long long getInstantaneousMetric(int metric) {
    long long total = 0;
    int idx;

    for (idx = 0; idx < STATS_METRIC_SAMPLES; idx++) {
        total += server.inst_metric[metric].samples[idx];
    }
    return total / STATS_METRIC_SAMPLES;
}
| 0 |
[
"CWE-770"
] |
redis
|
5674b0057ff2903d43eaff802017eddf37c360f8
| 293,988,763,242,437,260,000,000,000,000,000,000,000 | 8 |
Prevent unauthenticated client from easily consuming lots of memory (CVE-2021-32675)
This change sets a low limit for multibulk and bulk length in the
protocol for unauthenticated connections, so that they can't easily
cause redis to allocate massive amounts of memory by sending just a few
characters on the network.
The new limits are 10 arguments of 16kb each (instead of 1m of 512mb)
|
/*
 * Return the constant sample size recorded in the track's 'stsz' box,
 * or 0 when the track (or any box on the path to SampleSize) is absent.
 */
u32 gf_isom_get_constant_sample_size(GF_ISOFile *the_file, u32 trackNumber)
{
	GF_TrackBox *trak = gf_isom_get_track_from_file(the_file, trackNumber);

	/* Walk the box hierarchy defensively: any missing link means
	 * there is no sample-size information to report. */
	if (!trak) return 0;
	if (!trak->Media || !trak->Media->information) return 0;
	if (!trak->Media->information->sampleTable) return 0;
	if (!trak->Media->information->sampleTable->SampleSize) return 0;

	return trak->Media->information->sampleTable->SampleSize->sampleSize;
}
| 0 |
[
"CWE-476"
] |
gpac
|
ebfa346eff05049718f7b80041093b4c5581c24e
| 79,083,077,288,632,720,000,000,000,000,000,000,000 | 7 |
fixed #1706
|
/*
 * Unmap the portion of [addr, end) within @vma that is covered by this
 * PUD entry, one PMD at a time, releasing pages into @tlb.  Returns the
 * address it stopped at (== end when the whole range was processed).
 *
 * NOTE(review): *pmd can change concurrently (MADV_DONTNEED and THP
 * faults both run with mmap_sem held for read); the inline comment
 * below documents why the pmd must be re-checked before zapping PTEs.
 */
static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE) {
				/* Range covers only part of the huge page:
				 * split it so the PTE zap below can work. */
				VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
				split_huge_page_pmd(vma->vm_mm, pmd);
			} else if (zap_huge_pmd(tlb, vma, pmd, addr))
				goto next;
			/* fall through */
		}
		/*
		 * Here there can be other concurrent MADV_DONTNEED or
		 * trans huge page faults running, and if the pmd is
		 * none or trans huge it can change under us. This is
		 * because MADV_DONTNEED holds the mmap_sem in read
		 * mode.
		 */
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			goto next;
		next = zap_pte_range(tlb, vma, pmd, addr, next, details);
next:
		cond_resched();
	} while (pmd++, addr = next, addr != end);

	return addr;
}
| 0 |
[
"CWE-264"
] |
linux-2.6
|
1a5a9906d4e8d1976b701f889d8f35d54b928f25
| 79,337,938,379,353,060,000,000,000,000,000,000,000 | 35 |
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem hold in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables to go away from under them, during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(¤t->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[[email protected]: checkpatch fixes]
Reported-by: Ulrich Obergfell <[email protected]>
Signed-off-by: Andrea Arcangeli <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Dave Jones <[email protected]>
Acked-by: Larry Woodman <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: <[email protected]> [2.6.38+]
Cc: Mark Salter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
/*
 * Ensure a QKEY file of exactly QKEY_SIZE random bytes exists.  If a
 * correctly sized key file is already present it is left untouched;
 * otherwise a fresh random key is generated and written out.
 */
static void CL_GenerateQKey(void)
{
	fileHandle_t handle;
	unsigned char key[ QKEY_SIZE ];
	int existingLen;

	existingLen = FS_SV_FOpenFileRead( QKEY_FILE, &handle );
	FS_FCloseFile( handle );

	/* A file of exactly the right size counts as a valid key. */
	if( existingLen == QKEY_SIZE ) {
		Com_Printf( "QKEY found.\n" );
		return;
	}

	if( existingLen > 0 ) {
		Com_Printf( "QKEY file size != %d, regenerating\n",
			QKEY_SIZE );
	}

	Com_Printf( "QKEY building random string\n" );
	Com_RandomBytes( key, sizeof(key) );

	handle = FS_SV_FOpenFileWrite( QKEY_FILE );
	if( !handle ) {
		Com_Printf( "QKEY could not open %s for write\n",
			QKEY_FILE );
		return;
	}

	FS_Write( key, sizeof(key), handle );
	FS_FCloseFile( handle );
	Com_Printf( "QKEY generated\n" );
}
| 0 |
[
"CWE-269"
] |
ioq3
|
376267d534476a875d8b9228149c4ee18b74a4fd
| 246,107,634,174,971,730,000,000,000,000,000,000,000 | 32 |
Don't load .pk3s as .dlls, and don't load user config files from .pk3s.
|
/*
 * Parse libvirt's internal domain-status XML (the wrapper kept for
 * running domains) into a freshly allocated virDomainObj.
 *
 * Returns the new object on success; on failure the partially built
 * object is unref'd and NULL is returned.
 * NOTE(review): the return type is declared on a line above this chunk.
 */
virDomainObjParseXML(xmlDocPtr xml,
                     xmlXPathContextPtr ctxt,
                     virDomainXMLOptionPtr xmlopt,
                     unsigned int flags)
{
    long val;
    xmlNodePtr config;
    xmlNodePtr oldnode;
    virDomainObjPtr obj;
    size_t i;
    int n;
    int state;
    int reason = 0;
    void *parseOpaque = NULL;
    g_autofree char *tmp = NULL;
    g_autofree xmlNodePtr *nodes = NULL;

    if (!(obj = virDomainObjNew(xmlopt)))
        return NULL;

    /* The nested <domain> element carries the actual configuration. */
    if (!(config = virXPathNode("./domain", ctxt))) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       "%s", _("no domain config"));
        goto error;
    }

    /* Temporarily re-root the XPath context at <domain> for the def parse. */
    oldnode = ctxt->node;
    ctxt->node = config;
    obj->def = virDomainDefParseXML(xml, ctxt, xmlopt, flags);
    ctxt->node = oldnode;
    if (!obj->def)
        goto error;

    /* Mandatory "state" attribute on the status root element. */
    if (!(tmp = virXMLPropString(ctxt->node, "state"))) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       "%s", _("missing domain state"));
        goto error;
    }
    if ((state = virDomainStateTypeFromString(tmp)) < 0) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                       _("invalid domain state '%s'"), tmp);
        goto error;
    }
    VIR_FREE(tmp);

    /* Optional "reason" attribute refining the state. */
    if ((tmp = virXMLPropString(ctxt->node, "reason"))) {
        if ((reason = virDomainStateReasonFromString(state, tmp)) < 0) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                           _("invalid domain state reason '%s'"), tmp);
            goto error;
        }
    }

    virDomainObjSetState(obj, state, reason);

    /* Hypervisor process pid recorded in the status document. */
    if (virXPathLong("string(./@pid)", ctxt, &val) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       "%s", _("invalid pid"));
        goto error;
    }
    obj->pid = (pid_t)val;

    /* Re-apply any taint flags that were recorded for this domain. */
    if ((n = virXPathNodeSet("./taint", ctxt, &nodes)) < 0)
        goto error;
    for (i = 0; i < n; i++) {
        char *str = virXMLPropString(nodes[i], "flag");
        if (str) {
            int flag = virDomainTaintTypeFromString(str);
            if (flag < 0) {
                virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                               _("Unknown taint flag %s"), str);
                VIR_FREE(str);
                goto error;
            }
            VIR_FREE(str);
            virDomainObjTaint(obj, flag);
        }
    }

    /* Driver-specific private state, if the driver registered a parser. */
    if (xmlopt->privateData.parse &&
        xmlopt->privateData.parse(ctxt, obj, &xmlopt->config) < 0)
        goto error;

    if (xmlopt->privateData.getParseOpaque)
        parseOpaque = xmlopt->privateData.getParseOpaque(obj);

    /* callback to fill driver specific domain aspects */
    if (virDomainDefPostParse(obj->def, flags, xmlopt, parseOpaque) < 0)
        goto error;

    /* validate configuration */
    if (virDomainDefValidate(obj->def, flags, xmlopt) < 0)
        goto error;

    return obj;

 error:
    virObjectUnref(obj);
    return NULL;
}
| 0 |
[
"CWE-212"
] |
libvirt
|
a5b064bf4b17a9884d7d361733737fb614ad8979
| 276,909,084,547,811,760,000,000,000,000,000,000,000 | 100 |
conf: Don't format http cookies unless VIR_DOMAIN_DEF_FORMAT_SECURE is used
Starting with 3b076391befc3fe72deb0c244ac6c2b4c100b410
(v6.1.0-122-g3b076391be) we support http cookies. Since they may contain
somewhat sensitive information we should not format them into the XML
unless VIR_DOMAIN_DEF_FORMAT_SECURE is asserted.
Reported-by: Han Han <[email protected]>
Signed-off-by: Peter Krempa <[email protected]>
Reviewed-by: Erik Skultety <[email protected]>
|
/*
 * Build the optional <Organization> element of the SP metadata from the
 * per-directory configuration.  Returns an empty string when no
 * organization values are configured; the result is allocated from @p.
 */
static char *am_optional_metadata(apr_pool_t *p, request_rec *r)
{
    am_dir_cfg_rec *cfg = am_get_dir_cfg(r);
    int total;
    char *name_xml;
    char *display_xml;
    char *url_xml;

    /* Skip the element entirely if nothing is configured. */
    total = apr_hash_count(cfg->sp_org_name)
          + apr_hash_count(cfg->sp_org_display_name)
          + apr_hash_count(cfg->sp_org_url);
    if (total == 0)
        return "";

    name_xml = am_optional_metadata_element(p, cfg->sp_org_name,
                                            "OrganizationName");
    display_xml = am_optional_metadata_element(p, cfg->sp_org_display_name,
                                               "OrganizationDisplayName");
    url_xml = am_optional_metadata_element(p, cfg->sp_org_url,
                                           "OrganizationURL");

    return apr_psprintf(p, "<Organization>%s%s%s</Organization>",
                        name_xml, display_xml, url_xml);
}
| 0 |
[] |
mod_auth_mellon
|
6bdda9170a8f1757dabc5b109958657417728018
| 149,998,363,629,080,700,000,000,000,000,000,000,000 | 27 |
Fix segmentation fault when receiving badly formed logout message.
If the logout message is badly formed, we won't get the entityID in
`logout->parent.remote_providerID`. If we call `apr_hash_get()` with a
null pointer, it will cause a segmentation fault.
Add a check to validate that the entityID is correctly set.
|
/*
 * Parse an XML public-identifier literal:
 *   PubidLiteral ::= '"' PubidChar* '"' | "'" (PubidChar - "'")* "'"
 * Returns a freshly allocated, NUL-terminated buffer with the literal's
 * contents (caller must xmlFree it), or NULL on error / allocation
 * failure.
 * NOTE(review): the return type is declared on a line above this chunk.
 */
xmlParsePubidLiteral(xmlParserCtxtPtr ctxt) {
    xmlChar *buf = NULL;
    int len = 0;
    int size = XML_PARSER_BUFFER_SIZE;
    xmlChar cur;
    xmlChar stop;
    int count = 0;
    xmlParserInputState oldstate = ctxt->instate;

    SHRINK;
    /* The literal must open with a single or double quote; remember
     * which one so the same character terminates it. */
    if (RAW == '"') {
        NEXT;
	stop = '"';
    } else if (RAW == '\'') {
        NEXT;
	stop = '\'';
    } else {
	xmlFatalErr(ctxt, XML_ERR_LITERAL_NOT_STARTED, NULL);
	return(NULL);
    }
    buf = (xmlChar *) xmlMallocAtomic(size * sizeof(xmlChar));
    if (buf == NULL) {
        xmlErrMemory(ctxt, NULL);
	return(NULL);
    }
    ctxt->instate = XML_PARSER_PUBLIC_LITERAL;
    cur = CUR;
    /* Accumulate PubidChars up to the closing quote, doubling the
     * buffer as needed. */
    while ((IS_PUBIDCHAR_CH(cur)) && (cur != stop)) { /* checked */
	if (len + 1 >= size) {
	    xmlChar *tmp;

	    size *= 2;
	    tmp = (xmlChar *) xmlRealloc(buf, size * sizeof(xmlChar));
	    if (tmp == NULL) {
		xmlErrMemory(ctxt, NULL);
		xmlFree(buf);
		return(NULL);
	    }
	    buf = tmp;
	}
	buf[len++] = cur;
	count++;
	if (count > 50) {
	    /* Periodically refill the input buffer on long literals. */
	    GROW;
	    count = 0;
	}
	NEXT;
	cur = CUR;
	if (cur == 0) {
	    GROW;
	    SHRINK;
	    cur = CUR;
	}
    }
    buf[len] = 0;
    if (cur != stop) {
	xmlFatalErr(ctxt, XML_ERR_LITERAL_NOT_FINISHED, NULL);
    } else {
	NEXT;
    }
    ctxt->instate = oldstate;
    return(buf);
}
| 0 |
[
"CWE-125"
] |
libxml2
|
77404b8b69bc122d12231807abf1a837d121b551
| 140,540,689,144,593,200,000,000,000,000,000,000,000 | 63 |
Make sure the parser returns when getting a Stop order
patch backported from chromiun bug fixes, assuming author is Chris
|
/*
 * Release a PushHandle and everything it owns.  The input stream, if
 * any, is closed asynchronously with its close result deliberately
 * ignored (NULL callback) before the reference is dropped.
 * NOTE(review): the return type is declared on a line above this chunk.
 */
push_handle_free (PushHandle *handle)
{
  if (handle->in)
    {
      /* Fire-and-forget close; the unref below drops our reference. */
      g_input_stream_close_async (handle->in, 0, NULL, NULL, NULL);
      g_object_unref (handle->in);
    }
  g_object_unref (handle->backend);
  g_object_unref (handle->job);
  soup_uri_free (handle->uri);
  g_slice_free (PushHandle, handle);
}
| 0 |
[] |
gvfs
|
f81ff2108ab3b6e370f20dcadd8708d23f499184
| 308,132,058,049,829,560,000,000,000,000,000,000,000 | 12 |
dav: don't unescape the uri twice
path_equal tries to unescape path before comparing. Unfortunately
this function is used also for already unescaped paths. Therefore
unescaping can fail. This commit reverts changes which was done in
commit 50af53d and unescape just uris, which aren't unescaped yet.
https://bugzilla.gnome.org/show_bug.cgi?id=743298
|
/* Resource-list destructor: free the X509 certificate signing request
 * stored in this resource entry. */
static void php_csr_free(zend_rsrc_list_entry *rsrc TSRMLS_DC)
{
	X509_REQ_free((X509_REQ *) rsrc->ptr);
}
| 0 |
[
"CWE-200"
] |
php-src
|
270a406ac94b5fc5cc9ef59fc61e3b4b95648a3e
| 95,337,070,757,928,920,000,000,000,000,000,000,000 | 5 |
Fix bug #61413 ext\openssl\tests\openssl_encrypt_crash.phpt fails 5.3 only
|
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
int rc = X86EMUL_CONTINUE;
int mode = ctxt->mode;
int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
bool op_prefix = false;
bool has_seg_override = false;
struct opcode opcode;
ctxt->memop.type = OP_NONE;
ctxt->memopp = NULL;
ctxt->_eip = ctxt->eip;
ctxt->fetch.ptr = ctxt->fetch.data;
ctxt->fetch.end = ctxt->fetch.data + insn_len;
ctxt->opcode_len = 1;
if (insn_len > 0)
memcpy(ctxt->fetch.data, insn, insn_len);
else {
rc = __do_insn_fetch_bytes(ctxt, 1);
if (rc != X86EMUL_CONTINUE)
return rc;
}
switch (mode) {
case X86EMUL_MODE_REAL:
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
def_op_bytes = def_ad_bytes = 2;
break;
case X86EMUL_MODE_PROT32:
def_op_bytes = def_ad_bytes = 4;
break;
#ifdef CONFIG_X86_64
case X86EMUL_MODE_PROT64:
def_op_bytes = 4;
def_ad_bytes = 8;
break;
#endif
default:
return EMULATION_FAILED;
}
ctxt->op_bytes = def_op_bytes;
ctxt->ad_bytes = def_ad_bytes;
/* Legacy prefixes. */
for (;;) {
switch (ctxt->b = insn_fetch(u8, ctxt)) {
case 0x66: /* operand-size override */
op_prefix = true;
/* switch between 2/4 bytes */
ctxt->op_bytes = def_op_bytes ^ 6;
break;
case 0x67: /* address-size override */
if (mode == X86EMUL_MODE_PROT64)
/* switch between 4/8 bytes */
ctxt->ad_bytes = def_ad_bytes ^ 12;
else
/* switch between 2/4 bytes */
ctxt->ad_bytes = def_ad_bytes ^ 6;
break;
case 0x26: /* ES override */
case 0x2e: /* CS override */
case 0x36: /* SS override */
case 0x3e: /* DS override */
has_seg_override = true;
ctxt->seg_override = (ctxt->b >> 3) & 3;
break;
case 0x64: /* FS override */
case 0x65: /* GS override */
has_seg_override = true;
ctxt->seg_override = ctxt->b & 7;
break;
case 0x40 ... 0x4f: /* REX */
if (mode != X86EMUL_MODE_PROT64)
goto done_prefixes;
ctxt->rex_prefix = ctxt->b;
continue;
case 0xf0: /* LOCK */
ctxt->lock_prefix = 1;
break;
case 0xf2: /* REPNE/REPNZ */
case 0xf3: /* REP/REPE/REPZ */
ctxt->rep_prefix = ctxt->b;
break;
default:
goto done_prefixes;
}
/* Any legacy prefix after a REX prefix nullifies its effect. */
ctxt->rex_prefix = 0;
}
done_prefixes:
/* REX prefix. */
if (ctxt->rex_prefix & 8)
ctxt->op_bytes = 8; /* REX.W */
/* Opcode byte(s). */
opcode = opcode_table[ctxt->b];
/* Two-byte opcode? */
if (ctxt->b == 0x0f) {
ctxt->opcode_len = 2;
ctxt->b = insn_fetch(u8, ctxt);
opcode = twobyte_table[ctxt->b];
/* 0F_38 opcode map */
if (ctxt->b == 0x38) {
ctxt->opcode_len = 3;
ctxt->b = insn_fetch(u8, ctxt);
opcode = opcode_map_0f_38[ctxt->b];
}
}
ctxt->d = opcode.flags;
if (ctxt->d & ModRM)
ctxt->modrm = insn_fetch(u8, ctxt);
/* vex-prefix instructions are not implemented */
if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
(mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
ctxt->d = NotImpl;
}
while (ctxt->d & GroupMask) {
switch (ctxt->d & GroupMask) {
case Group:
goffset = (ctxt->modrm >> 3) & 7;
opcode = opcode.u.group[goffset];
break;
case GroupDual:
goffset = (ctxt->modrm >> 3) & 7;
if ((ctxt->modrm >> 6) == 3)
opcode = opcode.u.gdual->mod3[goffset];
else
opcode = opcode.u.gdual->mod012[goffset];
break;
case RMExt:
goffset = ctxt->modrm & 7;
opcode = opcode.u.group[goffset];
break;
case Prefix:
if (ctxt->rep_prefix && op_prefix)
return EMULATION_FAILED;
simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
switch (simd_prefix) {
case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
}
break;
case Escape:
if (ctxt->modrm > 0xbf)
opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
else
opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
break;
case InstrDual:
if ((ctxt->modrm >> 6) == 3)
opcode = opcode.u.idual->mod3;
else
opcode = opcode.u.idual->mod012;
break;
case ModeDual:
if (ctxt->mode == X86EMUL_MODE_PROT64)
opcode = opcode.u.mdual->mode64;
else
opcode = opcode.u.mdual->mode32;
break;
default:
return EMULATION_FAILED;
}
ctxt->d &= ~(u64)GroupMask;
ctxt->d |= opcode.flags;
}
/* Unrecognised? */
if (ctxt->d == 0)
return EMULATION_FAILED;
ctxt->execute = opcode.u.execute;
if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
return EMULATION_FAILED;
if (unlikely(ctxt->d &
(NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
No16))) {
/*
* These are copied unconditionally here, and checked unconditionally
* in x86_emulate_insn.
*/
ctxt->check_perm = opcode.check_perm;
ctxt->intercept = opcode.intercept;
if (ctxt->d & NotImpl)
return EMULATION_FAILED;
if (mode == X86EMUL_MODE_PROT64) {
if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
ctxt->op_bytes = 8;
else if (ctxt->d & NearBranch)
ctxt->op_bytes = 8;
}
if (ctxt->d & Op3264) {
if (mode == X86EMUL_MODE_PROT64)
ctxt->op_bytes = 8;
else
ctxt->op_bytes = 4;
}
if ((ctxt->d & No16) && ctxt->op_bytes == 2)
ctxt->op_bytes = 4;
if (ctxt->d & Sse)
ctxt->op_bytes = 16;
else if (ctxt->d & Mmx)
ctxt->op_bytes = 8;
}
/* ModRM and SIB bytes. */
if (ctxt->d & ModRM) {
rc = decode_modrm(ctxt, &ctxt->memop);
if (!has_seg_override) {
has_seg_override = true;
ctxt->seg_override = ctxt->modrm_seg;
}
} else if (ctxt->d & MemAbs)
rc = decode_abs(ctxt, &ctxt->memop);
if (rc != X86EMUL_CONTINUE)
goto done;
if (!has_seg_override)
ctxt->seg_override = VCPU_SREG_DS;
ctxt->memop.addr.mem.seg = ctxt->seg_override;
/*
* Decode and fetch the source operand: register, memory
* or immediate.
*/
rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
if (rc != X86EMUL_CONTINUE)
goto done;
/*
* Decode and fetch the second source operand: register, memory
* or immediate.
*/
rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
if (rc != X86EMUL_CONTINUE)
goto done;
/* Decode and fetch the destination operand: register or memory. */
rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
if (ctxt->rip_relative)
ctxt->memopp->addr.mem.ea = address_mask(ctxt,
ctxt->memopp->addr.mem.ea + ctxt->_eip);
done:
return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}
| 1 |
[
"CWE-476",
"CWE-284"
] |
linux
|
d9092f52d7e61dd1557f2db2400ddb430e85937e
| 190,016,248,246,993,300,000,000,000,000,000,000,000 | 268 |
kvm: x86: Check memopp before dereference (CVE-2016-8630)
Commit 41061cdb98 ("KVM: emulate: do not initialize memopp") removes a
check for non-NULL under incorrect assumptions. An undefined instruction
with a ModR/M byte with Mod=0 and R/M-5 (e.g. 0xc7 0x15) will attempt
to dereference a null pointer here.
Fixes: 41061cdb98a0bec464278b4db8e894a3121671f5
Message-Id: <[email protected]>
Signed-off-by: Owen Hofmann <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
  // Test stub: resolve every builtin operator lookup to the preset
  // registration, ignoring both the requested op and its version.
  const TfLiteRegistration* FindOp(tflite::BuiltinOperator op,
                                   int version) const override {
    return constant_return_;
  }
| 0 |
[
"CWE-20",
"CWE-787"
] |
tensorflow
|
d58c96946b2880991d63d1dacacb32f0a4dfa453
| 1,429,113,576,787,051,400,000,000,000,000,000,000 | 4 |
[tflite] Ensure inputs and outputs don't overlap.
If a model uses the same tensor for both an input and an output then this can result in data loss and memory corruption. This should not happen.
PiperOrigin-RevId: 332522916
Change-Id: If0905b142415a9dfceaf2d181872f2a8fb88f48a
|
/*
 * Read an optional HH:MM or HH:MM:SS timestamp from the next argv slot.
 *
 * A 5-character argument is taken as HH:MM and gets ":00" appended; an
 * 8-character argument is used verbatim.  In both cases *opt is advanced
 * past the consumed argument.  Anything else (missing argument or wrong
 * length) falls back to @def_timestamp without consuming the argument.
 *
 * Returns the result of decode_timestamp() on the chosen string.
 */
int parse_timestamp(char *argv[], int *opt, struct tstamp *tse,
		    const char *def_timestamp)
{
	char timestamp[9];
	char *arg = argv[++(*opt)];
	size_t arg_len = arg ? strlen(arg) : 0;

	if (arg && arg_len == 5) {
		/* HH:MM -> HH:MM:00 */
		snprintf(timestamp, sizeof(timestamp), "%s:00", arg);
		(*opt)++;
	}
	else if (arg && arg_len == 8) {
		snprintf(timestamp, sizeof(timestamp), "%s", arg);
		(*opt)++;
	}
	else {
		snprintf(timestamp, sizeof(timestamp), "%s", def_timestamp);
	}

	return decode_timestamp(timestamp, tse);
}
| 0 |
[
"CWE-125"
] |
sysstat
|
fbc691eaaa10d0bcea6741d5a223dc3906106548
| 288,432,733,975,018,540,000,000,000,000,000,000,000 | 29 |
Fix #196 and #199: Out of bound reads security issues
Check args before calling memmove() and memset() in remap_struct()
function to avoid out of bound reads which would possibly lead to
unknown code execution and/or sadf command crash.
Signed-off-by: Sebastien GODARD <[email protected]>
|
/*
 * Send a LAYOUTCOMMIT RPC described by @data.  When @sync is false the
 * call runs asynchronously (the inode is pinned for the task's
 * lifetime); when true, the task's status is returned to the caller.
 * NOTE(review): the return type is declared on a line above this chunk.
 */
nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = NFS_CLIENT(data->args.inode),
		.rpc_message = &msg,
		.callback_ops = &nfs4_layoutcommit_ops,
		.callback_data = data,
	};
	struct rpc_task *task;
	int status = 0;

	dprintk("NFS: initiating layoutcommit call. sync %d "
		"lbw: %llu inode %lu\n", sync,
		data->args.lastbytewritten,
		data->args.inode->i_ino);

	if (!sync) {
		/* Async path: pin the inode; bail out if it is going away. */
		data->inode = nfs_igrab_and_active(data->args.inode);
		if (data->inode == NULL) {
			nfs4_layoutcommit_release(data);
			return -EAGAIN;
		}
		task_setup_data.flags = RPC_TASK_ASYNC;
	}
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	if (sync)
		status = task->tk_status;
	trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status);
	dprintk("%s: status %d\n", __func__, status);
	rpc_put_task(task);
	return status;
}
| 0 |
[
"CWE-787"
] |
linux
|
b4487b93545214a9db8cbf32e86411677b0cca21
| 282,808,764,552,367,970,000,000,000,000,000,000,000 | 42 |
nfs: Fix getxattr kernel panic and memory overflow
Move the buffer size check to decode_attr_security_label() before memcpy()
Only call memcpy() if the buffer is large enough
Fixes: aa9c2669626c ("NFS: Client implementation of Labeled-NFS")
Signed-off-by: Jeffrey Mitchell <[email protected]>
[Trond: clean up duplicate test of label->len != 0]
Signed-off-by: Trond Myklebust <[email protected]>
|
/*
 * Called with interrupts disabled right after a VM-exit.  An
 * SVM_EXIT_INTR exit is caused by a host interrupt, which can only be
 * delivered at a guest instruction boundary, so record that fact (used
 * by the preempted-vCPU reporting logic).
 */
static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{
	if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR)
		vcpu->arch.at_instruction_boundary = true;
}
| 0 |
[
"CWE-703"
] |
linux
|
6cd88243c7e03845a450795e134b488fc2afb736
| 92,291,810,461,839,920,000,000,000,000,000,000,000 | 5 |
KVM: x86: do not report a vCPU as preempted outside instruction boundaries
If a vCPU is outside guest mode and is scheduled out, it might be in the
process of making a memory access. A problem occurs if another vCPU uses
the PV TLB flush feature during the period when the vCPU is scheduled
out, and a virtual address has already been translated but has not yet
been accessed, because this is equivalent to using a stale TLB entry.
To avoid this, only report a vCPU as preempted if sure that the guest
is at an instruction boundary. A rescheduling request will be delivered
to the host physical CPU as an external interrupt, so for simplicity
consider any vmexit *not* instruction boundary except for external
interrupts.
It would in principle be okay to report the vCPU as preempted also
if it is sleeping in kvm_vcpu_block(): a TLB flush IPI will incur the
vmentry/vmexit overhead unnecessarily, and optimistic spinning is
also unlikely to succeed. However, leave it for later because right
now kvm_vcpu_check_block() is doing memory accesses. Even
though the TLB flush issue only applies to virtual memory address,
it's very much preferrable to be conservative.
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
/* Check-framework test: all_users_waiting_to_send() should report 1
 * while no active user has room for more data (or no user is active),
 * and 0 once an active user's outgoing packet drains. */
START_TEST(test_all_users_waiting_to_send)
{
	in_addr_t ip;

	ip = inet_addr("127.0.0.1");
	init_users(ip, 27);
	/* Freshly initialized users: presumably none active -> 1. */
	fail_unless(all_users_waiting_to_send() == 1);
	users[0].conn = CONN_DNS_NULL;
	users[0].active = 1;
	fail_unless(all_users_waiting_to_send() == 1);
	users[0].last_pkt = time(NULL);
	users[0].outpacket.len = 0;
	/* Active, recently seen user with an empty outpacket -> 0. */
	fail_unless(all_users_waiting_to_send() == 0);
#ifdef OUTPACKETQ_LEN
	users[0].outpacketq_filled = 1;
#else
	users[0].outpacket.len = 44;
#endif
	/* Pending output again -> everyone is waiting to send. */
	fail_unless(all_users_waiting_to_send() == 1);
}
| 0 |
[] |
iodine
|
b715be5cf3978fbe589b03b09c9398d0d791f850
| 267,023,886,654,537,580,000,000,000,000,000,000,000 | 27 |
Fix authentication bypass bug
The client could bypass the password check by continuing after getting error
from the server and guessing the network parameters. The server would still
accept the rest of the setup and also network traffic.
Add checks for normal and raw mode that user has authenticated before allowing
any other communication.
Problem found by Oscar Reparaz.
|
template<typename t>
CImg<T>& operator<<=(const t value) {
if (is_empty()) return *this;
cimg_pragma_openmp(parallel for cimg_openmp_if(size()>=65536))
cimg_rof(*this,ptrd,T) *ptrd = (T)(((longT)*ptrd) << (int)value);
return *this;
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 151,678,076,412,590,570,000,000,000,000,000,000,000 | 6 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
void X509Certificate::PublicKey(const FunctionCallbackInfo<Value>& args) {
Environment* env = Environment::GetCurrent(args);
X509Certificate* cert;
ASSIGN_OR_RETURN_UNWRAP(&cert, args.Holder());
EVPKeyPointer pkey(X509_get_pubkey(cert->get()));
ManagedEVPPKey epkey(std::move(pkey));
std::shared_ptr<KeyObjectData> key_data =
KeyObjectData::CreateAsymmetric(kKeyTypePublic, epkey);
Local<Value> ret;
if (KeyObjectHandle::Create(env, key_data).ToLocal(&ret))
args.GetReturnValue().Set(ret);
}
| 0 |
[
"CWE-295"
] |
node
|
466e5415a2b7b3574ab5403acb87e89a94a980d1
| 205,488,268,932,207,460,000,000,000,000,000,000,000 | 14 |
crypto,tls: implement safe x509 GeneralName format
This change introduces JSON-compatible escaping rules for strings that
include X.509 GeneralName components (see RFC 5280). This non-standard
format avoids ambiguities and prevents injection attacks that could
previously lead to X.509 certificates being accepted even though they
were not valid for the target hostname.
These changes affect the format of subject alternative names and the
format of authority information access. The checkServerIdentity function
has been modified to safely handle the new format, eliminating the
possibility of injecting subject alternative names into the verification
logic.
Because each subject alternative name is only encoded as a JSON string
literal if necessary for security purposes, this change will only be
visible in rare cases.
This addresses CVE-2021-44532.
CVE-ID: CVE-2021-44532
PR-URL: https://github.com/nodejs-private/node-private/pull/300
Reviewed-By: Michael Dawson <[email protected]>
Reviewed-By: Rich Trott <[email protected]>
|
/*
 * libpng output-flush callback.  Flushes the underlying FILE* only when
 * built with DOFLUSH; otherwise it is a no-op that merely silences the
 * unused-parameter warning.
 */
static void pngflush(png_struct *p) {
#ifdef DOFLUSH
	fflush(png_get_io_ptr(p));
#else
	(void)p;
#endif
}
| 0 |
[
"CWE-415",
"CWE-787"
] |
png2webp
|
8f21ad79b0cd98fc22d5b49734543101946abbff
| 262,495,444,855,699,380,000,000,000,000,000,000,000 | 7 |
v1.0.5: fix buffer overrun when reading bad WebPs
|
// Read the next field header from the wire: consume the leading field
// type byte and delegate to readFieldBeginWithStateImpl(), which
// updates `state` with the decoded field information.
void CompactProtocolReader::readFieldBeginWithState(StructReadState& state) {
  int8_t byte;
  readByte(byte);
  readFieldBeginWithStateImpl(state, state.fieldId, byte);
}
| 0 |
[
"CWE-703",
"CWE-770"
] |
fbthrift
|
c9a903e5902834e95bbd4ab0e9fa53ba0189f351
| 69,356,619,170,020,230,000,000,000,000,000,000,000 | 5 |
Better handling of truncated data when reading strings
Summary:
Currently we read string size and blindly pre-allocate it. This allows malicious attacker to send a few bytes message and cause server to allocate huge amount of memory (>1GB).
This diff changes the logic to check if we have enough data in the buffer before allocating the string.
This is a second part of a fix for CVE-2019-3553.
Reviewed By: vitaut
Differential Revision: D14393393
fbshipit-source-id: e2046d2f5b087d3abc9a9d2c6c107cf088673057
|
/*
 * Parse LibVNCClient command-line options out of argc/argv (consumed
 * options are removed from the vector in place) and then establish and
 * initialise the connection.  Returns TRUE on success; on failure the
 * client is released with rfbClientCleanup() and FALSE is returned.
 */
rfbBool rfbInitClient(rfbClient* client,int* argc,char** argv) {
  int i,j;

  if(argv && argc && *argc) {
    if(client->programName==0)
      client->programName=argv[0];

    for (i = 1; i < *argc; i++) {
      j = i;
      if (strcmp(argv[i], "-listen") == 0) {
	listenForIncomingConnections(client);
	break;
      } else if (strcmp(argv[i], "-listennofork") == 0) {
	listenForIncomingConnectionsNoFork(client, -1);
	break;
      } else if (strcmp(argv[i], "-play") == 0) {
	client->serverPort = -1;
	j++;
      } else if (i+1<*argc && strcmp(argv[i], "-encodings") == 0) {
	client->appData.encodingsString = argv[i+1];
	j+=2;
      } else if (i+1<*argc && strcmp(argv[i], "-compress") == 0) {
	client->appData.compressLevel = atoi(argv[i+1]);
	j+=2;
      } else if (i+1<*argc && strcmp(argv[i], "-quality") == 0) {
	client->appData.qualityLevel = atoi(argv[i+1]);
	j+=2;
      } else if (i+1<*argc && strcmp(argv[i], "-scale") == 0) {
	client->appData.scaleSetting = atoi(argv[i+1]);
	j+=2;
      } else if (i+1<*argc && strcmp(argv[i], "-qosdscp") == 0) {
	client->QoS_DSCP = atoi(argv[i+1]);
	j+=2;
      } else if (i+1<*argc && strcmp(argv[i], "-repeaterdest") == 0) {
	/* "host[:port]" of a VNC repeater; port defaults to 5900. */
	char* colon=strchr(argv[i+1],':');

	if(client->destHost)
	  free(client->destHost);
	client->destPort = 5900;

	client->destHost = strdup(argv[i+1]);
	if(client->destHost && colon) {
	  client->destHost[(int)(colon-argv[i+1])] = '\0';
	  client->destPort = atoi(colon+1);
	}
	j+=2;
      } else {
	/* Any unrecognised argument is treated as "host[:port]". */
	char* colon=strrchr(argv[i],':');

	if(client->serverHost)
	  free(client->serverHost);

	if(colon) {
	  client->serverHost = strdup(argv[i]);
	  if(client->serverHost) {
	    client->serverHost[(int)(colon-argv[i])] = '\0';
	    client->serverPort = atoi(colon+1);
	  }
	} else {
	  client->serverHost = strdup(argv[i]);
	}
	/* Small port numbers are interpreted as display numbers. */
	if(client->serverPort >= 0 && client->serverPort < 5900)
	  client->serverPort += 5900;
      }
      /* purge arguments */
      if (j>i) {
	*argc-=j-i;
	memmove(argv+i,argv+j,(*argc-i)*sizeof(char*));
	i--;
      }
    }
  }

  if(!rfbInitConnection(client)) {
    rfbClientCleanup(client);
    return FALSE;
  }

  return TRUE;
}
| 0 |
[
"CWE-400",
"CWE-703"
] |
libvncserver
|
bef41f6ec4097a8ee094f90a1b34a708fbd757ec
| 40,210,280,811,369,256,000,000,000,000,000,000,000 | 80 |
libvncclient: free vncRec memory in rfbClientCleanup()
Otherwise we leak memory. Spotted by Ramin Farajpour Cami
<[email protected]>, thanks!
|
/*
 * Tear down a remote CIB connection: politely close the GnuTLS sessions
 * (when encryption is in use), shut down and close both the command and
 * callback sockets, and release the receive buffers.  Always returns 0.
 */
cib_tls_close(cib_t * cib)
{
    cib_remote_opaque_t *private = cib->variant_opaque;

#ifdef HAVE_GNUTLS_GNUTLS_H
    if (private->command.encrypted) {
        /* Send the TLS close-notify before freeing each session. */
        if (private->command.session) {
            gnutls_bye(*(private->command.session), GNUTLS_SHUT_RDWR);
            gnutls_deinit(*(private->command.session));
            gnutls_free(private->command.session);
        }
        if (private->callback.session) {
            gnutls_bye(*(private->callback.session), GNUTLS_SHUT_RDWR);
            gnutls_deinit(*(private->callback.session));
            gnutls_free(private->callback.session);
        }
        private->command.session = NULL;
        private->callback.session = NULL;
        if (remote_gnutls_credentials_init) {
            /* Global GnuTLS credentials are shared process-wide; release
             * them only once, guarded by this flag. */
            gnutls_anon_free_client_credentials(anon_cred_c);
            gnutls_global_deinit();
            remote_gnutls_credentials_init = FALSE;
        }
    }
#endif

    /* Close the plain sockets regardless of encryption. */
    if (private->command.socket) {
        shutdown(private->command.socket, SHUT_RDWR);   /* no more receptions */
        close(private->command.socket);
    }
    if (private->callback.socket) {
        shutdown(private->callback.socket, SHUT_RDWR);  /* no more receptions */
        close(private->callback.socket);
    }
    private->command.socket = 0;
    private->callback.socket = 0;

    /* free(NULL) is a no-op, so no guards are needed here. */
    free(private->command.recv_buf);
    free(private->callback.recv_buf);
    private->command.recv_buf = NULL;
    private->callback.recv_buf = NULL;
    return 0;
}
| 0 |
[
"CWE-399"
] |
pacemaker
|
564f7cc2a51dcd2f28ab12a13394f31be5aa3c93
| 296,787,261,485,493,630,000,000,000,000,000,000,000 | 45 |
High: core: Internal tls api improvements for reuse with future LRMD tls backend.
|
/*
 * Intersect character class `cc` into `dest` (dest &= cc), honouring each
 * class's negation (NOT) flag.  The single-byte part is combined with
 * bitset operations on temporary, un-negated copies; for multi-byte
 * encodings the code-range buffers are merged with the and/or/not range
 * helpers (De Morgan: ~a & ~b is computed as ~(a | b)).
 * Returns 0 on success or an error code from the range-buffer helpers.
 */
and_cclass(CClassNode* dest, CClassNode* cc, ScanEnv* env)
{
  OnigEncoding enc = env->enc;
  int r, not1, not2;
  BBuf *buf1, *buf2, *pbuf = 0;
  BitSetRef bsr1, bsr2;
  BitSet bs1, bs2;

  not1 = IS_NCCLASS_NOT(dest);
  bsr1 = dest->bs;
  buf1 = dest->mbuf;
  not2 = IS_NCCLASS_NOT(cc);
  bsr2 = cc->bs;
  buf2 = cc->mbuf;

  /* Work on inverted copies so both bitsets are in "positive" form. */
  if (not1 != 0) {
    bitset_invert_to(bsr1, bs1);
    bsr1 = bs1;
  }
  if (not2 != 0) {
    bitset_invert_to(bsr2, bs2);
    bsr2 = bs2;
  }
  bitset_and(bsr1, bsr2);
  if (bsr1 != dest->bs) {
    bitset_copy(dest->bs, bsr1);
    bsr1 = dest->bs;
  }
  /* Undo the inversion so dest keeps its original NOT convention. */
  if (not1 != 0) {
    bitset_invert(dest->bs);
  }

  if (! ONIGENC_IS_SINGLEBYTE(enc)) {
    if (not1 != 0 && not2 != 0) {
      /* ~a & ~b == ~(a | b); the outer NOT stays implicit in dest's flag. */
      r = or_code_range_buf(enc, buf1, 0, buf2, 0, &pbuf, env);
    }
    else {
      r = and_code_range_buf(buf1, not1, buf2, not2, &pbuf, env);
      if (r == 0 && not1 != 0) {
        /* dest was negated: complement the intersection result. */
        BBuf *tbuf = 0;
        r = not_code_range_buf(enc, pbuf, &tbuf, env);
        bbuf_free(pbuf);
        pbuf = tbuf;
      }
    }
    if (r != 0) {
      bbuf_free(pbuf);
      return r;
    }

    /* Replace dest's range buffer with the combined one. */
    dest->mbuf = pbuf;
    bbuf_free(buf1);
    return r;
  }
  return 0;
}
| 0 |
[
"CWE-476"
] |
Onigmo
|
00cc7e28a3ed54b3b512ef3b58ea737a57acf1f9
| 198,072,814,426,471,470,000,000,000,000,000,000,000 | 56 |
Fix SEGV in onig_error_code_to_str() (Fix #132)
When onig_new(ONIG_SYNTAX_PERL) fails with ONIGERR_INVALID_GROUP_NAME,
onig_error_code_to_str() crashes.
onig_scan_env_set_error_string() should have been used when returning
ONIGERR_INVALID_GROUP_NAME.
|
/* Set up libwmflite font support: allocate the wmfFontData structure,
 * install the font-mapping and string-width callbacks, and attach our
 * zero-initialised private per-font state. */
static void lite_font_init( wmfAPI* API, wmfAPI_Options* options)
{
  wmfFontData
    *font_data;

  wmf_magick_font_t
    *magick_font;

  (void) options;

  API->fonts = 0;

  /* Allocate the wmfFontData structure consumed by libwmflite. */
  API->font_data = wmf_malloc(API,sizeof(wmfFontData));
  if (ERR(API))
    return;
  font_data = (wmfFontData*)API->font_data;

  /* Install the callbacks libwmflite invokes for font handling. */
  font_data->map = lite_font_map;
  font_data->stringwidth = lite_font_stringwidth;

  /* Attach our private state (opaque to libwmflite, type void*). */
  font_data->user_data = wmf_malloc(API,sizeof(wmf_magick_font_t));
  if (ERR(API))
    return;
  magick_font = (wmf_magick_font_t*)font_data->user_data;
  magick_font->ps_name = 0;
  magick_font->pointsize = 0;
}
| 0 |
[
"CWE-772"
] |
ImageMagick
|
b2b48d50300a9fbcd0aa0d9230fd6d7a08f7671e
| 296,795,363,480,000,500,000,000,000,000,000,000,000 | 28 |
https://github.com/ImageMagick/ImageMagick/issues/544
|
// Verifies that with allow_post_ configured, a plain HTTP POST (method
// switched from CONNECT, :protocol removed) is accepted and tunneled like
// a CONNECT request, including a clean bidirectional-data shutdown.
TEST_P(ConnectTerminationIntegrationTest, BasicAllowPost) {
  // Enable the POST-as-CONNECT path before bringing the test server up.
  allow_post_ = true;
  initialize();

  // Use POST request.
  connect_headers_.setMethod("POST");
  connect_headers_.removeProtocol();

  setUpConnection();
  sendBidirectionalDataAndCleanShutdown();
}
| 0 |
[
"CWE-416"
] |
envoy
|
ce0ae309057a216aba031aff81c445c90c6ef145
| 177,025,723,050,140,270,000,000,000,000,000,000,000 | 11 |
CVE-2021-43826
Signed-off-by: Yan Avlasov <[email protected]>
|
/*
 * Map line "lnum1" of buffer "buf1" to the corresponding line in the
 * current buffer, using the diff blocks of the current tab page (used to
 * keep cursors aligned across diff'ed windows).
 * Returns "lnum1" unchanged when either buffer is not being diffed or
 * there are no diff blocks.
 */
diff_get_corresponding_line_int(
    buf_T	*buf1,
    linenr_T	lnum1)
{
    int		idx1;
    int		idx2;
    diff_T	*dp;
    int		baseline = 0;	// accumulated line offset between buffers

    idx1 = diff_buf_idx(buf1);
    idx2 = diff_buf_idx(curbuf);
    if (idx1 == DB_COUNT || idx2 == DB_COUNT || curtab->tp_first_diff == NULL)
	return lnum1;

    if (curtab->tp_diff_invalid)
	ex_diffupdate(NULL);		// update after a big change

    if (curtab->tp_first_diff == NULL)		// no diffs today
	return lnum1;

    FOR_ALL_DIFFBLOCKS_IN_TAB(curtab, dp)
    {
	// lnum1 lies before this block: apply the offset gathered so far.
	if (dp->df_lnum[idx1] > lnum1)
	    return lnum1 - baseline;
	if ((dp->df_lnum[idx1] + dp->df_count[idx1]) > lnum1)
	{
	    // Inside the diffblock
	    baseline = lnum1 - dp->df_lnum[idx1];
	    // Clamp to the other buffer's block size so we stay inside it.
	    if (baseline > dp->df_count[idx2])
		baseline = dp->df_count[idx2];

	    return dp->df_lnum[idx2] + baseline;
	}
	if (   (dp->df_lnum[idx1] == lnum1)
	    && (dp->df_count[idx1] == 0)
	    && (dp->df_lnum[idx2] <= curwin->w_cursor.lnum)
	    && ((dp->df_lnum[idx2] + dp->df_count[idx2])
						      > curwin->w_cursor.lnum))
	    /*
	     * Special case: if the cursor is just after a zero-count
	     * block (i.e. all filler) and the target cursor is already
	     * inside the corresponding block, leave the target cursor
	     * unmoved. This makes repeated CTRL-W W operations work
	     * as expected.
	     */
	    return curwin->w_cursor.lnum;
	// Past this block: update the running offset from block end lines.
	baseline = (dp->df_lnum[idx1] + dp->df_count[idx1])
				   - (dp->df_lnum[idx2] + dp->df_count[idx2]);
    }

    // If we get here then the cursor is after the last diff
    return lnum1 - baseline;
}
| 0 |
[
"CWE-787"
] |
vim
|
c101abff4c6756db4f5e740fde289decb9452efa
| 108,652,984,607,219,770,000,000,000,000,000,000,000 | 53 |
patch 8.2.5164: invalid memory access after diff buffer manipulations
Problem: Invalid memory access after diff buffer manipulations.
Solution: Use zero offset when change removes all lines in a diff block.
|
/* Parse a version pattern: keep a private heap copy of the text in the
 * pattern along with its length.  Returns 1 on success; on allocation
 * failure sets an error message via <err> and returns 0. */
static int acl_parse_ver(const char **text, struct acl_pattern *pattern, int *opaque, char **err)
{
	char *copy;

	copy = strdup(*text);
	if (copy == NULL) {
		memprintf(err, "out of memory while loading pattern");
		return 0;
	}
	pattern->ptr.str = copy;
	pattern->len = strlen(copy);
	return 1;
}
| 0 |
[] |
haproxy
|
aae75e3279c6c9bd136413a72dafdcd4986bb89a
| 254,432,597,454,653,600,000,000,000,000,000,000,000 | 10 |
BUG/CRITICAL: using HTTP information in tcp-request content may crash the process
During normal HTTP request processing, request buffers are realigned if
there are less than global.maxrewrite bytes available after them, in
order to leave enough room for rewriting headers after the request. This
is done in http_wait_for_request().
However, if some HTTP inspection happens during a "tcp-request content"
rule, this realignment is not performed. In theory this is not a problem
because empty buffers are always aligned and TCP inspection happens at
the beginning of a connection. But with HTTP keep-alive, it also happens
at the beginning of each subsequent request. So if a second request was
pipelined by the client before the first one had a chance to be forwarded,
the second request will not be realigned. Then, http_wait_for_request()
will not perform such a realignment either because the request was
already parsed and marked as such. The consequence of this, is that the
rewrite of a sufficient number of such pipelined, unaligned requests may
leave less room past the request been processed than the configured
reserve, which can lead to a buffer overflow if request processing appends
some data past the end of the buffer.
A number of conditions are required for the bug to be triggered :
- HTTP keep-alive must be enabled ;
- HTTP inspection in TCP rules must be used ;
- some request appending rules are needed (reqadd, x-forwarded-for)
- since empty buffers are always realigned, the client must pipeline
enough requests so that the buffer always contains something till
the point where there is no more room for rewriting.
While such a configuration is quite unlikely to be met (which is
confirmed by the bug's lifetime), a few people do use these features
together for very specific usages. And more importantly, writing such
a configuration and the request to attack it is trivial.
A quick workaround consists in forcing keep-alive off by adding
"option httpclose" or "option forceclose" in the frontend. Alternatively,
disabling HTTP-based TCP inspection rules is enough if the application
supports it.
At first glance, this bug does not look like it could lead to remote code
execution, as the overflowing part is controlled by the configuration and
not by the user. But some deeper analysis should be performed to confirm
this. And anyway, corrupting the process' memory and crashing it is quite
trivial.
Special thanks go to Yves Lafon from the W3C who reported this bug and
deployed significant efforts to collect the relevant data needed to
understand it in less than one week.
CVE-2013-1912 was assigned to this issue.
Note that 1.4 is also affected so the fix must be backported.
|
// Verifies that the upstream high/low write-buffer watermark callbacks
// readDisable()/re-enable the downstream stream and increment the
// paused/resumed flow-control counters, and that a stream left
// readDisabled is still torn down cleanly when the full response arrives.
TEST_F(HttpConnectionManagerImplTest, UpstreamWatermarkCallbacks) {
  setup(false, "");
  setUpEncoderAndDecoder(false, false);
  sendRequestHeadersAndData();

  // Mimic the upstream connection backing up. The router would call
  // onDecoderFilterAboveWriteBufferHighWatermark which should readDisable the stream and increment
  // stats.
  EXPECT_CALL(response_encoder_, getStream()).WillOnce(ReturnRef(stream_));
  EXPECT_CALL(stream_, readDisable(true));
  ASSERT(decoder_filters_[0]->callbacks_ != nullptr);
  decoder_filters_[0]->callbacks_->onDecoderFilterAboveWriteBufferHighWatermark();
  EXPECT_EQ(1U, stats_.named_.downstream_flow_control_paused_reading_total_.value());

  // Resume the flow of data. When the router buffer drains it calls
  // onDecoderFilterBelowWriteBufferLowWatermark which should re-enable reads on the stream.
  EXPECT_CALL(response_encoder_, getStream()).WillOnce(ReturnRef(stream_));
  EXPECT_CALL(stream_, readDisable(false));
  ASSERT(decoder_filters_[0]->callbacks_ != nullptr);
  decoder_filters_[0]->callbacks_->onDecoderFilterBelowWriteBufferLowWatermark();
  EXPECT_EQ(1U, stats_.named_.downstream_flow_control_resumed_reading_total_.value());

  // Backup upstream once again.
  EXPECT_CALL(response_encoder_, getStream()).WillOnce(ReturnRef(stream_));
  EXPECT_CALL(stream_, readDisable(true));
  ASSERT(decoder_filters_[0]->callbacks_ != nullptr);
  decoder_filters_[0]->callbacks_->onDecoderFilterAboveWriteBufferHighWatermark();
  EXPECT_EQ(2U, stats_.named_.downstream_flow_control_paused_reading_total_.value());

  // Send a full response.
  EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, true));
  EXPECT_CALL(*encoder_filters_[0], encodeComplete());
  EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, true));
  EXPECT_CALL(*encoder_filters_[1], encodeComplete());
  EXPECT_CALL(response_encoder_, encodeHeaders(_, true));
  expectOnDestroy();
  decoder_filters_[1]->callbacks_->encodeHeaders(
      ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true);
}
| 0 |
[
"CWE-400"
] |
envoy
|
0e49a495826ea9e29134c1bd54fdeb31a034f40c
| 15,402,002,235,362,306,000,000,000,000,000,000,000 | 39 |
http/2: add stats and stream flush timeout (#139)
This commit adds a new stream flush timeout to guard against a
remote server that does not open window once an entire stream has
been buffered for flushing. Additional stats have also been added
to better understand the codecs view of active streams as well as
amount of data buffered.
Signed-off-by: Matt Klein <[email protected]>
|
// Transport write-completion callback.  Accounts the bytes written,
// manages the write timeout (cancelled on the last pending write,
// refreshed otherwise), fires byte-event trackers, and decides whether
// the session should be shut down: either because the codec is no longer
// reusable (or reads are shut down) with no transactions left, or because
// the configured egress byte limit has been reached.
HTTPSession::onWriteSuccess(uint64_t bytesWritten) {
  // Keep the session alive for the duration of this callback even if a
  // shutdown path below releases the last external reference.
  DestructorGuard dg(this);
  bytesWritten_ += bytesWritten;
  transportInfo_.totalBytes += bytesWritten;
  CHECK(writeTimeout_.isScheduled());
  if (pendingWrites_.empty()) {
    VLOG(10) << "Cancel write timer on last successful write";
    writeTimeout_.cancelTimeout();
  } else {
    VLOG(10) << "Refresh write timer on writeSuccess";
    timeout_.scheduleTimeout(&writeTimeout_);
  }

  if (infoCallback_) {
    infoCallback_->onWrite(*this, bytesWritten);
  }

  VLOG(5) << "total bytesWritten_: " << bytesWritten_;

  // processByteEvents will return true if it has been replaced with another
  // tracker in the middle and needs to be re-run.  Should happen at most
  // once.  while with no body is intentional
  while (byteEventTracker_ &&
         byteEventTracker_->processByteEvents(
           byteEventTracker_, bytesWritten_)) {} // pass

  if ((!codec_->isReusable() || readsShutdown()) && (transactions_.empty())) {
    if (!codec_->isReusable()) {
      // Shouldn't happen unless there is a bug. This can only happen when
      // someone calls shutdownTransport, but did not specify a reason before.
      setCloseReason(ConnectionCloseReason::UNKNOWN);
    }
    VLOG(4) << *this << " shutdown from onWriteSuccess";
    shutdownTransport(true, true);
  }
  numActiveWrites_--;
  if (!inLoopCallback_) {
    updateWriteCount();
    // safe to resume here:
    updateWriteBufSize(-folly::to<int64_t>(bytesWritten));
    // PRIO_FIXME: this is done because of the corking business...
    //             in the future we may want to have a pull model
    //             whereby the socket asks us for a given amount of
    //             data to send...
    if (numActiveWrites_ == 0 && hasMoreWrites()) {
      runLoopCallback();
    }
  }
  onWriteCompleted();

  if (egressBytesLimit_ > 0 && bytesWritten_ >= egressBytesLimit_) {
    VLOG(4) << "Egress limit reached, shutting down "
      "session (egressed " << bytesWritten_ << ", limit set to "
      << egressBytesLimit_ << ")";
    shutdownTransport(true, true);
  }
}
| 0 |
[
"CWE-20"
] |
proxygen
|
0600ebe59c3e82cd012def77ca9ca1918da74a71
| 47,084,132,666,643,270,000,000,000,000,000,000,000 | 57 |
Check that a secondary auth manager is set before dereferencing.
Summary: CVE-2018-6343
Reviewed By: mingtaoy
Differential Revision: D12994423
fbshipit-source-id: 9229ec11da8085f1fa153595e8e5353e19d06fb7
|
// Wake every blocked remover.  Each remover waits for a *specific* key to
// appear in the map, and we cannot tell which key just became available,
// so a broadcast (notify_all) is required rather than notify_one.
void notify_removers() {
  not_empty_.notify_all();
}
| 0 |
[
"CWE-20",
"CWE-476"
] |
tensorflow
|
d7de67733925de196ec8863a33445b73f9562d1d
| 171,157,553,024,382,160,000,000,000,000,000,000,000 | 6 |
Prevent a CHECK-fail due to empty tensor input in `map_stage_op.cc`
PiperOrigin-RevId: 387737906
Change-Id: Idc52df0c71c7ed6e2dd633b651a581932f277c8a
|
/*
 * One-time VMCS programming for a vCPU: I/O and MSR bitmaps, shadow-VMCS
 * read/write bitmaps, pin/CPU-based (and secondary) execution controls,
 * APIC virtualization and posted interrupts when available, pause-loop
 * exiting, constant host state, the MSR autoload/autostore areas and the
 * guest MSR save list.  Inline references such as "22.2.4" point to the
 * corresponding sections of the Intel SDM.  Always returns 0.
 */
static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
{
#ifdef CONFIG_X86_64
	unsigned long a;
#endif
	int i;

	/* I/O */
	vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
	vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b));

	if (enable_shadow_vmcs) {
		vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
		vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
	}

	if (cpu_has_vmx_msr_bitmap())
		vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));

	vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */

	/* Control */
	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));

	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));

	if (cpu_has_secondary_exec_ctrls())
		vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
				vmx_secondary_exec_control(vmx));

	/* APIC virtualization: clear EOI-exit bitmaps and set up the
	 * posted-interrupt notification vector and descriptor. */
	if (vmx_cpu_uses_apicv(&vmx->vcpu)) {
		vmcs_write64(EOI_EXIT_BITMAP0, 0);
		vmcs_write64(EOI_EXIT_BITMAP1, 0);
		vmcs_write64(EOI_EXIT_BITMAP2, 0);
		vmcs_write64(EOI_EXIT_BITMAP3, 0);

		vmcs_write16(GUEST_INTR_STATUS, 0);

		vmcs_write64(POSTED_INTR_NV, POSTED_INTR_VECTOR);
		vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
	}

	/* Pause-loop exiting, when a non-zero gap is configured. */
	if (ple_gap) {
		vmcs_write32(PLE_GAP, ple_gap);
		vmx->ple_window = ple_window;
		vmx->ple_window_dirty = true;
	}

	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
	vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */

	vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
	vmcs_write16(HOST_GS_SELECTOR, 0);            /* 22.2.4 */
	vmx_set_constant_host_state(vmx);
#ifdef CONFIG_X86_64
	rdmsrl(MSR_FS_BASE, a);
	vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
	rdmsrl(MSR_GS_BASE, a);
	vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
#else
	vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
	vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
#endif

	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));

	if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);

	/* Populate the guest MSR save list, skipping MSRs that this host
	 * cannot read or write (rdmsr_safe/wrmsr_safe failures). */
	for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) {
		u32 index = vmx_msr_index[i];
		u32 data_low, data_high;
		int j = vmx->nmsrs;

		if (rdmsr_safe(index, &data_low, &data_high) < 0)
			continue;
		if (wrmsr_safe(index, data_low, data_high) < 0)
			continue;
		vmx->guest_msrs[j].index = i;
		vmx->guest_msrs[j].data = 0;
		vmx->guest_msrs[j].mask = -1ull;
		++vmx->nmsrs;
	}

	vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);

	/* 22.2.1, 20.8.1 */
	vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl);

	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
	set_cr4_guest_host_mask(vmx);

	if (vmx_xsaves_supported())
		vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);

	return 0;
}
| 0 |
[
"CWE-399"
] |
linux
|
54a20552e1eae07aa240fa370a0293e006b5faed
| 285,812,305,160,140,860,000,000,000,000,000,000,000 | 102 |
KVM: x86: work around infinite loop in microcode when #AC is delivered
It was found that a guest can DoS a host by triggering an infinite
stream of "alignment check" (#AC) exceptions. This causes the
microcode to enter an infinite loop where the core never receives
another interrupt. The host kernel panics pretty quickly due to the
effects (CVE-2015-5307).
Signed-off-by: Eric Northup <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]>
|
// malloc() replacement that optionally records heap statistics and memory
// traces.  `caller` is the call-site address, used only for tracing.
// Returns NULL on allocation failure, including when adding the
// bookkeeping header would overflow size_t (heap-stats builds only).
extern "C" void *malloc_wrapper(size_t size, void *caller)
{
    void *ptr = NULL;

#ifdef MBED_MEM_TRACING_ENABLED
    mbed_mem_trace_lock();
#endif
#ifdef MBED_HEAP_STATS_ENABLED
    malloc_stats_mutex->lock();
    alloc_info_t *alloc_info = NULL;
    // Guard against integer overflow before adding the stats header.
    if (size <= SIZE_MAX - sizeof(alloc_info_t)) {
        alloc_info = (alloc_info_t *)SUPER_MALLOC(size + sizeof(alloc_info_t));
    }
    if (alloc_info != NULL) {
        alloc_info->size = size;
        alloc_info->signature = MBED_HEAP_STATS_SIGNATURE;
        // The pointer handed to the caller starts just past our header.
        ptr = (void *)(alloc_info + 1);
        heap_stats.current_size += size;
        heap_stats.total_size += size;
        heap_stats.alloc_cnt += 1;
        if (heap_stats.current_size > heap_stats.max_size) {
            heap_stats.max_size = heap_stats.current_size;
        }
        heap_stats.overhead_size += MALLOC_HEAP_TOTAL_SIZE(MALLOC_HEADER_PTR(alloc_info)) - size;
    } else {
        heap_stats.alloc_fail_cnt += 1;
    }
    malloc_stats_mutex->unlock();
#else // #ifdef MBED_HEAP_STATS_ENABLED
    ptr = SUPER_MALLOC(size);
#endif // #ifdef MBED_HEAP_STATS_ENABLED
#ifdef MBED_MEM_TRACING_ENABLED
    mbed_mem_trace_malloc(ptr, size, caller);
    mbed_mem_trace_unlock();
#endif // #ifdef MBED_MEM_TRACING_ENABLED

    return ptr;
}
| 0 |
[
"CWE-190"
] |
mbed-os
|
151ebfcfc9f2383ee11ce3c771c3bf92900d6b43
| 101,788,159,327,881,560,000,000,000,000,000,000,000 | 36 |
Add integer overflow check to the malloc wrappers
Add a check that the combined size of the buffer to allocate and
alloc_info_t does not exceed the maximum integer value representable
by size_t.
|
/*
 * Serialize a BitmapOrPath planner node into `str` via the outfuncs
 * WRITE_* macros: node tag, the shared Path fields, then the list of
 * OR'd bitmap qualifiers and their combined selectivity.
 */
_outBitmapOrPath(StringInfo str, const BitmapOrPath *node)
{
	WRITE_NODE_TYPE("BITMAPORPATH");

	_outPathInfo(str, (const Path *) node);

	WRITE_NODE_FIELD(bitmapquals);
	WRITE_FLOAT_FIELD(bitmapselectivity, "%.4f");
}
| 0 |
[
"CWE-362"
] |
postgres
|
5f173040e324f6c2eebb90d86cf1b0cdb5890f0a
| 35,929,144,245,364,650,000,000,000,000,000,000,000 | 9 |
Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062
|
/* Initialise the PKCS#11 portion of a PKINIT identity crypto context to
 * known defaults.  Returns 0 on success or ENOMEM if the default module
 * name cannot be duplicated. */
pkinit_init_pkcs11(pkinit_identity_crypto_context ctx)
{
#ifndef WITHOUT_PKCS11
    /* Start from the default module name; everything else is "unset". */
    ctx->p11_module_name = strdup(PKCS11_MODNAME);
    if (ctx->p11_module_name == NULL)
        return ENOMEM;
    ctx->p11_module = NULL;
    ctx->slotid = PK_NOSLOT;
    ctx->token_label = NULL;
    ctx->cert_label = NULL;
    ctx->session = CK_INVALID_HANDLE;
    ctx->p11 = NULL;
#endif
    ctx->pkcs11_method = 0;
    return 0;
}
| 0 |
[
"CWE-476"
] |
krb5
|
f249555301940c6df3a2cdda13b56b5674eebc2e
| 164,805,720,117,584,570,000,000,000,000,000,000,000 | 20 |
PKINIT null pointer deref [CVE-2013-1415]
Don't dereference a null pointer when cleaning up.
The KDC plugin for PKINIT can dereference a null pointer when a
malformed packet causes processing to terminate early, leading to
a crash of the KDC process. An attacker would need to have a valid
PKINIT certificate or have observed a successful PKINIT authentication,
or an unauthenticated attacker could execute the attack if anonymous
PKINIT is enabled.
CVSSv2 vector: AV:N/AC:M/Au:N/C:N/I:N/A:C/E:P/RL:O/RC:C
This is a minimal commit for pullup; style fixes in a followup.
[[email protected]: reformat and edit commit message]
(cherry picked from commit c773d3c775e9b2d88bcdff5f8a8ba88d7ec4e8ed)
ticket: 7570
version_fixed: 1.11.1
status: resolved
|
/*
 * Copy the nodes selected by the XPointer range `range` from document
 * `source` into a new tree owned by document `target`.  The range's start
 * and end points may sit inside text nodes (range->index/index2 are then
 * character offsets) or inside element content (the indexes select the
 * Nth child).
 *
 * Returns the root of the newly built node list, or NULL on error or for
 * an invalid/non-range object.  The caller owns the returned tree.
 */
xmlXIncludeCopyRange(xmlXIncludeCtxtPtr ctxt, xmlDocPtr target,
	                xmlDocPtr source, xmlXPathObjectPtr range) {
    /* pointers to generated nodes */
    xmlNodePtr list = NULL, last = NULL, listParent = NULL;
    xmlNodePtr tmp, tmp2;
    /* pointers to traversal nodes */
    xmlNodePtr start, cur, end;
    int index1, index2;
    int level = 0, lastLevel = 0, endLevel = 0, endFlag = 0;

    if ((ctxt == NULL) || (target == NULL) || (source == NULL) ||
	(range == NULL))
	return(NULL);
    if (range->type != XPATH_RANGE)
	return(NULL);
    start = (xmlNodePtr) range->user;

    if ((start == NULL) || (start->type == XML_NAMESPACE_DECL))
	return(NULL);
    end = range->user2;
    if (end == NULL)
	/* A collapsed range degenerates to a deep copy of the start node. */
	return(xmlDocCopyNode(start, target, 1));
    if (end->type == XML_NAMESPACE_DECL)
        return(NULL);

    cur = start;
    index1 = range->index;
    index2 = range->index2;
    /*
     * level is depth of the current node under consideration
     * list is the pointer to the root of the output tree
     * listParent is a pointer to the parent of output tree (within
        the included file) in case we need to add another level
     * last is a pointer to the last node added to the output tree
     * lastLevel is the depth of last (relative to the root)
     */
    while (cur != NULL) {
	/*
	 * Check if our output tree needs a parent
	 */
	if (level < 0) {
	    while (level < 0) {
	        /* copy must include namespaces and properties */
	        tmp2 = xmlDocCopyNode(listParent, target, 2);
	        xmlAddChild(tmp2, list);
	        list = tmp2;
	        listParent = listParent->parent;
	        level++;
	    }
	    last = list;
	    lastLevel = 0;
	}
	/*
	 * Check whether we need to change our insertion point
	 */
	while (level < lastLevel) {
	    last = last->parent;
	    lastLevel --;
	}
	if (cur == end) {	/* Are we at the end of the range? */
	    if (cur->type == XML_TEXT_NODE) {
		const xmlChar *content = cur->content;
		int len;

		if (content == NULL) {
		    tmp = xmlNewTextLen(NULL, 0);
		} else {
		    /* Truncate the end text node at the range's character
		     * offset (and drop its head if start == end). */
		    len = index2;
		    if ((cur == start) && (index1 > 1)) {
			content += (index1 - 1);
			len -= (index1 - 1);
		    } else {
			len = index2;
		    }
		    tmp = xmlNewTextLen(content, len);
		}
		/* single sub text node selection */
		if (list == NULL)
		    return(tmp);
		/* prune and return full set */
		if (level == lastLevel)
		    xmlAddNextSibling(last, tmp);
		else
		    xmlAddChild(last, tmp);
		return(list);
	    } else {	/* ending node not a text node */
	        endLevel = level;	/* remember the level of the end node */
		endFlag = 1;
		/* last node - need to take care of properties + namespaces */
		tmp = xmlDocCopyNode(cur, target, 2);
		if (list == NULL) {
		    list = tmp;
		    listParent = cur->parent;
		} else {
		    if (level == lastLevel)
			xmlAddNextSibling(last, tmp);
		    else {
			xmlAddChild(last, tmp);
			lastLevel = level;
		    }
		}
		last = tmp;

		if (index2 > 1) {
		    end = xmlXIncludeGetNthChild(cur, index2 - 1);
		    index2 = 0;
		}
		if ((cur == start) && (index1 > 1)) {
		    cur = xmlXIncludeGetNthChild(cur, index1 - 1);
		    index1 = 0;
		}  else {
		    cur = cur->children;
		}
		level++;	/* increment level to show change */
		/*
		 * Now gather the remaining nodes from cur to end
		 */
		continue;	/* while */
	    }
	} else if (cur == start) {	/* Not at the end, are we at start? */
	    if ((cur->type == XML_TEXT_NODE) ||
		(cur->type == XML_CDATA_SECTION_NODE)) {
		const xmlChar *content = cur->content;

		if (content == NULL) {
		    tmp = xmlNewTextLen(NULL, 0);
		} else {
		    /* Drop the head of the start text node before the
		     * range's character offset. */
		    if (index1 > 1) {
			content += (index1 - 1);
			index1 = 0;
		    }
		    tmp = xmlNewText(content);
		}
		last = list = tmp;
		listParent = cur->parent;
	    } else {		/* Not text node */
		/*
		 * start of the range - need to take care of
		 * properties and namespaces
		 */
		tmp = xmlDocCopyNode(cur, target, 2);
		list = last = tmp;
		listParent = cur->parent;
		if (index1 > 1) {	/* Do we need to position? */
		    cur = xmlXIncludeGetNthChild(cur, index1 - 1);
		    level = lastLevel = 1;
		    index1 = 0;
		    /*
		     * Now gather the remaining nodes from cur to end
		     */
		    continue; /* while */
		}
	    }
	} else {
	    tmp = NULL;
	    switch (cur->type) {
		case XML_DTD_NODE:
		case XML_ELEMENT_DECL:
		case XML_ATTRIBUTE_DECL:
		case XML_ENTITY_NODE:
		    /* Do not copy DTD information */
		    break;
		case XML_ENTITY_DECL:
		    /* handle crossing entities -> stack needed */
		    break;
		case XML_XINCLUDE_START:
		case XML_XINCLUDE_END:
		    /* don't consider it part of the tree content */
		    break;
		case XML_ATTRIBUTE_NODE:
		    /* Humm, should not happen ! */
		    break;
		default:
		    /*
		     * Middle of the range - need to take care of
		     * properties and namespaces
		     */
		    tmp = xmlDocCopyNode(cur, target, 2);
		    break;
	    }
	    if (tmp != NULL) {
		if (level == lastLevel)
		    xmlAddNextSibling(last, tmp);
		else {
		    xmlAddChild(last, tmp);
		    lastLevel = level;
		}
		last = tmp;
	    }
	}
	/*
	 * Skip to next node in document order
	 */
	cur = xmlXPtrAdvanceNode(cur, &level);
	if (endFlag && (level >= endLevel))
	    break;
    }
    return(list);
}
| 0 |
[
"CWE-416"
] |
libxml2
|
1098c30a040e72a4654968547f415be4e4c40fe7
| 340,024,361,186,170,870,000,000,000,000,000,000,000 | 199 |
Fix use-after-free with `xmllint --xinclude --dropdtd`
The --dropdtd option can leave dangling pointers in entity reference
nodes. Make sure to skip these nodes when processing XIncludes.
This also avoids scanning entity declarations and even modifying
them inadvertently during XInclude processing.
Move from a block list to an allow list approach to avoid descending
into other node types that can't contain elements.
Fixes #237.
|
/// Look up the own-property descriptor of \p selfHandle for the computed
/// key \p nameValHandle, writing the result into \p desc.  The key is
/// first converted to a property key via toPropertyKeyIfObject (which may
/// execute user code when the key is an object, hence the possible
/// exception), then forwarded to getOwnComputedPrimitiveDescriptor with
/// the IgnoreProxy::No flag.
CallResult<bool> JSObject::getOwnComputedDescriptor(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<> nameValHandle,
    ComputedPropertyDescriptor &desc) {
  auto converted = toPropertyKeyIfObject(runtime, nameValHandle);
  if (LLVM_UNLIKELY(converted == ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  return JSObject::getOwnComputedPrimitiveDescriptor(
      selfHandle, runtime, *converted, IgnoreProxy::No, desc);
}
| 0 |
[
"CWE-843",
"CWE-125"
] |
hermes
|
fe52854cdf6725c2eaa9e125995da76e6ceb27da
| 318,661,896,243,185,950,000,000,000,000,000,000,000 | 12 |
[CVE-2020-1911] Look up HostObject computed properties on the right object in the prototype chain.
Summary:
The change in the hermes repository fixes the security vulnerability
CVE-2020-1911. This vulnerability only affects applications which
allow evaluation of uncontrolled, untrusted JavaScript code not
shipped with the app, so React Native apps will generally not be affected.
This revision includes a test for the bug. The test is generic JSI
code, so it is included in the hermes and react-native repositories.
Changelog: [Internal]
Reviewed By: tmikov
Differential Revision: D23322992
fbshipit-source-id: 4e88c974afe1ad33a263f9cac03e9dc98d33649a
|
/* Write a 32-bit value, byte-swapped to big-endian, to the xudc register
 * at byte `offset` from the mapped register base `addr`. */
static void xudc_write32_be(void __iomem *addr, u32 offset, u32 val)
{
	iowrite32be(val, addr + offset);
}
| 0 |
[
"CWE-20",
"CWE-129"
] |
linux
|
7f14c7227f342d9932f9b918893c8814f86d2a0d
| 20,890,272,840,358,471,000,000,000,000,000,000,000 | 4 |
USB: gadget: validate endpoint index for xilinx udc
Assure that host may not manipulate the index to point
past endpoint array.
Signed-off-by: Szymon Heidrich <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
/*
 * Verify an RSA-PSS signature and confirm the recovered PSS padding
 * matches the expected message hash.
 *
 * in/inLen     signature to verify
 * out/outLen   scratch buffer receiving the recovered signature data
 * digest       expected hash of the message; digestLen must equal the
 *              output size of `hash`, otherwise BAD_FUNC_ARG
 * hash, mgf    hash algorithm and mask generation function used by PSS
 * key          RSA key used for verification
 *
 * The salt length defaults to the hash length; for 1024-bit keys with
 * SHA-512 it is capped at RSA_PSS_SALT_MAX_SZ per FIPS 186-4 section 5.5
 * item (e).  Returns the (positive) verify result on success or a
 * negative error code.
 */
int wc_RsaPSS_VerifyCheck(byte* in, word32 inLen, byte* out, word32 outLen,
                          const byte* digest, word32 digestLen,
                          enum wc_HashType hash, int mgf,
                          RsaKey* key)
{
    int ret = 0, verify, saltLen, hLen, bits = 0;

    hLen = wc_HashGetDigestSize(hash);
    if (hLen < 0)
        return hLen;
    if ((word32)hLen != digestLen)
        return BAD_FUNC_ARG;

    saltLen = hLen;
#ifdef WOLFSSL_SHA512
    /* See FIPS 186-4 section 5.5 item (e). */
    bits = mp_count_bits(&key->n);
    if (bits == 1024 && hLen == WC_SHA512_DIGEST_SIZE)
        saltLen = RSA_PSS_SALT_MAX_SZ;
#endif

    verify = wc_RsaPSS_Verify_ex(in, inLen, out, outLen, hash,
                                 mgf, saltLen, key);
    if (verify > 0)
        /* Only check the padding when decryption/verify recovered data. */
        ret = wc_RsaPSS_CheckPadding_ex(digest, digestLen, out, verify,
                                        hash, saltLen, bits);
    if (ret == 0)
        ret = verify;

    return ret;
}
| 0 |
[
"CWE-310",
"CWE-787"
] |
wolfssl
|
fb2288c46dd4c864b78f00a47a364b96a09a5c0f
| 44,905,014,409,109,820,000,000,000,000,000,000,000 | 31 |
RSA-PSS: Handle edge case with encoding message to hash
When the key is small relative to the digest (1024-bit key, 64-byte
hash, 61-byte salt length), the internal message to hash is larger than
the output size.
Allocate a buffer for the message when this happens.
|
/* Drop the named regression-test database if it exists.
 * NOTE(review): dbname is interpolated into a double-quoted SQL identifier;
 * a name containing '"' would break the generated command.  Assumed to come
 * from trusted test configuration -- confirm callers. */
drop_database_if_exists(const char *dbname)
{
	header(_("dropping database \"%s\""), dbname);
	psql_command("postgres", "DROP DATABASE IF EXISTS \"%s\"", dbname);
}
| 0 |
[
"CWE-119"
] |
postgres
|
01824385aead50e557ca1af28640460fa9877d51
| 25,779,774,856,698,550,000,000,000,000,000,000,000 | 5 |
Prevent potential overruns of fixed-size buffers.
Coverity identified a number of places in which it couldn't prove that a
string being copied into a fixed-size buffer would fit. We believe that
most, perhaps all of these are in fact safe, or are copying data that is
coming from a trusted source so that any overrun is not really a security
issue. Nonetheless it seems prudent to forestall any risk by using
strlcpy() and similar functions.
Fixes by Peter Eisentraut and Jozef Mlich based on Coverity reports.
In addition, fix a potential null-pointer-dereference crash in
contrib/chkpass. The crypt(3) function is defined to return NULL on
failure, but chkpass.c didn't check for that before using the result.
The main practical case in which this could be an issue is if libc is
configured to refuse to execute unapproved hashing algorithms (e.g.,
"FIPS mode"). This ideally should've been a separate commit, but
since it touches code adjacent to one of the buffer overrun changes,
I included it in this commit to avoid last-minute merge issues.
This issue was reported by Honza Horak.
Security: CVE-2014-0065 for buffer overruns, CVE-2014-0066 for crypt()
|
/* Append one <response> element (with its <recipient> child and optional
 * <request-status>) to a schedule-response document and return it.
 * When a DAV namespace is supplied the recipient is emitted as an href;
 * otherwise it is emitted as plain text content. */
xmlNodePtr xml_add_schedresponse(xmlNodePtr root, xmlNsPtr dav_ns,
                                 xmlChar *recipient, xmlChar *status)
{
    xmlNodePtr resp = xmlNewChild(root, NULL, BAD_CAST "response", NULL);
    xmlNodePtr recip = xmlNewChild(resp, NULL, BAD_CAST "recipient", NULL);

    if (dav_ns)
        xml_add_href(recip, dav_ns, (const char *) recipient);
    else
        xmlNodeAddContent(recip, recipient);

    if (status)
        xmlNewChild(resp, NULL, BAD_CAST "request-status", status);

    return resp;
}
| 0 |
[
"CWE-787"
] |
cyrus-imapd
|
a5779db8163b99463e25e7c476f9cbba438b65f3
| 259,006,264,814,303,930,000,000,000,000,000,000,000 | 16 |
HTTP: don't overrun buffer when parsing strings with sscanf()
|
/* Compare one address field of two contacts.
 * Returns TRUE when both contacts have the field set to equal strings,
 * or when both lack the field entirely; FALSE otherwise. */
address_compare (EContact *ecard1,
                 EContact *ecard2,
                 EContactField field)
{
	const gchar *address1, *address2;
	gboolean equal;

	address1 = e_contact_get_const (ecard1, field);
	address2 = e_contact_get_const (ecard2, field);
	if (address1 && address2)
		equal = !strcmp (address1, address2);
	else
		/* Normalize pointers to booleans: equal only if both missing. */
		equal = (!!address1 == !!address2);

	return equal;
}
| 0 |
[] |
evolution-data-server
|
34bad61738e2127736947ac50e0c7969cc944972
| 125,523,811,544,702,240,000,000,000,000,000,000,000 | 17 |
Bug 796174 - strcat() considered unsafe for buffer overflow
|
/* Fetch the _pickle module's per-interpreter state from its module object. */
_Pickle_GetState(PyObject *module)
{
    return (PickleState *)PyModule_GetState(module);
}
| 0 |
[
"CWE-190",
"CWE-369"
] |
cpython
|
a4ae828ee416a66d8c7bf5ee71d653c2cc6a26dd
| 64,409,727,957,374,620,000,000,000,000,000,000,000 | 4 |
closes bpo-34656: Avoid relying on signed overflow in _pickle memos. (GH-9261)
|
/*
  Compare the two argument items using the native value representation of
  the comparison type handler.  Returns the handler's comparison result;
  when either side fails conversion (e.g. is SQL NULL), sets
  owner->null_value (if set_null) and returns -1.
*/
int Arg_comparator::compare_native()
{
  THD *thd= current_thd;
  /* Evaluate the left side first; the right side is only evaluated when
     the left conversion succeeded (same order as the nested original). */
  if ((*a)->val_native_with_conversion(thd, &m_native1,
                                       compare_type_handler()) ||
      (*b)->val_native_with_conversion(thd, &m_native2,
                                       compare_type_handler()))
  {
    if (set_null)
      owner->null_value= 1;
    return -1;
  }
  if (set_null)
    owner->null_value= 0;
  return compare_type_handler()->cmp_native(m_native1, m_native2);
}
| 0 |
[
"CWE-617"
] |
server
|
807945f2eb5fa22e6f233cc17b85a2e141efe2c8
| 44,715,551,295,058,860,000,000,000,000,000,000,000 | 18 |
MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item.
|
/* exit(2): terminate the calling process.
 * Only the low 8 bits of the user-supplied status are kept, shifted into
 * the wait(2) exit-status position expected by do_exit().  Never returns. */
SYSCALL_DEFINE1(exit, int, error_code)
{
	do_exit((error_code&0xff)<<8);
}
| 0 |
[
"CWE-200",
"CWE-284"
] |
linux
|
6c85501f2fabcfc4fc6ed976543d252c4eaf4be9
| 85,942,578,398,519,830,000,000,000,000,000,000,000 | 4 |
fix infoleak in waitid(2)
kernel_waitid() can return a PID, an error or 0. rusage is filled in the first
case and waitid(2) rusage should've been copied out exactly in that case, *not*
whenever kernel_waitid() has not returned an error. Compat variant shares that
braino; none of kernel_wait4() callers do, so the below ought to fix it.
Reported-and-tested-by: Alexander Potapenko <[email protected]>
Fixes: ce72a16fa705 ("wait4(2)/waitid(2): separate copying rusage to userland")
Cc: [email protected] # v4.13
Signed-off-by: Al Viro <[email protected]>
|
/* Public API entry point: set a domain's title, description or custom
 * XML metadata element, after validating the argument combination for
 * the requested metadata type and dispatching to the hypervisor driver.
 * Requires a read-write connection.  Returns 0 (or the driver's
 * non-negative result) on success, -1 on error. */
virDomainSetMetadata(virDomainPtr domain,
                     int type,
                     const char *metadata,
                     const char *key,
                     const char *uri,
                     unsigned int flags)
{
    virConnectPtr conn;

    VIR_DOMAIN_DEBUG(domain,
                     "type=%d, metadata='%s', key='%s', uri='%s', flags=%x",
                     type, NULLSTR(metadata), NULLSTR(key), NULLSTR(uri),
                     flags);

    virResetLastError();

    virCheckDomainReturn(domain, -1);
    conn = domain->conn;

    /* Mutating metadata is a write operation: reject read-only handles. */
    virCheckReadOnlyGoto(conn->flags, error);

    switch (type) {
    case VIR_DOMAIN_METADATA_TITLE:
        /* Titles are single-line by definition. */
        if (metadata && strchr(metadata, '\n')) {
            virReportInvalidArg(metadata, "%s",
                                _("metadata title can't contain "
                                  "newlines"));
            goto error;
        }
        /* fallthrough */
    case VIR_DOMAIN_METADATA_DESCRIPTION:
        /* Title/description take plain text; key/uri must be absent. */
        virCheckNullArgGoto(uri, error);
        virCheckNullArgGoto(key, error);
        break;
    case VIR_DOMAIN_METADATA_ELEMENT:
        /* Custom XML element: uri is mandatory, key only when setting. */
        virCheckNonNullArgGoto(uri, error);
        if (metadata)
            virCheckNonNullArgGoto(key, error);
        break;
    default:
        /* For future expansion */
        break;
    }

    if (conn->driver->domainSetMetadata) {
        int ret;
        ret = conn->driver->domainSetMetadata(domain, type, metadata, key, uri,
                                              flags);
        if (ret < 0)
            goto error;
        return ret;
    }

    virReportUnsupportedError();

 error:
    virDispatchError(domain->conn);
    return -1;
}
| 0 |
[
"CWE-254"
] |
libvirt
|
506e9d6c2d4baaf580d489fff0690c0ff2ff588f
| 56,034,529,472,827,050,000,000,000,000,000,000,000 | 59 |
virDomainGetTime: Deny on RO connections
We have a policy that if API may end up talking to a guest agent
it should require RW connection. We don't obey the rule in
virDomainGetTime().
Signed-off-by: Michal Privoznik <[email protected]>
|
/* Validate data being loaded into a destination register.
 *
 * The verdict register accepts only verdict-typed data; chain jumps
 * (NFT_GOTO/NFT_JUMP) are additionally checked for ruleset loops and
 * for the maximum jump-stack depth.  Any other register accepts only
 * NFT_DATA_VALUE data (when data is given at all).
 *
 * Returns 0 on success, -EINVAL on a type mismatch, -EMLINK when the
 * jump-stack limit would be exceeded, or the loop-check error. */
int nft_validate_data_load(const struct nft_ctx *ctx, enum nft_registers reg,
			   const struct nft_data *data,
			   enum nft_data_types type)
{
	int err;

	switch (reg) {
	case NFT_REG_VERDICT:
		if (data == NULL || type != NFT_DATA_VERDICT)
			return -EINVAL;

		if (data->verdict == NFT_GOTO || data->verdict == NFT_JUMP) {
			/* Reject jumps that would create a cycle. */
			err = nf_tables_check_loops(ctx, data->chain);
			if (err < 0)
				return err;

			/* Record the deepest path leading to the target
			 * chain and enforce the jump-stack depth limit. */
			if (ctx->chain->level + 1 > data->chain->level) {
				if (ctx->chain->level + 1 == NFT_JUMP_STACK_SIZE)
					return -EMLINK;
				data->chain->level = ctx->chain->level + 1;
			}
		}

		return 0;
	default:
		if (data != NULL && type != NFT_DATA_VALUE)
			return -EINVAL;
		return 0;
	}
}
| 0 |
[
"CWE-19"
] |
nf
|
a2f18db0c68fec96631c10cad9384c196e9008ac
| 302,750,533,944,285,000,000,000,000,000,000,000,000 | 30 |
netfilter: nf_tables: fix flush ruleset chain dependencies
Jumping between chains doesn't mix well with flush ruleset. Rules
from a different chain and set elements may still refer to us.
[ 353.373791] ------------[ cut here ]------------
[ 353.373845] kernel BUG at net/netfilter/nf_tables_api.c:1159!
[ 353.373896] invalid opcode: 0000 [#1] SMP
[ 353.373942] Modules linked in: intel_powerclamp uas iwldvm iwlwifi
[ 353.374017] CPU: 0 PID: 6445 Comm: 31c3.nft Not tainted 3.18.0 #98
[ 353.374069] Hardware name: LENOVO 5129CTO/5129CTO, BIOS 6QET47WW (1.17 ) 07/14/2010
[...]
[ 353.375018] Call Trace:
[ 353.375046] [<ffffffff81964c31>] ? nf_tables_commit+0x381/0x540
[ 353.375101] [<ffffffff81949118>] nfnetlink_rcv+0x3d8/0x4b0
[ 353.375150] [<ffffffff81943fc5>] netlink_unicast+0x105/0x1a0
[ 353.375200] [<ffffffff8194438e>] netlink_sendmsg+0x32e/0x790
[ 353.375253] [<ffffffff818f398e>] sock_sendmsg+0x8e/0xc0
[ 353.375300] [<ffffffff818f36b9>] ? move_addr_to_kernel.part.20+0x19/0x70
[ 353.375357] [<ffffffff818f44f9>] ? move_addr_to_kernel+0x19/0x30
[ 353.375410] [<ffffffff819016d2>] ? verify_iovec+0x42/0xd0
[ 353.375459] [<ffffffff818f3e10>] ___sys_sendmsg+0x3f0/0x400
[ 353.375510] [<ffffffff810615fa>] ? native_sched_clock+0x2a/0x90
[ 353.375563] [<ffffffff81176697>] ? acct_account_cputime+0x17/0x20
[ 353.375616] [<ffffffff8110dc78>] ? account_user_time+0x88/0xa0
[ 353.375667] [<ffffffff818f4bbd>] __sys_sendmsg+0x3d/0x80
[ 353.375719] [<ffffffff81b184f4>] ? int_check_syscall_exit_work+0x34/0x3d
[ 353.375776] [<ffffffff818f4c0d>] SyS_sendmsg+0xd/0x20
[ 353.375823] [<ffffffff81b1826d>] system_call_fastpath+0x16/0x1b
Release objects in this order: rules -> sets -> chains -> tables, to
make sure no references to chains are held anymore.
Reported-by: Asbjoern Sloth Toennesen <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>
|
ModuleExport size_t RegisterPSDImage(void)
{
MagickInfo
*entry;
entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags|=CoderEncoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags|=CoderEncoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
| 0 |
[
"CWE-772"
] |
ImageMagick
|
d4e8b9722577547177a2daecee98ea9e5fe54968
| 269,835,955,450,076,140,000,000,000,000,000,000,000 | 21 |
Fixed memory leak reported in #462.
|
// Construct a repetition operator over 'ope'; presumably matches between
// 'min' and 'max' occurrences (see the class's parse logic to confirm).
Repetition(const std::shared_ptr<Ope> &ope, size_t min, size_t max)
    : ope_(ope), min_(min), max_(max) {}
| 0 |
[
"CWE-125"
] |
cpp-peglib
|
b3b29ce8f3acf3a32733d930105a17d7b0ba347e
| 166,049,266,410,593,820,000,000,000,000,000,000,000 | 2 |
Fix #122
|
// Euclidean norm of a 3-vector: sqrt(x*x + y*y + z*z).
// NOTE(review): the intermediate squares can overflow/underflow for extreme
// values of T; std::hypot(x, y, z) (C++17) avoids that at some cost --
// confirm callers' value ranges before changing.
inline T hypot(const T x, const T y, const T z) {
  return std::sqrt(x*x + y*y + z*z);
}
| 0 |
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
| 104,112,492,835,473,020,000,000,000,000,000,000,000 | 3 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
|
/* Take down an IPX interface.  Bottom-half-safe locking wrapper: acquires
 * ipx_interfaces_lock around the unlocked worker __ipxitf_down(). */
void ipxitf_down(struct ipx_interface *intrfc)
{
	spin_lock_bh(&ipx_interfaces_lock);
	__ipxitf_down(intrfc);
	spin_unlock_bh(&ipx_interfaces_lock);
}
| 0 |
[
"CWE-416"
] |
linux
|
ee0d8d8482345ff97a75a7d747efc309f13b0d80
| 314,395,366,266,803,900,000,000,000,000,000,000,000 | 6 |
ipx: call ipxitf_put() in ioctl error path
We should call ipxitf_put() if the copy_to_user() fails.
Reported-by: 李强 <[email protected]>
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/*
 * Lock the inodes of two parent dentries for a rename, in a deadlock-free
 * order:
 *   - same directory: take one lock only, return NULL;
 *   - otherwise s_vfs_rename_mutex serializes cross-directory renames,
 *     and when one directory is an ancestor of the other, the ancestor's
 *     i_mutex is taken first (I_MUTEX_PARENT before I_MUTEX_CHILD).
 * Returns the dentry on the descendant's path whose parent is the other
 * argument when an ancestor relationship exists, NULL otherwise.
 */
struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
{
	struct dentry *p;

	if (p1 == p2) {
		mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
		return NULL;
	}

	mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex);

	/* Is p2 an ancestor of p1?  Then lock p2 (parent) before p1. */
	for (p = p1; p->d_parent != p; p = p->d_parent) {
		if (p->d_parent == p2) {
			mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT);
			mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_CHILD);
			return p;
		}
	}

	/* Is p1 an ancestor of p2?  Then lock p1 (parent) before p2. */
	for (p = p2; p->d_parent != p; p = p->d_parent) {
		if (p->d_parent == p1) {
			mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
			mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
			return p;
		}
	}

	/* Unrelated directories: lock order is fixed by argument position. */
	mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
	return NULL;
}
| 0 |
[
"CWE-120"
] |
linux-2.6
|
d70b67c8bc72ee23b55381bd6a884f4796692f77
| 216,054,640,570,691,600,000,000,000,000,000,000,000 | 31 |
[patch] vfs: fix lookup on deleted directory
Lookup can install a child dentry for a deleted directory. This keeps
the directory dentry alive, and the inode pinned in the cache and on
disk, even after all external references have gone away.
This isn't a big problem normally, since memory pressure or umount
will clear out the directory dentry and its children, releasing the
inode. But for UBIFS this causes problems because its orphan area can
overflow.
Fix this by returning ENOENT for all lookups on a S_DEAD directory
before creating a child dentry.
Thanks to Zoltan Sogor for noticing this while testing UBIFS, and
Artem for the excellent analysis of the problem and testing.
Reported-by: Artem Bityutskiy <[email protected]>
Tested-by: Artem Bityutskiy <[email protected]>
Signed-off-by: Miklos Szeredi <[email protected]>
Signed-off-by: Al Viro <[email protected]>
|
/* Public API: intersect 'set' with 'other' in place.
 * Immutable sets are left untouched.
 * NOTE(review): null-safety of 'other' is not visible here -- upstream
 * later made all set operators null-safe again (harfbuzz PR #3162);
 * confirm *other cannot be the inert null object before relying on this. */
hb_set_intersect (hb_set_t *set,
                  const hb_set_t *other)
{
  if (unlikely (hb_object_is_immutable (set)))
    return;

  set->intersect (*other);
}
| 1 |
[
"CWE-787"
] |
harfbuzz
|
d3e09bf4654fe5478b6dbf2b26ebab6271317d81
| 18,484,642,144,351,567,000,000,000,000,000,000,000 | 8 |
[set] Make all operators null-safe again
Changed my mind.
Also for hb_map_clear().
Part of https://github.com/harfbuzz/harfbuzz/pull/3162
|
/* Check whether the RRset of 'type' at 'node' carries an RRSIG generated
 * by 'key' (matched on algorithm and key id).  Returns true when such a
 * signature exists, false when there is no RRSIG set or no match. */
signed_with_key(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version,
		dns_rdatatype_t type, dst_key_t *key)
{
	isc_result_t result;
	dns_rdataset_t rdataset;
	dns_rdata_t rdata = DNS_RDATA_INIT;
	dns_rdata_rrsig_t rrsig;

	dns_rdataset_init(&rdataset);
	result = dns_db_findrdataset(db, node, version, dns_rdatatype_rrsig,
				     type, 0, &rdataset, NULL);
	if (result != ISC_R_SUCCESS) {
		/* No RRSIGs covering 'type': nothing can match. */
		INSIST(!dns_rdataset_isassociated(&rdataset));
		return (false);
	}

	/* Walk every RRSIG looking for one produced by 'key'. */
	for (result = dns_rdataset_first(&rdataset);
	     result == ISC_R_SUCCESS;
	     result = dns_rdataset_next(&rdataset)) {
		dns_rdataset_current(&rdataset, &rdata);
		result = dns_rdata_tostruct(&rdata, &rrsig, NULL);
		INSIST(result == ISC_R_SUCCESS);
		if (rrsig.algorithm == dst_key_alg(key) &&
		    rrsig.keyid == dst_key_id(key)) {
			dns_rdataset_disassociate(&rdataset);
			return (true);
		}
		dns_rdata_reset(&rdata);
	}

	dns_rdataset_disassociate(&rdataset);
	return (false);
}
| 0 |
[
"CWE-327"
] |
bind9
|
f09352d20a9d360e50683cd1d2fc52ccedcd77a0
| 60,155,723,772,639,080,000,000,000,000,000,000,000 | 31 |
Update keyfetch_done compute_tag check
If in keyfetch_done the compute_tag fails (because for example the
algorithm is not supported), don't crash, but instead ignore the
key.
|
/* Build and transmit the IKEv2 IKE_SA_INIT request as initiator.
 * Emits: HDR, optional anti-DDoS COOKIE notification (when the responder
 * already sent us one), SAi1 proposals, KEi, Ni nonce and vendor IDs.
 * The full packet is saved both for retransmission and for later AUTH
 * payload hashing, then a retransmit timer is scheduled.
 * Returns STF_OK on success or STF_INTERNAL_ERROR on any build failure. */
static stf_status ikev2_parent_outI1_common(struct msg_digest *md,
					    struct state *st)
{
	struct connection *c = st->st_connection;
	int numvidtosend = 0;

	/* set up reply */
	init_pbs(&reply_stream, reply_buffer, sizeof(reply_buffer),
		 "reply packet");

	/* HDR out */
	{
		struct isakmp_hdr hdr;

		zero(&hdr);     /* default to 0 */
		/* Impair function will raise major/minor by 1 for testing */
		hdr.isa_version = build_ike_version();
		/* First payload depends on whether we must echo a cookie. */
		if (st->st_dcookie.ptr)
			hdr.isa_np = ISAKMP_NEXT_v2N;
		else
			hdr.isa_np = ISAKMP_NEXT_v2SA;
		hdr.isa_xchg = ISAKMP_v2_SA_INIT;
		hdr.isa_flags = ISAKMP_FLAGS_I;
		memcpy(hdr.isa_icookie, st->st_icookie, COOKIE_SIZE);
		/* R-cookie, are left zero */

		if (!out_struct(&hdr, &isakmp_hdr_desc, &reply_stream,
				&md->rbody)) {
			reset_cur_state();
			return STF_INTERNAL_ERROR;
		}
	}

	/* send an anti DOS cookie, 4306 2.6, if we have received one from the
	 * responder
	 */
	if (st->st_dcookie.ptr) {
		chunk_t child_spi;
		memset(&child_spi, 0, sizeof(child_spi));
		ship_v2N(ISAKMP_NEXT_v2SA, DBGP(
				 IMPAIR_SEND_BOGUS_ISAKMP_FLAG) ?
			 (ISAKMP_PAYLOAD_NONCRITICAL |
			  ISAKMP_PAYLOAD_LIBRESWAN_BOGUS) :
			 ISAKMP_PAYLOAD_NONCRITICAL, PROTO_ISAKMP,
			 &child_spi,
			 v2N_COOKIE, &st->st_dcookie, &md->rbody);
	}
	/* SA out */
	{
		u_char *sa_start = md->rbody.cur;

		if (st->st_sadb->prop_disj_cnt == 0 || st->st_sadb->prop_disj)
			st->st_sadb = sa_v2_convert(st->st_sadb);

		if (!ikev2_out_sa(&md->rbody,
				  PROTO_ISAKMP,
				  st->st_sadb,
				  st, TRUE, /* parentSA */
				  ISAKMP_NEXT_v2KE)) {
			libreswan_log("outsa fail");
			reset_cur_state();
			return STF_INTERNAL_ERROR;
		}
		/* save initiator SA for later HASH */
		if (st->st_p1isa.ptr == NULL) { /* no leak!  (MUST be first time) */
			clonetochunk(st->st_p1isa, sa_start,
				     md->rbody.cur - sa_start,
				     "sa in main_outI1");
		}
	}

	/* send KE */
	if (!justship_v2KE(st, &st->st_gi, st->st_oakley.groupnum, &md->rbody,
			   ISAKMP_NEXT_v2Ni))
		return STF_INTERNAL_ERROR;

	/*
	 * Check which Vendor ID's we need to send - there will be more soon
	 * In IKEv2, DPD and NAT-T are no longer vendorid's
	 */
	if (c->send_vendorid) {
		numvidtosend++;  /* if we need to send Libreswan VID */
	}

	/* send NONCE */
	{
		/* Next-payload chaining: V follows only if VIDs are queued. */
		int np = numvidtosend > 0 ? ISAKMP_NEXT_v2V : ISAKMP_NEXT_v2NONE;
		struct ikev2_generic in;
		pb_stream pb;

		memset(&in, 0, sizeof(in));
		in.isag_np = np;
		in.isag_critical = ISAKMP_PAYLOAD_NONCRITICAL;
		if (DBGP(IMPAIR_SEND_BOGUS_ISAKMP_FLAG)) {
			libreswan_log(
				" setting bogus ISAKMP_PAYLOAD_LIBRESWAN_BOGUS flag in ISAKMP payload");
			in.isag_critical |= ISAKMP_PAYLOAD_LIBRESWAN_BOGUS;
		}

		if (!out_struct(&in, &ikev2_nonce_desc, &md->rbody, &pb) ||
		    !out_raw(st->st_ni.ptr, st->st_ni.len, &pb, "IKEv2 nonce"))
			return STF_INTERNAL_ERROR;
		close_output_pbs(&pb);
	}

	/* Send Vendor VID if needed */
	if (c->send_vendorid) {
		const char *myvid = ipsec_version_vendorid();
		int np = --numvidtosend >
			 0 ? ISAKMP_NEXT_v2V : ISAKMP_NEXT_v2NONE;

		if (!out_generic_raw(np, &isakmp_vendor_id_desc, &md->rbody,
				     myvid, strlen(myvid),
				     "Vendor ID"))
			return STF_INTERNAL_ERROR;

		/* ensure our VID chain was valid */
		passert(numvidtosend == 0);
	}

	close_message(&md->rbody, st);
	close_output_pbs(&reply_stream);

	/* Keep a copy for retransmission. */
	freeanychunk(st->st_tpacket);
	clonetochunk(st->st_tpacket, reply_stream.start,
		     pbs_offset(&reply_stream),
		     "reply packet for ikev2_parent_outI1_tail");

	/* save packet for later signing */
	freeanychunk(st->st_firstpacket_me);
	clonetochunk(st->st_firstpacket_me, reply_stream.start,
		     pbs_offset(&reply_stream), "saved first packet");

	/* Transmit */
	send_ike_msg(st, __FUNCTION__);

	/* Arm the retransmit timer in place of any pending event. */
	delete_event(st);
	event_schedule(EVENT_v2_RETRANSMIT, EVENT_RETRANSMIT_DELAY_0, st);

	reset_cur_state();
	return STF_OK;
}
| 0 |
[
"CWE-20"
] |
libreswan
|
2899351224fe2940aec37d7656e1e392c0fe07f0
| 316,878,063,956,977,500,000,000,000,000,000,000,000 | 143 |
SECURITY: Properly handle IKEv2 I1 notification packet without KE payload
|
// Verifies that the per-try timeout timer is NOT armed while the request is
// still waiting for an upstream connection (new-stream establishment time is
// excluded from the per-try budget), and that once the pool signals ready the
// timer is armed; when it then fires, the upstream stream is reset, timeout
// stats/response flags are recorded, and a 504 is returned downstream.
TEST_F(RouterTest, UpstreamPerTryTimeoutExcludesNewStream) {
  InSequence s;
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  Http::ConnectionPool::Callbacks* pool_callbacks;

  // Capture the pool callbacks; returning nullptr simulates a pending
  // connection (no stream available yet).
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _))
      .WillOnce(Invoke(
          [&](Http::ResponseDecoder& decoder,
              Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {
            response_decoder = &decoder;
            pool_callbacks = &callbacks;
            return nullptr;
          }));

  response_timeout_ = new Event::MockTimer(&callbacks_.dispatcher_);
  EXPECT_CALL(*response_timeout_, enableTimer(_, _));
  EXPECT_CALL(callbacks_.stream_info_, onUpstreamHostSelected(_))
      .WillOnce(Invoke([&](const Upstream::HostDescriptionConstSharedPtr host) -> void {
        EXPECT_EQ(host_address_, host->address());
      }));

  Http::TestRequestHeaderMapImpl headers{{"x-envoy-internal", "true"},
                                         {"x-envoy-upstream-rq-per-try-timeout-ms", "5"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  Buffer::OwnedImpl data;
  router_.decodeData(data, true);

  per_try_timeout_ = new Event::MockTimer(&callbacks_.dispatcher_);
  EXPECT_CALL(*per_try_timeout_, enableTimer(_, _));
  EXPECT_EQ(0U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());

  // The per try timeout timer should not be started yet.
  pool_callbacks->onPoolReady(encoder, cm_.thread_local_cluster_.conn_pool_.host_,
                              upstream_stream_info_, Http::Protocol::Http10);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());

  // Firing the per-try timeout must reset the stream, report a local-origin
  // timeout to outlier detection, and synthesize a 504 downstream.
  EXPECT_CALL(encoder.stream_, resetStream(Http::StreamResetReason::LocalReset));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putResult(Upstream::Outlier::Result::LocalOriginTimeout, _));
  EXPECT_CALL(*per_try_timeout_, disableTimer());
  EXPECT_CALL(callbacks_.stream_info_,
              setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout));
  EXPECT_CALL(*response_timeout_, disableTimer());
  Http::TestResponseHeaderMapImpl response_headers{
      {":status", "504"}, {"content-length", "24"}, {"content-type", "text/plain"}};
  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));
  EXPECT_CALL(callbacks_, encodeData(_, true));
  per_try_timeout_->invokeCallback();

  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_
                    .counter("upstream_rq_per_try_timeout")
                    .value());
  EXPECT_EQ(1UL, cm_.thread_local_cluster_.conn_pool_.host_->stats().rq_timeout_.value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
}
| 0 |
[
"CWE-703"
] |
envoy
|
18871dbfb168d3512a10c78dd267ff7c03f564c6
| 292,759,407,642,811,570,000,000,000,000,000,000,000 | 59 |
[1.18] CVE-2022-21655
Crash with direct_response
Signed-off-by: Otto van der Schaaf <[email protected]>
|
/* Register a multi-process-element (MPE) type plug-in in the given context.
 * Thin wrapper that routes through the generic type-plugin registrar with
 * the MPE plugin chain selected. */
cmsBool _cmsRegisterMultiProcessElementPlugin(cmsContext id, cmsPluginBase* Data)
{
    return RegisterTypesPlugin(id, Data,MPEPlugin);
}
| 0 |
[
"CWE-125"
] |
Little-CMS
|
5ca71a7bc18b6897ab21d815d15e218e204581e2
| 71,410,058,903,488,770,000,000,000,000,000,000,000 | 4 |
Added an extra check to MLU bounds
Thanks to Ibrahim el-sayed for spotting the bug
|
/* Execute a chain of traffic-control actions on skb.
 *
 * Skips the whole chain (returning TC_ACT_OK) when TC_NCLS is set on the
 * packet's tc_verd, clearing the flag.  Otherwise runs each action in
 * order: TC_ACT_REPEAT re-runs the same action, TC_ACT_PIPE continues to
 * the next one, and any other code stops the walk and is returned.
 * A classid left on the skb by an action is copied into *res. */
int tcf_action_exec(struct sk_buff *skb, struct tc_action *act,
		    struct tcf_result *res)
{
	struct tc_action *a;
	int ret = -1;

	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		D2PRINTK("(%p)tcf_action_exec: cleared TC_NCLS in %s out %s\n",
			 skb, skb->input_dev ? skb->input_dev->name : "xxx",
			 skb->dev->name);
		ret = TC_ACT_OK;
		goto exec_done;
	}
	while ((a = act) != NULL) {
repeat:
		if (a->ops && a->ops->act) {
			ret = a->ops->act(&skb, a);
			if (TC_MUNGED & skb->tc_verd) {
				/* copied already, allow trampling */
				skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
				skb->tc_verd = CLR_TC_MUNGED(skb->tc_verd);
			}
			if (ret == TC_ACT_REPEAT)
				goto repeat;	/* we need a ttl - JHS */
			if (ret != TC_ACT_PIPE)
				goto exec_done;
		}
		act = a->next;
	}
exec_done:
	/* Propagate any classification result set by an action. */
	if (skb->tc_classid > 0) {
		res->classid = skb->tc_classid;
		res->class = 0;
		skb->tc_classid = 0;
	}
	return ret;
}
| 0 |
[
"CWE-200"
] |
linux-2.6
|
9ef1d4c7c7aca1cd436612b6ca785b726ffb8ed8
| 123,764,204,150,066,640,000,000,000,000,000,000,000 | 38 |
[NETLINK]: Missing initializations in dumped data
Mostly missing initialization of padding fields of 1 or 2 bytes length,
two instances of uninitialized nlmsgerr->msg of 16 bytes length.
Signed-off-by: Patrick McHardy <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
Subsets and Splits
CWE 416 & 19
The query filters records related to specific CWEs (Common Weakness Enumerations), providing a basic overview of entries with these vulnerabilities but without deeper analysis.
CWE Frequency in Train Set
Counts the occurrences of each CWE (Common Weakness Enumeration) in the dataset, providing a basic distribution but limited insight.