| func | target | cwe | project | commit_id | hash | size | message |
|---|---|---|---|---|---|---|---|
| stringlengths 0-484k | int64 0-1 | listlengths 0-4 | stringclasses 799 values | stringlengths 40-40 | float64 1,215,700,430,453,689,100,000,000B to 340,281,914,521,452,260,000,000,000,000B | int64 1-24k | stringlengths 0-13.3k |
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
} | 0 | [
"CWE-416"
]
| linux | 0774a964ef561b7170d8d1b1bfe6f88002b6d219 | 13,103,144,539,224,706,000,000,000,000,000,000,000 | 4 | KVM: Fix out of range accesses to memslots
Reset the LRU slot if it becomes invalid when deleting a memslot to fix
an out-of-bounds/use-after-free access when searching through memslots.
Explicitly check for there being no used slots in search_memslots(), and
in the caller of s390's approximation variant.
Fixes: 36947254e5f9 ("KVM: Dynamically size memslot array based on number of used slots")
Reported-by: Qian Cai <[email protected]>
Cc: Peter Xu <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
Message-Id: <[email protected]>
Acked-by: Christian Borntraeger <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
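For illustration, here is a standalone, heavily simplified model of the guard the commit message describes (refusing to search when no slots are in use); the struct and function names below are invented for the example and are not the kernel's.

```c
#include <stddef.h>

/* Toy model of a memslot table; field names are hypothetical,
 * not the kernel's struct kvm_memslots. */
struct toy_slot  { unsigned long base_gfn, npages; };
struct toy_slots { size_t used_slots; struct toy_slot slots[32]; };

/* Return a matching slot, or NULL. The point of the sketch: bail out
 * early when used_slots == 0 so no stale entry is ever dereferenced. */
static const struct toy_slot *
toy_search_memslots(const struct toy_slots *s, unsigned long gfn)
{
    if (s->used_slots == 0)        /* the explicit "no used slots" guard */
        return NULL;
    for (size_t i = 0; i < s->used_slots; i++) {
        const struct toy_slot *slot = &s->slots[i];
        if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)
            return slot;
    }
    return NULL;
}

int main(void)
{
    struct toy_slots s = { .used_slots = 0 };
    /* With zero used slots the search returns NULL instead of scanning
     * invalid entries -- the out-of-range access the commit message fixes. */
    return toy_search_memslots(&s, 0x1000) == NULL ? 0 : 1;
}
```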
virDomainDiskDefCheckDuplicateInfo(const virDomainDiskDef *a,
const virDomainDiskDef *b)
{
if (STREQ(a->dst, b->dst)) {
virReportError(VIR_ERR_XML_ERROR,
_("target '%s' duplicated for disk sources '%s' and '%s'"),
a->dst,
NULLSTR(virDomainDiskGetSource(a)),
NULLSTR(virDomainDiskGetSource(b)));
return -1;
}
/* Duplicate WWN/serial isn't usually problematic for the OS and
* forbidding it would possibly inhibit using multipath configurations */
return 0;
} | 0 | [
"CWE-212"
]
| libvirt | a5b064bf4b17a9884d7d361733737fb614ad8979 | 235,371,201,691,049,780,000,000,000,000,000,000,000 | 17 | conf: Don't format http cookies unless VIR_DOMAIN_DEF_FORMAT_SECURE is used
Starting with 3b076391befc3fe72deb0c244ac6c2b4c100b410
(v6.1.0-122-g3b076391be) we support http cookies. Since they may contain
somewhat sensitive information we should not format them into the XML
unless VIR_DOMAIN_DEF_FORMAT_SECURE is asserted.
Reported-by: Han Han <[email protected]>
Signed-off-by: Peter Krempa <[email protected]>
Reviewed-by: Erik Skultety <[email protected]> |
static double mp_bitwise_and(_cimg_math_parser& mp) {
return (double)((longT)_mp_arg(2) & (longT)_mp_arg(3));
} | 0 | [
"CWE-770"
]
| cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 305,194,306,434,608,630,000,000,000,000,000,000,000 | 3 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
void kernel_power_off(void)
{
kernel_shutdown_prepare(SYSTEM_POWER_OFF);
if (pm_power_off_prepare)
pm_power_off_prepare();
disable_nonboot_cpus();
syscore_shutdown();
printk(KERN_EMERG "Power down.\n");
kmsg_dump(KMSG_DUMP_POWEROFF);
machine_power_off();
} | 0 | [
"CWE-264"
]
| linux | 259e5e6c75a910f3b5e656151dc602f53f9d7548 | 321,964,878,151,573,030,000,000,000,000,000,000,000 | 11 | Add PR_{GET,SET}_NO_NEW_PRIVS to prevent execve from granting privs
With this change, calling
prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)
disables privilege granting operations at execve-time. For example, a
process will not be able to execute a setuid binary to change their uid
or gid if this bit is set. The same is true for file capabilities.
Additionally, LSM_UNSAFE_NO_NEW_PRIVS is defined to ensure that
LSMs respect the requested behavior.
To determine if the NO_NEW_PRIVS bit is set, a task may call
prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0);
It returns 1 if set and 0 if it is not set. If any of the arguments are
non-zero, it will return -1 and set errno to -EINVAL.
(PR_SET_NO_NEW_PRIVS behaves similarly.)
This functionality is desired for the proposed seccomp filter patch
series. By using PR_SET_NO_NEW_PRIVS, it allows a task to modify the
system call behavior for itself and its child tasks without being
able to impact the behavior of a more privileged task.
Another potential use is making certain privileged operations
unprivileged. For example, chroot may be considered "safe" if it cannot
affect privileged tasks.
Note, this patch causes execve to fail when PR_SET_NO_NEW_PRIVS is
set and AppArmor is in use. It is fixed in a subsequent patch.
Signed-off-by: Andy Lutomirski <[email protected]>
Signed-off-by: Will Drewry <[email protected]>
Acked-by: Eric Paris <[email protected]>
Acked-by: Kees Cook <[email protected]>
v18: updated change desc
v17: using new define values as per 3.4
Signed-off-by: James Morris <[email protected]> |
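The calling convention is given in the commit message itself; a minimal userspace sketch of it (Linux-only, with the prctl constants defined as a fallback for old headers) would look like this:

```c
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_NO_NEW_PRIVS            /* fallback for pre-3.4 headers */
#define PR_SET_NO_NEW_PRIVS 38
#define PR_GET_NO_NEW_PRIVS 39
#endif

int main(void)
{
    /* One-way switch: after this, execve() cannot grant new privileges
     * (setuid bits, file capabilities) to this task or its children. */
    if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) {
        perror("prctl(PR_SET_NO_NEW_PRIVS)");
        return 1;
    }
    /* Query it back: returns 1 if set, 0 if not (per the commit message). */
    printf("no_new_privs = %d\n", prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0));
    return 0;
}
```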
static void test_helper_initialized(VncConnection *conn,
gpointer opaque)
{
struct GVncTest *test = opaque;
gint32 encodings[] = { VNC_CONNECTION_ENCODING_DESKTOP_RESIZE,
VNC_CONNECTION_ENCODING_ZRLE,
VNC_CONNECTION_ENCODING_HEXTILE,
VNC_CONNECTION_ENCODING_RRE,
VNC_CONNECTION_ENCODING_COPY_RECT,
VNC_CONNECTION_ENCODING_RAW };
gint32 *encodingsp;
int n_encodings;
test_helper_desktop_resize(conn,
vnc_connection_get_width(conn),
vnc_connection_get_height(conn),
test);
encodingsp = encodings;
n_encodings = G_N_ELEMENTS(encodings);
VNC_DEBUG("Sending %d encodings", n_encodings);
if (!vnc_connection_set_encodings(conn, n_encodings, encodingsp))
goto error;
VNC_DEBUG("Requesting first framebuffer update");
if (!vnc_connection_framebuffer_update_request(test->conn,
0, 0, 0,
vnc_connection_get_width(test->conn),
vnc_connection_get_height(test->conn)))
vnc_connection_shutdown(test->conn);
test->connected = TRUE;
return;
error:
vnc_connection_shutdown(conn);
} | 0 | []
| gtk-vnc | ea0386933214c9178aaea9f2f85049ea3fa3e14a | 82,850,833,000,416,610,000,000,000,000,000,000,000 | 38 | Fix bounds checking for RRE, hextile & copyrect encodings
While the client would bounds check the overall update
region, it failed to bounds check the payload data
parameters.
Add a test case to validate bounds checking.
https://bugzilla.gnome.org/show_bug.cgi?id=778048
CVE-2017-5884
Signed-off-by: Daniel P. Berrange <[email protected]> |
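A standalone sketch of the kind of payload bounds check the commit describes, validating each rectangle against the framebuffer with overflow-safe arithmetic; the helper below is illustrative, not gtk-vnc code:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Validate that a sub-rectangle lies entirely inside the framebuffer.
 * Sums are done in 64-bit so x+w and y+h cannot wrap around. */
static bool rect_in_bounds(uint32_t fb_w, uint32_t fb_h,
                           uint32_t x, uint32_t y, uint32_t w, uint32_t h)
{
    return (uint64_t)x + w <= fb_w && (uint64_t)y + h <= fb_h;
}

int main(void)
{
    /* A rect that passes a naive whole-update check but overruns a tile. */
    printf("%d\n", rect_in_bounds(800, 600, 799, 0, 2, 1));   /* 0: reject */
    printf("%d\n", rect_in_bounds(800, 600, 10, 10, 16, 16)); /* 1: accept */
    return 0;
}
```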
static ssize_t driver_override_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *pdev = to_platform_device(dev);
ssize_t len;
device_lock(dev);
len = sprintf(buf, "%s\n", pdev->driver_override);
device_unlock(dev);
return len;
} | 1 | [
"CWE-787"
]
| linux | aa838896d87af561a33ecefea1caa4c15a68bc47 | 193,885,374,034,574,530,000,000,000,000,000,000,000 | 11 | drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions
Convert the various sprintf fmaily calls in sysfs device show functions
to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety.
Done with:
$ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 .
And cocci script:
$ cat sysfs_emit_dev.cocci
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- strcpy(buf, chr);
+ sysfs_emit(buf, chr);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
- len += scnprintf(buf + len, PAGE_SIZE - len,
+ len += sysfs_emit_at(buf, len,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
...
- strcpy(buf, chr);
- return strlen(buf);
+ return sysfs_emit(buf, chr);
}
Signed-off-by: Joe Perches <[email protected]>
Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
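Applying the quoted coccinelle rule to the flagged function above would yield roughly the following; this is a sketch of the transformation, not necessarily the exact upstream result:

```c
/* Sketch only: driver_override_show() after the sprintf -> sysfs_emit
 * conversion the cocci script performs. sysfs_emit() bounds the write
 * to PAGE_SIZE and warns on misuse, which plain sprintf() does not. */
static ssize_t driver_override_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
    struct platform_device *pdev = to_platform_device(dev);
    ssize_t len;

    device_lock(dev);
    len = sysfs_emit(buf, "%s\n", pdev->driver_override);
    device_unlock(dev);
    return len;
}
```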
static void xmlwriter_object_free_storage(void *object TSRMLS_DC)
{
ze_xmlwriter_object * intern = (ze_xmlwriter_object *) object;
if (!intern) {
return;
}
if (intern->xmlwriter_ptr) {
xmlwriter_free_resource_ptr(intern->xmlwriter_ptr TSRMLS_CC);
}
intern->xmlwriter_ptr = NULL;
zend_object_std_dtor(&intern->zo TSRMLS_CC);
efree(intern);
} | 0 | [
"CWE-20"
]
| php-src | 52b93f0cfd3cba7ff98cc5198df6ca4f23865f80 | 313,961,543,550,747,400,000,000,000,000,000,000,000 | 14 | Fixed bug #69353 (Missing null byte checks for paths in various PHP extensions) |
xmlCleanupCharEncodingHandlers(void) {
xmlCleanupEncodingAliases();
if (handlers == NULL) return;
for (;nbCharEncodingHandler > 0;) {
nbCharEncodingHandler--;
if (handlers[nbCharEncodingHandler] != NULL) {
if (handlers[nbCharEncodingHandler]->name != NULL)
xmlFree(handlers[nbCharEncodingHandler]->name);
xmlFree(handlers[nbCharEncodingHandler]);
}
}
xmlFree(handlers);
handlers = NULL;
nbCharEncodingHandler = 0;
xmlDefaultCharEncodingHandler = NULL;
} | 0 | [
"CWE-189"
]
| libxml2 | 69f04562f75212bfcabecd190ea8b06ace28ece2 | 83,010,041,697,289,220,000,000,000,000,000,000,000 | 18 | Fix an off by one error in encoding
this off by one error doesn't seems to reproduce on linux
but the error is real. |
send_append_header (char const *text)
{
char *p;
size_t len;
char *name;
p = strchr (text, ':');
if (!p)
{
mu_error (_("Invalid header: %s"), text);
return;
}
len = p - text;
name = mu_alloc (len + 1);
memcpy (name, text, len);
name[len] = 0;
for (p++; *p && mu_isspace (*p); p++)
;
add_header (name, mu_strdup (p), COMPOSE_APPEND);
} | 0 | []
| mailutils | 4befcfd015256c568121653038accbd84820198f | 125,551,835,143,986,610,000,000,000,000,000,000,000 | 21 | mail: disable compose escapes in non-interctive mode.
* NEWS: Document changes.
* doc/texinfo/programs/mail.texi: Document changes.
* mail/send.c (mail_compose_send): Recognize escapes only in
interactive mode. |
pxa3xx_gcu_reset(struct pxa3xx_gcu_priv *priv)
{
QDUMP("RESET");
/* disable interrupts */
gc_writel(priv, REG_GCIECR, 0);
/* reset hardware */
gc_writel(priv, REG_GCCR, GCCR_ABORT);
gc_writel(priv, REG_GCCR, 0);
memset(priv->shared, 0, SHARED_SIZE);
priv->shared->buffer_phys = priv->shared_phys;
priv->shared->magic = PXA3XX_GCU_SHARED_MAGIC;
ktime_get_ts64(&priv->base_time);
/* set up the ring buffer pointers */
gc_writel(priv, REG_GCRBLR, 0);
gc_writel(priv, REG_GCRBBR, priv->shared_phys);
gc_writel(priv, REG_GCRBTR, priv->shared_phys);
/* enable all IRQs except EOB */
gc_writel(priv, REG_GCIECR, IE_ALL & ~IE_EOB);
} | 0 | [
"CWE-190",
"CWE-703"
]
| linux | a09d2d00af53b43c6f11e6ab3cb58443c2cac8a7 | 144,908,964,952,026,480,000,000,000,000,000,000,000 | 25 | video: fbdev: pxa3xx-gcu: Fix integer overflow in pxa3xx_gcu_write
In pxa3xx_gcu_write, a count parameter of type size_t is passed to words of
type int. Then, copy_from_user() may cause a heap overflow because it is used
as the third argument of copy_from_user().
Signed-off-by: Hyunwoo Kim <[email protected]>
Signed-off-by: Helge Deller <[email protected]> |
void CServer::SetClientName(int ClientID, const char *pName)
{
if(ClientID < 0 || ClientID >= MAX_CLIENTS || m_aClients[ClientID].m_State < CClient::STATE_READY)
return;
if(!pName)
return;
char aCleanName[MAX_NAME_LENGTH];
str_copy(aCleanName, pName, sizeof(aCleanName));
// clear name
for(char *p = aCleanName; *p; ++p)
{
if(*p < 32)
*p = ' ';
}
if(TrySetClientName(ClientID, aCleanName))
{
// auto rename
for(int i = 1;; i++)
{
char aNameTry[MAX_NAME_LENGTH];
str_format(aNameTry, sizeof(aCleanName), "(%d)%s", i, aCleanName);
if(TrySetClientName(ClientID, aNameTry) == 0)
break;
}
}
} | 0 | [
"CWE-20"
]
| teeworlds | a766cb44bcffcdb0b88e776d01c5ee1323d44f85 | 168,884,148,979,805,630,000,000,000,000,000,000,000 | 30 | fixed a server crash |
napi_status napi_open_escapable_handle_scope(
napi_env env,
napi_escapable_handle_scope* result) {
// Omit NAPI_PREAMBLE and GET_RETURN_STATUS because V8 calls here cannot throw
// JS exceptions.
CHECK_ENV(env);
CHECK_ARG(env, result);
*result = v8impl::JsEscapableHandleScopeFromV8EscapableHandleScope(
new v8impl::EscapableHandleScopeWrapper(env->isolate));
env->open_handle_scopes++;
return napi_clear_last_error(env);
} | 0 | [
"CWE-191"
]
| node | 656260b4b65fec3b10f6da3fdc9f11fb941aafb5 | 155,114,705,293,476,570,000,000,000,000,000,000,000 | 13 | napi: fix memory corruption vulnerability
Fixes: https://hackerone.com/reports/784186
CVE-ID: CVE-2020-8174
PR-URL: https://github.com/nodejs-private/node-private/pull/195
Reviewed-By: Anna Henningsen <[email protected]>
Reviewed-By: Gabriel Schulhof <[email protected]>
Reviewed-By: Michael Dawson <[email protected]>
Reviewed-By: Colin Ihrig <[email protected]>
Reviewed-By: Rich Trott <[email protected]> |
nbd_unlocked_set_pread_initialize (struct nbd_handle *h, bool request)
{
h->pread_initialize = request;
return 0;
} | 0 | [
"CWE-252"
]
| libnbd | c79706af4e7475bf58861a143b77b77a54e7a1cd | 20,970,767,510,574,243,000,000,000,000,000,000,000 | 5 | api: Add new API nbd_set_pread_initialize()
The recent patch series for CVE-2022-0485 demonstrated that when
applications using libnbd are not careful about error checking, the
difference on whether a data leak is at least sanitized (all zeroes,
partial reads, or data leftover from a prior read) vs. a dangerous
information leak (uninitialized data from the heap) was partly under
libnbd's control. The previous two patches changed libnbd to always
sanitize, as a security hardening technique that prevents heap leaks
no matter how buggy the client app is. But a blind memset() also adds
an execution delay, even if it doesn't show up as the hot spot in our
profiling when compared to the time spent with network traffic.
At any rate, if client apps choose to pre-initialize their buffers, or
otherwise audit their code to take on their own risk about not
dereferencing a buffer on failure paths, then the time spent by libnbd
doing memset() is wasted; so it is worth adding a knob to let a user
opt in to faster execution at the expense of giving up our memset()
hardening on their behalf.
In addition to adding two new APIs, this patch also causes changes to
the four existing APIs nbd_{aio_,}pread{,_structured}, with those
generated lib/api.c changes looking like:
| --- lib/api.c.bak 2022-02-10 08:17:09.973381979 -0600
| +++ lib/api.c 2022-02-10 08:22:27.503428024 -0600
| @@ -2871,7 +2914,8 @@ nbd_pread (struct nbd_handle *h, void *b
| debug (h, "enter: buf=<buf> count=%zu offset=%" PRIu64 " flags=0x%x", count, offset, flags);
| }
|
| - memset (buf, 0, count);
| + if (h->pread_initialize)
| + memset (buf, 0, count);
| if (unlikely (!pread_in_permitted_state (h))) {
| ret = -1;
| goto out;
Message-Id: <[email protected]>
Acked-by: Laszlo Ersek <[email protected]>
[eblake: enhance commit message to show generated file diff, mention CVE
in doc text]
Reviewed-by: Richard W.M. Jones <[email protected]>
(cherry picked from commit e0953cb71250947bb97b25e34ff1ea34bd504bf3) |
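A minimal usage sketch of the new API, assuming a libnbd build recent enough to ship nbd_set_pread_initialize() and the standard nbd_create()/nbd_get_error()/nbd_close() entry points:

```c
#include <libnbd.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    struct nbd_handle *nbd = nbd_create();
    if (nbd == NULL) {
        fprintf(stderr, "%s\n", nbd_get_error());
        exit(EXIT_FAILURE);
    }
    /* The application takes responsibility for never dereferencing the
     * buffer on failed requests, so it opts out of libnbd's defensive
     * memset() and gets the faster execution the commit describes. */
    nbd_set_pread_initialize(nbd, false);

    nbd_close(nbd);
    return 0;
}
```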
int snd_sof_ipc_set_get_comp_data(struct snd_sof_ipc *ipc,
struct snd_sof_control *scontrol,
u32 ipc_cmd,
enum sof_ipc_ctrl_type ctrl_type,
enum sof_ipc_ctrl_cmd ctrl_cmd,
bool send)
{
struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
struct snd_sof_dev *sdev = ipc->sdev;
struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
struct sof_ipc_fw_version *v = &ready->version;
struct sof_ipc_ctrl_data_params sparams;
size_t send_bytes;
int err;
/* read or write firmware volume */
if (scontrol->readback_offset != 0) {
/* write/read value header via mmaped region */
send_bytes = sizeof(struct sof_ipc_ctrl_value_chan) *
cdata->num_elems;
if (send)
snd_sof_dsp_block_write(sdev, sdev->mmio_bar,
scontrol->readback_offset,
cdata->chanv, send_bytes);
else
snd_sof_dsp_block_read(sdev, sdev->mmio_bar,
scontrol->readback_offset,
cdata->chanv, send_bytes);
return 0;
}
cdata->rhdr.hdr.cmd = SOF_IPC_GLB_COMP_MSG | ipc_cmd;
cdata->cmd = ctrl_cmd;
cdata->type = ctrl_type;
cdata->comp_id = scontrol->comp_id;
cdata->msg_index = 0;
/* calculate header and data size */
switch (cdata->type) {
case SOF_CTRL_TYPE_VALUE_CHAN_GET:
case SOF_CTRL_TYPE_VALUE_CHAN_SET:
sparams.msg_bytes = scontrol->num_channels *
sizeof(struct sof_ipc_ctrl_value_chan);
sparams.hdr_bytes = sizeof(struct sof_ipc_ctrl_data);
sparams.elems = scontrol->num_channels;
break;
case SOF_CTRL_TYPE_VALUE_COMP_GET:
case SOF_CTRL_TYPE_VALUE_COMP_SET:
sparams.msg_bytes = scontrol->num_channels *
sizeof(struct sof_ipc_ctrl_value_comp);
sparams.hdr_bytes = sizeof(struct sof_ipc_ctrl_data);
sparams.elems = scontrol->num_channels;
break;
case SOF_CTRL_TYPE_DATA_GET:
case SOF_CTRL_TYPE_DATA_SET:
sparams.msg_bytes = cdata->data->size;
sparams.hdr_bytes = sizeof(struct sof_ipc_ctrl_data) +
sizeof(struct sof_abi_hdr);
sparams.elems = cdata->data->size;
break;
default:
return -EINVAL;
}
cdata->rhdr.hdr.size = sparams.msg_bytes + sparams.hdr_bytes;
cdata->num_elems = sparams.elems;
cdata->elems_remaining = 0;
/* send normal size ipc in one part */
if (cdata->rhdr.hdr.size <= SOF_IPC_MSG_MAX_SIZE) {
err = sof_ipc_tx_message(sdev->ipc, cdata->rhdr.hdr.cmd, cdata,
cdata->rhdr.hdr.size, cdata,
cdata->rhdr.hdr.size);
if (err < 0)
dev_err(sdev->dev, "error: set/get ctrl ipc comp %d\n",
cdata->comp_id);
return err;
}
/* data is bigger than max ipc size, chop into smaller pieces */
dev_dbg(sdev->dev, "large ipc size %u, control size %u\n",
cdata->rhdr.hdr.size, scontrol->size);
/* large messages is only supported from ABI 3.3.0 onwards */
if (v->abi_version < SOF_ABI_VER(3, 3, 0)) {
dev_err(sdev->dev, "error: incompatible FW ABI version\n");
return -EINVAL;
}
err = sof_set_get_large_ctrl_data(sdev, cdata, &sparams, send);
if (err < 0)
dev_err(sdev->dev, "error: set/get large ctrl ipc comp %d\n",
cdata->comp_id);
return err;
} | 0 | [
"CWE-400",
"CWE-401"
]
| linux | 45c1380358b12bf2d1db20a5874e9544f56b34ab | 285,121,149,465,261,130,000,000,000,000,000,000,000 | 100 | ASoC: SOF: ipc: Fix memory leak in sof_set_get_large_ctrl_data
In the implementation of sof_set_get_large_ctrl_data() there is a memory
leak in case an error. Release partdata if sof_get_ctrl_copy_params()
fails.
Fixes: 54d198d5019d ("ASoC: SOF: Propagate sof_get_ctrl_copy_params() error properly")
Signed-off-by: Navid Emamdoost <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Mark Brown <[email protected]> |
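The leak pattern is generic enough to show standalone: memory allocated for the multi-part transfer must be released when the helper fails. A hedged sketch with placeholder names (not the SOF driver's):

```c
#include <stdio.h>
#include <stdlib.h>

/* Placeholder for a helper that can fail partway through. */
static int copy_params(void *part, size_t n) { (void)part; return n ? 0 : -1; }

static int send_large_ctrl_data(size_t n)
{
    void *partdata = malloc(n ? n : 1);
    if (!partdata)
        return -1;

    if (copy_params(partdata, n) < 0) {
        free(partdata);            /* the missing release the commit adds */
        return -1;
    }

    /* ... send partdata in chunks ... */
    free(partdata);
    return 0;
}

int main(void)
{
    printf("%d %d\n", send_large_ctrl_data(16), send_large_ctrl_data(0));
    return 0;
}
```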
void InstanceKlass::do_local_static_fields(void f(fieldDescriptor*, Handle, TRAPS), Handle mirror, TRAPS) {
for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
if (fs.access_flags().is_static()) {
fieldDescriptor& fd = fs.field_descriptor();
f(&fd, mirror, CHECK);
}
}
} | 0 | []
| jdk17u | f8eb9abe034f7c6bea4da05a9ea42017b3f80730 | 129,879,195,894,442,350,000,000,000,000,000,000,000 | 8 | 8270386: Better verification of scan methods
Reviewed-by: coleenp
Backport-of: ac329cef45979bd0159ecd1347e36f7129bb2ce4 |
get_varp(struct vimoption *p)
{
// hidden option, always return NULL
if (p->var == NULL)
return NULL;
switch ((int)p->indir)
{
case PV_NONE: return p->var;
// global option with local value: use local value if it's been set
case PV_EP: return *curbuf->b_p_ep != NUL
? (char_u *)&curbuf->b_p_ep : p->var;
case PV_KP: return *curbuf->b_p_kp != NUL
? (char_u *)&curbuf->b_p_kp : p->var;
case PV_PATH: return *curbuf->b_p_path != NUL
? (char_u *)&(curbuf->b_p_path) : p->var;
case PV_AR: return curbuf->b_p_ar >= 0
? (char_u *)&(curbuf->b_p_ar) : p->var;
case PV_TAGS: return *curbuf->b_p_tags != NUL
? (char_u *)&(curbuf->b_p_tags) : p->var;
case PV_TC: return *curbuf->b_p_tc != NUL
? (char_u *)&(curbuf->b_p_tc) : p->var;
case PV_BKC: return *curbuf->b_p_bkc != NUL
? (char_u *)&(curbuf->b_p_bkc) : p->var;
case PV_SISO: return curwin->w_p_siso >= 0
? (char_u *)&(curwin->w_p_siso) : p->var;
case PV_SO: return curwin->w_p_so >= 0
? (char_u *)&(curwin->w_p_so) : p->var;
#ifdef FEAT_FIND_ID
case PV_DEF: return *curbuf->b_p_def != NUL
? (char_u *)&(curbuf->b_p_def) : p->var;
case PV_INC: return *curbuf->b_p_inc != NUL
? (char_u *)&(curbuf->b_p_inc) : p->var;
#endif
case PV_DICT: return *curbuf->b_p_dict != NUL
? (char_u *)&(curbuf->b_p_dict) : p->var;
case PV_TSR: return *curbuf->b_p_tsr != NUL
? (char_u *)&(curbuf->b_p_tsr) : p->var;
case PV_FP: return *curbuf->b_p_fp != NUL
? (char_u *)&(curbuf->b_p_fp) : p->var;
#ifdef FEAT_QUICKFIX
case PV_EFM: return *curbuf->b_p_efm != NUL
? (char_u *)&(curbuf->b_p_efm) : p->var;
case PV_GP: return *curbuf->b_p_gp != NUL
? (char_u *)&(curbuf->b_p_gp) : p->var;
case PV_MP: return *curbuf->b_p_mp != NUL
? (char_u *)&(curbuf->b_p_mp) : p->var;
#endif
#if defined(FEAT_BEVAL) && defined(FEAT_EVAL)
case PV_BEXPR: return *curbuf->b_p_bexpr != NUL
? (char_u *)&(curbuf->b_p_bexpr) : p->var;
#endif
#if defined(FEAT_CRYPT)
case PV_CM: return *curbuf->b_p_cm != NUL
? (char_u *)&(curbuf->b_p_cm) : p->var;
#endif
#ifdef FEAT_LINEBREAK
case PV_SBR: return *curwin->w_p_sbr != NUL
? (char_u *)&(curwin->w_p_sbr) : p->var;
#endif
#ifdef FEAT_STL_OPT
case PV_STL: return *curwin->w_p_stl != NUL
? (char_u *)&(curwin->w_p_stl) : p->var;
#endif
case PV_UL: return curbuf->b_p_ul != NO_LOCAL_UNDOLEVEL
? (char_u *)&(curbuf->b_p_ul) : p->var;
#ifdef FEAT_LISP
case PV_LW: return *curbuf->b_p_lw != NUL
? (char_u *)&(curbuf->b_p_lw) : p->var;
#endif
case PV_MENC: return *curbuf->b_p_menc != NUL
? (char_u *)&(curbuf->b_p_menc) : p->var;
#ifdef FEAT_ARABIC
case PV_ARAB: return (char_u *)&(curwin->w_p_arab);
#endif
case PV_LIST: return (char_u *)&(curwin->w_p_list);
case PV_LCS: return *curwin->w_p_lcs != NUL
? (char_u *)&(curwin->w_p_lcs) : p->var;
case PV_VE: return *curwin->w_p_ve != NUL
? (char_u *)&(curwin->w_p_ve) : p->var;
#ifdef FEAT_SPELL
case PV_SPELL: return (char_u *)&(curwin->w_p_spell);
#endif
#ifdef FEAT_SYN_HL
case PV_CUC: return (char_u *)&(curwin->w_p_cuc);
case PV_CUL: return (char_u *)&(curwin->w_p_cul);
case PV_CULOPT: return (char_u *)&(curwin->w_p_culopt);
case PV_CC: return (char_u *)&(curwin->w_p_cc);
#endif
#ifdef FEAT_DIFF
case PV_DIFF: return (char_u *)&(curwin->w_p_diff);
#endif
#ifdef FEAT_FOLDING
case PV_FDC: return (char_u *)&(curwin->w_p_fdc);
case PV_FEN: return (char_u *)&(curwin->w_p_fen);
case PV_FDI: return (char_u *)&(curwin->w_p_fdi);
case PV_FDL: return (char_u *)&(curwin->w_p_fdl);
case PV_FDM: return (char_u *)&(curwin->w_p_fdm);
case PV_FML: return (char_u *)&(curwin->w_p_fml);
case PV_FDN: return (char_u *)&(curwin->w_p_fdn);
# ifdef FEAT_EVAL
case PV_FDE: return (char_u *)&(curwin->w_p_fde);
case PV_FDT: return (char_u *)&(curwin->w_p_fdt);
# endif
case PV_FMR: return (char_u *)&(curwin->w_p_fmr);
#endif
case PV_NU: return (char_u *)&(curwin->w_p_nu);
case PV_RNU: return (char_u *)&(curwin->w_p_rnu);
#ifdef FEAT_LINEBREAK
case PV_NUW: return (char_u *)&(curwin->w_p_nuw);
#endif
case PV_WFH: return (char_u *)&(curwin->w_p_wfh);
case PV_WFW: return (char_u *)&(curwin->w_p_wfw);
#if defined(FEAT_QUICKFIX)
case PV_PVW: return (char_u *)&(curwin->w_p_pvw);
#endif
#ifdef FEAT_RIGHTLEFT
case PV_RL: return (char_u *)&(curwin->w_p_rl);
case PV_RLC: return (char_u *)&(curwin->w_p_rlc);
#endif
case PV_SCROLL: return (char_u *)&(curwin->w_p_scr);
case PV_WRAP: return (char_u *)&(curwin->w_p_wrap);
#ifdef FEAT_LINEBREAK
case PV_LBR: return (char_u *)&(curwin->w_p_lbr);
case PV_BRI: return (char_u *)&(curwin->w_p_bri);
case PV_BRIOPT: return (char_u *)&(curwin->w_p_briopt);
#endif
case PV_WCR: return (char_u *)&(curwin->w_p_wcr);
case PV_SCBIND: return (char_u *)&(curwin->w_p_scb);
case PV_CRBIND: return (char_u *)&(curwin->w_p_crb);
#ifdef FEAT_CONCEAL
case PV_COCU: return (char_u *)&(curwin->w_p_cocu);
case PV_COLE: return (char_u *)&(curwin->w_p_cole);
#endif
#ifdef FEAT_TERMINAL
case PV_TWK: return (char_u *)&(curwin->w_p_twk);
case PV_TWS: return (char_u *)&(curwin->w_p_tws);
case PV_TWSL: return (char_u *)&(curbuf->b_p_twsl);
#endif
case PV_AI: return (char_u *)&(curbuf->b_p_ai);
case PV_BIN: return (char_u *)&(curbuf->b_p_bin);
case PV_BOMB: return (char_u *)&(curbuf->b_p_bomb);
case PV_BH: return (char_u *)&(curbuf->b_p_bh);
case PV_BT: return (char_u *)&(curbuf->b_p_bt);
case PV_BL: return (char_u *)&(curbuf->b_p_bl);
case PV_CI: return (char_u *)&(curbuf->b_p_ci);
#ifdef FEAT_CINDENT
case PV_CIN: return (char_u *)&(curbuf->b_p_cin);
case PV_CINK: return (char_u *)&(curbuf->b_p_cink);
case PV_CINO: return (char_u *)&(curbuf->b_p_cino);
#endif
#if defined(FEAT_SMARTINDENT) || defined(FEAT_CINDENT)
case PV_CINW: return (char_u *)&(curbuf->b_p_cinw);
#endif
case PV_COM: return (char_u *)&(curbuf->b_p_com);
#ifdef FEAT_FOLDING
case PV_CMS: return (char_u *)&(curbuf->b_p_cms);
#endif
case PV_CPT: return (char_u *)&(curbuf->b_p_cpt);
#ifdef BACKSLASH_IN_FILENAME
case PV_CSL: return (char_u *)&(curbuf->b_p_csl);
#endif
#ifdef FEAT_COMPL_FUNC
case PV_CFU: return (char_u *)&(curbuf->b_p_cfu);
case PV_OFU: return (char_u *)&(curbuf->b_p_ofu);
#endif
#ifdef FEAT_EVAL
case PV_TFU: return (char_u *)&(curbuf->b_p_tfu);
#endif
case PV_EOL: return (char_u *)&(curbuf->b_p_eol);
case PV_FIXEOL: return (char_u *)&(curbuf->b_p_fixeol);
case PV_ET: return (char_u *)&(curbuf->b_p_et);
case PV_FENC: return (char_u *)&(curbuf->b_p_fenc);
case PV_FF: return (char_u *)&(curbuf->b_p_ff);
case PV_FT: return (char_u *)&(curbuf->b_p_ft);
case PV_FO: return (char_u *)&(curbuf->b_p_fo);
case PV_FLP: return (char_u *)&(curbuf->b_p_flp);
case PV_IMI: return (char_u *)&(curbuf->b_p_iminsert);
case PV_IMS: return (char_u *)&(curbuf->b_p_imsearch);
case PV_INF: return (char_u *)&(curbuf->b_p_inf);
case PV_ISK: return (char_u *)&(curbuf->b_p_isk);
#ifdef FEAT_FIND_ID
# ifdef FEAT_EVAL
case PV_INEX: return (char_u *)&(curbuf->b_p_inex);
# endif
#endif
#if defined(FEAT_CINDENT) && defined(FEAT_EVAL)
case PV_INDE: return (char_u *)&(curbuf->b_p_inde);
case PV_INDK: return (char_u *)&(curbuf->b_p_indk);
#endif
#ifdef FEAT_EVAL
case PV_FEX: return (char_u *)&(curbuf->b_p_fex);
#endif
#ifdef FEAT_CRYPT
case PV_KEY: return (char_u *)&(curbuf->b_p_key);
#endif
#ifdef FEAT_LISP
case PV_LISP: return (char_u *)&(curbuf->b_p_lisp);
#endif
case PV_ML: return (char_u *)&(curbuf->b_p_ml);
case PV_MPS: return (char_u *)&(curbuf->b_p_mps);
case PV_MA: return (char_u *)&(curbuf->b_p_ma);
case PV_MOD: return (char_u *)&(curbuf->b_changed);
case PV_NF: return (char_u *)&(curbuf->b_p_nf);
case PV_PI: return (char_u *)&(curbuf->b_p_pi);
#ifdef FEAT_TEXTOBJ
case PV_QE: return (char_u *)&(curbuf->b_p_qe);
#endif
case PV_RO: return (char_u *)&(curbuf->b_p_ro);
#ifdef FEAT_SMARTINDENT
case PV_SI: return (char_u *)&(curbuf->b_p_si);
#endif
case PV_SN: return (char_u *)&(curbuf->b_p_sn);
case PV_STS: return (char_u *)&(curbuf->b_p_sts);
#ifdef FEAT_SEARCHPATH
case PV_SUA: return (char_u *)&(curbuf->b_p_sua);
#endif
case PV_SWF: return (char_u *)&(curbuf->b_p_swf);
#ifdef FEAT_SYN_HL
case PV_SMC: return (char_u *)&(curbuf->b_p_smc);
case PV_SYN: return (char_u *)&(curbuf->b_p_syn);
#endif
#ifdef FEAT_SPELL
case PV_SPC: return (char_u *)&(curwin->w_s->b_p_spc);
case PV_SPF: return (char_u *)&(curwin->w_s->b_p_spf);
case PV_SPL: return (char_u *)&(curwin->w_s->b_p_spl);
case PV_SPO: return (char_u *)&(curwin->w_s->b_p_spo);
#endif
case PV_SW: return (char_u *)&(curbuf->b_p_sw);
case PV_TS: return (char_u *)&(curbuf->b_p_ts);
case PV_TW: return (char_u *)&(curbuf->b_p_tw);
case PV_TX: return (char_u *)&(curbuf->b_p_tx);
#ifdef FEAT_PERSISTENT_UNDO
case PV_UDF: return (char_u *)&(curbuf->b_p_udf);
#endif
case PV_WM: return (char_u *)&(curbuf->b_p_wm);
#ifdef FEAT_KEYMAP
case PV_KMAP: return (char_u *)&(curbuf->b_p_keymap);
#endif
#ifdef FEAT_SIGNS
case PV_SCL: return (char_u *)&(curwin->w_p_scl);
#endif
#ifdef FEAT_VARTABS
case PV_VSTS: return (char_u *)&(curbuf->b_p_vsts);
case PV_VTS: return (char_u *)&(curbuf->b_p_vts);
#endif
default: iemsg(_("E356: get_varp ERROR"));
}
// always return a valid pointer to avoid a crash!
return (char_u *)&(curbuf->b_p_wm);
} | 0 | [
"CWE-122"
]
| vim | b7081e135a16091c93f6f5f7525a5c58fb7ca9f9 | 42,410,428,682,983,880,000,000,000,000,000,000,000 | 253 | patch 8.2.3402: invalid memory access when using :retab with large value
Problem: Invalid memory access when using :retab with large value.
Solution: Check the number is positive. |
int imap_wordcasecmp(const char *a, const char *b)
{
char tmp[SHORT_STRING];
char *s = (char *)b;
int i;
tmp[SHORT_STRING-1] = 0;
for(i=0;i < SHORT_STRING-2;i++,s++)
{
if (!*s || ISSPACE(*s))
{
tmp[i] = 0;
break;
}
tmp[i] = *s;
}
tmp[i+1] = 0;
return ascii_strcasecmp(a, tmp);
} | 0 | [
"CWE-78"
]
| mutt | 185152818541f5cdc059cbff3f3e8b654fc27c1d | 268,998,181,874,534,300,000,000,000,000,000,000,000 | 20 | Properly quote IMAP mailbox names when (un)subscribing.
When handling automatic subscription (via $imap_check_subscribed), or
manual subscribe/unsubscribe commands, mutt generating a "mailboxes"
command but failed to properly escape backquotes.
Thanks to Jeriko One for the detailed bug report and patch, which this
commit is based upon. |
char *get_proxy(char *url, struct pool *pool)
{
pool->rpc_proxy = NULL;
char *split;
int plen, len, i;
for (i = 0; proxynames[i].name; i++) {
plen = strlen(proxynames[i].name);
if (strncmp(url, proxynames[i].name, plen) == 0) {
if (!(split = strchr(url, '|')))
return url;
*split = '\0';
len = split - url;
pool->rpc_proxy = malloc(1 + len - plen);
if (!(pool->rpc_proxy))
quithere(1, "Failed to malloc rpc_proxy");
strcpy(pool->rpc_proxy, url + plen);
extract_sockaddr(pool->rpc_proxy, &pool->sockaddr_proxy_url, &pool->sockaddr_proxy_port);
pool->rpc_proxytype = proxynames[i].proxytype;
url = split + 1;
break;
}
}
return url;
} | 0 | [
"CWE-119",
"CWE-787"
]
| cgminer | e1c5050734123973b99d181c45e74b2cbb00272e | 294,451,985,977,689,800,000,000,000,000,000,000,000 | 28 | Do some random sanity checking for stratum message parsing |
const Tensor& indices() const { return ix_; } | 0 | [
"CWE-703",
"CWE-787"
]
| tensorflow | 8ba6fa29cd8bf9cef9b718dc31c78c73081f5b31 | 8,986,645,232,521,310,000,000,000,000,000,000,000 | 1 | Fix heap-buffer-overflow issue with `tf.raw_ops.SparseSplit`.
PiperOrigin-RevId: 371242872
Change-Id: I482bb3d12602c7c3cc9446f97fb9f584bb98e9a4 |
static void ssl_write_supported_point_formats_ext( mbedtls_ssl_context *ssl,
unsigned char *buf,
size_t *olen )
{
unsigned char *p = buf;
((void) ssl);
if( ( ssl->handshake->cli_exts &
MBEDTLS_TLS_EXT_SUPPORTED_POINT_FORMATS_PRESENT ) == 0 )
{
*olen = 0;
return;
}
MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, supported_point_formats extension" ) );
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_SUPPORTED_POINT_FORMATS >> 8 ) & 0xFF );
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_SUPPORTED_POINT_FORMATS ) & 0xFF );
*p++ = 0x00;
*p++ = 2;
*p++ = 1;
*p++ = MBEDTLS_ECP_PF_UNCOMPRESSED;
*olen = 6;
} | 0 | [
"CWE-20",
"CWE-190"
]
| mbedtls | 83c9f495ffe70c7dd280b41fdfd4881485a3bc28 | 314,612,863,980,758,870,000,000,000,000,000,000,000 | 27 | Prevent bounds check bypass through overflow in PSK identity parsing
The check `if( *p + n > end )` in `ssl_parse_client_psk_identity` is
unsafe because `*p + n` might overflow, thus bypassing the check. As
`n` is a user-specified value up to 65K, this is relevant if the
library happens to be located in the last 65K of virtual memory.
This commit replaces the check by a safe version. |
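The unsafe comparison and its overflow-proof replacement are easy to demonstrate in isolation; the following is a standalone sketch of the pattern, not the mbedtls source:

```c
#include <stddef.h>
#include <stdio.h>

/* Buggy shape: if p is near the top of the address space, p + n can wrap,
 * the comparison passes, and the subsequent read runs past end. */
static int has_room_unsafe(const unsigned char *p, const unsigned char *end,
                           size_t n)
{
    return p + n <= end;                 /* p + n may overflow */
}

/* Safe shape: compare lengths, never form an out-of-range pointer. */
static int has_room_safe(const unsigned char *p, const unsigned char *end,
                         size_t n)
{
    return p <= end && n <= (size_t)(end - p);
}

int main(void)
{
    unsigned char buf[64];
    const unsigned char *p = buf, *end = buf + sizeof(buf);
    printf("%d %d\n", has_room_unsafe(p, end, 16),
                      has_room_safe(p, end, 65536));   /* 1 0 */
    return 0;
}
```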
Get the key size of cipher */
PHP_FUNCTION(mcrypt_get_cipher_name)
{
char *cipher_dir_string;
char *module_dir_string;
char *cipher_name;
char *cipher;
int cipher_len;
MCRYPT td;
MCRYPT_GET_INI
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s",
&cipher, &cipher_len) == FAILURE) {
return;
}
/* The code below is actually not very nice, but I didn't see a better
* method */
td = mcrypt_module_open(cipher, cipher_dir_string, "ecb", module_dir_string);
if (td != MCRYPT_FAILED) {
cipher_name = mcrypt_enc_get_algorithms_name(td);
mcrypt_module_close(td);
RETVAL_STRING(cipher_name,1);
mcrypt_free(cipher_name);
} else {
td = mcrypt_module_open(cipher, cipher_dir_string, "stream", module_dir_string);
if (td != MCRYPT_FAILED) {
cipher_name = mcrypt_enc_get_algorithms_name(td);
mcrypt_module_close(td);
RETVAL_STRING(cipher_name,1);
mcrypt_free(cipher_name);
} else {
php_error_docref(NULL TSRMLS_CC, E_WARNING, MCRYPT_OPEN_MODULE_FAILED);
RETURN_FALSE;
}
} | 0 | [
"CWE-190"
]
| php-src | 6c5211a0cef0cc2854eaa387e0eb036e012904d0 | 249,924,067,127,107,570,000,000,000,000,000,000,000 | 37 | Fix bug #72455: Heap Overflow due to integer overflows |
qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
const char *fmt, ...)
{
va_list va;
struct va_format vaf;
char nfunc[32];
memset(nfunc, 0, sizeof(nfunc));
memcpy(nfunc, func, sizeof(nfunc) - 1);
va_start(va, fmt);
vaf.fmt = fmt;
vaf.va = &va;
if (likely(qedi) && likely(qedi->pdev))
pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
nfunc, line, qedi->host_no, &vaf);
else
pr_err("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
va_end(va);
} | 1 | [
"CWE-125"
]
| linux | c09581a52765a85f19fc35340127396d5e3379cc | 227,926,972,015,987,130,000,000,000,000,000,000,000 | 23 | scsi: qedi: remove memset/memcpy to nfunc and use func instead
KASAN reports this:
BUG: KASAN: global-out-of-bounds in qedi_dbg_err+0xda/0x330 [qedi]
Read of size 31 at addr ffffffffc12b0ae0 by task syz-executor.0/2429
CPU: 0 PID: 2429 Comm: syz-executor.0 Not tainted 5.0.0-rc7+ #45
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014
Call Trace:
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0xfa/0x1ce lib/dump_stack.c:113
print_address_description+0x1c4/0x270 mm/kasan/report.c:187
kasan_report+0x149/0x18d mm/kasan/report.c:317
memcpy+0x1f/0x50 mm/kasan/common.c:130
qedi_dbg_err+0xda/0x330 [qedi]
? 0xffffffffc12d0000
qedi_init+0x118/0x1000 [qedi]
? 0xffffffffc12d0000
? 0xffffffffc12d0000
? 0xffffffffc12d0000
do_one_initcall+0xfa/0x5ca init/main.c:887
do_init_module+0x204/0x5f6 kernel/module.c:3460
load_module+0x66b2/0x8570 kernel/module.c:3808
__do_sys_finit_module+0x238/0x2a0 kernel/module.c:3902
do_syscall_64+0x147/0x600 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x462e99
Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007f2d57e55c58 EFLAGS: 00000246 ORIG_RAX: 0000000000000139
RAX: ffffffffffffffda RBX: 000000000073bfa0 RCX: 0000000000462e99
RDX: 0000000000000000 RSI: 00000000200003c0 RDI: 0000000000000003
RBP: 00007f2d57e55c70 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00007f2d57e566bc
R13: 00000000004bcefb R14: 00000000006f7030 R15: 0000000000000004
The buggy address belongs to the variable:
__func__.67584+0x0/0xffffffffffffd520 [qedi]
Memory state around the buggy address:
ffffffffc12b0980: fa fa fa fa 00 04 fa fa fa fa fa fa 00 00 05 fa
ffffffffc12b0a00: fa fa fa fa 00 00 04 fa fa fa fa fa 00 05 fa fa
> ffffffffc12b0a80: fa fa fa fa 00 06 fa fa fa fa fa fa 00 02 fa fa
^
ffffffffc12b0b00: fa fa fa fa 00 00 04 fa fa fa fa fa 00 00 03 fa
ffffffffc12b0b80: fa fa fa fa 00 00 02 fa fa fa fa fa 00 00 04 fa
Currently the qedi_dbg_* family of functions can overrun the end of the
source string if it is less than the destination buffer length because of
the use of a fixed sized memcpy. Remove the memset/memcpy calls to nfunc
and just use func instead as it is always a null terminated string.
Reported-by: Hulk Robot <[email protected]>
Fixes: ace7f46ba5fd ("scsi: qedi: Add QLogic FastLinQ offload iSCSI driver framework.")
Signed-off-by: YueHaibing <[email protected]>
Reviewed-by: Dan Carpenter <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]> |
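The fix boils down to printing func (always a NUL-terminated literal) directly instead of memcpy'ing a fixed 31 bytes into a temporary buffer. A standalone sketch with illustrative names:

```c
#include <stdarg.h>
#include <stdio.h>

/* Buggy shape, for contrast: a fixed-size copy over-reads short names.
 *
 *   char nfunc[32];
 *   memset(nfunc, 0, sizeof(nfunc));
 *   memcpy(nfunc, func, sizeof(nfunc) - 1);  // reads past "qedi_init" etc.
 */

/* Fixed shape: use the string as-is and drop the temporary buffer. */
static void dbg_err(const char *func, unsigned line, const char *fmt, ...)
{
    va_list va;
    va_start(va, fmt);
    fprintf(stderr, "[%s:%u]: ", func, line);
    vfprintf(stderr, fmt, va);
    va_end(va);
}

int main(void)
{
    dbg_err(__func__, __LINE__, "hello %d\n", 42);
    return 0;
}
```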
struct file_id vfs_file_id_from_sbuf(connection_struct *conn, const SMB_STRUCT_STAT *sbuf)
{
return SMB_VFS_FILE_ID_CREATE(conn, sbuf);
} | 0 | [
"CWE-22"
]
| samba | bd269443e311d96ef495a9db47d1b95eb83bb8f4 | 134,506,062,571,524,440,000,000,000,000,000,000,000 | 4 | Fix bug 7104 - "wide links" and "unix extensions" are incompatible.
Change parameter "wide links" to default to "no".
Ensure "wide links = no" if "unix extensions = yes" on a share.
Fix man pages to refect this.
Remove "within share" checks for a UNIX symlink set - even if
widelinks = no. The server will not follow that link anyway.
Correct DEBUG message in check_reduced_name() to add missing "\n"
so it's really clear when a path is being denied as it's outside
the enclosing share path.
Jeremy. |
MouseMode(mode)
int mode;
{
if (!display)
return;
if (mode < D_mousetrack)
mode = D_mousetrack;
if (D_mouse != mode)
{
char mousebuf[20];
if (!D_CXT)
return;
if (D_mouse)
{
sprintf(mousebuf, "\033[?%dl", D_mouse);
AddStr(mousebuf);
}
if (mode)
{
sprintf(mousebuf, "\033[?%dh", mode);
AddStr(mousebuf);
}
D_mouse = mode;
D_mouse_parse.state = CSI_INACTIVE;
}
} | 0 | []
| screen | c5db181b6e017cfccb8d7842ce140e59294d9f62 | 204,395,167,282,260,400,000,000,000,000,000,000,000 | 28 | ansi: add support for xterm OSC 11
It allows for getting and setting the background color. Notably, Vim uses
OSC 11 to learn whether it's running on a light or dark colored terminal
and choose a color scheme accordingly.
Tested with gnome-terminal and xterm. When called with "?" argument the
current background color is returned:
$ echo -ne "\e]11;?\e\\"
$ 11;rgb:2323/2727/2929
Signed-off-by: Lubomir Rintel <[email protected]>
(cherry picked from commit 7059bff20a28778f9d3acf81cad07b1388d02309)
Signed-off-by: Amadeusz Sławiński <[email protected] |
bool Http2Session::AddSettings(Http2Session::Http2Settings* settings) {
if (outstanding_settings_.size() == max_outstanding_settings_)
return false;
outstanding_settings_.push(settings);
return true;
} | 0 | []
| node | ce22d6f9178507c7a41b04ac4097b9ea902049e3 | 33,588,115,788,251,617,000,000,000,000,000,000,000 | 6 | http2: add altsvc support
Add support for sending and receiving ALTSVC frames.
PR-URL: https://github.com/nodejs/node/pull/17917
Reviewed-By: Anna Henningsen <[email protected]>
Reviewed-By: Tiancheng "Timothy" Gu <[email protected]>
Reviewed-By: Matteo Collina <[email protected]> |
inline void BinaryBroadcastFiveFold(const ArithmeticParams& unswitched_params,
const RuntimeShape& unswitched_input1_shape,
const T* unswitched_input1_data,
const RuntimeShape& unswitched_input2_shape,
const T* unswitched_input2_data,
const RuntimeShape& output_shape,
T* output_data, ElementwiseF elementwise_f,
ScalarBroadcastF scalar_broadcast_f) {
ArithmeticParams switched_params = unswitched_params;
switched_params.input1_offset = unswitched_params.input2_offset;
switched_params.input1_multiplier = unswitched_params.input2_multiplier;
switched_params.input1_shift = unswitched_params.input2_shift;
switched_params.input2_offset = unswitched_params.input1_offset;
switched_params.input2_multiplier = unswitched_params.input1_multiplier;
switched_params.input2_shift = unswitched_params.input1_shift;
const bool use_unswitched =
unswitched_params.broadcast_category ==
tflite::BroadcastableOpCategory::kFirstInputBroadcastsFast;
const ArithmeticParams& params =
use_unswitched ? unswitched_params : switched_params;
const T* input1_data =
use_unswitched ? unswitched_input1_data : unswitched_input2_data;
const T* input2_data =
use_unswitched ? unswitched_input2_data : unswitched_input1_data;
// Fivefold nested loops. The second input resets its position for each
// iteration of the second loop. The first input resets its position at the
// beginning of the fourth loop. The innermost loop is an elementwise add of
// sections of the arrays.
T* output_data_ptr = output_data;
const T* input1_data_ptr = input1_data;
const T* input2_data_reset = input2_data;
// In the fivefold pattern, y0, y2 and y4 are not broadcast, and so shared
// between input shapes. y3 for input 1 is always broadcast, and so the
// dimension there is 1, whereas optionally y1 might be broadcast for
// input 2. Put another way, input1.shape.FlatSize = y0 * y1 * y2 * y4,
// input2.shape.FlatSize = y0 * y2 * y3 * y4.
int y0 = params.broadcast_shape[0];
int y1 = params.broadcast_shape[1];
int y2 = params.broadcast_shape[2];
int y3 = params.broadcast_shape[3];
int y4 = params.broadcast_shape[4];
if (y4 > 1) {
// General fivefold pattern, with y4 > 1 so there is a non-broadcast inner
// dimension.
for (int i0 = 0; i0 < y0; ++i0) {
const T* input2_data_ptr = nullptr;
for (int i1 = 0; i1 < y1; ++i1) {
input2_data_ptr = input2_data_reset;
for (int i2 = 0; i2 < y2; ++i2) {
for (int i3 = 0; i3 < y3; ++i3) {
elementwise_f(y4, params, input1_data_ptr, input2_data_ptr,
output_data_ptr);
input2_data_ptr += y4;
output_data_ptr += y4;
}
// We have broadcast y4 of input1 data y3 times, and now move on.
input1_data_ptr += y4;
}
}
// We have broadcast y2*y3*y4 of input2 data y1 times, and now move on.
input2_data_reset = input2_data_ptr;
}
} else if (input1_data_ptr != nullptr) {
// Special case of y4 == 1, in which the innermost loop is a single
// element and can be combined with the next (y3) as an inner broadcast.
//
// Note that this handles the case of pure scalar broadcast when
// y0 == y1 == y2 == 1. With low overhead it handles cases such as scalar
// broadcast with batch (as y2 > 1).
//
// NOTE The process is the same as the above general case except
// simplified for y4 == 1 and the loop over y3 is contained within the
// AddScalarBroadcast function.
for (int i0 = 0; i0 < y0; ++i0) {
const T* input2_data_ptr = nullptr;
for (int i1 = 0; i1 < y1; ++i1) {
input2_data_ptr = input2_data_reset;
for (int i2 = 0; i2 < y2; ++i2) {
scalar_broadcast_f(y3, params, *input1_data_ptr, input2_data_ptr,
output_data_ptr);
input2_data_ptr += y3;
output_data_ptr += y3;
input1_data_ptr += 1;
}
}
input2_data_reset = input2_data_ptr;
}
}
} | 0 | [
"CWE-476",
"CWE-369"
]
| tensorflow | 15691e456c7dc9bd6be203b09765b063bf4a380c | 107,181,671,241,159,970,000,000,000,000,000,000,000 | 92 | Prevent dereferencing of null pointers in TFLite's `add.cc`.
PiperOrigin-RevId: 387244946
Change-Id: I56094233327fbd8439b92e1dbb1262176e00eeb9 |
template<typename tf, typename tc>
CImg<floatT> get_projections3d(CImgList<tf>& primitives, CImgList<tc>& colors,
const unsigned int x0, const unsigned int y0, const unsigned int z0,
const bool normalize_colors=false) const {
float m = 0, M = 0, delta = 1;
if (normalize_colors) { m = (float)min_max(M); delta = 255/(m==M?1:M-m); }
const unsigned int
_x0 = (x0>=_width)?_width - 1:x0,
_y0 = (y0>=_height)?_height - 1:y0,
_z0 = (z0>=_depth)?_depth - 1:z0;
CImg<tc> img_xy, img_xz, img_yz;
if (normalize_colors) {
((get_crop(0,0,_z0,0,_width - 1,_height - 1,_z0,_spectrum - 1)-=m)*=delta).move_to(img_xy);
((get_crop(0,_y0,0,0,_width - 1,_y0,_depth - 1,_spectrum - 1)-=m)*=delta).resize(_width,_depth,1,-100,-1).
move_to(img_xz);
((get_crop(_x0,0,0,0,_x0,_height - 1,_depth - 1,_spectrum - 1)-=m)*=delta).resize(_height,_depth,1,-100,-1).
move_to(img_yz);
} else {
get_crop(0,0,_z0,0,_width - 1,_height - 1,_z0,_spectrum - 1).move_to(img_xy);
get_crop(0,_y0,0,0,_width - 1,_y0,_depth - 1,_spectrum - 1).resize(_width,_depth,1,-100,-1).move_to(img_xz);
get_crop(_x0,0,0,0,_x0,_height - 1,_depth - 1,_spectrum - 1).resize(_height,_depth,1,-100,-1).move_to(img_yz);
}
CImg<floatT> points(12,3,1,1,
0,_width - 1,_width - 1,0, 0,_width - 1,_width - 1,0, _x0,_x0,_x0,_x0,
0,0,_height - 1,_height - 1, _y0,_y0,_y0,_y0, 0,_height - 1,_height - 1,0,
_z0,_z0,_z0,_z0, 0,0,_depth - 1,_depth - 1, 0,0,_depth - 1,_depth - 1);
primitives.assign();
CImg<tf>::vector(0,1,2,3,0,0,img_xy._width - 1,0,img_xy._width - 1,img_xy._height - 1,0,img_xy._height - 1).
move_to(primitives);
CImg<tf>::vector(4,5,6,7,0,0,img_xz._width - 1,0,img_xz._width - 1,img_xz._height - 1,0,img_xz._height - 1).
move_to(primitives);
CImg<tf>::vector(8,9,10,11,0,0,img_yz._width - 1,0,img_yz._width - 1,img_yz._height - 1,0,img_yz._height - 1).
move_to(primitives);
colors.assign();
img_xy.move_to(colors);
img_xz.move_to(colors);
img_yz.move_to(colors);
return points; | 0 | [
"CWE-125"
]
| CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 146,030,395,303,578,900,000,000,000,000,000,000,000 | 38 | Fix other issues in 'CImg<T>::load_bmp()'. |
directory_too_idle_to_fetch_descriptors(or_options_t *options, time_t now)
{
return !directory_caches_dir_info(options) &&
!options->FetchUselessDescriptors &&
rep_hist_circbuilding_dormant(now);
} | 0 | [
"CWE-264"
]
| tor | 00fffbc1a15e2696a89c721d0c94dc333ff419ef | 118,617,088,297,846,420,000,000,000,000,000,000,000 | 6 | Don't give the Guard flag to relays without the CVE-2011-2768 fix |
f_settabwinvar(typval_T *argvars, typval_T *rettv UNUSED)
{
if (in_vim9script()
&& (check_for_number_arg(argvars, 0) == FAIL
|| check_for_number_arg(argvars, 1) == FAIL
|| check_for_string_arg(argvars, 2) == FAIL))
return;
setwinvar(argvars, 1);
} | 0 | [
"CWE-476"
]
| vim | 0f6e28f686dbb59ab3b562408ab9b2234797b9b1 | 154,481,127,464,732,180,000,000,000,000,000,000,000 | 10 | patch 8.2.4428: crash when switching tabpage while in the cmdline window
Problem: Crash when switching tabpage while in the cmdline window.
Solution: Disallow switching tabpage when in the cmdline window. |
if (key->idx != ECC_CUSTOM_IDX && ecc_sets[key->idx].id == ECC_SECP384R1) {
return sp_ecc_verify_384(hash, hashlen, key->pubkey.x, key->pubkey.y,
key->pubkey.z, r, s, res, key->heap);
} | 0 | [
"CWE-326",
"CWE-203"
]
| wolfssl | 1de07da61f0c8e9926dcbd68119f73230dae283f | 237,471,086,811,797,900,000,000,000,000,000,000,000 | 4 | Constant time EC map to affine for private operations
For fast math, use a constant time modular inverse when mapping to
affine when operation involves a private key - key gen, calc shared
secret, sign. |
e1000e_write_ps_rx_descr(E1000ECore *core, uint8_t *desc,
struct NetRxPkt *pkt,
const E1000E_RSSInfo *rss_info,
size_t ps_hdr_len,
uint16_t(*written)[MAX_PS_BUFFERS])
{
int i;
union e1000_rx_desc_packet_split *d =
(union e1000_rx_desc_packet_split *) desc;
memset(&d->wb, 0, sizeof(d->wb));
d->wb.middle.length0 = cpu_to_le16((*written)[0]);
for (i = 0; i < PS_PAGE_BUFFERS; i++) {
d->wb.upper.length[i] = cpu_to_le16((*written)[i + 1]);
}
e1000e_build_rx_metadata(core, pkt, pkt != NULL,
rss_info,
&d->wb.lower.hi_dword.rss,
&d->wb.lower.mrq,
&d->wb.middle.status_error,
&d->wb.lower.hi_dword.csum_ip.ip_id,
&d->wb.middle.vlan);
d->wb.upper.header_status =
cpu_to_le16(ps_hdr_len | (ps_hdr_len ? E1000_RXDPS_HDRSTAT_HDRSP : 0));
trace_e1000e_rx_desc_ps_write((*written)[0], (*written)[1],
(*written)[2], (*written)[3]);
} | 0 | [
"CWE-835"
]
| qemu | 4154c7e03fa55b4cf52509a83d50d6c09d743b77 | 139,829,987,335,434,300,000,000,000,000,000,000,000 | 32 | net: e1000e: fix an infinite loop issue
This issue is like the issue in e1000 network card addressed in
this commit:
e1000: eliminate infinite loops on out-of-bounds transfer start.
Signed-off-by: Li Qiang <[email protected]>
Reviewed-by: Dmitry Fleytman <[email protected]>
Signed-off-by: Jason Wang <[email protected]> |
TEST_F(QueryPlannerTest, MultikeyNestedElemMatch) {
// true means multikey
addIndex(BSON("a.b.c" << 1), true);
runQuery(fromjson("{a: {$elemMatch: {b: {$elemMatch: {c: {$gte: 1, $lte: 1}}}}}}"));
assertNumSolutions(3U);
assertSolutionExists("{cscan: {dir: 1}}");
assertSolutionExists(
"{fetch: {node: {ixscan: {pattern: {'a.b.c': 1}, bounds: "
"{'a.b.c': [[-Infinity, 1, true, true]]}}}}}");
assertSolutionExists(
"{fetch: {node: {ixscan: {pattern: {'a.b.c': 1}, bounds: "
"{'a.b.c': [[1, Infinity, true, true]]}}}}}");
} | 0 | [
"CWE-834"
]
| mongo | 94d0e046baa64d1aa1a6af97e2d19bb466cc1ff5 | 274,669,124,571,963,200,000,000,000,000,000,000,000 | 14 | SERVER-38164 $or pushdown optimization does not correctly handle $not within an $elemMatch |
tor_version_as_new_as(const char *platform, const char *cutoff)
{
tor_version_t cutoff_version, router_version;
char *s, *s2, *start;
char tmp[128];
tor_assert(platform);
if (tor_version_parse(cutoff, &cutoff_version)<0) {
log_warn(LD_BUG,"cutoff version '%s' unparseable.",cutoff);
return 0;
}
if (strcmpstart(platform,"Tor ")) /* nonstandard Tor; be safe and say yes */
return 1;
start = (char *)eat_whitespace(platform+3);
if (!*start) return 0;
s = (char *)find_whitespace(start); /* also finds '\0', which is fine */
s2 = (char*)eat_whitespace(s);
if (!strcmpstart(s2, "(r") || !strcmpstart(s2, "(git-"))
s = (char*)find_whitespace(s2);
if ((size_t)(s-start+1) >= sizeof(tmp)) /* too big, no */
return 0;
strlcpy(tmp, start, s-start+1);
if (tor_version_parse(tmp, &router_version)<0) {
log_info(LD_DIR,"Router version '%s' unparseable.",tmp);
return 1; /* be safe and say yes */
}
/* Here's why we don't need to do any special handling for svn revisions:
* - If neither has an svn revision, we're fine.
* - If the router doesn't have an svn revision, we can't assume that it
* is "at least" any svn revision, so we need to return 0.
* - If the target version doesn't have an svn revision, any svn revision
* (or none at all) is good enough, so return 1.
* - If both target and router have an svn revision, we compare them.
*/
return tor_version_compare(&router_version, &cutoff_version) >= 0;
} | 0 | [
"CWE-399"
]
| tor | 57e35ad3d91724882c345ac709666a551a977f0f | 47,950,301,969,991,550,000,000,000,000,000,000,000 | 42 | Avoid possible segfault when handling networkstatus vote with bad flavor
Fix for 6530; fix on 0.2.2.6-alpha. |
query (void)
{
static const GimpParamDef load_args[] =
{
{ GIMP_PDB_INT32, "run-mode", "The run mode { RUN-INTERACTIVE (0), RUN-NONINTERACTIVE (1) }" },
{ GIMP_PDB_STRING, "filename", "The name of the file to load" },
{ GIMP_PDB_STRING, "raw-filename", "The name of the file to load" }
};
static const GimpParamDef load_return_vals[] =
{
{ GIMP_PDB_IMAGE, "image", "Output image" }
};
#if 0
static const GimpParamDef save_args[] =
{
{ GIMP_PDB_INT32, "run-mode", "The run mode { RUN-INTERACTIVE (0), RUN-NONINTERACTIVE (1) }" },
{ GIMP_PDB_IMAGE, "image", "Input image" },
{ GIMP_PDB_DRAWABLE, "drawable", "Drawable to export" },
{ GIMP_PDB_STRING, "filename", "The name of the file to export the image in" },
{ GIMP_PDB_STRING, "raw-filename", "The name of the file to export the image in" },
{ GIMP_PDB_INT32, "compression", "Specify 0 for no compression, 1 for RLE, and 2 for LZ77" }
};
#endif
gimp_install_procedure (LOAD_PROC,
"loads images from the Paint Shop Pro PSP file format",
"This plug-in loads and exports images in "
"Paint Shop Pro's native PSP format. "
"Vector layers aren't handled. Exporting isn't "
"yet implemented.",
"Tor Lillqvist",
"Tor Lillqvist",
"1999",
N_("Paint Shop Pro image"),
NULL,
GIMP_PLUGIN,
G_N_ELEMENTS (load_args),
G_N_ELEMENTS (load_return_vals),
load_args, load_return_vals);
gimp_register_file_handler_mime (LOAD_PROC, "image/x-psp");
gimp_register_magic_load_handler (LOAD_PROC,
"psp,tub,pspimage",
"",
"0,string,Paint\\040Shop\\040Pro\\040Image\\040File\n\032");
/* commented out until exporting is implemented */
#if 0
gimp_install_procedure (SAVE_PROC,
"exports images in the Paint Shop Pro PSP file format",
"This plug-in loads and exports images in "
"Paint Shop Pro's native PSP format. "
"Vector layers aren't handled. Exporting isn't "
"yet implemented.",
"Tor Lillqvist",
"Tor Lillqvist",
"1999",
N_("Paint Shop Pro image"),
"RGB*, GRAY*, INDEXED*",
GIMP_PLUGIN,
G_N_ELEMENTS (save_args), 0,
save_args, NULL);
gimp_register_save_handler (SAVE_PROC, "psp,tub", "");
#endif
} | 0 | [
"CWE-125"
]
| gimp | eb2980683e6472aff35a3117587c4f814515c74d | 146,106,454,201,259,140,000,000,000,000,000,000,000 | 67 | Bug 790853 - (CVE-2017-17787) heap overread in psp importer.
As any external data, we have to check that strings being read at fixed
length are properly nul-terminated. |
This function encrypts the plaintext */
PHP_FUNCTION(mcrypt_generic)
{
zval *mcryptind;
char *data;
int data_len;
php_mcrypt *pm;
unsigned char* data_s;
int block_size, data_size;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rs", &mcryptind, &data, &data_len) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(pm, php_mcrypt *, &mcryptind, -1, "MCrypt", le_mcrypt);
PHP_MCRYPT_INIT_CHECK
if (data_len == 0) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "An empty string was passed");
RETURN_FALSE
}
/* Check blocksize */
if (mcrypt_enc_is_block_mode(pm->td) == 1) { /* It's a block algorithm */
block_size = mcrypt_enc_get_block_size(pm->td);
data_size = (((data_len - 1) / block_size) + 1) * block_size;
data_s = emalloc(data_size + 1);
memset(data_s, 0, data_size);
memcpy(data_s, data, data_len);
} else { /* It's not a block algorithm */
data_size = data_len;
data_s = emalloc(data_size + 1);
memset(data_s, 0, data_size);
memcpy(data_s, data, data_len);
}
mcrypt_generic(pm->td, data_s, data_size);
data_s[data_size] = '\0';
RETVAL_STRINGL(data_s, data_size, 1);
efree(data_s); | 1 | [
"CWE-190"
]
| php-src | 6c5211a0cef0cc2854eaa387e0eb036e012904d0 | 281,110,117,196,865,200,000,000,000,000,000,000,000 | 41 | Fix bug #72455: Heap Overflow due to integer overflows |
void WriteOutputSlice(int64 begin, int64 end) override {
std::vector<int> combination(features_.size(), 0);
for (int64 b = begin; b < end; ++b) {
auto row_start = splits_out_(b);
auto row_limit = splits_out_(b + 1);
for (auto i = row_start; i < row_limit; ++i) {
WriteCombination(b, combination, &values_out_(i));
NextCombination(b, &combination);
}
combination.assign(features_.size(), 0); // reset for next batch.
}
} | 0 | [
"CWE-125",
"CWE-369"
]
| tensorflow | 44b7f486c0143f68b56c34e2d01e146ee445134a | 313,236,042,697,283,140,000,000,000,000,000,000,000 | 12 | Fix out of bounds read in `ragged_cross_op.cc`.
PiperOrigin-RevId: 369757702
Change-Id: Ie6e5d2c21513a8d56bf41fcf35960caf76e890f9 |
pdf14_get_num_spots(gx_device * dev)
{
cmm_dev_profile_t *dev_profile;
cmm_profile_t *icc_profile;
gsicc_rendering_param_t render_cond;
dev_proc(dev, get_profile)(dev, &dev_profile);
gsicc_extract_profile(GS_UNKNOWN_TAG, dev_profile, &icc_profile,
&render_cond);
return dev->color_info.num_components - icc_profile->num_comps;
} | 0 | [
"CWE-416"
]
| ghostpdl | 90fd0c7ca3efc1ddff64a86f4104b13b3ac969eb | 44,979,870,227,127,770,000,000,000,000,000,000,000 | 11 | Bug 697456. Dont create new ctx when pdf14 device reenabled
This bug had yet another weird case where the user created a
file that pushed the pdf14 device twice. We were in that case,
creating a new ctx and blowing away the original one with out
proper clean up. To avoid, only create a new one when we need it. |
void inet_csk_delete_keepalive_timer(struct sock *sk)
{
sk_stop_timer(sk, &sk->sk_timer);
} | 0 | [
"CWE-362"
]
| linux-2.6 | f6d8bd051c391c1c0458a30b2a7abcd939329259 | 336,085,382,587,982,430,000,000,000,000,000,000,000 | 4 | inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We cant insert an rcu_head in struct ip_options since its included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static int snmp6_alloc_dev(struct inet6_dev *idev)
{
int i;
idev->stats.ipv6 = alloc_percpu(struct ipstats_mib);
if (!idev->stats.ipv6)
goto err_ip;
for_each_possible_cpu(i) {
struct ipstats_mib *addrconf_stats;
addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i);
u64_stats_init(&addrconf_stats->syncp);
}
idev->stats.icmpv6dev = kzalloc(sizeof(struct icmpv6_mib_device),
GFP_KERNEL);
if (!idev->stats.icmpv6dev)
goto err_icmp;
idev->stats.icmpv6msgdev = kzalloc(sizeof(struct icmpv6msg_mib_device),
GFP_KERNEL);
if (!idev->stats.icmpv6msgdev)
goto err_icmpmsg;
return 0;
err_icmpmsg:
kfree(idev->stats.icmpv6dev);
err_icmp:
free_percpu(idev->stats.ipv6);
err_ip:
return -ENOMEM;
} | 0 | [
"CWE-20"
]
| linux | 77751427a1ff25b27d47a4c36b12c3c8667855ac | 5,282,622,595,310,851,300,000,000,000,000,000,000 | 33 | ipv6: addrconf: validate new MTU before applying it
Currently we don't check whether the new MTU is valid, and this allows
one to configure an MTU smaller than the minimum allowed by the RFCs or
even bigger than the interface's own MTU, which is a problem as it may
lead to packet drops.
If you have a daemon like NetworkManager running, this may be exploited
by remote attackers by forging RA packets with an invalid MTU, possibly
leading to a DoS. (NetworkManager currently only validates for values
too small, but not for too big ones.)
The fix is just to make sure the new value is valid. That is, between
IPV6_MIN_MTU and interface's MTU.
Note that similar check is already performed at
ndisc_router_discovery(), for when kernel itself parses the RA.
Signed-off-by: Marcelo Ricardo Leitner <[email protected]>
Signed-off-by: Sabrina Dubroca <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
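The fix described above boils down to a range check before applying a router-advertised MTU. A minimal, hedged C sketch of that check, using the RFC minimum of 1280 and an illustrative dev_mtu parameter rather than the kernel's actual structures:
#include <stdbool.h>
#include <stdint.h>

#define IPV6_MIN_MTU 1280u   /* RFC 8200 minimum IPv6 link MTU */

/* Accept a new MTU only if it lies between the IPv6 minimum and the
 * interface's own MTU; otherwise keep the current value. */
static bool ipv6_mtu_valid(uint32_t new_mtu, uint32_t dev_mtu)
{
    return new_mtu >= IPV6_MIN_MTU && new_mtu <= dev_mtu;
}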
b64encode(uschar *clear, int len)
{
uschar *code = store_get(4*((len+2)/3) + 1);
uschar *p = code;
while (len-- >0)
{
int x, y;
x = *clear++;
*p++ = enc64table[(x >> 2) & 63];
if (len-- <= 0)
{
*p++ = enc64table[(x << 4) & 63];
*p++ = '=';
*p++ = '=';
break;
}
y = *clear++;
*p++ = enc64table[((x << 4) | ((y >> 4) & 15)) & 63];
if (len-- <= 0)
{
*p++ = enc64table[(y << 2) & 63];
*p++ = '=';
break;
}
x = *clear++;
*p++ = enc64table[((y << 2) | ((x >> 6) & 3)) & 63];
*p++ = enc64table[x & 63];
}
*p = 0;
return code;
} | 0 | [
"CWE-119"
]
| exim | cf3cd306062a08969c41a1cdd32c6855f1abecf1 | 39,699,741,195,160,380,000,000,000,000,000,000,000 | 40 | Fix base64d() buffer size (CVE-2018-6789)
Credits for discovering this bug: Meh Chang <[email protected]>
(cherry picked from commit 062990cc1b2f9e5d82a413b53c8f0569075de700) |
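The CVE fix referenced above is about sizing the decoder's output buffer, which is not shown here. As a hedged sketch (not Exim's code), the safe bound is that every group of up to 4 input characters decodes to at most 3 bytes, so the buffer must hold 3 * ((len + 3) / 4) bytes plus a terminator:
#include <stdint.h>
#include <stdlib.h>

/* Conservative output-buffer bound for decoding n base64 characters:
 * every (possibly partial) group of 4 input chars yields at most 3 bytes. */
static size_t b64_decoded_bound(size_t n)
{
    if (n >= SIZE_MAX / 3)              /* avoid size_t overflow for absurd inputs */
        return 0;
    return 3 * ((n + 3) / 4) + 1;       /* +1 for a trailing NUL */
}

static unsigned char *b64_alloc_output(size_t input_len)
{
    size_t bound = b64_decoded_bound(input_len);

    return bound ? malloc(bound) : NULL;
}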
static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
int len, err = 0;
BT_DBG("sk %p", sk);
if (level == SOL_SCO)
return sco_sock_getsockopt_old(sock, optname, optval, optlen);
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
switch (optname) {
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
} | 0 | [
"CWE-200"
]
| linux | c4c896e1471aec3b004a693c689f60be3b17ac86 | 204,963,287,871,973,960,000,000,000,000,000,000,000 | 24 | Bluetooth: sco: fix information leak to userspace
struct sco_conninfo has one padding byte in the end. Local variable
cinfo of type sco_conninfo is copied to userspace with this uninizialized
one byte, leading to old stack contents leak.
Signed-off-by: Vasiliy Kulikov <[email protected]>
Signed-off-by: Gustavo F. Padovan <[email protected]> |
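The leak described above comes from copying a struct whose padding bytes were never written. A hedged userspace analogue (the struct layout is illustrative, not the real sco_conninfo): zero the whole object before filling the named fields, so padding cannot carry stale stack contents.
#include <stdint.h>
#include <string.h>

struct conn_info {
    uint16_t handle;
    uint8_t  dev_class[3];
    /* one byte of implicit tail padding on most ABIs */
};

static void fill_conn_info(struct conn_info *out, uint16_t handle,
                           const uint8_t dev_class[3])
{
    memset(out, 0, sizeof(*out));          /* clears padding too */
    out->handle = handle;
    memcpy(out->dev_class, dev_class, 3);
}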
netsnmp_init_mib(void)
{
const char *prefix;
char *env_var, *entry;
PrefixListPtr pp = &mib_prefixes[0];
char *st = NULL;
if (Mib)
return;
netsnmp_init_mib_internals();
/*
* Initialise the MIB directory/ies
*/
netsnmp_fixup_mib_directory();
env_var = strdup(netsnmp_get_mib_directory());
if (!env_var)
return;
netsnmp_mibindex_load();
DEBUGMSGTL(("init_mib",
"Seen MIBDIRS: Looking in '%s' for mib dirs ...\n",
env_var));
entry = strtok_r(env_var, ENV_SEPARATOR, &st);
while (entry) {
add_mibdir(entry);
entry = strtok_r(NULL, ENV_SEPARATOR, &st);
}
SNMP_FREE(env_var);
env_var = netsnmp_getenv("MIBFILES");
if (env_var != NULL) {
if (*env_var == '+')
entry = strtok_r(env_var+1, ENV_SEPARATOR, &st);
else
entry = strtok_r(env_var, ENV_SEPARATOR, &st);
while (entry) {
add_mibfile(entry, NULL, NULL);
entry = strtok_r(NULL, ENV_SEPARATOR, &st);
}
}
netsnmp_init_mib_internals();
/*
* Read in any modules or mibs requested
*/
env_var = netsnmp_getenv("MIBS");
if (env_var == NULL) {
if (confmibs != NULL)
env_var = strdup(confmibs);
else
env_var = strdup(NETSNMP_DEFAULT_MIBS);
} else {
env_var = strdup(env_var);
}
if (env_var && ((*env_var == '+') || (*env_var == '-'))) {
entry =
(char *) malloc(strlen(NETSNMP_DEFAULT_MIBS) + strlen(env_var) + 2);
if (!entry) {
DEBUGMSGTL(("init_mib", "env mibs malloc failed"));
SNMP_FREE(env_var);
return;
} else {
if (*env_var == '+')
sprintf(entry, "%s%c%s", NETSNMP_DEFAULT_MIBS, ENV_SEPARATOR_CHAR,
env_var+1);
else
sprintf(entry, "%s%c%s", env_var+1, ENV_SEPARATOR_CHAR,
NETSNMP_DEFAULT_MIBS );
}
SNMP_FREE(env_var);
env_var = entry;
}
DEBUGMSGTL(("init_mib",
"Seen MIBS: Looking in '%s' for mib files ...\n",
env_var));
entry = strtok_r(env_var, ENV_SEPARATOR, &st);
while (entry) {
if (strcasecmp(entry, DEBUG_ALWAYS_TOKEN) == 0) {
read_all_mibs();
} else if (strstr(entry, "/") != NULL) {
read_mib(entry);
} else {
netsnmp_read_module(entry);
}
entry = strtok_r(NULL, ENV_SEPARATOR, &st);
}
adopt_orphans();
SNMP_FREE(env_var);
env_var = netsnmp_getenv("MIBFILES");
if (env_var != NULL) {
if ((*env_var == '+') || (*env_var == '-')) {
#ifdef NETSNMP_DEFAULT_MIBFILES
entry =
(char *) malloc(strlen(NETSNMP_DEFAULT_MIBFILES) +
strlen(env_var) + 2);
if (!entry) {
DEBUGMSGTL(("init_mib", "env mibfiles malloc failed"));
} else {
if (*env_var++ == '+')
sprintf(entry, "%s%c%s", NETSNMP_DEFAULT_MIBFILES, ENV_SEPARATOR_CHAR,
env_var );
else
sprintf(entry, "%s%c%s", env_var, ENV_SEPARATOR_CHAR,
NETSNMP_DEFAULT_MIBFILES );
}
SNMP_FREE(env_var);
env_var = entry;
#else
env_var = strdup(env_var + 1);
#endif
} else {
env_var = strdup(env_var);
}
} else {
#ifdef NETSNMP_DEFAULT_MIBFILES
env_var = strdup(NETSNMP_DEFAULT_MIBFILES);
#endif
}
if (env_var != NULL) {
DEBUGMSGTL(("init_mib",
"Seen MIBFILES: Looking in '%s' for mib files ...\n",
env_var));
entry = strtok_r(env_var, ENV_SEPARATOR, &st);
while (entry) {
read_mib(entry);
entry = strtok_r(NULL, ENV_SEPARATOR, &st);
}
SNMP_FREE(env_var);
}
prefix = netsnmp_getenv("PREFIX");
if (!prefix)
prefix = Standard_Prefix;
Prefix = (char *) malloc(strlen(prefix) + 2);
if (!Prefix)
DEBUGMSGTL(("init_mib", "Prefix malloc failed"));
else
strcpy(Prefix, prefix);
DEBUGMSGTL(("init_mib",
"Seen PREFIX: Looking in '%s' for prefix ...\n", Prefix));
/*
* remove trailing dot
*/
if (Prefix) {
env_var = &Prefix[strlen(Prefix) - 1];
if (*env_var == '.')
*env_var = '\0';
}
pp->str = Prefix; /* fixup first mib_prefix entry */
/*
* now that the list of prefixes is built, save each string length.
*/
while (pp->str) {
pp->len = strlen(pp->str);
pp++;
}
Mib = tree_head; /* Backwards compatibility */
tree_top = (struct tree *) calloc(1, sizeof(struct tree));
/*
* XX error check ?
*/
if (tree_top) {
tree_top->label = strdup("(top)");
tree_top->child_list = tree_head;
}
} | 1 | [
"CWE-59",
"CWE-61"
]
| net-snmp | 4fd9a450444a434a993bc72f7c3486ccce41f602 | 274,098,571,578,956,330,000,000,000,000,000,000,000 | 179 | CHANGES: snmpd: Stop reading and writing the mib_indexes/* files
Caching directory contents is something the operating system should do
and is not something Net-SNMP should do. Instead of storing a copy of
the directory contents in ${tmp_dir}/mib_indexes/${n}, always scan a
MIB directory. |
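As the commit message argues, the replacement for the cached mib_indexes files is simply to enumerate the directory each time. A hedged, self-contained C sketch of such a scan (the callback shape and filtering are assumptions, not Net-SNMP's API):
#include <dirent.h>
#include <stddef.h>

/* Walk one directory and hand every ordinary-looking entry to a callback;
 * nothing is cached on disk, so a stale or attacker-placed index file
 * can never substitute for the real directory contents. */
static int scan_mib_dir(const char *dirname,
                        void (*cb)(const char *dir, const char *name))
{
    DIR *dir = opendir(dirname);
    struct dirent *de;

    if (dir == NULL)
        return -1;
    while ((de = readdir(dir)) != NULL) {
        if (de->d_name[0] == '.')          /* skip ".", ".." and hidden files */
            continue;
        cb(dirname, de->d_name);
    }
    closedir(dir);
    return 0;
}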
xsltCopyTree(xsltTransformContextPtr ctxt, xmlNodePtr node,
xmlNodePtr insert, int literal)
{
return(xsltCopyTreeInternal(ctxt, node, node, insert, literal, 0));
} | 0 | []
| libxslt | 937ba2a3eb42d288f53c8adc211bd1122869f0bf | 10,734,302,381,965,014,000,000,000,000,000,000,000 | 6 | Fix default template processing on namespace nodes |
int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
struct cfg80211_deauth_request *req)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
bool tx = !req->local_state_change;
if (ifmgd->auth_data &&
ether_addr_equal(ifmgd->auth_data->bss->bssid, req->bssid)) {
sdata_info(sdata,
"aborting authentication with %pM by local choice (Reason: %u=%s)\n",
req->bssid, req->reason_code,
ieee80211_get_reason_code_string(req->reason_code));
drv_mgd_prepare_tx(sdata->local, sdata, 0);
ieee80211_send_deauth_disassoc(sdata, req->bssid,
IEEE80211_STYPE_DEAUTH,
req->reason_code, tx,
frame_buf);
ieee80211_destroy_auth_data(sdata, false);
ieee80211_report_disconnect(sdata, frame_buf,
sizeof(frame_buf), true,
req->reason_code);
return 0;
}
if (ifmgd->assoc_data &&
ether_addr_equal(ifmgd->assoc_data->bss->bssid, req->bssid)) {
sdata_info(sdata,
"aborting association with %pM by local choice (Reason: %u=%s)\n",
req->bssid, req->reason_code,
ieee80211_get_reason_code_string(req->reason_code));
drv_mgd_prepare_tx(sdata->local, sdata, 0);
ieee80211_send_deauth_disassoc(sdata, req->bssid,
IEEE80211_STYPE_DEAUTH,
req->reason_code, tx,
frame_buf);
ieee80211_destroy_assoc_data(sdata, false, true);
ieee80211_report_disconnect(sdata, frame_buf,
sizeof(frame_buf), true,
req->reason_code);
return 0;
}
if (ifmgd->associated &&
ether_addr_equal(ifmgd->associated->bssid, req->bssid)) {
sdata_info(sdata,
"deauthenticating from %pM by local choice (Reason: %u=%s)\n",
req->bssid, req->reason_code,
ieee80211_get_reason_code_string(req->reason_code));
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
req->reason_code, tx, frame_buf);
ieee80211_report_disconnect(sdata, frame_buf,
sizeof(frame_buf), true,
req->reason_code);
return 0;
}
return -ENOTCONN;
} | 0 | []
| linux | 79c92ca42b5a3e0ea172ea2ce8df8e125af237da | 16,226,994,718,408,385,000,000,000,000,000,000,000 | 63 | mac80211: handle deauthentication/disassociation from TDLS peer
When receiving a deauthentication/disassociation frame from a TDLS
peer, a station should not disconnect the current AP, but only
disable the current TDLS link if it's enabled.
Without this change, a TDLS issue can be reproduced by following the
steps as below:
1. STA-1 and STA-2 are connected to AP, bidirection traffic is running
between STA-1 and STA-2.
2. Set up TDLS link between STA-1 and STA-2, stay for a while, then
teardown TDLS link.
3. Repeat step #2 and monitor the connection between STA and AP.
During the test, one STA may send a deauthentication/disassociation
frame to another, after TDLS teardown, with reason code 6/7, which
means: Class 2/3 frame received from nonassociated STA.
On receive this frame, the receiver STA will disconnect the current
AP and then reconnect. It's not a expected behavior, purpose of this
frame should be disabling the TDLS link, not the link with AP.
Cc: [email protected]
Signed-off-by: Yu Wang <[email protected]>
Signed-off-by: Johannes Berg <[email protected]> |
vrrp_preempt_handler(__attribute__((unused)) vector_t *strvec)
{
vrrp_t *vrrp = LIST_TAIL_DATA(vrrp_data->vrrp);
vrrp->nopreempt = 0;
} | 0 | [
"CWE-59",
"CWE-61"
]
| keepalived | 04f2d32871bb3b11d7dc024039952f2fe2750306 | 311,997,241,565,514,070,000,000,000,000,000,000,000 | 5 | When opening files for write, ensure they aren't symbolic links
Issue #1048 identified that if, for example, a non privileged user
created a symbolic link from /etc/keepalived.data to /etc/passwd,
writing to /etc/keepalived.data (which could be invoked via DBus)
would cause /etc/passwd to be overwritten.
This commit stops keepalived writing to pathnames where the ultimate
component is a symbolic link, by setting O_NOFOLLOW whenever opening
a file for writing.
This might break some setups, where, for example, /etc/keepalived.data
was a symbolic link to /home/fred/keepalived.data. If this was the case,
instead create a symbolic link from /home/fred/keepalived.data to
/tmp/keepalived.data, so that the file is still accessible via
/home/fred/keepalived.data.
There doesn't appear to be a way around this backward incompatibility,
since even checking if the pathname is a symbolic link prior to opening
for writing would create a race condition.
Signed-off-by: Quentin Armitage <[email protected]> |
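The fix this message describes is a one-flag change at every open-for-write site. A hedged C sketch of such a helper (the name is made up; keepalived's real helper differs):
#define _POSIX_C_SOURCE 200809L
#include <fcntl.h>
#include <sys/stat.h>

/* Open a file for writing, refusing to follow a symlink in the final
 * path component; open() fails with ELOOP if path is a symbolic link. */
static int open_write_nofollow(const char *path)
{
    return open(path, O_WRONLY | O_CREAT | O_TRUNC | O_NOFOLLOW,
                S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
}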
static unsigned int attr_flags_to_mnt_flags(u64 attr_flags)
{
unsigned int mnt_flags = 0;
if (attr_flags & MOUNT_ATTR_RDONLY)
mnt_flags |= MNT_READONLY;
if (attr_flags & MOUNT_ATTR_NOSUID)
mnt_flags |= MNT_NOSUID;
if (attr_flags & MOUNT_ATTR_NODEV)
mnt_flags |= MNT_NODEV;
if (attr_flags & MOUNT_ATTR_NOEXEC)
mnt_flags |= MNT_NOEXEC;
if (attr_flags & MOUNT_ATTR_NODIRATIME)
mnt_flags |= MNT_NODIRATIME;
if (attr_flags & MOUNT_ATTR_NOSYMFOLLOW)
mnt_flags |= MNT_NOSYMFOLLOW;
return mnt_flags;
} | 0 | [
"CWE-200"
]
| linux | 427215d85e8d1476da1a86b8d67aceb485eb3631 | 172,492,931,819,316,300,000,000,000,000,000,000,000 | 19 | ovl: prevent private clone if bind mount is not allowed
Add the following checks from __do_loopback() to clone_private_mount() as
well:
- verify that the mount is in the current namespace
- verify that there are no locked children
Reported-by: Alois Wohlschlager <[email protected]>
Fixes: c771d683a62e ("vfs: introduce clone_private_mount()")
Cc: <[email protected]> # v3.18
Signed-off-by: Miklos Szeredi <[email protected]> |
struct mempolicy *get_vma_policy(struct task_struct *task,
struct vm_area_struct *vma, unsigned long addr)
{
struct mempolicy *pol = task->mempolicy;
if (vma) {
if (vma->vm_ops && vma->vm_ops->get_policy) {
struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
addr);
if (vpol)
pol = vpol;
} else if (vma->vm_policy)
pol = vma->vm_policy;
}
if (!pol)
pol = &default_policy;
return pol;
} | 0 | [
"CWE-264"
]
| linux-2.6 | 1a5a9906d4e8d1976b701f889d8f35d54b928f25 | 72,495,857,089,014,880,000,000,000,000,000,000,000 | 18 | mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem hold in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables to go away from under them, during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(¤t->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[[email protected]: checkpatch fixes]
Reported-by: Ulrich Obergfell <[email protected]>
Signed-off-by: Andrea Arcangeli <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Dave Jones <[email protected]>
Acked-by: Larry Woodman <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: <[email protected]> [2.6.38+]
Cc: Mark Salter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
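The essence of the fix explained above is to read the racy word once into a local and test only that stable snapshot. A hedged userspace analogue using C11 atomics (the predicate names are placeholders, not the kernel's pmd helpers):
#include <stdatomic.h>
#include <stdbool.h>

static bool entry_is_none(unsigned long v) { return v == 0; }
static bool entry_is_bad(unsigned long v)  { return v != 0 && (v & 0x1) == 0; }

/* Read *entry exactly once; both checks then operate on the same snapshot,
 * so a concurrent writer cannot make them disagree. */
static bool entry_none_or_bad(_Atomic unsigned long *entry)
{
    unsigned long val = atomic_load_explicit(entry, memory_order_relaxed);

    return entry_is_none(val) || entry_is_bad(val);
}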
void kvm_arch_flush_shadow(struct kvm *kvm)
{
kvm_mmu_zap_all(kvm);
kvm_reload_remote_mmus(kvm);
} | 0 | [
"CWE-200"
]
| kvm | 831d9d02f9522e739825a51a11e3bc5aa531a905 | 26,392,869,660,055,403,000,000,000,000,000,000,000 | 5 | KVM: x86: fix information leak to userland
Structures kvm_vcpu_events, kvm_debugregs, kvm_pit_state2 and
kvm_clock_data are copied to userland with some padding and reserved
fields uninitialized. It leads to leaking of contents of kernel stack
memory. We have to initialize them to zero.
In patch v1 Jan Kiszka suggested to fill reserved fields with zeros
instead of memset'ting the whole struct. It makes sense as these
fields are explicitly marked as padding. No more fields need zeroing.
KVM-Stable-Tag.
Signed-off-by: Vasiliy Kulikov <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]> |
nl80211_prep_sched_scan_msg(struct sk_buff *msg,
struct cfg80211_sched_scan_request *req, u32 cmd)
{
void *hdr;
hdr = nl80211hdr_put(msg, 0, 0, 0, cmd);
if (!hdr)
return -1;
if (nla_put_u32(msg, NL80211_ATTR_WIPHY,
wiphy_to_rdev(req->wiphy)->wiphy_idx) ||
nla_put_u32(msg, NL80211_ATTR_IFINDEX, req->dev->ifindex) ||
nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, req->reqid,
NL80211_ATTR_PAD))
goto nla_put_failure;
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
} | 0 | [
"CWE-120"
]
| linux | f88eb7c0d002a67ef31aeb7850b42ff69abc46dc | 231,844,908,532,185,400,000,000,000,000,000,000,000 | 23 | nl80211: validate beacon head
We currently don't validate the beacon head, i.e. the header,
fixed part and elements that are to go in front of the TIM
element. This means that the variable elements there can be
malformed, e.g. have a length exceeding the buffer size, but
most downstream code from this assumes that this has already
been checked.
Add the necessary checks to the netlink policy.
Cc: [email protected]
Fixes: ed1b6cc7f80f ("cfg80211/nl80211: add beacon settings")
Link: https://lore.kernel.org/r/1569009255-I7ac7fbe9436e9d8733439eab8acbbd35e55c74ef@changeid
Signed-off-by: Johannes Berg <[email protected]> |
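The policy check added by this commit amounts to walking the information elements in the beacon head and verifying each declared length against the remaining buffer. A hedged, generic C sketch of that walk (not the cfg80211 implementation):
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Each element is: 1 byte id, 1 byte length, then `length` bytes of data.
 * Return false if any element would run past the end of the buffer. */
static bool validate_elements(const uint8_t *buf, size_t len)
{
    size_t pos = 0;

    while (pos < len) {
        if (len - pos < 2)              /* truncated id/length header */
            return false;
        uint8_t elen = buf[pos + 1];
        if (elen > len - pos - 2)       /* declared length overruns buffer */
            return false;
        pos += 2 + (size_t)elen;
    }
    return true;
}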
bool walk_args(Item_processor processor, bool walk_subquery, void *arg)
{
for (uint i= 0; i < arg_count; i++)
{
if (args[i]->walk(processor, walk_subquery, arg))
return true;
}
return false;
} | 0 | [
"CWE-617"
]
| server | 2e7891080667c59ac80f788eef4d59d447595772 | 96,438,192,880,099,650,000,000,000,000,000,000,000 | 9 | MDEV-25635 Assertion failure when pushing from HAVING into WHERE of view
This bug could manifest itself after pushing a where condition over a
mergeable derived table / view / CTE DT into a grouping view / derived
table / CTE V whose item list contained set functions with constant
arguments such as MIN(2), SUM(1) etc. In such cases the field references
used in the condition pushed into the view V that correspond set functions
are wrapped into Item_direct_view_ref wrappers. Due to a wrong implementation
of the virtual method const_item() for the class Item_direct_view_ref the
wrapped set functions with constant arguments could be erroneously taken
for constant items. This could lead to a wrong result set returned by the
main select query in 10.2. In 10.4 where a possibility of pushing condition
from HAVING into WHERE had been added this could cause a crash.
Approved by Sergey Petrunya <[email protected]> |
SecureElementStatus_t SecureElementSetDevEui( uint8_t* devEui )
{
if( devEui == NULL )
{
return SECURE_ELEMENT_ERROR_NPE;
}
memcpy1( SeContext.DevEui, devEui, SE_EUI_SIZE );
SeNvmCtxChanged( );
return SECURE_ELEMENT_SUCCESS;
} | 0 | [
"CWE-120",
"CWE-787"
]
| LoRaMac-node | e3063a91daa7ad8a687223efa63079f0c24568e4 | 115,604,445,340,252,450,000,000,000,000,000,000,000 | 10 | Added received buffer size checks. |
void zend_shared_alloc_lock(TSRMLS_D)
{
#ifndef ZEND_WIN32
#ifdef ZTS
tsrm_mutex_lock(zts_lock);
#endif
#if 0
/* this will happen once per process, and will un-globalize mem_write_lock */
if (mem_write_lock.l_pid == -1) {
mem_write_lock.l_pid = getpid();
}
#endif
while (1) {
if (fcntl(lock_file, F_SETLKW, &mem_write_lock) == -1) {
if (errno == EINTR) {
continue;
}
zend_accel_error(ACCEL_LOG_ERROR, "Cannot create lock - %s (%d)", strerror(errno), errno);
}
break;
}
#else
zend_shared_alloc_lock_win32();
#endif
ZCG(locked) = 1;
/* Prepare translation table
*
* Make it persistent so that it uses malloc() and allocated blocks
* won't be taken from space which is freed by efree in memdup.
* Otherwise it leads to false matches in memdup check.
*/
zend_hash_init(&xlat_table, 100, NULL, NULL, 1);
} | 0 | [
"CWE-416"
]
| php-src | 0a8f28b43212cc2ddbc1f2df710e37b1bec0addd | 12,456,624,217,880,618,000,000,000,000,000,000,000 | 38 | Fixed bug #68677 (Use After Free in OPcache)
(cherry picked from commit 777c39f4042327eac4b63c7ee87dc1c7a09a3115) |
int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
int rc = 0;
wait_event(card->wait_q,
(rc = __qeth_do_run_thread(card, thread)) >= 0);
return rc;
} | 0 | [
"CWE-200",
"CWE-119"
]
| linux | 6fb392b1a63ae36c31f62bc3fc8630b49d602b62 | 335,395,097,167,771,840,000,000,000,000,000,000,000 | 8 | qeth: avoid buffer overflow in snmp ioctl
Check user-defined length in snmp ioctl request and allow request
only if it fits into a qeth command buffer.
Signed-off-by: Ursula Braun <[email protected]>
Signed-off-by: Frank Blaschka <[email protected]>
Reviewed-by: Heiko Carstens <[email protected]>
Reported-by: Nico Golde <[email protected]>
Reported-by: Fabian Yamaguchi <[email protected]>
Cc: <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
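The actual change sits in the SNMP ioctl path rather than in the helper shown, but the pattern is small enough to sketch: reject the request before copying if the user-supplied length exceeds the fixed command buffer. The buffer size and names below are illustrative, not the qeth driver's.
#include <errno.h>
#include <stddef.h>
#include <string.h>

#define CMD_BUF_LEN 4096u   /* assumed fixed command-buffer capacity */

/* Copy a user-supplied request into a fixed buffer only if it fits. */
static int stage_request(char *cmd_buf, const char *req, size_t req_len)
{
    if (req_len > CMD_BUF_LEN)
        return -EINVAL;                 /* too large: refuse instead of overflowing */
    memcpy(cmd_buf, req, req_len);
    return 0;
}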
jas_cmprof_t *jas_cmprof_copy(jas_cmprof_t *prof)
{
jas_cmprof_t *newprof;
int i;
if (!(newprof = jas_cmprof_create()))
goto error;
newprof->clrspc = prof->clrspc;
newprof->numchans = prof->numchans;
newprof->refclrspc = prof->refclrspc;
newprof->numrefchans = prof->numrefchans;
newprof->iccprof = jas_iccprof_copy(prof->iccprof);
for (i = 0; i < JAS_CMPROF_NUMPXFORMSEQS; ++i) {
if (prof->pxformseqs[i]) {
if (!(newprof->pxformseqs[i] = jas_cmpxformseq_copy(prof->pxformseqs[i])))
goto error;
}
}
return newprof;
error:
return 0;
} | 0 | [
"CWE-189"
]
| jasper | 3c55b399c36ef46befcb21e4ebc4799367f89684 | 114,926,668,317,368,450,000,000,000,000,000,000,000 | 22 | At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems. |
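The jas_alloc2-style helpers the message refers to are essentially a checked multiply in front of malloc. A hedged sketch of that idea (this is not JasPer's exact implementation):
#include <stdint.h>
#include <stdlib.h>

/* Allocate num * size bytes, returning NULL instead of letting the
 * multiplication wrap around size_t. */
static void *alloc2(size_t num, size_t size)
{
    if (size != 0 && num > SIZE_MAX / size)
        return NULL;                    /* num * size would overflow */
    return malloc(num * size);
}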
static int network_init(void) {
static _Bool have_init = 0;
/* Check if we were already initialized. If so, just return - there's
* nothing more to do (for now, that is). */
if (have_init)
return (0);
have_init = 1;
if (network_config_stats)
plugin_register_read("network", network_stats_read);
plugin_register_shutdown("network", network_shutdown);
send_buffer = malloc(network_config_packet_size);
if (send_buffer == NULL) {
ERROR("network plugin: malloc failed.");
return (-1);
}
network_init_buffer();
/* setup socket(s) and so on */
if (sending_sockets != NULL) {
plugin_register_write("network", network_write,
/* user_data = */ NULL);
plugin_register_notification("network", network_notification,
/* user_data = */ NULL);
}
/* If no threads need to be started, return here. */
if ((listen_sockets_num == 0) ||
((dispatch_thread_running != 0) && (receive_thread_running != 0)))
return (0);
if (dispatch_thread_running == 0) {
int status;
status = plugin_thread_create(&dispatch_thread_id, NULL /* no attributes */,
dispatch_thread, NULL /* no argument */);
if (status != 0) {
char errbuf[1024];
ERROR("network: pthread_create failed: %s",
sstrerror(errno, errbuf, sizeof(errbuf)));
} else {
dispatch_thread_running = 1;
}
}
if (receive_thread_running == 0) {
int status;
status = plugin_thread_create(&receive_thread_id, NULL /* no attributes */,
receive_thread, NULL /* no argument */);
if (status != 0) {
char errbuf[1024];
ERROR("network: pthread_create failed: %s",
sstrerror(errno, errbuf, sizeof(errbuf)));
} else {
receive_thread_running = 1;
}
}
return (0);
} /* int network_init */ | 0 | [
"CWE-835"
]
| collectd | f6be4f9b49b949b379326c3d7002476e6ce4f211 | 337,515,615,047,699,360,000,000,000,000,000,000,000 | 62 | network plugin: Fix endless loop DOS in parse_packet()
When a correct 'Signature part' is received by collectd configured without
the AuthFile option, an endless-loop condition occurs due to a missing
increase of the pointer to the next unprocessed part.
This is a forward-port of #2233.
Fixes: CVE-2017-7401
Closes: #2174
Signed-off-by: Florian Forster <[email protected]> |
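The endless loop fixed by this commit came from a parser that, on one code path, never advanced past the part it had just examined. A hedged generic sketch of the invariant (the type/length framing is assumed, not collectd's exact wire format): every iteration must move the cursor forward by the full part length, including parts that are merely skipped.
#include <stddef.h>
#include <stdint.h>

/* Parse length-prefixed parts: 2 bytes type, 2 bytes total length (>= 4).
 * Returns the number of parts seen, or -1 on a malformed packet. */
static int parse_packet(const uint8_t *buf, size_t len)
{
    size_t pos = 0;
    int parts = 0;

    while (len - pos >= 4) {
        uint16_t plen = (uint16_t)((buf[pos + 2] << 8) | buf[pos + 3]);

        if (plen < 4 || plen > len - pos)
            return -1;                  /* bogus length: bail out, don't spin */
        /* ...dispatch on the type bytes here; ignored parts fall through... */
        pos += plen;                    /* always advance, on every path */
        parts++;
    }
    return parts;
}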
static gboolean property_get_discovering(const GDBusPropertyTable *property,
DBusMessageIter *iter, void *user_data)
{
struct btd_adapter *adapter = user_data;
dbus_bool_t discovering = adapter->discovering;
dbus_message_iter_append_basic(iter, DBUS_TYPE_BOOLEAN, &discovering);
return TRUE;
} | 0 | [
"CWE-862",
"CWE-863"
]
| bluez | b497b5942a8beb8f89ca1c359c54ad67ec843055 | 12,912,612,943,301,900,000,000,000,000,000,000,000 | 10 | adapter: Fix storing discoverable setting
The discoverable setting shall only be stored when changed via the
Discoverable property and not when a discovery client sets it, as that is
considered temporary, just for the lifetime of the discovery. |
HttpTransact::HandleFiltering(State* s)
{
ink_release_assert(!"Fix-Me AUTH MERGE");
if (s->method == HTTP_WKSIDX_PUSH && s->http_config_param->push_method_enabled == 0) {
// config file says this request is not authorized.
// send back error response to client.
DebugTxn("http_trans", "[HandleFiltering] access denied.");
DebugTxn("http_seq", "[HttpTransact::HandleFiltering] Access Denied.");
SET_VIA_STRING(VIA_DETAIL_TUNNEL, VIA_DETAIL_TUNNEL_NO_FORWARD);
// adding a comment so that cvs recognizes that I added a space in the text below
build_error_response(s, HTTP_STATUS_FORBIDDEN, "Access Denied", "access#denied", NULL);
// s->cache_info.action = CACHE_DO_NO_ACTION;
TRANSACT_RETURN(SM_ACTION_SEND_ERROR_CACHE_NOOP, NULL);
}
DebugTxn("http_seq", "[HttpTransact::HandleFiltering] Request Authorized.");
//////////////////////////////////////////////////////////////
// ok, the config file says that the request is authorized. //
//////////////////////////////////////////////////////////////
// request is not black listed so now decided if we ought to
// lookup the cache
DecideCacheLookup(s);
} | 0 | [
"CWE-119"
]
| trafficserver | 8b5f0345dade6b2822d9b52c8ad12e63011a5c12 | 224,377,385,395,449,960,000,000,000,000,000,000,000 | 26 | Fix the internal buffer sizing. Thanks to Sudheer for helping isolating this bug |
zone_send_securedb(dns_zone_t *zone, dns_db_t *db) {
isc_event_t *e;
dns_db_t *dummy = NULL;
dns_zone_t *secure = NULL;
e = isc_event_allocate(zone->secure->mctx, zone,
DNS_EVENT_ZONESECUREDB,
receive_secure_db, zone->secure,
sizeof(struct secure_event));
if (e == NULL)
return (ISC_R_NOMEMORY);
dns_db_attach(db, &dummy);
((struct secure_event *)e)->db = dummy;
INSIST(LOCKED_ZONE(zone->secure));
zone_iattach(zone->secure, &secure);
isc_task_send(zone->secure->task, &e);
DNS_ZONE_CLRFLAG(zone, DNS_ZONEFLG_SENDSECURE);
return (ISC_R_SUCCESS);
} | 0 | [
"CWE-327"
]
| bind9 | f09352d20a9d360e50683cd1d2fc52ccedcd77a0 | 48,891,394,583,787,400,000,000,000,000,000,000,000 | 19 | Update keyfetch_done compute_tag check
If in keyfetch_done the compute_tag fails (because for example the
algorithm is not supported), don't crash, but instead ignore the
key. |
static int TS_find_cert(STACK_OF(ESS_CERT_ID) *cert_ids, X509 *cert)
{
int i;
if (!cert_ids || !cert) return -1;
/* Recompute SHA1 hash of certificate if necessary (side effect). */
X509_check_purpose(cert, -1, 0);
/* Look for cert in the cert_ids vector. */
for (i = 0; i < sk_ESS_CERT_ID_num(cert_ids); ++i)
{
ESS_CERT_ID *cid = sk_ESS_CERT_ID_value(cert_ids, i);
/* Check the SHA-1 hash first. */
if (cid->hash->length == sizeof(cert->sha1_hash)
&& !memcmp(cid->hash->data, cert->sha1_hash,
sizeof(cert->sha1_hash)))
{
/* Check the issuer/serial as well if specified. */
ESS_ISSUER_SERIAL *is = cid->issuer_serial;
if (!is || !TS_issuer_serial_cmp(is, cert->cert_info))
return i;
}
}
return -1;
} | 0 | []
| openssl | c7235be6e36c4bef84594aa3b2f0561db84b63d8 | 82,657,138,462,581,360,000,000,000,000,000,000,000 | 28 | RFC 3161 compliant time stamp request creation, response generation
and response verification.
Submitted by: Zoltan Glozik <[email protected]>
Reviewed by: Ulf Moeller |
static struct buf *cache_buf(const struct index_record *record)
{
static struct buf staticbuf;
buf_init_ro(&staticbuf,
cache_base(record),
cache_len(record));
return &staticbuf;
} | 0 | []
| cyrus-imapd | 1d6d15ee74e11a9bd745e80be69869e5fb8d64d6 | 152,024,775,229,111,270,000,000,000,000,000,000,000 | 10 | mailbox.c/reconstruct.c: Add mailbox_mbentry_from_path() |
PHP_METHOD(Phar, mount)
{
char *fname, *arch = NULL, *entry = NULL, *path, *actual;
int fname_len, arch_len, entry_len;
size_t path_len, actual_len;
phar_archive_data *pphar;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "pp", &path, &path_len, &actual, &actual_len) == FAILURE) {
return;
}
fname = (char*)zend_get_executed_filename();
fname_len = strlen(fname);
#ifdef PHP_WIN32
phar_unixify_path_separators(fname, fname_len);
#endif
if (fname_len > 7 && !memcmp(fname, "phar://", 7) && SUCCESS == phar_split_fname(fname, fname_len, &arch, &arch_len, &entry, &entry_len, 2, 0)) {
efree(entry);
entry = NULL;
if (path_len > 7 && !memcmp(path, "phar://", 7)) {
zend_throw_exception_ex(phar_ce_PharException, 0, "Can only mount internal paths within a phar archive, use a relative path instead of \"%s\"", path);
efree(arch);
return;
}
carry_on2:
if (NULL == (pphar = zend_hash_str_find_ptr(&(PHAR_G(phar_fname_map)), arch, arch_len))) {
if (PHAR_G(manifest_cached) && NULL != (pphar = zend_hash_str_find_ptr(&cached_phars, arch, arch_len))) {
if (SUCCESS == phar_copy_on_write(&pphar)) {
goto carry_on;
}
}
zend_throw_exception_ex(phar_ce_PharException, 0, "%s is not a phar archive, cannot mount", arch);
if (arch) {
efree(arch);
}
return;
}
carry_on:
if (SUCCESS != phar_mount_entry(pphar, actual, actual_len, path, path_len)) {
zend_throw_exception_ex(phar_ce_PharException, 0, "Mounting of %s to %s within phar %s failed", path, actual, arch);
if (path && path == entry) {
efree(entry);
}
if (arch) {
efree(arch);
}
return;
}
if (entry && path && path == entry) {
efree(entry);
}
if (arch) {
efree(arch);
}
return;
} else if (PHAR_G(phar_fname_map.u.flags) && NULL != (pphar = zend_hash_str_find_ptr(&(PHAR_G(phar_fname_map)), fname, fname_len))) {
goto carry_on;
} else if (PHAR_G(manifest_cached) && NULL != (pphar = zend_hash_str_find_ptr(&cached_phars, fname, fname_len))) {
if (SUCCESS == phar_copy_on_write(&pphar)) {
goto carry_on;
}
goto carry_on;
} else if (SUCCESS == phar_split_fname(path, path_len, &arch, &arch_len, &entry, &entry_len, 2, 0)) {
path = entry;
path_len = entry_len;
goto carry_on2;
}
zend_throw_exception_ex(phar_ce_PharException, 0, "Mounting of %s to %s failed", path, actual);
} | 0 | [
"CWE-20"
]
| php-src | 1e9b175204e3286d64dfd6c9f09151c31b5e099a | 250,141,251,423,059,450,000,000,000,000,000,000,000 | 81 | Fix bug #71860: Require valid paths for phar filenames |
fr_window_show_cb (GtkWidget *widget,
FrWindow *window)
{
fr_window_update_current_location (window);
set_active (window, "ViewStatusbar", g_settings_get_boolean (window->priv->settings_ui, PREF_UI_VIEW_STATUSBAR));
window->priv->view_folders = g_settings_get_boolean (window->priv->settings_ui, PREF_UI_VIEW_FOLDERS);
set_active (window, "ViewFolders", window->priv->view_folders);
gtk_widget_hide (window->priv->filter_bar);
return TRUE;
} | 0 | [
"CWE-22"
]
| file-roller | b147281293a8307808475e102a14857055f81631 | 89,295,416,946,787,250,000,000,000,000,000,000,000 | 14 | libarchive: sanitize filenames before extracting |
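The one-line summary above ("sanitize filenames before extracting") corresponds to a classic CWE-22 check: refuse archive member names that are absolute or contain a ".." component. A hedged, self-contained C sketch of such a check (not file-roller's or libarchive's code):
#include <stdbool.h>
#include <string.h>

/* Reject member names that could escape the extraction directory. */
static bool extract_name_is_safe(const char *name)
{
    const char *p = name;

    if (name[0] == '/')                 /* absolute path */
        return false;
    while (*p != '\0') {
        size_t seg = strcspn(p, "/");
        if (seg == 2 && p[0] == '.' && p[1] == '.')
            return false;               /* ".." component */
        p += seg;
        if (*p == '/')
            p++;
    }
    return true;
}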
__export struct rad_dict_attr_t *rad_dict_find_attr(const char *name)
{
return dict_find_attr(&dict->items, name);
} | 0 | [
"CWE-787"
]
| accel-ppp | d4cb89721cc8e5b3dd3fbefaf173eb77ecb85615 | 292,984,124,311,788,200,000,000,000,000,000,000,000 | 4 | fix buffer overflow when receive radius packet
This patch fixes buffer overflow if radius packet contains invalid atribute length
and attrubute type from the following list: ipv4addr, ipv6addr, ipv6prefix or ifid
Reported-by: Chloe Ong
Reported-by: Eugene Lim <[email protected]>
Reported-by: Kar Wei Loh
Signed-off-by: Sergey V. Lobanov <[email protected]> |
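The overflow the message describes comes from trusting the length field of an attribute whose data type implies a fixed size. A hedged sketch of the check (the type names are taken from the message; the sizes follow the usual RADIUS encodings and are stated here as assumptions):
#include <stdbool.h>
#include <stddef.h>

enum attr_type { ATTR_IPV4ADDR, ATTR_IPV6ADDR, ATTR_IPV6PREFIX, ATTR_IFID, ATTR_OTHER };

/* Validate an attribute's declared payload length against the size its
 * data type implies before any copy takes place. */
static bool attr_len_ok(enum attr_type t, size_t payload_len)
{
    switch (t) {
    case ATTR_IPV4ADDR:   return payload_len == 4;
    case ATTR_IFID:       return payload_len == 8;
    case ATTR_IPV6ADDR:   return payload_len == 16;
    case ATTR_IPV6PREFIX: return payload_len >= 2 && payload_len <= 18;
    default:              return true;   /* variable-length types checked elsewhere */
    }
}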
template<typename t>
CImg<T>& _LU(CImg<t>& indx, bool& d) {
const int N = width();
int imax = 0;
CImg<Tfloat> vv(N);
indx.assign(N);
d = true;
bool return0 = false;
cimg_pragma_openmp(parallel for cimg_openmp_if(_width*_height>=512))
cimg_forX(*this,i) {
Tfloat vmax = 0;
cimg_forX(*this,j) {
const Tfloat tmp = cimg::abs((*this)(j,i));
if (tmp>vmax) vmax = tmp;
}
if (vmax==0) return0 = true; else vv[i] = 1/vmax;
}
if (return0) { indx.fill(0); return fill(0); }
cimg_forX(*this,j) {
for (int i = 0; i<j; ++i) {
Tfloat sum = (*this)(j,i);
for (int k = 0; k<i; ++k) sum-=(*this)(k,i)*(*this)(j,k);
(*this)(j,i) = (T)sum;
}
Tfloat vmax = 0;
for (int i = j; i<width(); ++i) {
Tfloat sum = (*this)(j,i);
for (int k = 0; k<j; ++k) sum-=(*this)(k,i)*(*this)(j,k);
(*this)(j,i) = (T)sum;
const Tfloat tmp = vv[i]*cimg::abs(sum);
if (tmp>=vmax) { vmax = tmp; imax = i; }
}
if (j!=imax) {
cimg_forX(*this,k) cimg::swap((*this)(k,imax),(*this)(k,j));
d = !d;
vv[imax] = vv[j];
}
indx[j] = (t)imax;
if ((*this)(j,j)==0) (*this)(j,j) = (T)1e-20;
if (j<N) {
const Tfloat tmp = 1/(Tfloat)(*this)(j,j);
for (int i = j + 1; i<N; ++i) (*this)(j,i) = (T)((*this)(j,i)*tmp);
}
}
    return *this;
  } | 0 | [
"CWE-119",
"CWE-787"
]
| CImg | ac8003393569aba51048c9d67e1491559877b1d1 | 183,557,371,346,623,800,000,000,000,000,000,000,000 | 49 | . |
static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
bool *resched_timer)
{
struct sigqueue *q, *first = NULL;
/*
* Collect the siginfo appropriate to this signal. Check if
* there is another siginfo for the same signal.
*/
list_for_each_entry(q, &list->list, list) {
if (q->info.si_signo == sig) {
if (first)
goto still_pending;
first = q;
}
}
sigdelset(&list->signal, sig);
if (first) {
still_pending:
list_del_init(&first->list);
copy_siginfo(info, &first->info);
*resched_timer =
(first->flags & SIGQUEUE_PREALLOC) &&
(info->si_code == SI_TIMER) &&
(info->si_sys_private);
__sigqueue_free(first);
} else {
/*
* Ok, it wasn't in the queue. This must be
* a fast-pathed signal or we must have been
* out of queue space. So zero out the info.
*/
clear_siginfo(info);
info->si_signo = sig;
info->si_errno = 0;
info->si_code = SI_USER;
info->si_pid = 0;
info->si_uid = 0;
}
} | 0 | [
"CWE-190"
]
| linux | d1e7fd6462ca9fc76650fbe6ca800e35b24267da | 242,384,130,263,043,700,000,000,000,000,000,000,000 | 44 | signal: Extend exec_id to 64bits
Replace the 32bit exec_id with a 64bit exec_id to make it impossible
to wrap the exec_id counter. With care an attacker can cause exec_id
wrap and send arbitrary signals to a newly exec'd parent. This
bypasses the signal sending checks if the parent changes their
credentials during exec.
The severity of this problem can be seen in that, in my limited testing
of a 32bit exec_id, it can take as little as 19s to exec 65536 times.
Which means that it can take as little as 14 days to wrap a 32bit
exec_id. Adam Zabrocki has succeeded wrapping the self_exe_id in 7
days. Even my slower timing is in the uptime of a typical server.
Which means self_exec_id is simply a speed bump today, and if exec
gets noticeably faster self_exec_id won't even be a speed bump.
Extending self_exec_id to 64bits introduces a problem on 32bit
architectures where reading self_exec_id is no longer atomic and can
take two read instructions. Which means that it is possible to hit
a window where the read value of exec_id does not match the written
value. So with very lucky timing after this change this still
remains exploitable.
I have updated the update of exec_id on exec to use WRITE_ONCE
and the read of exec_id in do_notify_parent to use READ_ONCE
to make it clear that there is no locking between these two
locations.
Link: https://lore.kernel.org/kernel-hardening/[email protected]
Fixes: 2.3.23pre2
Cc: [email protected]
Signed-off-by: "Eric W. Biederman" <[email protected]> |
explicit MockRequestIDExtension(Random::RandomGenerator& random)
: real_(Extensions::RequestId::UUIDRequestIDExtension::defaultInstance(random)) {
ON_CALL(*this, set(_, _))
.WillByDefault([this](Http::RequestHeaderMap& request_headers, bool force) {
return real_->set(request_headers, force);
});
ON_CALL(*this, setInResponse(_, _))
.WillByDefault([this](Http::ResponseHeaderMap& response_headers,
const Http::RequestHeaderMap& request_headers) {
return real_->setInResponse(response_headers, request_headers);
});
ON_CALL(*this, toInteger(_))
.WillByDefault([this](const Http::RequestHeaderMap& request_headers) {
return real_->toInteger(request_headers);
});
ON_CALL(*this, getTraceReason(_))
.WillByDefault([this](const Http::RequestHeaderMap& request_headers) {
return real_->getTraceReason(request_headers);
});
ON_CALL(*this, setTraceReason(_, _))
.WillByDefault(
[this](Http::RequestHeaderMap& request_headers, Tracing::Reason trace_status) {
real_->setTraceReason(request_headers, trace_status);
});
} | 0 | [
"CWE-22"
]
| envoy | 5333b928d8bcffa26ab19bf018369a835f697585 | 249,007,305,682,464,200,000,000,000,000,000,000,000 | 25 | Implement handling of escaped slash characters in URL path
Fixes: CVE-2021-29492
Signed-off-by: Yan Avlasov <[email protected]> |
static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
struct ndmsg *ndm;
struct nlattr *tb[NDA_MAX+1];
struct net_device *dev;
u8 *addr;
int err;
err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
if (err < 0)
return err;
ndm = nlmsg_data(nlh);
if (ndm->ndm_ifindex == 0) {
pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ifindex\n");
return -EINVAL;
}
dev = __dev_get_by_index(net, ndm->ndm_ifindex);
if (dev == NULL) {
pr_info("PF_BRIDGE: RTM_NEWNEIGH with unknown ifindex\n");
return -ENODEV;
}
if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid address\n");
return -EINVAL;
}
addr = nla_data(tb[NDA_LLADDR]);
if (!is_valid_ether_addr(addr)) {
pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ether address\n");
return -EINVAL;
}
err = -EOPNOTSUPP;
/* Support fdb on master device the net/bridge default case */
if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
(dev->priv_flags & IFF_BRIDGE_PORT)) {
struct net_device *br_dev = netdev_master_upper_dev_get(dev);
const struct net_device_ops *ops = br_dev->netdev_ops;
err = ops->ndo_fdb_add(ndm, tb, dev, addr, nlh->nlmsg_flags);
if (err)
goto out;
else
ndm->ndm_flags &= ~NTF_MASTER;
}
/* Embedded bridge, macvlan, and any other device support */
if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_add) {
err = dev->netdev_ops->ndo_fdb_add(ndm, tb,
dev, addr,
nlh->nlmsg_flags);
if (!err) {
rtnl_fdb_notify(dev, addr, RTM_NEWNEIGH);
ndm->ndm_flags &= ~NTF_SELF;
}
}
out:
return err;
} | 0 | [
"CWE-399"
]
| linux-2.6 | 84d73cd3fb142bf1298a8c13fd4ca50fd2432372 | 30,966,248,985,159,734,000,000,000,000,000,000,000 | 65 | rtnl: fix info leak on RTM_GETLINK request for VF devices
Initialize the mac address buffer with 0 as the driver specific function
will probably not fill the whole buffer. In fact, all in-kernel drivers
fill only ETH_ALEN of the MAX_ADDR_LEN bytes, i.e. 6 of the 32 possible
bytes. Therefore we currently leak 26 bytes of stack memory to userland
via the netlink interface.
Signed-off-by: Mathias Krause <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static gboolean netscreen_check_file_type(wtap *wth, int *err, gchar **err_info)
{
char buf[NETSCREEN_LINE_LENGTH];
guint reclen, line;
buf[NETSCREEN_LINE_LENGTH-1] = '\0';
for (line = 0; line < NETSCREEN_HEADER_LINES_TO_CHECK; line++) {
if (file_gets(buf, NETSCREEN_LINE_LENGTH, wth->fh) == NULL) {
/* EOF or error. */
*err = file_error(wth->fh, err_info);
return FALSE;
}
reclen = (guint) strlen(buf);
if (reclen < strlen(NETSCREEN_HDR_MAGIC_STR1) ||
reclen < strlen(NETSCREEN_HDR_MAGIC_STR2)) {
continue;
}
if (strstr(buf, NETSCREEN_HDR_MAGIC_STR1) ||
strstr(buf, NETSCREEN_HDR_MAGIC_STR2)) {
return TRUE;
}
}
*err = 0;
return FALSE;
} | 0 | [
"CWE-20"
]
| wireshark | 11edc83b98a61e890d7bb01855389d40e984ea82 | 202,793,941,967,959,070,000,000,000,000,000,000,000 | 28 | Don't treat the packet length as unsigned.
The scanf family of functions are as annoyingly bad at handling unsigned
numbers as strtoul() is - both of them are perfectly willing to accept a
value beginning with a negative sign as an unsigned value. When using
strtoul(), you can compensate for this by explicitly checking for a '-'
as the first character of the string, but you can't do that with
sscanf().
So revert to having pkt_len be signed, and scanning it with %d, but
check for a negative value and fail if we see a negative value.
Bug: 12396
Change-Id: I54fe8f61f42c32b5ef33da633ece51bbcda8c95f
Reviewed-on: https://code.wireshark.org/review/15220
Reviewed-by: Guy Harris <[email protected]> |
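The point of the commit message is that %u quietly accepts a leading minus sign, so the robust pattern is to scan into a signed int and reject negatives explicitly. A hedged, minimal C sketch of that pattern:
#include <stdbool.h>
#include <stdio.h>

/* Parse a packet length from a text line; reject anything negative,
 * since sscanf("%u", ...) would silently wrap a value like "-1". */
static bool parse_pkt_len(const char *line, unsigned int *out)
{
    int pkt_len;

    if (sscanf(line, "%d", &pkt_len) != 1)
        return false;
    if (pkt_len < 0)
        return false;
    *out = (unsigned int)pkt_len;
    return true;
}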
static void unit_emit_audit_start(Unit *u) {
assert(u);
if (u->type != UNIT_SERVICE)
return;
/* Write audit record if we have just finished starting up */
manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, true);
u->in_audit = true;
} | 0 | [
"CWE-269"
]
| systemd | bf65b7e0c9fc215897b676ab9a7c9d1c688143ba | 269,984,704,752,399,930,000,000,000,000,000,000,000 | 10 | core: imply NNP and SUID/SGID restriction for DynamicUser=yes service
Let's be safe, rather than sorry. This way DynamicUser=yes services can
neither take advantage of, nor create, SUID/SGID binaries.
Given that DynamicUser= is a recent addition only we should be able to
get away with turning this on, even though this is strictly speaking a
binary compatibility breakage. |
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info,
ExceptionInfo *exception)
{
#define AssignImageTag "Assign/Image"
ssize_t
y;
/*
Allocate image colormap.
*/
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(cube_info->quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace(image,cube_info->quantize_info->colorspace,
exception);
else
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
(void) TransformImageColorspace(image,sRGBColorspace,exception);
if (AcquireImageColormap(image,cube_info->colors,exception) == MagickFalse)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);;
image->colors=0;
cube_info->transparent_pixels=0;
cube_info->transparent_index=(-1);
(void) DefineImageColormap(image,cube_info,cube_info->root);
/*
Create a reduced color image.
*/
if (cube_info->quantize_info->dither_method != NoDitherMethod)
(void) DitherImage(image,cube_info,exception);
else
{
CacheView
*image_view;
MagickBooleanType
status;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CubeInfo
cube;
register Quantum
*magick_restrict q;
register ssize_t
x;
ssize_t
count;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
cube=(*cube_info);
for (x=0; x < (ssize_t) image->columns; x+=count)
{
DoublePixelPacket
pixel;
register const NodeInfo
*node_info;
register ssize_t
i;
size_t
id,
index;
/*
Identify the deepest node containing the pixel's color.
*/
for (count=1; (x+count) < (ssize_t) image->columns; count++)
{
PixelInfo
packet;
GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet);
if (IsPixelEquivalent(image,q,&packet) == MagickFalse)
break;
}
AssociateAlphaPixel(image,&cube,q,&pixel);
node_info=cube.root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(&cube,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
cube.target=pixel;
cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
1.0);
ClosestColor(image,&cube,node_info->parent);
index=cube.color_number;
for (i=0; i < (ssize_t) count; i++)
{
if (image->storage_class == PseudoClass)
SetPixelIndex(image,(Quantum) index,q);
if (cube.quantize_info->measure_error == MagickFalse)
{
SetPixelRed(image,ClampToQuantum(
image->colormap[index].red),q);
SetPixelGreen(image,ClampToQuantum(
image->colormap[index].green),q);
SetPixelBlue(image,ClampToQuantum(
image->colormap[index].blue),q);
if (cube.associate_alpha != MagickFalse)
SetPixelAlpha(image,ClampToQuantum(
image->colormap[index].alpha),q);
}
q+=GetPixelChannels(image);
}
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_AssignImageColors)
#endif
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
}
if (cube_info->quantize_info->measure_error != MagickFalse)
(void) GetImageQuantizeError(image,exception);
if ((cube_info->quantize_info->number_colors == 2) &&
(cube_info->quantize_info->colorspace == GRAYColorspace))
{
double
intensity;
/*
Monochrome image.
*/
intensity=0.0;
if ((image->colors > 1) &&
(GetPixelInfoLuma(image->colormap+0) >
GetPixelInfoLuma(image->colormap+1)))
intensity=(double) QuantumRange;
image->colormap[0].red=intensity;
image->colormap[0].green=intensity;
image->colormap[0].blue=intensity;
if (image->colors > 1)
{
image->colormap[1].red=(double) QuantumRange-intensity;
image->colormap[1].green=(double) QuantumRange-intensity;
image->colormap[1].blue=(double) QuantumRange-intensity;
}
}
(void) SyncImage(image,exception);
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(cube_info->quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace((Image *) image,sRGBColorspace,exception);
return(MagickTrue);
} | 0 | [
"CWE-703",
"CWE-772"
]
| ImageMagick | 0417cea1b6d72f90bd4f1f573f91e42a8ba66a89 | 312,526,643,901,289,000,000,000,000,000,000,000,000 | 181 | https://github.com/ImageMagick/ImageMagick/issues/574 |
int pm_qos_sysfs_add_resume_latency(struct device *dev)
{
return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
} | 0 | [
"CWE-787"
]
| linux | aa838896d87af561a33ecefea1caa4c15a68bc47 | 296,990,423,725,061,130,000,000,000,000,000,000,000 | 4 | drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions
Convert the various sprintf family calls in sysfs device show functions
to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety.
Done with:
$ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 .
And cocci script:
$ cat sysfs_emit_dev.cocci
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- strcpy(buf, chr);
+ sysfs_emit(buf, chr);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
- len += scnprintf(buf + len, PAGE_SIZE - len,
+ len += sysfs_emit_at(buf, len,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
...
- strcpy(buf, chr);
- return strlen(buf);
+ return sysfs_emit(buf, chr);
}
Signed-off-by: Joe Perches <[email protected]>
Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
static toff_t tiff_sizeproc(thandle_t clientdata)
{
tiff_handle *th = (tiff_handle *)clientdata;
return th->size;
} | 0 | [
"CWE-125"
]
| libgd | 4859d69e07504d4b0a4bdf9bcb4d9e3769ca35ae | 123,322,264,993,035,780,000,000,000,000,000,000,000 | 5 | Fix invalid read in gdImageCreateFromTiffPtr()
tiff_invalid_read.tiff is corrupt, and causes an invalid read in
gdImageCreateFromTiffPtr(), but not in gdImageCreateFromTiff(). The culprit
is dynamicGetbuf(), which doesn't check for out-of-bound reads. In this case,
dynamicGetbuf() is called with a negative dp->pos, but also positive buffer
overflows have to be handled, in which case 0 has to be returned (cf. commit
75e29a9).
Fixing dynamicGetbuf() exhibits that the corrupt TIFF would still create
the image, because the return value of TIFFReadRGBAImage() is not checked.
We do that, and let createFromTiffRgba() fail if TIFFReadRGBAImage() fails.
This issue had been reported by Ibrahim El-Sayed to [email protected].
CVE-2016-6911 |
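The underlying fix is a read callback that refuses out-of-range positions instead of indexing blindly, plus checking the decoder's return value. A hedged generic sketch of the bounds check (names are illustrative, not libgd's dynamicGetbuf):
#include <stddef.h>
#include <string.h>

struct mem_stream {
    const unsigned char *data;
    size_t size;
    size_t pos;
};

/* Copy up to want bytes from the stream; return 0 (EOF) rather than
 * reading before the start or past the end of the backing buffer. */
static size_t stream_read(struct mem_stream *s, void *out, size_t want)
{
    if (s->pos > s->size)               /* position already out of range */
        return 0;
    size_t avail = s->size - s->pos;
    size_t n = want < avail ? want : avail;

    memcpy(out, s->data + s->pos, n);
    s->pos += n;
    return n;
}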
static void qxl_enter_vga_mode(PCIQXLDevice *d)
{
if (d->mode == QXL_MODE_VGA) {
return;
}
trace_qxl_enter_vga_mode(d->id);
spice_qxl_driver_unload(&d->ssd.qxl);
graphic_console_set_hwops(d->ssd.dcl.con, d->vga.hw_ops, &d->vga);
update_displaychangelistener(&d->ssd.dcl, GUI_REFRESH_INTERVAL_DEFAULT);
qemu_spice_create_host_primary(&d->ssd);
d->mode = QXL_MODE_VGA;
qemu_spice_display_switch(&d->ssd, d->ssd.ds);
vga_dirty_log_start(&d->vga);
graphic_hw_update(d->vga.con);
} | 0 | [
"CWE-476"
]
| qemu | d52680fc932efb8a2f334cc6993e705ed1e31e99 | 189,741,224,328,569,730,000,000,000,000,000,000,000 | 15 | qxl: check release info object
When releasing spice resources in release_resource() routine,
if release info object 'ext.info' is null, it leads to null
pointer dereference. Add check to avoid it.
Reported-by: Bugs SysSec <[email protected]>
Signed-off-by: Prasad J Pandit <[email protected]>
Message-id: [email protected]
Signed-off-by: Gerd Hoffmann <[email protected]> |
static void register_hooks(apr_pool_t * p)
{
ap_register_output_filter("MOD_SESSION_OUT", session_output_filter,
NULL, AP_FTYPE_CONTENT_SET);
ap_hook_insert_filter(session_insert_output_filter, NULL, NULL,
APR_HOOK_MIDDLE);
ap_hook_insert_error_filter(session_insert_output_filter,
NULL, NULL, APR_HOOK_MIDDLE);
ap_hook_fixups(session_fixups, NULL, NULL, APR_HOOK_MIDDLE);
ap_hook_session_encode(session_identity_encode, NULL, NULL,
APR_HOOK_REALLY_FIRST);
ap_hook_session_decode(session_identity_decode, NULL, NULL,
APR_HOOK_REALLY_LAST);
APR_REGISTER_OPTIONAL_FN(ap_session_get);
APR_REGISTER_OPTIONAL_FN(ap_session_set);
APR_REGISTER_OPTIONAL_FN(ap_session_load);
APR_REGISTER_OPTIONAL_FN(ap_session_save);
} | 0 | [
"CWE-476"
]
| httpd | 67bd9bfe6c38831e14fe7122f1d84391472498f8 | 81,816,317,082,417,330,000,000,000,000,000,000,000 | 18 | mod_session: save one apr_strtok() in session_identity_decode().
When the encoding is invalid (missing '='), no need to parse further.
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1887050 13f79535-47bb-0310-9956-ffa450edef68 |
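The idea behind this fix — refuse to tokenise further when a "key=value" token has no '=' — can be sketched in plain C (illustrative only; decode_pair and its names are hypothetical, not the APR code):
#include <string.h>
/* Reject a malformed "key=value" token instead of parsing further. */
static int decode_pair(char *token, char **key, char **val)
{
    char *eq = strchr(token, '=');
    if (eq == NULL)
        return -1;      /* invalid encoding: no need to parse further */
    *eq = '\0';         /* split in place */
    *key = token;
    *val = eq + 1;
    return 0;
}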
int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
{
return security_ops->secctx_to_secid(secdata, seclen, secid);
} | 0 | []
| linux-2.6 | ee18d64c1f632043a02e6f5ba5e045bb26a5465f | 44,303,721,089,161,330,000,000,000,000,000,000,000 | 4 | KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]
Add a keyctl to install a process's session keyring onto its parent. This
replaces the parent's session keyring. Because the COW credential code does
not permit one process to change another process's credentials directly, the
change is deferred until userspace next starts executing again. Normally this
will be after a wait*() syscall.
To support this, three new security hooks have been provided:
cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in
the blank security creds and key_session_to_parent() - which asks the LSM if
the process may replace its parent's session keyring.
The replacement may only happen if the process has the same ownership details
as its parent, and the process has LINK permission on the session keyring, and
the session keyring is owned by the process, and the LSM permits it.
Note that this requires alteration to each architecture's notify_resume path.
This has been done for all arches barring blackfin, m68k* and xtensa, all of
which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the
replacement to be performed at the point the parent process resumes userspace
execution.
This allows the userspace AFS pioctl emulation to fully emulate newpag() and
the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to
alter the parent process's PAG membership. However, since kAFS doesn't use
PAGs per se, but rather dumps the keys into the session keyring, the session
keyring of the parent must be replaced if, for example, VIOCSETTOK is passed
the newpag flag.
This can be tested with the following program:
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>
#define KEYCTL_SESSION_TO_PARENT 18
#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)
int main(int argc, char **argv)
{
key_serial_t keyring, key;
long ret;
keyring = keyctl_join_session_keyring(argv[1]);
OSERROR(keyring, "keyctl_join_session_keyring");
key = add_key("user", "a", "b", 1, keyring);
OSERROR(key, "add_key");
ret = keyctl(KEYCTL_SESSION_TO_PARENT);
OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");
return 0;
}
Compiled and linked with -lkeyutils, you should see something like:
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
355907932 --alswrv 4043 -1 \_ keyring: _uid.4043
[dhowells@andromeda ~]$ /tmp/newpag
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
1055658746 --alswrv 4043 4043 \_ user: a
[dhowells@andromeda ~]$ /tmp/newpag hello
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: hello
340417692 --alswrv 4043 4043 \_ user: a
Where the test program creates a new session keyring, sticks a user key named
'a' into it and then installs it on its parent.
Signed-off-by: David Howells <[email protected]>
Signed-off-by: James Morris <[email protected]> |
static int client_gen_key_share(gnutls_session_t session, const gnutls_group_entry_st *group, gnutls_buffer_st *extdata)
{
gnutls_datum_t tmp = {NULL, 0};
int ret;
if (group->pk != GNUTLS_PK_EC && group->pk != GNUTLS_PK_ECDH_X25519 &&
group->pk != GNUTLS_PK_ECDH_X448 &&
group->pk != GNUTLS_PK_DH) {
_gnutls_debug_log("Cannot send key share for group %s!\n", group->name);
return GNUTLS_E_INT_RET_0;
}
_gnutls_handshake_log("EXT[%p]: sending key share for %s\n", session, group->name);
ret =
_gnutls_buffer_append_prefix(extdata, 16, group->tls_id);
if (ret < 0)
return gnutls_assert_val(ret);
if (group->pk == GNUTLS_PK_EC) {
gnutls_pk_params_release(&session->key.kshare.ecdh_params);
gnutls_pk_params_init(&session->key.kshare.ecdh_params);
ret = _gnutls_pk_generate_keys(group->pk, group->curve,
&session->key.kshare.ecdh_params, 1);
if (ret < 0)
return gnutls_assert_val(ret);
ret = _gnutls_ecc_ansi_x962_export(group->curve,
session->key.kshare.ecdh_params.params[ECC_X],
session->key.kshare.ecdh_params.params[ECC_Y],
&tmp);
if (ret < 0)
return gnutls_assert_val(ret);
ret =
_gnutls_buffer_append_data_prefix(extdata, 16, tmp.data, tmp.size);
if (ret < 0) {
gnutls_assert();
goto cleanup;
}
session->key.kshare.ecdh_params.algo = group->pk;
session->key.kshare.ecdh_params.curve = group->curve;
ret = 0;
} else if (group->pk == GNUTLS_PK_ECDH_X25519 ||
group->pk == GNUTLS_PK_ECDH_X448) {
gnutls_pk_params_release(&session->key.kshare.ecdhx_params);
gnutls_pk_params_init(&session->key.kshare.ecdhx_params);
ret = _gnutls_pk_generate_keys(group->pk, group->curve,
&session->key.kshare.ecdhx_params, 1);
if (ret < 0)
return gnutls_assert_val(ret);
ret =
_gnutls_buffer_append_data_prefix(extdata, 16,
session->key.kshare.ecdhx_params.raw_pub.data,
session->key.kshare.ecdhx_params.raw_pub.size);
if (ret < 0) {
gnutls_assert();
goto cleanup;
}
session->key.kshare.ecdhx_params.algo = group->pk;
session->key.kshare.ecdhx_params.curve = group->curve;
ret = 0;
} else if (group->pk == GNUTLS_PK_DH) {
/* we need to initialize the group parameters first */
gnutls_pk_params_release(&session->key.kshare.dh_params);
gnutls_pk_params_init(&session->key.kshare.dh_params);
ret = _gnutls_mpi_init_scan_nz(&session->key.kshare.dh_params.params[DH_G],
group->generator->data, group->generator->size);
if (ret < 0)
return gnutls_assert_val(GNUTLS_E_RECEIVED_ILLEGAL_PARAMETER);
ret = _gnutls_mpi_init_scan_nz(&session->key.kshare.dh_params.params[DH_P],
group->prime->data, group->prime->size);
if (ret < 0)
return gnutls_assert_val(GNUTLS_E_RECEIVED_ILLEGAL_PARAMETER);
ret = _gnutls_mpi_init_scan_nz(&session->key.kshare.dh_params.params[DH_Q],
group->q->data, group->q->size);
if (ret < 0)
return gnutls_assert_val(GNUTLS_E_RECEIVED_ILLEGAL_PARAMETER);
session->key.kshare.dh_params.algo = group->pk;
session->key.kshare.dh_params.dh_group = group->id; /* no curve in FFDH, we write the group */
session->key.kshare.dh_params.qbits = *group->q_bits;
session->key.kshare.dh_params.params_nr = 3;
ret = _gnutls_pk_generate_keys(group->pk, 0, &session->key.kshare.dh_params, 1);
if (ret < 0)
return gnutls_assert_val(ret);
ret =
_gnutls_buffer_append_prefix(extdata, 16, group->prime->size);
if (ret < 0)
return gnutls_assert_val(ret);
ret = _gnutls_buffer_append_fixed_mpi(extdata, session->key.kshare.dh_params.params[DH_Y],
group->prime->size);
if (ret < 0)
return gnutls_assert_val(ret);
ret = 0;
}
cleanup:
gnutls_free(tmp.data);
return ret;
} | 0 | [
"CWE-416"
]
| gnutls | 15beb4b193b2714d88107e7dffca781798684e7e | 45,013,412,884,831,200,000,000,000,000,000,000,000 | 117 | key_share: avoid use-after-free around realloc
Signed-off-by: Daiki Ueno <[email protected]> |
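The use-after-free-around-realloc pattern named in the message is avoided by writing only through the pointer realloc returns and never touching the old one (illustrative sketch; append_bytes is a hypothetical helper, not GnuTLS code):
#include <stdlib.h>
#include <string.h>
static int append_bytes(unsigned char **buf, size_t *len,
                        const unsigned char *src, size_t n)
{
    unsigned char *tmp = realloc(*buf, *len + n);
    if (tmp == NULL)
        return -1;               /* *buf is still valid; caller frees it */
    memcpy(tmp + *len, src, n);  /* use tmp, not *buf: the old block may be gone */
    *buf = tmp;
    *len += n;
    return 0;
}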
asmlinkage long do_ni_syscall(struct pt_regs *regs)
{
#ifdef CONFIG_COMPAT
long ret;
if (is_compat_task()) {
ret = compat_arm_syscall(regs);
if (ret != -ENOSYS)
return ret;
}
#endif
if (show_unhandled_signals && printk_ratelimit()) {
pr_info("%s[%d]: syscall %d\n", current->comm,
task_pid_nr(current), (int)regs->syscallno);
dump_instr("", regs);
if (user_mode(regs))
__show_regs(regs);
}
return sys_ni_syscall();
} | 0 | [
"CWE-703"
]
| linux | 9955ac47f4ba1c95ecb6092aeaefb40a22e99268 | 151,307,917,382,259,930,000,000,000,000,000,000,000 | 21 | arm64: don't kill the kernel on a bad esr from el0
Rather than completely killing the kernel if we receive an esr value we
can't deal with in the el0 handlers, send the process a SIGILL and log
the esr value in the hope that we can debug it. If we receive a bad esr
from el1, we'll die() as before.
Signed-off-by: Mark Rutland <[email protected]>
Signed-off-by: Catalin Marinas <[email protected]>
Cc: [email protected] |
static void storageDriverUnlock(void)
{
virMutexUnlock(&driver->lock);
} | 0 | []
| libvirt | 447f69dec47e1b0bd15ecd7cd49a9fd3b050fb87 | 316,938,069,576,283,230,000,000,000,000,000,000,000 | 4 | storage_driver: Unlock object on ACL fail in storagePoolLookupByTargetPath
'virStoragePoolObjListSearch' returns a locked and refed object, thus we
must release it on ACL permission failure.
Fixes: 7aa0e8c0cb8
Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=1984318
Signed-off-by: Peter Krempa <[email protected]>
Reviewed-by: Michal Privoznik <[email protected]> |
int group_concat_key_cmp_with_distinct(void* arg, const void* key1,
const void* key2)
{
Item_func_group_concat *item_func= (Item_func_group_concat*)arg;
for (uint i= 0; i < item_func->arg_count_field; i++)
{
Item *item= item_func->args[i];
/*
If item is a const item then either get_tmp_table_field returns 0
or it is an item over a const table.
*/
if (item->const_item())
continue;
/*
We have to use get_tmp_table_field() instead of
real_item()->get_tmp_table_field() because we want the field in
the temporary table, not the original field
*/
Field *field= item->get_tmp_table_field();
if (!field)
continue;
uint offset= (field->offset(field->table->record[0]) -
field->table->s->null_bytes);
int res= field->cmp((uchar*)key1 + offset, (uchar*)key2 + offset);
if (res)
return res;
}
return 0;
} | 0 | [
"CWE-120"
]
| server | eca207c46293bc72dd8d0d5622153fab4d3fccf1 | 327,611,895,292,065,600,000,000,000,000,000,000,000 | 32 | MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.
Precision should be kept below DECIMAL_MAX_SCALE for computations.
It can be bigger in Item_decimal. I'd fix this too, but it changes the
existing behaviour, so it is problematic to fix. |
inline unsigned int nb_cpus() {
unsigned int res = 1;
#if cimg_OS==2
SYSTEM_INFO sysinfo;
GetSystemInfo(&sysinfo);
res = (unsigned int)sysinfo.dwNumberOfProcessors;
#elif cimg_OS == 1
res = (unsigned int)sysconf(_SC_NPROCESSORS_ONLN);
#endif
return res?res:1U;
} | 0 | [
"CWE-770"
]
| cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 259,420,469,743,656,300,000,000,000,000,000,000,000 | 11 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
void CLASS parseOlympus_CameraSettings (int base, unsigned tag, unsigned type, unsigned len, unsigned dng_writer)
{
// uptag 0x2020
int c;
uchar uc;
switch (tag) {
case 0x0101:
if (dng_writer == nonDNG) {
thumb_offset = get4() + base;
}
break;
case 0x0102:
if (dng_writer == nonDNG) {
thumb_length = get4();
}
break;
case 0x0200:
imgdata.shootinginfo.ExposureMode = get2();
break;
case 0x0202:
imgdata.shootinginfo.MeteringMode = get2();
break;
case 0x0301:
imgdata.shootinginfo.FocusMode =
imgdata.makernotes.olympus.FocusMode[0] = get2();
if (len == 2) {
imgdata.makernotes.olympus.FocusMode[1] = get2();
}
break;
case 0x0304:
for (c = 0; c < 64; c++) {
imgdata.makernotes.olympus.AFAreas[c] = get4();
}
break;
case 0x0305:
for (c = 0; c < 5; c++) {
imgdata.makernotes.olympus.AFPointSelected[c] = getreal(type);
}
break;
case 0x0306:
fread(&uc, 1, 1, ifp);
imgdata.makernotes.olympus.AFFineTune = uc;
break;
case 0x0307:
FORC3 imgdata.makernotes.olympus.AFFineTuneAdj[c] = get2();
break;
case 0x0401:
imgdata.other.FlashEC = getreal(type);
break;
case 0x0507:
imgdata.makernotes.olympus.ColorSpace = get2();
break;
case 0x0600:
imgdata.shootinginfo.DriveMode =
imgdata.makernotes.olympus.DriveMode[0] = get2();
for (c = 1; c < len; c++) {
imgdata.makernotes.olympus.DriveMode[c] = get2();
}
break;
case 0x0604:
imgdata.shootinginfo.ImageStabilization = get4();
break;
}
return;
} | 0 | [
"CWE-400"
]
| LibRaw | e67a9862d10ebaa97712f532eca1eb5e2e410a22 | 49,606,776,350,720,260,000,000,000,000,000,000,000 | 67 | Fixed Secunia Advisory SA86384
- possible infinite loop in unpacked_load_raw()
- possible infinite loop in parse_rollei()
- possible infinite loop in parse_sinar_ia()
Credits: Laurent Delosieres, Secunia Research at Flexera |
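The infinite-loop class described here is avoided by treating a short read or EOF as a hard stop (illustrative sketch; read_records is a hypothetical helper, not LibRaw code):
#include <stdio.h>
static int read_records(FILE *fp, unsigned char *dst, size_t count)
{
    size_t done = 0;
    while (done < count) {
        size_t got = fread(dst + done, 1, count - done, fp);
        if (got == 0)
            return -1;   /* EOF or read error: bail out instead of spinning */
        done += got;
    }
    return 0;
}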
void* LibRaw:: calloc(size_t n,size_t t)
{
void *p = memmgr.calloc(n,t);
if(!p)
throw LIBRAW_EXCEPTION_ALLOC;
return p;
} | 0 | [
"CWE-119",
"CWE-787"
]
| LibRaw | 2f912f5b33582961b1cdbd9fd828589f8b78f21d | 239,091,623,914,376,500,000,000,000,000,000,000,000 | 7 | fixed wrong data_maximum calculation; prevent out-of-buffer in exp_bef |
void qemu_ram_unset_migratable(RAMBlock *rb)
{
rb->flags &= ~RAM_MIGRATABLE;
} | 0 | [
"CWE-787"
]
| qemu | 4bfb024bc76973d40a359476dc0291f46e435442 | 327,602,838,526,123,640,000,000,000,000,000,000,000 | 4 | memory: clamp cached translation in case it points to an MMIO region
In using the address_space_translate_internal API, address_space_cache_init
forgot one piece of advice that can be found in the code for
address_space_translate_internal:
/* MMIO registers can be expected to perform full-width accesses based only
* on their address, without considering adjacent registers that could
* decode to completely different MemoryRegions. When such registers
* exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
* regions overlap wildly. For this reason we cannot clamp the accesses
* here.
*
* If the length is small (as is the case for address_space_ldl/stl),
* everything works fine. If the incoming length is large, however,
* the caller really has to do the clamping through memory_access_size.
*/
address_space_cache_init is exactly one such case where "the incoming length
is large", therefore we need to clamp the resulting length---not to
memory_access_size though, since we are not doing an access yet, but to
the size of the resulting section. This ensures that subsequent accesses
to the cached MemoryRegionSection will be in range.
With this patch, the enclosed testcase notices that the used ring does
not fit into the MSI-X table and prints a "qemu-system-x86_64: Cannot map used"
error.
Signed-off-by: Paolo Bonzini <[email protected]> |
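The clamping described in the message amounts to limiting a requested length to what remains of the target region (illustrative sketch; clamp_len and its parameters are hypothetical, and offset_in_region is assumed to be <= region_size):
#include <stdint.h>
static uint64_t clamp_len(uint64_t requested, uint64_t region_size,
                          uint64_t offset_in_region)
{
    uint64_t remaining = region_size - offset_in_region;
    return requested < remaining ? requested : remaining;
}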
static int smtp_server_connection_output(struct smtp_server_connection *conn)
{
int ret;
e_debug(conn->event, "Sending replies");
smtp_server_connection_ref(conn);
o_stream_cork(conn->conn.output);
ret = smtp_server_connection_flush(conn);
if (ret > 0) {
smtp_server_connection_timeout_reset(conn);
smtp_server_connection_send_replies(conn);
}
if (ret >= 0 && !conn->corked && conn->conn.output != NULL)
ret = o_stream_uncork_flush(conn->conn.output);
if (conn->conn.output != NULL && conn->conn.output->closed) {
smtp_server_connection_handle_output_error(conn);
ret = -1;
}
smtp_server_connection_unref(&conn);
return ret;
} | 0 | [
"CWE-77"
]
| core | 321c339756f9b2b98fb7326359d1333adebb5295 | 23,920,092,741,160,516,000,000,000,000,000,000,000 | 22 | lib-smtp: smtp-server-connection - Fix STARTTLS command injection vulnerability.
The input handler kept reading more commands even though the input was locked by
the STARTTLS command, thereby causing it to read the command pipelined beyond
STARTTLS. This causes a STARTTLS command injection vulnerability. |
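The injection is prevented by discarding anything the client pipelined in plaintext after STARTTLS, so it cannot be replayed into the TLS session (illustrative sketch; struct conn and handle_starttls are hypothetical, not Dovecot code):
#include <string.h>
struct conn {
    char   inbuf[4096];
    size_t inlen;          /* bytes buffered but not yet parsed */
    int    tls_started;
};
static void handle_starttls(struct conn *c)
{
    c->inlen = 0;                          /* drop pre-TLS leftovers */
    memset(c->inbuf, 0, sizeof c->inbuf);
    c->tls_started = 1;                    /* handshake starts from a clean buffer */
}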
static int l2_table_update(struct qcow_state *s,
uint64_t *l2_table, uint64_t l2_table_offset,
unsigned int l2_index, uint64_t cluster_offset)
{
ssize_t ret;
tcmu_dbg("%s: setting %llx[%d] to %llx\n", __func__, l2_table_offset, l2_index, cluster_offset);
l2_table[l2_index] = htobe64(cluster_offset);
ret = pwrite(s->fd,
&(l2_table[l2_index]),
sizeof(uint64_t),
l2_table_offset + (l2_index * sizeof(uint64_t)));
if (ret != sizeof(uint64_t))
tcmu_err("%s: error, L2 writeback failed (%zd)\n", __func__, ret);
fdatasync(s->fd);
return ret;
} | 0 | [
"CWE-200"
]
| tcmu-runner | 8cf8208775022301adaa59c240bb7f93742d1329 | 222,851,737,519,212,970,000,000,000,000,000,000,000 | 20 | removed all check_config callback implementations to avoid security issues
see github issue #194
qcow.c contained an information leak: it could test for the existence of any
file in the system.
file_example.c and file_optical.c also allow testing for the existence of
any file, plus temporarily creating empty new files anywhere in the
file system. This also involves a race condition, if a file didn't exist
in the first place, but would be created in-between by some other
process, then the file would be deleted by the check_config
implementation. |
GF_Err schm_box_write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_SchemeTypeBox *ptr = (GF_SchemeTypeBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->scheme_type);
gf_bs_write_u32(bs, ptr->scheme_version);
if (ptr->flags & 0x000001) {
if (ptr->URI)
gf_bs_write_data(bs, ptr->URI, (u32) strlen(ptr->URI)+1);
else
gf_bs_write_u8(bs, 0);
}
return GF_OK;
} | 0 | [
"CWE-703"
]
| gpac | f19668964bf422cf5a63e4dbe1d3c6c75edadcbb | 23,703,595,021,331,565,000,000,000,000,000,000,000 | 17 | fixed #1879 |
flatpak_dir_read_latest (FlatpakDir *self,
const char *remote,
const char *ref,
char **out_alt_id,
GCancellable *cancellable,
GError **error)
{
g_autofree char *remote_and_ref = NULL;
g_autofree char *alt_id = NULL;
g_autofree char *res = NULL;
/* There may be several remotes with the same branch (if we for
* instance changed the origin) so prepend the current origin to
* make sure we get the right one */
if (remote)
remote_and_ref = g_strdup_printf ("%s:%s", remote, ref);
else
remote_and_ref = g_strdup (ref);
if (!ostree_repo_resolve_rev (self->repo, remote_and_ref, FALSE, &res, error))
return NULL;
if (out_alt_id)
{
g_autoptr(GVariant) commit_data = NULL;
g_autoptr(GVariant) commit_metadata = NULL;
if (!ostree_repo_load_commit (self->repo, res, &commit_data, NULL, error))
return NULL;
commit_metadata = g_variant_get_child_value (commit_data, 0);
g_variant_lookup (commit_metadata, "xa.alt-id", "s", &alt_id);
*out_alt_id = g_steal_pointer (&alt_id);
}
return g_steal_pointer (&res);
} | 0 | [
"CWE-668"
]
| flatpak | cd2142888fc4c199723a0dfca1f15ea8788a5483 | 324,558,140,338,763,600,000,000,000,000,000,000,000 | 39 | Don't expose /proc when running apply_extra
As shown by CVE-2019-5736, it is sometimes possible for the sandbox
app to access outside files using /proc/self/exe. This is not
typically an issue for flatpak as the sandbox runs as the user which
has no permissions to e.g. modify the host files.
However, when installing apps using extra-data into the system repo
we *do* actually run a sandbox as root. So, in this case we disable mounting
/proc in the sandbox, which will neuter attacks like this. |
void nghttp2_session_set_user_data(nghttp2_session *session, void *user_data) {
session->user_data = user_data;
} | 0 | []
| nghttp2 | 0a6ce87c22c69438ecbffe52a2859c3a32f1620f | 146,056,511,129,161,870,000,000,000,000,000,000,000 | 3 | Add nghttp2_option_set_max_outbound_ack |
static int init_dumping(char *database, int init_func(char*))
{
if (mysql_select_db(mysql, database))
{
DB_error(mysql, "when selecting the database");
return 1; /* If --force */
}
if (!path && !opt_xml)
{
if (opt_databases || opt_alldbs)
{
/*
length of table name * 2 (if name contains quotes), 2 quotes and 0
*/
char quoted_database_buf[NAME_LEN*2+3];
char *qdatabase= quote_name(database,quoted_database_buf,opt_quoted);
print_comment(md_result_file, 0,
"\n--\n-- Current Database: %s\n--\n",
fix_identifier_with_newline(qdatabase));
/* Call the view or table specific function */
init_func(qdatabase);
fprintf(md_result_file,"\nUSE %s;\n", qdatabase);
check_io(md_result_file);
}
}
if (extended_insert)
init_dynamic_string_checked(&extended_row, "", 1024, 1024);
return 0;
} /* init_dumping */ | 1 | []
| mysql-server | d982e717aba67227ec40761a21a4211db91aa0e2 | 290,048,595,450,187,300,000,000,000,000,000,000,000 | 32 | Bug#27510150: MYSQLDUMP FAILS FOR SPECIFIC --WHERE CLAUSES
Description: Mysqldump utility fails for specific clauses
used with the option, 'where'.
Analysis: The method "fix_identifier_with_newline()", which
prefixes all occurrences of the newline char ('\n') in the incoming
buffer, does not verify the size of the buffer. The buffer into
which the incoming buffer is copied is limited to 2048 bytes,
and the method does not try to allocate additional memory
for larger incoming buffers.
Fix: The method "fix_identifier_with_newline()" is modified
to fix this issue. |
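The overflow class described above is avoided by sizing the output from the input instead of copying into a fixed 2048-byte buffer (illustrative sketch; escape_newlines is a hypothetical analogue, not the MySQL function):
#include <stdlib.h>
#include <string.h>
static char *escape_newlines(const char *name)
{
    size_t extra = 0;
    for (const char *p = name; *p; p++)
        if (*p == '\n')
            extra++;                       /* one escape char per newline */
    char *out = malloc(strlen(name) + extra + 1);
    if (out == NULL)
        return NULL;
    char *q = out;
    for (const char *p = name; *p; p++) {
        if (*p == '\n')
            *q++ = '\\';
        *q++ = *p;
    }
    *q = '\0';
    return out;
}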
static void isdn_ppp_ccp_reset_ack_rcvd(struct ippp_struct *is,
unsigned char id)
{
struct ippp_ccp_reset_state *rs = is->reset->rs[id];
if (rs) {
if (rs->ta && rs->state == CCPResetSentReq) {
/* Great, we are correct */
if (!rs->expra)
printk(KERN_DEBUG "ippp_ccp: ResetAck received"
" for id %d but not expected\n", id);
} else {
printk(KERN_INFO "ippp_ccp: ResetAck received out of"
"sync for id %d\n", id);
}
if (rs->ta) {
rs->ta = 0;
del_timer(&rs->timer);
}
isdn_ppp_ccp_reset_free_state(is, id);
} else {
printk(KERN_INFO "ippp_ccp: ResetAck received for unknown id"
" %d\n", id);
}
/* Make sure the simple reset stuff uses a new id next time */
is->reset->lastid++;
} | 0 | []
| linux | 4ab42d78e37a294ac7bc56901d563c642e03c4ae | 298,831,038,805,594,130,000,000,000,000,000,000,000 | 27 | ppp, slip: Validate VJ compression slot parameters completely
Currently slhc_init() treats out-of-range values of rslots and tslots
as equivalent to 0, except that if tslots is too large it will
dereference a null pointer (CVE-2015-7799).
Add a range-check at the top of the function and make it return an
ERR_PTR() on error instead of NULL. Change the callers accordingly.
Compile-tested only.
Reported-by: 郭永刚 <[email protected]>
References: http://article.gmane.org/gmane.comp.security.oss.general/17908
Signed-off-by: Ben Hutchings <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static opj_bool pi_next_rpcl(opj_pi_iterator_t * pi)
{
opj_pi_comp_t *comp = NULL;
opj_pi_resolution_t *res = NULL;
long index = 0;
if (!pi->first) {
goto LABEL_SKIP;
} else {
int compno, resno;
pi->first = 0;
pi->dx = 0;
pi->dy = 0;
for (compno = 0; compno < pi->numcomps; compno++) {
comp = &pi->comps[compno];
for (resno = 0; resno < comp->numresolutions; resno++) {
int dx, dy;
res = &comp->resolutions[resno];
dx = comp->dx * (1 << (res->pdx + comp->numresolutions - 1 - resno));
dy = comp->dy * (1 << (res->pdy + comp->numresolutions - 1 - resno));
pi->dx = !pi->dx ? dx : int_min(pi->dx, dx);
pi->dy = !pi->dy ? dy : int_min(pi->dy, dy);
}
}
}
if (!pi->tp_on) {
pi->poc.ty0 = pi->ty0;
pi->poc.tx0 = pi->tx0;
pi->poc.ty1 = pi->ty1;
pi->poc.tx1 = pi->tx1;
}
for (pi->resno = pi->poc.resno0; pi->resno < pi->poc.resno1; pi->resno++) {
for (pi->y = pi->poc.ty0; pi->y < pi->poc.ty1;
pi->y += pi->dy - (pi->y % pi->dy)) {
for (pi->x = pi->poc.tx0; pi->x < pi->poc.tx1;
pi->x += pi->dx - (pi->x % pi->dx)) {
for (pi->compno = pi->poc.compno0; pi->compno < pi->poc.compno1; pi->compno++) {
int levelno;
int trx0, try0;
int trx1, try1;
int rpx, rpy;
int prci, prcj;
comp = &pi->comps[pi->compno];
if (pi->resno >= comp->numresolutions) {
continue;
}
res = &comp->resolutions[pi->resno];
levelno = comp->numresolutions - 1 - pi->resno;
trx0 = int_ceildiv(pi->tx0, comp->dx << levelno);
try0 = int_ceildiv(pi->ty0, comp->dy << levelno);
trx1 = int_ceildiv(pi->tx1, comp->dx << levelno);
try1 = int_ceildiv(pi->ty1, comp->dy << levelno);
rpx = res->pdx + levelno;
rpy = res->pdy + levelno;
if (!((pi->y % (comp->dy << rpy) == 0) || ((pi->y == pi->ty0) &&
((try0 << levelno) % (1 << rpy))))) {
continue;
}
if (!((pi->x % (comp->dx << rpx) == 0) || ((pi->x == pi->tx0) &&
((trx0 << levelno) % (1 << rpx))))) {
continue;
}
if ((res->pw == 0) || (res->ph == 0)) {
continue;
}
if ((trx0 == trx1) || (try0 == try1)) {
continue;
}
prci = int_floordivpow2(int_ceildiv(pi->x, comp->dx << levelno), res->pdx)
- int_floordivpow2(trx0, res->pdx);
prcj = int_floordivpow2(int_ceildiv(pi->y, comp->dy << levelno), res->pdy)
- int_floordivpow2(try0, res->pdy);
pi->precno = prci + prcj * res->pw;
for (pi->layno = pi->poc.layno0; pi->layno < pi->poc.layno1; pi->layno++) {
index = pi->layno * pi->step_l + pi->resno * pi->step_r + pi->compno *
pi->step_c + pi->precno * pi->step_p;
if (!pi->include[index]) {
pi->include[index] = 1;
return OPJ_TRUE;
}
LABEL_SKIP:
;
}
}
}
}
}
return OPJ_FALSE;
} | 1 | [
"CWE-369"
]
| openjpeg | c5bd64ea146162967c29bd2af0cbb845ba3eaaaf | 308,604,278,359,727,300,000,000,000,000,000,000,000 | 93 | [MJ2] To avoid divisions by zero / undefined behaviour on shift
Signed-off-by: Young_X <[email protected]> |
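The two kinds of undefined behaviour named in the message — modulo by zero and over-wide shifts — are ruled out by checking the operands first (illustrative sketch; safe_step and checked_shl are hypothetical helpers, not OpenJPEG code):
#include <limits.h>
static int safe_step(int x, int dx)        /* assumes x >= 0 */
{
    if (dx <= 0)
        return -1;                         /* x % dx would divide by zero */
    return dx - (x % dx);
}
static int checked_shl(int base, int level, int *out)
{
    if (level < 0 || level >= (int)(sizeof(int) * CHAR_BIT) - 1)
        return -1;                         /* shift count would be undefined */
    *out = base << level;
    return 0;
}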
logger_get_line_tag_info (int tags_count, const char **tags,
int *log_level, int *prefix_is_nick)
{
int i, log_level_set, prefix_is_nick_set;
if (log_level)
*log_level = LOGGER_LEVEL_DEFAULT;
if (prefix_is_nick)
*prefix_is_nick = 0;
log_level_set = 0;
prefix_is_nick_set = 0;
for (i = 0; i < tags_count; i++)
{
if (log_level && !log_level_set)
{
if (strcmp (tags[i], "no_log") == 0)
{
/* log disabled on line: set level to -1 */
*log_level = -1;
log_level_set = 1;
}
else if (strncmp (tags[i], "log", 3) == 0)
{
/* set log level for line */
if (isdigit ((unsigned char)tags[i][3]))
{
*log_level = (tags[i][3] - '0');
log_level_set = 1;
}
}
}
if (prefix_is_nick && !prefix_is_nick_set)
{
if (strncmp (tags[i], "prefix_nick", 11) == 0)
{
*prefix_is_nick = 1;
prefix_is_nick_set = 1;
}
}
}
} | 0 | [
"CWE-119",
"CWE-787"
]
| weechat | f105c6f0b56fb5687b2d2aedf37cb1d1b434d556 | 42,925,856,013,064,375,000,000,000,000,000,000,000 | 43 | logger: call strftime before replacing buffer local variables |
virtual void ms_handle_fast_accept(Connection *con) {} | 0 | [
"CWE-287",
"CWE-284"
]
| ceph | 5ead97120e07054d80623dada90a5cc764c28468 | 270,075,581,415,266,130,000,000,000,000,000,000,000 | 1 | auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random() |
get_highlight_default(void)
{
int i;
i = findoption((char_u *)"hl");
if (i >= 0)
return options[i].def_val[VI_DEFAULT];
return (char_u *)NULL;
} | 0 | [
"CWE-20"
]
| vim | d0b5138ba4bccff8a744c99836041ef6322ed39a | 260,757,513,646,335,550,000,000,000,000,000,000,000 | 9 | patch 8.0.0056
Problem: When setting 'filetype' there is no check for a valid name.
Solution: Only allow valid characters in 'filetype', 'syntax' and 'keymap'. |
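Restricting an option value to a safe character set, as the fix does for 'filetype', can be expressed as a small validator (illustrative sketch; valid_name and the accepted set are assumptions, not Vim's exact rules):
#include <ctype.h>
static int valid_name(const char *s)
{
    if (*s == '\0')
        return 0;
    for (; *s; s++)
        if (!isalnum((unsigned char)*s)
                && *s != '-' && *s != '_' && *s != '.')
            return 0;      /* reject anything outside the allowed set */
    return 1;
}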
guessContentTypeFromTable(struct table2 *table, char *filename)
{
struct table2 *t;
char *p;
if (table == NULL)
return NULL;
p = &filename[strlen(filename) - 1];
while (filename < p && *p != '.')
p--;
if (p == filename)
return NULL;
p++;
for (t = table; t->item1; t++) {
if (!strcmp(p, t->item1))
return t->item2;
}
for (t = table; t->item1; t++) {
if (!strcasecmp(p, t->item1))
return t->item2;
}
return NULL;
} | 0 | [
"CWE-119"
]
| w3m | ba9d78faeba9024c3e8840579c3b0e959ae2cb0f | 168,751,077,973,286,570,000,000,000,000,000,000,000 | 22 | Prevent global-buffer-overflow in parseURL()
Bug-Debian: https://github.com/tats/w3m/issues/41 |
BOOL INSTAPI SQLCreateDataSourceW( HWND hwndParent, LPCWSTR lpszDSN )
{
BOOL ret;
char *ms = _multi_string_alloc_and_copy( lpszDSN );
inst_logClear();
ret = SQLCreateDataSource( hwndParent, ms );
free( ms );
return ret;
} | 0 | [
"CWE-119",
"CWE-369"
]
| unixODBC | 45ef78e037f578b15fc58938a3a3251655e71d6f | 232,313,348,950,250,100,000,000,000,000,000,000,000 | 13 | New Pre Source |
static int nf_tables_delrule(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_next(info->net);
u8 family = info->nfmsg->nfgen_family;
struct nft_chain *chain = NULL;
struct net *net = info->net;
struct nft_table *table;
struct nft_rule *rule;
struct nft_ctx ctx;
int err = 0;
table = nft_table_lookup(net, nla[NFTA_RULE_TABLE], family, genmask,
NETLINK_CB(skb).portid);
if (IS_ERR(table)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_TABLE]);
return PTR_ERR(table);
}
if (nla[NFTA_RULE_CHAIN]) {
chain = nft_chain_lookup(net, table, nla[NFTA_RULE_CHAIN],
genmask);
if (IS_ERR(chain)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]);
return PTR_ERR(chain);
}
if (nft_chain_is_bound(chain))
return -EOPNOTSUPP;
}
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, chain, nla);
if (chain) {
if (nla[NFTA_RULE_HANDLE]) {
rule = nft_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
if (IS_ERR(rule)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_HANDLE]);
return PTR_ERR(rule);
}
err = nft_delrule(&ctx, rule);
} else if (nla[NFTA_RULE_ID]) {
rule = nft_rule_lookup_byid(net, chain, nla[NFTA_RULE_ID]);
if (IS_ERR(rule)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_ID]);
return PTR_ERR(rule);
}
err = nft_delrule(&ctx, rule);
} else {
err = nft_delrule_by_chain(&ctx);
}
} else {
list_for_each_entry(chain, &table->chains, list) {
if (!nft_is_active_next(net, chain))
continue;
ctx.chain = chain;
err = nft_delrule_by_chain(&ctx);
if (err < 0)
break;
}
}
return err;
} | 0 | [
"CWE-400",
"CWE-703"
]
| linux | e02f0d3970404bfea385b6edb86f2d936db0ea2b | 142,198,909,769,237,900,000,000,000,000,000,000,000 | 67 | netfilter: nf_tables: disallow binding to already bound chain
Update nft_data_init() to report EINVAL if chain is already bound.
Fixes: d0e2c7de92c7 ("netfilter: nf_tables: add NFT_CHAIN_BINDING")
Reported-by: Gwangun Jung <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]> |
int tcp_test(const char* ip_str, const short port)
{
int sock, i;
struct sockaddr_in s_in;
int packetsize = 1024;
unsigned char packet[packetsize];
struct timeval tv, tv2, tv3;
int caplen = 0;
int times[REQUESTS];
int min, avg, max, len;
struct net_hdr nh;
tv3.tv_sec=0;
tv3.tv_usec=1;
s_in.sin_family = PF_INET;
s_in.sin_port = htons(port);
if (!inet_aton(ip_str, &s_in.sin_addr))
return -1;
if ((sock = socket(s_in.sin_family, SOCK_STREAM, IPPROTO_TCP)) == -1)
return -1;
/* avoid blocking on reading the socket */
if( fcntl( sock, F_SETFL, O_NONBLOCK ) < 0 )
{
perror( "fcntl(O_NONBLOCK) failed" );
return( 1 );
}
gettimeofday( &tv, NULL );
while (1) //waiting for relayed packet
{
if (connect(sock, (struct sockaddr*) &s_in, sizeof(s_in)) == -1)
{
if(errno != EINPROGRESS && errno != EALREADY)
{
perror("connect");
close(sock);
printf("Failed to connect\n");
return -1;
}
}
else
{
gettimeofday( &tv2, NULL );
break;
}
gettimeofday( &tv2, NULL );
//wait 3000ms for a successful connect
if (((tv2.tv_sec*1000000 - tv.tv_sec*1000000) + (tv2.tv_usec - tv.tv_usec)) > (3000*1000))
{
printf("Connection timed out\n");
close(sock);
return(-1);
}
usleep(10);
}
PCT; printf("TCP connection successful\n");
//trying to identify airserv-ng
memset(&nh, 0, sizeof(nh));
// command: GET_CHAN
nh.nh_type = 2;
nh.nh_len = htonl(0);
if (send(sock, &nh, sizeof(nh), 0) != sizeof(nh))
{
perror("send");
return -1;
}
gettimeofday( &tv, NULL );
i=0;
while (1) //waiting for GET_CHAN answer
{
caplen = read(sock, &nh, sizeof(nh));
if(caplen == -1)
{
if( errno != EAGAIN )
{
perror("read");
return -1;
}
}
if( (unsigned)caplen == sizeof(nh))
{
len = ntohl(nh.nh_len);
if (len > 1024 || len < 0)
continue;
if( nh.nh_type == 1 && i==0 )
{
i=1;
caplen = read(sock, packet, len);
if(caplen == len)
{
i=2;
break;
}
else
{
i=0;
}
}
else
{
caplen = read(sock, packet, len);
}
}
gettimeofday( &tv2, NULL );
//wait 1000ms for an answer
if (((tv2.tv_sec*1000000 - tv.tv_sec*1000000) + (tv2.tv_usec - tv.tv_usec)) > (1000*1000))
{
break;
}
if(caplen == -1)
usleep(10);
}
if(i==2)
{
PCT; printf("airserv-ng found\n");
}
else
{
PCT; printf("airserv-ng NOT found\n");
}
close(sock);
for(i=0; i<REQUESTS; i++)
{
if ((sock = socket(s_in.sin_family, SOCK_STREAM, IPPROTO_TCP)) == -1)
return -1;
/* avoid blocking on reading the socket */
if( fcntl( sock, F_SETFL, O_NONBLOCK ) < 0 )
{
perror( "fcntl(O_NONBLOCK) failed" );
return( 1 );
}
usleep(1000);
gettimeofday( &tv, NULL );
while (1) //waiting for relayed packet
{
if (connect(sock, (struct sockaddr*) &s_in, sizeof(s_in)) == -1)
{
if(errno != EINPROGRESS && errno != EALREADY)
{
perror("connect");
close(sock);
printf("Failed to connect\n");
return -1;
}
}
else
{
gettimeofday( &tv2, NULL );
break;
}
gettimeofday( &tv2, NULL );
//wait 1000ms for a successful connect
if (((tv2.tv_sec*1000000 - tv.tv_sec*1000000) + (tv2.tv_usec - tv.tv_usec)) > (1000*1000))
{
break;
}
//simple "high-precision" usleep
select(1, NULL, NULL, NULL, &tv3);
}
times[i] = ((tv2.tv_sec*1000000 - tv.tv_sec*1000000) + (tv2.tv_usec - tv.tv_usec));
printf( "\r%d/%d\r", i, REQUESTS);
fflush(stdout);
close(sock);
}
min = INT_MAX;
avg = 0;
max = 0;
for(i=0; i<REQUESTS; i++)
{
if(times[i] < min) min = times[i];
if(times[i] > max) max = times[i];
avg += times[i];
}
avg /= REQUESTS;
PCT; printf("ping %s:%d (min/avg/max): %.3fms/%.3fms/%.3fms\n", ip_str, port, min/1000.0, avg/1000.0, max/1000.0);
return 0;
} | 0 | [
"CWE-787"
]
| aircrack-ng | 091b153f294b9b695b0b2831e65936438b550d7b | 277,492,381,212,296,460,000,000,000,000,000,000,000 | 206 | Aireplay-ng: Fixed tcp_test stack overflow (Closes #14 on GitHub).
git-svn-id: http://svn.aircrack-ng.org/trunk@2417 28c6078b-6c39-48e3-add9-af49d547ecab |
static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
u64 features;
/*
* It's not allowed to change the features after they have
* been negotiated.
*/
if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
return -EBUSY;
if (copy_from_user(&features, featurep, sizeof(features)))
return -EFAULT;
if (vdpa_set_features(vdpa, features))
return -EINVAL;
return 0;
} | 0 | [
"CWE-416"
]
| linux | f6bbf0010ba004f5e90c7aefdebc0ee4bd3283b9 | 213,755,958,905,908,930,000,000,000,000,000,000,000 | 21 | vhost-vdpa: fix use-after-free of v->config_ctx
When the 'v->config_ctx' eventfd_ctx reference is released we didn't
set it to NULL. So if the same character device (e.g. /dev/vhost-vdpa-0)
is re-opened, the 'v->config_ctx' is invalid and calling again
vhost_vdpa_config_put() causes use-after-free issues like the
following refcount_t underflow:
refcount_t: underflow; use-after-free.
WARNING: CPU: 2 PID: 872 at lib/refcount.c:28 refcount_warn_saturate+0xae/0xf0
RIP: 0010:refcount_warn_saturate+0xae/0xf0
Call Trace:
eventfd_ctx_put+0x5b/0x70
vhost_vdpa_release+0xcd/0x150 [vhost_vdpa]
__fput+0x8e/0x240
____fput+0xe/0x10
task_work_run+0x66/0xa0
exit_to_user_mode_prepare+0x118/0x120
syscall_exit_to_user_mode+0x21/0x50
? __x64_sys_close+0x12/0x40
do_syscall_64+0x45/0x50
entry_SYSCALL_64_after_hwframe+0x44/0xae
Fixes: 776f395004d8 ("vhost_vdpa: Support config interrupt in vdpa")
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Stefano Garzarella <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Michael S. Tsirkin <[email protected]>
Reviewed-by: Zhu Lingshan <[email protected]>
Acked-by: Jason Wang <[email protected]> |
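The fix pattern here is release-and-clear: NULL the owner's pointer when the reference is dropped, so a second release on re-open becomes a no-op (illustrative sketch; the types and names are hypothetical, not the kernel's):
#include <stdlib.h>
struct eventctx { int refs; };
static void eventctx_put(struct eventctx *c)
{
    if (--c->refs == 0)
        free(c);
}
static void config_put(struct eventctx **slot)
{
    if (*slot) {
        eventctx_put(*slot);
        *slot = NULL;      /* a later call sees NULL and does nothing */
    }
}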
OJPEGPreDecodeSkipScanlines(TIFF* tif)
{
static const char module[]="OJPEGPreDecodeSkipScanlines";
OJPEGState* sp=(OJPEGState*)tif->tif_data;
uint32 m;
if (sp->skip_buffer==NULL)
{
sp->skip_buffer=_TIFFmalloc(sp->bytes_per_line);
if (sp->skip_buffer==NULL)
{
TIFFErrorExt(tif->tif_clientdata,module,"Out of memory");
return(0);
}
}
for (m=0; m<sp->lines_per_strile; m++)
{
if (jpeg_read_scanlines_encap(sp,&(sp->libjpeg_jpeg_decompress_struct),&sp->skip_buffer,1)==0)
return(0);
}
return(1);
} | 0 | [
"CWE-369"
]
| libtiff | 43bc256d8ae44b92d2734a3c5bc73957a4d7c1ec | 257,189,592,853,330,620,000,000,000,000,000,000,000 | 21 | * libtiff/tif_ojpeg.c: make OJPEGDecode() early exit in case of failure in
OJPEGPreDecode(). This will avoid a divide by zero, and potential other issues.
Reported by Agostino Sarubbo.
Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2611 |