| func (string, lengths 0-484k) | target (int64, 0-1) | cwe (list, lengths 0-4) | project (string, 799 classes) | commit_id (string, length 40) | hash (float64, ~1.2e24-3.4e38) | size (int64, 1-24k) | message (string, lengths 0-13.3k) |
|---|---|---|---|---|---|---|---|
_archive_write_disk_header(struct archive *_a, struct archive_entry *entry)
{
struct archive_write_disk *a = (struct archive_write_disk *)_a;
struct fixup_entry *fe;
const char *linkname;
int ret, r;
archive_check_magic(&a->archive, ARCHIVE_WRITE_DISK_MAGIC,
ARCHIVE_STATE_HEADER | ARCHIVE_STATE_DATA,
"archive_write_disk_header");
archive_clear_error(&a->archive);
if (a->archive.state & ARCHIVE_STATE_DATA) {
r = _archive_write_disk_finish_entry(&a->archive);
if (r == ARCHIVE_FATAL)
return (r);
}
/* Set up for this particular entry. */
a->pst = NULL;
a->current_fixup = NULL;
a->deferred = 0;
if (a->entry) {
archive_entry_free(a->entry);
a->entry = NULL;
}
a->entry = archive_entry_clone(entry);
a->fd = -1;
a->fd_offset = 0;
a->offset = 0;
a->restore_pwd = -1;
a->uid = a->user_uid;
a->mode = archive_entry_mode(a->entry);
if (archive_entry_size_is_set(a->entry))
a->filesize = archive_entry_size(a->entry);
else
a->filesize = -1;
archive_strcpy(&(a->_name_data), archive_entry_pathname(a->entry));
a->name = a->_name_data.s;
archive_clear_error(&a->archive);
/*
* Clean up the requested path. This is necessary for correct
* dir restores; the dir restore logic otherwise gets messed
* up by nonsense like "dir/.".
*/
ret = cleanup_pathname(a);
if (ret != ARCHIVE_OK)
return (ret);
/*
* Check if we have a hardlink that points to itself.
*/
linkname = archive_entry_hardlink(a->entry);
if (linkname != NULL && strcmp(a->name, linkname) == 0) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Skipping hardlink pointing to itself: %s",
a->name);
return (ARCHIVE_WARN);
}
/*
* Query the umask so we get predictable mode settings.
* This gets done on every call to _write_header in case the
* user edits their umask during the extraction for some
* reason.
*/
umask(a->user_umask = umask(0));
/* Figure out what we need to do for this entry. */
a->todo = TODO_MODE_BASE;
if (a->flags & ARCHIVE_EXTRACT_PERM) {
a->todo |= TODO_MODE_FORCE; /* Be pushy about permissions. */
/*
* SGID requires an extra "check" step because we
* cannot easily predict the GID that the system will
* assign. (Different systems assign GIDs to files
* based on a variety of criteria, including process
* credentials and the gid of the enclosing
* directory.) We can only restore the SGID bit if
* the file has the right GID, and we only know the
* GID if we either set it (see set_ownership) or if
* we've actually called stat() on the file after it
* was restored. Since there are several places at
* which we might verify the GID, we need a TODO bit
* to keep track.
*/
if (a->mode & S_ISGID)
a->todo |= TODO_SGID | TODO_SGID_CHECK;
/*
* Verifying the SUID is simpler, but can still be
* done in multiple ways, hence the separate "check" bit.
*/
if (a->mode & S_ISUID)
a->todo |= TODO_SUID | TODO_SUID_CHECK;
} else {
/*
* User didn't request full permissions, so don't
* restore SUID, SGID bits and obey umask.
*/
a->mode &= ~S_ISUID;
a->mode &= ~S_ISGID;
a->mode &= ~S_ISVTX;
a->mode &= ~a->user_umask;
}
if (a->flags & ARCHIVE_EXTRACT_OWNER)
a->todo |= TODO_OWNER;
if (a->flags & ARCHIVE_EXTRACT_TIME)
a->todo |= TODO_TIMES;
if (a->flags & ARCHIVE_EXTRACT_ACL) {
#if ARCHIVE_ACL_DARWIN
/*
* On MacOS, platform ACLs get stored in mac_metadata, too.
* If we intend to extract mac_metadata and it is present
* we skip extracting libarchive NFSv4 ACLs.
*/
size_t metadata_size;
if ((a->flags & ARCHIVE_EXTRACT_MAC_METADATA) == 0 ||
archive_entry_mac_metadata(a->entry,
&metadata_size) == NULL || metadata_size == 0)
#endif
#if ARCHIVE_ACL_LIBRICHACL
/*
* RichACLs are stored in an extended attribute.
* If we intend to extract extended attributes and have this
* attribute we skip extracting libarchive NFSv4 ACLs.
*/
short extract_acls = 1;
if (a->flags & ARCHIVE_EXTRACT_XATTR && (
archive_entry_acl_types(a->entry) &
ARCHIVE_ENTRY_ACL_TYPE_NFS4)) {
const char *attr_name;
const void *attr_value;
size_t attr_size;
int i = archive_entry_xattr_reset(a->entry);
while (i--) {
archive_entry_xattr_next(a->entry, &attr_name,
&attr_value, &attr_size);
if (attr_name != NULL && attr_value != NULL &&
attr_size > 0 && strcmp(attr_name,
"trusted.richacl") == 0) {
extract_acls = 0;
break;
}
}
}
if (extract_acls)
#endif
#if ARCHIVE_ACL_DARWIN || ARCHIVE_ACL_LIBRICHACL
{
#endif
if (archive_entry_filetype(a->entry) == AE_IFDIR)
a->deferred |= TODO_ACLS;
else
a->todo |= TODO_ACLS;
#if ARCHIVE_ACL_DARWIN || ARCHIVE_ACL_LIBRICHACL
}
#endif
}
if (a->flags & ARCHIVE_EXTRACT_MAC_METADATA) {
if (archive_entry_filetype(a->entry) == AE_IFDIR)
a->deferred |= TODO_MAC_METADATA;
else
a->todo |= TODO_MAC_METADATA;
}
#if defined(__APPLE__) && defined(UF_COMPRESSED) && defined(HAVE_ZLIB_H)
if ((a->flags & ARCHIVE_EXTRACT_NO_HFS_COMPRESSION) == 0) {
unsigned long set, clear;
archive_entry_fflags(a->entry, &set, &clear);
if ((set & ~clear) & UF_COMPRESSED) {
a->todo |= TODO_HFS_COMPRESSION;
a->decmpfs_block_count = (unsigned)-1;
}
}
if ((a->flags & ARCHIVE_EXTRACT_HFS_COMPRESSION_FORCED) != 0 &&
(a->mode & AE_IFMT) == AE_IFREG && a->filesize > 0) {
a->todo |= TODO_HFS_COMPRESSION;
a->decmpfs_block_count = (unsigned)-1;
}
{
const char *p;
/* Check if the current file name is a type of the
* resource fork file. */
p = strrchr(a->name, '/');
if (p == NULL)
p = a->name;
else
p++;
if (p[0] == '.' && p[1] == '_') {
/* Do not compress "._XXX" files. */
a->todo &= ~TODO_HFS_COMPRESSION;
if (a->filesize > 0)
a->todo |= TODO_APPLEDOUBLE;
}
}
#endif
if (a->flags & ARCHIVE_EXTRACT_XATTR) {
#if ARCHIVE_XATTR_DARWIN
/*
* On MacOS, extended attributes get stored in mac_metadata,
* too. If we intend to extract mac_metadata and it is present
* we skip extracting extended attributes.
*/
size_t metadata_size;
if ((a->flags & ARCHIVE_EXTRACT_MAC_METADATA) == 0 ||
archive_entry_mac_metadata(a->entry,
&metadata_size) == NULL || metadata_size == 0)
#endif
a->todo |= TODO_XATTR;
}
if (a->flags & ARCHIVE_EXTRACT_FFLAGS)
a->todo |= TODO_FFLAGS;
if (a->flags & ARCHIVE_EXTRACT_SECURE_SYMLINKS) {
ret = check_symlinks(a);
if (ret != ARCHIVE_OK)
return (ret);
}
#if defined(HAVE_FCHDIR) && defined(PATH_MAX)
/* If path exceeds PATH_MAX, shorten the path. */
edit_deep_directories(a);
#endif
ret = restore_entry(a);
#if defined(__APPLE__) && defined(UF_COMPRESSED) && defined(HAVE_ZLIB_H)
/*
* Check if the filesystem the file is restoring on supports
* HFS+ Compression. If not, cancel HFS+ Compression.
*/
if (a->todo | TODO_HFS_COMPRESSION) {
/*
* NOTE: UF_COMPRESSED is ignored even if the filesystem
* supports HFS+ Compression because the file should
* have at least an extended attribute "com.apple.decmpfs"
* before the flag is set to indicate that the file have
* been compressed. If the filesystem does not support
* HFS+ Compression the system call will fail.
*/
if (a->fd < 0 || fchflags(a->fd, UF_COMPRESSED) != 0)
a->todo &= ~TODO_HFS_COMPRESSION;
}
#endif
/*
* TODO: There are rumours that some extended attributes must
* be restored before file data is written. If this is true,
* then we either need to write all extended attributes both
* before and after restoring the data, or find some rule for
* determining which must go first and which last. Due to the
* many ways people are using xattrs, this may prove to be an
* intractable problem.
*/
#ifdef HAVE_FCHDIR
/* If we changed directory above, restore it here. */
if (a->restore_pwd >= 0) {
r = fchdir(a->restore_pwd);
if (r != 0) {
archive_set_error(&a->archive, errno,
"chdir() failure");
ret = ARCHIVE_FATAL;
}
close(a->restore_pwd);
a->restore_pwd = -1;
}
#endif
/*
* Fixup uses the unedited pathname from archive_entry_pathname(),
* because it is relative to the base dir and the edited path
* might be relative to some intermediate dir as a result of the
* deep restore logic.
*/
if (a->deferred & TODO_MODE) {
fe = current_fixup(a, archive_entry_pathname(entry));
if (fe == NULL)
return (ARCHIVE_FATAL);
fe->filetype = archive_entry_filetype(entry);
fe->fixup |= TODO_MODE_BASE;
fe->mode = a->mode;
}
if ((a->deferred & TODO_TIMES)
&& (archive_entry_mtime_is_set(entry)
|| archive_entry_atime_is_set(entry))) {
fe = current_fixup(a, archive_entry_pathname(entry));
if (fe == NULL)
return (ARCHIVE_FATAL);
fe->filetype = archive_entry_filetype(entry);
fe->mode = a->mode;
fe->fixup |= TODO_TIMES;
if (archive_entry_atime_is_set(entry)) {
fe->atime = archive_entry_atime(entry);
fe->atime_nanos = archive_entry_atime_nsec(entry);
} else {
/* If atime is unset, use start time. */
fe->atime = a->start_time;
fe->atime_nanos = 0;
}
if (archive_entry_mtime_is_set(entry)) {
fe->mtime = archive_entry_mtime(entry);
fe->mtime_nanos = archive_entry_mtime_nsec(entry);
} else {
/* If mtime is unset, use start time. */
fe->mtime = a->start_time;
fe->mtime_nanos = 0;
}
if (archive_entry_birthtime_is_set(entry)) {
fe->birthtime = archive_entry_birthtime(entry);
fe->birthtime_nanos = archive_entry_birthtime_nsec(
entry);
} else {
/* If birthtime is unset, use mtime. */
fe->birthtime = fe->mtime;
fe->birthtime_nanos = fe->mtime_nanos;
}
}
if (a->deferred & TODO_ACLS) {
fe = current_fixup(a, archive_entry_pathname(entry));
if (fe == NULL)
return (ARCHIVE_FATAL);
fe->filetype = archive_entry_filetype(entry);
fe->fixup |= TODO_ACLS;
archive_acl_copy(&fe->acl, archive_entry_acl(entry));
}
if (a->deferred & TODO_MAC_METADATA) {
const void *metadata;
size_t metadata_size;
metadata = archive_entry_mac_metadata(a->entry, &metadata_size);
if (metadata != NULL && metadata_size > 0) {
fe = current_fixup(a, archive_entry_pathname(entry));
if (fe == NULL)
return (ARCHIVE_FATAL);
fe->filetype = archive_entry_filetype(entry);
fe->mac_metadata = malloc(metadata_size);
if (fe->mac_metadata != NULL) {
memcpy(fe->mac_metadata, metadata,
metadata_size);
fe->mac_metadata_size = metadata_size;
fe->fixup |= TODO_MAC_METADATA;
}
}
}
if (a->deferred & TODO_FFLAGS) {
fe = current_fixup(a, archive_entry_pathname(entry));
if (fe == NULL)
return (ARCHIVE_FATAL);
fe->filetype = archive_entry_filetype(entry);
fe->fixup |= TODO_FFLAGS;
/* TODO: Complete this.. defer fflags from below. */
}
/* We've created the object and are ready to pour data into it. */
if (ret >= ARCHIVE_WARN)
a->archive.state = ARCHIVE_STATE_DATA;
/*
* If it's not open, tell our client not to try writing.
* In particular, dirs, links, etc, don't get written to.
*/
if (a->fd < 0) {
archive_entry_set_size(entry, 0);
a->filesize = 0;
}
return (ret);
}
| 0 | ["CWE-59"] | libarchive | ede459d2ebb879f5eedb6f7abea203be0b334230 | 202,557,097,360,966,040,000,000,000,000,000,000,000 | 372 |
archive_write_disk_posix: fix writing fflags broken in 8a1bd5c
The fixup list was erroneously assumed to be directories only.
Only in the case of critical file flags modification (e.g. SF_IMMUTABLE
on BSD systems), other file types (e.g. regular files or symbolic links)
may be added to the fixup list. We still need to verify that we are writing
to the correct file type, so compare the archive entry file type with
the file type of the file to be modified.
Fixes #1617
|
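The commit message above says the fix is to compare the archive entry's file type with the type of the file actually on disk before applying a deferred critical-fflags fixup. A minimal sketch of that check, assuming a hypothetical `fixup_type_matches()` helper built on `lstat()`; this illustrates the idea, not the exact libarchive hunk.

```c
#include <sys/stat.h>

/* Return nonzero only when the file at `path` still has the file type that
 * was recorded from the archive entry; a mismatch (or a failed lstat()) means
 * the deferred fflags fixup should be skipped rather than applied. */
static int fixup_type_matches(const char *path, mode_t recorded_type)
{
	struct stat st;

	if (lstat(path, &st) != 0)
		return 0;	/* cannot verify, so do not touch the file */
	/* Compare only the file-type bits, not the permission bits. */
	return (st.st_mode & S_IFMT) == (recorded_type & S_IFMT);
}
```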
static ssize_t firmware_loading_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
int loading = 0;
mutex_lock(&fw_lock);
if (fw_sysfs->fw_priv)
loading = fw_sysfs_loading(fw_sysfs->fw_priv);
mutex_unlock(&fw_lock);
return sprintf(buf, "%d\n", loading);
}
| 1 | ["CWE-787"] | linux | aa838896d87af561a33ecefea1caa4c15a68bc47 | 161,356,541,647,921,970,000,000,000,000,000,000,000 | 13 |
drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions
Convert the various sprintf family calls in sysfs device show functions
to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety.
Done with:
$ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 .
And cocci script:
$ cat sysfs_emit_dev.cocci
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- strcpy(buf, chr);
+ sysfs_emit(buf, chr);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
- len += scnprintf(buf + len, PAGE_SIZE - len,
+ len += sysfs_emit_at(buf, len,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
...
- strcpy(buf, chr);
- return strlen(buf);
+ return sysfs_emit(buf, chr);
}
Signed-off-by: Joe Perches <[email protected]>
Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
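The coccinelle rules above mechanically rewrite `sprintf`/`snprintf`/`scnprintf` in device `show()` callbacks into `sysfs_emit()`/`sysfs_emit_at()`, which bound the output to PAGE_SIZE. A minimal before/after sketch of the resulting pattern, in kernel style (it only compiles inside a kernel tree); `foo_show()` and the constant value are hypothetical stand-ins.

```c
#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t foo_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int value = 42;	/* stand-in for a driver-specific read */

	/* before: return sprintf(buf, "%d\n", value); */
	return sysfs_emit(buf, "%d\n", value);	/* bounded to PAGE_SIZE */
}
```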
static void __guc_ads_init(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
struct drm_i915_private *i915 = gt->i915;
struct __guc_ads_blob *blob = guc->ads_blob;
const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
u32 base;
u8 engine_class;
/* GuC scheduling policies */
guc_policies_init(&blob->policies);
/*
* GuC expects a per-engine-class context image and size
* (minus hwsp and ring context). The context image will be
* used to reinitialize engines after a reset. It must exist
* and be pinned in the GGTT, so that the address won't change after
* we have told GuC where to find it. The context size will be used
* to validate that the LRC base + size fall within allowed GGTT.
*/
for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) {
if (engine_class == OTHER_CLASS)
continue;
/*
* TODO: Set context pointer to default state to allow
* GuC to re-init guilty contexts after internal reset.
*/
blob->ads.golden_context_lrca[engine_class] = 0;
blob->ads.eng_state_size[engine_class] =
intel_engine_context_size(guc_to_gt(guc),
engine_class) -
skipped_size;
}
/* System info */
blob->system_info.engine_enabled_masks[RENDER_CLASS] = 1;
blob->system_info.engine_enabled_masks[COPY_ENGINE_CLASS] = 1;
blob->system_info.engine_enabled_masks[VIDEO_DECODE_CLASS] = VDBOX_MASK(gt);
blob->system_info.engine_enabled_masks[VIDEO_ENHANCEMENT_CLASS] = VEBOX_MASK(gt);
blob->system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_SLICE_ENABLED] =
hweight8(gt->info.sseu.slice_mask);
blob->system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_VDBOX_SFC_SUPPORT_MASK] =
gt->info.vdbox_sfc_access;
if (INTEL_GEN(i915) >= 12 && !IS_DGFX(i915)) {
u32 distdbreg = intel_uncore_read(gt->uncore,
GEN12_DIST_DBS_POPULATED);
blob->system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI] =
((distdbreg >> GEN12_DOORBELLS_PER_SQIDI_SHIFT) &
GEN12_DOORBELLS_PER_SQIDI) + 1;
}
guc_mapping_table_init(guc_to_gt(guc), &blob->system_info);
base = intel_guc_ggtt_offset(guc, guc->ads_vma);
/* Clients info */
guc_ct_pool_entries_init(blob->ct_pool, ARRAY_SIZE(blob->ct_pool));
blob->clients_info.clients_num = 1;
blob->clients_info.ct_pool_addr = base + ptr_offset(blob, ct_pool);
blob->clients_info.ct_pool_count = ARRAY_SIZE(blob->ct_pool);
/* ADS */
blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
blob->ads.gt_system_info = base + ptr_offset(blob, system_info);
blob->ads.clients_info = base + ptr_offset(blob, clients_info);
/* Private Data */
blob->ads.private_data = base + guc_ads_private_data_offset(guc);
i915_gem_object_flush_map(guc->ads_vma->obj);
}
| 0 | ["CWE-20", "CWE-190"] | linux | c784e5249e773689e38d2bc1749f08b986621a26 | 97,108,339,229,089,680,000,000,000,000,000,000,000 | 74 |
drm/i915/guc: Update to use firmware v49.0.1
The latest GuC firmware includes a number of interface changes that
require driver updates to match.
* Starting from Gen11, the ID to be provided to GuC needs to contain
the engine class in bits [0..2] and the instance in bits [3..6].
NOTE: this patch breaks pointer dereferences in some existing GuC
functions that use the guc_id to dereference arrays but these functions
are not used for now as we have GuC submission disabled and we will
update these functions in follow up patch which requires new IDs.
* The new GuC requires the additional data structure (ADS) and associated
'private_data' pointer to be setup. This is basically a scratch area
of memory that the GuC owns. The size is read from the CSS header.
* There is now a physical to logical engine mapping table in the ADS
which needs to be configured in order for the firmware to load. For
now, the table is initialised with a 1 to 1 mapping.
* GUC_CTL_CTXINFO has been removed from the initialization params.
* reg_state_buffer is maintained internally by the GuC as part of
the private data.
* The ADS layout has changed significantly. This patch updates the
shared structure and also adds better documentation of the layout.
* While i915 does not use GuC doorbells, the firmware now requires
that some initialisation is done.
* The number of engine classes and instances supported in the ADS has
been increased.
Signed-off-by: John Harrison <[email protected]>
Signed-off-by: Matthew Brost <[email protected]>
Signed-off-by: Daniele Ceraolo Spurio <[email protected]>
Signed-off-by: Oscar Mateo <[email protected]>
Signed-off-by: Michel Thierry <[email protected]>
Signed-off-by: Rodrigo Vivi <[email protected]>
Signed-off-by: Michal Wajdeczko <[email protected]>
Cc: Michal Winiarski <[email protected]>
Cc: Tomasz Lis <[email protected]>
Cc: Joonas Lahtinen <[email protected]>
Reviewed-by: Daniele Ceraolo Spurio <[email protected]>
Signed-off-by: Joonas Lahtinen <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
|
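One concrete detail in the list above is the new Gen11+ engine ID layout: engine class in bits [0..2], instance in bits [3..6]. A small sketch of packing such an ID; the macro and function names are made up for illustration and are not the i915 driver's actual definitions.

```c
#include <stdint.h>

#define GUC_ID_CLASS_MASK      0x7u                              /* bits 0..2 */
#define GUC_ID_INSTANCE_SHIFT  3
#define GUC_ID_INSTANCE_MASK   (0xfu << GUC_ID_INSTANCE_SHIFT)   /* bits 3..6 */

/* Pack an engine class and instance into the ID layout described above. */
static inline uint32_t make_guc_engine_id(uint32_t engine_class, uint32_t instance)
{
	return (engine_class & GUC_ID_CLASS_MASK) |
	       ((instance << GUC_ID_INSTANCE_SHIFT) & GUC_ID_INSTANCE_MASK);
}
```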
CoreBasicHandler::CoreBasicHandler(CoreNetwork *parent)
: BasicHandler(parent),
_network(parent)
{
connect(this, SIGNAL(displayMsg(Message::Type, BufferInfo::Type, const QString &, const QString &, const QString &, Message::Flags)),
network(), SLOT(displayMsg(Message::Type, BufferInfo::Type, const QString &, const QString &, const QString &, Message::Flags)));
connect(this, SIGNAL(putCmd(QString, const QList<QByteArray> &, const QByteArray &)),
network(), SLOT(putCmd(QString, const QList<QByteArray> &, const QByteArray &)));
connect(this, SIGNAL(putRawLine(const QByteArray &)),
network(), SLOT(putRawLine(const QByteArray &)));
}
| 1 | ["CWE-399"] | quassel | b5e38970ffd55e2dd9f706ce75af9a8d7730b1b8 | 105,124,363,258,815,180,000,000,000,000,000,000,000 | 13 |
Improve the message-splitting algorithm for PRIVMSG and CTCP
This introduces a new message splitting algorithm based on
QTextBoundaryFinder. It works by first starting with the entire
message to be sent, encoding it, and checking to see if it is over
the maximum message length. If it is, it uses QTBF to find the
word boundary most immediately preceding the maximum length. If no
suitable boundary can be found, it falls back to searching for
grapheme boundaries. It repeats this process until the entire
message has been sent.
Unlike what it replaces, the new splitting code is not recursive
and cannot cause stack overflows. Additionally, if it is unable
to split a string, it will give up gracefully and not crash the
core or cause a thread to run away.
This patch fixes two bugs. The first is garbage characters caused
by accidentally splitting the string in the middle of a multibyte
character. Since the new code splits at a character level instead
of a byte level, this will no longer be an issue. The second is
the core crash caused by sending an overlength CTCP query ("/me")
containing only multibyte characters. This bug was caused by the
old CTCP splitter using the byte index from lastParamOverrun() as
a character index for a QString.
|
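The Quassel fix works on the encoded byte length and backs up to a text boundary (word first, grapheme as a fallback) via QTextBoundaryFinder. As a much-simplified illustration of the same encode-check-back-off shape, here is a C splitter that only backs up to a UTF-8 code-point boundary; it is not the Quassel implementation, just the loop structure the message describes.

```c
#include <stddef.h>

/* Return how many bytes of `msg` (UTF-8, `len` bytes long) fit within
 * `max_bytes` without cutting a code point in half. */
static size_t split_point_utf8(const unsigned char *msg, size_t len,
                               size_t max_bytes)
{
	size_t cut;

	if (len <= max_bytes)
		return len;		/* the whole message fits */
	cut = max_bytes;
	/* Back up while we sit on a UTF-8 continuation byte (10xxxxxx). */
	while (cut > 0 && (msg[cut] & 0xC0) == 0x80)
		cut--;
	return cut;
}
```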
AP_DECLARE(void) ap_init_scoreboard(void *shared_score)
{
char *more_storage;
int i;
pfn_ap_logio_get_last_bytes = APR_RETRIEVE_OPTIONAL_FN(ap_logio_get_last_bytes);
if (!shared_score) {
return;
}
ap_calc_scoreboard_size();
ap_scoreboard_image =
ap_calloc(1, SIZE_OF_scoreboard + server_limit * sizeof(worker_score *));
more_storage = shared_score;
ap_scoreboard_image->global = (global_score *)more_storage;
more_storage += SIZE_OF_global_score;
ap_scoreboard_image->parent = (process_score *)more_storage;
more_storage += SIZE_OF_process_score * server_limit;
ap_scoreboard_image->servers =
(worker_score **)((char*)ap_scoreboard_image + SIZE_OF_scoreboard);
for (i = 0; i < server_limit; i++) {
ap_scoreboard_image->servers[i] = (worker_score *)more_storage;
more_storage += thread_limit * SIZE_OF_worker_score;
}
ap_assert(more_storage == (char*)shared_score + scoreboard_size);
ap_scoreboard_image->global->server_limit = server_limit;
ap_scoreboard_image->global->thread_limit = thread_limit;
}
| 0 | ["CWE-476"] | httpd | fa7b2a5250e54363b3a6c8ac3aaa7de4e8da9b2e | 278,724,650,463,810,250,000,000,000,000,000,000,000 | 28 |
Merge r1878092 from trunk:
Fix a NULL pointer dereference
* server/scoreboard.c (ap_increment_counts): In certain cases like certain
invalid requests r->method might be NULL here. r->method_number defaults
to M_GET and hence is M_GET in these cases.
Submitted by: rpluem
Reviewed by: covener, ylavic, jfclere
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1893051 13f79535-47bb-0310-9956-ffa450edef68
|
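The fix above is a plain NULL guard: `r->method` can be NULL for certain invalid requests, so it must be checked before any string comparison. A tiny self-contained sketch of that guard as a generic helper; this is not the actual `ap_increment_counts()` hunk.

```c
#include <string.h>

/* Nonzero only when method is non-NULL and equal to name; a NULL method
 * never matches and is never dereferenced. */
static int method_equals(const char *method, const char *name)
{
	return method != NULL && strcmp(method, name) == 0;
}
```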
void MainWindow::onPlaylistModified()
{
setWindowModified(true);
if (MLT.producer() && playlist() && (void*) MLT.producer()->get_producer() == (void*) playlist()->get_playlist())
m_player->onDurationChanged();
updateMarkers();
m_player->enableTab(Player::ProjectTabIndex, true);
}
| 0 | ["CWE-89", "CWE-327", "CWE-295"] | shotcut | f008adc039642307f6ee3378d378cdb842e52c1d | 135,066,200,156,017,360,000,000,000,000,000,000,000 | 8 |
fix upgrade check is not using TLS correctly
|
static void test_bug9478()
{
MYSQL_STMT *stmt;
MYSQL_BIND my_bind[1];
char a[6];
ulong a_len;
int rc, i;
DBUG_ENTER("test_bug9478");
myheader("test_bug9478");
mysql_query(mysql, "drop table if exists t1");
mysql_query(mysql, "create table t1 (id integer not null primary key, "
" name varchar(20) not null)");
rc= mysql_query(mysql, "insert into t1 (id, name) values "
" (1, 'aaa'), (2, 'bbb'), (3, 'ccc')");
myquery(rc);
stmt= open_cursor("select name from t1 where id=2");
bzero((char*) my_bind, sizeof(my_bind));
my_bind[0].buffer_type= MYSQL_TYPE_STRING;
my_bind[0].buffer= (char*) a;
my_bind[0].buffer_length= sizeof(a);
my_bind[0].length= &a_len;
mysql_stmt_bind_result(stmt, my_bind);
for (i= 0; i < 5; i++)
{
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
if (!opt_silent && i == 0)
printf("Fetched row: %s\n", a);
/*
The query above is a one-row result set. Therefore, there is no
cursor associated with it, as the server won't bother with opening
a cursor for a one-row result set. The first row was read from the
server in the fetch above. But there is eof packet pending in the
network. mysql_stmt_execute will flush the packet and successfully
execute the statement.
*/
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
if (!opt_silent && i == 0)
printf("Fetched row: %s\n", a);
rc= mysql_stmt_fetch(stmt);
DIE_UNLESS(rc == MYSQL_NO_DATA);
{
char buff[8];
/* Fill in the fetch packet */
int4store(buff, stmt->stmt_id);
buff[4]= 1; /* prefetch rows */
rc= ((*mysql->methods->advanced_command)(mysql, COM_STMT_FETCH,
(uchar*) buff,
sizeof(buff), 0,0,1,NULL) ||
(*mysql->methods->read_query_result)(mysql));
DIE_UNLESS(rc);
if (!opt_silent && i == 0)
printf("Got error (as expected): %s\n", mysql_error(mysql));
}
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
if (!opt_silent && i == 0)
printf("Fetched row: %s\n", a);
rc= mysql_stmt_reset(stmt);
check_execute(stmt, rc);
rc= mysql_stmt_fetch(stmt);
DIE_UNLESS(rc && mysql_stmt_errno(stmt));
if (!opt_silent && i == 0)
printf("Got error (as expected): %s\n", mysql_stmt_error(stmt));
}
rc= mysql_stmt_close(stmt);
DIE_UNLESS(rc == 0);
/* Test the case with a server side cursor */
stmt= open_cursor("select name from t1");
mysql_stmt_bind_result(stmt, my_bind);
for (i= 0; i < 5; i++)
{
DBUG_PRINT("loop",("i: %d", i));
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
if (!opt_silent && i == 0)
printf("Fetched row: %s\n", a);
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
while (! (rc= mysql_stmt_fetch(stmt)))
{
if (!opt_silent && i == 0)
printf("Fetched row: %s\n", a);
}
DIE_UNLESS(rc == MYSQL_NO_DATA);
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
if (!opt_silent && i == 0)
printf("Fetched row: %s\n", a);
rc= mysql_stmt_reset(stmt);
check_execute(stmt, rc);
rc= mysql_stmt_fetch(stmt);
DIE_UNLESS(rc && mysql_stmt_errno(stmt));
if (!opt_silent && i == 0)
printf("Got error (as expected): %s\n", mysql_stmt_error(stmt));
}
rc= mysql_stmt_close(stmt);
DIE_UNLESS(rc == 0);
rc= mysql_query(mysql, "drop table t1");
myquery(rc);
DBUG_VOID_RETURN;
}
| 0 | ["CWE-416"] | server | eef21014898d61e77890359d6546d4985d829ef6 | 4,573,153,887,048,016,000,000,000,000,000,000,000 | 134 |
MDEV-11933 Wrong usage of linked list in mysql_prune_stmt_list
mysql_prune_stmt_list() was walking the list following
element->next pointers, but inside the loop it was invoking
list_add(element) that modified element->next. So, mysql_prune_stmt_list()
failed to visit and reset all elements, and some of them were left
with pointers to invalid MYSQL.
|
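The bug described above is the classic pattern of walking `element->next` while the loop body relinks the element. A minimal self-contained sketch of the safe traversal, capturing the next pointer before the body can clobber it; the generic `struct node` stands in for MySQL's intrusive `LIST` type.

```c
#include <stddef.h>

struct node {
	struct node *next;
	/* payload omitted */
};

/* Prepend n to head, rewriting n->next -- the operation that broke the
 * original traversal. */
static struct node *list_add(struct node *head, struct node *n)
{
	n->next = head;
	return n;
}

/* Move every element of src onto a new list without losing any of them. */
static struct node *prune_all(struct node *src)
{
	struct node *dst = NULL, *cur, *next;

	for (cur = src; cur != NULL; cur = next) {
		next = cur->next;	/* capture before list_add() clobbers it */
		dst = list_add(dst, cur);
	}
	return dst;
}
```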
static void sasl_sourceinfo_delete(sasl_sourceinfo_t *ssi)
{
return_if_fail(ssi != NULL);
free(ssi);
}
| 0 | ["CWE-288"] | atheme | de2ba3ca8f6c39b41431d989f3ac66002a487839 | 231,646,323,190,765,470,000,000,000,000,000,000,000 | 6 |
modules/saslserv/main: backport 7.3 commits for pending EID login
This backports commits 4e664c75d0b280a052eb & ceb0235695e6736ce2ab
from the master branch.
The IRCv3.1 SASL specification contains the following wording:
If the client completes registration (with CAP END, NICK, USER
and any other necessary messages) while the SASL authentication
is still in progress, the server SHOULD abort it and send a 906
numeric, then register the client without authentication.
We were relying on this behaviour (which was our mistake; it's a
SHOULD, not a MUST), which turned out to be implemented in every
IRC server daemon (that supports SASL) that we are aware of. This
means that if someone completes registration without having completed
an SASL negotiation, the SASL session would be aborted before the
client is introduced to the network. At that point, the session would
not exist and the client would not be logged in.
The InspIRCd developers changed this behaviour in the
inspircd/inspircd@407b2e004cf66e442771 commit. It no longer aborts
negotiation when a client prematurely completes registration.
This means that if the client is attempting a multi-step (challenge-
response) authentication mechanism, and that mechanism caches user
credentials at some point before completion, the client can pre-
maturely end negotiation and get logged in as that user.
Worse still, SASL impersonation lets the attacker set the authzid to
their intended victim, allowing them to login as anyone, even if they
don't have a challenge-response authentication credential configured.
This does not exist in version 7.1; the victim's account there has to
have such a credential to be vulnerable to this attack.
Vulnerable configurations are as follows:
- All of:
- InspIRCd 3+
- Any of:
- Atheme 7.1 (any version)
- Atheme 7.2 (any version before 7.2.12; this commit)
- Atheme 7.3 (any version before commit 4e664c75d0b280a052eb)
- Any of:
- The saslserv/scram module is loaded
- The saslserv/ecdh-x25519-challenge module is loaded
- The saslserv/ecdsa-nist256p-challenge module is loaded
This is a fix for a security vulnerability. The master (7.3) branch
was already fixed in 4e664c75d0b280a052eb, but the scope of the
problem was not fully known at that time. The 7.1 branch is no longer
supported, is not receiving security updates, and will not be patched;
users of the 7.1 series (using an IRCd that does not abort the SASL
session when the client prematurely completes registration) must
upgrade, or unload the `saslserv/ecdsa-nist256p-challenge` module.
This problem was discovered by and reported by @edk0.
|
static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
struct sk_buff *skb = NULL;
struct net_device *dev;
__be16 proto = 0;
int err;
int extra_len = 0;
/*
* Get and verify the address.
*/
if (saddr) {
if (msg->msg_namelen < sizeof(struct sockaddr))
return -EINVAL;
if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
proto = saddr->spkt_protocol;
} else
return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */
/*
* Find the device first to size check it
*/
saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
retry:
rcu_read_lock();
dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
err = -ENODEV;
if (dev == NULL)
goto out_unlock;
err = -ENETDOWN;
if (!(dev->flags & IFF_UP))
goto out_unlock;
/*
* You may not queue a frame bigger than the mtu. This is the lowest level
* raw protocol and you must do your own fragmentation at this level.
*/
if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
if (!netif_supports_nofcs(dev)) {
err = -EPROTONOSUPPORT;
goto out_unlock;
}
extra_len = 4; /* We're doing our own CRC */
}
err = -EMSGSIZE;
if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
goto out_unlock;
if (!skb) {
size_t reserved = LL_RESERVED_SPACE(dev);
int tlen = dev->needed_tailroom;
unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
rcu_read_unlock();
skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
if (skb == NULL)
return -ENOBUFS;
/* FIXME: Save some space for broken drivers that write a hard
* header at transmission time by themselves. PPP is the notable
* one here. This should really be fixed at the driver level.
*/
skb_reserve(skb, reserved);
skb_reset_network_header(skb);
/* Try to align data part correctly */
if (hhlen) {
skb->data -= hhlen;
skb->tail -= hhlen;
if (len < hhlen)
skb_reset_network_header(skb);
}
err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
if (err)
goto out_free;
goto retry;
}
if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
/* Earlier code assumed this would be a VLAN pkt,
* double-check this now that we have the actual
* packet in hand.
*/
struct ethhdr *ehdr;
skb_reset_mac_header(skb);
ehdr = eth_hdr(skb);
if (ehdr->h_proto != htons(ETH_P_8021Q)) {
err = -EMSGSIZE;
goto out_unlock;
}
}
skb->protocol = proto;
skb->dev = dev;
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
if (unlikely(extra_len == 4))
skb->no_fcs = 1;
skb_probe_transport_header(skb, 0);
dev_queue_xmit(skb);
rcu_read_unlock();
return len;
out_unlock:
rcu_read_unlock();
out_free:
kfree_skb(skb);
return err;
}
| 0 | ["CWE-20", "CWE-269"] | linux | f3d3342602f8bcbf37d7c46641cb9bca7618eb1c | 307,938,690,296,426,600,000,000,000,000,000,000,000 | 121 |
net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
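The contract described above is: callers pass `msg_namelen` as 0 (and NULL `msg_name` when no address is wanted), and the recvmsg handler writes the address and sets `msg_namelen` only when it actually has one. A small userspace-style sketch of a handler honouring that contract, with a simplified signature and a hypothetical function name; a real handler receives sockaddr_storage-sized space from the caller.

```c
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

static int example_recvmsg(struct msghdr *msg, const struct sockaddr_in *peer)
{
	/* Only report an address if the caller supplied space for one. */
	if (msg->msg_name != NULL) {
		memcpy(msg->msg_name, peer, sizeof(*peer));
		msg->msg_namelen = sizeof(*peer);	/* handler sets the size */
	}
	/* Callers that passed msg_name == NULL keep msg_namelen at 0, so no
	 * uninitialized bytes are ever copied back to user space. */
	return 0;
}
```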
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
const TfLiteTensor* data,
const TfLiteTensor* segment_ids,
TfLiteTensor* output) {
// Segment ids should be of same cardinality as first input dimension and they
// should be increasing by at most 1, from 0 (e.g., [0, 0, 1, 2, 3] is valid)
const int segment_id_size = segment_ids->dims->data[0];
TF_LITE_ENSURE_EQ(context, segment_id_size, data->dims->data[0]);
int previous_segment_id = -1;
for (int i = 0; i < segment_id_size; i++) {
const int current_segment_id = GetTensorData<int32_t>(segment_ids)[i];
if (i == 0) {
TF_LITE_ENSURE_EQ(context, current_segment_id, 0);
} else {
int delta = current_segment_id - previous_segment_id;
TF_LITE_ENSURE(context, delta == 0 || delta == 1);
}
previous_segment_id = current_segment_id;
}
const int max_index = previous_segment_id;
const int data_rank = NumDimensions(data);
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(NumDimensions(data));
output_shape->data[0] = max_index + 1;
for (int i = 1; i < data_rank; ++i) {
output_shape->data[i] = data->dims->data[i];
}
return context->ResizeTensor(context, output, output_shape);
}
| 0 | ["CWE-703", "CWE-787"] | tensorflow | 204945b19e44b57906c9344c0d00120eeeae178a | 79,256,682,322,200,740,000,000,000,000,000,000,000 | 30 |
[tflite] Validate segment ids for segment_sum.
Segment identifiers in segment_sum should be in a 1-D tensor of same size as the first dimension of the input. The values of the tensor should be integers from {0, 1, 2, ... k-1}, where k is the first dimension of the input. The segment identifiers must not contain jumps and must be increasing.
See https://www.tensorflow.org/api_docs/python/tf/math#Segmentation as the source for these constraints.
PiperOrigin-RevId: 332510942
Change-Id: I898beaba00642c918bcd4b4d4ce893ebb190d869
|
void Context::onHttpCallResponse(uint32_t token, const Pairs& response_headers,
absl::string_view response_body, const Pairs& response_trailers) {
if (!wasm_->onHttpCallResponse_) {
return;
}
uint64_t headers_ptr, headers_size, trailers_ptr, trailers_size;
exportPairs(this, response_headers, &headers_ptr, &headers_size);
exportPairs(this, response_trailers, &trailers_ptr, &trailers_size);
auto body_ptr = wasm_->copyString(response_body);
auto body_size = response_body.size();
wasm_->onHttpCallResponse_(this, id_, token, headers_ptr, headers_size, body_ptr, body_size,
trailers_ptr, trailers_size);
}
| 0 | ["CWE-476"] | envoy | 8788a3cf255b647fd14e6b5e2585abaaedb28153 | 269,628,195,292,015,720,000,000,000,000,000,000,000 | 13 |
1.4 - Do not call into the VM unless the VM Context has been created. (#24)
* Ensure that the in VM Context is created before onDone is called.
Signed-off-by: John Plevyak <[email protected]>
* Update as per offline discussion.
Signed-off-by: John Plevyak <[email protected]>
* Set in_vm_context_created_ in onNetworkNewConnection.
Signed-off-by: John Plevyak <[email protected]>
* Add guards to other network calls.
Signed-off-by: John Plevyak <[email protected]>
* Fix common/wasm tests.
Signed-off-by: John Plevyak <[email protected]>
* Patch tests.
Signed-off-by: John Plevyak <[email protected]>
* Remove unecessary file from cherry-pick.
Signed-off-by: John Plevyak <[email protected]>
|
jas_image_t *mif_decode(jas_stream_t *in, char *optstr)
{
mif_hdr_t *hdr;
jas_image_t *image;
jas_image_t *tmpimage;
jas_stream_t *tmpstream;
int cmptno;
mif_cmpt_t *cmpt;
jas_image_cmptparm_t cmptparm;
jas_seq2d_t *data;
int_fast32_t x;
int_fast32_t y;
int bias;
/* Avoid warnings about unused parameters. */
optstr = 0;
hdr = 0;
image = 0;
tmpimage = 0;
tmpstream = 0;
data = 0;
if (!(hdr = mif_hdr_get(in))) {
goto error;
}
if (!(image = jas_image_create0())) {
goto error;
}
for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) {
cmpt = hdr->cmpts[cmptno];
tmpstream = cmpt->data ? jas_stream_fopen(cmpt->data, "rb") : in;
if (!tmpstream) {
goto error;
}
if (!(tmpimage = jas_image_decode(tmpstream, -1, 0))) {
goto error;
}
if (tmpstream != in) {
jas_stream_close(tmpstream);
tmpstream = 0;
}
if (!cmpt->width) {
cmpt->width = jas_image_cmptwidth(tmpimage, 0);
}
if (!cmpt->height) {
cmpt->height = jas_image_cmptwidth(tmpimage, 0);
}
if (!cmpt->prec) {
cmpt->prec = jas_image_cmptprec(tmpimage, 0);
}
if (cmpt->sgnd < 0) {
cmpt->sgnd = jas_image_cmptsgnd(tmpimage, 0);
}
cmptparm.tlx = cmpt->tlx;
cmptparm.tly = cmpt->tly;
cmptparm.hstep = cmpt->sampperx;
cmptparm.vstep = cmpt->samppery;
cmptparm.width = cmpt->width;
cmptparm.height = cmpt->height;
cmptparm.prec = cmpt->prec;
cmptparm.sgnd = cmpt->sgnd;
if (jas_image_addcmpt(image, jas_image_numcmpts(image), &cmptparm)) {
goto error;
}
if (!(data = jas_seq2d_create(0, 0, cmpt->width, cmpt->height))) {
goto error;
}
if (jas_image_readcmpt(tmpimage, 0, 0, 0, cmpt->width, cmpt->height,
data)) {
goto error;
}
if (cmpt->sgnd) {
bias = 1 << (cmpt->prec - 1);
for (y = 0; y < cmpt->height; ++y) {
for (x = 0; x < cmpt->width; ++x) {
*jas_seq2d_getref(data, x, y) -= bias;
}
}
}
if (jas_image_writecmpt(image, jas_image_numcmpts(image) - 1, 0, 0,
cmpt->width, cmpt->height, data)) {
goto error;
}
jas_seq2d_destroy(data);
data = 0;
jas_image_destroy(tmpimage);
tmpimage = 0;
}
mif_hdr_destroy(hdr);
hdr = 0;
return image;
error:
if (image) {
jas_image_destroy(image);
}
if (hdr) {
mif_hdr_destroy(hdr);
}
if (tmpstream && tmpstream != in) {
jas_stream_close(tmpstream);
}
if (tmpimage) {
jas_image_destroy(tmpimage);
}
if (data) {
jas_seq2d_destroy(data);
}
return 0;
}
| 0 | ["CWE-189"] | jasper | 3c55b399c36ef46befcb21e4ebc4799367f89684 | 270,885,114,667,014,300,000,000,000,000,000,000,000 | 114 |
At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems.
|
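The allocator wrappers named above (`jas_alloc2`, `jas_alloc3`, `jas_realloc2`) exist so the size computation can be overflow-checked in one place. A minimal sketch of the underlying check, refusing the allocation when the product would wrap; this is an illustration, not JasPer's exact code.

```c
#include <stdint.h>
#include <stdlib.h>

/* Allocate num * size bytes, but fail cleanly if the product overflows. */
static void *alloc2_checked(size_t num, size_t size)
{
	if (size != 0 && num > SIZE_MAX / size)
		return NULL;	/* num * size would overflow size_t */
	return malloc(num * size);
}
```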
void SSL_set_psk_server_callback(SSL *s,
unsigned int (*cb) (SSL *ssl,
const char *identity,
unsigned char *psk,
unsigned int
max_psk_len))
{
s->psk_server_callback = cb;
}
| 0 | ["CWE-310"] | openssl | 56f1acf5ef8a432992497a04792ff4b3b2c6f286 | 278,065,795,162,788,060,000,000,000,000,000,000,000 | 9 |
Disable SSLv2 default build, default negotiation and weak ciphers.
SSLv2 is by default disabled at build-time. Builds that are not
configured with "enable-ssl2" will not support SSLv2. Even if
"enable-ssl2" is used, users who want to negotiate SSLv2 via the
version-flexible SSLv23_method() will need to explicitly call either
of:
SSL_CTX_clear_options(ctx, SSL_OP_NO_SSLv2);
or
SSL_clear_options(ssl, SSL_OP_NO_SSLv2);
as appropriate. Even if either of those is used, or the application
explicitly uses the version-specific SSLv2_method() or its client
or server variants, SSLv2 ciphers vulnerable to exhaustive search
key recovery have been removed. Specifically, the SSLv2 40-bit
EXPORT ciphers, and SSLv2 56-bit DES are no longer available.
Mitigation for CVE-2016-0800
Reviewed-by: Emilia Käsper <[email protected]>
|
static void qxl_spice_destroy_surfaces_complete(PCIQXLDevice *qxl)
{
trace_qxl_spice_destroy_surfaces_complete(qxl->id);
qemu_mutex_lock(&qxl->track_lock);
memset(qxl->guest_surfaces.cmds, 0,
sizeof(qxl->guest_surfaces.cmds[0]) * qxl->ssd.num_surfaces);
qxl->guest_surfaces.count = 0;
qemu_mutex_unlock(&qxl->track_lock);
}
| 0 | ["CWE-476"] | qemu | d52680fc932efb8a2f334cc6993e705ed1e31e99 | 272,874,761,512,595,400,000,000,000,000,000,000,000 | 9 |
qxl: check release info object
When releasing spice resources in release_resource() routine,
if release info object 'ext.info' is null, it leads to null
pointer dereference. Add check to avoid it.
Reported-by: Bugs SysSec <[email protected]>
Signed-off-by: Prasad J Pandit <[email protected]>
Message-id: [email protected]
Signed-off-by: Gerd Hoffmann <[email protected]>
|
static int __cpuinit msr_device_create(int cpu)
{
struct device *dev;
dev = device_create(msr_class, NULL, MKDEV(MSR_MAJOR, cpu), NULL,
"msr%d", cpu);
return IS_ERR(dev) ? PTR_ERR(dev) : 0;
}
| 0 | ["CWE-284", "CWE-264"] | linux | c903f0456bc69176912dee6dd25c6a66ee1aed00 | 273,837,030,115,508,600,000,000,000,000,000,000,000 | 8 |
x86/msr: Add capabilities check
At the moment the MSR driver only relies upon file system
checks. This means that anything as root with any capability set
can write to MSRs. Historically that wasn't very interesting but
on modern processors the MSRs are such that writing to them
provides several ways to execute arbitrary code in kernel space.
Sample code and documentation on doing this is circulating and
MSR attacks are used on Windows 64bit rootkits already.
In the Linux case you still need to be able to open the device
file so the impact is fairly limited and reduces the security of
some capability and security model based systems down towards
that of a generic "root owns the box" setup.
Therefore they should require CAP_SYS_RAWIO to prevent an
elevation of capabilities. The impact of this is fairly minimal
on most setups because they don't have heavy use of
capabilities. Those using SELinux, SMACK or AppArmor rules might
want to consider if their rulesets on the MSR driver could be
tighter.
Signed-off-by: Alan Cox <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Horses <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
|
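The mitigation described above boils down to an explicit capability check in the MSR device's open path. A kernel-style sketch of where that check goes, with a simplified `msr_open()` whose remaining checks are elided; it compiles only inside a kernel tree and is not the exact upstream hunk.

```c
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/fs.h>

static int msr_open(struct inode *inode, struct file *file)
{
	/* File-system permissions alone are not enough: also require
	 * CAP_SYS_RAWIO before granting access to raw MSRs. */
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/* ... existing CPU/feature checks would continue here ... */
	return 0;
}
```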
check_pos(buf_T *buf, pos_T *pos)
{
char_u *line;
colnr_T len;
if (pos->lnum > buf->b_ml.ml_line_count)
pos->lnum = buf->b_ml.ml_line_count;
if (pos->col > 0)
{
line = ml_get_buf(buf, pos->lnum, FALSE);
len = (colnr_T)STRLEN(line);
if (pos->col > len)
pos->col = len;
}
}
| 0 | ["CWE-120"] | vim | 7ce5b2b590256ce53d6af28c1d203fb3bc1d2d97 | 7,913,358,380,842,406,000,000,000,000,000,000,000 | 16 |
patch 8.2.4969: changing text in Visual mode may cause invalid memory access
Problem: Changing text in Visual mode may cause invalid memory access.
Solution: Check the Visual position after making a change.
|
send_dbus_message (DBusConnection *connection,
DBusMessage *message)
{
gboolean is_connected;
gboolean sent;
g_return_val_if_fail (message != NULL, FALSE);
if (! connection) {
gs_debug ("There is no valid connection to the message bus");
return FALSE;
}
is_connected = dbus_connection_get_is_connected (connection);
if (! is_connected) {
gs_debug ("Not connected to the message bus");
return FALSE;
}
sent = dbus_connection_send (connection, message, NULL);
return sent;
}
| 0 | [] | gnome-screensaver | 284c9924969a49dbf2d5fae1d680d3310c4df4a3 | 155,484,504,664,773,150,000,000,000,000,000,000,000 | 23 |
Remove session inhibitors if the originator falls off the bus
This fixes a problem where totem leaves inhibitors behind, see
bug 600488.
|
add_oc_attributes_to_supported_fields (EBookBackendLDAP *bl,
LDAPObjectClass *oc)
{
gint i;
GHashTable *attr_hash = g_hash_table_new (g_str_hash, g_str_equal);
for (i = 0; i < G_N_ELEMENTS (prop_info); i++)
g_hash_table_insert (attr_hash, (gpointer) prop_info[i].ldap_attr, (gchar *) e_contact_field_name (prop_info[i].field_id));
if (oc->oc_at_oids_must)
add_to_supported_fields (bl, oc->oc_at_oids_must, attr_hash);
if (oc->oc_at_oids_may)
add_to_supported_fields (bl, oc->oc_at_oids_may, attr_hash);
g_hash_table_destroy (attr_hash);
}
| 0 | [] | evolution-data-server | 34bad61738e2127736947ac50e0c7969cc944972 | 281,364,287,298,476,000,000,000,000,000,000,000,000 | 17 |
Bug 796174 - strcat() considered unsafe for buffer overflow
|
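The bug title above flags unbounded `strcat()` into fixed buffers. Since this code base already uses GLib, a typical replacement is an allocating concatenation; a tiny sketch of that general pattern with illustrative names, not the exact hunk from the commit.

```c
#include <glib.h>

/* Build "(attr=value)" without a fixed-size buffer or strcat(). */
static char *build_attr_query(const char *attr, const char *value)
{
	/* g_strconcat() allocates exactly as much as needed; free with g_free(). */
	return g_strconcat("(", attr, "=", value, ")", NULL);
}
```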
get_expr_name(expr_ty e)
{
switch (e->kind) {
case Attribute_kind:
return "attribute";
case Subscript_kind:
return "subscript";
case Starred_kind:
return "starred";
case Name_kind:
return "name";
case List_kind:
return "list";
case Tuple_kind:
return "tuple";
case Lambda_kind:
return "lambda";
case Call_kind:
return "function call";
case BoolOp_kind:
case BinOp_kind:
case UnaryOp_kind:
return "operator";
case GeneratorExp_kind:
return "generator expression";
case Yield_kind:
case YieldFrom_kind:
return "yield expression";
case Await_kind:
return "await expression";
case ListComp_kind:
return "list comprehension";
case SetComp_kind:
return "set comprehension";
case DictComp_kind:
return "dict comprehension";
case Dict_kind:
return "dict display";
case Set_kind:
return "set display";
case JoinedStr_kind:
case FormattedValue_kind:
return "f-string expression";
case Constant_kind: {
PyObject *value = e->v.Constant.value;
if (value == Py_None) {
return "None";
}
if (value == Py_False) {
return "False";
}
if (value == Py_True) {
return "True";
}
if (value == Py_Ellipsis) {
return "Ellipsis";
}
return "literal";
}
case Compare_kind:
return "comparison";
case IfExp_kind:
return "conditional expression";
case NamedExpr_kind:
return "named expression";
default:
PyErr_Format(PyExc_SystemError,
"unexpected expression in assignment %d (line %d)",
e->kind, e->lineno);
return NULL;
}
}
| 0 | ["CWE-125"] | cpython | a4d78362397fc3bced6ea80fbc7b5f4827aec55e | 141,177,005,759,431,900,000,000,000,000,000,000,000 | 72 |
bpo-36495: Fix two out-of-bounds array reads (GH-12641)
Research and fix by @bradlarsen.
|
static int nf_tables_chain_notify(const struct nft_ctx *ctx, int event)
{
struct sk_buff *skb;
int err;
if (!ctx->report &&
!nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
return 0;
err = -ENOBUFS;
skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (skb == NULL)
goto err;
err = nf_tables_fill_chain_info(skb, ctx->net, ctx->portid, ctx->seq,
event, 0, ctx->afi->family, ctx->table,
ctx->chain);
if (err < 0) {
kfree_skb(skb);
goto err;
}
err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
ctx->report, GFP_KERNEL);
err:
if (err < 0) {
nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES,
err);
}
return err;
}
| 0 | ["CWE-19"] | nf | a2f18db0c68fec96631c10cad9384c196e9008ac | 275,495,180,878,026,930,000,000,000,000,000,000,000 | 31 |
netfilter: nf_tables: fix flush ruleset chain dependencies
Jumping between chains doesn't mix well with flush ruleset. Rules
from a different chain and set elements may still refer to us.
[ 353.373791] ------------[ cut here ]------------
[ 353.373845] kernel BUG at net/netfilter/nf_tables_api.c:1159!
[ 353.373896] invalid opcode: 0000 [#1] SMP
[ 353.373942] Modules linked in: intel_powerclamp uas iwldvm iwlwifi
[ 353.374017] CPU: 0 PID: 6445 Comm: 31c3.nft Not tainted 3.18.0 #98
[ 353.374069] Hardware name: LENOVO 5129CTO/5129CTO, BIOS 6QET47WW (1.17 ) 07/14/2010
[...]
[ 353.375018] Call Trace:
[ 353.375046] [<ffffffff81964c31>] ? nf_tables_commit+0x381/0x540
[ 353.375101] [<ffffffff81949118>] nfnetlink_rcv+0x3d8/0x4b0
[ 353.375150] [<ffffffff81943fc5>] netlink_unicast+0x105/0x1a0
[ 353.375200] [<ffffffff8194438e>] netlink_sendmsg+0x32e/0x790
[ 353.375253] [<ffffffff818f398e>] sock_sendmsg+0x8e/0xc0
[ 353.375300] [<ffffffff818f36b9>] ? move_addr_to_kernel.part.20+0x19/0x70
[ 353.375357] [<ffffffff818f44f9>] ? move_addr_to_kernel+0x19/0x30
[ 353.375410] [<ffffffff819016d2>] ? verify_iovec+0x42/0xd0
[ 353.375459] [<ffffffff818f3e10>] ___sys_sendmsg+0x3f0/0x400
[ 353.375510] [<ffffffff810615fa>] ? native_sched_clock+0x2a/0x90
[ 353.375563] [<ffffffff81176697>] ? acct_account_cputime+0x17/0x20
[ 353.375616] [<ffffffff8110dc78>] ? account_user_time+0x88/0xa0
[ 353.375667] [<ffffffff818f4bbd>] __sys_sendmsg+0x3d/0x80
[ 353.375719] [<ffffffff81b184f4>] ? int_check_syscall_exit_work+0x34/0x3d
[ 353.375776] [<ffffffff818f4c0d>] SyS_sendmsg+0xd/0x20
[ 353.375823] [<ffffffff81b1826d>] system_call_fastpath+0x16/0x1b
Release objects in this order: rules -> sets -> chains -> tables, to
make sure no references to chains are held anymore.
Reported-by: Asbjoern Sloth Toennesen <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>
|
static void stress(int pos, int num, int maxsize, int dnum) {
int i,j,k;
unsigned char *zl;
char posstr[2][5] = { "HEAD", "TAIL" };
long long start;
for (i = 0; i < maxsize; i+=dnum) {
zl = ziplistNew();
for (j = 0; j < i; j++) {
zl = ziplistPush(zl,(unsigned char*)"quux",4,ZIPLIST_TAIL);
}
/* Do num times a push+pop from pos */
start = usec();
for (k = 0; k < num; k++) {
zl = ziplistPush(zl,(unsigned char*)"quux",4,pos);
zl = ziplistDeleteRange(zl,0,1);
}
printf("List size: %8d, bytes: %8d, %dx push+pop (%s): %6lld usec\n",
i,intrev32ifbe(ZIPLIST_BYTES(zl)),num,posstr[pos],usec()-start);
zfree(zl);
}
}
| 0 | ["CWE-190"] | redis | f6a40570fa63d5afdd596c78083d754081d80ae3 | 254,815,570,196,862,430,000,000,000,000,000,000,000 | 22 |
Fix ziplist and listpack overflows and truncations (CVE-2021-32627, CVE-2021-32628)
- fix possible heap corruption in ziplist and listpack resulting from trying to
allocate more than the maximum size of 4GB.
- prevent ziplist (hash and zset) from reaching size of above 1GB, will be
converted to HT encoding, that's not a useful size.
- prevent listpack (stream) from reaching size of above 1GB.
- XADD will start a new listpack if the new record may cause the previous
listpack to grow over 1GB.
- XADD will respond with an error if a single stream record is over 1GB
- List type (ziplist in quicklist) was truncating strings that were over 4GB,
now it'll respond with an error.
|
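The core of the fix above is a hard cap well below the point where the 32-bit header fields could overflow: refuse to let a ziplist/listpack grow past a safety limit. A minimal sketch of such a guard; the constant and function name are illustrative and not Redis's exact implementation.

```c
#include <stddef.h>

#define SIZE_SAFETY_LIMIT ((size_t)1 << 30)	/* 1GB cap from the commit message */

/* Nonzero when growing a buffer of cur_bytes by add_bytes stays under the cap. */
static int safe_to_add(size_t cur_bytes, size_t add_bytes)
{
	size_t new_bytes = cur_bytes + add_bytes;

	if (new_bytes < cur_bytes)	/* the addition itself overflowed */
		return 0;
	return new_bytes <= SIZE_SAFETY_LIMIT;
}
```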
static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
u64 chunk_offset)
{
struct btrfs_block_group *cache;
u64 bytes_used;
u64 chunk_type;
cache = btrfs_lookup_block_group(fs_info, chunk_offset);
ASSERT(cache);
chunk_type = cache->flags;
btrfs_put_block_group(cache);
if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
return 0;
spin_lock(&fs_info->data_sinfo->lock);
bytes_used = fs_info->data_sinfo->bytes_used;
spin_unlock(&fs_info->data_sinfo->lock);
if (!bytes_used) {
struct btrfs_trans_handle *trans;
int ret;
trans = btrfs_join_transaction(fs_info->tree_root);
if (IS_ERR(trans))
return PTR_ERR(trans);
ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
btrfs_end_transaction(trans);
if (ret < 0)
return ret;
return 1;
}
return 0;
}
| 0 | ["CWE-476", "CWE-703"] | linux | e4571b8c5e9ffa1e85c0c671995bd4dcc5c75091 | 24,440,407,567,924,094,000,000,000,000,000,000,000 | 36 |
btrfs: fix NULL pointer dereference when deleting device by invalid id
[BUG]
It's easy to trigger NULL pointer dereference, just by removing a
non-existing device id:
# mkfs.btrfs -f -m single -d single /dev/test/scratch1 \
/dev/test/scratch2
# mount /dev/test/scratch1 /mnt/btrfs
# btrfs device remove 3 /mnt/btrfs
Then we have the following kernel NULL pointer dereference:
BUG: kernel NULL pointer dereference, address: 0000000000000000
#PF: supervisor read access in kernel mode
#PF: error_code(0x0000) - not-present page
PGD 0 P4D 0
Oops: 0000 [#1] PREEMPT SMP NOPTI
CPU: 9 PID: 649 Comm: btrfs Not tainted 5.14.0-rc3-custom+ #35
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
RIP: 0010:btrfs_rm_device+0x4de/0x6b0 [btrfs]
btrfs_ioctl+0x18bb/0x3190 [btrfs]
? lock_is_held_type+0xa5/0x120
? find_held_lock.constprop.0+0x2b/0x80
? do_user_addr_fault+0x201/0x6a0
? lock_release+0xd2/0x2d0
? __x64_sys_ioctl+0x83/0xb0
__x64_sys_ioctl+0x83/0xb0
do_syscall_64+0x3b/0x90
entry_SYSCALL_64_after_hwframe+0x44/0xae
[CAUSE]
Commit a27a94c2b0c7 ("btrfs: Make btrfs_find_device_by_devspec return
btrfs_device directly") moves the "missing" device path check into
btrfs_rm_device().
But btrfs_rm_device() itself can have case where it only receives
@devid, with NULL as @device_path.
In that case, calling strcmp() on NULL will trigger the NULL pointer
dereference.
Before that commit, we handle the "missing" case inside
btrfs_find_device_by_devspec(), which will not check @device_path at all
if @devid is provided, thus no way to trigger the bug.
[FIX]
Before calling strcmp(), also make sure @device_path is not NULL.
Fixes: a27a94c2b0c7 ("btrfs: Make btrfs_find_device_by_devspec return btrfs_device directly")
CC: [email protected] # 5.4+
Reported-by: butt3rflyh4ck <[email protected]>
Reviewed-by: Anand Jain <[email protected]>
Signed-off-by: Qu Wenruo <[email protected]>
Reviewed-by: David Sterba <[email protected]>
Signed-off-by: David Sterba <[email protected]>
|
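The fix above is simply to check `device_path` for NULL before handing it to `strcmp()`, since delete-by-devid passes NULL. A self-contained sketch of the guard; the helper name is made up, and the real change lives inside `btrfs_rm_device()`.

```c
#include <string.h>

/* Nonzero only when device_path is non-NULL and names the "missing" device;
 * a NULL path (delete-by-devid) can never reach strcmp(). */
static int devspec_is_missing(const char *device_path)
{
	return device_path != NULL && strcmp(device_path, "missing") == 0;
}
```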
pdf14_begin_transparency_group(gx_device *dev,
const gs_transparency_group_params_t *ptgp,
const gs_rect *pbbox,
gs_gstate *pgs, gs_memory_t *mem)
{
pdf14_device *pdev = (pdf14_device *)dev;
double alpha = pgs->opacity.alpha * pgs->shape.alpha;
gs_int_rect rect;
int code;
bool isolated = ptgp->Isolated;
gs_transparency_color_t group_color;
cmm_profile_t *group_profile;
cmm_profile_t *tos_profile;
gsicc_rendering_param_t render_cond;
cmm_dev_profile_t *dev_profile;
bool cm_back_drop = false;
bool new_icc = false;
code = dev_proc(dev, get_profile)(dev, &dev_profile);
if (code < 0)
return code;
gsicc_extract_profile(GS_UNKNOWN_TAG, dev_profile, &tos_profile, &render_cond);
code = compute_group_device_int_rect(pdev, &rect, pbbox, pgs);
if (code < 0)
return code;
if_debug4m('v', pdev->memory,
"[v]pdf14_begin_transparency_group, I = %d, K = %d, alpha = %g, bm = %d\n",
ptgp->Isolated, ptgp->Knockout, alpha, pgs->blend_mode);
/* If the group color is unknown then use the current device profile. */
if (ptgp->group_color == UNKNOWN){
group_color = ICC;
group_profile = tos_profile;
} else {
group_color = ptgp->group_color;
group_profile = ptgp->iccprofile;
}
/* We have to handle case where the profile is in the clist */
if (group_profile == NULL && pdev->pclist_device != NULL) {
/* Get the serialized data from the clist. */
gx_device_clist_reader *pcrdev = (gx_device_clist_reader *)(pdev->pclist_device);
group_profile = gsicc_read_serial_icc((gx_device *) pcrdev, ptgp->icc_hashcode);
if (group_profile == NULL)
return gs_throw(gs_error_unknownerror, "ICC data not found in clist");
/* Keep a pointer to the clist device */
group_profile->dev = (gx_device *) pcrdev;
new_icc = true;
}
if (group_profile != NULL) {
/* If we have a non-isolated group and the color space is different,
we will need to CM the backdrop. */
if (!(group_profile->hash_is_valid)) {
gsicc_get_icc_buff_hash(group_profile->buffer,
&(group_profile->hashcode),
group_profile->buffer_size);
group_profile->hash_is_valid = true;
}
if (group_profile->hashcode != tos_profile->hashcode) {
cm_back_drop = true;
}
}
code = pdf14_update_device_color_procs(dev, group_color, ptgp->icc_hashcode,
pgs, group_profile, false);
if_debug0m('v', dev->memory, "[v]Transparency group color space update\n");
if (code < 0)
return code;
code = pdf14_push_transparency_group(pdev->ctx, &rect, isolated, ptgp->Knockout,
(byte)floor (255 * alpha + 0.5),
(byte)floor (255 * pgs->shape.alpha + 0.5),
pgs->blend_mode, ptgp->idle,
ptgp->mask_id, pdev->color_info.num_components,
cm_back_drop, group_profile, tos_profile,
pgs, dev);
if (new_icc)
rc_decrement(group_profile, "pdf14_begin_transparency_group");
return code;
}
| 0 |
[
"CWE-416"
] |
ghostpdl
|
90fd0c7ca3efc1ddff64a86f4104b13b3ac969eb
| 173,054,386,544,140,000,000,000,000,000,000,000,000 | 80 |
Bug 697456. Don't create a new ctx when the pdf14 device is re-enabled
This bug had yet another weird case where the user created a
file that pushed the pdf14 device twice. In that case we were
creating a new ctx and blowing away the original one without
proper clean-up. To avoid this, only create a new one when we need it.
|
int mk_request_error(int http_status, struct client_session *cs,
struct session_request *sr) {
int ret, fd;
mk_ptr_t message, *page = 0;
struct error_page *entry;
struct mk_list *head;
struct file_info finfo;
mk_header_set_http_status(sr, http_status);
/*
* We are nice and send error pages to clients who at least respect
* the specification
*/
if (http_status != MK_CLIENT_LENGTH_REQUIRED &&
http_status != MK_CLIENT_BAD_REQUEST &&
http_status != MK_CLIENT_REQUEST_ENTITY_TOO_LARGE) {
/* Lookup a customized error page */
mk_list_foreach(head, &sr->host_conf->error_pages) {
entry = mk_list_entry(head, struct error_page, _head);
if (entry->status != http_status) {
continue;
}
/* validate error file */
ret = mk_file_get_info(entry->real_path, &finfo);
if (ret == -1) {
break;
}
/* open file */
fd = open(entry->real_path, config->open_flags);
if (fd == -1) {
break;
}
sr->fd_file = fd;
sr->fd_is_fdt = MK_FALSE;
sr->bytes_to_send = finfo.size;
sr->headers.content_length = finfo.size;
sr->headers.real_length = finfo.size;
memcpy(&sr->file_info, &finfo, sizeof(struct file_info));
mk_header_send(cs->socket, cs, sr);
return mk_http_send_file(cs, sr);
}
}
mk_ptr_reset(&message);
switch (http_status) {
case MK_CLIENT_BAD_REQUEST:
page = mk_request_set_default_page("Bad Request",
sr->uri,
sr->host_conf->host_signature);
break;
case MK_CLIENT_FORBIDDEN:
page = mk_request_set_default_page("Forbidden",
sr->uri,
sr->host_conf->host_signature);
break;
case MK_CLIENT_NOT_FOUND:
mk_string_build(&message.data, &message.len,
"The requested URL was not found on this server.");
page = mk_request_set_default_page("Not Found",
message,
sr->host_conf->host_signature);
mk_ptr_free(&message);
break;
case MK_CLIENT_REQUEST_ENTITY_TOO_LARGE:
mk_string_build(&message.data, &message.len,
"The request entity is too large.");
page = mk_request_set_default_page("Entity too large",
message,
sr->host_conf->host_signature);
mk_ptr_free(&message);
break;
case MK_CLIENT_METHOD_NOT_ALLOWED:
page = mk_request_set_default_page("Method Not Allowed",
sr->uri,
sr->host_conf->host_signature);
break;
case MK_CLIENT_REQUEST_TIMEOUT:
case MK_CLIENT_LENGTH_REQUIRED:
break;
case MK_SERVER_NOT_IMPLEMENTED:
page = mk_request_set_default_page("Method Not Implemented",
sr->uri,
sr->host_conf->host_signature);
break;
case MK_SERVER_INTERNAL_ERROR:
page = mk_request_set_default_page("Internal Server Error",
sr->uri,
sr->host_conf->host_signature);
break;
case MK_SERVER_HTTP_VERSION_UNSUP:
mk_ptr_reset(&message);
page = mk_request_set_default_page("HTTP Version Not Supported",
message,
sr->host_conf->host_signature);
break;
}
if (page) {
sr->headers.content_length = page->len;
}
sr->headers.location = NULL;
sr->headers.cgi = SH_NOCGI;
sr->headers.pconnections_left = 0;
sr->headers.last_modified = -1;
if (!page) {
mk_ptr_reset(&sr->headers.content_type);
}
else {
mk_ptr_set(&sr->headers.content_type, "text/html\r\n");
}
mk_header_send(cs->socket, cs, sr);
if (page) {
if (sr->method != MK_HTTP_METHOD_HEAD)
mk_socket_send(cs->socket, page->data, page->len);
mk_ptr_free(page);
mk_mem_free(page);
}
/* Turn off TCP_CORK */
mk_server_cork_flag(cs->socket, TCP_CORK_OFF);
return EXIT_ERROR;
}
| 0 |
[
"CWE-20"
] |
monkey
|
b2d0e6f92310bb14a15aa2f8e96e1fb5379776dd
| 265,659,448,370,812,480,000,000,000,000,000,000,000 | 143 |
Request: new request session flag to mark those files opened by FDT
This patch aims to fix a potential DDoS problem that can be caused
by repeatedly requesting non-existent resources from the server.
When serving a static file, the core uses the Vhost FDT mechanism, but if
it sends a static error page it does a direct open(2). When closing
the resources for the same request it was just calling mk_vhost_close(),
which did not properly clear the file descriptor.
This patch adds a new field on the struct session_request called 'fd_is_fdt',
which contains MK_TRUE or MK_FALSE depending on how fd_file was opened.
Thanks to Matthew Daley <[email protected]> for reporting and
troubleshooting this problem.
Signed-off-by: Eduardo Silva <[email protected]>
|
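A hedged sketch of the idea in the Monkey commit message above: record how a file descriptor was obtained so the teardown path can release it correctly. The struct and helpers below are invented for illustration and do not match Monkey's real session_request layout or vhost API.

#include <unistd.h>
#include <stdbool.h>

struct request_sketch {
    int  fd_file;    /* open file descriptor, or -1 if none */
    bool fd_is_fdt;  /* true: owned by the FDT cache; false: plain open(2) */
};

/* Hypothetical FDT hook standing in for the vhost close path. */
static void fdt_release(int fd) { (void)fd; /* return fd to the cache */ }

static void request_close_file(struct request_sketch *sr)
{
    if (sr->fd_file < 0)
        return;
    if (sr->fd_is_fdt)
        fdt_release(sr->fd_file);   /* cached descriptor: hand it back */
    else
        close(sr->fd_file);         /* direct open(2): close it ourselves */
    sr->fd_file = -1;               /* never release the same fd twice */
}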
static int hub_configure(struct usb_hub *hub,
struct usb_endpoint_descriptor *endpoint)
{
struct usb_hcd *hcd;
struct usb_device *hdev = hub->hdev;
struct device *hub_dev = hub->intfdev;
u16 hubstatus, hubchange;
u16 wHubCharacteristics;
unsigned int pipe;
int maxp, ret, i;
char *message = "out of memory";
unsigned unit_load;
unsigned full_load;
unsigned maxchild;
hub->buffer = kmalloc(sizeof(*hub->buffer), GFP_KERNEL);
if (!hub->buffer) {
ret = -ENOMEM;
goto fail;
}
hub->status = kmalloc(sizeof(*hub->status), GFP_KERNEL);
if (!hub->status) {
ret = -ENOMEM;
goto fail;
}
mutex_init(&hub->status_mutex);
hub->descriptor = kmalloc(sizeof(*hub->descriptor), GFP_KERNEL);
if (!hub->descriptor) {
ret = -ENOMEM;
goto fail;
}
/* Request the entire hub descriptor.
* hub->descriptor can handle USB_MAXCHILDREN ports,
* but the hub can/will return fewer bytes here.
*/
ret = get_hub_descriptor(hdev, hub->descriptor);
if (ret < 0) {
message = "can't read hub descriptor";
goto fail;
} else if (hub->descriptor->bNbrPorts > USB_MAXCHILDREN) {
message = "hub has too many ports!";
ret = -ENODEV;
goto fail;
} else if (hub->descriptor->bNbrPorts == 0) {
message = "hub doesn't have any ports!";
ret = -ENODEV;
goto fail;
}
maxchild = hub->descriptor->bNbrPorts;
dev_info(hub_dev, "%d port%s detected\n", maxchild,
(maxchild == 1) ? "" : "s");
hub->ports = kzalloc(maxchild * sizeof(struct usb_port *), GFP_KERNEL);
if (!hub->ports) {
ret = -ENOMEM;
goto fail;
}
wHubCharacteristics = le16_to_cpu(hub->descriptor->wHubCharacteristics);
if (hub_is_superspeed(hdev)) {
unit_load = 150;
full_load = 900;
} else {
unit_load = 100;
full_load = 500;
}
/* FIXME for USB 3.0, skip for now */
if ((wHubCharacteristics & HUB_CHAR_COMPOUND) &&
!(hub_is_superspeed(hdev))) {
char portstr[USB_MAXCHILDREN + 1];
for (i = 0; i < maxchild; i++)
portstr[i] = hub->descriptor->u.hs.DeviceRemovable
[((i + 1) / 8)] & (1 << ((i + 1) % 8))
? 'F' : 'R';
portstr[maxchild] = 0;
dev_dbg(hub_dev, "compound device; port removable status: %s\n", portstr);
} else
dev_dbg(hub_dev, "standalone hub\n");
switch (wHubCharacteristics & HUB_CHAR_LPSM) {
case HUB_CHAR_COMMON_LPSM:
dev_dbg(hub_dev, "ganged power switching\n");
break;
case HUB_CHAR_INDV_PORT_LPSM:
dev_dbg(hub_dev, "individual port power switching\n");
break;
case HUB_CHAR_NO_LPSM:
case HUB_CHAR_LPSM:
dev_dbg(hub_dev, "no power switching (usb 1.0)\n");
break;
}
switch (wHubCharacteristics & HUB_CHAR_OCPM) {
case HUB_CHAR_COMMON_OCPM:
dev_dbg(hub_dev, "global over-current protection\n");
break;
case HUB_CHAR_INDV_PORT_OCPM:
dev_dbg(hub_dev, "individual port over-current protection\n");
break;
case HUB_CHAR_NO_OCPM:
case HUB_CHAR_OCPM:
dev_dbg(hub_dev, "no over-current protection\n");
break;
}
spin_lock_init(&hub->tt.lock);
INIT_LIST_HEAD(&hub->tt.clear_list);
INIT_WORK(&hub->tt.clear_work, hub_tt_work);
switch (hdev->descriptor.bDeviceProtocol) {
case USB_HUB_PR_FS:
break;
case USB_HUB_PR_HS_SINGLE_TT:
dev_dbg(hub_dev, "Single TT\n");
hub->tt.hub = hdev;
break;
case USB_HUB_PR_HS_MULTI_TT:
ret = usb_set_interface(hdev, 0, 1);
if (ret == 0) {
dev_dbg(hub_dev, "TT per port\n");
hub->tt.multi = 1;
} else
dev_err(hub_dev, "Using single TT (err %d)\n",
ret);
hub->tt.hub = hdev;
break;
case USB_HUB_PR_SS:
/* USB 3.0 hubs don't have a TT */
break;
default:
dev_dbg(hub_dev, "Unrecognized hub protocol %d\n",
hdev->descriptor.bDeviceProtocol);
break;
}
/* Note 8 FS bit times == (8 bits / 12000000 bps) ~= 666ns */
switch (wHubCharacteristics & HUB_CHAR_TTTT) {
case HUB_TTTT_8_BITS:
if (hdev->descriptor.bDeviceProtocol != 0) {
hub->tt.think_time = 666;
dev_dbg(hub_dev, "TT requires at most %d "
"FS bit times (%d ns)\n",
8, hub->tt.think_time);
}
break;
case HUB_TTTT_16_BITS:
hub->tt.think_time = 666 * 2;
dev_dbg(hub_dev, "TT requires at most %d "
"FS bit times (%d ns)\n",
16, hub->tt.think_time);
break;
case HUB_TTTT_24_BITS:
hub->tt.think_time = 666 * 3;
dev_dbg(hub_dev, "TT requires at most %d "
"FS bit times (%d ns)\n",
24, hub->tt.think_time);
break;
case HUB_TTTT_32_BITS:
hub->tt.think_time = 666 * 4;
dev_dbg(hub_dev, "TT requires at most %d "
"FS bit times (%d ns)\n",
32, hub->tt.think_time);
break;
}
/* probe() zeroes hub->indicator[] */
if (wHubCharacteristics & HUB_CHAR_PORTIND) {
hub->has_indicators = 1;
dev_dbg(hub_dev, "Port indicators are supported\n");
}
dev_dbg(hub_dev, "power on to power good time: %dms\n",
hub->descriptor->bPwrOn2PwrGood * 2);
/* power budgeting mostly matters with bus-powered hubs,
* and battery-powered root hubs (may provide just 8 mA).
*/
ret = usb_get_status(hdev, USB_RECIP_DEVICE, 0, &hubstatus);
if (ret) {
message = "can't get hub status";
goto fail;
}
hcd = bus_to_hcd(hdev->bus);
if (hdev == hdev->bus->root_hub) {
if (hcd->power_budget > 0)
hdev->bus_mA = hcd->power_budget;
else
hdev->bus_mA = full_load * maxchild;
if (hdev->bus_mA >= full_load)
hub->mA_per_port = full_load;
else {
hub->mA_per_port = hdev->bus_mA;
hub->limited_power = 1;
}
} else if ((hubstatus & (1 << USB_DEVICE_SELF_POWERED)) == 0) {
int remaining = hdev->bus_mA -
hub->descriptor->bHubContrCurrent;
dev_dbg(hub_dev, "hub controller current requirement: %dmA\n",
hub->descriptor->bHubContrCurrent);
hub->limited_power = 1;
if (remaining < maxchild * unit_load)
dev_warn(hub_dev,
"insufficient power available "
"to use all downstream ports\n");
hub->mA_per_port = unit_load; /* 7.2.1 */
} else { /* Self-powered external hub */
/* FIXME: What about battery-powered external hubs that
* provide less current per port? */
hub->mA_per_port = full_load;
}
if (hub->mA_per_port < full_load)
dev_dbg(hub_dev, "%umA bus power budget for each child\n",
hub->mA_per_port);
ret = hub_hub_status(hub, &hubstatus, &hubchange);
if (ret < 0) {
message = "can't get hub status";
goto fail;
}
/* local power status reports aren't always correct */
if (hdev->actconfig->desc.bmAttributes & USB_CONFIG_ATT_SELFPOWER)
dev_dbg(hub_dev, "local power source is %s\n",
(hubstatus & HUB_STATUS_LOCAL_POWER)
? "lost (inactive)" : "good");
if ((wHubCharacteristics & HUB_CHAR_OCPM) == 0)
dev_dbg(hub_dev, "%sover-current condition exists\n",
(hubstatus & HUB_STATUS_OVERCURRENT) ? "" : "no ");
/* set up the interrupt endpoint
* We use the EP's maxpacket size instead of (PORTS+1+7)/8
* bytes as USB2.0[11.12.3] says because some hubs are known
* to send more data (and thus cause overflow). For root hubs,
* maxpktsize is defined in hcd.c's fake endpoint descriptors
* to be big enough for at least USB_MAXCHILDREN ports. */
pipe = usb_rcvintpipe(hdev, endpoint->bEndpointAddress);
maxp = usb_maxpacket(hdev, pipe, usb_pipeout(pipe));
if (maxp > sizeof(*hub->buffer))
maxp = sizeof(*hub->buffer);
hub->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!hub->urb) {
ret = -ENOMEM;
goto fail;
}
usb_fill_int_urb(hub->urb, hdev, pipe, *hub->buffer, maxp, hub_irq,
hub, endpoint->bInterval);
/* maybe cycle the hub leds */
if (hub->has_indicators && blinkenlights)
hub->indicator[0] = INDICATOR_CYCLE;
mutex_lock(&usb_port_peer_mutex);
for (i = 0; i < maxchild; i++) {
ret = usb_hub_create_port_device(hub, i + 1);
if (ret < 0) {
dev_err(hub->intfdev,
"couldn't create port%d device.\n", i + 1);
break;
}
}
hdev->maxchild = i;
for (i = 0; i < hdev->maxchild; i++) {
struct usb_port *port_dev = hub->ports[i];
pm_runtime_put(&port_dev->dev);
}
mutex_unlock(&usb_port_peer_mutex);
if (ret < 0)
goto fail;
/* Update the HCD's internal representation of this hub before hub_wq
* starts getting port status changes for devices under the hub.
*/
if (hcd->driver->update_hub_device) {
ret = hcd->driver->update_hub_device(hcd, hdev,
&hub->tt, GFP_KERNEL);
if (ret < 0) {
message = "can't update HCD hub info";
goto fail;
}
}
usb_hub_adjust_deviceremovable(hdev, hub->descriptor);
hub_activate(hub, HUB_INIT);
return 0;
fail:
dev_err(hub_dev, "config failed, %s (err %d)\n",
message, ret);
/* hub_disconnect() frees urb and descriptor */
return ret;
}
| 0 |
[
"CWE-703"
] |
linux
|
e50293ef9775c5f1cf3fcc093037dd6a8c5684ea
| 263,074,280,234,775,960,000,000,000,000,000,000,000 | 306 |
USB: fix invalid memory access in hub_activate()
Commit 8520f38099cc ("USB: change hub initialization sleeps to
delayed_work") changed the hub_activate() routine to make part of it
run in a workqueue. However, the commit failed to take a reference to
the usb_hub structure or to lock the hub interface while doing so. As
a result, if a hub is plugged in and quickly unplugged before the work
routine can run, the routine will try to access memory that has been
deallocated. Or, if the hub is unplugged while the routine is
running, the memory may be deallocated while it is in active use.
This patch fixes the problem by taking a reference to the usb_hub at
the start of hub_activate() and releasing it at the end (when the work
is finished), and by locking the hub interface while the work routine
is running. It also adds a check at the start of the routine to see
if the hub has already been disconnected, in which case nothing should
be done.
Signed-off-by: Alan Stern <[email protected]>
Reported-by: Alexandru Cornea <[email protected]>
Tested-by: Alexandru Cornea <[email protected]>
Fixes: 8520f38099cc ("USB: change hub initialization sleeps to delayed_work")
CC: <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
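A generic sketch of the pattern the USB hub commit message above describes: take a reference before deferring work and drop it when the work completes, so the object cannot be freed while the deferred routine runs. A plain C11 atomic counter stands in for the kernel's kref and usb_hub types, which are not reproduced here.

#include <stdatomic.h>
#include <stdlib.h>

struct hub_sketch {
    atomic_int refcount;      /* starts at 1 when the hub is created */
    int        disconnected;  /* set by the disconnect path */
};

static void hub_get(struct hub_sketch *h) { atomic_fetch_add(&h->refcount, 1); }

static void hub_put(struct hub_sketch *h)
{
    if (atomic_fetch_sub(&h->refcount, 1) == 1)
        free(h);                          /* last reference gone */
}

/* Deferred-work body: the queueing code called hub_get() beforehand. */
static void hub_activate_work(struct hub_sketch *h)
{
    if (!h->disconnected) {
        /* ... bring the ports up ... */
    }
    hub_put(h);                           /* release the queue-time reference */
}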
QHash<BufferId, MsgId> PostgreSqlStorage::bufferLastSeenMsgIds(UserId user)
{
QHash<BufferId, MsgId> lastSeenHash;
QSqlDatabase db = logDb();
if (!beginReadOnlyTransaction(db)) {
qWarning() << "PostgreSqlStorage::bufferLastSeenMsgIds(): cannot start read only transaction!";
qWarning() << " -" << qPrintable(db.lastError().text());
return lastSeenHash;
}
QSqlQuery query(db);
query.prepare(queryString("select_buffer_lastseen_messages"));
query.bindValue(":userid", user.toInt());
safeExec(query);
if (!watchQuery(query)) {
db.rollback();
return lastSeenHash;
}
while (query.next()) {
lastSeenHash[query.value(0).toInt()] = query.value(1).toInt();
}
db.commit();
return lastSeenHash;
}
| 0 |
[
"CWE-89"
] |
quassel
|
aa1008be162cb27da938cce93ba533f54d228869
| 170,199,746,691,938,930,000,000,000,000,000,000,000 | 27 |
Fixing security vulnerability with Qt 4.8.5+ and PostgreSQL.
Properly detects whether Qt performs slash escaping in SQL queries or
not, and then configures PostgreSQL accordingly. This bug was
introduced by a bugfix in Qt 4.8.5 that disables slash escaping when
binding queries: https://bugreports.qt-project.org/browse/QTBUG-30076
Thanks to brot and Tucos.
[Fixes #1244]
|
static int au1200fb_drv_probe(struct platform_device *dev)
{
struct au1200fb_device *fbdev;
struct au1200fb_platdata *pd;
struct fb_info *fbi = NULL;
unsigned long page;
int bpp, plane, ret, irq;
print_info("" DRIVER_DESC "");
pd = dev->dev.platform_data;
if (!pd)
return -ENODEV;
/* Setup driver with options */
if (au1200fb_setup(pd))
return -ENODEV;
/* Point to the panel selected */
panel = &known_lcd_panels[panel_index];
win = &windows[window_index];
printk(DRIVER_NAME ": Panel %d %s\n", panel_index, panel->name);
printk(DRIVER_NAME ": Win %d %s\n", window_index, win->name);
/* shut gcc up */
ret = 0;
fbdev = NULL;
for (plane = 0; plane < device_count; ++plane) {
bpp = winbpp(win->w[plane].mode_winctrl1);
if (win->w[plane].xres == 0)
win->w[plane].xres = panel->Xres;
if (win->w[plane].yres == 0)
win->w[plane].yres = panel->Yres;
fbi = framebuffer_alloc(sizeof(struct au1200fb_device),
&dev->dev);
if (!fbi)
goto failed;
_au1200fb_infos[plane] = fbi;
fbdev = fbi->par;
fbdev->fb_info = fbi;
fbdev->pd = pd;
fbdev->plane = plane;
/* Allocate the framebuffer to the maximum screen size */
fbdev->fb_len = (win->w[plane].xres * win->w[plane].yres * bpp) / 8;
fbdev->fb_mem = dmam_alloc_noncoherent(&dev->dev,
PAGE_ALIGN(fbdev->fb_len),
&fbdev->fb_phys, GFP_KERNEL);
if (!fbdev->fb_mem) {
print_err("fail to allocate frambuffer (size: %dK))",
fbdev->fb_len / 1024);
return -ENOMEM;
}
/*
* Set page reserved so that mmap will work. This is necessary
* since we'll be remapping normal memory.
*/
for (page = (unsigned long)fbdev->fb_phys;
page < PAGE_ALIGN((unsigned long)fbdev->fb_phys +
fbdev->fb_len);
page += PAGE_SIZE) {
SetPageReserved(pfn_to_page(page >> PAGE_SHIFT)); /* LCD DMA is NOT coherent on Au1200 */
}
print_dbg("Framebuffer memory map at %p", fbdev->fb_mem);
print_dbg("phys=0x%08x, size=%dK", fbdev->fb_phys, fbdev->fb_len / 1024);
/* Init FB data */
if ((ret = au1200fb_init_fbinfo(fbdev)) < 0)
goto failed;
/* Register new framebuffer */
ret = register_framebuffer(fbi);
if (ret < 0) {
print_err("cannot register new framebuffer");
goto failed;
}
au1200fb_fb_set_par(fbi);
#if !defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_LOGO)
if (plane == 0)
if (fb_prepare_logo(fbi, FB_ROTATE_UR)) {
/* Start display and show logo on boot */
fb_set_cmap(&fbi->cmap, fbi);
fb_show_logo(fbi, FB_ROTATE_UR);
}
#endif
}
/* Now hook interrupt too */
irq = platform_get_irq(dev, 0);
ret = request_irq(irq, au1200fb_handle_irq,
IRQF_SHARED, "lcd", (void *)dev);
if (ret) {
print_err("fail to request interrupt line %d (err: %d)",
irq, ret);
goto failed;
}
platform_set_drvdata(dev, pd);
/* Kickstart the panel */
au1200_setpanel(panel, pd);
return 0;
failed:
/* NOTE: This only does the current plane/window that failed; others are still active */
if (fbi) {
if (fbi->cmap.len != 0)
fb_dealloc_cmap(&fbi->cmap);
kfree(fbi->pseudo_palette);
}
if (plane == 0)
free_irq(AU1200_LCD_INT, (void*)dev);
return ret;
}
| 0 |
[
"CWE-119",
"CWE-189",
"CWE-703"
] |
linux
|
7314e613d5ff9f0934f7a0f74ed7973b903315d1
| 178,249,567,596,931,670,000,000,000,000,000,000,000 | 124 |
Fix a few incorrectly checked [io_]remap_pfn_range() calls
Nico Golde reports a few straggling uses of [io_]remap_pfn_range() that
really should use the vm_iomap_memory() helper. This trivially converts
two of them to the helper, and comments about why the third one really
needs to continue to use remap_pfn_range(), and adds the missing size
check.
Reported-by: Nico Golde <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>
|
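The commit message above is about replacing hand-rolled remap_pfn_range() calls with a helper that also validates the requested mapping size. A portable sketch of that missing size check, independent of the kernel API:

#include <stdint.h>
#include <errno.h>

/* Reject a mapping request that does not fit inside the framebuffer.
 * fb_len is the real buffer length; req_off/req_len come from the caller. */
static int check_mmap_range(uint64_t fb_len, uint64_t req_off, uint64_t req_len)
{
    if (req_off > fb_len || req_len > fb_len - req_off)   /* overflow-safe */
        return -EINVAL;
    return 0;
}

The subtraction form avoids computing req_off + req_len, which could wrap around before the comparison.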
void ndpi_parse_packet_line_info(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow) {
u_int32_t a;
struct ndpi_packet_struct *packet = &flow->packet;
if((packet->payload_packet_len < 3) || (packet->payload == NULL))
return;
if(packet->packet_lines_parsed_complete != 0)
return;
packet->packet_lines_parsed_complete = 1;
ndpi_reset_packet_line_info(packet);
packet->line[packet->parsed_lines].ptr = packet->payload;
packet->line[packet->parsed_lines].len = 0;
for (a = 0; (a < packet->payload_packet_len) && (packet->parsed_lines < NDPI_MAX_PARSE_LINES_PER_PACKET); a++) {
if((a + 1) >= packet->payload_packet_len)
return; /* Return if only one byte remains (prevent invalid reads past end-of-buffer) */
if(get_u_int16_t(packet->payload, a) == ntohs(0x0d0a)) {
/* If end of line char sequence CR+NL "\r\n", process line */
if(((a + 3) <= packet->payload_packet_len)
&& (get_u_int16_t(packet->payload, a+2) == ntohs(0x0d0a))) {
/* \r\n\r\n */
int diff; /* No unsigned ! */
u_int32_t a1 = a + 4;
diff = packet->payload_packet_len - a1;
if(diff > 0) {
diff = ndpi_min(diff, sizeof(flow->initial_binary_bytes));
memcpy(&flow->initial_binary_bytes, &packet->payload[a1], diff);
flow->initial_binary_bytes_len = diff;
}
}
packet->line[packet->parsed_lines].len =
(u_int16_t)(((unsigned long) &packet->payload[a]) - ((unsigned long) packet->line[packet->parsed_lines].ptr));
/* First line of a HTTP response parsing. Expected a "HTTP/1.? ???" */
if(packet->parsed_lines == 0 && packet->line[0].len >= NDPI_STATICSTRING_LEN("HTTP/1.X 200 ") &&
strncasecmp((const char *) packet->line[0].ptr, "HTTP/1.", NDPI_STATICSTRING_LEN("HTTP/1.")) == 0 &&
packet->line[0].ptr[NDPI_STATICSTRING_LEN("HTTP/1.X ")] > '0' && /* response code between 000 and 699 */
packet->line[0].ptr[NDPI_STATICSTRING_LEN("HTTP/1.X ")] < '6') {
packet->http_response.ptr = &packet->line[0].ptr[NDPI_STATICSTRING_LEN("HTTP/1.1 ")];
packet->http_response.len = packet->line[0].len - NDPI_STATICSTRING_LEN("HTTP/1.1 ");
packet->http_num_headers++;
/* Set server HTTP response code */
if(packet->payload_packet_len >= 12) {
char buf[4];
/* Set server HTTP response code */
strncpy(buf, (char *) &packet->payload[9], 3);
buf[3] = '\0';
flow->http.response_status_code = atoi(buf);
/* https://en.wikipedia.org/wiki/List_of_HTTP_status_codes */
if((flow->http.response_status_code < 100) || (flow->http.response_status_code > 509))
flow->http.response_status_code = 0; /* Out of range */
}
}
/* "Server:" header line in HTTP response */
if(packet->line[packet->parsed_lines].len > NDPI_STATICSTRING_LEN("Server:") + 1 &&
strncasecmp((const char *) packet->line[packet->parsed_lines].ptr,
"Server:", NDPI_STATICSTRING_LEN("Server:")) == 0) {
// some stupid clients omit a space and place the servername directly after the colon
if(packet->line[packet->parsed_lines].ptr[NDPI_STATICSTRING_LEN("Server:")] == ' ') {
packet->server_line.ptr =
&packet->line[packet->parsed_lines].ptr[NDPI_STATICSTRING_LEN("Server:") + 1];
packet->server_line.len =
packet->line[packet->parsed_lines].len - (NDPI_STATICSTRING_LEN("Server:") + 1);
} else {
packet->server_line.ptr = &packet->line[packet->parsed_lines].ptr[NDPI_STATICSTRING_LEN("Server:")];
packet->server_line.len = packet->line[packet->parsed_lines].len - NDPI_STATICSTRING_LEN("Server:");
}
packet->http_num_headers++;
}
/* "Host:" header line in HTTP request */
if(packet->line[packet->parsed_lines].len > 6 &&
strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Host:", 5) == 0) {
// some stupid clients omit a space and place the hostname directly after the colon
if(packet->line[packet->parsed_lines].ptr[5] == ' ') {
packet->host_line.ptr = &packet->line[packet->parsed_lines].ptr[6];
packet->host_line.len = packet->line[packet->parsed_lines].len - 6;
} else {
packet->host_line.ptr = &packet->line[packet->parsed_lines].ptr[5];
packet->host_line.len = packet->line[packet->parsed_lines].len - 5;
}
packet->http_num_headers++;
}
/* "X-Forwarded-For:" header line in HTTP request. Commonly used for HTTP proxies. */
if(packet->line[packet->parsed_lines].len > 17 &&
strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "X-Forwarded-For:", 16) == 0) {
// some stupid clients omit a space and place the hostname directly after the colon
if(packet->line[packet->parsed_lines].ptr[16] == ' ') {
packet->forwarded_line.ptr = &packet->line[packet->parsed_lines].ptr[17];
packet->forwarded_line.len = packet->line[packet->parsed_lines].len - 17;
} else {
packet->forwarded_line.ptr = &packet->line[packet->parsed_lines].ptr[16];
packet->forwarded_line.len = packet->line[packet->parsed_lines].len - 16;
}
packet->http_num_headers++;
}
/* "Content-Type:" header line in HTTP. */
if(packet->line[packet->parsed_lines].len > 14 &&
(strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Content-Type: ", 14) == 0 ||
strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Content-type: ", 14) == 0)) {
packet->content_line.ptr = &packet->line[packet->parsed_lines].ptr[14];
packet->content_line.len = packet->line[packet->parsed_lines].len - 14;
while ((packet->content_line.len > 0) && (packet->content_line.ptr[0] == ' '))
packet->content_line.len--, packet->content_line.ptr++;
packet->http_num_headers++;
}
/* "Content-Type:" header line in HTTP AGAIN. Probably a bogus response without space after ":" */
if((packet->content_line.len == 0) && (packet->line[packet->parsed_lines].len > 13) &&
(strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Content-type:", 13) == 0)) {
packet->content_line.ptr = &packet->line[packet->parsed_lines].ptr[13];
packet->content_line.len = packet->line[packet->parsed_lines].len - 13;
packet->http_num_headers++;
}
if(packet->content_line.len > 0) {
/* application/json; charset=utf-8 */
char separator[] = {';', '\r', '\0'};
int i;
for (i = 0; separator[i] != '\0'; i++) {
char *c = memchr((char *) packet->content_line.ptr, separator[i], packet->content_line.len);
if(c != NULL)
packet->content_line.len = c - (char *) packet->content_line.ptr;
}
}
/* "Accept:" header line in HTTP request. */
if(packet->line[packet->parsed_lines].len > 8 &&
strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Accept: ", 8) == 0) {
packet->accept_line.ptr = &packet->line[packet->parsed_lines].ptr[8];
packet->accept_line.len = packet->line[packet->parsed_lines].len - 8;
packet->http_num_headers++;
}
/* "Referer:" header line in HTTP request. */
if(packet->line[packet->parsed_lines].len > 9 &&
strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Referer: ", 9) == 0) {
packet->referer_line.ptr = &packet->line[packet->parsed_lines].ptr[9];
packet->referer_line.len = packet->line[packet->parsed_lines].len - 9;
packet->http_num_headers++;
}
/* "User-Agent:" header line in HTTP request. */
if(packet->line[packet->parsed_lines].len > 12 &&
(strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "User-Agent: ", 12) == 0 ||
strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "User-agent: ", 12) == 0)) {
packet->user_agent_line.ptr = &packet->line[packet->parsed_lines].ptr[12];
packet->user_agent_line.len = packet->line[packet->parsed_lines].len - 12;
packet->http_num_headers++;
}
/* "Content-Encoding:" header line in HTTP response (and request?). */
if(packet->line[packet->parsed_lines].len > 18 &&
strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Content-Encoding: ", 18) == 0) {
packet->http_encoding.ptr = &packet->line[packet->parsed_lines].ptr[18];
packet->http_encoding.len = packet->line[packet->parsed_lines].len - 18;
packet->http_num_headers++;
}
/* "Transfer-Encoding:" header line in HTTP. */
if(packet->line[packet->parsed_lines].len > 19 &&
strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Transfer-Encoding: ", 19) == 0) {
packet->http_transfer_encoding.ptr = &packet->line[packet->parsed_lines].ptr[19];
packet->http_transfer_encoding.len = packet->line[packet->parsed_lines].len - 19;
packet->http_num_headers++;
}
/* "Content-Length:" header line in HTTP. */
if(packet->line[packet->parsed_lines].len > 16 &&
((strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Content-Length: ", 16) == 0) ||
(strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "content-length: ", 16) == 0))) {
packet->http_contentlen.ptr = &packet->line[packet->parsed_lines].ptr[16];
packet->http_contentlen.len = packet->line[packet->parsed_lines].len - 16;
packet->http_num_headers++;
}
/* "Content-Disposition"*/
if(packet->line[packet->parsed_lines].len > 21 &&
((strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Content-Disposition: ", 21) == 0))) {
packet->content_disposition_line.ptr = &packet->line[packet->parsed_lines].ptr[21];
packet->content_disposition_line.len = packet->line[packet->parsed_lines].len - 21;
packet->http_num_headers++;
}
/* "Cookie:" header line in HTTP. */
if(packet->line[packet->parsed_lines].len > 8 &&
strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Cookie: ", 8) == 0) {
packet->http_cookie.ptr = &packet->line[packet->parsed_lines].ptr[8];
packet->http_cookie.len = packet->line[packet->parsed_lines].len - 8;
packet->http_num_headers++;
}
/* "Origin:" header line in HTTP. */
if(packet->line[packet->parsed_lines].len > 8 &&
strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Origin: ", 8) == 0) {
packet->http_origin.ptr = &packet->line[packet->parsed_lines].ptr[8];
packet->http_origin.len = packet->line[packet->parsed_lines].len - 8;
packet->http_num_headers++;
}
/* "X-Session-Type:" header line in HTTP. */
if(packet->line[packet->parsed_lines].len > 16 &&
strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "X-Session-Type: ", 16) == 0) {
packet->http_x_session_type.ptr = &packet->line[packet->parsed_lines].ptr[16];
packet->http_x_session_type.len = packet->line[packet->parsed_lines].len - 16;
packet->http_num_headers++;
}
/* Identification and counting of other HTTP headers.
* We consider the most common headers, but there are many others,
* which can be seen at references below:
* - https://tools.ietf.org/html/rfc7230
* - https://en.wikipedia.org/wiki/List_of_HTTP_header_fields
*/
if((packet->line[packet->parsed_lines].len > 6 &&
(strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Date: ", 6) == 0 ||
strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Vary: ", 6) == 0 ||
strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "ETag: ", 6) == 0)) ||
(packet->line[packet->parsed_lines].len > 8 &&
strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Pragma: ", 8) == 0) ||
(packet->line[packet->parsed_lines].len > 9 &&
strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Expires: ", 9) == 0) ||
(packet->line[packet->parsed_lines].len > 12 &&
(strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Set-Cookie: ", 12) == 0 ||
strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Keep-Alive: ", 12) == 0 ||
strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Connection: ", 12) == 0)) ||
(packet->line[packet->parsed_lines].len > 15 &&
(strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Last-Modified: ", 15) == 0 ||
strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Accept-Ranges: ", 15) == 0)) ||
(packet->line[packet->parsed_lines].len > 17 &&
(strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Accept-Language: ", 17) == 0 ||
strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Accept-Encoding: ", 17) == 0)) ||
(packet->line[packet->parsed_lines].len > 27 &&
strncasecmp((const char *) packet->line[packet->parsed_lines].ptr,
"Upgrade-Insecure-Requests: ", 27) == 0)) {
/* Just count. In the future, if needed, this if can be split to parse these headers */
packet->http_num_headers++;
}
if(packet->line[packet->parsed_lines].len == 0) {
packet->empty_line_position = a;
packet->empty_line_position_set = 1;
}
if(packet->parsed_lines >= (NDPI_MAX_PARSE_LINES_PER_PACKET - 1))
return;
packet->parsed_lines++;
packet->line[packet->parsed_lines].ptr = &packet->payload[a + 2];
packet->line[packet->parsed_lines].len = 0;
a++; /* next char in the payload */
}
}
if(packet->parsed_lines >= 1) {
packet->line[packet->parsed_lines].len =
(u_int16_t)(((unsigned long) &packet->payload[packet->payload_packet_len]) -
((unsigned long) packet->line[packet->parsed_lines].ptr));
packet->parsed_lines++;
}
}
| 0 |
[
"CWE-125"
] |
nDPI
|
61066fb106efa6d3d95b67e47b662de208b2b622
| 155,564,344,891,860,040,000,000,000,000,000,000,000 | 266 |
Added check for heap buffer overflow read
|
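A small sketch of the bounds check the nDPI commit above adds: never read a two-byte value at offset a unless a+1 is still inside the payload. Names are illustrative; the real parser works on its own packet structure.

#include <stdint.h>

/* Return 1 if payload[a..a+1] holds "\r\n", 0 otherwise (including when
 * fewer than two bytes remain, which is exactly the overread being guarded). */
static int is_crlf_at(const uint8_t *payload, uint32_t payload_len, uint32_t a)
{
    if (a >= payload_len || a + 1 >= payload_len)  /* only one byte (or none) left */
        return 0;
    return payload[a] == '\r' && payload[a + 1] == '\n';
}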
**/
const CImg<T>& save_gzip_external(const char *const filename) const {
if (!filename)
throw CImgArgumentException(_cimg_instance
"save_gzip_external(): Specified filename is (null).",
cimg_instance);
if (is_empty()) { cimg::fempty(0,filename); return *this; }
CImg<charT> command(1024), filename_tmp(256), body(256);
const char
*ext = cimg::split_filename(filename,body),
*ext2 = cimg::split_filename(body,0);
std::FILE *file;
do {
if (!cimg::strcasecmp(ext,"gz")) {
if (*ext2) cimg_snprintf(filename_tmp,filename_tmp._width,"%s%c%s.%s",
cimg::temporary_path(),cimg_file_separator,cimg::filenamerand(),ext2);
else cimg_snprintf(filename_tmp,filename_tmp._width,"%s%c%s.cimg",
cimg::temporary_path(),cimg_file_separator,cimg::filenamerand());
} else {
if (*ext) cimg_snprintf(filename_tmp,filename_tmp._width,"%s%c%s.%s",
cimg::temporary_path(),cimg_file_separator,cimg::filenamerand(),ext);
else cimg_snprintf(filename_tmp,filename_tmp._width,"%s%c%s.cimg",
cimg::temporary_path(),cimg_file_separator,cimg::filenamerand());
}
if ((file=std_fopen(filename_tmp,"rb"))!=0) cimg::fclose(file);
} while (file);
save(filename_tmp);
cimg_snprintf(command,command._width,"%s -c \"%s\" > \"%s\"",
cimg::gzip_path(),
CImg<charT>::string(filename_tmp)._system_strescape().data(),
CImg<charT>::string(filename)._system_strescape().data());
cimg::system(command);
file = std_fopen(filename,"rb");
if (!file)
throw CImgIOException(_cimg_instance
"save_gzip_external(): Failed to save file '%s' with external command 'gzip'.",
cimg_instance,
filename);
else cimg::fclose(file);
std::remove(filename_tmp);
return *this;
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 75,573,048,285,621,405,000,000,000,000,000,000,000 | 43 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
template<typename t1, typename t2, typename t3, typename t4, typename t5>
CImgList<T>& assign(const CImg<t1>& img1, const CImg<t2>& img2, const CImg<t3>& img3, const CImg<t4>& img4,
const CImg<t5>& img5, const bool is_shared=false) {
assign(5);
_data[0].assign(img1,is_shared); _data[1].assign(img2,is_shared); _data[2].assign(img3,is_shared);
_data[3].assign(img4,is_shared); _data[4].assign(img5,is_shared);
return *this;
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 37,140,752,479,344,470,000,000,000,000,000,000,000 | 7 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
RemoteIo::Impl::Impl(const std::wstring& wurl, size_t blockSize)
: wpath_(wurl), blockSize_(blockSize), blocksMap_(0), size_(0),
idx_(0), isMalloced_(false), eof_(false), protocol_(fileProtocol(wurl))
{
}
| 0 |
[
"CWE-125"
] |
exiv2
|
6e3855aed7ba8bb4731fc4087ca7f9078b2f3d97
| 96,949,890,190,998,840,000,000,000,000,000,000,000 | 5 |
Fix https://github.com/Exiv2/exiv2/issues/55
|
static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
{
struct srpt_send_ioctx *ioctx;
ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
return srpt_get_cmd_state(ioctx);
}
| 0 |
[
"CWE-200",
"CWE-476"
] |
linux
|
51093254bf879bc9ce96590400a87897c7498463
| 176,533,762,070,178,600,000,000,000,000,000,000,000 | 7 |
IB/srpt: Simplify srpt_handle_tsk_mgmt()
Let the target core check task existence instead of the SRP target
driver. Additionally, let the target core check the validity of the
task management request instead of the ib_srpt driver.
This patch fixes the following kernel crash:
BUG: unable to handle kernel NULL pointer dereference at 0000000000000001
IP: [<ffffffffa0565f37>] srpt_handle_new_iu+0x6d7/0x790 [ib_srpt]
Oops: 0002 [#1] SMP
Call Trace:
[<ffffffffa05660ce>] srpt_process_completion+0xde/0x570 [ib_srpt]
[<ffffffffa056669f>] srpt_compl_thread+0x13f/0x160 [ib_srpt]
[<ffffffff8109726f>] kthread+0xcf/0xe0
[<ffffffff81613cfc>] ret_from_fork+0x7c/0xb0
Signed-off-by: Bart Van Assche <[email protected]>
Fixes: 3e4f574857ee ("ib_srpt: Convert TMR path to target_submit_tmr")
Tested-by: Alex Estrin <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Cc: Nicholas Bellinger <[email protected]>
Cc: Sagi Grimberg <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Doug Ledford <[email protected]>
|
static float my_roundf(float x) {
float t;
if (x >= 0.0) {
t = ceilf(x);
if (t - x > 0.5) t -= 1.0;
return t;
} else {
t = ceilf(-x);
if (t + x > 0.5) t -= 1.0;
return -t;
}
}
| 0 |
[
"CWE-129"
] |
LibRaw
|
89d065424f09b788f443734d44857289489ca9e2
| 40,505,116,461,209,445,000,000,000,000,000,000,000 | 12 |
fixed two more problems found by fuzzer
|
inline int strncasecmp(const char *const str1, const char *const str2, const int l) {
if (!l) return 0;
if (!str1) return str2?-1:0;
const char *nstr1 = str1, *nstr2 = str2;
int k, diff = 0; for (k = 0; k<l && !(diff = lowercase(*nstr1) - lowercase(*nstr2)); ++k) { ++nstr1; ++nstr2; }
return k!=l?diff:0;
}
| 0 |
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
| 295,805,348,959,004,670,000,000,000,000,000,000,000 | 7 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that the dimensions encoded in the file do not exceed the file size.
|
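A hedged sketch of the kind of check the CImg commit message above describes: verify that the pixel dimensions claimed in a header cannot require more bytes than the file actually contains. This is generic C, not CImg's C++ implementation.

#include <stdint.h>
#include <stdbool.h>

/* bytes_per_pixel is derived from the header's declared bit depth. */
static bool header_fits_file(uint64_t width, uint64_t height,
                             uint64_t bytes_per_pixel, uint64_t file_size)
{
    if (width == 0 || height == 0 || bytes_per_pixel == 0)
        return false;
    if (width > file_size / height)              /* overflow-safe multiply check */
        return false;
    uint64_t pixels = width * height;
    if (pixels > file_size / bytes_per_pixel)
        return false;
    return true;
}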
_dbus_auth_get_unused_bytes (DBusAuth *auth,
const DBusString **str)
{
if (!DBUS_AUTH_IN_END_STATE (auth))
return;
*str = &auth->incoming;
}
| 0 |
[
"CWE-59"
] |
dbus
|
47b1a4c41004bf494b87370987b222c934b19016
| 38,957,767,099,185,556,000,000,000,000,000,000,000 | 8 |
auth: Reject DBUS_COOKIE_SHA1 for users other than the server owner
The DBUS_COOKIE_SHA1 authentication mechanism aims to prove ownership
of a shared home directory by having the server write a secret "cookie"
into a .dbus-keyrings subdirectory of the desired identity's home
directory with 0700 permissions, and having the client prove that it can
read the cookie. This never actually worked for non-malicious clients in
the case where server uid != client uid (unless the server and client
both have privileges, such as Linux CAP_DAC_OVERRIDE or traditional
Unix uid 0) because an unprivileged server would fail to write out the
cookie, and an unprivileged client would be unable to read the resulting
file owned by the server.
Additionally, since dbus 1.7.10 we have checked that ~/.dbus-keyrings
is owned by the uid of the server (a side-effect of a check added to
harden our use of XDG_RUNTIME_DIR), further ruling out successful use
by a non-malicious client with a uid differing from the server's.
Joe Vennix of Apple Information Security discovered that the
implementation of DBUS_COOKIE_SHA1 was susceptible to a symbolic link
attack: a malicious client with write access to its own home directory
could manipulate a ~/.dbus-keyrings symlink to cause the DBusServer to
read and write in unintended locations. In the worst case this could
result in the DBusServer reusing a cookie that is known to the
malicious client, and treating that cookie as evidence that a subsequent
client connection came from an attacker-chosen uid, allowing
authentication bypass.
This is mitigated by the fact that by default, the well-known system
dbus-daemon (since 2003) and the well-known session dbus-daemon (in
stable releases since dbus 1.10.0 in 2015) only accept the EXTERNAL
authentication mechanism, and as a result will reject DBUS_COOKIE_SHA1
at an early stage, before manipulating cookies. As a result, this
vulnerability only applies to:
* system or session dbus-daemons with non-standard configuration
* third-party dbus-daemon invocations such as at-spi2-core (although
in practice at-spi2-core also only accepts EXTERNAL by default)
* third-party uses of DBusServer such as the one in Upstart
Avoiding symlink attacks in a portable way is difficult, because APIs
like openat() and Linux /proc/self/fd are not universally available.
However, because DBUS_COOKIE_SHA1 already doesn't work in practice for
a non-matching uid, we can solve this vulnerability in an easier way
without regressions, by rejecting it early (before looking at
~/.dbus-keyrings) whenever the requested identity doesn't match the
identity of the process hosting the DBusServer.
Signed-off-by: Simon McVittie <[email protected]>
Closes: https://gitlab.freedesktop.org/dbus/dbus/issues/269
Closes: CVE-2019-12749
|
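A minimal sketch of the early rejection described in the D-Bus commit message above: before any keyring file under ~/.dbus-keyrings is opened or created, refuse DBUS_COOKIE_SHA1 whenever the identity the client requests differs from the uid the server itself runs as. The function name is illustrative.

#include <unistd.h>
#include <stdbool.h>
#include <sys/types.h>

/* Called at mechanism selection time, long before any cookie handling. */
static bool cookie_sha1_identity_allowed(uid_t requested_uid)
{
    return requested_uid == geteuid();
}

Rejecting here sidesteps the symlink question entirely, because the untrusted home directory is never consulted for a non-matching uid.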
grub_crypto_memcmp (const void *a, const void *b, grub_size_t n)
{
register grub_size_t counter = 0;
const grub_uint8_t *pa, *pb;
for (pa = a, pb = b; n; pa++, pb++, n--)
{
if (*pa != *pb)
counter++;
}
return !!counter;
}
| 0 |
[
"CWE-264"
] |
grub
|
451d80e52d851432e109771bb8febafca7a5f1f2
| 144,370,345,920,944,500,000,000,000,000,000,000,000 | 13 |
Fix security issue when reading username and password
This patch fixes two integer underflows at:
* grub-core/lib/crypto.c
* grub-core/normal/auth.c
CVE-2015-8370
Signed-off-by: Hector Marco-Gisbert <[email protected]>
Signed-off-by: Ismael Ripoll-Ripoll <[email protected]>
Also-By: Andrey Borzenkov <[email protected]>
|
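A sketch of the integer-underflow guard implied by the GRUB commit message above: when handling backspace while reading a username or password, only decrement the current length if it is non-zero, so the counter can never wrap around. The input loop is simplified and not GRUB's code.

#include <stddef.h>

#define LINE_MAX_LEN 1024

/* Append or erase one character; cur_len can never underflow past 0. */
static void edit_line(char *buf, size_t *cur_len, int ch)
{
    if (ch == '\b') {
        if (*cur_len > 0)              /* the missing check: no wrap to SIZE_MAX */
            (*cur_len)--;
    } else if (*cur_len + 1 < LINE_MAX_LEN) {   /* leave room for a trailing NUL */
        buf[(*cur_len)++] = (char)ch;
    }
}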
void gnutls_x509_crt_deinit(gnutls_x509_crt_t cert)
{
if (!cert)
return;
if (cert->cert)
asn1_delete_structure(&cert->cert);
gnutls_free(cert->der.data);
gnutls_free(cert);
}
| 0 |
[
"CWE-295"
] |
gnutls
|
6e76e9b9fa845b76b0b9a45f05f4b54a052578ff
| 331,341,080,836,844,200,000,000,000,000,000,000,000 | 10 |
on certificate import check whether the two signature algorithms match
|
s32 __gettimeofday(struct timeval *tp, void *tz)
{
FILETIME ft;
SYSTEMTIME st;
s32 val;
GetSystemTime(&st);
SystemTimeToFileTime(&st, &ft);
val = (s32) ((*(LONGLONG *) &ft - TIMESPEC_TO_FILETIME_OFFSET) / 10000000);
tp->tv_sec = (u32) val;
val = (s32 ) ((*(LONGLONG *) &ft - TIMESPEC_TO_FILETIME_OFFSET - ((LONGLONG) val * (LONGLONG) 10000000)) * 100);
tp->tv_usec = val;
return 0;
}
| 0 |
[
"CWE-787"
] |
gpac
|
f3698bb1bce62402805c3fda96551a23101a32f9
| 60,734,058,918,477,660,000,000,000,000,000,000,000 | 15 |
fix buffer overrun in gf_bin128_parse
closes #1204
closes #1205
|
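A hedged sketch of the kind of bound the GPAC commit above adds when parsing a 128-bit hex identifier: stop writing once 16 output bytes have been produced, whatever the input string looks like. This is not the real gf_bin128_parse() code.

#include <stdint.h>
#include <string.h>
#include <ctype.h>

/* Parse up to 32 hex digits into a 16-byte id, skipping other characters.
 * Returns 0 on success, -1 if fewer than 16 bytes could be produced. */
static int parse_bin128(const char *s, uint8_t out[16])
{
    size_t nbytes = 0;
    int hi = -1;
    memset(out, 0, 16);
    for (; *s != '\0' && nbytes < 16; s++) {     /* hard stop at 16 bytes */
        if (!isxdigit((unsigned char)*s))
            continue;
        int v = isdigit((unsigned char)*s)
                ? *s - '0'
                : tolower((unsigned char)*s) - 'a' + 10;
        if (hi < 0) {
            hi = v;
        } else {
            out[nbytes++] = (uint8_t)((hi << 4) | v);
            hi = -1;
        }
    }
    return nbytes == 16 ? 0 : -1;
}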
IOBuf::IOBuf(IOBuf&& other) noexcept
: data_(other.data_),
buf_(other.buf_),
length_(other.length_),
capacity_(other.capacity_),
flagsAndSharedInfo_(other.flagsAndSharedInfo_) {
// Reset other so it is a clean state to be destroyed.
other.data_ = nullptr;
other.buf_ = nullptr;
other.length_ = 0;
other.capacity_ = 0;
other.flagsAndSharedInfo_ = 0;
// If other was part of the chain, assume ownership of the rest of its chain.
// (It's only valid to perform move assignment on the head of a chain.)
if (other.next_ != &other) {
next_ = other.next_;
next_->prev_ = this;
other.next_ = &other;
prev_ = other.prev_;
prev_->next_ = this;
other.prev_ = &other;
}
// Sanity check to make sure that other is in a valid state to be destroyed.
DCHECK_EQ(other.prev_, &other);
DCHECK_EQ(other.next_, &other);
}
| 0 |
[
"CWE-787"
] |
folly
|
4f304af1411e68851bdd00ef6140e9de4616f7d3
| 123,780,998,619,928,920,000,000,000,000,000,000,000 | 29 |
[folly] Add additional overflow checks to IOBuf - CVE-2021-24036
Summary:
As per title
CVE-2021-24036
Reviewed By: jan
Differential Revision: D27938605
fbshipit-source-id: 7481c54ae6fbb7b67b15b3631d5357c2f7043f9c
|
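The folly commit message above adds overflow checks to IOBuf's size arithmetic. IOBuf itself is C++ and no folly API is reproduced here; the sketch below shows the same style of check in plain C.

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

/* Compute headroom + data_len + tailroom, refusing to wrap around. */
static bool total_capacity(size_t headroom, size_t data_len, size_t tailroom,
                           size_t *out)
{
    if (data_len > SIZE_MAX - headroom)
        return false;
    size_t sum = headroom + data_len;
    if (tailroom > SIZE_MAX - sum)
        return false;
    *out = sum + tailroom;
    return true;
}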
markrunend(Tuplesortstate *state, int tapenum)
{
unsigned int len = 0;
LogicalTapeWrite(state->tapeset, tapenum, (void *) &len, sizeof(len));
}
| 0 |
[
"CWE-209"
] |
postgres
|
804b6b6db4dcfc590a468e7be390738f9f7755fb
| 20,394,163,605,673,026,000,000,000,000,000,000,000 | 6 |
Fix column-privilege leak in error-message paths
While building error messages to return to the user,
BuildIndexValueDescription, ExecBuildSlotValueDescription and
ri_ReportViolation would happily include the entire key or entire row in
the result returned to the user, even if the user didn't have access to
view all of the columns being included.
Instead, include only those columns which the user is providing or which
the user has select rights on. If the user does not have any rights
to view the table or any of the columns involved then no detail is
provided and a NULL value is returned from BuildIndexValueDescription
and ExecBuildSlotValueDescription. Note that, for key cases, the user
must have access to all of the columns for the key to be shown; a
partial key will not be returned.
Further, in master only, do not return any data for cases where row
security is enabled on the relation and row security should be applied
for the user. This required a bit of refactoring and moving of things
around related to RLS- note the addition of utils/misc/rls.c.
Back-patch all the way, as column-level privileges are now in all
supported versions.
This has been assigned CVE-2014-8161, but since the issue and the patch
have already been publicized on pgsql-hackers, there's no point in trying
to hide this commit.
|
//! Return a reference to one pixel value of one image of the list \const.
const T& operator()(const unsigned int pos, const unsigned int x, const unsigned int y=0,
const unsigned int z=0, const unsigned int c=0) const {
return (*this)[pos](x,y,z,c);
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 139,870,759,579,778,130,000,000,000,000,000,000,000 | 4 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
static int technisat_usb2_eeprom_lrc_read(struct dvb_usb_device *d,
u16 offset, u8 *b, u16 length, u8 tries)
{
u8 bo = offset & 0xff;
struct i2c_msg msg[] = {
{
.addr = 0x50 | ((offset >> 8) & 0x3),
.buf = &bo,
.len = 1
}, {
.addr = 0x50 | ((offset >> 8) & 0x3),
.flags = I2C_M_RD,
.buf = b,
.len = length
}
};
while (tries--) {
int status;
if (i2c_transfer(&d->i2c_adap, msg, 2) != 2)
break;
status =
technisat_usb2_calc_lrc(b, length - 1) == b[length - 1];
if (status)
return 0;
}
return -EREMOTEIO;
}
| 0 |
[
"CWE-125"
] |
media_tree
|
0c4df39e504bf925ab666132ac3c98d6cbbe380b
| 46,227,033,086,679,070,000,000,000,000,000,000,000 | 32 |
media: technisat-usb2: break out of loop at end of buffer
Ensure we do not access the buffer beyond the end if no 0xff byte
is encountered.
Reported-by: [email protected]
Signed-off-by: Sean Young <[email protected]>
Reviewed-by: Kees Cook <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]>
|
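A sketch of the loop-termination fix in the media commit message above: when scanning a buffer for a sentinel byte, the exit condition must depend on the index staying inside the buffer, so a missing sentinel cannot push reads past the end. The buffer layout is illustrative.

#include <stdint.h>
#include <stddef.h>

/* Return the index of the first 0xff byte, or -1 if none exists.
 * The bound is checked before every dereference. */
static long find_sentinel(const uint8_t *buf, size_t len)
{
    for (size_t i = 0; i < len; i++) {
        if (buf[i] == 0xff)
            return (long)i;
    }
    return -1;      /* sentinel missing: stop instead of walking past len */
}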
static int multi_getsock(struct Curl_easy *data,
curl_socket_t *socks)
{
struct connectdata *conn = data->conn;
/* The no connection case can happen when this is called from
curl_multi_remove_handle() => singlesocket() => multi_getsock().
*/
if(!conn)
return 0;
switch(data->mstate) {
default:
return 0;
case MSTATE_RESOLVING:
return Curl_resolv_getsock(data, socks);
case MSTATE_PROTOCONNECTING:
case MSTATE_PROTOCONNECT:
return protocol_getsock(data, conn, socks);
case MSTATE_DO:
case MSTATE_DOING:
return doing_getsock(data, conn, socks);
case MSTATE_TUNNELING:
return waitproxyconnect_getsock(conn, socks);
case MSTATE_CONNECTING:
return waitconnect_getsock(conn, socks);
case MSTATE_DOING_MORE:
return domore_getsock(data, conn, socks);
case MSTATE_DID: /* since is set after DO is completed, we switch to
waiting for the same as the PERFORMING state */
case MSTATE_PERFORMING:
return Curl_single_getsock(data, conn, socks);
}
}
| 0 |
[
"CWE-416",
"CWE-295"
] |
curl
|
7f4a9a9b2a49547eae24d2e19bc5c346e9026479
| 39,548,044,736,399,060,000,000,000,000,000,000,000 | 41 |
openssl: associate/detach the transfer from connection
CVE-2021-22901
Bug: https://curl.se/docs/CVE-2021-22901.html
|
int rose_parse_facilities(unsigned char *p, unsigned packet_len,
struct rose_facilities_struct *facilities)
{
int facilities_len, len;
facilities_len = *p++;
if (facilities_len == 0 || (unsigned)facilities_len > packet_len)
return 0;
while (facilities_len >= 3 && *p == 0x00) {
facilities_len--;
p++;
switch (*p) {
case FAC_NATIONAL: /* National */
len = rose_parse_national(p + 1, facilities, facilities_len - 1);
break;
case FAC_CCITT: /* CCITT */
len = rose_parse_ccitt(p + 1, facilities, facilities_len - 1);
break;
default:
printk(KERN_DEBUG "ROSE: rose_parse_facilities - unknown facilities family %02X\n", *p);
len = 1;
break;
}
if (len < 0)
return 0;
if (WARN_ON(len >= facilities_len))
return 0;
facilities_len -= len + 1;
p += len + 1;
}
return facilities_len == 0;
}
| 0 |
[
"CWE-20"
] |
linux
|
e0bccd315db0c2f919e7fcf9cb60db21d9986f52
| 1,700,683,732,903,922,400,000,000,000,000,000,000 | 39 |
rose: Add length checks to CALL_REQUEST parsing
Define some constant offsets for CALL_REQUEST based on the description
at <http://www.techfest.com/networking/wan/x25plp.htm> and the
definition of ROSE as using 10-digit (5-byte) addresses. Use them
consistently. Validate all implicit and explicit facilities lengths.
Validate the address length byte rather than either trusting or
assuming its value.
Signed-off-by: Ben Hutchings <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
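A generic sketch of the length-validation pattern in the ROSE commit message above: before consuming any field of a parsed frame, confirm that the remaining byte count actually covers it, and sanity-check the declared address length instead of trusting it. The constants and layout below are illustrative, not the real X.25/ROSE offsets.

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

#define ADDR_LEN 5   /* illustrative fixed address size in bytes */

struct parsed_call {
    uint8_t dest[ADDR_LEN];
    uint8_t src[ADDR_LEN];
};

static bool parse_call_request(const uint8_t *p, size_t len, struct parsed_call *out)
{
    if (len < 1 + 2 * (size_t)ADDR_LEN)
        return false;                 /* too short for length byte + both addresses */
    if (p[0] != ADDR_LEN)             /* illustrative check of the declared length;  */
        return false;                 /* the real frame packs two digit counts here  */
    for (size_t i = 0; i < ADDR_LEN; i++) {
        out->dest[i] = p[1 + i];
        out->src[i]  = p[1 + ADDR_LEN + i];
    }
    return true;
}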
longlong val_time_packed(THD *thd)
{
DBUG_ASSERT(0);
return 0;
}
| 0 |
[
"CWE-617"
] |
server
|
807945f2eb5fa22e6f233cc17b85a2e141efe2c8
| 56,412,433,964,609,650,000,000,000,000,000,000,000 | 5 |
MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item.
|
static int ZEND_FASTCALL ZEND_IS_SMALLER_OR_EQUAL_SPEC_TMP_CV_HANDLER(ZEND_OPCODE_HANDLER_ARGS)
{
zend_op *opline = EX(opline);
zend_free_op free_op1;
zval *result = &EX_T(opline->result.u.var).tmp_var;
compare_function(result,
_get_zval_ptr_tmp(&opline->op1, EX(Ts), &free_op1 TSRMLS_CC),
_get_zval_ptr_cv(&opline->op2, EX(Ts), BP_VAR_R TSRMLS_CC) TSRMLS_CC);
ZVAL_BOOL(result, (Z_LVAL_P(result) <= 0));
zval_dtor(free_op1.var);
ZEND_VM_NEXT_OPCODE();
}
| 0 |
[] |
php-src
|
ce96fd6b0761d98353761bf78d5bfb55291179fd
| 13,704,553,848,023,750,000,000,000,000,000,000,000 | 14 |
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs). Patch by Rasmus
|
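A small sketch of the check referenced in the PHP commit message above: a length-counted string that is meant to be a filesystem path must not contain an embedded NUL byte, because the C APIs underneath would silently truncate it at that byte. Names are illustrative.

#include <string.h>
#include <stddef.h>
#include <stdbool.h>

/* True only if path contains no embedded '\0' within its first len bytes. */
static bool path_is_clean(const char *path, size_t len)
{
    return memchr(path, '\0', len) == NULL;
}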
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
int ret = 0;
bool nolock = false;
if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
return 0;
if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode))
nolock = true;
if (wbc->sync_mode == WB_SYNC_ALL) {
if (nolock)
trans = btrfs_join_transaction_nolock(root);
else
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
ret = btrfs_commit_transaction(trans, root);
}
return ret;
}
| 0 |
[
"CWE-310"
] |
linux-2.6
|
9c52057c698fb96f8f07e7a4bcf4801a092bda89
| 321,190,926,674,336,750,000,000,000,000,000,000,000 | 24 |
Btrfs: fix hash overflow handling
The handling for directory crc hash overflows was fairly obscure:
split_leaf returns EOVERFLOW when we try to extend the item and that is
supposed to bubble up to userland. For a while it did so, but along the
way we added better handling of errors and forced the FS readonly if we
hit IO errors during the directory insertion.
Along the way, we started testing only for EEXIST and the EOVERFLOW case
was dropped. The end result is that we may force the FS readonly if we
catch a directory hash bucket overflow.
This fixes a few problem spots. First I add tests for EOVERFLOW in the
places where we can safely just return the error up the chain.
btrfs_rename is harder though, because it tries to insert the new
directory item only after it has already unlinked anything the rename
was going to overwrite. Rather than adding very complex logic, I added
a helper to test for the hash overflow case early while it is still safe
to bail out.
Snapshot and subvolume creation had a similar problem, so they are using
the new helper now too.
Signed-off-by: Chris Mason <[email protected]>
Reported-by: Pascal Junod <[email protected]>
|
static void ext4_handle_error(struct super_block *sb)
{
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
if (sb->s_flags & MS_RDONLY)
return;
if (!test_opt(sb, ERRORS_CONT)) {
journal_t *journal = EXT4_SB(sb)->s_journal;
EXT4_SB(sb)->s_mount_opt |= EXT4_MOUNT_ABORT;
if (journal)
jbd2_journal_abort(journal, -EIO);
}
if (test_opt(sb, ERRORS_RO)) {
printk(KERN_CRIT "Remounting filesystem read-only\n");
sb->s_flags |= MS_RDONLY;
}
ext4_commit_super(sb, es, 1);
if (test_opt(sb, ERRORS_PANIC))
panic("EXT4-fs (device %s): panic forced after error\n",
sb->s_id);
}
| 0 |
[
"CWE-20"
] |
linux-2.6
|
4ec110281379826c5cf6ed14735e47027c3c5765
| 143,268,387,270,296,880,000,000,000,000,000,000,000 | 26 |
ext4: Add sanity checks for the superblock before mounting the filesystem
This avoids insane superblock configurations that could lead to kernel
oops due to null pointer dereferences.
http://bugzilla.kernel.org/show_bug.cgi?id=12371
Thanks to David Maciejak at Fortinet's FortiGuard Global Security
Research Team, who discovered this bug independently of (but at
approximately the same time as) Thiemo Nagel, who submitted the patch.
Signed-off-by: Thiemo Nagel <[email protected]>
Signed-off-by: "Theodore Ts'o" <[email protected]>
Cc: [email protected]
|
restore_sigcontext(struct pt_regs *regs, struct sigcontext *usc,
int *pd0)
{
int err = 0;
unsigned int ccr;
unsigned int usp;
unsigned int er0;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
#define COPY(r) err |= __get_user(regs->r, &usc->sc_##r) /* restore passed registers */
COPY(er1);
COPY(er2);
COPY(er3);
COPY(er5);
COPY(pc);
ccr = regs->ccr & 0x10;
COPY(ccr);
#undef COPY
regs->ccr &= 0xef;
regs->ccr |= ccr;
regs->orig_er0 = -1; /* disable syscall checks */
err |= __get_user(usp, &usc->sc_usp);
wrusp(usp);
err |= __get_user(er0, &usc->sc_er0);
*pd0 = er0;
return err;
}
| 0 |
[] |
linux-2.6
|
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
| 124,284,134,392,282,460,000,000,000,000,000,000,000 | 30 |
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]
Add a keyctl to install a process's session keyring onto its parent. This
replaces the parent's session keyring. Because the COW credential code does
not permit one process to change another process's credentials directly, the
change is deferred until userspace next starts executing again. Normally this
will be after a wait*() syscall.
To support this, three new security hooks have been provided:
cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in
the blank security creds and key_session_to_parent() - which asks the LSM if
the process may replace its parent's session keyring.
The replacement may only happen if the process has the same ownership details
as its parent, and the process has LINK permission on the session keyring, and
the session keyring is owned by the process, and the LSM permits it.
Note that this requires alteration to each architecture's notify_resume path.
This has been done for all arches barring blackfin, m68k* and xtensa, all of
which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the
replacement to be performed at the point the parent process resumes userspace
execution.
This allows the userspace AFS pioctl emulation to fully emulate newpag() and
the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to
alter the parent process's PAG membership. However, since kAFS doesn't use
PAGs per se, but rather dumps the keys into the session keyring, the session
keyring of the parent must be replaced if, for example, VIOCSETTOK is passed
the newpag flag.
This can be tested with the following program:
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>
#define KEYCTL_SESSION_TO_PARENT 18
#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)
int main(int argc, char **argv)
{
key_serial_t keyring, key;
long ret;
keyring = keyctl_join_session_keyring(argv[1]);
OSERROR(keyring, "keyctl_join_session_keyring");
key = add_key("user", "a", "b", 1, keyring);
OSERROR(key, "add_key");
ret = keyctl(KEYCTL_SESSION_TO_PARENT);
OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");
return 0;
}
Compiled and linked with -lkeyutils, you should see something like:
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
355907932 --alswrv 4043 -1 \_ keyring: _uid.4043
[dhowells@andromeda ~]$ /tmp/newpag
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
1055658746 --alswrv 4043 4043 \_ user: a
[dhowells@andromeda ~]$ /tmp/newpag hello
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: hello
340417692 --alswrv 4043 4043 \_ user: a
Where the test program creates a new session keyring, sticks a user key named
'a' into it and then installs it on its parent.
Signed-off-by: David Howells <[email protected]>
Signed-off-by: James Morris <[email protected]>
|
static int mov_write_identification(AVIOContext *pb, AVFormatContext *s)
{
MOVMuxContext *mov = s->priv_data;
int i;
mov_write_ftyp_tag(pb,s);
if (mov->mode == MODE_PSP) {
int video_streams_nb = 0, audio_streams_nb = 0, other_streams_nb = 0;
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
video_streams_nb++;
else if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
audio_streams_nb++;
else
other_streams_nb++;
}
if (video_streams_nb != 1 || audio_streams_nb != 1 || other_streams_nb) {
av_log(s, AV_LOG_ERROR, "PSP mode need one video and one audio stream\n");
return AVERROR(EINVAL);
}
return mov_write_uuidprof_tag(pb, s);
}
return 0;
}
| 0 |
[
"CWE-369"
] |
FFmpeg
|
2c0e98a0b478284bdff6d7a4062522605a8beae5
| 149,620,625,520,765,930,000,000,000,000,000,000,000 | 26 |
avformat/movenc: Write version 2 of audio atom if channels is not known
The version 1 needs the channel count and would divide by 0
Fixes: division by 0
Fixes: fpe_movenc.c_1108_1.ogg
Fixes: fpe_movenc.c_1108_2.ogg
Fixes: fpe_movenc.c_1108_3.wav
Found-by: #CHEN HONGXU# <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]>
(cherry picked from commit fa19fbcf712a6a6cc5a5cfdc3254a97b9bce6582)
Signed-off-by: Michael Niedermayer <[email protected]>
|
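The fix described above says that version 1 of the audio atom needs the channel count and would divide by zero when it is unknown. As a minimal, hypothetical sketch of the same guard (not the actual movenc code), check the channel count before dividing:

#include <stdio.h>

/* Hypothetical helper: returns bytes per channel per frame, or -1 when the
 * channel count is unknown (0), instead of dividing by zero. */
static int bytes_per_channel(int frame_bytes, int channels)
{
    if (channels <= 0)      /* unknown or invalid channel count */
        return -1;          /* caller falls back to another atom version */
    return frame_bytes / channels;
}

int main(void)
{
    printf("%d\n", bytes_per_channel(4096, 2));  /* 2048 */
    printf("%d\n", bytes_per_channel(4096, 0));  /* -1, no division by zero */
    return 0;
}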
longlong Item_func_coalesce::int_op()
{
DBUG_ASSERT(fixed == 1);
null_value=0;
for (uint i=0 ; i < arg_count ; i++)
{
longlong res=args[i]->val_int();
if (!args[i]->null_value)
return res;
}
null_value=1;
return 0;
}
| 0 |
[
"CWE-617"
] |
server
|
807945f2eb5fa22e6f233cc17b85a2e141efe2c8
| 259,178,590,828,829,500,000,000,000,000,000,000,000 | 13 |
MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item.
|
static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
{
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
return skcipher_crypt_blkcipher(req, alg->encrypt);
}
| 0 |
[
"CWE-476",
"CWE-703"
] |
linux
|
9933e113c2e87a9f46a40fde8dafbf801dca1ab9
| 239,401,327,383,903,000,000,000,000,000,000,000,000 | 8 |
crypto: skcipher - Add missing API setkey checks
The API setkey checks for key sizes and alignment went AWOL during the
skcipher conversion. This patch restores them.
Cc: <[email protected]>
Fixes: 4e6c3df4d729 ("crypto: skcipher - Add low-level skcipher...")
Reported-by: Baozeng <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
|
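The commit message notes that the key-size checks were lost during the skcipher conversion. A minimal userspace sketch of the same idea, using hypothetical names rather than the kernel crypto API: validate the key length against the algorithm's declared bounds before handing it to the low-level setkey.

#include <stddef.h>
#include <errno.h>
#include <stdio.h>

/* Hypothetical algorithm descriptor; min_keysize/max_keysize mirror the
 * bounds a real cipher would declare. */
struct alg_desc {
    size_t min_keysize;
    size_t max_keysize;
    int (*setkey)(const unsigned char *key, size_t keylen);
};

static int aes_setkey_stub(const unsigned char *key, size_t keylen)
{
    (void)key; (void)keylen;
    return 0;                      /* pretend the low-level setkey succeeded */
}

/* API-level wrapper: reject out-of-range keys before the low-level call. */
static int checked_setkey(const struct alg_desc *alg,
                          const unsigned char *key, size_t keylen)
{
    if (keylen < alg->min_keysize || keylen > alg->max_keysize)
        return -EINVAL;
    return alg->setkey(key, keylen);
}

int main(void)
{
    struct alg_desc aes = { 16, 32, aes_setkey_stub };
    unsigned char key[8] = {0};

    printf("%d\n", checked_setkey(&aes, key, sizeof(key))); /* -EINVAL: too short */
    return 0;
}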
static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct sctp_setadaptation adaptation;
if (len != sizeof(struct sctp_setadaptation))
return -EINVAL;
adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind;
if (copy_to_user(optval, &adaptation, len))
return -EFAULT;
return 0;
}
| 0 |
[
"CWE-476"
] |
linux
|
ea2bc483ff5caada7c4aa0d5fbf87d3a6590273d
| 118,283,946,612,572,540,000,000,000,000,000,000,000 | 14 |
[SCTP]: Fix assertion (!atomic_read(&sk->sk_rmem_alloc)) failed message
In the current implementation, LKSCTP does receive buffer accounting for
data in sctp_receive_queue and pd_lobby. However, LKSCTP doesn't do
accounting for data in frag_list when data is fragmented. In addition,
LKSCTP doesn't do accounting for data in the reasm and lobby queues in
structure sctp_ulpq.
When there is data in these queues, an "assertion failed" message is printed
in inet_sock_destruct because sk_rmem_alloc of oldsk does not become 0
when the socket is destroyed.
Signed-off-by: Tsutomu Fujii <[email protected]>
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
PixarLogVGetField(TIFF* tif, uint32 tag, va_list ap)
{
PixarLogState *sp = (PixarLogState *)tif->tif_data;
switch (tag) {
case TIFFTAG_PIXARLOGQUALITY:
*va_arg(ap, int*) = sp->quality;
break;
case TIFFTAG_PIXARLOGDATAFMT:
*va_arg(ap, int*) = sp->user_datafmt;
break;
default:
return (*sp->vgetparent)(tif, tag, ap);
}
return (1);
}
| 0 |
[
"CWE-369",
"CWE-787"
] |
libtiff
|
391e77fcd217e78b2c51342ac3ddb7100ecacdd2
| 20,308,635,462,220,051,000,000,000,000,000,000,000 | 16 |
* libtiff/tif_pixarlog.c: fix potential buffer write overrun in
PixarLogDecode() on corrupted/unexpected images (reported by Mathias Svensson)
|
libxlDomainShutdownThread(void *opaque)
{
struct libxlEventHandlerThreadInfo *shutdown_info = opaque;
virDomainObj *vm = NULL;
libxl_event *ev = shutdown_info->event;
libxlDriverPrivate *driver = shutdown_info->driver;
virObjectEvent *dom_event = NULL;
libxl_shutdown_reason xl_reason = ev->u.domain_shutdown.shutdown_reason;
g_autoptr(libxlDriverConfig) cfg = libxlDriverConfigGet(driver);
libxl_domain_config d_config;
libxl_domain_config_init(&d_config);
vm = virDomainObjListFindByID(driver->domains, ev->domid);
if (!vm) {
/* Nothing to do if we can't find the virDomainObj */
goto cleanup;
}
if (libxlDomainObjBeginJob(driver, vm, LIBXL_JOB_MODIFY) < 0)
goto cleanup;
if (xl_reason == LIBXL_SHUTDOWN_REASON_POWEROFF) {
virDomainObjSetState(vm, VIR_DOMAIN_SHUTOFF,
VIR_DOMAIN_SHUTOFF_SHUTDOWN);
dom_event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_SHUTDOWN);
switch ((virDomainLifecycleAction) vm->def->onPoweroff) {
case VIR_DOMAIN_LIFECYCLE_ACTION_DESTROY:
libxlDomainShutdownHandleDestroy(driver, vm);
goto endjob;
case VIR_DOMAIN_LIFECYCLE_ACTION_RESTART:
case VIR_DOMAIN_LIFECYCLE_ACTION_RESTART_RENAME:
libxlDomainShutdownHandleRestart(driver, vm);
goto endjob;
case VIR_DOMAIN_LIFECYCLE_ACTION_PRESERVE:
case VIR_DOMAIN_LIFECYCLE_ACTION_COREDUMP_DESTROY:
case VIR_DOMAIN_LIFECYCLE_ACTION_COREDUMP_RESTART:
case VIR_DOMAIN_LIFECYCLE_ACTION_LAST:
goto endjob;
}
} else if (xl_reason == LIBXL_SHUTDOWN_REASON_CRASH) {
virDomainObjSetState(vm, VIR_DOMAIN_SHUTOFF,
VIR_DOMAIN_SHUTOFF_CRASHED);
dom_event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_CRASHED);
switch ((virDomainLifecycleAction) vm->def->onCrash) {
case VIR_DOMAIN_LIFECYCLE_ACTION_DESTROY:
libxlDomainShutdownHandleDestroy(driver, vm);
goto endjob;
case VIR_DOMAIN_LIFECYCLE_ACTION_RESTART:
case VIR_DOMAIN_LIFECYCLE_ACTION_RESTART_RENAME:
libxlDomainShutdownHandleRestart(driver, vm);
goto endjob;
case VIR_DOMAIN_LIFECYCLE_ACTION_PRESERVE:
case VIR_DOMAIN_LIFECYCLE_ACTION_LAST:
goto endjob;
case VIR_DOMAIN_LIFECYCLE_ACTION_COREDUMP_DESTROY:
libxlDomainAutoCoreDump(driver, vm);
libxlDomainShutdownHandleDestroy(driver, vm);
goto endjob;
case VIR_DOMAIN_LIFECYCLE_ACTION_COREDUMP_RESTART:
libxlDomainAutoCoreDump(driver, vm);
libxlDomainShutdownHandleRestart(driver, vm);
goto endjob;
}
} else if (xl_reason == LIBXL_SHUTDOWN_REASON_REBOOT) {
virDomainObjSetState(vm, VIR_DOMAIN_SHUTOFF,
VIR_DOMAIN_SHUTOFF_SHUTDOWN);
dom_event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_SHUTDOWN);
switch ((virDomainLifecycleAction) vm->def->onReboot) {
case VIR_DOMAIN_LIFECYCLE_ACTION_DESTROY:
libxlDomainShutdownHandleDestroy(driver, vm);
goto endjob;
case VIR_DOMAIN_LIFECYCLE_ACTION_RESTART:
case VIR_DOMAIN_LIFECYCLE_ACTION_RESTART_RENAME:
libxlDomainShutdownHandleRestart(driver, vm);
goto endjob;
case VIR_DOMAIN_LIFECYCLE_ACTION_PRESERVE:
case VIR_DOMAIN_LIFECYCLE_ACTION_COREDUMP_DESTROY:
case VIR_DOMAIN_LIFECYCLE_ACTION_COREDUMP_RESTART:
case VIR_DOMAIN_LIFECYCLE_ACTION_LAST:
goto endjob;
}
} else if (xl_reason == LIBXL_SHUTDOWN_REASON_SOFT_RESET) {
libxlDomainObjPrivate *priv = vm->privateData;
if (libxlRetrieveDomainConfigurationWrapper(cfg->ctx, vm->def->id,
&d_config) != 0) {
VIR_ERROR(_("Failed to retrieve config for VM '%s'. "
"Unable to perform soft reset. Destroying VM"),
vm->def->name);
libxlDomainShutdownHandleDestroy(driver, vm);
goto endjob;
}
if (priv->deathW) {
libxl_evdisable_domain_death(cfg->ctx, priv->deathW);
priv->deathW = NULL;
}
if (libxl_domain_soft_reset(cfg->ctx, &d_config, vm->def->id,
NULL, NULL) != 0) {
VIR_ERROR(_("Failed to soft reset VM '%s'. Destroying VM"),
vm->def->name);
libxlDomainShutdownHandleDestroy(driver, vm);
goto endjob;
}
libxl_evenable_domain_death(cfg->ctx, vm->def->id, 0, &priv->deathW);
libxlDomainUnpauseWrapper(cfg->ctx, vm->def->id);
} else {
VIR_INFO("Unhandled shutdown_reason %d", xl_reason);
}
endjob:
libxlDomainObjEndJob(driver, vm);
cleanup:
virDomainObjEndAPI(&vm);
virObjectEventStateQueue(driver->domainEventState, dom_event);
libxl_event_free(cfg->ctx, ev);
VIR_FREE(shutdown_info);
libxl_domain_config_dispose(&d_config);
}
| 0 |
[
"CWE-703",
"CWE-667"
] |
libvirt
|
5c5df5310f72be4878a71ace47074c54e0d1a27d
| 208,589,487,898,965,000,000,000,000,000,000,000,000 | 131 |
libxl: Search for virDomainObj in event handler threads
libxl can deliver events and invoke callbacks on any application thread
calling into libxl. This can cause deadlock in the libvirt libxl driver
Thread 19 (Thread 0x7f31411ec700 (LWP 14068) "libvirtd"):
#0 0x00007f318520cc7d in __lll_lock_wait () from /lib64/libpthread.so.0
#1 0x00007f3185205ed5 in pthread_mutex_lock () from /lib64/libpthread.so.0
#2 0x00007f3189488015 in virMutexLock (m=<optimized out>) at ../../src/util/virthread.c:79
#3 0x00007f3189463f3b in virObjectLock (anyobj=<optimized out>) at ../../src/util/virobject.c:433
#4 0x00007f31894f2f41 in virDomainObjListSearchID (payload=0x7f317400a6d0, name=<optimized out>, data=0x7f31411eaeac) at ../../src/conf/virdomainobjlist.c:105
#5 0x00007f3189437ac5 in virHashSearch (ctable=0x7f3124025a30, iter=iter@entry=0x7f31894f2f30 <virDomainObjListSearchID>, data=data@entry=0x7f31411eaeac, name=name@entry=0x0) at ../../src/util/virhash.c:745
#6 0x00007f31894f3919 in virDomainObjListFindByID (doms=0x7f3124025430, id=<optimized out>) at ../../src/conf/virdomainobjlist.c:121
#7 0x00007f3152f292e5 in libxlDomainEventHandler (data=0x7f3124023d80, event=0x7f310c010ae0) at ../../src/libxl/libxl_domain.c:660
#8 0x00007f3152c6ff5d in egc_run_callbacks (egc=egc@entry=0x7f31411eaf50) at libxl_event.c:1427
#9 0x00007f3152c718bd in libxl__egc_cleanup (egc=0x7f31411eaf50) at libxl_event.c:1458
#10 libxl__ao_inprogress (ao=ao@entry=0x7f310c00b8a0, file=file@entry=0x7f3152cce987 "libxl_domain.c", line=line@entry=730, func=func@entry=0x7f3152ccf750 <__func__.22238> "libxl_domain_unpause") at libxl_event.c:2047
#11 0x00007f3152c8c5b8 in libxl_domain_unpause (ctx=0x7f3124015a40, domid=<optimized out>, ao_how=ao_how@entry=0x0) at libxl_domain.c:730
#12 0x00007f3152f2a584 in libxl_domain_unpause_0x041200 (domid=<optimized out>, ctx=<optimized out>) at /usr/include/libxl.h:1756
#13 libxlDomainStart (driver=driver@entry=0x7f3124023d80, vm=vm@entry=0x7f317400a6d0, start_paused=start_paused@entry=false, restore_fd=restore_fd@entry=-1, restore_ver=<optimized out>, restore_ver@entry=2) at ../../src/libxl/libxl_domain.c:1482
#14 0x00007f3152f2a6e3 in libxlDomainStartNew (driver=driver@entry=0x7f3124023d80, vm=vm@entry=0x7f317400a6d0, start_paused=start_paused@entry=false) at ../../src/libxl/libxl_domain.c:1545
#15 0x00007f3152f2a789 in libxlDomainShutdownHandleRestart (driver=0x7f3124023d80, vm=0x7f317400a6d0) at ../../src/libxl/libxl_domain.c:464
#16 0x00007f3152f2a9e4 in libxlDomainShutdownThread (opaque=<optimized out>) at ../../src/libxl/libxl_domain.c:559
#17 0x00007f3189487ee2 in virThreadHelper (data=<optimized out>) at ../../src/util/virthread.c:196
#18 0x00007f3185203539 in start_thread () from /lib64/libpthread.so.0
#19 0x00007f3184f3becf in clone () from /lib64/libc.so.6
Frame 16 runs a thread created to handle domain shutdown processing for
domid 28712. In this case the event contained the reboot reason, so the
old domain is destroyed and a new one is created by libxlDomainStart new.
After starting the domain, it is unpaused by calling libxl_domain_unpause
in frame 12. While the thread is running within libxl, libxl takes the
opportunity to deliver a pending domain shutdown event for unrelated domid
28710. While searching for the associated virDomainObj by ID, a deadlock is
encountered when attempting to lock the virDomainObj for domid 28712, which
is already locked since this thread is processing its shutdown event.
The deadlock can be avoided by moving the search for a virDomainObj
associated with the event domid to the shutdown thread. The same is done
for the death thread.
Signed-off-by: Jim Fehlig <[email protected]>
Reviewed-by: Daniel P. Berrangé <[email protected]>
Reviewed-by: Ján Tomko <[email protected]>
|
IsLogicalVolumeDescriptorSupported (
UDF_LOGICAL_VOLUME_DESCRIPTOR *LogicalVolDesc
)
{
//
// Check for a valid UDF revision range
//
switch (LogicalVolDesc->DomainIdentifier.Suffix.Domain.UdfRevision) {
case 0x0102:
case 0x0150:
case 0x0200:
case 0x0201:
case 0x0250:
case 0x0260:
break;
default:
return FALSE;
}
//
// Check for a single Partition Map
//
if (LogicalVolDesc->NumberOfPartitionMaps > 1) {
return FALSE;
}
//
// UDF 1.02 revision supports only Type 1 (Physical) partitions, but
// let's check it any way.
//
// PartitionMap[0] -> type
// PartitionMap[1] -> length (in bytes)
//
if (LogicalVolDesc->PartitionMaps[0] != 1 ||
LogicalVolDesc->PartitionMaps[1] != 6) {
return FALSE;
}
return TRUE;
}
| 0 |
[] |
edk2
|
4df8f5bfa28b8b881e506437e8f08d92c1a00370
| 99,030,790,954,646,540,000,000,000,000,000,000,000 | 39 |
MdeModulePkg/PartitionDxe: Add check for underlying device block size
REF:https://bugzilla.tianocore.org/show_bug.cgi?id=828
Within FindAnchorVolumeDescriptorPointer():
Add a check for the underlying device block size to ensure it is greater
than the size of an Anchor Volume Descriptor Pointer.
Cc: Ruiyu Ni <[email protected]>
Cc: Jiewen Yao <[email protected]>
Contributed-under: TianoCore Contribution Agreement 1.1
Signed-off-by: Hao Wu <[email protected]>
Reviewed-by: Paulo Alcantara <[email protected]>
Acked-by: Star Zeng <[email protected]>
|
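The referenced fix adds a check that the underlying device's block size is at least the size of an Anchor Volume Descriptor Pointer before one is read. A generic sketch of that guard, with a hypothetical structure standing in for the UDF descriptor (not the EDK II code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical on-disk structure standing in for the UDF AVDP. */
struct anchor_desc {
    uint16_t tag_id;
    uint8_t  reserved[30];
};

/* Only interpret the block as an anchor descriptor when the block is
 * large enough to contain one; otherwise the read would run past it. */
static int read_anchor(const uint8_t *block, size_t block_size,
                       struct anchor_desc *out)
{
    if (block_size < sizeof(*out))
        return -1;                      /* device block too small */
    memcpy(out, block, sizeof(*out));
    return 0;
}

int main(void)
{
    uint8_t tiny[8] = {0};
    struct anchor_desc d;
    printf("%d\n", read_anchor(tiny, sizeof(tiny), &d)); /* -1, rejected */
    return 0;
}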
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
if (!entry->show)
return -EIO;
if (!rdev->mddev)
return -EBUSY;
return entry->show(rdev, page);
}
| 0 |
[
"CWE-200"
] |
linux
|
b6878d9e03043695dbf3fa1caa6dfc09db225b16
| 234,991,087,952,492,060,000,000,000,000,000,000,000 | 11 |
md: use kzalloc() when bitmap is disabled
In drivers/md/md.c get_bitmap_file() uses kmalloc() for creating a
mdu_bitmap_file_t called "file".
5769 file = kmalloc(sizeof(*file), GFP_NOIO);
5770 if (!file)
5771 return -ENOMEM;
This structure is copied to user space at the end of the function.
5786 if (err == 0 &&
5787 copy_to_user(arg, file, sizeof(*file)))
5788 err = -EFAULT
But if bitmap is disabled only the first byte of "file" is initialized
with zero, so it's possible to read some bytes (up to 4095) of kernel
space memory from user space. This is an information leak.
5775 /* bitmap disabled, zero the first byte and copy out */
5776 if (!mddev->bitmap_info.file)
5777 file->pathname[0] = '\0';
Signed-off-by: Benjamin Randazzo <[email protected]>
Signed-off-by: NeilBrown <[email protected]>
|
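The leak described above comes from allocating a struct with kmalloc(), initializing only its first byte, and copying the whole struct to user space. A userspace sketch of the safe pattern (zero the whole object before filling in the fields you have), with hypothetical struct and field names; calloc() plays the role kzalloc() plays in the kernel fix:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct bitmap_file_info {
    char pathname[4096];
};

/* Zero-initialize so untouched bytes never carry stale heap contents when
 * the whole struct is later copied out. */
static struct bitmap_file_info *get_bitmap_file(int bitmap_enabled)
{
    struct bitmap_file_info *file = calloc(1, sizeof(*file));
    if (!file)
        return NULL;
    if (bitmap_enabled)
        strncpy(file->pathname, "/var/lib/md/bitmap", sizeof(file->pathname) - 1);
    /* when disabled, pathname stays all zeroes rather than just pathname[0] */
    return file;
}

int main(void)
{
    struct bitmap_file_info *f = get_bitmap_file(0);
    printf("%d\n", f ? (int)f->pathname[100] : -1);   /* 0: no leaked bytes */
    free(f);
    return 0;
}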
bson_iter_key_len (const bson_iter_t *iter)
{
/*
* f i e l d n a m e \0 _
* ^ ^
* | |
* iter->key iter->d1
*
*/
BSON_ASSERT (iter->d1 > iter->key);
return iter->d1 - iter->key - 1;
}
| 0 |
[
"CWE-125"
] |
mongo-c-driver
|
0d9a4d98bfdf4acd2c0138d4aaeb4e2e0934bd84
| 47,229,744,017,021,780,000,000,000,000,000,000,000 | 12 |
Fix for CVE-2018-16790 -- Verify bounds before binary length read.
As reported here: https://jira.mongodb.org/browse/CDRIVER-2819,
a heap overread occurs due a failure to correctly verify data
bounds.
In the original check, len - o returns the data left including the
sizeof(l) we just read. Instead, the comparison should check
against the data left NOT including the binary int32, i.e. just
subtype (byte*) instead of int32 subtype (byte*).
Added in test for corrupted BSON example.
|
njs_array_handler_map(njs_vm_t *vm, njs_iterator_args_t *args,
njs_value_t *entry, int64_t n)
{
njs_int_t ret;
njs_array_t *retval;
njs_value_t this;
retval = args->data;
if (retval->object.fast_array) {
njs_set_invalid(&retval->start[n]);
}
if (njs_is_valid(entry)) {
ret = njs_array_iterator_call(vm, args, entry, n);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
if (njs_is_valid(&vm->retval)) {
if (retval->object.fast_array) {
retval->start[n] = vm->retval;
} else {
njs_set_array(&this, retval);
ret = njs_value_property_i64_set(vm, &this, n, &vm->retval);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
}
}
}
return NJS_OK;
}
| 0 |
[
"CWE-703"
] |
njs
|
2e00e95473861846aa8538be87db07699d9f676d
| 147,851,069,459,672,950,000,000,000,000,000,000,000 | 36 |
Fixed Array.prototype.slice() with slow "this" argument.
Previously, when "this" argument was not a fast array, but the "deleted" array
was a fast array, the "deleted" array may be left in uninitialized state if
"this" argument had gaps.
This fix is to ensure that "deleted" is properly initialized.
This fixes #485 issue on Github.
|
CString CAuthBase::GetRemoteIP() const {
if (m_pSock) return m_pSock->GetRemoteIP();
return "";
}
| 0 |
[
"CWE-476"
] |
znc
|
2390ad111bde16a78c98ac44572090b33c3bd2d8
| 289,186,345,435,321,260,000,000,000,000,000,000,000 | 4 |
Fix null pointer dereference in echo-message
The bug was introduced while fixing #1705. If a client did not enable
echo-message, and doesn't have a network, it crashes.
Thanks to LunarBNC for reporting this
|
void fli_read_lc(FILE *f, s_fli_header *fli_header, unsigned char *old_framebuf, unsigned char *framebuf)
{
unsigned short yc, firstline, numline;
unsigned char *pos;
memcpy(framebuf, old_framebuf, fli_header->width * fli_header->height);
firstline = fli_read_short(f);
numline = fli_read_short(f);
for (yc=0; yc < numline; yc++) {
unsigned short xc, pc, pcnt;
pc=fli_read_char(f);
xc=0;
pos=framebuf+(fli_header->width * (firstline+yc));
for (pcnt=pc; pcnt>0; pcnt--) {
unsigned short ps,skip;
skip=fli_read_char(f);
ps=fli_read_char(f);
xc+=skip;
if (ps & 0x80) {
unsigned char val;
ps=-(signed char)ps;
val=fli_read_char(f);
memset(&(pos[xc]), val, ps);
xc+=ps;
} else {
fread(&(pos[xc]), ps, 1, f);
xc+=ps;
}
}
}
}
| 1 |
[
"CWE-787"
] |
GIMP
|
edb251a7ef1602d20a5afcbf23f24afb163de63b
| 329,816,824,658,080,550,000,000,000,000,000,000,000 | 30 |
Bug 739133 - (CVE-2017-17785) Heap overflow while parsing FLI files.
It is possible to trigger a heap overflow while parsing FLI files. The
RLE decoder is vulnerable to out of boundary writes due to lack of
boundary checks.
The variable "framebuf" points to a memory area which was allocated
with fli_header->width * fli_header->height bytes. The RLE decoder
therefore must never write beyond that limit.
If an illegal frame is detected, the parser won't stop, which means
that the next valid sequence is properly parsed again. This should
allow GIMP to parse FLI files as good as possible even if they are
broken by an attacker or by accident.
While at it, I changed the variable xc to be of type size_t, because
the multiplication of width and height could overflow a 16 bit type.
Signed-off-by: Tobias Stoeckmann <[email protected]>
|
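The message above says the decoder must never write beyond the width*height framebuffer. A minimal sketch of an RLE-style fill with the missing boundary checks added (hypothetical code, not the GIMP decoder):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Write `count` copies of `val` at row `y`, column `x` of a w*h framebuffer,
 * refusing any write that would land outside the buffer. */
static int rle_fill(uint8_t *fb, size_t w, size_t h,
                    size_t y, size_t x, size_t count, uint8_t val)
{
    if (y >= h || x > w || count > w - x)   /* row or run out of bounds */
        return -1;
    memset(fb + y * w + x, val, count);
    return 0;
}

int main(void)
{
    uint8_t fb[8 * 8] = {0};
    printf("%d\n", rle_fill(fb, 8, 8, 2, 6, 2, 0xFF));  /*  0: fits exactly */
    printf("%d\n", rle_fill(fb, 8, 8, 2, 6, 3, 0xFF));  /* -1: would overflow row */
    return 0;
}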
void ModuleSQL::OnUnloadModule(Module* mod)
{
SQL::Error err(SQL::BAD_DBID);
Dispatcher->LockQueue();
unsigned int i = qq.size();
while (i > 0)
{
i--;
if (qq[i].q->creator == mod)
{
if (i == 0)
{
// need to wait until the query is done
// (the result will be discarded)
qq[i].c->lock.Lock();
qq[i].c->lock.Unlock();
}
qq[i].q->OnError(err);
delete qq[i].q;
qq.erase(qq.begin() + i);
}
}
Dispatcher->UnlockQueue();
// clean up any result queue entries
Dispatcher->OnNotify();
}
| 0 |
[
"CWE-476"
] |
inspircd
|
8745660fcdac7c1b80c94cfc0ff60928cd4dd4b7
| 156,396,500,941,435,420,000,000,000,000,000,000,000 | 26 |
Initialise and deallocate the MySQL library correctly.
|
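The one-line message says the MySQL client library is now initialised and deallocated correctly. For reference, the usual pattern with the MySQL C API is to pair mysql_library_init() with mysql_library_end() around all other use of the library; the sketch below assumes the client headers and -lmysqlclient are available and keeps error handling minimal.

#include <mysql.h>
#include <stdio.h>

int main(void)
{
    if (mysql_library_init(0, NULL, NULL)) {   /* once, before any other call */
        fprintf(stderr, "could not initialize MySQL client library\n");
        return 1;
    }

    /* ... create connections, run queries ... */

    mysql_library_end();                       /* once, after all use is done */
    return 0;
}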
dav_uri_encode (const char *path_to_encode)
{
char *path;
static const char *allowed_reserved_chars = "/";
path = g_uri_escape_string (path_to_encode,
allowed_reserved_chars,
FALSE);
return path;
}
| 0 |
[] |
gvfs
|
f81ff2108ab3b6e370f20dcadd8708d23f499184
| 196,676,120,567,406,580,000,000,000,000,000,000,000 | 11 |
dav: don't unescape the uri twice
path_equal tries to unescape the path before comparing. Unfortunately,
this function is also used for paths that are already unescaped, so
unescaping can fail. This commit reverts the changes made in
commit 50af53d and unescapes only URIs that haven't been unescaped yet.
https://bugzilla.gnome.org/show_bug.cgi?id=743298
|
static int json_skip_string(struct json_parser *parser)
{
for (; parser->data != parser->end; parser->data++) {
if (*parser->data == '"') {
parser->data++;
json_parser_update_input_pos(parser);
return 1;
}
if (*parser->data == '\\') {
switch (*++parser->data) {
case '"':
case '\\':
case '/':
case 'b':
case 'f':
case 'n':
case 'r':
case 't':
break;
case 'u':
if (parser->end - parser->data < 4)
return -1;
parser->data += 3;
break;
default:
return -1;
}
}
}
json_parser_update_input_pos(parser);
return 0;
}
| 0 |
[] |
core
|
973769d74433de3c56c4ffdf4f343cb35d98e4f7
| 204,391,884,805,966,970,000,000,000,000,000,000,000 | 32 |
lib: json - Escape invalid UTF-8 as unicode bytes
This prevents dovecot from crashing if invalid UTF-8 input
is given.
|
bool asn1_read_OctetString(struct asn1_data *data, TALLOC_CTX *mem_ctx, DATA_BLOB *blob)
{
int len;
ZERO_STRUCTP(blob);
if (!asn1_start_tag(data, ASN1_OCTET_STRING)) return false;
len = asn1_tag_remaining(data);
if (len < 0) {
data->has_error = true;
return false;
}
*blob = data_blob_talloc(mem_ctx, NULL, len+1);
if (!blob->data || blob->length < len) {
data->has_error = true;
return false;
}
asn1_read(data, blob->data, len);
asn1_end_tag(data);
blob->length--;
blob->data[len] = 0;
if (data->has_error) {
data_blob_free(blob);
*blob = data_blob_null;
return false;
}
return true;
}
| 1 |
[
"CWE-399"
] |
samba
|
9d989c9dd7a5b92d0c5d65287935471b83b6e884
| 285,728,600,288,800,030,000,000,000,000,000,000,000 | 27 |
CVE-2015-7540: lib: util: Check *every* asn1 return call and early return.
BUG: https://bugzilla.samba.org/show_bug.cgi?id=9187
Signed-off-by: Jeremy Allison <[email protected]>
Reviewed-by: Volker Lendecke <[email protected]>
Autobuild-User(master): Jeremy Allison <[email protected]>
Autobuild-Date(master): Fri Sep 19 01:29:00 CEST 2014 on sn-devel-104
(cherry picked from commit b9d3fd4cc551df78a7b066ee8ce43bbaa3ff994a)
|
bool Field_float::send_binary(Protocol *protocol)
{
ASSERT_COLUMN_MARKED_FOR_READ;
return protocol->store((float) Field_float::val_real(), dec, (String*) 0);
}
| 0 |
[
"CWE-416",
"CWE-703"
] |
server
|
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
| 323,207,020,420,800,100,000,000,000,000,000,000,000 | 5 |
MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at the parsing stage. Since
the expr item is allocated on expr_arena, all the items it contains must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter a prematurely freed item.
When table is reopened from cache vcol_info contains stale
expression. We refresh expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]>
|
flatpak_dir_create_system_child_repo (FlatpakDir *self,
GLnxLockFile *file_lock,
const char *optional_commit,
GError **error)
{
g_autoptr(GFile) cache_dir = NULL;
g_assert (!self->user);
cache_dir = flatpak_ensure_system_user_cache_dir_location (error);
if (cache_dir == NULL)
return NULL;
return flatpak_dir_create_child_repo (self, cache_dir, file_lock, optional_commit, error);
}
| 0 |
[
"CWE-668"
] |
flatpak
|
cd2142888fc4c199723a0dfca1f15ea8788a5483
| 208,740,772,329,355,200,000,000,000,000,000,000,000 | 15 |
Don't expose /proc when running apply_extra
As shown by CVE-2019-5736, it is sometimes possible for the sandbox
app to access outside files using /proc/self/exe. This is not
typically an issue for flatpak as the sandbox runs as the user which
has no permissions to e.g. modify the host files.
However, when installing apps using extra-data into the system repo
we *do* actually run a sandbox as root. So, in this case we disable mounting
/proc in the sandbox, which will neuter attacks like this.
|
bool smbXcli_conn_has_async_calls(struct smbXcli_conn *conn)
{
return ((tevent_queue_length(conn->outgoing) != 0)
|| (talloc_array_length(conn->pending) != 0));
}
| 0 |
[
"CWE-20"
] |
samba
|
a819d2b440aafa3138d95ff6e8b824da885a70e9
| 40,777,724,677,973,863,000,000,000,000,000,000,000 | 5 |
CVE-2015-5296: libcli/smb: make sure we require signing when we demand encryption on a session
BUG: https://bugzilla.samba.org/show_bug.cgi?id=11536
Signed-off-by: Stefan Metzmacher <[email protected]>
Reviewed-by: Jeremy Allison <[email protected]>
|
git_commit_list_node *git_commit_list_pop(git_commit_list **stack)
{
git_commit_list *top = *stack;
git_commit_list_node *item = top ? top->item : NULL;
if (top) {
*stack = top->next;
git__free(top);
}
return item;
}
| 0 |
[] |
libgit2
|
3316f666566f768eb8aa8de521a5262524dc3424
| 30,976,489,813,947,674,000,000,000,000,000,000,000 | 11 |
commit_list: fix possible buffer overflow in `commit_quick_parse`
The function `commit_quick_parse` provides a way to quickly parse
parts of a commit without storing or verifying most of its
metadata. The first thing it does is calculating the number of
parents by skipping "parent " lines until it finds the first
non-parent line. Afterwards, this parent count is passed to
`alloc_parents`, which will allocate an array to store all the
parent.
To calculate the amount of storage required for the parents
array, `alloc_parents` simply multiplies the number of parents
by the size of each element. This already screams "buffer
overflow", and in fact the problem is made worse by the
result being cast to a `uint32_t`.
In fact, triggering this is possible: git-hash-object(1) will
happily write a commit with multiple millions of parents for you.
I've stopped at 67,108,864 parents as git-hash-object(1)
unfortunately soaks up the complete object without streaming
anything to disk and thus will cause an OOM situation at a later
point. The point here is: this commit was about 4.1GB of size but
compressed down to 24MB and thus easy to distribute.
The above doesn't yet trigger the buffer overflow, thus. As the
array's elements are all pointers which are 8 bytes on 64 bit, we
need a total of 536,870,912 parents to trigger the overflow to
`0`. The effect is that we're now underallocating the array
and do an out-of-bound writes. As the buffer is kindly provided
by the adversary, this may easily result in code execution.
Extrapolating from the test file with 67m commits to the one with
536m commits results in a factor of 8. Thus the uncompressed
contents would be about 32GB in size and the compressed ones
192MB. While still easily distributable via the network, only
servers will have that amount of RAM and not cause an
out-of-memory condition previous to triggering the overflow. This
at least makes this attack not an easy vector for client-side use
of libgit2.
|
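The overflow described above happens when a parent count is multiplied by the element size and the product wraps. A standard guard is to compare the count against SIZE_MAX divided by the element size before allocating (generic sketch, not the libgit2 code):

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

/* Allocate n elements of size elem_size, or return NULL if n * elem_size
 * would overflow size_t (the condition the commit describes). */
static void *alloc_array(size_t n, size_t elem_size)
{
    if (elem_size && n > SIZE_MAX / elem_size)
        return NULL;
    return malloc(n * elem_size);
}

int main(void)
{
    void *ok  = alloc_array(16, sizeof(void *));
    void *bad = alloc_array(SIZE_MAX / 2, sizeof(void *));  /* would wrap */
    printf("%d %d\n", ok != NULL, bad == NULL);
    free(ok);
    return 0;
}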
ftp_getresp(ftpbuf_t *ftp)
{
char *buf;
if (ftp == NULL) {
return 0;
}
buf = ftp->inbuf;
ftp->resp = 0;
while (1) {
if (!ftp_readline(ftp)) {
return 0;
}
/* Break out when the end-tag is found */
if (isdigit(ftp->inbuf[0]) && isdigit(ftp->inbuf[1]) && isdigit(ftp->inbuf[2]) && ftp->inbuf[3] == ' ') {
break;
}
}
/* translate the tag */
if (!isdigit(ftp->inbuf[0]) || !isdigit(ftp->inbuf[1]) || !isdigit(ftp->inbuf[2])) {
return 0;
}
ftp->resp = 100 * (ftp->inbuf[0] - '0') + 10 * (ftp->inbuf[1] - '0') + (ftp->inbuf[2] - '0');
memmove(ftp->inbuf, ftp->inbuf + 4, FTP_BUFSIZE - 4);
if (ftp->extra) {
ftp->extra -= 4;
}
return 1;
}
| 0 |
[
"CWE-189"
] |
php-src
|
ac2832935435556dc593784cd0087b5e576bbe4d
| 171,636,188,712,790,640,000,000,000,000,000,000,000 | 36 |
Fix bug #69545 - avoid overflow when reading list
|
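The message only says an overflow is avoided when reading the listing; the details of the php-src change are not given here. As a generic illustration of the idea, check a running total before growing it from network-supplied line lengths (hypothetical code, not the actual fix):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Accumulate per-line sizes into a total, failing instead of wrapping. */
static int add_size(size_t *total, size_t line_len)
{
    if (line_len > SIZE_MAX - *total)   /* would overflow the running total */
        return -1;
    *total += line_len;
    return 0;
}

int main(void)
{
    size_t total = 0;
    printf("%d\n", add_size(&total, 128));          /* 0 */
    printf("%d\n", add_size(&total, SIZE_MAX));     /* -1, rejected */
    return 0;
}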
template<typename t, typename tc>
CImg<T>& draw_graph(const CImg<t>& data,
const tc *const color, const float opacity=1,
const unsigned int plot_type=1, const int vertex_type=1,
const double ymin=0, const double ymax=0, const unsigned int pattern=~0U) {
if (is_empty() || _height<=1) return *this;
if (!color)
throw CImgArgumentException(_cimg_instance
"draw_graph(): Specified color is (null).",
cimg_instance);
// Create shaded colors for displaying bar plots.
CImg<tc> color1, color2;
if (plot_type==3) {
color1.assign(_spectrum); color2.assign(_spectrum);
cimg_forC(*this,c) {
color1[c] = (tc)std::min((float)cimg::type<tc>::max(),(float)color[c]*1.2f);
color2[c] = (tc)(color[c]*0.4f);
}
}
// Compute min/max and normalization factors.
const ulongT
siz = data.size(),
_siz1 = siz - (plot_type!=3),
siz1 = _siz1?_siz1:1;
const unsigned int
_width1 = _width - (plot_type!=3),
width1 = _width1?_width1:1;
double m = ymin, M = ymax;
if (ymin==ymax) m = (double)data.max_min(M);
if (m==M) { --m; ++M; }
const float ca = (float)(M-m)/(_height - 1);
bool init_hatch = true;
// Draw graph edges
switch (plot_type%4) {
case 1 : { // Segments
int oX = 0, oY = (int)((data[0] - m)/ca);
if (siz==1) {
const int Y = (int)((*data - m)/ca);
draw_line(0,Y,width() - 1,Y,color,opacity,pattern);
} else {
const float fx = (float)_width/siz1;
for (ulongT off = 1; off<siz; ++off) {
const int
X = (int)(off*fx) - 1,
Y = (int)((data[off]-m)/ca);
draw_line(oX,oY,X,Y,color,opacity,pattern,init_hatch);
oX = X; oY = Y;
init_hatch = false;
}
}
} break;
case 2 : { // Spline
const CImg<t> ndata(data._data,siz,1,1,1,true);
int oY = (int)((data[0] - m)/ca);
cimg_forX(*this,x) {
const int Y = (int)((ndata._cubic_atX((float)x*siz1/width1)-m)/ca);
if (x>0) draw_line(x,oY,x + 1,Y,color,opacity,pattern,init_hatch);
init_hatch = false;
oY = Y;
}
} break;
case 3 : { // Bars
const int Y0 = (int)(-m/ca);
const float fx = (float)_width/siz1;
int oX = 0;
cimg_foroff(data,off) {
const int
X = (int)((off + 1)*fx) - 1,
Y = (int)((data[off] - m)/ca);
draw_rectangle(oX,Y0,X,Y,color,opacity).
draw_line(oX,Y,oX,Y0,color2.data(),opacity).
draw_line(oX,Y0,X,Y0,Y<=Y0?color2.data():color1.data(),opacity).
draw_line(X,Y,X,Y0,color1.data(),opacity).
draw_line(oX,Y,X,Y,Y<=Y0?color1.data():color2.data(),opacity);
oX = X + 1;
}
} break;
default : break; // No edges
}
// Draw graph points
const unsigned int wb2 = plot_type==3?_width1/(2*siz):0;
const float fx = (float)_width1/siz1;
switch (vertex_type%8) {
case 1 : { // Point
cimg_foroff(data,off) {
const int
X = (int)(off*fx + wb2),
Y = (int)((data[off]-m)/ca);
draw_point(X,Y,color,opacity);
}
} break;
case 2 : { // Straight Cross
cimg_foroff(data,off) {
const int
X = (int)(off*fx + wb2),
Y = (int)((data[off]-m)/ca);
draw_line(X - 3,Y,X + 3,Y,color,opacity).draw_line(X,Y - 3,X,Y + 3,color,opacity);
}
} break;
case 3 : { // Diagonal Cross
cimg_foroff(data,off) {
const int
X = (int)(off*fx + wb2),
Y = (int)((data[off]-m)/ca);
draw_line(X - 3,Y - 3,X + 3,Y + 3,color,opacity).draw_line(X - 3,Y + 3,X + 3,Y - 3,color,opacity);
}
} break;
case 4 : { // Filled Circle
cimg_foroff(data,off) {
const int
X = (int)(off*fx + wb2),
Y = (int)((data[off]-m)/ca);
draw_circle(X,Y,3,color,opacity);
}
} break;
case 5 : { // Outlined circle
cimg_foroff(data,off) {
const int
X = (int)(off*fx + wb2),
Y = (int)((data[off]-m)/ca);
draw_circle(X,Y,3,color,opacity,0U);
}
} break;
case 6 : { // Square
cimg_foroff(data,off) {
const int
X = (int)(off*fx + wb2),
Y = (int)((data[off]-m)/ca);
draw_rectangle(X - 3,Y - 3,X + 3,Y + 3,color,opacity,~0U);
}
} break;
case 7 : { // Diamond
cimg_foroff(data,off) {
const int
X = (int)(off*fx + wb2),
Y = (int)((data[off]-m)/ca);
draw_line(X,Y - 4,X + 4,Y,color,opacity).
draw_line(X + 4,Y,X,Y + 4,color,opacity).
draw_line(X,Y + 4,X - 4,Y,color,opacity).
draw_line(X - 4,Y,X,Y - 4,color,opacity);
}
} break;
default : break; // No points
}
return *this;
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 315,993,485,519,094,280,000,000,000,000,000,000,000 | 149 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
static int cipso_v4_gentag_enum(const struct cipso_v4_doi *doi_def,
const struct netlbl_lsm_secattr *secattr,
unsigned char *buffer,
u32 buffer_len)
{
int ret_val;
u32 tag_len;
u32 level;
if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL))
return -EPERM;
ret_val = cipso_v4_map_lvl_hton(doi_def,
secattr->attr.mls.lvl,
&level);
if (ret_val != 0)
return ret_val;
if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
ret_val = cipso_v4_map_cat_enum_hton(doi_def,
secattr,
&buffer[4],
buffer_len - 4);
if (ret_val < 0)
return ret_val;
tag_len = 4 + ret_val;
} else
tag_len = 4;
buffer[0] = CIPSO_V4_TAG_ENUM;
buffer[1] = tag_len;
buffer[3] = level;
return tag_len;
}
| 0 |
[
"CWE-362"
] |
linux-2.6
|
f6d8bd051c391c1c0458a30b2a7abcd939329259
| 318,484,059,368,477,440,000,000,000,000,000,000,000 | 36 |
inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We can't insert an rcu_head in struct ip_options since it's included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
int decode_dc_prg_sa( abitreader* huffr, short* block )
{
// decode next bit of dc coefficient
block[ 0 ] = huffr->read( 1 );
// return 0 if everything is ok
return 0;
}
| 0 |
[
"CWE-399",
"CWE-190"
] |
lepton
|
6a5ceefac1162783fffd9506a3de39c85c725761
| 100,149,476,834,774,770,000,000,000,000,000,000,000 | 8 |
fix #111
|
static void display_resize(struct DisplayState *ds)
{
if (qxl0->mode == QXL_MODE_VGA) {
qemu_spice_display_resize(&qxl0->ssd);
}
}
| 0 |
[] |
qemu-kvm
|
5ff4e36c804157bd84af43c139f8cd3a59722db9
| 127,678,275,490,476,400,000,000,000,000,000,000,000 | 6 |
qxl: async io support using new spice api
Some of the QXL port i/o commands are waiting for the spice server to
complete certain actions. Add async versions for these commands, so we
don't block the vcpu while the spice server processes the command.
Instead the qxl device will raise an IRQ when done.
The async command processing relies on an added QXLInterface::async_complete
and added QXLWorker::*_async additions, in spice server qxl >= 3.1
Signed-off-by: Gerd Hoffmann <[email protected]>
Signed-off-by: Alon Levy <[email protected]>
|
static int fourcomponent(i_ctx_t * i_ctx_p, ref *space, int *n)
{
*n = 4;
return 0;
}
| 0 |
[] |
ghostpdl
|
b326a71659b7837d3acde954b18bda1a6f5e9498
| 236,211,743,342,161,550,000,000,000,000,000,000,000 | 5 |
Bug 699655: Properly check the return value....
...when getting a value from a dictionary
|
int virDomainLeaseInsert(virDomainDefPtr def,
virDomainLeaseDefPtr lease)
{
if (virDomainLeaseInsertPreAlloc(def) < 0)
return -1;
virDomainLeaseInsertPreAlloced(def, lease);
return 0;
}
| 0 |
[
"CWE-212"
] |
libvirt
|
a5b064bf4b17a9884d7d361733737fb614ad8979
| 172,129,238,747,655,740,000,000,000,000,000,000,000 | 9 |
conf: Don't format http cookies unless VIR_DOMAIN_DEF_FORMAT_SECURE is used
Starting with 3b076391befc3fe72deb0c244ac6c2b4c100b410
(v6.1.0-122-g3b076391be) we support http cookies. Since they may contain
somewhat sensitive information we should not format them into the XML
unless VIR_DOMAIN_DEF_FORMAT_SECURE is asserted.
Reported-by: Han Han <[email protected]>
Signed-off-by: Peter Krempa <[email protected]>
Reviewed-by: Erik Skultety <[email protected]>
|
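The change described above only formats HTTP cookies into the XML when the caller passes a "secure" formatting flag. The general pattern, sketched with hypothetical names that only mirror the idea of VIR_DOMAIN_DEF_FORMAT_SECURE, is to gate serialization of sensitive fields on such a flag:

#include <stdio.h>

#define FORMAT_SECURE 0x1   /* hypothetical flag */

struct disk_src {
    const char *url;
    const char *cookie;     /* potentially sensitive */
};

static void format_disk(FILE *out, const struct disk_src *src, unsigned flags)
{
    fprintf(out, "<source url='%s'>\n", src->url);
    if (src->cookie && (flags & FORMAT_SECURE))
        fprintf(out, "  <cookie>%s</cookie>\n", src->cookie);
    fprintf(out, "</source>\n");
}

int main(void)
{
    struct disk_src s = { "https://example.com/disk.img", "session=abc" };
    format_disk(stdout, &s, 0);              /* cookie omitted */
    format_disk(stdout, &s, FORMAT_SECURE);  /* cookie included */
    return 0;
}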
CliprdrEnumFORMATETC* CliprdrEnumFORMATETC_New(ULONG nFormats, FORMATETC* pFormatEtc)
{
ULONG i;
CliprdrEnumFORMATETC* instance;
IEnumFORMATETC* iEnumFORMATETC;
if ((nFormats != 0) && !pFormatEtc)
return NULL;
instance = (CliprdrEnumFORMATETC*)calloc(1, sizeof(CliprdrEnumFORMATETC));
if (!instance)
goto error;
iEnumFORMATETC = &instance->iEnumFORMATETC;
iEnumFORMATETC->lpVtbl = (IEnumFORMATETCVtbl*)calloc(1, sizeof(IEnumFORMATETCVtbl));
if (!iEnumFORMATETC->lpVtbl)
goto error;
iEnumFORMATETC->lpVtbl->QueryInterface = CliprdrEnumFORMATETC_QueryInterface;
iEnumFORMATETC->lpVtbl->AddRef = CliprdrEnumFORMATETC_AddRef;
iEnumFORMATETC->lpVtbl->Release = CliprdrEnumFORMATETC_Release;
iEnumFORMATETC->lpVtbl->Next = CliprdrEnumFORMATETC_Next;
iEnumFORMATETC->lpVtbl->Skip = CliprdrEnumFORMATETC_Skip;
iEnumFORMATETC->lpVtbl->Reset = CliprdrEnumFORMATETC_Reset;
iEnumFORMATETC->lpVtbl->Clone = CliprdrEnumFORMATETC_Clone;
instance->m_lRefCount = 1;
instance->m_nIndex = 0;
instance->m_nNumFormats = nFormats;
if (nFormats > 0)
{
instance->m_pFormatEtc = (FORMATETC*)calloc(nFormats, sizeof(FORMATETC));
if (!instance->m_pFormatEtc)
goto error;
for (i = 0; i < nFormats; i++)
cliprdr_format_deep_copy(&instance->m_pFormatEtc[i], &pFormatEtc[i]);
}
return instance;
error:
CliprdrEnumFORMATETC_Delete(instance);
return NULL;
}
| 0 |
[
"CWE-20"
] |
FreeRDP
|
0d79670a28c0ab049af08613621aa0c267f977e9
| 130,084,754,072,270,840,000,000,000,000,000,000,000 | 47 |
Fixed missing input checks for file contents request
reported by Valentino Ricotta (Thalium)
|
static inline u32 kvm_async_pf_next_probe(u32 key)
{
return (key + 1) & (ASYNC_PF_PER_VCPU - 1);
}
| 0 |
[
"CWE-476"
] |
linux
|
55749769fe608fa3f4a075e42e89d237c8e37637
| 327,693,583,311,142,250,000,000,000,000,000,000,000 | 4 |
KVM: x86: Fix wall clock writes in Xen shared_info not to mark page dirty
When dirty ring logging is enabled, any dirty logging without an active
vCPU context will cause a kernel oops. But we've already declared that
the shared_info page doesn't get dirty tracking anyway, since it would
be kind of insane to mark it dirty every time we deliver an event channel
interrupt. Userspace is supposed to just assume it's always dirty any
time a vCPU can run or event channels are routed.
So stop using the generic kvm_write_wall_clock() and just write directly
through the gfn_to_pfn_cache that we already have set up.
We can make kvm_write_wall_clock() static in x86.c again now, but let's
not remove the 'sec_hi_ofs' argument even though it's not used yet. At
some point we *will* want to use that for KVM guests too.
Fixes: 629b5348841a ("KVM: x86/xen: update wallclock region")
Reported-by: butt3rflyh4ck <[email protected]>
Signed-off-by: David Woodhouse <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
DEFUN (clear_bgp_peer_in_prefix_filter,
clear_bgp_peer_in_prefix_filter_cmd,
"clear bgp (A.B.C.D|X:X::X:X) in prefix-filter",
CLEAR_STR
BGP_STR
"BGP neighbor address to clear\n"
"BGP IPv6 neighbor to clear\n"
"Soft reconfig inbound update\n"
"Push out the existing ORF prefix-list\n")
{
return bgp_clear_vty (vty, NULL, AFI_IP6, SAFI_UNICAST, clear_peer,
BGP_CLEAR_SOFT_IN_ORF_PREFIX, argv[0]);
}
| 0 |
[
"CWE-125"
] |
frr
|
6d58272b4cf96f0daa846210dd2104877900f921
| 159,394,550,651,990,000,000,000,000,000,000,000,000 | 13 |
[bgpd] cleanup, compact and consolidate capability parsing code
2007-07-26 Paul Jakma <[email protected]>
* (general) Clean up and compact capability parsing slightly.
Consolidate validation of length and logging of generic TLV, and
memcpy of capability data, thus removing such from cap specifc
code (not always present or correct).
* bgp_open.h: Add structures for the generic capability TLV header
and for the data formats of the various specific capabilities we
support. Hence remove the badly named, or else misdefined, struct
capability.
* bgp_open.c: (bgp_capability_vty_out) Use struct capability_mp_data.
Do the length checks *before* memcpy()'ing based on that length
(stored capability - should have been validated anyway on input,
but..).
(bgp_afi_safi_valid_indices) new function to validate (afi,safi)
which is about to be used as index into arrays, consolidates
several instances of same, at least one of which appeared to be
incomplete..
(bgp_capability_mp) Much condensed.
(bgp_capability_orf_entry) New, process one ORF entry
(bgp_capability_orf) Condensed. Fixed to process all ORF entries.
(bgp_capability_restart) Condensed, and fixed to use a
cap-specific type, rather than abusing capability_mp.
(struct message capcode_str) added to aid generic logging.
(size_t cap_minsizes[]) added to aid generic validation of
capability length field.
(bgp_capability_parse) Generic logging and validation of TLV
consolidated here. Code compacted as much as possible.
* bgp_packet.c: (bgp_open_receive) Capability parsers now use
streams, so no more need here to manually fudge the input stream
getp.
(bgp_capability_msg_parse) use struct capability_mp_data. Validate
lengths /before/ memcpy. Use bgp_afi_safi_valid_indices.
(bgp_capability_receive) Exported for use by test harness.
* bgp_vty.c: (bgp_show_summary) fix conversion warning
(bgp_show_peer) ditto
* bgp_debug.h: Fix storage 'extern' after type 'const'.
* lib/log.c: (mes_lookup) warning about code not being in
same-number array slot should be debug, not warning. E.g. BGP
has several discontigious number spaces, allocating from
different parts of a space is not uncommon (e.g. IANA
assigned versus vendor-assigned code points in some number
space).
|
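A recurring point in the changelog above is validating a TLV's length field before memcpy()'ing capability data. A generic sketch of that check (hypothetical TLV layout, not the bgpd structures):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* TLV: <1 byte type><1 byte length><length bytes of value>. Copy the value
 * into out (cap bytes) only after validating the advertised length. */
static int parse_tlv(const uint8_t *buf, size_t buflen,
                     uint8_t *out, size_t cap, uint8_t *type)
{
    uint8_t len;

    if (buflen < 2)
        return -1;
    *type = buf[0];
    len = buf[1];
    if (len > buflen - 2 || len > cap)   /* length exceeds packet or caller buffer */
        return -1;
    memcpy(out, buf + 2, len);
    return len;
}

int main(void)
{
    uint8_t pkt[4] = { 0x02, 0x08, 0xAA, 0xBB };  /* claims 8 bytes, has 2 */
    uint8_t val[16], type;
    printf("%d\n", parse_tlv(pkt, sizeof(pkt), val, sizeof(val), &type)); /* -1 */
    return 0;
}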
replace_push_mb(char_u *p)
{
int l = (*mb_ptr2len)(p);
int j;
for (j = l - 1; j >= 0; --j)
replace_push(p[j]);
return l;
}
| 0 |
[] |
vim
|
98a336dd497d3422e7efeef9f24cc9e25aeb8a49
| 294,662,639,305,616,860,000,000,000,000,000,000,000 | 9 |
patch 8.2.0133: invalid memory access with search command
Problem: Invalid memory access with search command.
Solution: When :normal runs out of characters in bracketed paste mode break
out of the loop.(closes #5511)
|
TPMT_RSA_SCHEME_Marshal(TPMT_RSA_SCHEME *source, BYTE **buffer, INT32 *size)
{
UINT16 written = 0;
written += TPMI_ALG_RSA_SCHEME_Marshal(&source->scheme, buffer, size);
written += TPMU_ASYM_SCHEME_Marshal(&source->details, buffer, size, source->scheme);
return written;
}
| 0 |
[
"CWE-787"
] |
libtpms
|
3ef9b26cb9f28bd64d738bff9505a20d4eb56acd
| 311,736,647,232,186,760,000,000,000,000,000,000,000 | 8 |
tpm2: Add maxSize parameter to TPM2B_Marshal for sanity checks
Add maxSize parameter to TPM2B_Marshal and assert on it checking
the size of the data intended to be marshaled versus the maximum
buffer size.
Signed-off-by: Stefan Berger <[email protected]>
|
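The commit adds a remaining-size parameter so the marshaller can verify the destination has room before writing. A minimal sketch of a size-checked marshal helper (hypothetical names, not the libtpms API):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Append src (len bytes) to *buffer, decrementing *remaining; fail instead
 * of writing past the end of the destination. */
static int marshal_bytes(uint8_t **buffer, int32_t *remaining,
                         const void *src, uint16_t len)
{
    if (*remaining < (int32_t)len)
        return -1;                 /* destination too small */
    memcpy(*buffer, src, len);
    *buffer += len;
    *remaining -= len;
    return len;
}

int main(void)
{
    uint8_t out[8], *p = out;
    int32_t left = sizeof(out);
    uint8_t blob[6] = {1, 2, 3, 4, 5, 6};

    printf("%d\n", marshal_bytes(&p, &left, blob, sizeof(blob))); /* 6 */
    printf("%d\n", marshal_bytes(&p, &left, blob, sizeof(blob))); /* -1, only 2 left */
    return 0;
}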
applet_settings_new_secrets_requested_cb (NMAGConfSettings *settings,
NMAGConfConnection *exported,
const char *setting_name,
const char **hints,
gboolean ask_user,
DBusGMethodInvocation *context,
gpointer user_data)
{
NMApplet *applet = NM_APPLET (user_data);
NMActiveConnection *active_connection = NULL;
NMConnection *connection;
NMSettingConnection *s_con;
NMDevice *device;
NMADeviceClass *dclass;
GError *error = NULL;
connection = nm_exported_connection_get_connection (NM_EXPORTED_CONNECTION (exported));
g_return_if_fail (connection != NULL);
s_con = NM_SETTING_CONNECTION (nm_connection_get_setting (connection, NM_TYPE_SETTING_CONNECTION));
g_return_if_fail (s_con != NULL);
/* VPN secrets get handled a bit differently */
if (!strcmp (nm_setting_connection_get_connection_type (s_con), NM_SETTING_VPN_SETTING_NAME)) {
nma_vpn_request_password (NM_EXPORTED_CONNECTION (exported), ask_user, context);
return;
}
/* Find the active device for this connection */
device = find_active_device (exported, applet, &active_connection);
if (!device || !active_connection) {
g_set_error (&error, NM_SETTINGS_ERROR, NM_SETTINGS_ERROR_INTERNAL_ERROR,
"%s.%d (%s): couldn't find details for connection",
__FILE__, __LINE__, __func__);
goto error;
}
dclass = get_device_class (device, applet);
if (!dclass) {
g_set_error (&error, NM_SETTINGS_ERROR, NM_SETTINGS_ERROR_INTERNAL_ERROR,
"%s.%d (%s): device type unknown",
__FILE__, __LINE__, __func__);
goto error;
}
if (!dclass->get_secrets) {
g_set_error (&error, NM_SETTINGS_ERROR, NM_SETTINGS_ERROR_SECRETS_UNAVAILABLE,
"%s.%d (%s): no secrets found",
__FILE__, __LINE__, __func__);
goto error;
}
/* Let the device class handle secrets */
if (!dclass->get_secrets (device, connection, active_connection, setting_name,
hints, context, applet, &error))
goto error;
return;
error:
g_warning ("%s", error->message);
dbus_g_method_return_error (context, error);
g_error_free (error);
}
| 0 |
[
"CWE-200"
] |
network-manager-applet
|
8627880e07c8345f69ed639325280c7f62a8f894
| 2,487,304,751,134,762,300,000,000,000,000,000,000 | 64 |
editor: prevent any registration of objects on the system bus
D-Bus access-control is name-based; so requests for a specific name
are allowed/denied based on the rules in /etc/dbus-1/system.d. But
apparently apps still get a non-named service on the bus, and if we
register *any* object even though we don't have a named service,
dbus and dbus-glib will happily proxy signals. Since the connection
editor shouldn't ever expose anything having to do with connections
on any bus, make sure that's the case.
|
TEST_P(ProtocolIntegrationTest, HittingEncoderFilterLimit) {
useAccessLog();
config_helper_.addFilter("{ name: envoy.http_dynamo_filter, config: {} }");
config_helper_.setBufferLimits(1024, 1024);
initialize();
// Send the request.
codec_client_ = makeHttpConnection(lookupPort("http"));
auto encoder_decoder = codec_client_->startRequest(default_request_headers_);
auto downstream_request = &encoder_decoder.first;
auto response = std::move(encoder_decoder.second);
Buffer::OwnedImpl data(R"({"TableName":"locations"})");
codec_client_->sendData(*downstream_request, data, true);
waitForNextUpstreamRequest();
// Send the response headers.
upstream_request_->encodeHeaders(default_response_headers_, false);
// Now send an overly large response body. At some point, too much data will
// be buffered, the stream will be reset, and the connection will disconnect.
fake_upstreams_[0]->set_allow_unexpected_disconnects(true);
upstream_request_->encodeData(1024 * 65, false);
if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) {
ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());
} else {
ASSERT_TRUE(upstream_request_->waitForReset());
ASSERT_TRUE(fake_upstream_connection_->close());
ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());
}
response->waitForEndStream();
EXPECT_TRUE(response->complete());
EXPECT_EQ("500", response->headers().Status()->value().getStringView());
EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("500"));
test_server_->waitForCounterEq("http.config_test.downstream_rq_5xx", 1);
}
| 0 |
[
"CWE-400",
"CWE-703"
] |
envoy
|
afc39bea36fd436e54262f150c009e8d72db5014
| 3,016,606,482,232,932,000,000,000,000,000,000,000 | 36 |
Track byteSize of HeaderMap internally.
Introduces a cached byte size updated internally in HeaderMap. The value
is stored as an optional, and is cleared whenever a non-const pointer or
reference to a HeaderEntry is accessed. The cached value can be set with
refreshByteSize() which performs an iteration over the HeaderMap to sum
the size of each key and value in the HeaderMap.
Signed-off-by: Asra Ali <[email protected]>
|
static float lite_font_stringwidth( wmfAPI* API, wmfFont* font, char* str)
{
#if 0
wmf_magick_t
*ddata = WMF_MAGICK_GetData(API);
Image
*image = ddata->image;
DrawInfo
*draw_info;
ExceptionInfo
*exception;
TypeMetric
metrics;
float
stringwidth = 0;
double
orig_x_resolution,
orig_y_resolution;
ResolutionType
orig_resolution_units;
orig_x_resolution = image->resolution.x;
orig_y_resolution = image->resolution.y;
orig_resolution_units = image->units;
draw_info=ddata->draw_info;
if (draw_info == (const DrawInfo *) NULL)
return 0;
draw_info->font=WMF_FONT_PSNAME(font);
draw_info->pointsize=12;
draw_info->text=str;
image->resolution.x = 72;
image->resolution.y = 72;
image->units = PixelsPerInchResolution;
exception=ddata->exception;
if (GetTypeMetrics(image, draw_info, &metrics, exception) != MagickFalse)
stringwidth = ((metrics.width * 72)/(image->resolution.x * draw_info->pointsize)); /* *0.916348; */
draw_info->font=NULL;
draw_info->text=NULL;
#if 0
printf("\nlite_font_stringwidth\n");
printf("string = \"%s\"\n", str);
printf("WMF_FONT_NAME = \"%s\"\n", WMF_FONT_NAME(font));
printf("WMF_FONT_PSNAME = \"%s\"\n", WMF_FONT_PSNAME(font));
printf("stringwidth = %g\n", stringwidth);
/* printf("WMF_FONT_HEIGHT = %i\n", (int)WMF_FONT_HEIGHT(font)); */
/* printf("WMF_FONT_WIDTH = %i\n", (int)WMF_FONT_WIDTH(font)); */
fflush(stdout);
#endif
image->resolution.x = orig_x_resolution;
image->resolution.y = orig_y_resolution;
image->units = orig_resolution_units;
return stringwidth;
#else
(void) API;
(void) font;
(void) str;
return 0;
#endif
}
| 0 |
[
"CWE-772"
] |
ImageMagick
|
b2b48d50300a9fbcd0aa0d9230fd6d7a08f7671e
| 11,249,247,768,798,942,000,000,000,000,000,000,000 | 75 |
https://github.com/ImageMagick/ImageMagick/issues/544
|
ZEND_METHOD(error_exception, __construct)
{
char *message = NULL, *filename = NULL;
long code = 0, severity = E_ERROR, lineno;
zval *object, *previous = NULL;
int argc = ZEND_NUM_ARGS(), message_len, filename_len;
if (zend_parse_parameters_ex(ZEND_PARSE_PARAMS_QUIET, argc TSRMLS_CC, "|sllslO!", &message, &message_len, &code, &severity, &filename, &filename_len, &lineno, &previous, default_exception_ce) == FAILURE) {
zend_error(E_ERROR, "Wrong parameters for ErrorException([string $exception [, long $code, [ long $severity, [ string $filename, [ long $lineno [, Exception $previous = NULL]]]]]])");
}
object = getThis();
if (message) {
zend_update_property_string(default_exception_ce, object, "message", sizeof("message")-1, message TSRMLS_CC);
}
if (code) {
zend_update_property_long(default_exception_ce, object, "code", sizeof("code")-1, code TSRMLS_CC);
}
if (previous) {
zend_update_property(default_exception_ce, object, "previous", sizeof("previous")-1, previous TSRMLS_CC);
}
zend_update_property_long(default_exception_ce, object, "severity", sizeof("severity")-1, severity TSRMLS_CC);
if (argc >= 4) {
zend_update_property_string(default_exception_ce, object, "file", sizeof("file")-1, filename TSRMLS_CC);
if (argc < 5) {
lineno = 0; /* invalidate lineno */
}
zend_update_property_long(default_exception_ce, object, "line", sizeof("line")-1, lineno TSRMLS_CC);
}
}
| 0 |
[] |
php-src
|
a894a8155fab068d68a04bf181dbaddfa01ccbb0
| 165,717,883,003,105,930,000,000,000,000,000,000,000 | 35 |
More fixes for bug #69152
|
static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
struct audit_buffer *audit_buf)
{
struct xfrm_sec_ctx *ctx = x->security;
u32 spi = ntohl(x->id.spi);
if (ctx)
audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
switch (x->props.family) {
case AF_INET:
audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
&x->props.saddr.a4, &x->id.daddr.a4);
break;
case AF_INET6:
audit_log_format(audit_buf, " src=%pI6 dst=%pI6",
x->props.saddr.a6, x->id.daddr.a6);
break;
}
audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
}
| 0 |
[
"CWE-416"
] |
linux
|
dbb2483b2a46fbaf833cfb5deb5ed9cace9c7399
| 179,252,315,249,826,200,000,000,000,000,000,000,000 | 23 |
xfrm: clean up xfrm protocol checks
In commit 6a53b7593233 ("xfrm: check id proto in validate_tmpl()")
I introduced a check for xfrm protocol, but according to Herbert
IPSEC_PROTO_ANY should only be used as a wildcard for lookup, so
it should be removed from validate_tmpl().
Also, IPSEC_PROTO_ANY is expected to match only the 3 IPsec-specific
protocols, which is why xfrm_state_flush() could still miss
IPPROTO_ROUTING; this leads to those entries being left in
net->xfrm.state_all before the net exits. Fix this by replacing
IPSEC_PROTO_ANY with zero.
This patch also extracts the check from validate_tmpl() to
xfrm_id_proto_valid() and uses it in parse_ipsecrequest().
With this, no other protocols should be added into xfrm.
Fixes: 6a53b7593233 ("xfrm: check id proto in validate_tmpl()")
Reported-by: [email protected]
Cc: Steffen Klassert <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: Cong Wang <[email protected]>
Acked-by: Herbert Xu <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]>
|
GF_Err HintFile(GF_ISOFile *file, u32 MTUSize, u32 max_ptime, u32 rtp_rate, u32 base_flags, Bool copy_data, Bool interleave, Bool regular_iod, Bool single_group, Bool hint_no_offset)
{
GF_ESD *esd;
GF_InitialObjectDescriptor *iod;
u32 i, val, res, streamType;
u32 sl_mode, prev_ocr, single_ocr, nb_done, tot_bw, bw, flags, spec_type;
GF_Err e;
char szPayload[30];
GF_RTPHinter *hinter;
Bool copy, has_iod, single_av;
u8 init_payt = BASE_PAYT;
u32 mtype;
GF_SDP_IODProfile iod_mode = GF_SDP_IOD_NONE;
u32 media_group = 0;
u8 media_prio = 0;
tot_bw = 0;
prev_ocr = 0;
single_ocr = 1;
has_iod = 1;
iod = (GF_InitialObjectDescriptor *) gf_isom_get_root_od(file);
if (!iod) has_iod = 0;
else {
if (!gf_list_count(iod->ESDescriptors)) has_iod = 0;
gf_odf_desc_del((GF_Descriptor *) iod);
}
spec_type = gf_isom_guess_specification(file);
single_av = single_group ? 1 : gf_isom_is_single_av(file);
/*first make sure we use a systems track as base OCR*/
for (i=0; i<gf_isom_get_track_count(file); i++) {
res = gf_isom_get_media_type(file, i+1);
if ((res==GF_ISOM_MEDIA_SCENE) || (res==GF_ISOM_MEDIA_OD)) {
if (gf_isom_is_track_in_root_od(file, i+1)) {
gf_isom_set_default_sync_track(file, i+1);
break;
}
}
}
nb_done = 0;
for (i=0; i<gf_isom_get_track_count(file); i++) {
sl_mode = base_flags;
copy = copy_data;
/*skip emty tracks (mainly MPEG-4 interaction streams...*/
if (!gf_isom_get_sample_count(file, i+1)) continue;
if (!gf_isom_is_track_enabled(file, i+1)) {
M4_LOG(GF_LOG_INFO, ("Track ID %d disabled - skipping hint\n", gf_isom_get_track_id(file, i+1) ));
continue;
}
mtype = gf_isom_get_media_type(file, i+1);
switch (mtype) {
case GF_ISOM_MEDIA_VISUAL:
if (single_av) {
media_group = 2;
media_prio = 2;
}
break;
case GF_ISOM_MEDIA_AUXV:
if (single_av) {
media_group = 2;
media_prio = 3;
}
break;
case GF_ISOM_MEDIA_PICT:
if (single_av) {
media_group = 2;
media_prio = 4;
}
break;
case GF_ISOM_MEDIA_AUDIO:
if (single_av) {
media_group = 2;
media_prio = 1;
}
break;
case GF_ISOM_MEDIA_HINT:
continue;
default:
/*no hinting of systems track on isma*/
if (spec_type==GF_ISOM_BRAND_ISMA) continue;
}
mtype = gf_isom_get_media_subtype(file, i+1, 1);
if ((mtype==GF_ISOM_SUBTYPE_MPEG4) || (mtype==GF_ISOM_SUBTYPE_MPEG4_CRYP) ) mtype = gf_isom_get_mpeg4_subtype(file, i+1, 1);
if (!single_av) {
/*one media per group only (we should prompt user for group selection)*/
media_group ++;
media_prio = 1;
}
streamType = 0;
esd = gf_isom_get_esd(file, i+1, 1);
if (esd && esd->decoderConfig) {
streamType = esd->decoderConfig->streamType;
if (!prev_ocr) {
prev_ocr = esd->OCRESID;
if (!esd->OCRESID) prev_ocr = esd->ESID;
} else if (esd->OCRESID && prev_ocr != esd->OCRESID) {
single_ocr = 0;
}
/*OD MUST BE WITHOUT REFERENCES*/
if (streamType==1) copy = 1;
}
gf_odf_desc_del((GF_Descriptor *) esd);
if (!regular_iod && gf_isom_is_track_in_root_od(file, i+1)) {
/*single AU - check if base64 would fit in ESD (consider 33% overhead of base64), otherwise stream*/
if (gf_isom_get_sample_count(file, i+1)==1) {
GF_ISOSample *samp = gf_isom_get_sample(file, i+1, 1, &val);
if (streamType && samp) {
res = gf_hinter_can_embbed_data(samp->data, samp->dataLength, streamType);
} else {
/*not a system track, we shall hint it*/
res = 0;
}
if (samp) gf_isom_sample_del(&samp);
if (res) continue;
}
}
if (interleave) sl_mode |= GP_RTP_PCK_USE_INTERLEAVING;
hinter = gf_hinter_track_new(file, i+1, MTUSize, max_ptime, rtp_rate, sl_mode, init_payt, copy, media_group, media_prio, &e);
if (!hinter) {
if (e) {
M4_LOG(nb_done ? GF_LOG_WARNING : GF_LOG_ERROR, ("Cannot create hinter (%s)\n", gf_error_to_string(e) ));
if (!nb_done) return e;
}
continue;
}
if (hint_no_offset)
gf_hinter_track_force_no_offsets(hinter);
bw = gf_hinter_track_get_bandwidth(hinter);
tot_bw += bw;
flags = gf_hinter_track_get_flags(hinter);
//set extraction mode for AVC/SVC
gf_isom_set_nalu_extract_mode(file, i+1, GF_ISOM_NALU_EXTRACT_LAYER_ONLY);
gf_hinter_track_get_payload_name(hinter, szPayload);
M4_LOG(GF_LOG_INFO, ("Hinting track ID %d - Type \"%s:%s\" (%s) - BW %d kbps\n", gf_isom_get_track_id(file, i+1), gf_4cc_to_str(mtype), gf_4cc_to_str(mtype), szPayload, bw));
if (flags & GP_RTP_PCK_SYSTEMS_CAROUSEL) M4_LOG(GF_LOG_INFO, ("\tMPEG-4 Systems stream carousel enabled\n"));
e = gf_hinter_track_process(hinter);
if (!e) e = gf_hinter_track_finalize(hinter, has_iod);
gf_hinter_track_del(hinter);
if (e) {
M4_LOG(GF_LOG_ERROR, ("Error while hinting (%s)\n", gf_error_to_string(e)));
if (!nb_done) return e;
}
init_payt++;
nb_done ++;
}
if (has_iod) {
iod_mode = GF_SDP_IOD_ISMA;
if (regular_iod) iod_mode = GF_SDP_IOD_REGULAR;
} else {
iod_mode = GF_SDP_IOD_NONE;
}
gf_hinter_finalize(file, iod_mode, tot_bw);
if (!single_ocr)
M4_LOG(GF_LOG_WARNING, ("Warning: at least 2 timelines found in the file\nThis may not be supported by servers/players\n\n"));
return GF_OK;
}
| 0 |
[
"CWE-401",
"CWE-787"
] |
gpac
|
a51f951b878c2b73c1d8e2f1518c7cdc5fb82c3f
| 288,258,412,129,595,040,000,000,000,000,000,000,000 | 174 |
fixed #1782 (fuzz)
|
static inline struct sk_buff *handle_ing(struct sk_buff *skb,
struct packet_type **pt_prev,
int *ret, struct net_device *orig_dev)
{
struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
if (!rxq || rxq->qdisc == &noop_qdisc)
goto out;
if (*pt_prev) {
*ret = deliver_skb(skb, *pt_prev, orig_dev);
*pt_prev = NULL;
}
switch (ing_filter(skb, rxq)) {
case TC_ACT_SHOT:
case TC_ACT_STOLEN:
kfree_skb(skb);
return NULL;
}
out:
skb->tc_verd = 0;
return skb;
}
| 0 |
[
"CWE-264"
] |
linux
|
8909c9ad8ff03611c9c96c9a92656213e4bb495b
| 246,376,250,141,048,800,000,000,000,000,000,000,000 | 25 |
net: don't allow CAP_NET_ADMIN to load non-netdev kernel modules
Since a8f80e8ff94ecba629542d9b4b5f5a8ee3eb565c any process with
CAP_NET_ADMIN may load any module from /lib/modules/. This doesn't mean
that CAP_NET_ADMIN is a superset of CAP_SYS_MODULE as modules are
limited to /lib/modules/**. However, CAP_NET_ADMIN capability shouldn't
allow anybody load any module not related to networking.
This patch restricts an ability of autoloading modules to netdev modules
with explicit aliases. This fixes CVE-2011-1019.
Arnd Bergmann suggested to leave untouched the old pre-v2.6.32 behavior
of loading netdev modules by name (without any prefix) for processes
with CAP_SYS_MODULE to maintain the compatibility with network scripts
that use autoloading netdev modules by aliases like "eth0", "wlan0".
Currently there are only three users of the feature in the upstream
kernel: ipip, ip_gre and sit.
root@albatros:~# capsh --drop=$(seq -s, 0 11),$(seq -s, 13 34) --
root@albatros:~# grep Cap /proc/$$/status
CapInh: 0000000000000000
CapPrm: fffffff800001000
CapEff: fffffff800001000
CapBnd: fffffff800001000
root@albatros:~# modprobe xfs
FATAL: Error inserting xfs
(/lib/modules/2.6.38-rc6-00001-g2bf4ca3/kernel/fs/xfs/xfs.ko): Operation not permitted
root@albatros:~# lsmod | grep xfs
root@albatros:~# ifconfig xfs
xfs: error fetching interface information: Device not found
root@albatros:~# lsmod | grep xfs
root@albatros:~# lsmod | grep sit
root@albatros:~# ifconfig sit
sit: error fetching interface information: Device not found
root@albatros:~# lsmod | grep sit
root@albatros:~# ifconfig sit0
sit0 Link encap:IPv6-in-IPv4
NOARP MTU:1480 Metric:1
root@albatros:~# lsmod | grep sit
sit 10457 0
tunnel4 2957 1 sit
For CAP_SYS_MODULE module loading is still relaxed:
root@albatros:~# grep Cap /proc/$$/status
CapInh: 0000000000000000
CapPrm: ffffffffffffffff
CapEff: ffffffffffffffff
CapBnd: ffffffffffffffff
root@albatros:~# ifconfig xfs
xfs: error fetching interface information: Device not found
root@albatros:~# lsmod | grep xfs
xfs 745319 0
Reference: https://lkml.org/lkml/2011/2/24/203
Signed-off-by: Vasiliy Kulikov <[email protected]>
Signed-off-by: Michael Tokarev <[email protected]>
Acked-by: David S. Miller <[email protected]>
Acked-by: Kees Cook <[email protected]>
Signed-off-by: James Morris <[email protected]>
|
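
The commit message above describes a policy change rather than a single code fix: with only CAP_NET_ADMIN, module auto-loading is confined to aliases in the netdev namespace, while CAP_SYS_MODULE keeps the old unrestricted behaviour. A minimal userspace sketch of that prefix-gated policy follows; the capability enum, the "netdev-" prefix check and request_module_checked() are illustrative stand-ins, not kernel code.

/*
 * Userspace sketch of the policy in the commit message: with only the
 * "network admin" capability, module auto-load requests are confined to
 * the netdev alias namespace; the full "module" capability keeps the old
 * behaviour. Capability values and the loader call are stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum cap { CAP_NONE, CAP_NET_ADMIN_ONLY, CAP_SYS_MODULE_FULL };

static bool request_module_checked(enum cap c, const char *alias)
{
    if (c == CAP_SYS_MODULE_FULL) {
        printf("loading %s\n", alias);
        return true;
    }
    if (c == CAP_NET_ADMIN_ONLY && strncmp(alias, "netdev-", 7) == 0) {
        printf("loading %s\n", alias);
        return true;
    }
    printf("refusing %s\n", alias);
    return false;
}

int main(void)
{
    request_module_checked(CAP_NET_ADMIN_ONLY, "netdev-eth0"); /* allowed */
    request_module_checked(CAP_NET_ADMIN_ONLY, "xfs");         /* refused */
    request_module_checked(CAP_SYS_MODULE_FULL, "xfs");        /* allowed */
    return 0;
}
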
xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
{
struct xt_match *match;
match = xt_find_match(nfproto, name, revision);
if (IS_ERR(match)) {
request_module("%st_%s", xt_prefix[nfproto], name);
match = xt_find_match(nfproto, name, revision);
}
return match;
}
| 0 |
[
"CWE-119"
] |
nf-next
|
d7591f0c41ce3e67600a982bab6989ef0f07b3ce
| 127,785,265,459,589,400,000,000,000,000,000,000,000 | 12 |
netfilter: x_tables: introduce and use xt_copy_counters_from_user
The three variants use same copy&pasted code, condense this into a
helper and use that.
Make sure info.name is 0-terminated.
Signed-off-by: Florian Westphal <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>
|
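
The key detail in the commit message above is "Make sure info.name is 0-terminated": a fixed-width name copied from user space may arrive without a terminating NUL. The sketch below shows the general copy-and-terminate pattern in plain userspace C; struct counters_info and copy_info_name() are hypothetical names, not the kernel's xt_copy_counters_from_user().

/*
 * Minimal userspace sketch of the "force 0-termination" idea from the
 * commit message above. The struct and function names are hypothetical.
 */
#include <stdio.h>
#include <string.h>

#define NAME_LEN 29

struct counters_info {
    char name[NAME_LEN];
    unsigned int num_counters;
};

/* Copy an untrusted, possibly unterminated name into info->name. */
static void copy_info_name(struct counters_info *info,
                           const char *untrusted, size_t untrusted_len)
{
    size_t n = untrusted_len < sizeof(info->name) - 1
                   ? untrusted_len
                   : sizeof(info->name) - 1;

    memcpy(info->name, untrusted, n);
    info->name[n] = '\0';              /* always terminate, as the fix does */
}

int main(void)
{
    /* 29 'A's, no terminating NUL anywhere in the source buffer */
    char raw[29];
    memset(raw, 'A', sizeof(raw));

    struct counters_info info = { .num_counters = 1 };
    copy_info_name(&info, raw, sizeof(raw));

    printf("name=\"%s\" (len=%zu)\n", info.name, strlen(info.name));
    return 0;
}
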
static int atusb_get_and_conf_chip(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
u8 man_id_0, man_id_1, part_num, version_num;
const char *chip;
struct ieee802154_hw *hw = atusb->hw;
man_id_0 = atusb_read_reg(atusb, RG_MAN_ID_0);
man_id_1 = atusb_read_reg(atusb, RG_MAN_ID_1);
part_num = atusb_read_reg(atusb, RG_PART_NUM);
version_num = atusb_read_reg(atusb, RG_VERSION_NUM);
if (atusb->err)
return atusb->err;
hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS;
hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL |
WPAN_PHY_FLAG_CCA_MODE;
hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) |
BIT(NL802154_CCA_CARRIER) |
BIT(NL802154_CCA_ENERGY_CARRIER);
hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) |
BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR);
hw->phy->cca.mode = NL802154_CCA_ENERGY;
hw->phy->current_page = 0;
if ((man_id_1 << 8 | man_id_0) != ATUSB_JEDEC_ATMEL) {
dev_err(&usb_dev->dev,
"non-Atmel transceiver xxxx%02x%02x\n",
man_id_1, man_id_0);
goto fail;
}
switch (part_num) {
case 2:
chip = "AT86RF230";
atusb->hw->phy->supported.channels[0] = 0x7FFF800;
atusb->hw->phy->current_channel = 11; /* reset default */
atusb->hw->phy->symbol_duration = 16;
atusb->hw->phy->supported.tx_powers = atusb_powers;
atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers);
hw->phy->supported.cca_ed_levels = atusb_ed_levels;
hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(atusb_ed_levels);
break;
case 3:
chip = "AT86RF231";
atusb->hw->phy->supported.channels[0] = 0x7FFF800;
atusb->hw->phy->current_channel = 11; /* reset default */
atusb->hw->phy->symbol_duration = 16;
atusb->hw->phy->supported.tx_powers = atusb_powers;
atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers);
hw->phy->supported.cca_ed_levels = atusb_ed_levels;
hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(atusb_ed_levels);
break;
case 7:
chip = "AT86RF212";
atusb->hw->flags |= IEEE802154_HW_LBT;
atusb->hw->phy->supported.channels[0] = 0x00007FF;
atusb->hw->phy->supported.channels[2] = 0x00007FF;
atusb->hw->phy->current_channel = 5;
atusb->hw->phy->symbol_duration = 25;
atusb->hw->phy->supported.lbt = NL802154_SUPPORTED_BOOL_BOTH;
atusb->hw->phy->supported.tx_powers = at86rf212_powers;
atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf212_powers);
atusb->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_100;
atusb->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_100);
break;
default:
dev_err(&usb_dev->dev,
"unexpected transceiver, part 0x%02x version 0x%02x\n",
part_num, version_num);
goto fail;
}
hw->phy->transmit_power = hw->phy->supported.tx_powers[0];
hw->phy->cca_ed_level = hw->phy->supported.cca_ed_levels[7];
dev_info(&usb_dev->dev, "ATUSB: %s version %d\n", chip, version_num);
return 0;
fail:
atusb->err = -ENODEV;
return -ENODEV;
}
| 0 |
[
"CWE-416"
] |
linux
|
7fd25e6fc035f4b04b75bca6d7e8daa069603a76
| 230,741,666,001,119,100,000,000,000,000,000,000,000 | 90 |
ieee802154: atusb: fix use-after-free at disconnect
The disconnect callback was accessing the hardware-descriptor private
data after having having freed it.
Fixes: 7490b008d123 ("ieee802154: add support for atusb transceiver")
Cc: stable <[email protected]> # 4.2
Cc: Alexander Aring <[email protected]>
Reported-by: [email protected]
Signed-off-by: Johan Hovold <[email protected]>
Signed-off-by: Stefan Schmidt <[email protected]>
|
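
The bug class named in the commit message above (use-after-free at disconnect) comes down to ordering: the private data was freed first and dereferenced afterwards. A minimal sketch of the broken and the corrected ordering, with entirely made-up types, follows.

/*
 * Sketch of the use-after-free pattern described above and its fix: take
 * what you still need from the private data *before* freeing it. All
 * types and names here are invented for illustration.
 */
#include <stdio.h>
#include <stdlib.h>

struct priv {
    int dev_id;
};

/* Buggy shape: frees priv, then would dereference it. */
static void disconnect_buggy(struct priv *p)
{
    free(p);
    /* printf("device %d gone\n", p->dev_id);   <-- use after free */
}

/* Fixed shape: copy out the field first, free last. */
static void disconnect_fixed(struct priv *p)
{
    int id = p->dev_id;   /* read while the allocation is still live */
    free(p);
    printf("device %d gone\n", id);
}

int main(void)
{
    struct priv *p = malloc(sizeof(*p));
    if (!p)
        return 1;
    p->dev_id = 42;
    disconnect_fixed(p);
    (void)disconnect_buggy;   /* kept only to show the broken ordering */
    return 0;
}
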
GF_Err prhd_box_size(GF_Box *s)
{
s->size += 12;
return GF_OK;
| 0 |
[
"CWE-476",
"CWE-787"
] |
gpac
|
b8f8b202d4fc23eb0ab4ce71ae96536ca6f5d3f8
| 67,758,678,721,394,810,000,000,000,000,000,000,000 | 5 |
fixed #1757
|
bool __skb_flow_dissect(const struct net *net,
const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container,
void *data, __be16 proto, int nhoff, int hlen,
unsigned int flags)
{
struct flow_dissector_key_control *key_control;
struct flow_dissector_key_basic *key_basic;
struct flow_dissector_key_addrs *key_addrs;
struct flow_dissector_key_ports *key_ports;
struct flow_dissector_key_icmp *key_icmp;
struct flow_dissector_key_tags *key_tags;
struct flow_dissector_key_vlan *key_vlan;
struct bpf_prog *attached = NULL;
enum flow_dissect_ret fdret;
enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
int num_hdrs = 0;
u8 ip_proto = 0;
bool ret;
if (!data) {
data = skb->data;
proto = skb_vlan_tag_present(skb) ?
skb->vlan_proto : skb->protocol;
nhoff = skb_network_offset(skb);
hlen = skb_headlen(skb);
#if IS_ENABLED(CONFIG_NET_DSA)
if (unlikely(skb->dev && netdev_uses_dsa(skb->dev))) {
const struct dsa_device_ops *ops;
int offset;
ops = skb->dev->dsa_ptr->tag_ops;
if (ops->flow_dissect &&
!ops->flow_dissect(skb, &proto, &offset)) {
hlen -= offset;
nhoff += offset;
}
}
#endif
}
/* It is ensured by skb_flow_dissector_init() that control key will
* be always present.
*/
key_control = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_CONTROL,
target_container);
/* It is ensured by skb_flow_dissector_init() that basic key will
* be always present.
*/
key_basic = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_BASIC,
target_container);
if (skb) {
if (!net) {
if (skb->dev)
net = dev_net(skb->dev);
else if (skb->sk)
net = sock_net(skb->sk);
}
}
WARN_ON_ONCE(!net);
if (net) {
rcu_read_lock();
attached = rcu_dereference(net->flow_dissector_prog);
if (attached) {
struct bpf_flow_keys flow_keys;
struct bpf_flow_dissector ctx = {
.flow_keys = &flow_keys,
.data = data,
.data_end = data + hlen,
};
__be16 n_proto = proto;
if (skb) {
ctx.skb = skb;
/* we can't use 'proto' in the skb case
* because it might be set to skb->vlan_proto
* which has been pulled from the data
*/
n_proto = skb->protocol;
}
ret = bpf_flow_dissect(attached, &ctx, n_proto, nhoff,
hlen, flags);
__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
target_container);
rcu_read_unlock();
return ret;
}
rcu_read_unlock();
}
if (dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct ethhdr *eth = eth_hdr(skb);
struct flow_dissector_key_eth_addrs *key_eth_addrs;
key_eth_addrs = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_ETH_ADDRS,
target_container);
memcpy(key_eth_addrs, ð->h_dest, sizeof(*key_eth_addrs));
}
proto_again:
fdret = FLOW_DISSECT_RET_CONTINUE;
switch (proto) {
case htons(ETH_P_IP): {
const struct iphdr *iph;
struct iphdr _iph;
iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
if (!iph || iph->ihl < 5) {
fdret = FLOW_DISSECT_RET_OUT_BAD;
break;
}
nhoff += iph->ihl * 4;
ip_proto = iph->protocol;
if (dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
key_addrs = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_IPV4_ADDRS,
target_container);
memcpy(&key_addrs->v4addrs, &iph->saddr,
sizeof(key_addrs->v4addrs));
key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}
if (ip_is_fragment(iph)) {
key_control->flags |= FLOW_DIS_IS_FRAGMENT;
if (iph->frag_off & htons(IP_OFFSET)) {
fdret = FLOW_DISSECT_RET_OUT_GOOD;
break;
} else {
key_control->flags |= FLOW_DIS_FIRST_FRAG;
if (!(flags &
FLOW_DISSECTOR_F_PARSE_1ST_FRAG)) {
fdret = FLOW_DISSECT_RET_OUT_GOOD;
break;
}
}
}
__skb_flow_dissect_ipv4(skb, flow_dissector,
target_container, data, iph);
break;
}
case htons(ETH_P_IPV6): {
const struct ipv6hdr *iph;
struct ipv6hdr _iph;
iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
if (!iph) {
fdret = FLOW_DISSECT_RET_OUT_BAD;
break;
}
ip_proto = iph->nexthdr;
nhoff += sizeof(struct ipv6hdr);
if (dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
key_addrs = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_IPV6_ADDRS,
target_container);
memcpy(&key_addrs->v6addrs, &iph->saddr,
sizeof(key_addrs->v6addrs));
key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
}
if ((dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
(flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
ip6_flowlabel(iph)) {
__be32 flow_label = ip6_flowlabel(iph);
if (dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
key_tags = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_FLOW_LABEL,
target_container);
key_tags->flow_label = ntohl(flow_label);
}
if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL) {
fdret = FLOW_DISSECT_RET_OUT_GOOD;
break;
}
}
__skb_flow_dissect_ipv6(skb, flow_dissector,
target_container, data, iph);
break;
}
case htons(ETH_P_8021AD):
case htons(ETH_P_8021Q): {
const struct vlan_hdr *vlan = NULL;
struct vlan_hdr _vlan;
__be16 saved_vlan_tpid = proto;
if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX &&
skb && skb_vlan_tag_present(skb)) {
proto = skb->protocol;
} else {
vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
data, hlen, &_vlan);
if (!vlan) {
fdret = FLOW_DISSECT_RET_OUT_BAD;
break;
}
proto = vlan->h_vlan_encapsulated_proto;
nhoff += sizeof(*vlan);
}
if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX) {
dissector_vlan = FLOW_DISSECTOR_KEY_VLAN;
} else if (dissector_vlan == FLOW_DISSECTOR_KEY_VLAN) {
dissector_vlan = FLOW_DISSECTOR_KEY_CVLAN;
} else {
fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
break;
}
if (dissector_uses_key(flow_dissector, dissector_vlan)) {
key_vlan = skb_flow_dissector_target(flow_dissector,
dissector_vlan,
target_container);
if (!vlan) {
key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
key_vlan->vlan_priority = skb_vlan_tag_get_prio(skb);
} else {
key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) &
VLAN_VID_MASK;
key_vlan->vlan_priority =
(ntohs(vlan->h_vlan_TCI) &
VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}
key_vlan->vlan_tpid = saved_vlan_tpid;
}
fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
break;
}
case htons(ETH_P_PPP_SES): {
struct {
struct pppoe_hdr hdr;
__be16 proto;
} *hdr, _hdr;
hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
if (!hdr) {
fdret = FLOW_DISSECT_RET_OUT_BAD;
break;
}
proto = hdr->proto;
nhoff += PPPOE_SES_HLEN;
switch (proto) {
case htons(PPP_IP):
proto = htons(ETH_P_IP);
fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
break;
case htons(PPP_IPV6):
proto = htons(ETH_P_IPV6);
fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
break;
default:
fdret = FLOW_DISSECT_RET_OUT_BAD;
break;
}
break;
}
case htons(ETH_P_TIPC): {
struct tipc_basic_hdr *hdr, _hdr;
hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr),
data, hlen, &_hdr);
if (!hdr) {
fdret = FLOW_DISSECT_RET_OUT_BAD;
break;
}
if (dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_TIPC)) {
key_addrs = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_TIPC,
target_container);
key_addrs->tipckey.key = tipc_hdr_rps_key(hdr);
key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC;
}
fdret = FLOW_DISSECT_RET_OUT_GOOD;
break;
}
case htons(ETH_P_MPLS_UC):
case htons(ETH_P_MPLS_MC):
fdret = __skb_flow_dissect_mpls(skb, flow_dissector,
target_container, data,
nhoff, hlen);
break;
case htons(ETH_P_FCOE):
if ((hlen - nhoff) < FCOE_HEADER_LEN) {
fdret = FLOW_DISSECT_RET_OUT_BAD;
break;
}
nhoff += FCOE_HEADER_LEN;
fdret = FLOW_DISSECT_RET_OUT_GOOD;
break;
case htons(ETH_P_ARP):
case htons(ETH_P_RARP):
fdret = __skb_flow_dissect_arp(skb, flow_dissector,
target_container, data,
nhoff, hlen);
break;
case htons(ETH_P_BATMAN):
fdret = __skb_flow_dissect_batadv(skb, key_control, data,
&proto, &nhoff, hlen, flags);
break;
default:
fdret = FLOW_DISSECT_RET_OUT_BAD;
break;
}
/* Process result of proto processing */
switch (fdret) {
case FLOW_DISSECT_RET_OUT_GOOD:
goto out_good;
case FLOW_DISSECT_RET_PROTO_AGAIN:
if (skb_flow_dissect_allowed(&num_hdrs))
goto proto_again;
goto out_good;
case FLOW_DISSECT_RET_CONTINUE:
case FLOW_DISSECT_RET_IPPROTO_AGAIN:
break;
case FLOW_DISSECT_RET_OUT_BAD:
default:
goto out_bad;
}
ip_proto_again:
fdret = FLOW_DISSECT_RET_CONTINUE;
switch (ip_proto) {
case IPPROTO_GRE:
fdret = __skb_flow_dissect_gre(skb, key_control, flow_dissector,
target_container, data,
&proto, &nhoff, &hlen, flags);
break;
case NEXTHDR_HOP:
case NEXTHDR_ROUTING:
case NEXTHDR_DEST: {
u8 _opthdr[2], *opthdr;
if (proto != htons(ETH_P_IPV6))
break;
opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
data, hlen, &_opthdr);
if (!opthdr) {
fdret = FLOW_DISSECT_RET_OUT_BAD;
break;
}
ip_proto = opthdr[0];
nhoff += (opthdr[1] + 1) << 3;
fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
break;
}
case NEXTHDR_FRAGMENT: {
struct frag_hdr _fh, *fh;
if (proto != htons(ETH_P_IPV6))
break;
fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
data, hlen, &_fh);
if (!fh) {
fdret = FLOW_DISSECT_RET_OUT_BAD;
break;
}
key_control->flags |= FLOW_DIS_IS_FRAGMENT;
nhoff += sizeof(_fh);
ip_proto = fh->nexthdr;
if (!(fh->frag_off & htons(IP6_OFFSET))) {
key_control->flags |= FLOW_DIS_FIRST_FRAG;
if (flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG) {
fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
break;
}
}
fdret = FLOW_DISSECT_RET_OUT_GOOD;
break;
}
case IPPROTO_IPIP:
proto = htons(ETH_P_IP);
key_control->flags |= FLOW_DIS_ENCAPSULATION;
if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
fdret = FLOW_DISSECT_RET_OUT_GOOD;
break;
}
fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
break;
case IPPROTO_IPV6:
proto = htons(ETH_P_IPV6);
key_control->flags |= FLOW_DIS_ENCAPSULATION;
if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
fdret = FLOW_DISSECT_RET_OUT_GOOD;
break;
}
fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
break;
case IPPROTO_MPLS:
proto = htons(ETH_P_MPLS_UC);
fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
break;
case IPPROTO_TCP:
__skb_flow_dissect_tcp(skb, flow_dissector, target_container,
data, nhoff, hlen);
break;
default:
break;
}
if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS) &&
!(key_control->flags & FLOW_DIS_IS_FRAGMENT)) {
key_ports = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_PORTS,
target_container);
key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
data, hlen);
}
if (dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_ICMP)) {
key_icmp = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_ICMP,
target_container);
key_icmp->icmp = skb_flow_get_be16(skb, nhoff, data, hlen);
}
/* Process result of IP proto processing */
switch (fdret) {
case FLOW_DISSECT_RET_PROTO_AGAIN:
if (skb_flow_dissect_allowed(&num_hdrs))
goto proto_again;
break;
case FLOW_DISSECT_RET_IPPROTO_AGAIN:
if (skb_flow_dissect_allowed(&num_hdrs))
goto ip_proto_again;
break;
case FLOW_DISSECT_RET_OUT_GOOD:
case FLOW_DISSECT_RET_CONTINUE:
break;
case FLOW_DISSECT_RET_OUT_BAD:
default:
goto out_bad;
}
out_good:
ret = true;
out:
key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
key_basic->n_proto = proto;
key_basic->ip_proto = ip_proto;
return ret;
out_bad:
ret = false;
goto out;
}
| 0 |
[
"CWE-330"
] |
linux
|
55667441c84fa5e0911a0aac44fb059c15ba6da2
| 323,295,493,770,826,000,000,000,000,000,000,000,000 | 506 |
net/flow_dissector: switch to siphash
UDP IPv6 packets auto flowlabels are using a 32bit secret
(static u32 hashrnd in net/core/flow_dissector.c) and
apply jhash() over fields known by the receivers.
Attackers can easily infer the 32bit secret and use this information
to identify a device and/or user, since this 32bit secret is only
set at boot time.
Really, using jhash() to generate cookies sent on the wire
is a serious security concern.
Trying to change the rol32(hash, 16) in ip6_make_flowlabel() would be
a dead end. Trying to periodically change the secret (like in sch_sfq.c)
could change paths taken in the network for long lived flows.
Let's switch to siphash, as we did in commit df453700e8d8
("inet: switch IP ID generator to siphash")
Using a cryptographically strong pseudo random function will solve this
privacy issue and more generally remove other weak points in the stack.
Packet schedulers using skb_get_hash_perturb() benefit from this change.
Fixes: b56774163f99 ("ipv6: Enable auto flow labels by default")
Fixes: 42240901f7c4 ("ipv6: Implement different admin modes for automatic flow labels")
Fixes: 67800f9b1f4e ("ipv6: Call skb_get_hash_flowi6 to get skb->hash in ip6_make_flowlabel")
Fixes: cb1ce2ef387b ("ipv6: Implement automatic flow label generation on transmit")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Jonathan Berger <[email protected]>
Reported-by: Amit Klein <[email protected]>
Reported-by: Benny Pinkas <[email protected]>
Cc: Tom Herbert <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
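
The commit message above argues that a 32-bit static secret behind a non-cryptographic hash is recoverable by an observer. The toy program below illustrates why: given one observed output and the known input fields, a full 2^32 brute force over the secret finishes in seconds to minutes on commodity hardware. mix32() is a generic integer finalizer used as a stand-in; it is not jhash(), and nothing here touches a real network stack.

/*
 * Toy demonstration of the weakness described in the commit message: a
 * 32-bit secret fed into a cheap, non-cryptographic mix can be recovered
 * by brute force from a single observed output.
 */
#include <stdint.h>
#include <stdio.h>

/* Cheap mixing of known fields with a 32-bit secret (stand-in, not jhash). */
static uint32_t mix32(uint32_t known_fields, uint32_t secret)
{
    uint32_t x = known_fields ^ secret;
    x ^= x >> 16;
    x *= 0x7feb352dU;
    x ^= x >> 15;
    x *= 0x846ca68bU;
    x ^= x >> 16;
    return x;
}

int main(void)
{
    const uint32_t secret = 0x1337beefU;   /* what the attacker wants */
    const uint32_t fields = 0xc0a80001U;   /* e.g. addresses/ports, known */
    const uint32_t observed = mix32(fields, secret);

    /* 2^32 trials of a cheap function is entirely practical. */
    for (uint64_t guess = 0; guess <= 0xffffffffULL; guess++) {
        if (mix32(fields, (uint32_t)guess) == observed) {
            printf("recovered secret: 0x%08x\n", (uint32_t)guess);
            break;
        }
    }
    return 0;
}

A keyed cryptographic PRF such as SipHash removes this shortcut, since the attacker would have to guess a 128-bit key rather than a 32-bit one.
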
struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
{
struct wmi *wmi;
wmi = kzalloc(sizeof(struct wmi), GFP_KERNEL);
if (!wmi)
return NULL;
wmi->drv_priv = priv;
wmi->stopped = false;
skb_queue_head_init(&wmi->wmi_event_queue);
spin_lock_init(&wmi->wmi_lock);
spin_lock_init(&wmi->event_lock);
mutex_init(&wmi->op_mutex);
mutex_init(&wmi->multi_write_mutex);
mutex_init(&wmi->multi_rmw_mutex);
init_completion(&wmi->cmd_wait);
INIT_LIST_HEAD(&wmi->pending_tx_events);
tasklet_init(&wmi->wmi_event_tasklet, ath9k_wmi_event_tasklet,
(unsigned long)wmi);
return wmi;
}
| 0 |
[
"CWE-400",
"CWE-401"
] |
linux
|
728c1e2a05e4b5fc52fab3421dce772a806612a2
| 178,736,323,933,219,200,000,000,000,000,000,000,000 | 23 |
ath9k: release allocated buffer if timed out
In ath9k_wmi_cmd, the allocated network buffer needs to be released
if timeout happens. Otherwise memory will be leaked.
Signed-off-by: Navid Emamdoost <[email protected]>
Signed-off-by: Kalle Valo <[email protected]>
|
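
The leak described in the commit message above is the classic allocate-then-forget-on-error shape: the buffer is released on the success path but not when the wait times out. A small self-contained sketch of both paths, with a hypothetical wait_for_reply() standing in for the device wait, follows.

/*
 * Sketch of the leak pattern fixed above: a buffer allocated for a command
 * must also be released on the timeout error path, not only on success.
 */
#include <stdio.h>
#include <stdlib.h>

/* Pretend to wait for a device reply; returns 0 on success, -1 on timeout. */
static int wait_for_reply(int simulate_timeout)
{
    return simulate_timeout ? -1 : 0;
}

static int send_cmd(int simulate_timeout)
{
    unsigned char *buf = malloc(256);
    if (!buf)
        return -1;

    if (wait_for_reply(simulate_timeout) != 0) {
        free(buf);        /* the missing release in the original bug */
        return -1;        /* -ETIMEDOUT in the kernel version */
    }

    /* ... use the reply in buf ... */
    free(buf);
    return 0;
}

int main(void)
{
    printf("success path: %d\n", send_cmd(0));
    printf("timeout path: %d\n", send_cmd(1));
    return 0;
}
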
void Examples::RandomShuffle() {
std::iota(sampled_index_.begin(), sampled_index_.end(), 0);
std::random_device rd;
std::mt19937 rng(rd());
std::shuffle(sampled_index_.begin(), sampled_index_.end(), rng);
}
| 0 |
[
"CWE-703",
"CWE-125"
] |
tensorflow
|
a4e138660270e7599793fa438cd7b2fc2ce215a6
| 114,354,435,735,587,330,000,000,000,000,000,000,000 | 7 |
Add remaining validation to `sdca_internal.cc`
PiperOrigin-RevId: 387738010
Change-Id: I28eedcfd87a53aaf34deb075acea1f8c95470808
|
DEFINE_RUN_ONCE_STATIC_ALT(ossl_init_no_load_crypto_strings,
ossl_init_load_crypto_strings)
{
/* Do nothing in this case */
return 1;
}
| 0 |
[
"CWE-330"
] |
openssl
|
1b0fe00e2704b5e20334a16d3c9099d1ba2ef1be
| 125,401,930,231,962,630,000,000,000,000,000,000,000 | 6 |
drbg: ensure fork-safety without using a pthread_atfork handler
When the new OpenSSL CSPRNG was introduced in version 1.1.1,
it was announced in the release notes that it would be fork-safe,
which the old CSPRNG hadn't been.
The fork-safety was implemented using a fork count, which was
incremented by a pthread_atfork handler. Initially, this handler
was enabled by default. Unfortunately, the default behaviour
had to be changed for other reasons in commit b5319bdbd095, so
the new OpenSSL CSPRNG failed to keep its promise.
This commit restores the fork-safety using a different approach.
It replaces the fork count by a fork id, which coincides with
the process id on UNIX-like operating systems and is zero on other
operating systems. It is used to detect when an automatic reseed
after a fork is necessary.
To prevent a future regression, it also adds a test to verify that
the child reseeds after fork.
CVE-2019-1549
Reviewed-by: Paul Dale <[email protected]>
Reviewed-by: Matt Caswell <[email protected]>
(Merged from https://github.com/openssl/openssl/pull/9802)
|
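
The mechanism described in the commit message above (a fork id that coincides with the process id) can be illustrated without any OpenSSL code: the generator records which pid seeded it and reseeds whenever getpid() disagrees. The toy_drbg below is exactly that sketch; its "entropy" (time xor pid) and the linear congruential step are placeholders, not the real DRBG.

/*
 * Minimal sketch of pid-based fork detection: the generator remembers
 * which process seeded it and reseeds when getpid() no longer matches.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

struct toy_drbg {
    unsigned int state;
    pid_t seeded_pid;      /* the "fork id" */
};

static void toy_reseed(struct toy_drbg *d)
{
    d->state = (unsigned int)time(NULL) ^ (unsigned int)getpid();
    d->seeded_pid = getpid();
}

static unsigned int toy_generate(struct toy_drbg *d)
{
    if (d->seeded_pid != getpid())   /* fork happened: child must reseed */
        toy_reseed(d);
    d->state = d->state * 1103515245u + 12345u;
    return d->state;
}

int main(void)
{
    struct toy_drbg d;
    toy_reseed(&d);

    pid_t pid = fork();
    if (pid < 0)
        return 1;

    printf("%s pid=%d output=%u\n", pid == 0 ? "child " : "parent",
           (int)getpid(), toy_generate(&d));

    if (pid == 0)
        _exit(0);
    wait(NULL);
    return 0;
}
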
static int replmd_defer_add_backlink(struct ldb_module *module,
struct replmd_private *replmd_private,
const struct dsdb_schema *schema,
struct replmd_replicated_request *ac,
struct ldb_dn *forward_dn,
struct GUID *target_guid, bool active,
const struct dsdb_attribute *schema_attr,
struct ldb_request *parent)
{
const struct dsdb_attribute *target_attr;
struct la_backlink *bl;
bl = talloc(ac, struct la_backlink);
if (bl == NULL) {
ldb_module_oom(module);
return LDB_ERR_OPERATIONS_ERROR;
}
target_attr = dsdb_attribute_by_linkID(schema, schema_attr->linkID ^ 1);
if (!target_attr) {
/*
* windows 2003 has a broken schema where the
* definition of msDS-IsDomainFor is missing (which is
* supposed to be the backlink of the
* msDS-HasDomainNCs attribute
*/
return LDB_SUCCESS;
}
bl->attr_name = target_attr->lDAPDisplayName;
bl->forward_dn = talloc_steal(bl, forward_dn);
bl->target_guid = *target_guid;
bl->active = active;
DLIST_ADD(ac->la_backlinks, bl);
return LDB_SUCCESS;
}
| 0 |
[
"CWE-200"
] |
samba
|
0a3aa5f908e351201dc9c4d4807b09ed9eedff77
| 240,792,594,202,294,740,000,000,000,000,000,000,000 | 38 |
CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <[email protected]>
|
static enum_func_status
php_mysqlnd_read_row_ex(MYSQLND_PFC * pfc,
MYSQLND_VIO * vio,
MYSQLND_STATS * stats,
MYSQLND_ERROR_INFO * error_info,
MYSQLND_CONNECTION_STATE * connection_state,
MYSQLND_MEMORY_POOL * pool,
MYSQLND_ROW_BUFFER * buffer,
size_t * const data_size)
{
enum_func_status ret = PASS;
MYSQLND_PACKET_HEADER header;
zend_uchar * p = NULL;
size_t prealloc_more_bytes;
DBG_ENTER("php_mysqlnd_read_row_ex");
/*
To ease the process the server splits everything in packets up to 2^24 - 1.
Even in the case the payload is evenly divisible by this value, the last
packet will be empty, namely 0 bytes. Thus, we can read every packet and ask
for next one if they have 2^24 - 1 sizes. But just read the header of a
zero-length byte, don't read the body, there is no such.
*/
/*
We're allocating an extra byte, as php_mysqlnd_rowp_read_text_protocol_aux
needs to be able to append a terminating \0 for atoi/atof.
*/
prealloc_more_bytes = 1;
*data_size = 0;
if (UNEXPECTED(FAIL == mysqlnd_read_header(pfc, vio, &header, stats, error_info))) {
ret = FAIL;
SET_CONNECTION_STATE(connection_state, CONN_QUIT_SENT);
set_packet_error(error_info, CR_SERVER_GONE_ERROR, UNKNOWN_SQLSTATE, mysqlnd_server_gone);
} else {
*data_size += header.size;
buffer->ptr = pool->get_chunk(pool, *data_size + prealloc_more_bytes);
p = buffer->ptr;
if (UNEXPECTED(PASS != (ret = pfc->data->m.receive(pfc, vio, p, header.size, stats, error_info)))) {
DBG_ERR("Empty row packet body");
SET_CONNECTION_STATE(connection_state, CONN_QUIT_SENT);
set_packet_error(error_info, CR_SERVER_GONE_ERROR, UNKNOWN_SQLSTATE, mysqlnd_server_gone);
} else {
while (header.size >= MYSQLND_MAX_PACKET_SIZE) {
if (FAIL == mysqlnd_read_header(pfc, vio, &header, stats, error_info)) {
ret = FAIL;
break;
}
*data_size += header.size;
/* Empty packet after MYSQLND_MAX_PACKET_SIZE packet. That's ok, break */
if (!header.size) {
break;
}
/*
We have to realloc the buffer.
*/
buffer->ptr = pool->resize_chunk(pool, buffer->ptr, *data_size - header.size, *data_size + prealloc_more_bytes);
if (!buffer->ptr) {
SET_OOM_ERROR(error_info);
ret = FAIL;
break;
}
/* The position could have changed, recalculate */
p = (zend_uchar *) buffer->ptr + (*data_size - header.size);
if (PASS != (ret = pfc->data->m.receive(pfc, vio, p, header.size, stats, error_info))) {
DBG_ERR("Empty row packet body");
SET_CONNECTION_STATE(connection_state, CONN_QUIT_SENT);
set_packet_error(error_info, CR_SERVER_GONE_ERROR, UNKNOWN_SQLSTATE, mysqlnd_server_gone);
break;
}
}
}
}
if (ret == FAIL && buffer->ptr) {
pool->free_chunk(pool, buffer->ptr);
buffer->ptr = NULL;
}
DBG_RETURN(ret);
| 0 |
[
"CWE-120"
] |
php-src
|
58006537fc5f133ae8549efe5118cde418b3ace9
| 49,222,602,760,936,400,000,000,000,000,000,000,000 | 85 |
Fix bug #81719: mysqlnd/pdo password buffer overflow
|
transformFkeyCheckAttrs(Relation pkrel,
int numattrs, int16 *attnums,
Oid *opclasses) /* output parameter */
{
Oid indexoid = InvalidOid;
bool found = false;
bool found_deferrable = false;
List *indexoidlist;
ListCell *indexoidscan;
/*
* Get the list of index OIDs for the table from the relcache, and look up
* each one in the pg_index syscache, and match unique indexes to the list
* of attnums we are given.
*/
indexoidlist = RelationGetIndexList(pkrel);
foreach(indexoidscan, indexoidlist)
{
HeapTuple indexTuple;
Form_pg_index indexStruct;
int i,
j;
indexoid = lfirst_oid(indexoidscan);
indexTuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(indexoid));
if (!HeapTupleIsValid(indexTuple))
elog(ERROR, "cache lookup failed for index %u", indexoid);
indexStruct = (Form_pg_index) GETSTRUCT(indexTuple);
/*
* Must have the right number of columns; must be unique and not a
* partial index; forget it if there are any expressions, too. Invalid
* indexes are out as well.
*/
if (indexStruct->indnatts == numattrs &&
indexStruct->indisunique &&
IndexIsValid(indexStruct) &&
heap_attisnull(indexTuple, Anum_pg_index_indpred) &&
heap_attisnull(indexTuple, Anum_pg_index_indexprs))
{
/* Must get indclass the hard way */
Datum indclassDatum;
bool isnull;
oidvector *indclass;
indclassDatum = SysCacheGetAttr(INDEXRELID, indexTuple,
Anum_pg_index_indclass, &isnull);
Assert(!isnull);
indclass = (oidvector *) DatumGetPointer(indclassDatum);
/*
* The given attnum list may match the index columns in any order.
* Check that each list is a subset of the other.
*/
for (i = 0; i < numattrs; i++)
{
found = false;
for (j = 0; j < numattrs; j++)
{
if (attnums[i] == indexStruct->indkey.values[j])
{
found = true;
break;
}
}
if (!found)
break;
}
if (found)
{
for (i = 0; i < numattrs; i++)
{
found = false;
for (j = 0; j < numattrs; j++)
{
if (attnums[j] == indexStruct->indkey.values[i])
{
opclasses[j] = indclass->values[i];
found = true;
break;
}
}
if (!found)
break;
}
}
/*
* Refuse to use a deferrable unique/primary key. This is per SQL
* spec, and there would be a lot of interesting semantic problems
* if we tried to allow it.
*/
if (found && !indexStruct->indimmediate)
{
/*
* Remember that we found an otherwise matching index, so that
* we can generate a more appropriate error message.
*/
found_deferrable = true;
found = false;
}
}
ReleaseSysCache(indexTuple);
if (found)
break;
}
if (!found)
{
if (found_deferrable)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("cannot use a deferrable unique constraint for referenced table \"%s\"",
RelationGetRelationName(pkrel))));
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_FOREIGN_KEY),
errmsg("there is no unique constraint matching given keys for referenced table \"%s\"",
RelationGetRelationName(pkrel))));
}
list_free(indexoidlist);
return indexoid;
}
| 0 |
[
"CWE-362"
] |
postgres
|
5f173040e324f6c2eebb90d86cf1b0cdb5890f0a
| 322,380,965,070,649,600,000,000,000,000,000,000,000 | 126 |
Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062
|
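
The race sketched in the commit message above exists because the same name is resolved more than once while concurrent DDL can change what it refers to; the fix amounts to resolving once and threading the resolved identity through every later step. The toy program below simulates that with a lookup whose answer changes between calls; none of the names correspond to PostgreSQL internals.

/*
 * Sketch of the "resolve the name once, then act on the resolved id"
 * pattern. The tiny catalog and its race are simulated.
 */
#include <stdio.h>

static int lookup_calls;

/* Simulated catalog lookup whose answer changes between calls,
 * standing in for a concurrent rename or drop-and-recreate. */
static int lookup_table_oid(const char *name)
{
    (void)name;
    return ++lookup_calls == 1 ? 1001 : 2002;
}

static int check_owner(int oid)  { printf("perm check on oid %d\n", oid); return 1; }
static void build_index(int oid) { printf("index built on oid %d\n", oid); }

/* Racy shape: two lookups may disagree. */
static void create_index_racy(const char *name)
{
    if (check_owner(lookup_table_oid(name)))
        build_index(lookup_table_oid(name));   /* may hit a different table */
}

/* Fixed shape: one lookup, one identity for every step. */
static void create_index_fixed(const char *name)
{
    int oid = lookup_table_oid(name);
    if (check_owner(oid))
        build_index(oid);
}

int main(void)
{
    lookup_calls = 0;
    create_index_racy("t");
    lookup_calls = 0;
    create_index_fixed("t");
    return 0;
}
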
gconf_changed_callback (GConfClient *client,
guint cnxn_id,
GConfEntry *entry,
GSManager *manager)
{
gnome_bg_load_from_preferences (manager->priv->bg,
manager->priv->client);
}
| 0 |
[] |
gnome-screensaver
|
2f597ea9f1f363277fd4dfc109fa41bbc6225aca
| 149,660,521,142,094,780,000,000,000,000,000,000,000 | 8 |
Fix adding monitors
Make sure to show windows that are added. And fix an off by one bug.
|
static int svc_dropparty(struct socket *sock, int ep_ref)
{
DEFINE_WAIT(wait);
struct sock *sk = sock->sk;
struct atm_vcc *vcc = ATM_SD(sock);
int error;
lock_sock(sk);
set_bit(ATM_VF_WAITING, &vcc->flags);
prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
sigd_enq2(vcc, as_dropparty, NULL, NULL, NULL, NULL, ep_ref);
while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
schedule();
prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
}
finish_wait(sk->sk_sleep, &wait);
if (!sigd) {
error = -EUNATCH;
goto out;
}
error = xchg(&sk->sk_err_soft, 0);
out:
release_sock(sk);
return error;
}
| 0 |
[
"CWE-399"
] |
linux-2.6
|
17b24b3c97498935a2ef9777370b1151dfed3f6f
| 278,646,426,101,153,250,000,000,000,000,000,000,000 | 25 |
ATM: CVE-2008-5079: duplicate listen() on socket corrupts the vcc table
As reported by Hugo Dias that it is possible to cause a local denial
of service attack by calling the svc_listen function twice on the same
socket and reading /proc/net/atm/*vc
Signed-off-by: Chas Williams <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|