func (string, lengths 0-484k) | target (int64, 0-1) | cwe (sequence, lengths 0-4) | project (string, 799 classes) | commit_id (string, 40 chars) | hash (float64) | size (int64, 1-24k) | message (string, lengths 0-13.3k)
---|---|---|---|---|---|---|---|
void SSL_CTX_set_default_passwd_cb(SSL_CTX* ctx, pem_password_cb cb)
{
ctx->SetPasswordCb(cb);
} | 0 | [
"CWE-254"
] | mysql-server | e7061f7e5a96c66cb2e0bf46bec7f6ff35801a69 | 258,780,734,124,697,900,000,000,000,000,000,000,000 | 4 | Bug #22738607: YASSL FUNCTION X509_NAME_GET_INDEX_BY_NID IS NOT WORKING AS EXPECTED. |
sds sdscpylen(sds s, const char *t, size_t len) {
if (sdsalloc(s) < len) {
s = sdsMakeRoomFor(s,len-sdslen(s));
if (s == NULL) return NULL;
}
memcpy(s, t, len);
s[len] = '\0';
sdssetlen(s, len);
return s;
} | 0 | [
"CWE-190"
] | redis | d32f2e9999ce003bad0bd2c3bca29f64dcce4433 | 119,933,246,021,783,570,000,000,000,000,000,000,000 | 10 | Fix integer overflow (CVE-2021-21309). (#8522)
On 32-bit systems, setting the proto-max-bulk-len config parameter to a high value may result with integer overflow and a subsequent heap overflow when parsing an input bulk (CVE-2021-21309).
This fix has two parts:
Set a reasonable limit to the config parameter.
Add additional checks to prevent the problem in other potential but unknown code paths. |
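The commit message above describes a length computation that wraps on 32-bit builds, so the later allocation is smaller than the data copied into it. A minimal, generic sketch of the kind of guard involved (a hypothetical `alloc_bulk` helper and an example 512 MB cap, not the actual Redis code):

```c
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Generic illustration of the failure mode: adding a header length to an
 * attacker-influenced bulk length can wrap around, so the allocation ends
 * up smaller than the data later copied into it. The checks below reject
 * the request instead. */
static void *alloc_bulk(size_t bulk_len, size_t header_len)
{
    const size_t max_bulk = 512u * 1024u * 1024u;   /* example cap, 512 MB */

    if (bulk_len > max_bulk)                        /* sane configurable limit */
        return NULL;
    if (bulk_len > SIZE_MAX - header_len)           /* arithmetic overflow guard */
        return NULL;

    return malloc(header_len + bulk_len);
}

int main(void)
{
    void *p = alloc_bulk((size_t)-1 - 8, 16);       /* would wrap without the guard */
    printf("allocation %s\n", p ? "succeeded" : "rejected");
    free(p);
    return 0;
}
```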
ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
struct ioat_chan_common *chan = to_chan_common(c);
struct ioatdma_device *device = chan->device;
enum dma_status ret;
ret = dma_cookie_status(c, cookie, txstate);
if (ret == DMA_COMPLETE)
return ret;
device->cleanup_fn((unsigned long) c);
return dma_cookie_status(c, cookie, txstate);
} | 0 | [] | linux | 7bced397510ab569d31de4c70b39e13355046387 | 147,551,893,751,263,340,000,000,000,000,000,000,000 | 15 | net_dma: simple removal
Per commit "77873803363c net_dma: mark broken" net_dma is no longer used
and there is no plan to fix it.
This is the mechanical removal of bits in CONFIG_NET_DMA ifdef guards.
Reverting the remainder of the net_dma induced changes is deferred to
subsequent patches.
Marked for stable due to Roman's report of a memory leak in
dma_pin_iovec_pages():
https://lkml.org/lkml/2014/9/3/177
Cc: Dave Jiang <[email protected]>
Cc: Vinod Koul <[email protected]>
Cc: David Whipple <[email protected]>
Cc: Alexander Duyck <[email protected]>
Cc: <[email protected]>
Reported-by: Roman Gushchin <[email protected]>
Acked-by: David S. Miller <[email protected]>
Signed-off-by: Dan Williams <[email protected]> |
GetCipherPtr(VALUE obj)
{
EVP_CIPHER_CTX *ctx;
SafeGetCipher(obj, ctx);
return EVP_CIPHER_CTX_cipher(ctx);
} | 0 | [
"CWE-326"
] | ruby | 739782e37a6662fea379e7ef3ec89e851b04b46c | 234,760,043,525,518,330,000,000,000,000,000,000,000 | 8 | * ext/openssl/ossl_cipher.c: remove the encryption key initialization
from Cipher#initialize. This is effectively a revert of r32723
("Avoid possible SEGV from AES encryption/decryption", 2011-07-28).
the patch is derived from https://github.com/ruby/openssl/commit/8108e0a6db133f3375608303fdd2083eb5115062,
written by Kazuki Yamaguchi.
[Backport #8221]
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/branches/ruby_2_3@59267 b2dd03c8-39d4-4d8f-98ff-823fe69b080e |
archive_write_get_bytes_in_last_block(struct archive *_a)
{
struct archive_write *a = (struct archive_write *)_a;
archive_check_magic(&a->archive, ARCHIVE_WRITE_MAGIC,
ARCHIVE_STATE_ANY, "archive_write_get_bytes_in_last_block");
return (a->bytes_in_last_block);
} | 0 | [
"CWE-703",
"CWE-189"
] | libarchive | 22531545514043e04633e1c015c7540b9de9dbe4 | 261,685,578,720,710,520,000,000,000,000,000,000,000 | 7 | Limit write requests to at most INT_MAX.
This prevents a certain common programming error (passing -1 to write)
from leading to other problems deeper in the library. |
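A hedged sketch of the clamping idea named in the commit message (a size larger than INT_MAX, such as the result of passing -1, is capped before it reaches code that still uses int arithmetic); the helper name is illustrative, not libarchive's API:

```c
#include <stdio.h>
#include <limits.h>
#include <stddef.h>

/* Clamp a write request so downstream int-based code never sees a value
 * larger than INT_MAX. */
static size_t clamp_write_request(size_t requested)
{
    if (requested > (size_t)INT_MAX)
        return (size_t)INT_MAX;
    return requested;
}

int main(void)
{
    size_t bogus = (size_t)-1;   /* what "write -1 bytes" turns into */
    printf("clamped to %zu bytes\n", clamp_write_request(bogus));
    return 0;
}
```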
static int readOHDRHeaderMessageDataFill1or2(struct READER *reader) {
int spaceAllocationTime = fgetc(reader->fhd);
int fillValueWriteTime = fgetc(reader->fhd);
int fillValueDefined = fgetc(reader->fhd);
if (spaceAllocationTime < 0 || fillValueWriteTime < 0 || fillValueDefined < 0)
return MYSOFA_READ_ERROR; // LCOV_EXCL_LINE
if ((spaceAllocationTime & ~1) != 2 || fillValueWriteTime != 2 ||
(fillValueDefined & ~1) != 0) {
mylog("spaceAllocationTime %d fillValueWriteTime %d fillValueDefined %d\n",
spaceAllocationTime, fillValueWriteTime, fillValueDefined);
return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_LINE
}
if (fillValueDefined > 0) {
uint32_t size = (uint32_t)readValue(reader, 4);
if (fseek(reader->fhd, size, SEEK_CUR) < 0)
return errno; // LCOV_EXCL_LINE
}
return MYSOFA_OK;
} | 0 | [
"CWE-787"
] | libmysofa | 890400ebd092c574707d0c132124f8ff047e20e1 | 130,488,941,134,424,830,000,000,000,000,000,000,000 | 22 | Fix for issue 163 |
bool Match(IMkvReader* pReader, long long& pos, unsigned long expected_id,
unsigned char*& buf, size_t& buflen) {
if (!pReader || pos < 0)
return false;
long long total = 0;
long long available = 0;
long status = pReader->Length(&total, &available);
if (status < 0 || (total >= 0 && available > total))
return false;
long len = 0;
const long long id = ReadID(pReader, pos, len);
if (id < 0 || (available - pos) > len)
return false;
if (static_cast<unsigned long>(id) != expected_id)
return false;
pos += len; // consume id
const long long size = ReadUInt(pReader, pos, len);
if (size < 0 || len <= 0 || len > 8 || (available - pos) > len)
return false;
unsigned long long rollover_check =
static_cast<unsigned long long>(pos) + len;
if (rollover_check > LLONG_MAX)
return false;
pos += len; // consume length of size of payload
rollover_check = static_cast<unsigned long long>(pos) + size;
if (rollover_check > LLONG_MAX)
return false;
if ((pos + size) > available)
return false;
if (size >= LONG_MAX)
return false;
const long buflen_ = static_cast<long>(size);
buf = SafeArrayAlloc<unsigned char>(1, buflen_);
if (!buf)
return false;
status = pReader->Read(pos, buflen_, buf);
if (status != 0)
return false;
buflen = buflen_;
pos += size; // consume size of payload
return true;
} | 0 | [
"CWE-20"
] | libvpx | 34d54b04e98dd0bac32e9aab0fbda0bf501bc742 | 203,140,366,357,750,500,000,000,000,000,000,000,000 | 58 | update libwebm to libwebm-1.0.0.27-358-gdbf1d10
changelog:
https://chromium.googlesource.com/webm/libwebm/+log/libwebm-1.0.0.27-351-g9f23fbc..libwebm-1.0.0.27-358-gdbf1d10
Change-Id: I28a6b3ae02a53fb1f2029eee11e9449afb94c8e3 |
to_pixman_image (cairo_surface_t *s)
{
return ((cairo_image_surface_t *)s)->pixman_image;
} | 0 | [
"CWE-787"
] | cairo | c986a7310bb06582b7d8a566d5f007ba4e5e75bf | 55,343,601,728,149,740,000,000,000,000,000,000,000 | 4 | image: Enable inplace compositing with opacities for general routines
On a SNB i5-2500:
Speedups
========
firefox-chalkboard 34284.16 -> 19637.40: 1.74x speedup
swfdec-giant-steps 778.35 -> 665.37: 1.17x speedup
ocitysmap 485.64 -> 431.94: 1.12x speedup
Slowdowns
=========
firefox-fishbowl 46878.98 -> 54407.14: 1.16x slowdown
That slow down is due to overhead of the increased number of calls to
pixman_image_composite32() (pixman_transform_point for analyzing the
source extents in particular) outweighing any advantage gained by
performing the rasterisation in a single pass and eliding gaps. The
solution that has been floated in the past is for an interface into
pixman to only perform the analysis once and then to return a kernel to
use for all spans.
Signed-off-by: Chris Wilson <[email protected]> |
static struct ldb_val ldb_dn_convert_local(struct ldb_module *module, void *mem_ctx, const struct ldb_val *val)
{
struct ldb_context *ldb;
struct ldb_dn *dn, *newdn;
struct ldb_val newval;
ldb = ldb_module_get_ctx(module);
dn = ldb_dn_from_ldb_val(mem_ctx, ldb, val);
if (! ldb_dn_validate(dn)) {
newval.length = 0;
newval.data = NULL;
talloc_free(dn);
return newval;
}
newdn = ldb_dn_map_local(module, mem_ctx, dn);
talloc_free(dn);
newval.length = 0;
newval.data = (uint8_t *)ldb_dn_alloc_linearized(mem_ctx, newdn);
if (newval.data) {
newval.length = strlen((char *)newval.data);
}
talloc_free(newdn);
return newval;
} | 0 | [
"CWE-200"
] | samba | 0a3aa5f908e351201dc9c4d4807b09ed9eedff77 | 293,994,864,736,971,230,000,000,000,000,000,000,000 | 27 | CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <[email protected]> |
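The error-prone pattern named in the commit message is "add an element, then search for it by name to modify it", which can find a different element with the same name. The sketch below uses a tiny hypothetical message structure, not the real ldb API, to show why an append helper that returns the new element avoids the second lookup:

```c
#include <stdio.h>
#include <string.h>

#define MAX_ELEMENTS 8

struct element { const char *name; const char *value; };
struct message { struct element elems[MAX_ELEMENTS]; int num; };

/* Error-prone pattern: search by name after adding. If an element with the
 * same name already exists, the search can return the wrong one. */
static struct element *find_element(struct message *msg, const char *name)
{
    for (int i = 0; i < msg->num; i++)
        if (strcmp(msg->elems[i].name, name) == 0)
            return &msg->elems[i];
    return NULL;
}

/* Safer pattern: the append helper hands back the element it just created,
 * so the caller never needs to look it up again. */
static struct element *append_element(struct message *msg, const char *name)
{
    if (msg->num >= MAX_ELEMENTS)
        return NULL;
    struct element *e = &msg->elems[msg->num++];
    e->name = name;
    e->value = NULL;
    return e;
}

int main(void)
{
    struct message msg = { .num = 0 };
    append_element(&msg, "member");              /* pre-existing element */

    struct element *e = append_element(&msg, "member");
    e->value = "cn=new user";                    /* modify exactly what we added */

    struct element *found = find_element(&msg, "member");
    printf("appended element is #%ld, lookup-by-name returns #%ld\n",
           (long)(e - msg.elems), (long)(found - msg.elems));
    return 0;
}
```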
apr_byte_t oidc_util_jwt_verify(request_rec *r, const char *secret, const char *compact_encoded_jwt,
json_t **result, apr_byte_t stripped_header) {
oidc_debug(r, "enter: JWT header=%s", oidc_proto_peek_jwt_header(r, compact_encoded_jwt, NULL));
apr_byte_t rv = FALSE;
oidc_jose_error_t err;
oidc_jwk_t *jwk = NULL;
oidc_jwt_t *jwt = NULL;
if (oidc_util_create_symmetric_key(r, secret, 0, OIDC_JOSE_ALG_SHA256, FALSE, &jwk) == FALSE)
goto end;
apr_hash_t *keys = apr_hash_make(r->pool);
apr_hash_set(keys, "", APR_HASH_KEY_STRING, jwk);
if (stripped_header == TRUE)
compact_encoded_jwt =
apr_pstrcat(r->pool, OIDC_JWT_HDR_DIR_A256GCM, compact_encoded_jwt, NULL);
if (oidc_jwt_parse(r->pool, compact_encoded_jwt, &jwt, keys, &err) == FALSE) {
oidc_error(r, "parsing JWT failed: %s", oidc_jose_e2s(r->pool, err));
goto end;
}
if (oidc_jwt_verify(r->pool, jwt, keys, &err) == FALSE) {
oidc_error(r, "verifying JWT failed: %s", oidc_jose_e2s(r->pool, err));
goto end;
}
*result = json_deep_copy(jwt->payload.value.json);
rv = TRUE;
end:
if (jwk != NULL)
oidc_jwk_destroy(jwk);
if (jwt != NULL)
oidc_jwt_destroy(jwt);
return rv;
} | 0 | [
"CWE-79"
] | mod_auth_openidc | 55ea0a085290cd2c8cdfdd960a230cbc38ba8b56 | 213,165,911,450,463,030,000,000,000,000,000,000,000 | 44 | Add a function to escape Javascript characters |
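The fix above (CWE-79) adds a function to escape characters before embedding strings in JavaScript. A minimal sketch of such an escaper, unrelated to the actual mod_auth_openidc implementation:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Escape the characters that would let attacker-controlled text break out
 * of a quoted JavaScript string (quotes, backslashes, angle brackets,
 * newlines). Caller frees the result. */
static char *escape_js_string(const char *in)
{
    size_t len = strlen(in);
    char *out = malloc(len * 6 + 1);   /* worst case: every char becomes \u00XX */
    if (!out)
        return NULL;

    char *p = out;
    for (const unsigned char *s = (const unsigned char *)in; *s; s++) {
        switch (*s) {
        case '\\': p += sprintf(p, "\\\\"); break;
        case '"':  p += sprintf(p, "\\\""); break;
        case '\'': p += sprintf(p, "\\'");  break;
        case '<':  p += sprintf(p, "\\u003c"); break;
        case '>':  p += sprintf(p, "\\u003e"); break;
        case '\n': p += sprintf(p, "\\n"); break;
        case '\r': p += sprintf(p, "\\r"); break;
        default:   *p++ = (char)*s; break;
        }
    }
    *p = '\0';
    return out;
}

int main(void)
{
    char *safe = escape_js_string("</script><script>alert('x')</script>");
    printf("var v = \"%s\";\n", safe);
    free(safe);
    return 0;
}
```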
static int do_move_mount(struct path *path, const char *old_name)
{
struct path old_path, parent_path;
struct mount *p;
struct mount *old;
struct mountpoint *mp;
int err;
if (!old_name || !*old_name)
return -EINVAL;
err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
if (err)
return err;
mp = lock_mount(path);
err = PTR_ERR(mp);
if (IS_ERR(mp))
goto out;
old = real_mount(old_path.mnt);
p = real_mount(path->mnt);
err = -EINVAL;
if (!check_mnt(p) || !check_mnt(old))
goto out1;
if (old->mnt.mnt_flags & MNT_LOCKED)
goto out1;
err = -EINVAL;
if (old_path.dentry != old_path.mnt->mnt_root)
goto out1;
if (!mnt_has_parent(old))
goto out1;
if (d_is_dir(path->dentry) !=
d_is_dir(old_path.dentry))
goto out1;
/*
* Don't move a mount residing in a shared parent.
*/
if (IS_MNT_SHARED(old->mnt_parent))
goto out1;
/*
* Don't move a mount tree containing unbindable mounts to a destination
* mount which is shared.
*/
if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
goto out1;
err = -ELOOP;
for (; mnt_has_parent(p); p = p->mnt_parent)
if (p == old)
goto out1;
err = attach_recursive_mnt(old, real_mount(path->mnt), mp, &parent_path);
if (err)
goto out1;
/* if the mount is moved, it should no longer be expire
* automatically */
list_del_init(&old->mnt_expire);
out1:
unlock_mount(mp);
out:
if (!err)
path_put(&parent_path);
path_put(&old_path);
return err;
} | 0 | [
"CWE-703"
] | linux | cd4a40174b71acd021877341684d8bb1dc8ea4ae | 199,697,165,402,149,600,000,000,000,000,000,000,000 | 69 | mnt: Fail collect_mounts when applied to unmounted mounts
The only users of collect_mounts are in audit_tree.c
In audit_trim_trees and audit_add_tree_rule the path passed into
collect_mounts is generated from kern_path passed an audit_tree
pathname which is guaranteed to be an absolute path. In those cases
collect_mounts is obviously intended to work on mounted paths and
if a race results in paths that are unmounted when collect_mounts
it is reasonable to fail early.
The paths passed into audit_tag_tree don't have the absolute path
check. But are used to play with fsnotify and otherwise interact with
the audit_trees, so again operating only on mounted paths appears
reasonable.
Avoid having to worry about what happens when we try and audit
unmounted filesystems by restricting collect_mounts to mounts
that appear in the mount tree.
Signed-off-by: "Eric W. Biederman" <[email protected]> |
static void ext4_mb_pa_callback(struct rcu_head *head)
{
struct ext4_prealloc_space *pa;
pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
BUG_ON(atomic_read(&pa->pa_count));
BUG_ON(pa->pa_deleted == 0);
kmem_cache_free(ext4_pspace_cachep, pa);
} | 0 | [
"CWE-416"
] | linux | 8844618d8aa7a9973e7b527d038a2a589665002c | 108,801,731,321,232,000,000,000,000,000,000,000,000 | 9 | ext4: only look at the bg_flags field if it is valid
The bg_flags field in the block group descripts is only valid if the
uninit_bg or metadata_csum feature is enabled. We were not
consistently looking at this field; fix this.
Also block group #0 must never have uninitialized allocation bitmaps,
or need to be zeroed, since that's where the root inode, and other
special inodes are set up. Check for these conditions and mark the
file system as corrupted if they are detected.
This addresses CVE-2018-10876.
https://bugzilla.kernel.org/show_bug.cgi?id=199403
Signed-off-by: Theodore Ts'o <[email protected]>
Cc: [email protected] |
int gnutls_x509_ext_import_private_key_usage_period(const gnutls_datum_t * ext,
time_t * activation,
time_t * expiration)
{
int result, ret;
ASN1_TYPE c2 = ASN1_TYPE_EMPTY;
result = asn1_create_element
(_gnutls_get_pkix(), "PKIX1.PrivateKeyUsagePeriod", &c2);
if (result != ASN1_SUCCESS) {
gnutls_assert();
ret = _gnutls_asn2err(result);
goto cleanup;
}
result = asn1_der_decoding(&c2, ext->data, ext->size, NULL);
if (result != ASN1_SUCCESS) {
gnutls_assert();
ret = _gnutls_asn2err(result);
goto cleanup;
}
if (activation)
*activation = _gnutls_x509_get_time(c2, "notBefore", 1);
if (expiration)
*expiration = _gnutls_x509_get_time(c2, "notAfter", 1);
ret = 0;
cleanup:
asn1_delete_structure(&c2);
return ret;
} | 0 | [] | gnutls | d6972be33264ecc49a86cd0958209cd7363af1e9 | 143,742,744,600,662,320,000,000,000,000,000,000,000 | 35 | eliminated double-free in the parsing of dist points
Reported by Robert Święcki. |
handleURI(const char *str, const char *base, FILE *o) {
int ret;
xmlURIPtr uri;
xmlChar *res = NULL;
uri = xmlCreateURI();
if (base == NULL) {
ret = xmlParseURIReference(uri, str);
if (ret != 0)
fprintf(o, "%s : error %d\n", str, ret);
else {
xmlNormalizeURIPath(uri->path);
xmlPrintURI(o, uri);
fprintf(o, "\n");
}
} else {
res = xmlBuildURI((xmlChar *)str, (xmlChar *) base);
if (res != NULL) {
fprintf(o, "%s\n", (char *) res);
}
else
fprintf(o, "::ERROR::\n");
}
if (res != NULL)
xmlFree(res);
xmlFreeURI(uri);
} | 0 | [
"CWE-125"
] | libxml2 | a820dbeac29d330bae4be05d9ecd939ad6b4aa33 | 233,386,981,009,027,370,000,000,000,000,000,000,000 | 28 | Bug 758605: Heap-based buffer overread in xmlDictAddString <https://bugzilla.gnome.org/show_bug.cgi?id=758605>
Reviewed by David Kilzer.
* HTMLparser.c:
(htmlParseName): Add bounds check.
(htmlParseNameComplex): Ditto.
* result/HTML/758605.html: Added.
* result/HTML/758605.html.err: Added.
* result/HTML/758605.html.sax: Added.
* runtest.c:
(pushParseTest): The input for the new test case was so small
(4 bytes) that htmlParseChunk() was never called after
htmlCreatePushParserCtxt(), thereby creating a false positive
test failure. Fixed by using a do-while loop so we always call
htmlParseChunk() at least once.
* test/HTML/758605.html: Added. |
void MACH0_(kv_loadlibs)(struct MACH0_(obj_t) * bin) {
int i;
for (i = 0; i < bin->nlibs; i++) {
sdb_set(bin->kv, sdb_fmt("libs.%d.name", i), bin->libs[i], 0);
}
} | 0 | [
"CWE-787"
] | rizin | 348b1447d1452f978b69631d6de5b08dd3bdf79d | 51,916,680,485,372,970,000,000,000,000,000,000,000 | 6 | fix #2956 - oob write in mach0.c |
merge_keys_and_selfsig (KBNODE keyblock)
{
if (!keyblock)
;
else if (keyblock->pkt->pkttype == PKT_PUBLIC_KEY)
merge_selfsigs (keyblock);
else
log_debug ("FIXME: merging secret key blocks is not anymore available\n");
} | 0 | [
"CWE-310"
] | gnupg | 4bde12206c5bf199dc6e12a74af8da4558ba41bf | 231,385,246,840,796,500,000,000,000,000,000,000,000 | 9 | gpg: Distinguish between missing and cleared key flags.
* include/cipher.h (PUBKEY_USAGE_NONE): New.
* g10/getkey.c (parse_key_usage): Set new flag.
--
We do not want to use the default capabilities (derived from the
algorithm) if any key flags are given in a signature. Thus if key
flags are used in any way, the default key capabilities are never
used.
This allows to create a key with key flags set to all zero so it can't
be used. This better reflects common sense. |
static struct uvc_entity *uvc_entity_by_reference(struct uvc_device *dev,
int id, struct uvc_entity *entity)
{
unsigned int i;
if (entity == NULL)
entity = list_entry(&dev->entities, struct uvc_entity, list);
list_for_each_entry_continue(entity, &dev->entities, list) {
for (i = 0; i < entity->bNrInPins; ++i)
if (entity->baSourceID[i] == id)
return entity;
}
return NULL;
} | 0 | [
"CWE-269"
] | linux | 68035c80e129c4cfec659aac4180354530b26527 | 194,070,061,358,313,130,000,000,000,000,000,000,000 | 16 | media: uvcvideo: Avoid cyclic entity chains due to malformed USB descriptors
Way back in 2017, fuzzing the 4.14-rc2 USB stack with syzkaller kicked
up the following WARNING from the UVC chain scanning code:
| list_add double add: new=ffff880069084010, prev=ffff880069084010,
| next=ffff880067d22298.
| ------------[ cut here ]------------
| WARNING: CPU: 1 PID: 1846 at lib/list_debug.c:31 __list_add_valid+0xbd/0xf0
| Modules linked in:
| CPU: 1 PID: 1846 Comm: kworker/1:2 Not tainted
| 4.14.0-rc2-42613-g1488251d1a98 #238
| Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
| Workqueue: usb_hub_wq hub_event
| task: ffff88006b01ca40 task.stack: ffff880064358000
| RIP: 0010:__list_add_valid+0xbd/0xf0 lib/list_debug.c:29
| RSP: 0018:ffff88006435ddd0 EFLAGS: 00010286
| RAX: 0000000000000058 RBX: ffff880067d22298 RCX: 0000000000000000
| RDX: 0000000000000058 RSI: ffffffff85a58800 RDI: ffffed000c86bbac
| RBP: ffff88006435dde8 R08: 1ffff1000c86ba52 R09: 0000000000000000
| R10: 0000000000000002 R11: 0000000000000000 R12: ffff880069084010
| R13: ffff880067d22298 R14: ffff880069084010 R15: ffff880067d222a0
| FS: 0000000000000000(0000) GS:ffff88006c900000(0000) knlGS:0000000000000000
| CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
| CR2: 0000000020004ff2 CR3: 000000006b447000 CR4: 00000000000006e0
| Call Trace:
| __list_add ./include/linux/list.h:59
| list_add_tail+0x8c/0x1b0 ./include/linux/list.h:92
| uvc_scan_chain_forward.isra.8+0x373/0x416
| drivers/media/usb/uvc/uvc_driver.c:1471
| uvc_scan_chain drivers/media/usb/uvc/uvc_driver.c:1585
| uvc_scan_device drivers/media/usb/uvc/uvc_driver.c:1769
| uvc_probe+0x77f2/0x8f00 drivers/media/usb/uvc/uvc_driver.c:2104
Looking into the output from usbmon, the interesting part is the
following data packet:
ffff880069c63e00 30710169 C Ci:1:002:0 0 143 = 09028f00 01030080
00090403 00000e01 00000924 03000103 7c003328 010204db
If we drop the lead configuration and interface descriptors, we're left
with an output terminal descriptor describing a generic display:
/* Output terminal descriptor */
buf[0] 09
buf[1] 24
buf[2] 03 /* UVC_VC_OUTPUT_TERMINAL */
buf[3] 00 /* ID */
buf[4] 01 /* type == 0x0301 (UVC_OTT_DISPLAY) */
buf[5] 03
buf[6] 7c
buf[7] 00 /* source ID refers to self! */
buf[8] 33
The problem with this descriptor is that it is self-referential: the
source ID of 0 matches itself! This causes the 'struct uvc_entity'
representing the display to be added to its chain list twice during
'uvc_scan_chain()': once via 'uvc_scan_chain_entity()' when it is
processed directly from the 'dev->entities' list and then again
immediately afterwards when trying to follow the source ID in
'uvc_scan_chain_forward()'
Add a check before adding an entity to a chain list to ensure that the
entity is not already part of a chain.
Link: https://lore.kernel.org/linux-media/CAAeHK+z+Si69jUR+N-SjN9q4O+o5KFiNManqEa-PjUta7EOb7A@mail.gmail.com/
Cc: <[email protected]>
Fixes: c0efd232929c ("V4L/DVB (8145a): USB Video Class driver")
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
Signed-off-by: Laurent Pinchart <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]> |
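The commit explains that a self-referential descriptor made the same entity get linked into a chain list twice, triggering the list_add double-add warning, and that the fix checks whether an entity is already on a chain before adding it. A userspace-style sketch of that guard with a tiny stand-in list type (not the actual uvcvideo code):

```c
#include <stdio.h>
#include <stddef.h>

/* Tiny stand-in for a kernel-style linked list, only to illustrate the
 * guard: refuse to add an entity that is already part of a chain, so a
 * descriptor whose source ID points at itself cannot link the node twice. */
struct list_node { struct list_node *prev, *next; };
struct entity   { int id; struct list_node chain; };

static void chain_init(struct list_node *head) { head->prev = head->next = head; }

static int entity_add_to_chain(struct list_node *head, struct entity *e)
{
    if (e->chain.next != NULL)            /* already linked somewhere: reject */
        return -1;
    e->chain.prev = head->prev;
    e->chain.next = head;
    head->prev->next = &e->chain;
    head->prev = &e->chain;
    return 0;
}

int main(void)
{
    struct list_node chain;
    struct entity display = { .id = 0, .chain = { NULL, NULL } };

    chain_init(&chain);
    printf("first add:  %d\n", entity_add_to_chain(&chain, &display));
    /* A self-referential descriptor would effectively try this: */
    printf("second add: %d\n", entity_add_to_chain(&chain, &display));
    return 0;
}
```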
static int snd_pcm_free(struct snd_pcm *pcm)
{
if (!pcm)
return 0;
if (!pcm->internal)
pcm_call_notify(pcm, n_unregister);
if (pcm->private_free)
pcm->private_free(pcm);
snd_pcm_lib_preallocate_free_for_all(pcm);
snd_pcm_free_stream(&pcm->streams[SNDRV_PCM_STREAM_PLAYBACK]);
snd_pcm_free_stream(&pcm->streams[SNDRV_PCM_STREAM_CAPTURE]);
kfree(pcm);
return 0;
} | 0 | [
"CWE-416"
] | linux | 362bca57f5d78220f8b5907b875961af9436e229 | 178,297,068,078,881,970,000,000,000,000,000,000,000 | 14 | ALSA: pcm: prevent UAF in snd_pcm_info
When the device descriptor is closed, the `substream->runtime` pointer
is freed. But another thread may be in the ioctl handler, case
SNDRV_CTL_IOCTL_PCM_INFO. This case calls snd_pcm_info_user() which
calls snd_pcm_info() which accesses the now freed `substream->runtime`.
Note: this fixes CVE-2017-0861
Signed-off-by: Robb Glasser <[email protected]>
Signed-off-by: Nick Desaulniers <[email protected]>
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]> |
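The message describes one thread freeing `substream->runtime` on close while another thread in the PCM_INFO ioctl path still dereferences it. The sketch below shows the general remedy for this class of race in userspace terms: both paths take the same lock, and the reader re-checks the pointer under that lock. It is a generic illustration, not the actual ALSA fix.

```c
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct runtime { int rate; };
struct substream {
    pthread_mutex_t lock;
    struct runtime *runtime;   /* torn down on close */
};

static int substream_info(struct substream *s, int *rate_out)
{
    int ret = -1;
    pthread_mutex_lock(&s->lock);
    if (s->runtime) {              /* may have been freed concurrently */
        *rate_out = s->runtime->rate;
        ret = 0;
    }
    pthread_mutex_unlock(&s->lock);
    return ret;
}

static void substream_close(struct substream *s)
{
    pthread_mutex_lock(&s->lock);
    free(s->runtime);
    s->runtime = NULL;             /* readers now see NULL, not a dangling pointer */
    pthread_mutex_unlock(&s->lock);
}

int main(void)
{
    struct substream s = { PTHREAD_MUTEX_INITIALIZER, malloc(sizeof(struct runtime)) };
    if (!s.runtime)
        return 1;
    s.runtime->rate = 48000;

    int rate;
    printf("before close: %d\n", substream_info(&s, &rate) == 0 ? rate : -1);
    substream_close(&s);
    printf("after close:  %d\n", substream_info(&s, &rate) == 0 ? rate : -1);
    return 0;
}
```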
static void usage(void) {
printf(PACKAGE " " VERSION "\n");
printf("-p <num> TCP port number to listen on (default: 11211)\n"
"-U <num> UDP port number to listen on (default: 11211, 0 is off)\n"
"-s <file> UNIX socket path to listen on (disables network support)\n"
"-a <mask> access mask for UNIX socket, in octal (default: 0700)\n"
"-l <ip_addr> interface to listen on (default: INADDR_ANY, all addresses)\n"
"-d run as a daemon\n"
"-r maximize core file limit\n"
"-u <username> assume identity of <username> (only when run as root)\n"
"-m <num> max memory to use for items in megabytes (default: 64 MB)\n"
"-M return error on memory exhausted (rather than removing items)\n"
"-c <num> max simultaneous connections (default: 1024)\n"
"-k lock down all paged memory. Note that there is a\n"
" limit on how much memory you may lock. Trying to\n"
" allocate more than that would fail, so be sure you\n"
" set the limit correctly for the user you started\n"
" the daemon with (not for -u <username> user;\n"
" under sh this is done with 'ulimit -S -l NUM_KB').\n"
"-v verbose (print errors/warnings while in event loop)\n"
"-vv very verbose (also print client commands/reponses)\n"
"-vvv extremely verbose (also print internal state transitions)\n"
"-h print this help and exit\n"
"-i print memcached and libevent license\n"
"-P <file> save PID in <file>, only used with -d option\n"
"-f <factor> chunk size growth factor (default: 1.25)\n"
"-n <bytes> minimum space allocated for key+value+flags (default: 48)\n");
printf("-L Try to use large memory pages (if available). Increasing\n"
" the memory page size could reduce the number of TLB misses\n"
" and improve the performance. In order to get large pages\n"
" from the OS, memcached will allocate the total item-cache\n"
" in one large chunk.\n");
printf("-D <char> Use <char> as the delimiter between key prefixes and IDs.\n"
" This is used for per-prefix stats reporting. The default is\n"
" \":\" (colon). If this option is specified, stats collection\n"
" is turned on automatically; if not, then it may be turned on\n"
" by sending the \"stats detail on\" command to the server.\n");
printf("-t <num> number of threads to use (default: 4)\n");
printf("-R Maximum number of requests per event, limits the number of\n"
" requests process for a given connection to prevent \n"
" starvation (default: 20)\n");
printf("-C Disable use of CAS\n");
printf("-b Set the backlog queue limit (default: 1024)\n");
printf("-B Binding protocol - one of ascii, binary, or auto (default)\n");
printf("-I Override the size of each slab page. Adjusts max item size\n"
" (default: 1mb, min: 1k, max: 128m)\n");
#ifdef ENABLE_SASL
printf("-S Turn on Sasl authentication\n");
#endif
return;
} | 0 | [
"CWE-20"
] | memcached | d9cd01ede97f4145af9781d448c62a3318952719 | 93,397,198,303,447,680,000,000,000,000,000,000,000 | 51 | Use strncmp when checking for large ascii multigets. |
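The one-line message above boils down to: command data sits in a length-delimited buffer that is not guaranteed to be NUL-terminated, so a prefix check must be bounded rather than using an unbounded string compare. A small generic sketch of that idea:

```c
#include <stdio.h>
#include <string.h>

/* Prefix check bounded by both the prefix length and the buffer length,
 * safe for buffers without a trailing '\0'. */
static int starts_with(const char *buf, size_t buflen, const char *prefix)
{
    size_t plen = strlen(prefix);
    return buflen >= plen && strncmp(buf, prefix, plen) == 0;
}

int main(void)
{
    char raw[] = { 'g', 'e', 't', ' ', 'k', '1' };   /* no trailing '\0' */
    printf("is multiget: %d\n", starts_with(raw, sizeof(raw), "get "));
    return 0;
}
```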
htp_status_t htp_tx_state_response_headers(htp_tx_t *tx) {
if (tx == NULL) return HTP_ERROR;
// Check for compression.
// Determine content encoding.
tx->response_content_encoding = HTP_COMPRESSION_NONE;
htp_header_t *ce = htp_table_get_c(tx->response_headers, "content-encoding");
if (ce != NULL) {
if ((bstr_cmp_c_nocase(ce->value, "gzip") == 0) || (bstr_cmp_c_nocase(ce->value, "x-gzip") == 0)) {
tx->response_content_encoding = HTP_COMPRESSION_GZIP;
} else if ((bstr_cmp_c_nocase(ce->value, "deflate") == 0) || (bstr_cmp_c_nocase(ce->value, "x-deflate") == 0)) {
tx->response_content_encoding = HTP_COMPRESSION_DEFLATE;
} else if (bstr_cmp_c_nocase(ce->value, "inflate") != 0) {
htp_log(tx->connp, HTP_LOG_MARK, HTP_LOG_WARNING, 0, "Unknown response content encoding");
}
}
// Configure decompression, if enabled in the configuration.
if (tx->connp->cfg->response_decompression_enabled) {
tx->response_content_encoding_processing = tx->response_content_encoding;
} else {
tx->response_content_encoding_processing = HTP_COMPRESSION_NONE;
}
// Finalize sending raw header data.
htp_status_t rc = htp_connp_res_receiver_finalize_clear(tx->connp);
if (rc != HTP_OK) return rc;
// Run hook RESPONSE_HEADERS.
rc = htp_hook_run_all(tx->connp->cfg->hook_response_headers, tx);
if (rc != HTP_OK) return rc;
// Initialize the decompression engine as necessary. We can deal with three
// scenarios:
//
// 1. Decompression is enabled, compression indicated in headers, and we decompress.
//
// 2. As above, but the user disables decompression by setting response_content_encoding
// to COMPRESSION_NONE.
//
// 3. Decompression is disabled and we do not attempt to enable it, but the user
// forces decompression by setting response_content_encoding to one of the
// supported algorithms.
if ((tx->response_content_encoding_processing == HTP_COMPRESSION_GZIP) || (tx->response_content_encoding_processing == HTP_COMPRESSION_DEFLATE)) {
if (tx->connp->out_decompressor != NULL) {
tx->connp->out_decompressor->destroy(tx->connp->out_decompressor);
tx->connp->out_decompressor = NULL;
}
tx->connp->out_decompressor = htp_gzip_decompressor_create(tx->connp, tx->response_content_encoding_processing);
if (tx->connp->out_decompressor == NULL) return HTP_ERROR;
tx->connp->out_decompressor->callback = htp_tx_res_process_body_data_decompressor_callback;
} else if (tx->response_content_encoding_processing != HTP_COMPRESSION_NONE) {
return HTP_ERROR;
}
return HTP_OK;
} | 0 | [] | libhtp | c7c03843cd6b1cbf44eb435d160ba53aec948828 | 274,269,601,792,147,330,000,000,000,000,000,000,000 | 62 | Harden decompress code against memory stress
Under severe memory pressure the decompress code can fail to setup
properly. Add checks before dereferencing pointers. |
GF_Err csgp_box_read(GF_Box *s, GF_BitStream *bs)
{
u32 i, bits, gidx_mask;
Bool index_msb_indicates_fragment_local_description, grouping_type_parameter_present;
u32 pattern_size, scount_size, index_size;
GF_CompactSampleGroupBox *ptr = (GF_CompactSampleGroupBox *)s;
ISOM_DECREASE_SIZE(ptr, 8);
ptr->version = gf_bs_read_u8(bs);
ptr->flags = gf_bs_read_u24(bs);
index_msb_indicates_fragment_local_description = (ptr->flags & (1<<7)) ? GF_TRUE : GF_FALSE;
grouping_type_parameter_present = (ptr->flags & (1<<6)) ? GF_TRUE : GF_FALSE;
pattern_size = get_size_by_code( ((ptr->flags>>4) & 0x3) );
scount_size = get_size_by_code( ((ptr->flags>>2) & 0x3) );
index_size = get_size_by_code( (ptr->flags & 0x3) );
if (((pattern_size==4) && (scount_size!=4)) || ((pattern_size!=4) && (scount_size==4))) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] compact sample gorup pattern_size and sample_count_size mare not both 4 bits\n"));
return GF_ISOM_INVALID_FILE;
}
ptr->grouping_type = gf_bs_read_u32(bs);
if (grouping_type_parameter_present) {
ISOM_DECREASE_SIZE(ptr, 4);
ptr->grouping_type_parameter = gf_bs_read_u32(bs);
}
ISOM_DECREASE_SIZE(ptr, 4);
ptr->pattern_count = gf_bs_read_u32(bs);
if ( (ptr->size / ( (pattern_size + scount_size) / 8 ) < ptr->pattern_count) || (u64)ptr->pattern_count > (u64)SIZE_MAX/sizeof(GF_CompactSampleGroupPattern) ) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] compact sample gorup pattern_count value (%lu) invalid\n", ptr->pattern_count));
return GF_ISOM_INVALID_FILE;
}
ptr->patterns = gf_malloc(sizeof(GF_CompactSampleGroupPattern) * ptr->pattern_count);
if (!ptr->patterns) return GF_OUT_OF_MEM;
memset(ptr->patterns, 0, sizeof(GF_CompactSampleGroupPattern) * ptr->pattern_count);
u64 patterns_sizes=0;
bits = 0;
for (i=0; i<ptr->pattern_count; i++) {
ptr->patterns[i].length = gf_bs_read_int(bs, pattern_size);
ptr->patterns[i].sample_count = gf_bs_read_int(bs, scount_size);
bits += pattern_size + scount_size;
if (! (bits % 8)) {
bits/=8;
ISOM_DECREASE_SIZE(ptr, bits);
bits=0;
}
patterns_sizes+=ptr->patterns[i].length;
if (patterns_sizes * index_size > ptr->size*8) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] compact sample gorup pattern cumulated sizes "LLU" larger than box size "LLU"\n", patterns_sizes, ptr->size));
ptr->patterns[i].sample_group_description_indices = NULL;
return GF_ISOM_INVALID_FILE;
}
if ( (u64)ptr->patterns[i].length > (u64)SIZE_MAX/sizeof(u32) ) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] compact sample gorup pattern #%d value (%lu) invalid\n", i, ptr->patterns[i].length));
ptr->patterns[i].sample_group_description_indices = NULL;
return GF_ISOM_INVALID_FILE;
}
ptr->patterns[i].sample_group_description_indices = gf_malloc(sizeof(u32) * ptr->patterns[i].length);
if (!ptr->patterns[i].sample_group_description_indices) return GF_OUT_OF_MEM;
}
bits = 0;
gidx_mask = ((u32)1) << (index_size-1);
for (i=0; i<ptr->pattern_count; i++) {
u32 j;
for (j=0; j<ptr->patterns[i].length; j++) {
u32 idx = gf_bs_read_int(bs, index_size);
if (index_msb_indicates_fragment_local_description) {
//MSB set, this is a index of a group described in the fragment
if (idx & gidx_mask) {
idx += 0x10000;
idx &= ~gidx_mask;
}
}
ptr->patterns[i].sample_group_description_indices[j] = idx;
bits += index_size;
if (! (bits % 8)) {
bits/=8;
ISOM_DECREASE_SIZE(ptr, bits);
bits=0;
}
}
}
if (bits)
gf_bs_align(bs);
return GF_OK;
} | 0 | [
"CWE-787"
] | gpac | 77510778516803b7f7402d7423c6d6bef50254c3 | 260,800,820,163,561,730,000,000,000,000,000,000,000 | 94 | fixed #2255 |
GF_Err gf_sm_load_init_svg(GF_SceneLoader *load)
{
load->process = load_svg_run;
load->done = load_svg_done;
load->parse_string = load_svg_parse_string;
load->suspend = load_svg_suspend;
return GF_OK;
} | 0 | [
"CWE-787"
] | gpac | ea1eca00fd92fa17f0e25ac25652622924a9a6a0 | 129,844,112,795,458,140,000,000,000,000,000,000,000 | 10 | fixed #2138 |
HCURSOR CBINDInstallDlg::OnQueryDragIcon() {
return((HCURSOR)m_hIcon);
} | 0 | [
"CWE-284"
] | bind9 | 967a3b9419a3c12b8c0870c86d1ee3840bcbbad7 | 160,182,358,437,171,060,000,000,000,000,000,000,000 | 3 | [master] quote service registry paths
4532. [security] The BIND installer on Windows used an unquoted
service path, which can enable privilege escalation.
(CVE-2017-3141) [RT #45229] |
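With an unquoted service path containing spaces (for example C:\Program Files\ISC BIND 9\bin\named.exe), Windows may try to execute C:\Program.exe first, which a local user able to create that file can abuse for privilege escalation. A small hedged sketch of the fix's idea, quoting the executable path when composing the service command line (illustrative only, not the installer's code):

```c
#include <stdio.h>

/* Wrap the executable path in quotes when building the service's binary
 * path, so a path with spaces is not split at the first space. */
static int build_service_command(char *out, size_t outlen,
                                 const char *exe_path, const char *args)
{
    int n = snprintf(out, outlen, "\"%s\" %s", exe_path, args);
    return (n < 0 || (size_t)n >= outlen) ? -1 : 0;
}

int main(void)
{
    char cmd[512];
    if (build_service_command(cmd, sizeof(cmd),
                              "C:\\Program Files\\ISC BIND 9\\bin\\named.exe",
                              "-f") == 0)
        printf("%s\n", cmd);
    return 0;
}
```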
TEST_P(JSITest, PreparedJavaScriptSourceTest) {
rt.evaluateJavaScript(std::make_unique<StringBuffer>("var q = 0;"), "");
auto prep = rt.prepareJavaScript(std::make_unique<StringBuffer>("q++;"), "");
EXPECT_EQ(rt.global().getProperty(rt, "q").getNumber(), 0);
rt.evaluatePreparedJavaScript(prep);
EXPECT_EQ(rt.global().getProperty(rt, "q").getNumber(), 1);
rt.evaluatePreparedJavaScript(prep);
EXPECT_EQ(rt.global().getProperty(rt, "q").getNumber(), 2);
} | 0 | [
"CWE-843",
"CWE-125"
] | hermes | fe52854cdf6725c2eaa9e125995da76e6ceb27da | 261,810,362,154,030,430,000,000,000,000,000,000,000 | 9 | [CVE-2020-1911] Look up HostObject computed properties on the right object in the prototype chain.
Summary:
The change in the hermes repository fixes the security vulnerability
CVE-2020-1911. This vulnerability only affects applications which
allow evaluation of uncontrolled, untrusted JavaScript code not
shipped with the app, so React Native apps will generally not be affected.
This revision includes a test for the bug. The test is generic JSI
code, so it is included in the hermes and react-native repositories.
Changelog: [Internal]
Reviewed By: tmikov
Differential Revision: D23322992
fbshipit-source-id: 4e88c974afe1ad33a263f9cac03e9dc98d33649a |
template<typename tc>
CImg<T>& draw_circle(const int x0, const int y0, int radius,
const tc *const color, const float opacity,
const unsigned int pattern) {
if (pattern!=~0U) return draw_ellipse(x0,y0,radius,radius,0,color,opacity,pattern);
if (is_empty()) return *this;
if (!color)
throw CImgArgumentException(_cimg_instance
"draw_circle(): Specified color is (null).",
cimg_instance);
if (radius<0 || x0 - radius>=width() || y0 + radius<0 || y0 - radius>=height()) return *this;
if (!radius) return draw_point(x0,y0,color,opacity);
draw_point(x0 - radius,y0,color,opacity).draw_point(x0 + radius,y0,color,opacity).
draw_point(x0,y0 - radius,color,opacity).draw_point(x0,y0 + radius,color,opacity);
if (radius==1) return *this;
for (int f = 1 - radius, ddFx = 0, ddFy = -(radius<<1), x = 0, y = radius; x<y; ) {
if (f>=0) { f+=(ddFy+=2); --y; }
++x; ++(f+=(ddFx+=2));
if (x!=y + 1) {
const int x1 = x0 - y, x2 = x0 + y, y1 = y0 - x, y2 = y0 + x,
x3 = x0 - x, x4 = x0 + x, y3 = y0 - y, y4 = y0 + y;
draw_point(x1,y1,color,opacity).draw_point(x1,y2,color,opacity).
draw_point(x2,y1,color,opacity).draw_point(x2,y2,color,opacity);
if (x!=y)
draw_point(x3,y3,color,opacity).draw_point(x4,y4,color,opacity).
draw_point(x4,y3,color,opacity).draw_point(x3,y4,color,opacity);
}
}
return *this; | 0 | [
"CWE-119",
"CWE-787"
] | CImg | ac8003393569aba51048c9d67e1491559877b1d1 | 88,246,814,846,948,040,000,000,000,000,000,000,000 | 30 | . |
int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
return 0;
} | 0 | [
"CWE-399"
] | linux | 5f00110f7273f9ff04ac69a5f85bb535a4fd0987 | 241,457,370,814,717,800,000,000,000,000,000,000,000 | 4 | tmpfs: fix use-after-free of mempolicy object
The tmpfs remount logic preserves filesystem mempolicy if the mpol=M
option is not specified in the remount request. A new policy can be
specified if mpol=M is given.
Before this patch remounting an mpol bound tmpfs without specifying
mpol= mount option in the remount request would set the filesystem's
mempolicy object to a freed mempolicy object.
To reproduce the problem boot a DEBUG_PAGEALLOC kernel and run:
# mkdir /tmp/x
# mount -t tmpfs -o size=100M,mpol=interleave nodev /tmp/x
# grep /tmp/x /proc/mounts
nodev /tmp/x tmpfs rw,relatime,size=102400k,mpol=interleave:0-3 0 0
# mount -o remount,size=200M nodev /tmp/x
# grep /tmp/x /proc/mounts
nodev /tmp/x tmpfs rw,relatime,size=204800k,mpol=??? 0 0
# note ? garbage in mpol=... output above
# dd if=/dev/zero of=/tmp/x/f count=1
# panic here
Panic:
BUG: unable to handle kernel NULL pointer dereference at (null)
IP: [< (null)>] (null)
[...]
Oops: 0010 [#1] SMP DEBUG_PAGEALLOC
Call Trace:
mpol_shared_policy_init+0xa5/0x160
shmem_get_inode+0x209/0x270
shmem_mknod+0x3e/0xf0
shmem_create+0x18/0x20
vfs_create+0xb5/0x130
do_last+0x9a1/0xea0
path_openat+0xb3/0x4d0
do_filp_open+0x42/0xa0
do_sys_open+0xfe/0x1e0
compat_sys_open+0x1b/0x20
cstar_dispatch+0x7/0x1f
Non-debug kernels will not crash immediately because referencing the
dangling mpol will not cause a fault. Instead the filesystem will
reference a freed mempolicy object, which will cause unpredictable
behavior.
The problem boils down to a dropped mpol reference below if
shmem_parse_options() does not allocate a new mpol:
config = *sbinfo
shmem_parse_options(data, &config, true)
mpol_put(sbinfo->mpol)
sbinfo->mpol = config.mpol /* BUG: saves unreferenced mpol */
This patch avoids the crash by not releasing the mempolicy if
shmem_parse_options() doesn't create a new mpol.
How far back does this issue go? I see it in both 2.6.36 and 3.3. I did
not look back further.
Signed-off-by: Greg Thelen <[email protected]>
Acked-by: Hugh Dickins <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
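The commit message above already shows the buggy flow: the old policy is freed and then the same, now-dangling pointer is written back whenever the remount did not supply a new mpol= option. The sketch below restates the corrected logic in plain userspace C with hypothetical types (not the real shmem code): only free and replace the policy when the option parser actually produced a new one.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct mempolicy { char desc[32]; };
struct sbinfo    { struct mempolicy *mpol; };

static struct mempolicy *parse_mpol_option(const char *opts)
{
    const char *p = strstr(opts, "mpol=");
    if (!p)
        return NULL;                       /* no mpol= given: keep old policy */
    struct mempolicy *m = malloc(sizeof(*m));
    if (m)
        snprintf(m->desc, sizeof(m->desc), "%s", p + 5);
    return m;
}

static void remount(struct sbinfo *sb, const char *opts)
{
    struct mempolicy *new_mpol = parse_mpol_option(opts);
    if (new_mpol) {                        /* only swap when a new policy exists */
        free(sb->mpol);
        sb->mpol = new_mpol;
    }
}

int main(void)
{
    struct sbinfo sb = { parse_mpol_option("mpol=interleave") };

    remount(&sb, "size=200M");             /* no mpol=: policy must survive intact */
    printf("policy after remount: %s\n", sb.mpol ? sb.mpol->desc : "(none)");

    remount(&sb, "size=300M,mpol=bind");   /* explicit mpol=: replaced */
    printf("policy after remount: %s\n", sb.mpol ? sb.mpol->desc : "(none)");
    free(sb.mpol);
    return 0;
}
```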
scroll_start(void)
{
if (*T_VS != NUL && *T_CVS != NUL)
{
MAY_WANT_TO_LOG_THIS;
out_str(T_VS);
out_str(T_CVS);
screen_start(); // don't know where cursor is now
}
} | 0 | [
"CWE-125",
"CWE-787"
] | vim | e178af5a586ea023622d460779fdcabbbfac0908 | 172,887,244,647,583,700,000,000,000,000,000,000,000 | 10 | patch 8.2.5160: accessing invalid memory after changing terminal size
Problem: Accessing invalid memory after changing terminal size.
Solution: Adjust cmdline_row and msg_row to the value of Rows. |
int socket_create(uint16_t port)
{
int sfd = -1;
int yes = 1;
#ifdef WIN32
WSADATA wsa_data;
if (!wsa_init) {
if (WSAStartup(MAKEWORD(2,2), &wsa_data) != ERROR_SUCCESS) {
fprintf(stderr, "WSAStartup failed!\n");
ExitProcess(-1);
}
wsa_init = 1;
}
#endif
struct sockaddr_in saddr;
if (0 > (sfd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP))) {
perror("socket()");
return -1;
}
if (setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR, (void*)&yes, sizeof(int)) == -1) {
perror("setsockopt()");
socket_close(sfd);
return -1;
}
memset((void *) &saddr, 0, sizeof(saddr));
saddr.sin_family = AF_INET;
saddr.sin_addr.s_addr = htonl(INADDR_ANY);
saddr.sin_port = htons(port);
if (0 > bind(sfd, (struct sockaddr *) &saddr, sizeof(saddr))) {
perror("bind()");
socket_close(sfd);
return -1;
}
if (listen(sfd, 1) == -1) {
perror("listen()");
socket_close(sfd);
return -1;
}
return sfd;
} | 1 | [
"CWE-284"
] | libimobiledevice | df1f5c4d70d0c19ad40072f5246ca457e7f9849e | 911,592,361,452,309,300,000,000,000,000,000,000 | 46 | common: [security fix] Make sure sockets only listen locally |
static inline void free_arg_pages(struct linux_binprm *bprm)
{
int i;
for (i = 0; i < MAX_ARG_PAGES; i++) {
if (bprm->page[i])
__free_page(bprm->page[i]);
bprm->page[i] = NULL;
}
} | 0 | [] | linux-2.6 | 822191a2fa1584a29c3224ab328507adcaeac1ab | 111,973,301,259,186,610,000,000,000,000,000,000,000 | 10 | [PATCH] skip data conversion in compat_sys_mount when data_page is NULL
OpenVZ Linux kernel team has found a problem with mounting in compat mode.
Simple command "mount -t smbfs ..." on Fedora Core 5 distro in 32-bit mode
leads to oops:
Unable to handle kernel NULL pointer dereference at 0000000000000000 RIP: compat_sys_mount+0xd6/0x290
Process mount (pid: 14656, veid=300, threadinfo ffff810034d30000, task ffff810034c86bc0)
Call Trace: ia32_sysret+0x0/0xa
The problem is that data_page pointer can be NULL, so we should skip data
conversion in this case.
Signed-off-by: Andrey Mirkin <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
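The oops described above came from the compat mount path unconditionally converting mount data even when userspace passed a NULL data pointer, as "mount -t smbfs ..." legitimately does. A generic sketch of the guard (hypothetical helper name, not the kernel function):

```c
#include <stdio.h>

/* Only run the data-conversion step when the caller actually supplied
 * mount data; a NULL pointer is passed through instead of dereferenced. */
static int convert_mount_data(char *data)
{
    if (data == NULL)
        return 0;              /* nothing to convert */
    /* ... 32-bit -> native conversion of the option block would go here ... */
    printf("converting: %s\n", data);
    return 0;
}

int main(void)
{
    char opts[] = "user=guest";
    printf("with data: %d\n", convert_mount_data(opts));
    printf("NULL data: %d\n", convert_mount_data(NULL));
    return 0;
}
```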
inValidate(
Syntax *syntax,
struct berval *in )
{
/* no value allowed */
return LDAP_INVALID_SYNTAX;
} | 0 | [
"CWE-617"
] | openldap | 67670f4544e28fb09eb7319c39f404e1d3229e65 | 27,731,114,008,995,680,000,000,000,000,000,000,000 | 7 | ITS#9383 remove assert in certificateListValidate |
_copyDefineStmt(const DefineStmt *from)
{
DefineStmt *newnode = makeNode(DefineStmt);
COPY_SCALAR_FIELD(kind);
COPY_SCALAR_FIELD(oldstyle);
COPY_NODE_FIELD(defnames);
COPY_NODE_FIELD(args);
COPY_NODE_FIELD(definition);
return newnode;
} | 0 | [
"CWE-362"
] | postgres | 5f173040e324f6c2eebb90d86cf1b0cdb5890f0a | 41,854,590,432,164,730,000,000,000,000,000,000,000 | 12 | Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062 |
static void dynstr_append_checked(DYNAMIC_STRING* dest, const char* src)
{
if (dynstr_append(dest, src))
die(EX_MYSQLERR, DYNAMIC_STR_ERROR_MSG);
} | 0 | [
"CWE-284",
"CWE-295"
] | mysql-server | 3bd5589e1a5a93f9c224badf983cd65c45215390 | 173,582,452,133,006,180,000,000,000,000,000,000,000 | 5 | WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to eunecrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options |
gdk_pixbuf__ico_image_stop_load(gpointer data,
GError **error)
{
struct ico_progressive_state *context =
(struct ico_progressive_state *) data;
/* FIXME this thing needs to report errors if
* we have unused image data
*/
g_return_val_if_fail(context != NULL, TRUE);
context_free (context);
return TRUE;
} | 0 | [
"CWE-787"
] | gdk-pixbuf | 88af50a864195da1a4f7bda5f02539704fbda599 | 3,002,555,398,843,795,700,000,000,000,000,000,000 | 15 | ico: Be more careful when parsing headers
There is some redundancy between the ico directory and the
bitmap image header. If the two disagree on the icon dimensions,
just toss the image, instead of risking crashes or OOM later. Also
add some more debug spew that helped in tracking this down, and
make error messages more unique.
The commit also includes a test image that has an example of
this discrepancy and triggers the early exit.
https://bugzilla.gnome.org/show_bug.cgi?id=769170 |
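The commit message notes that the ICO directory and the embedded bitmap header both carry image dimensions, and that the loader now rejects the image when they disagree rather than risk crashes or huge allocations later. A hedged sketch of such a consistency check with simplified header structs (not the actual gdk-pixbuf code; it assumes the common conventions that a directory value of 0 means 256 and that the bitmap height covers both XOR and AND masks):

```c
#include <stdio.h>

struct ico_dir_entry { unsigned dir_width, dir_height; };
struct bmp_header    { unsigned width, height; };   /* height assumed to include AND mask */

/* The two headers are redundant; bail out early if they disagree. */
static int ico_dimensions_consistent(const struct ico_dir_entry *d,
                                     const struct bmp_header *h)
{
    unsigned w  = d->dir_width  ? d->dir_width  : 256;
    unsigned ht = d->dir_height ? d->dir_height : 256;
    return h->width == w && h->height == 2 * ht;
}

int main(void)
{
    struct ico_dir_entry dir = { 16, 16 };
    struct bmp_header good = { 16, 32 }, bad = { 4096, 8192 };

    printf("consistent: %d\n", ico_dimensions_consistent(&dir, &good));
    printf("mismatch:   %d\n", ico_dimensions_consistent(&dir, &bad));
    return 0;
}
```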
static void binder_transaction_buffer_release(struct binder_proc *proc,
struct binder_buffer *buffer,
binder_size_t *failed_at)
{
binder_size_t *offp, *off_start, *off_end;
int debug_id = buffer->debug_id;
binder_debug(BINDER_DEBUG_TRANSACTION,
"%d buffer release %d, size %zd-%zd, failed at %pK\n",
proc->pid, buffer->debug_id,
buffer->data_size, buffer->offsets_size, failed_at);
if (buffer->target_node)
binder_dec_node(buffer->target_node, 1, 0);
off_start = (binder_size_t *)(buffer->data +
ALIGN(buffer->data_size, sizeof(void *)));
if (failed_at)
off_end = failed_at;
else
off_end = (void *)off_start + buffer->offsets_size;
for (offp = off_start; offp < off_end; offp++) {
struct binder_object_header *hdr;
size_t object_size = binder_validate_object(buffer, *offp);
if (object_size == 0) {
pr_err("transaction release %d bad object at offset %lld, size %zd\n",
debug_id, (u64)*offp, buffer->data_size);
continue;
}
hdr = (struct binder_object_header *)(buffer->data + *offp);
switch (hdr->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
struct flat_binder_object *fp;
struct binder_node *node;
fp = to_flat_binder_object(hdr);
node = binder_get_node(proc, fp->binder);
if (node == NULL) {
pr_err("transaction release %d bad node %016llx\n",
debug_id, (u64)fp->binder);
break;
}
binder_debug(BINDER_DEBUG_TRANSACTION,
" node %d u%016llx\n",
node->debug_id, (u64)node->ptr);
binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
0);
binder_put_node(node);
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct flat_binder_object *fp;
struct binder_ref_data rdata;
int ret;
fp = to_flat_binder_object(hdr);
ret = binder_dec_ref_for_handle(proc, fp->handle,
hdr->type == BINDER_TYPE_HANDLE, &rdata);
if (ret) {
pr_err("transaction release %d bad handle %d, ret = %d\n",
debug_id, fp->handle, ret);
break;
}
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d\n",
rdata.debug_id, rdata.desc);
} break;
case BINDER_TYPE_FD: {
struct binder_fd_object *fp = to_binder_fd_object(hdr);
binder_debug(BINDER_DEBUG_TRANSACTION,
" fd %d\n", fp->fd);
if (failed_at)
task_close_fd(proc, fp->fd);
} break;
case BINDER_TYPE_PTR:
/*
* Nothing to do here, this will get cleaned up when the
* transaction buffer gets freed
*/
break;
case BINDER_TYPE_FDA: {
struct binder_fd_array_object *fda;
struct binder_buffer_object *parent;
uintptr_t parent_buffer;
u32 *fd_array;
size_t fd_index;
binder_size_t fd_buf_size;
fda = to_binder_fd_array_object(hdr);
parent = binder_validate_ptr(buffer, fda->parent,
off_start,
offp - off_start);
if (!parent) {
pr_err("transaction release %d bad parent offset\n",
debug_id);
continue;
}
/*
* Since the parent was already fixed up, convert it
* back to kernel address space to access it
*/
parent_buffer = parent->buffer -
binder_alloc_get_user_buffer_offset(
&proc->alloc);
fd_buf_size = sizeof(u32) * fda->num_fds;
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
pr_err("transaction release %d invalid number of fds (%lld)\n",
debug_id, (u64)fda->num_fds);
continue;
}
if (fd_buf_size > parent->length ||
fda->parent_offset > parent->length - fd_buf_size) {
/* No space for all file descriptors here. */
pr_err("transaction release %d not enough space for %lld fds in buffer\n",
debug_id, (u64)fda->num_fds);
continue;
}
fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
task_close_fd(proc, fd_array[fd_index]);
} break;
default:
pr_err("transaction release %d bad object type %x\n",
debug_id, hdr->type);
break;
}
}
} | 0 | [
"CWE-362"
] | linux | 5eeb2ca02a2f6084fc57ae5c244a38baab07033a | 107,722,742,760,986,260,000,000,000,000,000,000,000 | 134 | ANDROID: binder: synchronize_rcu() when using POLLFREE.
To prevent races with ep_remove_waitqueue() removing the
waitqueue at the same time.
Reported-by: [email protected]
Signed-off-by: Martijn Coenen <[email protected]>
Cc: stable <[email protected]> # 4.14+
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
doit (struct query *z, int state)
{
char key[257];
char misc[20], header[12];
char *buf = 0, *cached = 0;
const char *whichserver = 0;
unsigned int rcode = 0;
unsigned int posanswers = 0;
unsigned int len = 0, cachedlen = 0;
uint16 numanswers = 0;
uint16 numauthority = 0;
unsigned int posauthority = 0;
uint16 numglue = 0;
unsigned int pos = 0, pos2 = 0;
uint16 datalen = 0;
char *control = 0, *d = 0;
const char *dtype = 0;
unsigned int dlen = 0;
int flagout = 0, flagcname = 0;
int flagreferral = 0, flagsoa = 0;
int i = 0, j = 0, k = 0, p = 0, q = 0;
uint32 ttl = 0, soattl = 0, cnamettl = 0;
errno = error_io;
if (state == 1)
goto HAVEPACKET;
if (state == -1)
{
if (debug_level > 1)
log_servfail (z->name[z->level]);
goto SERVFAIL;
}
NEWNAME:
if (++z->loop == 100)
goto DIE;
d = z->name[z->level];
dtype = z->level ? DNS_T_A : z->type;
dlen = dns_domain_length (d);
if (globalip (d, misc))
{
if (z->level)
{
for (k = 0; k < 64; k += 4)
{
if (byte_equal (z->servers[z->level - 1] + k, 4, "\0\0\0\0"))
{
byte_copy (z->servers[z->level - 1] + k, 4, misc);
break;
}
}
goto LOWERLEVEL;
}
if (!rqa (z))
goto DIE;
if (typematch (DNS_T_A, dtype))
{
if (!response_rstart (d, DNS_T_A, 655360))
goto DIE;
if (!response_addbytes (misc, 4))
goto DIE;
response_rfinish (RESPONSE_ANSWER);
}
cleanup (z);
return 1;
}
if (dns_domain_equal (d, "\0011\0010\0010\003127\7in-addr\4arpa\0"))
{
if (z->level)
goto LOWERLEVEL;
if (!rqa (z))
goto DIE;
if (typematch (DNS_T_PTR, dtype))
{
if (!response_rstart (d, DNS_T_PTR, 655360))
goto DIE;
if (!response_addname ("\011localhost\0"))
goto DIE;
response_rfinish (RESPONSE_ANSWER);
}
cleanup (z);
if (debug_level > 2)
log_stats ();
return 1;
}
if (dlen <= 255)
{
byte_copy (key, 2, DNS_T_ANY);
byte_copy (key + 2, dlen, d);
case_lowerb (key + 2, dlen);
cached = cache_get (key, dlen + 2, &cachedlen, &ttl);
if (cached)
{
if (debug_level > 2)
log_cachednxdomain (d);
goto NXDOMAIN;
}
byte_copy (key, 2, DNS_T_CNAME);
cached = cache_get (key, dlen + 2, &cachedlen, &ttl);
if (cached)
{
if (typematch (DNS_T_CNAME, dtype))
{
if (debug_level > 2)
log_cachedanswer (d, DNS_T_CNAME);
if (!rqa (z))
goto DIE;
if (!response_cname (z->name[0], cached, ttl))
goto DIE;
cleanup (z);
return 1;
}
if (debug_level > 2)
log_cachedcname (d, cached);
if (!dns_domain_copy (&cname, cached))
goto DIE;
goto CNAME;
}
if (typematch (DNS_T_NS, dtype))
{
byte_copy (key, 2, DNS_T_NS);
cached = cache_get (key, dlen + 2, &cachedlen, &ttl);
if (cached && (cachedlen || byte_diff (dtype, 2, DNS_T_ANY)))
{
if (debug_level > 2)
log_cachedanswer (d, DNS_T_NS);
if (!rqa (z))
goto DIE;
pos = 0;
while ((pos=dns_packet_getname (cached, cachedlen, pos, &t2)))
{
if (!response_rstart (d, DNS_T_NS, ttl))
goto DIE;
if (!response_addname (t2))
goto DIE;
response_rfinish (RESPONSE_ANSWER);
}
cleanup (z);
return 1;
}
}
if (typematch (DNS_T_PTR, dtype))
{
byte_copy (key, 2, DNS_T_PTR);
cached = cache_get (key, dlen + 2, &cachedlen, &ttl);
if (cached && (cachedlen || byte_diff(dtype, 2, DNS_T_ANY)))
{
if (debug_level > 2)
log_cachedanswer (d, DNS_T_PTR);
if (!rqa (z))
goto DIE;
pos = 0;
while ((pos=dns_packet_getname (cached, cachedlen, pos, &t2)))
{
if (!response_rstart (d, DNS_T_PTR, ttl))
goto DIE;
if (!response_addname (t2))
goto DIE;
response_rfinish (RESPONSE_ANSWER);
}
cleanup(z);
return 1;
}
}
if (typematch (DNS_T_MX, dtype))
{
byte_copy (key, 2, DNS_T_MX);
cached = cache_get (key, dlen + 2, &cachedlen, &ttl);
if (cached && (cachedlen || byte_diff (dtype, 2, DNS_T_ANY)))
{
if (debug_level > 2)
log_cachedanswer (d, DNS_T_MX);
if (!rqa (z))
goto DIE;
pos = 0;
while ((pos=dns_packet_copy (cached, cachedlen, pos, misc, 2)))
{
pos = dns_packet_getname (cached, cachedlen, pos, &t2);
if (!pos)
break;
if (!response_rstart (d, DNS_T_MX, ttl))
goto DIE;
if (!response_addbytes (misc, 2))
goto DIE;
if (!response_addname (t2))
goto DIE;
response_rfinish (RESPONSE_ANSWER);
}
cleanup (z);
return 1;
}
}
if (typematch (DNS_T_SOA, dtype))
{
byte_copy (key, 2, DNS_T_SOA);
cached = cache_get (key, dlen + 2, &cachedlen, &ttl);
if (cached && (cachedlen || byte_diff (dtype, 2, DNS_T_ANY)))
{
log_cachedanswer (d, DNS_T_SOA);
if (!rqa (z))
goto DIE;
pos = 0;
while((pos = dns_packet_copy(cached, cachedlen,pos, misc, 20)))
{
pos = dns_packet_getname (cached, cachedlen, pos, &t2);
if (!pos)
break;
pos = dns_packet_getname (cached, cachedlen, pos, &t3);
if (!pos)
break;
if (!response_rstart (d, DNS_T_SOA, ttl))
goto DIE;
if (!response_addname (t2))
goto DIE;
if (!response_addname (t3))
goto DIE;
if (!response_addbytes(misc, 20))
goto DIE;
response_rfinish (RESPONSE_ANSWER);
}
cleanup (z);
return 1;
}
}
if (typematch (DNS_T_A, dtype))
{
byte_copy (key, 2, DNS_T_A);
cached = cache_get (key, dlen + 2, &cachedlen, &ttl);
if (cached && (cachedlen || byte_diff (dtype, 2, DNS_T_ANY)))
{
if (z->level)
{
if (debug_level > 2)
log_cachedanswer (d, DNS_T_A);
while (cachedlen >= 4)
{
for (k = 0; k < 64; k += 4)
{
if (byte_equal (z->servers[z->level - 1] + k,
4, "\0\0\0\0"))
{
byte_copy (z->servers[z->level - 1] + k,
4, cached);
break;
}
}
cached += 4;
cachedlen -= 4;
}
goto LOWERLEVEL;
}
if (debug_level > 2)
log_cachedanswer (d, DNS_T_A);
if (!rqa (z))
goto DIE;
while (cachedlen >= 4)
{
if (!response_rstart (d, DNS_T_A, ttl))
goto DIE;
if (!response_addbytes (cached, 4))
goto DIE;
response_rfinish (RESPONSE_ANSWER);
cached += 4;
cachedlen -= 4;
}
cleanup (z);
return 1;
}
}
if (!typematch (DNS_T_ANY, dtype)
&& !typematch (DNS_T_AXFR, dtype)
&& !typematch (DNS_T_CNAME, dtype)
&& !typematch (DNS_T_NS, dtype)
&& !typematch (DNS_T_PTR, dtype)
&& !typematch (DNS_T_A, dtype)
&& !typematch (DNS_T_MX, dtype)
&& !typematch (DNS_T_SOA, dtype))
{
byte_copy (key, 2, dtype);
cached = cache_get (key, dlen + 2, &cachedlen, &ttl);
if (cached && (cachedlen || byte_diff (dtype, 2, DNS_T_ANY)))
{
if (debug_level > 2)
log_cachedanswer (d, dtype);
if (!rqa (z))
goto DIE;
while (cachedlen >= 2)
{
uint16_unpack_big (cached, &datalen);
cached += 2;
cachedlen -= 2;
if (datalen > cachedlen)
goto DIE;
if (!response_rstart (d, dtype, ttl))
goto DIE;
if (!response_addbytes (cached, datalen))
goto DIE;
response_rfinish (RESPONSE_ANSWER);
cached += datalen;
cachedlen -= datalen;
}
cleanup (z);
return 1;
}
}
}
for (;;)
{
if (roots (z->servers[z->level], d))
{
for (j = 0; j < QUERY_MAXNS; ++j)
dns_domain_free (&z->ns[z->level][j]);
z->control[z->level] = d;
break;
}
if (!flagforwardonly && (z->level < 2))
{
if (dlen < 255)
{
byte_copy (key,2,DNS_T_NS);
byte_copy (key + 2,dlen,d);
case_lowerb (key + 2,dlen);
cached = cache_get (key, dlen + 2, &cachedlen, &ttl);
if (cached && cachedlen)
{
z->control[z->level] = d;
byte_zero (z->servers[z->level],64);
for (j = 0; j < QUERY_MAXNS; ++j)
dns_domain_free (&z->ns[z->level][j]);
j = pos = 0;
pos = dns_packet_getname (cached, cachedlen, pos, &t1);
while (pos)
{
if (debug_level > 2)
log_cachedns (d, t1);
if (j < QUERY_MAXNS)
if (!dns_domain_copy (&z->ns[z->level][j++], t1))
goto DIE;
pos = dns_packet_getname (cached, cachedlen, pos, &t1);
}
break;
}
}
}
if (!*d)
goto DIE;
j = 1 + (unsigned int) (unsigned char) *d;
dlen -= j;
d += j;
}
HAVENS:
for (j = 0; j < QUERY_MAXNS; ++j)
{
if (z->ns[z->level][j])
{
if (z->level + 1 < QUERY_MAXLEVEL)
{
int dc = dns_domain_copy (&z->name[z->level + 1],
z->ns[z->level][j]);
if (!dc)
goto DIE;
dns_domain_free (&z->ns[z->level][j]);
++z->level;
goto NEWNAME;
}
dns_domain_free (&z->ns[z->level][j]);
}
}
for (j = 0; j < 64; j += 4)
if (byte_diff (z->servers[z->level] + j, 4, "\0\0\0\0"))
break;
if (j == 64)
goto SERVFAIL;
dns_sortip (z->servers[z->level], 64);
dtype = z->level ? DNS_T_A : z->type;
if (qmerge_start (&z->qm, z->servers[z->level],
flagforwardonly, z->name[z->level], dtype,
z->localip, z->control[z->level]) == -1)
goto DIE;
return 0;
LOWERLEVEL:
dns_domain_free (&z->name[z->level]);
for (j = 0; j < QUERY_MAXNS; ++j)
dns_domain_free (&z->ns[z->level][j]);
--z->level;
goto HAVENS;
HAVEPACKET:
if (++z->loop == 100)
goto DIE;
buf = z->qm->dt.packet;
len = z->qm->dt.packetlen;
whichserver = z->qm->dt.servers + 4 * z->qm->dt.curserver;
control = z->control[z->level];
d = z->name[z->level];
dtype = z->level ? DNS_T_A : z->type;
if (!(pos = dns_packet_copy (buf, len, 0, header, 12)))
goto DIE;
if (!(pos = dns_packet_skipname (buf, len, pos)))
goto DIE;
pos += 4;
posanswers = pos;
uint16_unpack_big (header + 6, &numanswers);
uint16_unpack_big (header + 8, &numauthority);
uint16_unpack_big (header + 10, &numglue);
rcode = header[3] & 15;
if (rcode && (rcode != 3))
goto DIE; /* impossible; see irrelevant() */
flagsoa = soattl = cnamettl = 0;
flagout = flagcname = flagreferral = 0;
for (j = 0; j < numanswers; ++j)
{
pos = dns_packet_getname (buf, len, pos, &t1);
if (!pos)
goto DIE;
pos = dns_packet_copy (buf, len, pos, header, 10);
if (!pos)
goto DIE;
if (dns_domain_equal (t1, d))
{
if (byte_equal (header + 2, 2, DNS_C_IN))
{
/* should always be true */
if (typematch (header, dtype))
flagout = 1;
else if (typematch (header, DNS_T_CNAME))
{
if (!dns_packet_getname (buf, len, pos, &cname))
goto DIE;
flagcname = 1;
cnamettl = ttlget (header + 4);
}
}
}
uint16_unpack_big (header + 8, &datalen);
pos += datalen;
}
posauthority = pos;
for (j = 0; j < numauthority; ++j)
{
pos = dns_packet_getname (buf, len, pos, &t1);
if (!pos)
goto DIE;
pos = dns_packet_copy (buf, len, pos, header, 10);
if (!pos)
goto DIE;
if (typematch (header, DNS_T_SOA))
{
flagsoa = 1;
soattl = ttlget (header + 4);
if (soattl > 3600)
soattl = 3600;
}
else if (typematch (header, DNS_T_NS))
{
flagreferral = 1;
if (!dns_domain_copy (&referral, t1))
goto DIE;
}
uint16_unpack_big (header + 8, &datalen);
pos += datalen;
}
if (!flagcname && !rcode && !flagout && flagreferral && !flagsoa)
{
if (dns_domain_equal (referral, control)
|| !dns_domain_suffix (referral, control))
{
if (debug_level > 2)
log_lame (whichserver, control, referral);
byte_zero (whichserver, 4);
goto HAVENS;
}
}
if (records)
{
alloc_free (records);
records = 0;
}
k = numanswers + numauthority + numglue;
records = (unsigned int *) alloc (k * sizeof (unsigned int));
if (!records)
goto DIE;
pos = posanswers;
for (j = 0; j < k; ++j)
{
records[j] = pos;
pos = dns_packet_getname (buf, len, pos, &t1);
if (!pos)
goto DIE;
pos = dns_packet_copy (buf, len, pos, header, 10);
if (!pos)
goto DIE;
uint16_unpack_big (header + 8, &datalen);
pos += datalen;
}
i = j = k;
while (j > 1)
{
if (i > 1)
{
--i;
pos = records[i - 1];
}
else
{
pos = records[j - 1];
records[j - 1] = records[i - 1];
--j;
}
q = i;
while ((p = q * 2) < j)
{
if (!smaller (buf, len, records[p], records[p - 1]))
++p;
records[q - 1] = records[p - 1];
q = p;
}
if (p == j)
{
records[q - 1] = records[p - 1];
q = p;
}
while ((q > i) && smaller (buf, len, records[(p = q/2) - 1], pos))
{
records[q - 1] = records[p - 1];
q = p;
}
records[q - 1] = pos;
}
i = 0;
while (i < k)
{
char type[2];
if (!(pos = dns_packet_getname (buf, len, records[i], &t1)))
goto DIE;
if (!(pos = dns_packet_copy (buf, len, pos, header, 10)))
goto DIE;
ttl = ttlget (header + 4);
byte_copy (type, 2, header);
if (byte_diff (header + 2, 2, DNS_C_IN))
{
++i;
continue;
}
for (j = i + 1; j < k; ++j)
{
pos = dns_packet_getname (buf, len, records[j], &t2);
if (!pos)
goto DIE;
pos = dns_packet_copy (buf, len, pos, header, 10);
if (!pos)
goto DIE;
if (!dns_domain_equal (t1, t2))
break;
if (byte_diff (header, 2, type))
break;
if (byte_diff (header + 2, 2, DNS_C_IN))
break;
}
if (!dns_domain_suffix (t1, control))
{
i = j;
continue;
}
if (!roots_same (t1, control))
{
i = j;
continue;
}
if (byte_equal (type, 2, DNS_T_ANY))
;
else if (byte_equal(type, 2, DNS_T_AXFR))
;
else if (byte_equal (type, 2, DNS_T_SOA))
{
int non_authority = 0;
save_start ();
while (i < j)
{
pos = dns_packet_skipname (buf, len, records[i]);
if (!pos)
goto DIE;
pos = dns_packet_getname (buf, len, pos + 10, &t2);
if (!pos)
goto DIE;
pos = dns_packet_getname (buf, len, pos, &t3);
if (!pos)
goto DIE;
pos = dns_packet_copy (buf, len, pos, misc, 20);
if (!pos)
goto DIE;
if (records[i] < posauthority)
{
if (debug_level > 2)
log_rrsoa (whichserver, t1, t2, t3, misc, ttl);
save_data (misc, 20);
save_data (t2, dns_domain_length (t2));
save_data (t3, dns_domain_length (t3));
non_authority++;
}
++i;
}
if (non_authority)
save_finish (DNS_T_SOA, t1, ttl);
}
else if (byte_equal (type, 2, DNS_T_CNAME))
{
pos = dns_packet_skipname (buf, len, records[j - 1]);
if (!pos)
goto DIE;
pos = dns_packet_getname (buf, len, pos + 10, &t2);
if (!pos)
goto DIE;
if (debug_level > 2)
log_rrcname (whichserver, t1, t2, ttl);
cachegeneric (DNS_T_CNAME, t1, t2, dns_domain_length (t2), ttl);
}
else if (byte_equal (type, 2, DNS_T_PTR))
{
save_start ();
while (i < j)
{
pos = dns_packet_skipname (buf, len, records[i]);
if (!pos)
goto DIE;
pos = dns_packet_getname (buf, len, pos + 10, &t2);
if (!pos)
goto DIE;
if (debug_level > 2)
log_rrptr (whichserver, t1, t2, ttl);
save_data (t2, dns_domain_length (t2));
++i;
}
save_finish (DNS_T_PTR, t1, ttl);
}
else if (byte_equal (type, 2, DNS_T_NS))
{
save_start ();
while (i < j)
{
pos = dns_packet_skipname (buf, len, records[i]);
if (!pos)
goto DIE;
pos = dns_packet_getname (buf, len, pos + 10, &t2);
if (!pos)
goto DIE;
if (debug_level > 2)
log_rrns (whichserver, t1, t2, ttl);
save_data (t2, dns_domain_length (t2));
++i;
}
save_finish (DNS_T_NS, t1, ttl);
}
else if (byte_equal (type, 2, DNS_T_MX))
{
save_start ();
while (i < j)
{
pos = dns_packet_skipname (buf, len, records[i]);
if (!pos)
goto DIE;
pos = dns_packet_copy (buf, len, pos + 10, misc, 2);
if (!pos)
goto DIE;
pos = dns_packet_getname (buf, len, pos, &t2);
if (!pos)
goto DIE;
if (debug_level > 2)
log_rrmx (whichserver, t1, t2, misc, ttl);
save_data (misc, 2);
save_data (t2, dns_domain_length (t2));
++i;
}
save_finish (DNS_T_MX, t1, ttl);
}
else if (byte_equal (type, 2, DNS_T_A))
{
save_start ();
while (i < j)
{
pos = dns_packet_skipname (buf, len, records[i]);
if (!pos)
goto DIE;
pos = dns_packet_copy (buf, len, pos, header, 10);
if (!pos)
goto DIE;
if (byte_equal (header + 8, 2, "\0\4"))
{
pos = dns_packet_copy (buf, len, pos, header, 4);
if (!pos)
goto DIE;
save_data (header, 4);
if (debug_level > 2)
log_rr (whichserver, t1, DNS_T_A, header, 4, ttl);
}
++i;
}
save_finish (DNS_T_A, t1, ttl);
}
else
{
save_start ();
while (i < j)
{
pos = dns_packet_skipname (buf, len, records[i]);
if (!pos)
goto DIE;
pos = dns_packet_copy (buf, len, pos, header, 10);
if (!pos)
goto DIE;
uint16_unpack_big (header + 8, &datalen);
if (datalen > len - pos)
goto DIE;
save_data (header + 8, 2);
save_data (buf + pos, datalen);
if (debug_level > 2)
log_rr (whichserver, t1, type, buf + pos, datalen, ttl);
++i;
}
save_finish (type, t1, ttl);
}
i = j;
}
alloc_free (records);
records = 0;
if (flagcname)
{
ttl = cnamettl;
CNAME:
if (!z->level)
{
if (z->alias[QUERY_MAXALIAS - 1])
goto DIE;
for (j = QUERY_MAXALIAS - 1; j > 0; --j)
z->alias[j] = z->alias[j - 1];
for (j = QUERY_MAXALIAS - 1; j > 0; --j)
z->aliasttl[j] = z->aliasttl[j - 1];
z->alias[0] = z->name[0];
z->aliasttl[0] = ttl;
z->name[0] = 0;
}
if (!dns_domain_copy (&z->name[z->level], cname))
goto DIE;
goto NEWNAME;
}
if (rcode == 3)
{
if (debug_level > 2)
log_nxdomain (whichserver, d, soattl);
cachegeneric (DNS_T_ANY, d, "", 0, soattl);
NXDOMAIN:
if (z->level)
goto LOWERLEVEL;
if (!rqa (z))
goto DIE;
response_nxdomain ();
cleanup (z);
return 1;
}
if (!flagout && flagsoa)
if (byte_diff (DNS_T_ANY, 2, dtype))
if (byte_diff (DNS_T_AXFR, 2, dtype))
if (byte_diff (DNS_T_CNAME, 2, dtype))
{
save_start ();
save_finish (dtype, d, soattl);
if (debug_level > 2)
log_nodata (whichserver, d, dtype, soattl);
}
if (debug_level > 2)
log_stats ();
if (flagout || flagsoa || !flagreferral)
{
if (z->level)
{
pos = posanswers;
for (j = 0; j < numanswers; ++j)
{
pos = dns_packet_getname (buf, len, pos, &t1);
if (!pos)
goto DIE;
pos = dns_packet_copy (buf, len, pos, header, 10);
if (!pos)
goto DIE;
uint16_unpack_big (header + 8, &datalen);
if (dns_domain_equal (t1, d))
if (typematch (header, DNS_T_A))
if (byte_equal (header + 2, 2, DNS_C_IN))
/* should always be true */
if (datalen == 4)
for (k = 0; k < 64; k += 4)
{
if (byte_equal (z->servers[z->level - 1]
+ k, 4, "\0\0\0\0"))
{
if (!dns_packet_copy (buf, len, pos,
z->servers[z->level - 1] + k, 4))
goto DIE;
break;
}
}
pos += datalen;
}
goto LOWERLEVEL;
}
if (!rqa (z))
goto DIE;
pos = posanswers;
for (j = 0; j < numanswers; ++j)
{
pos = dns_packet_getname (buf, len, pos, &t1);
if (!pos)
goto DIE;
pos = dns_packet_copy (buf, len, pos, header, 10);
if (!pos)
goto DIE;
ttl = ttlget (header + 4);
uint16_unpack_big (header + 8, &datalen);
if (dns_domain_equal (t1, d))
{
if (byte_equal (header + 2, 2, DNS_C_IN))
{ /* should always be true */
if (typematch (header, dtype))
{
if (!response_rstart (t1, header, ttl))
goto DIE;
if (typematch (header, DNS_T_NS)
|| typematch (header, DNS_T_CNAME)
|| typematch (header, DNS_T_PTR))
{
if (!dns_packet_getname (buf, len, pos, &t2))
goto DIE;
if (!response_addname (t2))
goto DIE;
}
else if (typematch (header, DNS_T_MX))
{
pos2 = dns_packet_copy (buf, len, pos, misc, 2);
if (!pos2)
goto DIE;
if (!response_addbytes (misc, 2))
goto DIE;
if (!dns_packet_getname (buf, len, pos2, &t2))
goto DIE;
if (!response_addname (t2))
goto DIE;
}
else if (typematch (header, DNS_T_SOA))
{
pos2 = dns_packet_getname (buf, len, pos, &t2);
if (!pos2)
goto DIE;
if (!response_addname (t2))
goto DIE;
pos2 = dns_packet_getname (buf, len, pos2, &t3);
if (!pos2)
goto DIE;
if (!response_addname (t3))
goto DIE;
pos2 = dns_packet_copy (buf, len, pos2, misc, 20);
if (!pos2)
goto DIE;
if (!response_addbytes (misc, 20))
goto DIE;
}
else
{
if (pos + datalen > len)
goto DIE;
if (!response_addbytes (buf + pos, datalen))
goto DIE;
}
response_rfinish(RESPONSE_ANSWER);
}
}
}
pos += datalen;
}
cleanup (z);
return 1;
}
if (!dns_domain_suffix (d, referral))
goto DIE;
control = d + dns_domain_suffixpos (d, referral);
z->control[z->level] = control;
byte_zero (z->servers[z->level], 64);
for (j = 0; j < QUERY_MAXNS; ++j)
dns_domain_free (&z->ns[z->level][j]);
k = 0;
pos = posauthority;
for (j = 0; j < numauthority; ++j)
{
pos = dns_packet_getname (buf, len, pos, &t1);
if (!pos)
goto DIE;
pos = dns_packet_copy (buf, len, pos, header, 10);
if (!pos)
goto DIE;
uint16_unpack_big (header + 8, &datalen);
if (dns_domain_equal (referral, t1)) /* should always be true */
if (typematch (header, DNS_T_NS)) /* should always be true */
/* should always be true */
if (byte_equal (header + 2, 2, DNS_C_IN))
if (k < QUERY_MAXNS)
if (!dns_packet_getname (buf, len, pos,
&z->ns[z->level][k++]))
goto DIE;
pos += datalen;
}
goto HAVENS;
SERVFAIL:
if (z->level)
goto LOWERLEVEL;
if (!rqa (z))
goto DIE;
response_servfail ();
cleanup (z);
return 1;
DIE:
cleanup (z);
if (records)
{
alloc_free (records);
records = 0;
}
return -1;
} | 0 | [
"CWE-362"
] | ndjbdns | 847523271f3966cf4618c5689b905703c41dec1c | 73,096,909,867,660,750,000,000,000,000,000,000,000 | 1,034 | Merge identical outgoing requests.
This patch fixes dnscache to combine *same* client queries into one
single outgoing request, thus securing the server from possible cache
poisoning attacks. This fixes one of the cache poisoning vulnerability
reported by Mr Mark Johnson
-> https://bugzilla.redhat.com/show_bug.cgi?id=838965.
Nonetheless the original patch for this issue was created by
Mr Jeff king -> http://www.your.org/dnscache/
Sincere thanks to Mr Mark for reporting this issue and Mr Jeff for
creating the patch and releasing it under public domain. |
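The commit message above describes coalescing identical client queries into a single outgoing request. The sketch below illustrates that idea in plain C; the table layout and the names inflight_find/inflight_attach are hypothetical illustrations, not dnscache's actual data structures.

#include <string.h>
#include <stddef.h>

#define MAX_INFLIGHT 200
#define MAX_WAITERS  16

struct inflight {
    char qname[256];            /* lowercased query name */
    unsigned short qtype;       /* query type */
    int active;                 /* slot in use */
    int nwaiters;               /* clients waiting on this answer */
    int waiters[MAX_WAITERS];
};

static struct inflight table[MAX_INFLIGHT];

/* Return an existing in-flight query for (qname, qtype), if any. */
static struct inflight *inflight_find(const char *qname, unsigned short qtype)
{
    int i;
    for (i = 0; i < MAX_INFLIGHT; i++)
        if (table[i].active && table[i].qtype == qtype
            && strcmp(table[i].qname, qname) == 0)
            return &table[i];
    return NULL;
}

/* Attach a client to an existing query instead of issuing a new upstream
 * request.  Returns 1 when merged, 0 when a fresh query must be sent. */
static int inflight_attach(const char *qname, unsigned short qtype, int client)
{
    struct inflight *q = inflight_find(qname, qtype);
    if (q == NULL || q->nwaiters >= MAX_WAITERS)
        return 0;
    q->waiters[q->nwaiters++] = client;
    return 1;
}

When the merged answer arrives every waiter receives the same response, so an attacker cannot force many concurrent upstream queries for the same name.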
test_bson_copy (void)
{
bson_t b;
bson_t *c;
bson_init (&b);
BSON_ASSERT (bson_append_int32 (&b, "foobar", -1, 1234));
c = bson_copy (&b);
BSON_ASSERT_BSON_EQUAL (&b, c);
bson_destroy (c);
bson_destroy (&b);
} | 0 | [
"CWE-125"
] | libbson | 42900956dc461dfe7fb91d93361d10737c1602b3 | 116,321,789,140,376,870,000,000,000,000,000,000,000 | 12 | CDRIVER-2269 Check for zero string length in codewscope |
PHP_MINIT_FUNCTION(spl_observer)
{
REGISTER_SPL_INTERFACE(SplObserver);
REGISTER_SPL_INTERFACE(SplSubject);
REGISTER_SPL_STD_CLASS_EX(SplObjectStorage, spl_SplObjectStorage_new, spl_funcs_SplObjectStorage);
memcpy(&spl_handler_SplObjectStorage, zend_get_std_object_handlers(), sizeof(zend_object_handlers));
spl_handler_SplObjectStorage.offset = XtOffsetOf(spl_SplObjectStorage, std);
spl_handler_SplObjectStorage.get_debug_info = spl_object_storage_debug_info;
spl_handler_SplObjectStorage.compare_objects = spl_object_storage_compare_objects;
spl_handler_SplObjectStorage.clone_obj = spl_object_storage_clone;
spl_handler_SplObjectStorage.get_gc = spl_object_storage_get_gc;
spl_handler_SplObjectStorage.dtor_obj = zend_objects_destroy_object;
spl_handler_SplObjectStorage.free_obj = spl_SplObjectStorage_free_storage;
REGISTER_SPL_IMPLEMENTS(SplObjectStorage, Countable);
REGISTER_SPL_IMPLEMENTS(SplObjectStorage, Iterator);
REGISTER_SPL_IMPLEMENTS(SplObjectStorage, Serializable);
REGISTER_SPL_IMPLEMENTS(SplObjectStorage, ArrayAccess);
REGISTER_SPL_STD_CLASS_EX(MultipleIterator, spl_SplObjectStorage_new, spl_funcs_MultipleIterator);
REGISTER_SPL_ITERATOR(MultipleIterator);
REGISTER_SPL_CLASS_CONST_LONG(MultipleIterator, "MIT_NEED_ANY", MIT_NEED_ANY);
REGISTER_SPL_CLASS_CONST_LONG(MultipleIterator, "MIT_NEED_ALL", MIT_NEED_ALL);
REGISTER_SPL_CLASS_CONST_LONG(MultipleIterator, "MIT_KEYS_NUMERIC", MIT_KEYS_NUMERIC);
REGISTER_SPL_CLASS_CONST_LONG(MultipleIterator, "MIT_KEYS_ASSOC", MIT_KEYS_ASSOC);
return SUCCESS;
} | 0 | [
"CWE-119",
"CWE-787"
] | php-src | 61cdd1255d5b9c8453be71aacbbf682796ac77d4 | 296,983,288,870,856,600,000,000,000,000,000,000,000 | 31 | Fix bug #73257 and bug #73258 - SplObjectStorage unserialize allows use of non-object as key |
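The fix the message refers to rejects non-object keys while unserializing. A hedged fragment of what such a guard looks like in PHP-source style is shown below; the variable name entry, the label outexcept, and the exact exception message are assumptions, not the actual php-src diff.

/* Hedged sketch: refuse to use anything but a real object as a storage key
 * during unserialization. */
if (Z_TYPE_P(entry) != IS_OBJECT) {
    zend_throw_exception(spl_ce_UnexpectedValueException,
                         "Non-object key in serialized SplObjectStorage", 0);
    goto outexcept;
}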
void CalendarRegressionTest::test4071385()
{
UErrorCode status = U_ZERO_ERROR;
Calendar *cal = Calendar::createInstance(status);
if(U_FAILURE(status)) {
dataerrln("Error creating Calendar: %s", u_errorName(status));
delete cal;
return;
}
cal->setTime(makeDate(1998, UCAL_JUNE, 24),status);
cal->set(UCAL_MONTH, UCAL_NOVEMBER); // change a field
//logln(cal.getTime().toString());
if (cal->getTime(status) != makeDate(1998, UCAL_NOVEMBER, 24))
errln("Fail");
delete cal;
} | 0 | [
"CWE-190"
] | icu | 71dd84d4ffd6600a70e5bca56a22b957e6642bd4 | 32,307,301,571,184,070,000,000,000,000,000,000,000 | 17 | ICU-12504 in ICU4C Persian cal, use int64_t math for one operation to avoid overflow; add tests in C and J
X-SVN-Rev: 40654 |
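The one-line fix described above is the usual widen-before-multiply pattern. The self-contained example below shows the pattern in isolation; the values are arbitrary and this is not the ICU Persian-calendar code itself.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int32_t days = 80000;              /* large enough to overflow in 32 bits */
    int32_t msPerDay = 86400000;

    /* Casting one operand to int64_t forces the multiplication to be done
     * in 64-bit arithmetic, so the product cannot wrap. */
    int64_t millis = (int64_t)days * msPerDay;

    printf("%lld\n", (long long)millis);
    return 0;
}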
aodv_v6_draft_01_rrep(netdissect_options *ndo, const u_char *dat, u_int length)
{
u_int i;
const struct aodv_rrep6_draft_01 *ap = (const struct aodv_rrep6_draft_01 *)dat;
ND_TCHECK(*ap);
if (length < sizeof(*ap))
goto trunc;
ND_PRINT((ndo, " rrep %u %s%sprefix %u hops %u\n"
"\tdst %s dseq %lu src %s %lu ms", length,
ap->rrep_type & RREP_REPAIR ? "[R]" : "",
ap->rrep_type & RREP_ACK ? "[A] " : " ",
ap->rrep_ps & RREP_PREFIX_MASK,
ap->rrep_hops,
ip6addr_string(ndo, &ap->rrep_da),
(unsigned long)EXTRACT_32BITS(&ap->rrep_ds),
ip6addr_string(ndo, &ap->rrep_oa),
(unsigned long)EXTRACT_32BITS(&ap->rrep_life)));
i = length - sizeof(*ap);
if (i >= sizeof(struct aodv_ext))
aodv_extension(ndo, (const struct aodv_ext *)(dat + sizeof(*ap)), i);
return;
trunc:
ND_PRINT((ndo, " [|rreq"));
} | 0 | [
"CWE-125",
"CWE-787"
] | tcpdump | cbddb98484ea8ec1deece351abd56e063d775b38 | 45,826,645,465,078,960,000,000,000,000,000,000,000 | 26 | CVE-2017-13002/AODV: Add some missing bounds checks.
In aodv_extension() do a bounds check on the extension header before we
look at it.
This fixes a buffer over-read discovered by Kamil Frankowicz.
Add a test using the capture file supplied by the reporter(s).
While we're at it, add the RFC number, and check the validity of the
length for the Hello extension. |
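The bounds check the commit adds follows a simple rule: never dereference a header unless the remaining byte count can hold it, and never trust a declared body length larger than what is left. A self-contained sketch (with a stand-in struct, not tcpdump's real aodv_ext) is:

#include <stddef.h>

struct ext_hdr {              /* minimal stand-in for the AODV extension header */
    unsigned char type;
    unsigned char length;     /* length of the body that follows the header */
};

/* Return 1 when the header and its declared body fit in the remaining bytes. */
static int ext_fits(const unsigned char *p, size_t remaining)
{
    const struct ext_hdr *ep = (const struct ext_hdr *)p;

    if (remaining < sizeof(*ep))              /* cannot even read the header */
        return 0;
    if (ep->length > remaining - sizeof(*ep)) /* body overruns the capture */
        return 0;
    return 1;
}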
static int test_invalid_template(void)
{
const unsigned char *p = t_invalid_template;
INVALIDTEMPLATE *tmp = d2i_INVALIDTEMPLATE(NULL, &p,
sizeof(t_invalid_template));
/* We expect a NULL pointer return */
if (TEST_ptr_null(tmp))
return 1;
INVALIDTEMPLATE_free(tmp);
return 0;
} | 0 | [
"CWE-476"
] | openssl | 22b88fc9c0e22545401c0b34d24843883ea73fec | 248,327,048,162,946,030,000,000,000,000,000,000,000 | 13 | Add a test for encoding/decoding using an invalid ASN.1 Template
If you have a CHOICE type that it must use explicit tagging - otherwise
the template is invalid. We add tests for this.
Reviewed-by: Tomas Mraz <[email protected]> |
static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
{
bool nmi_safe = false;
switch (clk_id) {
case CLOCK_MONOTONIC:
event->clock = &ktime_get_mono_fast_ns;
nmi_safe = true;
break;
case CLOCK_MONOTONIC_RAW:
event->clock = &ktime_get_raw_fast_ns;
nmi_safe = true;
break;
case CLOCK_REALTIME:
event->clock = &ktime_get_real_ns;
break;
case CLOCK_BOOTTIME:
event->clock = &ktime_get_boottime_ns;
break;
case CLOCK_TAI:
event->clock = &ktime_get_clocktai_ns;
break;
default:
return -EINVAL;
}
if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI))
return -EINVAL;
return 0;
} | 0 | [
"CWE-401"
] | tip | 7bdb157cdebbf95a1cd94ed2e01b338714075d00 | 308,721,811,286,666,900,000,000,000,000,000,000,000 | 36 | perf/core: Fix a memory leak in perf_event_parse_addr_filter()
As shown through runtime testing, the "filename" allocation is not
always freed in perf_event_parse_addr_filter().
There are three possible ways that this could happen:
- It could be allocated twice on subsequent iterations through the loop,
- or leaked on the success path,
- or on the failure path.
Clean up the code flow to make it obvious that 'filename' is always
freed in the reallocation path and in the two return paths as well.
We rely on the fact that kfree(NULL) is NOP and filename is initialized
with NULL.
This fixes the leak. No other side effects expected.
[ Dan Carpenter: cleaned up the code flow & added a changelog. ]
[ Ingo Molnar: updated the changelog some more. ]
Fixes: 375637bc5249 ("perf/core: Introduce address range filtering")
Signed-off-by: "kiyin(尹亮)" <[email protected]>
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Cc: "Srivatsa S. Bhat" <[email protected]>
Cc: Anthony Liguori <[email protected]>
--
kernel/events/core.c | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-) |
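The leak fix relies on two things the message calls out: kfree(NULL) is a no-op, and the pointer starts as NULL, so one cleanup label can cover every path. The userspace analogue below (free() has the same no-op-on-NULL guarantee) shows the idiom; parse_filters and its arguments are made up for illustration.

#include <stdlib.h>
#include <string.h>

static int parse_filters(const char *specs[], int n)
{
    char *filename = NULL;      /* free(NULL) is a no-op, like kfree(NULL) */
    int i, ret = -1;

    for (i = 0; i < n; i++) {
        free(filename);         /* drop the previous iteration's copy */
        filename = strdup(specs[i]);
        if (filename == NULL)
            goto out;
        /* ... validate and record the filter here ... */
    }
    ret = 0;
out:
    free(filename);             /* single cleanup covers success and error */
    return ret;
}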
md_is_entity(MD_CTX* ctx, OFF beg, OFF max_end, OFF* p_end)
{
return md_is_entity_str(ctx, ctx->text, beg, max_end, p_end);
} | 0 | [
"CWE-125",
"CWE-908"
] | md4c | 4fc808d8fe8d8904f8525bb4231d854f45e23a19 | 119,804,623,993,085,600,000,000,000,000,000,000,000 | 4 | md_analyze_line: Avoid reading 1 byte beyond the input size.
Fixes #155. |
avifResult avifDecoderSetSource(avifDecoder * decoder, avifDecoderSource source)
{
decoder->requestedSource = source;
return avifDecoderReset(decoder);
} | 0 | [
"CWE-703",
"CWE-787"
] | libavif | 0a8e7244d494ae98e9756355dfbfb6697ded2ff9 | 248,833,785,985,180,950,000,000,000,000,000,000,000 | 5 | Set max image size to 16384 * 16384
Fix https://crbug.com/oss-fuzz/24728 and
https://crbug.com/oss-fuzz/24734. |
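Capping the declared dimensions also makes the later size arithmetic safe. A small sketch of such a check (constant and function name invented for illustration, not the libavif API):

#include <stdint.h>

#define SKETCH_MAX_DIM 16384u   /* limit taken from the commit message */

/* Return the byte size of a 4-bytes-per-pixel plane, or 0 to reject.
 * Capping each dimension first guarantees the multiplication below
 * cannot overflow 64 bits. */
static uint64_t plane_bytes(uint32_t width, uint32_t height)
{
    if (width == 0 || height == 0 ||
        width > SKETCH_MAX_DIM || height > SKETCH_MAX_DIM)
        return 0;
    return (uint64_t)width * height * 4;
}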
load_counted_binunicode(UnpicklerObject *self, int nbytes)
{
PyObject *str;
Py_ssize_t size;
char *s;
if (_Unpickler_Read(self, &s, nbytes) < 0)
return -1;
size = calc_binsize(s, nbytes);
if (size < 0) {
PyErr_Format(PyExc_OverflowError,
"BINUNICODE exceeds system's maximum size of %zd bytes",
PY_SSIZE_T_MAX);
return -1;
}
if (_Unpickler_Read(self, &s, size) < 0)
return -1;
str = PyUnicode_DecodeUTF8(s, size, "surrogatepass");
if (str == NULL)
return -1;
PDATA_PUSH(self->stack, str, -1);
return 0;
} | 0 | [
"CWE-190",
"CWE-369"
] | cpython | a4ae828ee416a66d8c7bf5ee71d653c2cc6a26dd | 320,653,254,705,997,820,000,000,000,000,000,000,000 | 27 | closes bpo-34656: Avoid relying on signed overflow in _pickle memos. (GH-9261) |
static void cpuacct_update_stats(struct task_struct *tsk,
enum cpuacct_stat_index idx, cputime_t val)
{
struct cpuacct *ca;
int batch = CPUACCT_BATCH;
if (unlikely(!cpuacct_subsys.active))
return;
rcu_read_lock();
ca = task_ca(tsk);
do {
__percpu_counter_add(&ca->cpustat[idx], val, batch);
ca = ca->parent;
} while (ca);
rcu_read_unlock();
} | 0 | [
"CWE-703",
"CWE-835"
] | linux | f26f9aff6aaf67e9a430d16c266f91b13a5bff64 | 14,871,438,229,888,060,000,000,000,000,000,000,000 | 18 | Sched: fix skip_clock_update optimization
idle_balance() drops/retakes rq->lock, leaving the previous task
vulnerable to set_tsk_need_resched(). Clear it after we return
from balancing instead, and in setup_thread_stack() as well, so
no successfully descheduled or never scheduled task has it set.
Need resched confused the skip_clock_update logic, which assumes
that the next call to update_rq_clock() will come nearly immediately
after being set. Make the optimization robust against the waking
a sleeper before it sucessfully deschedules case by checking that
the current task has not been dequeued before setting the flag,
since it is that useless clock update we're trying to save, and
clear unconditionally in schedule() proper instead of conditionally
in put_prev_task().
Signed-off-by: Mike Galbraith <[email protected]>
Reported-by: Bjoern B. Brandenburg <[email protected]>
Tested-by: Yong Zhang <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Cc: [email protected]
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]> |
check_topfill(
win_T *wp,
int down) // when TRUE scroll down when not enough space
{
int n;
if (wp->w_topfill > 0)
{
n = plines_win_nofill(wp, wp->w_topline, TRUE);
if (wp->w_topfill + n > wp->w_height)
{
if (down && wp->w_topline > 1)
{
--wp->w_topline;
wp->w_topfill = 0;
}
else
{
wp->w_topfill = wp->w_height - n;
if (wp->w_topfill < 0)
wp->w_topfill = 0;
}
}
}
} | 0 | [
"CWE-122"
] | vim | 777e7c21b7627be80961848ac560cb0a9978ff43 | 108,629,706,838,915,060,000,000,000,000,000,000,000 | 25 | patch 8.2.3564: invalid memory access when scrolling without valid screen
Problem: Invalid memory access when scrolling without a valid screen.
Solution: Do not set VALID_BOTLINE in w_valid. |
TEST_P(Http2FloodMitigationTest, RST_STREAM) {
// Use invalid HTTP headers to trigger sending RST_STREAM frames.
config_helper_.addConfigModifier(
[](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
hcm) -> void {
hcm.mutable_http2_protocol_options()->set_stream_error_on_invalid_http_messaging(true);
});
beginSession();
int i = 0;
auto request = Http::Http2::Http2Frame::makeMalformedRequest(i);
sendFame(request);
auto response = readFrame();
// Make sure we've got RST_STREAM from the server
EXPECT_EQ(Http2Frame::Type::RstStream, response.type());
// Disable reading to make sure that the RST_STREAM frames stack up on the server.
tcp_client_->readDisable(true);
uint64_t total_bytes_sent = 0;
while (total_bytes_sent < TransmitThreshold && tcp_client_->connected()) {
request = Http::Http2::Http2Frame::makeMalformedRequest(++i);
sendFame(request);
total_bytes_sent += request.size();
}
EXPECT_LE(total_bytes_sent, TransmitThreshold) << "Flood mitigation is broken.";
EXPECT_EQ(1, test_server_->counter("http2.outbound_control_flood")->value());
EXPECT_EQ(1,
test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value());
} | 0 | [
"CWE-400"
] | envoy | 0e49a495826ea9e29134c1bd54fdeb31a034f40c | 177,869,710,484,618,260,000,000,000,000,000,000,000 | 30 | http/2: add stats and stream flush timeout (#139)
This commit adds a new stream flush timeout to guard against a
remote server that does not open window once an entire stream has
been buffered for flushing. Additional stats have also been added
to better understand the codecs view of active streams as well as
amount of data buffered.
Signed-off-by: Matt Klein <[email protected]> |
GF_Err xml_box_read(GF_Box *s, GF_BitStream *bs)
{
GF_XMLBox *ptr = (GF_XMLBox *)s;
return gf_isom_read_null_terminated_string(s, bs, s->size, &ptr->xml);
} | 0 | [
"CWE-401",
"CWE-787"
] | gpac | ec64c7b8966d7e4642d12debb888be5acf18efb9 | 100,463,319,012,021,570,000,000,000,000,000,000,000 | 5 | fixed #1786 (fuzz) |
static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
if (sk_rcvqueues_full(sk, skb))
return -ENOBUFS;
__sk_add_backlog(sk, skb);
sk->sk_backlog.len += skb->truesize;
return 0;
} | 0 | [
"CWE-400"
] | linux-2.6 | c377411f2494a931ff7facdbb3a6839b1266bcf6 | 324,977,283,226,684,470,000,000,000,000,000,000,000 | 9 | net: sk_add_backlog() take rmem_alloc into account
Current socket backlog limit is not enough to really stop DDOS attacks,
because user thread spend many time to process a full backlog each
round, and user might crazy spin on socket lock.
We should add backlog size and receive_queue size (aka rmem_alloc) to
pace writers, and let user run without being slowed down too much.
Introduce a sk_rcvqueues_full() helper, to avoid taking socket lock in
stress situations.
Under huge stress from a multiqueue/RPS enabled NIC, a single flow udp
receiver can now process ~200.000 pps (instead of ~100 pps before the
patch) on a 8 core machine.
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
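sk_add_backlog() above defers to sk_rcvqueues_full(); a simplified version of what such a check computes is sketched below. Field names follow struct sock, but this is a paraphrase for illustration, not the verbatim kernel helper, and it assumes the usual kernel headers.

/* Hypothetical sketch: count both queued backlog bytes and memory already
 * charged to the receive queue before accepting another skb. */
static inline int sk_rcvqueues_full_sketch(struct sock *sk, struct sk_buff *skb)
{
    unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);

    return qsize + skb->truesize > (unsigned int)sk->sk_rcvbuf;
}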
find_help_tags(
char_u *arg,
int *num_matches,
char_u ***matches,
int keep_lang)
{
char_u *s, *d;
int i;
// Specific tags that either have a specific replacement or won't go
// through the generic rules.
static char *(except_tbl[][2]) = {
{"*", "star"},
{"g*", "gstar"},
{"[*", "[star"},
{"]*", "]star"},
{":*", ":star"},
{"/*", "/star"},
{"/\\*", "/\\\\star"},
{"\"*", "quotestar"},
{"**", "starstar"},
{"cpo-*", "cpo-star"},
{"/\\(\\)", "/\\\\(\\\\)"},
{"/\\%(\\)", "/\\\\%(\\\\)"},
{"?", "?"},
{"??", "??"},
{":?", ":?"},
{"?<CR>", "?<CR>"},
{"g?", "g?"},
{"g?g?", "g?g?"},
{"g??", "g??"},
{"-?", "-?"},
{"q?", "q?"},
{"v_g?", "v_g?"},
{"/\\?", "/\\\\?"},
{"/\\z(\\)", "/\\\\z(\\\\)"},
{"\\=", "\\\\="},
{":s\\=", ":s\\\\="},
{"[count]", "\\[count]"},
{"[quotex]", "\\[quotex]"},
{"[range]", "\\[range]"},
{":[range]", ":\\[range]"},
{"[pattern]", "\\[pattern]"},
{"\\|", "\\\\bar"},
{"\\%$", "/\\\\%\\$"},
{"s/\\~", "s/\\\\\\~"},
{"s/\\U", "s/\\\\U"},
{"s/\\L", "s/\\\\L"},
{"s/\\1", "s/\\\\1"},
{"s/\\2", "s/\\\\2"},
{"s/\\3", "s/\\\\3"},
{"s/\\9", "s/\\\\9"},
{NULL, NULL}
};
static char *(expr_table[]) = {"!=?", "!~?", "<=?", "<?", "==?", "=~?",
">=?", ">?", "is?", "isnot?"};
int flags;
d = IObuff; // assume IObuff is long enough!
d[0] = NUL;
if (STRNICMP(arg, "expr-", 5) == 0)
{
// When the string starting with "expr-" and containing '?' and matches
// the table, it is taken literally (but ~ is escaped). Otherwise '?'
// is recognized as a wildcard.
for (i = (int)ARRAY_LENGTH(expr_table); --i >= 0; )
if (STRCMP(arg + 5, expr_table[i]) == 0)
{
int si = 0, di = 0;
for (;;)
{
if (arg[si] == '~')
d[di++] = '\\';
d[di++] = arg[si];
if (arg[si] == NUL)
break;
++si;
}
break;
}
}
else
{
// Recognize a few exceptions to the rule. Some strings that contain
// '*'are changed to "star", otherwise '*' is recognized as a wildcard.
for (i = 0; except_tbl[i][0] != NULL; ++i)
if (STRCMP(arg, except_tbl[i][0]) == 0)
{
STRCPY(d, except_tbl[i][1]);
break;
}
}
if (d[0] == NUL) // no match in table
{
// Replace "\S" with "/\\S", etc. Otherwise every tag is matched.
// Also replace "\%^" and "\%(", they match every tag too.
// Also "\zs", "\z1", etc.
// Also "\@<", "\@=", "\@<=", etc.
// And also "\_$" and "\_^".
if (arg[0] == '\\'
&& ((arg[1] != NUL && arg[2] == NUL)
|| (vim_strchr((char_u *)"%_z@", arg[1]) != NULL
&& arg[2] != NUL)))
{
STRCPY(d, "/\\\\");
STRCPY(d + 3, arg + 1);
// Check for "/\\_$", should be "/\\_\$"
if (d[3] == '_' && d[4] == '$')
STRCPY(d + 4, "\\$");
}
else
{
// Replace:
// "[:...:]" with "\[:...:]"
// "[++...]" with "\[++...]"
// "\{" with "\\{" -- matching "} \}"
if ((arg[0] == '[' && (arg[1] == ':'
|| (arg[1] == '+' && arg[2] == '+')))
|| (arg[0] == '\\' && arg[1] == '{'))
*d++ = '\\';
// If tag starts with "('", skip the "(". Fixes CTRL-] on ('option'.
if (*arg == '(' && arg[1] == '\'')
arg++;
for (s = arg; *s; ++s)
{
// Replace "|" with "bar" and '"' with "quote" to match the name of
// the tags for these commands.
// Replace "*" with ".*" and "?" with "." to match command line
// completion.
// Insert a backslash before '~', '$' and '.' to avoid their
// special meaning.
if (d - IObuff > IOSIZE - 10) // getting too long!?
break;
switch (*s)
{
case '|': STRCPY(d, "bar");
d += 3;
continue;
case '"': STRCPY(d, "quote");
d += 5;
continue;
case '*': *d++ = '.';
break;
case '?': *d++ = '.';
continue;
case '$':
case '.':
case '~': *d++ = '\\';
break;
}
// Replace "^x" by "CTRL-X". Don't do this for "^_" to make
// ":help i_^_CTRL-D" work.
// Insert '-' before and after "CTRL-X" when applicable.
if (*s < ' ' || (*s == '^' && s[1] && (ASCII_ISALPHA(s[1])
|| vim_strchr((char_u *)"?@[\\]^", s[1]) != NULL)))
{
if (d > IObuff && d[-1] != '_' && d[-1] != '\\')
*d++ = '_'; // prepend a '_' to make x_CTRL-x
STRCPY(d, "CTRL-");
d += 5;
if (*s < ' ')
{
#ifdef EBCDIC
*d++ = CtrlChar(*s);
#else
*d++ = *s + '@';
#endif
if (d[-1] == '\\')
*d++ = '\\'; // double a backslash
}
else
*d++ = *++s;
if (s[1] != NUL && s[1] != '_')
*d++ = '_'; // append a '_'
continue;
}
else if (*s == '^') // "^" or "CTRL-^" or "^_"
*d++ = '\\';
// Insert a backslash before a backslash after a slash, for search
// pattern tags: "/\|" --> "/\\|".
else if (s[0] == '\\' && s[1] != '\\'
&& *arg == '/' && s == arg + 1)
*d++ = '\\';
// "CTRL-\_" -> "CTRL-\\_" to avoid the special meaning of "\_" in
// "CTRL-\_CTRL-N"
if (STRNICMP(s, "CTRL-\\_", 7) == 0)
{
STRCPY(d, "CTRL-\\\\");
d += 7;
s += 6;
}
*d++ = *s;
// If tag contains "({" or "([", tag terminates at the "(".
// This is for help on functions, e.g.: abs({expr}).
if (*s == '(' && (s[1] == '{' || s[1] =='['))
break;
// If tag starts with ', toss everything after a second '. Fixes
// CTRL-] on 'option'. (would include the trailing '.').
if (*s == '\'' && s > arg && *arg == '\'')
break;
// Also '{' and '}'.
if (*s == '}' && s > arg && *arg == '{')
break;
}
*d = NUL;
if (*IObuff == '`')
{
if (d > IObuff + 2 && d[-1] == '`')
{
// remove the backticks from `command`
mch_memmove(IObuff, IObuff + 1, STRLEN(IObuff));
d[-2] = NUL;
}
else if (d > IObuff + 3 && d[-2] == '`' && d[-1] == ',')
{
// remove the backticks and comma from `command`,
mch_memmove(IObuff, IObuff + 1, STRLEN(IObuff));
d[-3] = NUL;
}
else if (d > IObuff + 4 && d[-3] == '`'
&& d[-2] == '\\' && d[-1] == '.')
{
// remove the backticks and dot from `command`\.
mch_memmove(IObuff, IObuff + 1, STRLEN(IObuff));
d[-4] = NUL;
}
}
}
}
*matches = (char_u **)"";
*num_matches = 0;
flags = TAG_HELP | TAG_REGEXP | TAG_NAMES | TAG_VERBOSE | TAG_NO_TAGFUNC;
if (keep_lang)
flags |= TAG_KEEP_LANG;
if (find_tags(IObuff, num_matches, matches, flags, (int)MAXCOL, NULL) == OK
&& *num_matches > 0)
{
// Sort the matches found on the heuristic number that is after the
// tag name.
qsort((void *)*matches, (size_t)*num_matches,
sizeof(char_u *), help_compare);
// Delete more than TAG_MANY to reduce the size of the listing.
while (*num_matches > TAG_MANY)
vim_free((*matches)[--*num_matches]);
}
return OK;
} | 1 | [
"CWE-122",
"CWE-787"
] | vim | bd228fd097b41a798f90944b5d1245eddd484142 | 206,239,926,352,612,340,000,000,000,000,000,000,000 | 258 | patch 8.2.3669: buffer overflow with long help argument
Problem: Buffer overflow with long help argument.
Solution: Use snprintf(). |
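The overflow came from copying an unbounded argument into the fixed IObuff; the cure is a length-limited formatting call. The standalone example below shows the difference in standard C; IOSIZE is assumed to be 1025 here purely for illustration and is not necessarily Vim's value.

#include <stdio.h>
#include <string.h>

#define IOSIZE 1025     /* assumed fixed-buffer size for this example */

int main(void)
{
    char buf[IOSIZE];
    char longarg[4096];

    memset(longarg, 'x', sizeof(longarg) - 1);
    longarg[sizeof(longarg) - 1] = '\0';

    /* sprintf(buf, "/\\\\%s", longarg) would write past buf for a long
     * argument; snprintf() truncates at the buffer size instead. */
    snprintf(buf, sizeof(buf), "/\\\\%s", longarg);

    printf("%zu\n", strlen(buf));   /* never more than IOSIZE - 1 */
    return 0;
}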
void CLASS kodak_radc_load_raw()
{
static const char src[] = {
1,1, 2,3, 3,4, 4,2, 5,7, 6,5, 7,6, 7,8,
1,0, 2,1, 3,3, 4,4, 5,2, 6,7, 7,6, 8,5, 8,8,
2,1, 2,3, 3,0, 3,2, 3,4, 4,6, 5,5, 6,7, 6,8,
2,0, 2,1, 2,3, 3,2, 4,4, 5,6, 6,7, 7,5, 7,8,
2,1, 2,4, 3,0, 3,2, 3,3, 4,7, 5,5, 6,6, 6,8,
2,3, 3,1, 3,2, 3,4, 3,5, 3,6, 4,7, 5,0, 5,8,
2,3, 2,6, 3,0, 3,1, 4,4, 4,5, 4,7, 5,2, 5,8,
2,4, 2,7, 3,3, 3,6, 4,1, 4,2, 4,5, 5,0, 5,8,
2,6, 3,1, 3,3, 3,5, 3,7, 3,8, 4,0, 5,2, 5,4,
2,0, 2,1, 3,2, 3,3, 4,4, 4,5, 5,6, 5,7, 4,8,
1,0, 2,2, 2,-2,
1,-3, 1,3,
2,-17, 2,-5, 2,5, 2,17,
2,-7, 2,2, 2,9, 2,18,
2,-18, 2,-9, 2,-2, 2,7,
2,-28, 2,28, 3,-49, 3,-9, 3,9, 4,49, 5,-79, 5,79,
2,-1, 2,13, 2,26, 3,39, 4,-16, 5,55, 6,-37, 6,76,
2,-26, 2,-13, 2,1, 3,-39, 4,16, 5,-55, 6,-76, 6,37
};
ushort huff[19][256];
int row, col, tree, nreps, rep, step, i, c, s, r, x, y, val;
short last[3] = { 16,16,16 }, mul[3], buf[3][3][386];
static const ushort pt[] =
{ 0,0, 1280,1344, 2320,3616, 3328,8000, 4095,16383, 65535,16383 };
for (i=2; i < 12; i+=2)
for (c=pt[i-2]; c <= pt[i]; c++)
curve[c] = (float)
(c-pt[i-2]) / (pt[i]-pt[i-2]) * (pt[i+1]-pt[i-1]) + pt[i-1] + 0.5;
for (s=i=0; i < sizeof src; i+=2)
FORC(256 >> src[i])
huff[0][s++] = src[i] << 8 | (uchar) src[i+1];
s = kodak_cbpp == 243 ? 2 : 3;
FORC(256) huff[18][c] = (8-s) << 8 | c >> s << s | 1 << (s-1);
getbits(-1);
for (i=0; i < sizeof(buf)/sizeof(short); i++)
buf[0][0][i] = 2048;
for (row=0; row < height; row+=4) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
FORC3 mul[c] = getbits(6);
FORC3 {
val = ((0x1000000/last[c] + 0x7ff) >> 12) * mul[c];
s = val > 65564 ? 10:12;
x = ~(-1 << (s-1));
val <<= 12-s;
for (i=0; i < sizeof(buf[0])/sizeof(short); i++)
buf[c][0][i] = (buf[c][0][i] * val + x) >> s;
last[c] = mul[c];
for (r=0; r <= !c; r++) {
buf[c][1][width/2] = buf[c][2][width/2] = mul[c] << 7;
for (tree=1, col=width/2; col > 0; ) {
if ((tree = radc_token(tree))) {
col -= 2;
if (tree == 8)
FORYX buf[c][y][x] = (uchar) radc_token(18) * mul[c];
else
FORYX buf[c][y][x] = radc_token(tree+10) * 16 + PREDICTOR;
} else
do {
nreps = (col > 2) ? radc_token(9) + 1 : 1;
for (rep=0; rep < 8 && rep < nreps && col > 0; rep++) {
col -= 2;
FORYX buf[c][y][x] = PREDICTOR;
if (rep & 1) {
step = radc_token(10) << 4;
FORYX buf[c][y][x] += step;
}
}
} while (nreps == 9);
}
for (y=0; y < 2; y++)
for (x=0; x < width/2; x++) {
val = (buf[c][y+1][x] << 4) / mul[c];
if (val < 0) val = 0;
if (c) RAW(row+y*2+c-1,x*2+2-c) = val;
else RAW(row+r*2+y,x*2+y) = val;
}
memcpy (buf[c][0]+!c, buf[c][2], sizeof buf[c][0]-2*!c);
}
}
for (y=row; y < row+4; y++)
for (x=0; x < width; x++)
if ((x+y) & 1) {
r = x ? x-1 : x+1;
s = x+1 < width ? x+1 : x-1;
val = (RAW(y,x)-2048)*2 + (RAW(y,r)+RAW(y,s))/2;
if (val < 0) val = 0;
RAW(y,x) = val;
}
}
for (i=0; i < height*width; i++)
raw_image[i] = curve[raw_image[i]];
maximum = 0x3fff;
} | 0 | [] | LibRaw | 9ae25d8c3a6bfb40c582538193264f74c9b93bc0 | 159,497,467,789,836,530,000,000,000,000,000,000,000 | 99 | backported 0.15.4 datachecks |
void ssl3_clear(SSL *s)
{
unsigned char *rp,*wp;
size_t rlen, wlen;
#ifdef TLSEXT_TYPE_opaque_prf_input
if (s->s3->client_opaque_prf_input != NULL)
OPENSSL_free(s->s3->client_opaque_prf_input);
s->s3->client_opaque_prf_input = NULL;
if (s->s3->server_opaque_prf_input != NULL)
OPENSSL_free(s->s3->server_opaque_prf_input);
s->s3->server_opaque_prf_input = NULL;
#endif
ssl3_cleanup_key_block(s);
if (s->s3->tmp.ca_names != NULL)
sk_X509_NAME_pop_free(s->s3->tmp.ca_names,X509_NAME_free);
if (s->s3->rrec.comp != NULL)
{
OPENSSL_free(s->s3->rrec.comp);
s->s3->rrec.comp=NULL;
}
#ifndef OPENSSL_NO_DH
if (s->s3->tmp.dh != NULL)
DH_free(s->s3->tmp.dh);
#endif
#ifndef OPENSSL_NO_ECDH
if (s->s3->tmp.ecdh != NULL)
EC_KEY_free(s->s3->tmp.ecdh);
#endif
rp = s->s3->rbuf.buf;
wp = s->s3->wbuf.buf;
rlen = s->s3->rbuf.len;
wlen = s->s3->wbuf.len;
if (s->s3->handshake_buffer) {
BIO_free(s->s3->handshake_buffer);
s->s3->handshake_buffer = NULL;
}
if (s->s3->handshake_dgst) {
ssl3_free_digest_list(s);
}
memset(s->s3,0,sizeof *s->s3);
s->s3->rbuf.buf = rp;
s->s3->wbuf.buf = wp;
s->s3->rbuf.len = rlen;
s->s3->wbuf.len = wlen;
ssl_free_wbio_buffer(s);
s->packet_length=0;
s->s3->renegotiate=0;
s->s3->total_renegotiations=0;
s->s3->num_renegotiations=0;
s->s3->in_read_app_data=0;
s->version=SSL3_VERSION;
#if !defined(OPENSSL_NO_TLSEXT) && !defined(OPENSSL_NO_NPN)
if (s->next_proto_negotiated)
{
OPENSSL_free(s->next_proto_negotiated);
s->next_proto_negotiated = NULL;
s->next_proto_negotiated_len = 0;
}
#endif
} | 0 | [] | openssl | ee2ffc279417f15fef3b1073c7dc81a908991516 | 88,000,316,815,532,480,000,000,000,000,000,000,000 | 67 | Add Next Protocol Negotiation. |
static s32 SVC_ReadNal_header_extension(GF_BitStream *bs, SVC_NALUHeader *NalHeader)
{
gf_bs_read_int_log(bs, 1, "reserved_one_bit");
NalHeader->idr_pic_flag = gf_bs_read_int_log(bs, 1, "idr_flag");
NalHeader->priority_id = gf_bs_read_int_log(bs, 6, "priority_id");
gf_bs_read_int_log(bs, 1, "no_inter_layer_pred_flag");
NalHeader->dependency_id = gf_bs_read_int_log(bs, 3, "DependencyId");
NalHeader->quality_id = gf_bs_read_int_log(bs, 4, "quality_id");
NalHeader->temporal_id = gf_bs_read_int_log(bs, 3, "temporal_id");
gf_bs_read_int_log(bs, 1, "use_ref_base_pic_flag");
gf_bs_read_int_log(bs, 1, "discardable_flag");
gf_bs_read_int_log(bs, 1, "output_flag");
gf_bs_read_int_log(bs, 2, "reserved_three_2bits");
return 1;
} | 0 | [
"CWE-190",
"CWE-787"
] | gpac | 51cdb67ff7c5f1242ac58c5aa603ceaf1793b788 | 295,619,186,094,668,350,000,000,000,000,000,000,000 | 15 | add safety in avc/hevc/vvc sps/pps/vps ID check - cf #1720 #1721 #1722 |
trunc_string(
char_u *s,
char_u *buf,
int room_in,
int buflen)
{
size_t room = room_in - 3; // "..." takes 3 chars
size_t half;
size_t len = 0;
int e;
int i;
int n;
if (*s == NUL)
{
if (buflen > 0)
*buf = NUL;
return;
}
if (room_in < 3)
room = 0;
half = room / 2;
// First part: Start of the string.
for (e = 0; len < half && e < buflen; ++e)
{
if (s[e] == NUL)
{
// text fits without truncating!
buf[e] = NUL;
return;
}
n = ptr2cells(s + e);
if (len + n > half)
break;
len += n;
buf[e] = s[e];
if (has_mbyte)
for (n = (*mb_ptr2len)(s + e); --n > 0; )
{
if (++e == buflen)
break;
buf[e] = s[e];
}
}
// Last part: End of the string.
i = e;
if (enc_dbcs != 0)
{
// For DBCS going backwards in a string is slow, but
// computing the cell width isn't too slow: go forward
// until the rest fits.
n = vim_strsize(s + i);
while (len + n > room)
{
n -= ptr2cells(s + i);
i += (*mb_ptr2len)(s + i);
}
}
else if (enc_utf8)
{
// For UTF-8 we can go backwards easily.
half = i = (int)STRLEN(s);
for (;;)
{
do
half = half - utf_head_off(s, s + half - 1) - 1;
while (half > 0 && utf_iscomposing(utf_ptr2char(s + half)));
n = ptr2cells(s + half);
if (len + n > room || half == 0)
break;
len += n;
i = (int)half;
}
}
else
{
for (i = (int)STRLEN(s);
i - 1 >= 0 && len + (n = ptr2cells(s + i - 1)) <= room; --i)
len += n;
}
if (i <= e + 3)
{
// text fits without truncating
if (s != buf)
{
len = STRLEN(s);
if (len >= (size_t)buflen)
len = buflen - 1;
len = len - e + 1;
if (len < 1)
buf[e - 1] = NUL;
else
mch_memmove(buf + e, s + e, len);
}
}
else if (e + 3 < buflen)
{
// set the middle and copy the last part
mch_memmove(buf + e, "...", (size_t)3);
len = STRLEN(s + i) + 1;
if (len >= (size_t)buflen - e - 3)
len = buflen - e - 3 - 1;
mch_memmove(buf + e + 3, s + i, len);
buf[e + 3 + len - 1] = NUL;
}
else
{
// can't fit in the "...", just truncate it
buf[e - 1] = NUL;
}
} | 0 | [
"CWE-416"
] | vim | 9f1a39a5d1cd7989ada2d1cb32f97d84360e050f | 4,788,569,192,332,829,300,000,000,000,000,000,000 | 116 | patch 8.2.4040: keeping track of allocated lines is too complicated
Problem: Keeping track of allocated lines in user functions is too
complicated.
Solution: Instead of freeing individual lines keep them all until the end. |
int move_y(DviContext *dvi, int opcode)
{
int v, vv;
if(opcode != DVI_Y0)
dvi->pos.y = dsgetn(dvi, opcode - DVI_Y0);
v = dvi->pos.v;
vv = move_vertical(dvi, dvi->pos.y);
SHOWCMD((dvi, "y", opcode - DVI_Y0,
"%d h:=%d%c%d=%d, hh:=%d\n",
dvi->pos.y, DBGSUM(v, dvi->pos.y, dvi->pos.v), vv));
dvi->pos.vv = vv;
return 0;
} | 0 | [
"CWE-20"
] | evince | d4139205b010ed06310d14284e63114e88ec6de2 | 205,545,961,604,304,030,000,000,000,000,000,000,000 | 14 | backends: Fix several security issues in the dvi-backend.
See CVE-2010-2640, CVE-2010-2641, CVE-2010-2642 and CVE-2010-2643. |
QPDFWriter::calculateXrefStreamPadding(int xref_bytes)
{
// This routine is called right after a linearization first pass
// xref stream has been written without compression. Calculate
// the amount of padding that would be required in the worst case,
// assuming the number of uncompressed bytes remains the same.
// The worst case for zlib is that the output is larger than the
// input by 6 bytes plus 5 bytes per 16K, and then we'll add 10
// extra bytes for number length increases.
return 16 + (5 * ((xref_bytes + 16383) / 16384));
} | 1 | [
"CWE-787"
] | qpdf | d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e | 147,483,079,657,515,570,000,000,000,000,000,000,000 | 12 | Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition. |
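The commit describes replacing silent narrowing conversions with checked ones that fail loudly. qpdf does this with C++ helpers; the plain C sketch below captures the same idea and is not the project's actual implementation.

#include <errno.h>
#include <limits.h>

/* Range-checked narrowing: refuse values that do not fit in int instead
 * of silently truncating them. */
static int to_int_checked(long long v, int *out)
{
    if (v < INT_MIN || v > INT_MAX)
        return -ERANGE;     /* caller must handle the failure explicitly */
    *out = (int)v;
    return 0;
}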
static signed char cut(const double val) {
return val<(double)min()?min():val>(double)max()?max():(signed char)val; } | 0 | [
"CWE-770"
] | cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 157,274,835,906,382,670,000,000,000,000,000,000,000 | 2 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
int fb_prepare_logo(struct fb_info *info, int rotate) { return 0; } | 0 | [
"CWE-703",
"CWE-189"
] | linux | fc9bbca8f650e5f738af8806317c0a041a48ae4a | 150,312,894,149,578,270,000,000,000,000,000,000,000 | 1 | vm: convert fb_mmap to vm_iomap_memory() helper
This is my example conversion of a few existing mmap users. The
fb_mmap() case is a good example because it is a bit more complicated
than some: fb_mmap() mmaps one of two different memory areas depending
on the page offset of the mmap (but happily there is never any mixing of
the two, so the helper function still works).
Signed-off-by: Linus Torvalds <[email protected]> |
int sqlite3MemCompare(const Mem *pMem1, const Mem *pMem2, const CollSeq *pColl){
int f1, f2;
int combined_flags;
f1 = pMem1->flags;
f2 = pMem2->flags;
combined_flags = f1|f2;
assert( !sqlite3VdbeMemIsRowSet(pMem1) && !sqlite3VdbeMemIsRowSet(pMem2) );
/* If one value is NULL, it is less than the other. If both values
** are NULL, return 0.
*/
if( combined_flags&MEM_Null ){
return (f2&MEM_Null) - (f1&MEM_Null);
}
/* At least one of the two values is a number
*/
if( combined_flags&(MEM_Int|MEM_Real|MEM_IntReal) ){
testcase( combined_flags & MEM_Int );
testcase( combined_flags & MEM_Real );
testcase( combined_flags & MEM_IntReal );
if( (f1 & f2 & (MEM_Int|MEM_IntReal))!=0 ){
testcase( f1 & f2 & MEM_Int );
testcase( f1 & f2 & MEM_IntReal );
if( pMem1->u.i < pMem2->u.i ) return -1;
if( pMem1->u.i > pMem2->u.i ) return +1;
return 0;
}
if( (f1 & f2 & MEM_Real)!=0 ){
if( pMem1->u.r < pMem2->u.r ) return -1;
if( pMem1->u.r > pMem2->u.r ) return +1;
return 0;
}
if( (f1&(MEM_Int|MEM_IntReal))!=0 ){
testcase( f1 & MEM_Int );
testcase( f1 & MEM_IntReal );
if( (f2&MEM_Real)!=0 ){
return sqlite3IntFloatCompare(pMem1->u.i, pMem2->u.r);
}else if( (f2&(MEM_Int|MEM_IntReal))!=0 ){
if( pMem1->u.i < pMem2->u.i ) return -1;
if( pMem1->u.i > pMem2->u.i ) return +1;
return 0;
}else{
return -1;
}
}
if( (f1&MEM_Real)!=0 ){
if( (f2&(MEM_Int|MEM_IntReal))!=0 ){
testcase( f2 & MEM_Int );
testcase( f2 & MEM_IntReal );
return -sqlite3IntFloatCompare(pMem2->u.i, pMem1->u.r);
}else{
return -1;
}
}
return +1;
}
/* If one value is a string and the other is a blob, the string is less.
** If both are strings, compare using the collating functions.
*/
if( combined_flags&MEM_Str ){
if( (f1 & MEM_Str)==0 ){
return 1;
}
if( (f2 & MEM_Str)==0 ){
return -1;
}
assert( pMem1->enc==pMem2->enc || pMem1->db->mallocFailed );
assert( pMem1->enc==SQLITE_UTF8 ||
pMem1->enc==SQLITE_UTF16LE || pMem1->enc==SQLITE_UTF16BE );
/* The collation sequence must be defined at this point, even if
** the user deletes the collation sequence after the vdbe program is
** compiled (this was not always the case).
*/
assert( !pColl || pColl->xCmp );
if( pColl ){
return vdbeCompareMemString(pMem1, pMem2, pColl, 0);
}
/* If a NULL pointer was passed as the collate function, fall through
** to the blob case and use memcmp(). */
}
/* Both values must be blobs. Compare using memcmp(). */
return sqlite3BlobCompare(pMem1, pMem2);
} | 0 | [
"CWE-755"
] | sqlite | 8654186b0236d556aa85528c2573ee0b6ab71be3 | 50,454,202,702,686,460,000,000,000,000,000,000,000 | 90 | When an error occurs while rewriting the parser tree for window functions
in the sqlite3WindowRewrite() routine, make sure that pParse->nErr is set,
and make sure that this shuts down any subsequent code generation that might
depend on the transformations that were implemented. This fixes a problem
discovered by the Yongheng and Rui fuzzer.
FossilOrigin-Name: e2bddcd4c55ba3cbe0130332679ff4b048630d0ced9a8899982edb5a3569ba7f |
void ElectronApiIPCHandlerImpl::Invoke(bool internal,
const std::string& channel,
blink::CloneableMessage arguments,
InvokeCallback callback) {
api::WebContents* api_web_contents = api::WebContents::From(web_contents());
if (api_web_contents) {
api_web_contents->Invoke(internal, channel, std::move(arguments),
std::move(callback), GetRenderFrameHost());
}
} | 0 | [] | electron | e9fa834757f41c0b9fe44a4dffe3d7d437f52d34 | 190,836,731,042,876,230,000,000,000,000,000,000,000 | 10 | fix: ensure ElectronBrowser mojo service is only bound to appropriate render frames (#33344)
* fix: ensure ElectronBrowser mojo service is only bound to authorized render frames
Notes: no-notes
* refactor: extract electron API IPC to its own mojo interface
* fix: just check main frame not primary main frame
Co-authored-by: Samuel Attard <[email protected]>
Co-authored-by: Samuel Attard <[email protected]> |
static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
{
ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
PHYAddr[qdev->mac_index]);
} | 0 | [
"CWE-401"
] | linux | 1acb8f2a7a9f10543868ddd737e37424d5c36cf4 | 295,931,946,721,869,540,000,000,000,000,000,000,000 | 5 | net: qlogic: Fix memory leak in ql_alloc_large_buffers
In ql_alloc_large_buffers, a new skb is allocated via netdev_alloc_skb.
This skb should be released if pci_dma_mapping_error fails.
Fixes: 0f8ab89e825f ("qla3xxx: Check return code from pci_map_single() in ql_release_to_lrg_buf_free_list(), ql_populate_free_queue(), ql_alloc_large_buffers(), and ql3xxx_send()")
Signed-off-by: Navid Emamdoost <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
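The corrected allocation path simply releases the skb when the DMA mapping cannot be used. The kernel-style sketch below assumes the usual networking headers and the qdev field names visible elsewhere in the driver; it is not the verbatim ql_alloc_large_buffers() fix.

/* Sketch only: free the freshly allocated skb when mapping fails. */
static int alloc_one_large_buffer(struct ql3_adapter *qdev)
{
    struct sk_buff *skb;
    dma_addr_t map;

    skb = netdev_alloc_skb(qdev->ndev, qdev->lrg_buffer_len);
    if (skb == NULL)
        return -ENOMEM;

    map = pci_map_single(qdev->pdev, skb->data,
                         qdev->lrg_buffer_len, PCI_DMA_FROMDEVICE);
    if (pci_dma_mapping_error(qdev->pdev, map)) {
        dev_kfree_skb_any(skb);     /* the release that was missing */
        return -ENOMEM;
    }
    /* ... hand skb and map over to the receive ring here ... */
    return 0;
}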
static coroutine_fn int nbd_negotiate(NBDClient *client, Error **errp)
{
char buf[8 + 8 + 8 + 128];
int ret;
const uint16_t myflags = (NBD_FLAG_HAS_FLAGS | NBD_FLAG_SEND_TRIM |
NBD_FLAG_SEND_FLUSH | NBD_FLAG_SEND_FUA |
NBD_FLAG_SEND_WRITE_ZEROES);
bool oldStyle;
/* Old style negotiation header without options
[ 0 .. 7] passwd ("NBDMAGIC")
[ 8 .. 15] magic (NBD_CLIENT_MAGIC)
[16 .. 23] size
[24 .. 25] server flags (0)
[26 .. 27] export flags
[28 .. 151] reserved (0)
New style negotiation header with options
[ 0 .. 7] passwd ("NBDMAGIC")
[ 8 .. 15] magic (NBD_OPTS_MAGIC)
[16 .. 17] server flags (0)
....options sent, ending in NBD_OPT_EXPORT_NAME....
*/
qio_channel_set_blocking(client->ioc, false, NULL);
trace_nbd_negotiate_begin();
memset(buf, 0, sizeof(buf));
memcpy(buf, "NBDMAGIC", 8);
oldStyle = client->exp != NULL && !client->tlscreds;
if (oldStyle) {
trace_nbd_negotiate_old_style(client->exp->size,
client->exp->nbdflags | myflags);
stq_be_p(buf + 8, NBD_CLIENT_MAGIC);
stq_be_p(buf + 16, client->exp->size);
stw_be_p(buf + 26, client->exp->nbdflags | myflags);
if (nbd_write(client->ioc, buf, sizeof(buf), errp) < 0) {
error_prepend(errp, "write failed: ");
return -EINVAL;
}
} else {
stq_be_p(buf + 8, NBD_OPTS_MAGIC);
stw_be_p(buf + 16, NBD_FLAG_FIXED_NEWSTYLE | NBD_FLAG_NO_ZEROES);
if (nbd_write(client->ioc, buf, 18, errp) < 0) {
error_prepend(errp, "write failed: ");
return -EINVAL;
}
ret = nbd_negotiate_options(client, myflags, errp);
if (ret != 0) {
if (ret < 0) {
error_prepend(errp, "option negotiation failed: ");
}
return ret;
}
}
trace_nbd_negotiate_success();
return 0;
} | 1 | [] | qemu | f37708f6b8e0bef0dd85c6aad7fc2062071f8227 | 22,362,860,261,008,885,000,000,000,000,000,000,000 | 63 | nbd: Implement NBD_OPT_GO on server
NBD_OPT_EXPORT_NAME is lousy: per the NBD protocol, any failure
requires us to close the connection rather than report an error.
Therefore, upstream NBD recently added NBD_OPT_GO as the improved
version of the option that does what we want [1], along with
NBD_OPT_INFO that returns the same information but does not
transition to transmission phase.
[1] https://github.com/NetworkBlockDevice/nbd/blob/extension-info/doc/proto.md
This is a first cut at the information types, and only passes the
same information already available through NBD_OPT_LIST and
NBD_OPT_EXPORT_NAME; items like NBD_INFO_BLOCK_SIZE (and thus any
use of NBD_REP_ERR_BLOCK_SIZE_REQD) are intentionally left for
later patches.
Signed-off-by: Eric Blake <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
virDomainRestoreFlags(virConnectPtr conn, const char *from, const char *dxml,
unsigned int flags)
{
VIR_DEBUG("conn=%p, from=%s, dxml=%s, flags=%x",
conn, NULLSTR(from), NULLSTR(dxml), flags);
virResetLastError();
virCheckConnectReturn(conn, -1);
virCheckReadOnlyGoto(conn->flags, error);
virCheckNonNullArgGoto(from, error);
VIR_EXCLUSIVE_FLAGS_GOTO(VIR_DOMAIN_SAVE_RUNNING,
VIR_DOMAIN_SAVE_PAUSED,
error);
if (conn->driver->domainRestoreFlags) {
int ret;
char *absolute_from;
/* We must absolutize the file path as the restore is done out of process */
if (virFileAbsPath(from, &absolute_from) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("could not build absolute input file path"));
goto error;
}
ret = conn->driver->domainRestoreFlags(conn, absolute_from, dxml,
flags);
VIR_FREE(absolute_from);
if (ret < 0)
goto error;
return ret;
}
virReportUnsupportedError();
error:
virDispatchError(conn);
return -1;
} | 0 | [
"CWE-254"
] | libvirt | 506e9d6c2d4baaf580d489fff0690c0ff2ff588f | 200,622,014,522,411,700,000,000,000,000,000,000,000 | 43 | virDomainGetTime: Deny on RO connections
We have a policy that if API may end up talking to a guest agent
it should require RW connection. We don't obey the rule in
virDomainGetTime().
Signed-off-by: Michal Privoznik <[email protected]> |
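virDomainRestoreFlags() above already shows the policy macro in action; applying the same guard to virDomainGetTime() looks roughly like the sketch below. Argument checks and the driver dispatch are elided, and this is a simplified illustration rather than the exact libvirt diff.

int
virDomainGetTimeSketch(virDomainPtr dom, long long *seconds,
                       unsigned int *nseconds, unsigned int flags)
{
    virResetLastError();
    virCheckDomainReturn(dom, -1);

    /* guest-agent backed APIs must not be reachable on read-only connections */
    virCheckReadOnlyGoto(dom->conn->flags, error);

    /* ... dispatch to the driver's domainGetTime callback here ... */
    return 0;

 error:
    virDispatchError(dom->conn);
    return -1;
}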
static int tun_chr_fasync(int fd, struct file *file, int on)
{
struct tun_file *tfile = file->private_data;
int ret;
if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
goto out;
if (on) {
__f_setown(file, task_pid(current), PIDTYPE_PID, 0);
tfile->flags |= TUN_FASYNC;
} else
tfile->flags &= ~TUN_FASYNC;
ret = 0;
out:
return ret;
} | 0 | [
"CWE-476"
] | linux | 0ad646c81b2182f7fa67ec0c8c825e0ee165696d | 322,746,226,457,985,800,000,000,000,000,000,000,000 | 17 | tun: call dev_get_valid_name() before register_netdevice()
register_netdevice() could fail early when we have an invalid
dev name, in which case ->ndo_uninit() is not called. For tun
device, this is a problem because a timer etc. are already
initialized and it expects ->ndo_uninit() to clean them up.
We could move these initializations into a ->ndo_init() so
that register_netdevice() knows better, however this is still
complicated due to the logic in tun_detach().
Therefore, I choose to just call dev_get_valid_name() before
register_netdevice(), which is quicker and much easier to audit.
And for this specific case, it is already enough.
Fixes: 96442e42429e ("tuntap: choose the txq based on rxq")
Reported-by: Dmitry Alexeev <[email protected]>
Cc: Jason Wang <[email protected]>
Cc: "Michael S. Tsirkin" <[email protected]>
Signed-off-by: Cong Wang <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
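The ordering change is the whole fix: validate the requested name before any per-device state exists, so a failing register_netdevice() cannot leave half-initialized timers behind. The fragment below assumes the error labels and the dev_get_valid_name() signature described in the commit; it is not the complete tun_set_iff().

err = dev_get_valid_name(net, dev, name);   /* fail early on a bad name */
if (err < 0)
    goto err_free_dev;

/* timers, queues and other per-device state are set up only after the
 * name is known to be acceptable */

err = register_netdevice(dev);
if (err < 0)
    goto err_detach;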
forward_callback(isc_task_t *task, isc_event_t *event) {
const char me[] = "forward_callback";
dns_requestevent_t *revent = (dns_requestevent_t *)event;
dns_message_t *msg = NULL;
char master[ISC_SOCKADDR_FORMATSIZE];
isc_result_t result;
dns_forward_t *forward;
dns_zone_t *zone;
UNUSED(task);
forward = revent->ev_arg;
INSIST(DNS_FORWARD_VALID(forward));
zone = forward->zone;
INSIST(DNS_ZONE_VALID(zone));
ENTER;
isc_sockaddr_format(&forward->addr, master, sizeof(master));
if (revent->result != ISC_R_SUCCESS) {
dns_zone_log(zone, ISC_LOG_INFO,
"could not forward dynamic update to %s: %s",
master, dns_result_totext(revent->result));
goto next_master;
}
result = dns_message_create(zone->mctx, DNS_MESSAGE_INTENTPARSE, &msg);
if (result != ISC_R_SUCCESS)
goto next_master;
result = dns_request_getresponse(revent->request, msg,
DNS_MESSAGEPARSE_PRESERVEORDER |
DNS_MESSAGEPARSE_CLONEBUFFER);
if (result != ISC_R_SUCCESS)
goto next_master;
switch (msg->rcode) {
/*
* Pass these rcodes back to client.
*/
case dns_rcode_noerror:
case dns_rcode_yxdomain:
case dns_rcode_yxrrset:
case dns_rcode_nxrrset:
case dns_rcode_refused:
case dns_rcode_nxdomain: {
char rcode[128];
isc_buffer_t rb;
isc_buffer_init(&rb, rcode, sizeof(rcode));
(void)dns_rcode_totext(msg->rcode, &rb);
dns_zone_log(zone, ISC_LOG_INFO,
"forwarded dynamic update: "
"master %s returned: %.*s",
master, (int)rb.used, rcode);
break;
}
/* These should not occur if the masters/zone are valid. */
case dns_rcode_notzone:
case dns_rcode_notauth: {
char rcode[128];
isc_buffer_t rb;
isc_buffer_init(&rb, rcode, sizeof(rcode));
(void)dns_rcode_totext(msg->rcode, &rb);
dns_zone_log(zone, ISC_LOG_WARNING,
"forwarding dynamic update: "
"unexpected response: master %s returned: %.*s",
master, (int)rb.used, rcode);
goto next_master;
}
/* Try another server for these rcodes. */
case dns_rcode_formerr:
case dns_rcode_servfail:
case dns_rcode_notimp:
case dns_rcode_badvers:
default:
goto next_master;
}
/* call callback */
(forward->callback)(forward->callback_arg, ISC_R_SUCCESS, msg);
msg = NULL;
dns_request_destroy(&forward->request);
forward_destroy(forward);
isc_event_free(&event);
return;
next_master:
if (msg != NULL)
dns_message_destroy(&msg);
isc_event_free(&event);
forward->which++;
dns_request_destroy(&forward->request);
result = sendtomaster(forward);
if (result != ISC_R_SUCCESS) {
/* call callback */
dns_zone_log(zone, ISC_LOG_DEBUG(3),
"exhausted dynamic update forwarder list");
(forward->callback)(forward->callback_arg, result, NULL);
forward_destroy(forward);
}
} | 0 | [
"CWE-327"
] | bind9 | f09352d20a9d360e50683cd1d2fc52ccedcd77a0 | 45,883,741,603,305,090,000,000,000,000,000,000,000 | 106 | Update keyfetch_done compute_tag check
If in keyfetch_done the compute_tag fails (because for example the
algorithm is not supported), don't crash, but instead ignore the
key. |
inline void getrf(int &N, float *lapA, int *IPIV, int &INFO) {
sgetrf_(&N,&N,lapA,&N,IPIV,&INFO); | 0 | [
"CWE-125"
] | CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 148,012,534,932,729,350,000,000,000,000,000,000,000 | 3 | Fix other issues in 'CImg<T>::load_bmp()'. |
static void *nfs_volume_list_next(struct seq_file *p, void *v, loff_t *pos)
{
return seq_list_next(v, &nfs_volume_list, pos);
} | 0 | [
"CWE-20"
] | linux-2.6 | 54af3bb543c071769141387a42deaaab5074da55 | 186,643,566,939,542,830,000,000,000,000,000,000,000 | 4 | NFS: Fix an Oops in encode_lookup()
It doesn't look as if the NFS file name limit is being initialised correctly
in the struct nfs_server. Make sure that we limit whatever is being set in
nfs_probe_fsinfo() and nfs_init_server().
Also ensure that readdirplus and nfs4_path_walk respect our file name
limits.
Signed-off-by: Trond Myklebust <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
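The fix clamps the server-reported name length before it is used anywhere else. A hedged one-line sketch of that clamp (NFS4_MAXNAMLEN is assumed to be the protocol's limit; the surrounding probe function is elided):

/* Sketch only: never trust a zero or oversized namelen from the server. */
if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
    server->namelen = NFS4_MAXNAMLEN;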
generate_a_aaaa_check(struct module_qstate* qstate, struct iter_qstate* iq,
int id)
{
struct iter_env* ie = (struct iter_env*)qstate->env->modinfo[id];
struct module_qstate* subq;
size_t i;
struct reply_info* rep = iq->response->rep;
struct ub_packed_rrset_key* s;
log_assert(iq->dp);
if(iq->depth == ie->max_dependency_depth)
return;
/* walk through additional, and check if in-zone,
* only relevant A, AAAA are left after scrub anyway */
for(i=rep->an_numrrsets+rep->ns_numrrsets; i<rep->rrset_count; i++) {
s = rep->rrsets[i];
/* check *ALL* addresses that are transmitted in additional*/
/* is it an address ? */
if( !(ntohs(s->rk.type)==LDNS_RR_TYPE_A ||
ntohs(s->rk.type)==LDNS_RR_TYPE_AAAA)) {
continue;
}
/* is this query the same as the A/AAAA check for it */
if(qstate->qinfo.qtype == ntohs(s->rk.type) &&
qstate->qinfo.qclass == ntohs(s->rk.rrset_class) &&
query_dname_compare(qstate->qinfo.qname,
s->rk.dname)==0 &&
(qstate->query_flags&BIT_RD) &&
!(qstate->query_flags&BIT_CD))
continue;
/* generate subrequest for it */
log_nametypeclass(VERB_ALGO, "schedule addr fetch",
s->rk.dname, ntohs(s->rk.type),
ntohs(s->rk.rrset_class));
if(!generate_sub_request(s->rk.dname, s->rk.dname_len,
ntohs(s->rk.type), ntohs(s->rk.rrset_class),
qstate, id, iq,
INIT_REQUEST_STATE, FINISHED_STATE, &subq, 1, 0)) {
verbose(VERB_ALGO, "could not generate addr check");
return;
}
/* ignore subq - not need for more init */
}
} | 0 | [
"CWE-400"
] | unbound | ba0f382eee814e56900a535778d13206b86b6d49 | 311,481,591,450,962,100,000,000,000,000,000,000,000 | 45 | - CVE-2020-12662 Unbound can be tricked into amplifying an incoming
query into a large number of queries directed to a target.
- CVE-2020-12663 Malformed answers from upstream name servers can be
used to make Unbound unresponsive. |
CImg<T>& load_pdf_external(const char *const filename, const unsigned int resolution=400) {
if (!filename)
throw CImgArgumentException(_cimg_instance
"load_pdf_external(): Specified filename is (null).",
cimg_instance);
CImg<charT> command(1024), filename_tmp(256);
std::FILE *file = 0;
const CImg<charT> s_filename = CImg<charT>::string(filename)._system_strescape();
#if cimg_OS==1
cimg_snprintf(command,command._width,"gs -q -dNOPAUSE -sDEVICE=ppmraw -o - -r%u \"%s\"",
resolution,s_filename.data());
file = popen(command,"r");
if (file) {
const unsigned int omode = cimg::exception_mode();
cimg::exception_mode(0);
try { load_pnm(file); } catch (...) {
pclose(file);
cimg::exception_mode(omode);
throw CImgIOException(_cimg_instance
"load_pdf_external(): Failed to load file '%s' with external command 'gs'.",
cimg_instance,
filename);
}
pclose(file);
return *this;
}
#endif
do {
cimg_snprintf(filename_tmp,filename_tmp._width,"%s%c%s.ppm",
cimg::temporary_path(),cimg_file_separator,cimg::filenamerand());
if ((file=cimg::std_fopen(filename_tmp,"rb"))!=0) cimg::fclose(file);
} while (file);
cimg_snprintf(command,command._width,"gs -q -dNOPAUSE -sDEVICE=ppmraw -o \"%s\" -r%u \"%s\"",
CImg<charT>::string(filename_tmp)._system_strescape().data(),resolution,s_filename.data());
cimg::system(command,"gs");
if (!(file=cimg::std_fopen(filename_tmp,"rb"))) {
cimg::fclose(cimg::fopen(filename,"r"));
throw CImgIOException(_cimg_instance
"load_pdf_external(): Failed to load file '%s' with external command 'gs'.",
cimg_instance,
filename);
} else cimg::fclose(file);
load_pnm(filename_tmp);
std::remove(filename_tmp);
return *this;
} | 0 | [
"CWE-770"
] | cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 37,322,987,410,487,743,000,000,000,000,000,000,000 | 46 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file do not exceed the file size.
static int fib6_new_sernum(struct net *net)
{
int new, old;
do {
old = atomic_read(&net->ipv6.fib6_sernum);
new = old < INT_MAX ? old + 1 : 1;
} while (atomic_cmpxchg(&net->ipv6.fib6_sernum,
old, new) != old);
return new;
} | 0 | [
"CWE-755"
] | linux | 7b09c2d052db4b4ad0b27b97918b46a7746966fa | 118,589,271,333,220,300,000,000,000,000,000,000,000 | 11 | ipv6: fix a typo in fib6_rule_lookup()
Yi Ren reported an issue discovered by syzkaller, and bisected
to the cited commit.
Many thanks to Yi, this trivial patch does not reflect the patient
work that has been done.
Fixes: d64a1f574a29 ("ipv6: honor RT6_LOOKUP_F_DST_NOREF in rule lookup logic")
Signed-off-by: Eric Dumazet <[email protected]>
Acked-by: Wei Wang <[email protected]>
Bisected-and-reported-by: Yi Ren <[email protected]>
Signed-off-by: Jakub Kicinski <[email protected]> |
ip_rbTkWaitCommand(clientData, interp, objc, objv)
ClientData clientData;
Tcl_Interp *interp;
int objc;
char *objv[];
#endif
{
Tk_Window tkwin = (Tk_Window) clientData;
Tk_Window window;
int done, index;
static CONST char *optionStrings[] = { "variable", "visibility", "window",
(char *) NULL };
enum options { TKWAIT_VARIABLE, TKWAIT_VISIBILITY, TKWAIT_WINDOW };
char *nameString;
int ret, dummy;
int thr_crit_bup;
DUMP1("Ruby's 'tkwait' is called");
if (interp == (Tcl_Interp*)NULL) {
rbtk_pending_exception = rb_exc_new2(rb_eRuntimeError,
"IP is deleted");
return TCL_ERROR;
}
#if 0
if (!rb_thread_alone()
&& eventloop_thread != Qnil
&& eventloop_thread != rb_thread_current()) {
#if TCL_MAJOR_VERSION >= 8
DUMP1("call ip_rb_threadTkWaitObjCmd");
return ip_rb_threadTkWaitObjCmd((ClientData)tkwin, interp, objc, objv);
#else /* TCL_MAJOR_VERSION < 8 */
DUMP1("call ip_rb_threadTkWaitCommand");
return ip_rb_threadTkWwaitCommand((ClientData)tkwin, interp, objc, objv);
#endif
}
#endif
Tcl_Preserve(interp);
Tcl_ResetResult(interp);
if (objc != 3) {
#ifdef Tcl_WrongNumArgs
Tcl_WrongNumArgs(interp, 1, objv, "variable|visibility|window name");
#else
thr_crit_bup = rb_thread_critical;
rb_thread_critical = Qtrue;
#if TCL_MAJOR_VERSION >= 8
Tcl_AppendResult(interp, "wrong number of arguments: should be \"",
Tcl_GetStringFromObj(objv[0], &dummy),
" variable|visibility|window name\"",
(char *) NULL);
#else /* TCL_MAJOR_VERSION < 8 */
Tcl_AppendResult(interp, "wrong number of arguments: should be \"",
objv[0], " variable|visibility|window name\"",
(char *) NULL);
#endif
rb_thread_critical = thr_crit_bup;
#endif
Tcl_Release(interp);
return TCL_ERROR;
}
#if TCL_MAJOR_VERSION >= 8
thr_crit_bup = rb_thread_critical;
rb_thread_critical = Qtrue;
/*
if (Tcl_GetIndexFromObj(interp, objv[1],
(CONST84 char **)optionStrings,
"option", 0, &index) != TCL_OK) {
return TCL_ERROR;
}
*/
ret = Tcl_GetIndexFromObj(interp, objv[1],
(CONST84 char **)optionStrings,
"option", 0, &index);
rb_thread_critical = thr_crit_bup;
if (ret != TCL_OK) {
Tcl_Release(interp);
return TCL_ERROR;
}
#else /* TCL_MAJOR_VERSION < 8 */
{
int c = objv[1][0];
size_t length = strlen(objv[1]);
if ((c == 'v') && (strncmp(objv[1], "variable", length) == 0)
&& (length >= 2)) {
index = TKWAIT_VARIABLE;
} else if ((c == 'v') && (strncmp(objv[1], "visibility", length) == 0)
&& (length >= 2)) {
index = TKWAIT_VISIBILITY;
} else if ((c == 'w') && (strncmp(objv[1], "window", length) == 0)) {
index = TKWAIT_WINDOW;
} else {
Tcl_AppendResult(interp, "bad option \"", objv[1],
"\": must be variable, visibility, or window",
(char *) NULL);
Tcl_Release(interp);
return TCL_ERROR;
}
}
#endif
thr_crit_bup = rb_thread_critical;
rb_thread_critical = Qtrue;
#if TCL_MAJOR_VERSION >= 8
Tcl_IncrRefCount(objv[2]);
/* nameString = Tcl_GetString(objv[2]); */
nameString = Tcl_GetStringFromObj(objv[2], &dummy);
#else /* TCL_MAJOR_VERSION < 8 */
nameString = objv[2];
#endif
rb_thread_critical = thr_crit_bup;
switch ((enum options) index) {
case TKWAIT_VARIABLE:
thr_crit_bup = rb_thread_critical;
rb_thread_critical = Qtrue;
/*
if (Tcl_TraceVar(interp, nameString,
TCL_GLOBAL_ONLY|TCL_TRACE_WRITES|TCL_TRACE_UNSETS,
WaitVariableProc, (ClientData) &done) != TCL_OK) {
return TCL_ERROR;
}
*/
ret = Tcl_TraceVar(interp, nameString,
TCL_GLOBAL_ONLY|TCL_TRACE_WRITES|TCL_TRACE_UNSETS,
WaitVariableProc, (ClientData) &done);
rb_thread_critical = thr_crit_bup;
if (ret != TCL_OK) {
#if TCL_MAJOR_VERSION >= 8
Tcl_DecrRefCount(objv[2]);
#endif
Tcl_Release(interp);
return TCL_ERROR;
}
done = 0;
/* lib_eventloop_core(check_rootwidget_flag, 0, &done); */
lib_eventloop_launcher(check_rootwidget_flag, 0, &done, interp);
thr_crit_bup = rb_thread_critical;
rb_thread_critical = Qtrue;
Tcl_UntraceVar(interp, nameString,
TCL_GLOBAL_ONLY|TCL_TRACE_WRITES|TCL_TRACE_UNSETS,
WaitVariableProc, (ClientData) &done);
#if TCL_MAJOR_VERSION >= 8
Tcl_DecrRefCount(objv[2]);
#endif
rb_thread_critical = thr_crit_bup;
/* exception check */
if (!NIL_P(rbtk_pending_exception)) {
Tcl_Release(interp);
/*
if (rb_obj_is_kind_of(rbtk_pending_exception, rb_eSystemExit)) {
*/
if (rb_obj_is_kind_of(rbtk_pending_exception, rb_eSystemExit)
|| rb_obj_is_kind_of(rbtk_pending_exception, rb_eInterrupt)) {
return TCL_RETURN;
} else{
return TCL_ERROR;
}
}
/* trap check */
if (rb_thread_check_trap_pending()) {
Tcl_Release(interp);
return TCL_RETURN;
}
break;
case TKWAIT_VISIBILITY:
thr_crit_bup = rb_thread_critical;
rb_thread_critical = Qtrue;
/* This function works on the Tk eventloop thread only. */
if (!tk_stubs_init_p() || Tk_MainWindow(interp) == (Tk_Window)NULL) {
window = NULL;
} else {
window = Tk_NameToWindow(interp, nameString, tkwin);
}
if (window == NULL) {
Tcl_AppendResult(interp, ": tkwait: ",
"no main-window (not Tk application?)",
(char*)NULL);
rb_thread_critical = thr_crit_bup;
#if TCL_MAJOR_VERSION >= 8
Tcl_DecrRefCount(objv[2]);
#endif
Tcl_Release(interp);
return TCL_ERROR;
}
Tk_CreateEventHandler(window,
VisibilityChangeMask|StructureNotifyMask,
WaitVisibilityProc, (ClientData) &done);
rb_thread_critical = thr_crit_bup;
done = 0;
/* lib_eventloop_core(check_rootwidget_flag, 0, &done); */
lib_eventloop_launcher(check_rootwidget_flag, 0, &done, interp);
/* exception check */
if (!NIL_P(rbtk_pending_exception)) {
#if TCL_MAJOR_VERSION >= 8
Tcl_DecrRefCount(objv[2]);
#endif
Tcl_Release(interp);
/*
if (rb_obj_is_kind_of(rbtk_pending_exception, rb_eSystemExit)) {
*/
if (rb_obj_is_kind_of(rbtk_pending_exception, rb_eSystemExit)
|| rb_obj_is_kind_of(rbtk_pending_exception, rb_eInterrupt)) {
return TCL_RETURN;
} else{
return TCL_ERROR;
}
}
/* trap check */
if (rb_thread_check_trap_pending()) {
#if TCL_MAJOR_VERSION >= 8
Tcl_DecrRefCount(objv[2]);
#endif
Tcl_Release(interp);
return TCL_RETURN;
}
if (done != 1) {
/*
* Note that we do not delete the event handler because it
* was deleted automatically when the window was destroyed.
*/
thr_crit_bup = rb_thread_critical;
rb_thread_critical = Qtrue;
Tcl_ResetResult(interp);
Tcl_AppendResult(interp, "window \"", nameString,
"\" was deleted before its visibility changed",
(char *) NULL);
rb_thread_critical = thr_crit_bup;
#if TCL_MAJOR_VERSION >= 8
Tcl_DecrRefCount(objv[2]);
#endif
Tcl_Release(interp);
return TCL_ERROR;
}
thr_crit_bup = rb_thread_critical;
rb_thread_critical = Qtrue;
#if TCL_MAJOR_VERSION >= 8
Tcl_DecrRefCount(objv[2]);
#endif
Tk_DeleteEventHandler(window,
VisibilityChangeMask|StructureNotifyMask,
WaitVisibilityProc, (ClientData) &done);
rb_thread_critical = thr_crit_bup;
break;
case TKWAIT_WINDOW:
thr_crit_bup = rb_thread_critical;
rb_thread_critical = Qtrue;
/* This function works on the Tk eventloop thread only. */
if (!tk_stubs_init_p() || Tk_MainWindow(interp) == (Tk_Window)NULL) {
window = NULL;
} else {
window = Tk_NameToWindow(interp, nameString, tkwin);
}
#if TCL_MAJOR_VERSION >= 8
Tcl_DecrRefCount(objv[2]);
#endif
if (window == NULL) {
Tcl_AppendResult(interp, ": tkwait: ",
"no main-window (not Tk application?)",
(char*)NULL);
rb_thread_critical = thr_crit_bup;
Tcl_Release(interp);
return TCL_ERROR;
}
Tk_CreateEventHandler(window, StructureNotifyMask,
WaitWindowProc, (ClientData) &done);
rb_thread_critical = thr_crit_bup;
done = 0;
/* lib_eventloop_core(check_rootwidget_flag, 0, &done); */
lib_eventloop_launcher(check_rootwidget_flag, 0, &done, interp);
/* exception check */
if (!NIL_P(rbtk_pending_exception)) {
Tcl_Release(interp);
/*
if (rb_obj_is_kind_of(rbtk_pending_exception, rb_eSystemExit)) {
*/
if (rb_obj_is_kind_of(rbtk_pending_exception, rb_eSystemExit)
|| rb_obj_is_kind_of(rbtk_pending_exception, rb_eInterrupt)) {
return TCL_RETURN;
} else{
return TCL_ERROR;
}
}
/* trap check */
if (rb_thread_check_trap_pending()) {
Tcl_Release(interp);
return TCL_RETURN;
}
/*
* Note: there's no need to delete the event handler. It was
* deleted automatically when the window was destroyed.
*/
break;
}
/*
* Clear out the interpreter's result, since it may have been set
* by event handlers.
*/
Tcl_ResetResult(interp);
Tcl_Release(interp);
return TCL_OK;
} | 0 | [] | tk | ebd0fc80d62eeb7b8556522256f8d035e013eb65 | 70,013,302,892,625,240,000,000,000,000,000,000,000 | 358 | tcltklib.c: check argument
* ext/tk/tcltklib.c (ip_cancel_eval_core): check argument type and
length.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@51468 b2dd03c8-39d4-4d8f-98ff-823fe69b080e |
void set(DTCollation &dt)
{
collation= dt.collation;
derivation= dt.derivation;
repertoire= dt.repertoire;
} | 0 | [] | mysql-server | f7316aa0c9a3909fc7498e7b95d5d3af044a7e21 | 263,050,188,522,964,700,000,000,000,000,000,000,000 | 6 | Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST,
COL), NAME_CONST('NAME', NULL))
Backport of Bug#19143243 fix.
NAME_CONST item can return NULL_ITEM type in case of incorrect arguments.
NULL_ITEM has special processing in Item_func_in function.
In Item_func_in::fix_length_and_dec an array of possible comparators is
created. Since NAME_CONST function has NULL_ITEM type, corresponding
array element is empty. Then NAME_CONST is wrapped to ITEM_CACHE.
ITEM_CACHE cannot return the proper type (NULL_ITEM) in Item_func_in::val_int(),
so the NULL_ITEM ends up being compared with an empty comparator.
The fix is to disable the caching of Item_name_const item. |
_archive_read (struct archive *archive,
void *data,
const void **buffer)
{
ZipArchive *zip = (ZipArchive *)data;
gssize read_bytes;
*buffer = zip->buffer;
read_bytes = g_input_stream_read (G_INPUT_STREAM (zip->stream),
zip->buffer,
sizeof (zip->buffer),
NULL,
&zip->error);
return read_bytes;
} | 0 | [
"CWE-125"
] | libgxps | b458226e162fe1ffe7acb4230c114a52ada5131b | 149,993,062,557,401,080,000,000,000,000,000,000,000 | 15 | gxps-archive: Ensure gxps_archive_read_entry() fills the GError in case of failure
And fix the callers to not overwrite the GError. |
static int usb_host_handle_control(USBHostDevice *s, USBPacket *p)
{
struct usbdevfs_urb *urb;
AsyncURB *aurb;
int ret, value, index;
/*
* Process certain standard device requests.
* These are infrequent and are processed synchronously.
*/
value = le16_to_cpu(s->ctrl.req.wValue);
index = le16_to_cpu(s->ctrl.req.wIndex);
dprintf("husb: ctrl type 0x%x req 0x%x val 0x%x index %u len %u\n",
s->ctrl.req.bRequestType, s->ctrl.req.bRequest, value, index,
s->ctrl.len);
if (s->ctrl.req.bRequestType == 0) {
switch (s->ctrl.req.bRequest) {
case USB_REQ_SET_ADDRESS:
return usb_host_set_address(s, value);
case USB_REQ_SET_CONFIGURATION:
return usb_host_set_config(s, value & 0xff);
}
}
if (s->ctrl.req.bRequestType == 1 &&
s->ctrl.req.bRequest == USB_REQ_SET_INTERFACE)
return usb_host_set_interface(s, index, value);
/* The rest are asynchronous */
aurb = async_alloc();
aurb->hdev = s;
aurb->packet = p;
/*
* Setup ctrl transfer.
*
* s->ctrl is layed out such that data buffer immediately follows
* 'req' struct which is exactly what usbdevfs expects.
*/
urb = &aurb->urb;
urb->type = USBDEVFS_URB_TYPE_CONTROL;
urb->endpoint = p->devep;
urb->buffer = &s->ctrl.req;
urb->buffer_length = 8 + s->ctrl.len;
urb->usercontext = s;
ret = ioctl(s->fd, USBDEVFS_SUBMITURB, urb);
dprintf("husb: submit ctrl. len %u aurb %p\n", urb->buffer_length, aurb);
if (ret < 0) {
dprintf("husb: submit failed. errno %d\n", errno);
async_free(aurb);
switch(errno) {
case ETIMEDOUT:
return USB_RET_NAK;
case EPIPE:
default:
return USB_RET_STALL;
}
}
usb_defer_packet(p, async_cancel, aurb);
return USB_RET_ASYNC;
} | 1 | [
"CWE-119"
] | qemu | babd03fde68093482528010a5435c14ce9128e3f | 312,416,163,750,000,940,000,000,000,000,000,000,000 | 73 | usb-linux.c: fix buffer overflow
In usb-linux.c:usb_host_handle_control, we pass a 1024-byte buffer and
length to the kernel. However, the length was provided by the caller
of dev->handle_packet, and is not checked, so the kernel might provide
too much data and overflow our buffer.
For example, hw/usb-uhci.c could set the length to 2047.
hw/usb-ohci.c looks like it might go up to 4096 or 8192.
This causes a qemu crash, as reported here:
http://www.mail-archive.com/[email protected]/msg18447.html
This patch increases the usb-linux.c buffer size to 2048 to fix the
specific device reported, and adds a check to avoid the overflow in
any case.
Signed-off-by: Jim Paris <[email protected]>
Signed-off-by: Anthony Liguori <[email protected]> |
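The qemu commit message above comes down to one pattern: the caller-supplied transfer length must be bounded by the size of the local buffer before any copy. A minimal self-contained C sketch of that pattern, with illustrative names only (not qemu's actual code):

#include <stdint.h>
#include <string.h>

#define CTRL_BUF_SIZE 2048   /* mirrors the enlarged buffer mentioned above */

/* Validate an untrusted length before touching the fixed-size buffer. */
static int copy_ctrl_data(uint8_t ctrl_buf[CTRL_BUF_SIZE],
                          const uint8_t *src, size_t len)
{
    if (len > CTRL_BUF_SIZE)
        return -1;               /* reject oversized requests outright */
    memcpy(ctrl_buf, src, len);
    return 0;
}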
TEST_F(QueryPlannerTest, NegationEqArray) {
addIndex(BSON("i" << 1));
runQuery(fromjson("{i: {$not: {$eq: [1, 2]}}}"));
assertHasOnlyCollscan();
} | 0 | [] | mongo | 64095239f41e9f3841d8be9088347db56d35c891 | 176,044,595,574,816,800,000,000,000,000,000,000,000 | 6 | SERVER-51083 Reject invalid UTF-8 from $regex match expressions |
lys_parse_mem_(struct ly_ctx *ctx, const char *data, LYS_INFORMAT format, const char *revision, int internal, int implement)
{
char *enlarged_data = NULL;
struct lys_module *mod = NULL;
unsigned int len;
if (!ctx || !data) {
LOGARG;
return NULL;
}
if (!internal && format == LYS_IN_YANG) {
/* enlarge data by 2 bytes for flex */
len = strlen(data);
enlarged_data = malloc((len + 2) * sizeof *enlarged_data);
LY_CHECK_ERR_RETURN(!enlarged_data, LOGMEM(ctx), NULL);
memcpy(enlarged_data, data, len);
enlarged_data[len] = enlarged_data[len + 1] = '\0';
data = enlarged_data;
}
switch (format) {
case LYS_IN_YIN:
mod = yin_read_module(ctx, data, revision, implement);
break;
case LYS_IN_YANG:
mod = yang_read_module(ctx, data, 0, revision, implement);
break;
default:
LOGERR(ctx, LY_EINVAL, "Invalid schema input format.");
break;
}
free(enlarged_data);
/* hack for NETCONF's edit-config's operation attribute. It is not defined in the schema, but since libyang
* implements YANG metadata (annotations), we need its definition. Because the ietf-netconf schema is not the
* internal part of libyang, we cannot add the annotation into the schema source, but we do it here to have
* the anotation definitions available in the internal schema structure. There is another hack in schema
* printers to do not print this internally added annotation. */
if (mod && ly_strequal(mod->name, "ietf-netconf", 0)) {
if (lyp_add_ietf_netconf_annotations_config(mod)) {
lys_free(mod, NULL, 1, 1);
return NULL;
}
}
return mod;
} | 0 | [
"CWE-119"
] | libyang | 32fb4993bc8bb49e93e84016af3c10ea53964be5 | 278,199,465,421,652,860,000,000,000,000,000,000,000 | 49 | schema tree BUGFIX do not check features while still resolving schema
Fixes #723 |
static int rtreeQueryStat1(sqlite3 *db, Rtree *pRtree){
const char *zFmt = "SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'";
char *zSql;
sqlite3_stmt *p;
int rc;
i64 nRow = 0;
rc = sqlite3_table_column_metadata(
db, pRtree->zDb, "sqlite_stat1",0,0,0,0,0,0
);
if( rc!=SQLITE_OK ){
pRtree->nRowEst = RTREE_DEFAULT_ROWEST;
return rc==SQLITE_ERROR ? SQLITE_OK : rc;
}
zSql = sqlite3_mprintf(zFmt, pRtree->zDb, pRtree->zName);
if( zSql==0 ){
rc = SQLITE_NOMEM;
}else{
rc = sqlite3_prepare_v2(db, zSql, -1, &p, 0);
if( rc==SQLITE_OK ){
if( sqlite3_step(p)==SQLITE_ROW ) nRow = sqlite3_column_int64(p, 0);
rc = sqlite3_finalize(p);
}else if( rc!=SQLITE_NOMEM ){
rc = SQLITE_OK;
}
if( rc==SQLITE_OK ){
if( nRow==0 ){
pRtree->nRowEst = RTREE_DEFAULT_ROWEST;
}else{
pRtree->nRowEst = MAX(nRow, RTREE_MIN_ROWEST);
}
}
sqlite3_free(zSql);
}
return rc;
} | 0 | [
"CWE-125"
] | sqlite | e41fd72acc7a06ce5a6a7d28154db1ffe8ba37a8 | 329,911,000,246,831,800,000,000,000,000,000,000,000 | 38 | Enhance the rtreenode() function of rtree (used for testing) so that it
uses the newer sqlite3_str object for better performance and improved
error reporting.
FossilOrigin-Name: 90acdbfce9c088582d5165589f7eac462b00062bbfffacdcc786eb9cf3ea5377 |
static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *next_skb = skb_rb_next(skb);
int skb_size, next_skb_size;
skb_size = skb->len;
next_skb_size = next_skb->len;
BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
if (next_skb_size) {
if (next_skb_size <= skb_availroom(skb))
skb_copy_bits(next_skb, 0, skb_put(skb, next_skb_size),
next_skb_size);
else if (!skb_shift(skb, next_skb, next_skb_size))
return false;
}
tcp_highest_sack_replace(sk, next_skb, skb);
/* Update sequence range on original skb. */
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
/* Merge over control information. This moves PSH/FIN etc. over */
TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
/* All done, get rid of second SKB and account for it so
* packet counting does not break.
*/
TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor;
/* changed transmit queue under us so clear hints */
tcp_clear_retrans_hints_partial(tp);
if (next_skb == tp->retransmit_skb_hint)
tp->retransmit_skb_hint = skb;
tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
tcp_skb_collapse_tstamp(skb, next_skb);
tcp_rtx_queue_unlink_and_free(next_skb, sk);
return true;
} | 0 | [
"CWE-416"
] | linux | 7f582b248d0a86bae5788c548d7bb5bca6f7691a | 250,642,223,092,554,150,000,000,000,000,000,000,000 | 44 | tcp: purge write queue in tcp_connect_init()
syzkaller found a reliable way to crash the host, hitting a BUG()
in __tcp_retransmit_skb()
Malicous MSG_FASTOPEN is the root cause. We need to purge write queue
in tcp_connect_init() at the point we init snd_una/write_seq.
This patch also replaces the BUG() by a less intrusive WARN_ON_ONCE()
kernel BUG at net/ipv4/tcp_output.c:2837!
invalid opcode: 0000 [#1] SMP KASAN
Dumping ftrace buffer:
(ftrace buffer empty)
Modules linked in:
CPU: 0 PID: 5276 Comm: syz-executor0 Not tainted 4.17.0-rc3+ #51
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
RIP: 0010:__tcp_retransmit_skb+0x2992/0x2eb0 net/ipv4/tcp_output.c:2837
RSP: 0000:ffff8801dae06ff8 EFLAGS: 00010206
RAX: ffff8801b9fe61c0 RBX: 00000000ffc18a16 RCX: ffffffff864e1a49
RDX: 0000000000000100 RSI: ffffffff864e2e12 RDI: 0000000000000005
RBP: ffff8801dae073a0 R08: ffff8801b9fe61c0 R09: ffffed0039c40dd2
R10: ffffed0039c40dd2 R11: ffff8801ce206e93 R12: 00000000421eeaad
R13: ffff8801ce206d4e R14: ffff8801ce206cc0 R15: ffff8801cd4f4a80
FS: 0000000000000000(0000) GS:ffff8801dae00000(0063) knlGS:00000000096bc900
CS: 0010 DS: 002b ES: 002b CR0: 0000000080050033
CR2: 0000000020000000 CR3: 00000001c47b6000 CR4: 00000000001406f0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Call Trace:
<IRQ>
tcp_retransmit_skb+0x2e/0x250 net/ipv4/tcp_output.c:2923
tcp_retransmit_timer+0xc50/0x3060 net/ipv4/tcp_timer.c:488
tcp_write_timer_handler+0x339/0x960 net/ipv4/tcp_timer.c:573
tcp_write_timer+0x111/0x1d0 net/ipv4/tcp_timer.c:593
call_timer_fn+0x230/0x940 kernel/time/timer.c:1326
expire_timers kernel/time/timer.c:1363 [inline]
__run_timers+0x79e/0xc50 kernel/time/timer.c:1666
run_timer_softirq+0x4c/0x70 kernel/time/timer.c:1692
__do_softirq+0x2e0/0xaf5 kernel/softirq.c:285
invoke_softirq kernel/softirq.c:365 [inline]
irq_exit+0x1d1/0x200 kernel/softirq.c:405
exiting_irq arch/x86/include/asm/apic.h:525 [inline]
smp_apic_timer_interrupt+0x17e/0x710 arch/x86/kernel/apic/apic.c:1052
apic_timer_interrupt+0xf/0x20 arch/x86/entry/entry_64.S:863
Fixes: cf60af03ca4e ("net-tcp: Fast Open client - sendmsg(MSG_FASTOPEN)")
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Yuchung Cheng <[email protected]>
Cc: Neal Cardwell <[email protected]>
Reported-by: syzbot <[email protected]>
Acked-by: Neal Cardwell <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
apply_autocmds_group(
event_T event,
char_u *fname, /* NULL or empty means use actual file name */
char_u *fname_io, /* fname to use for <afile> on cmdline, NULL means
use fname */
int force, /* when TRUE, ignore autocmd_busy */
int group, /* group ID, or AUGROUP_ALL */
buf_T *buf, /* buffer for <abuf> */
exarg_T *eap) /* command arguments */
{
char_u *sfname = NULL; /* short file name */
char_u *tail;
int save_changed;
buf_T *old_curbuf;
int retval = FALSE;
char_u *save_sourcing_name;
linenr_T save_sourcing_lnum;
char_u *save_autocmd_fname;
int save_autocmd_fname_full;
int save_autocmd_bufnr;
char_u *save_autocmd_match;
int save_autocmd_busy;
int save_autocmd_nested;
static int nesting = 0;
AutoPatCmd patcmd;
AutoPat *ap;
#ifdef FEAT_EVAL
scid_T save_current_SID;
void *save_funccalp;
char_u *save_cmdarg;
long save_cmdbang;
#endif
static int filechangeshell_busy = FALSE;
#ifdef FEAT_PROFILE
proftime_T wait_time;
#endif
int did_save_redobuff = FALSE;
save_redo_T save_redo;
/*
* Quickly return if there are no autocommands for this event or
* autocommands are blocked.
*/
if (event == NUM_EVENTS || first_autopat[(int)event] == NULL
|| autocmd_blocked > 0)
goto BYPASS_AU;
/*
* When autocommands are busy, new autocommands are only executed when
* explicitly enabled with the "nested" flag.
*/
if (autocmd_busy && !(force || autocmd_nested))
goto BYPASS_AU;
#ifdef FEAT_EVAL
/*
* Quickly return when immediately aborting on error, or when an interrupt
* occurred or an exception was thrown but not caught.
*/
if (aborting())
goto BYPASS_AU;
#endif
/*
* FileChangedShell never nests, because it can create an endless loop.
*/
if (filechangeshell_busy && (event == EVENT_FILECHANGEDSHELL
|| event == EVENT_FILECHANGEDSHELLPOST))
goto BYPASS_AU;
/*
* Ignore events in 'eventignore'.
*/
if (event_ignored(event))
goto BYPASS_AU;
/*
* Allow nesting of autocommands, but restrict the depth, because it's
* possible to create an endless loop.
*/
if (nesting == 10)
{
EMSG(_("E218: autocommand nesting too deep"));
goto BYPASS_AU;
}
/*
* Check if these autocommands are disabled. Used when doing ":all" or
* ":ball".
*/
if ( (autocmd_no_enter
&& (event == EVENT_WINENTER || event == EVENT_BUFENTER))
|| (autocmd_no_leave
&& (event == EVENT_WINLEAVE || event == EVENT_BUFLEAVE)))
goto BYPASS_AU;
/*
* Save the autocmd_* variables and info about the current buffer.
*/
save_autocmd_fname = autocmd_fname;
save_autocmd_fname_full = autocmd_fname_full;
save_autocmd_bufnr = autocmd_bufnr;
save_autocmd_match = autocmd_match;
save_autocmd_busy = autocmd_busy;
save_autocmd_nested = autocmd_nested;
save_changed = curbuf->b_changed;
old_curbuf = curbuf;
/*
* Set the file name to be used for <afile>.
* Make a copy to avoid that changing a buffer name or directory makes it
* invalid.
*/
if (fname_io == NULL)
{
if (event == EVENT_COLORSCHEME || event == EVENT_OPTIONSET)
autocmd_fname = NULL;
else if (fname != NULL && !ends_excmd(*fname))
autocmd_fname = fname;
else if (buf != NULL)
autocmd_fname = buf->b_ffname;
else
autocmd_fname = NULL;
}
else
autocmd_fname = fname_io;
if (autocmd_fname != NULL)
autocmd_fname = vim_strsave(autocmd_fname);
autocmd_fname_full = FALSE; /* call FullName_save() later */
/*
* Set the buffer number to be used for <abuf>.
*/
if (buf == NULL)
autocmd_bufnr = 0;
else
autocmd_bufnr = buf->b_fnum;
/*
* When the file name is NULL or empty, use the file name of buffer "buf".
* Always use the full path of the file name to match with, in case
* "allow_dirs" is set.
*/
if (fname == NULL || *fname == NUL)
{
if (buf == NULL)
fname = NULL;
else
{
#ifdef FEAT_SYN_HL
if (event == EVENT_SYNTAX)
fname = buf->b_p_syn;
else
#endif
if (event == EVENT_FILETYPE)
fname = buf->b_p_ft;
else
{
if (buf->b_sfname != NULL)
sfname = vim_strsave(buf->b_sfname);
fname = buf->b_ffname;
}
}
if (fname == NULL)
fname = (char_u *)"";
fname = vim_strsave(fname); /* make a copy, so we can change it */
}
else
{
sfname = vim_strsave(fname);
/* Don't try expanding FileType, Syntax, FuncUndefined, WindowID,
* ColorScheme or QuickFixCmd* */
if (event == EVENT_FILETYPE
|| event == EVENT_SYNTAX
|| event == EVENT_FUNCUNDEFINED
|| event == EVENT_REMOTEREPLY
|| event == EVENT_SPELLFILEMISSING
|| event == EVENT_QUICKFIXCMDPRE
|| event == EVENT_COLORSCHEME
|| event == EVENT_OPTIONSET
|| event == EVENT_QUICKFIXCMDPOST)
fname = vim_strsave(fname);
else
fname = FullName_save(fname, FALSE);
}
if (fname == NULL) /* out of memory */
{
vim_free(sfname);
retval = FALSE;
goto BYPASS_AU;
}
#ifdef BACKSLASH_IN_FILENAME
/*
* Replace all backslashes with forward slashes. This makes the
* autocommand patterns portable between Unix and MS-DOS.
*/
if (sfname != NULL)
forward_slash(sfname);
forward_slash(fname);
#endif
#ifdef VMS
/* remove version for correct match */
if (sfname != NULL)
vms_remove_version(sfname);
vms_remove_version(fname);
#endif
/*
* Set the name to be used for <amatch>.
*/
autocmd_match = fname;
/* Don't redraw while doing auto commands. */
++RedrawingDisabled;
save_sourcing_name = sourcing_name;
sourcing_name = NULL; /* don't free this one */
save_sourcing_lnum = sourcing_lnum;
sourcing_lnum = 0; /* no line number here */
#ifdef FEAT_EVAL
save_current_SID = current_SID;
# ifdef FEAT_PROFILE
if (do_profiling == PROF_YES)
prof_child_enter(&wait_time); /* doesn't count for the caller itself */
# endif
/* Don't use local function variables, if called from a function */
save_funccalp = save_funccal();
#endif
/*
* When starting to execute autocommands, save the search patterns.
*/
if (!autocmd_busy)
{
save_search_patterns();
#ifdef FEAT_INS_EXPAND
if (!ins_compl_active())
#endif
{
saveRedobuff(&save_redo);
did_save_redobuff = TRUE;
}
did_filetype = keep_filetype;
}
/*
* Note that we are applying autocmds. Some commands need to know.
*/
autocmd_busy = TRUE;
filechangeshell_busy = (event == EVENT_FILECHANGEDSHELL);
++nesting; /* see matching decrement below */
/* Remember that FileType was triggered. Used for did_filetype(). */
if (event == EVENT_FILETYPE)
did_filetype = TRUE;
tail = gettail(fname);
/* Find first autocommand that matches */
patcmd.curpat = first_autopat[(int)event];
patcmd.nextcmd = NULL;
patcmd.group = group;
patcmd.fname = fname;
patcmd.sfname = sfname;
patcmd.tail = tail;
patcmd.event = event;
patcmd.arg_bufnr = autocmd_bufnr;
patcmd.next = NULL;
auto_next_pat(&patcmd, FALSE);
/* found one, start executing the autocommands */
if (patcmd.curpat != NULL)
{
/* add to active_apc_list */
patcmd.next = active_apc_list;
active_apc_list = &patcmd;
#ifdef FEAT_EVAL
/* set v:cmdarg (only when there is a matching pattern) */
save_cmdbang = (long)get_vim_var_nr(VV_CMDBANG);
if (eap != NULL)
{
save_cmdarg = set_cmdarg(eap, NULL);
set_vim_var_nr(VV_CMDBANG, (long)eap->forceit);
}
else
save_cmdarg = NULL; /* avoid gcc warning */
#endif
retval = TRUE;
/* mark the last pattern, to avoid an endless loop when more patterns
* are added when executing autocommands */
for (ap = patcmd.curpat; ap->next != NULL; ap = ap->next)
ap->last = FALSE;
ap->last = TRUE;
check_lnums(TRUE); /* make sure cursor and topline are valid */
do_cmdline(NULL, getnextac, (void *)&patcmd,
DOCMD_NOWAIT|DOCMD_VERBOSE|DOCMD_REPEAT);
#ifdef FEAT_EVAL
if (eap != NULL)
{
(void)set_cmdarg(NULL, save_cmdarg);
set_vim_var_nr(VV_CMDBANG, save_cmdbang);
}
#endif
/* delete from active_apc_list */
if (active_apc_list == &patcmd) /* just in case */
active_apc_list = patcmd.next;
}
--RedrawingDisabled;
autocmd_busy = save_autocmd_busy;
filechangeshell_busy = FALSE;
autocmd_nested = save_autocmd_nested;
vim_free(sourcing_name);
sourcing_name = save_sourcing_name;
sourcing_lnum = save_sourcing_lnum;
vim_free(autocmd_fname);
autocmd_fname = save_autocmd_fname;
autocmd_fname_full = save_autocmd_fname_full;
autocmd_bufnr = save_autocmd_bufnr;
autocmd_match = save_autocmd_match;
#ifdef FEAT_EVAL
current_SID = save_current_SID;
restore_funccal(save_funccalp);
# ifdef FEAT_PROFILE
if (do_profiling == PROF_YES)
prof_child_exit(&wait_time);
# endif
#endif
vim_free(fname);
vim_free(sfname);
--nesting; /* see matching increment above */
/*
* When stopping to execute autocommands, restore the search patterns and
* the redo buffer. Free any buffers in the au_pending_free_buf list and
* free any windows in the au_pending_free_win list.
*/
if (!autocmd_busy)
{
restore_search_patterns();
if (did_save_redobuff)
restoreRedobuff(&save_redo);
did_filetype = FALSE;
while (au_pending_free_buf != NULL)
{
buf_T *b = au_pending_free_buf->b_next;
vim_free(au_pending_free_buf);
au_pending_free_buf = b;
}
while (au_pending_free_win != NULL)
{
win_T *w = au_pending_free_win->w_next;
vim_free(au_pending_free_win);
au_pending_free_win = w;
}
}
/*
* Some events don't set or reset the Changed flag.
* Check if still in the same buffer!
*/
if (curbuf == old_curbuf
&& (event == EVENT_BUFREADPOST
|| event == EVENT_BUFWRITEPOST
|| event == EVENT_FILEAPPENDPOST
|| event == EVENT_VIMLEAVE
|| event == EVENT_VIMLEAVEPRE))
{
#ifdef FEAT_TITLE
if (curbuf->b_changed != save_changed)
need_maketitle = TRUE;
#endif
curbuf->b_changed = save_changed;
}
au_cleanup(); /* may really delete removed patterns/commands now */
BYPASS_AU:
/* When wiping out a buffer make sure all its buffer-local autocommands
* are deleted. */
if (event == EVENT_BUFWIPEOUT && buf != NULL)
aubuflocal_remove(buf);
if (retval == OK && event == EVENT_FILETYPE)
au_did_filetype = TRUE;
return retval;
} | 0 | [
"CWE-200",
"CWE-668"
] | vim | 5a73e0ca54c77e067c3b12ea6f35e3e8681e8cf8 | 131,882,964,562,116,880,000,000,000,000,000,000,000 | 394 | patch 8.0.1263: others can read the swap file if a user is careless
Problem: Others can read the swap file if a user is careless with his
primary group.
Solution: If the group permission allows for reading but the world
permissions doesn't, make sure the group is right. |
STATIC regnode_offset
S_regnode_guts(pTHX_ RExC_state_t *pRExC_state, const U8 op, const STRLEN extra_size, const char* const name)
{
/* Allocate a regnode for 'op', with 'extra_size' extra space. It aligns
* and increments RExC_size and RExC_emit
*
* It returns the regnode's offset into the regex engine program */
const regnode_offset ret = RExC_emit;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_REGNODE_GUTS;
SIZE_ALIGN(RExC_size);
change_engine_size(pRExC_state, (Ptrdiff_t) 1 + extra_size);
NODE_ALIGN_FILL(REGNODE_p(ret));
#ifndef RE_TRACK_PATTERN_OFFSETS
PERL_UNUSED_ARG(name);
PERL_UNUSED_ARG(op);
#else
assert(extra_size >= regarglen[op] || PL_regkind[op] == ANYOF);
if (RExC_offsets) { /* MJD */
MJD_OFFSET_DEBUG(
("%s:%d: (op %s) %s %" UVuf " (len %" UVuf ") (max %" UVuf ").\n",
name, __LINE__,
PL_reg_name[op],
(UV)(RExC_emit) > RExC_offsets[0]
? "Overwriting end of array!\n" : "OK",
(UV)(RExC_emit),
(UV)(RExC_parse - RExC_start),
(UV)RExC_offsets[0]));
Set_Node_Offset(REGNODE_p(RExC_emit), RExC_parse + (op == END));
}
#endif
return(ret);
} | 0 | [
"CWE-190",
"CWE-787"
] | perl5 | 897d1f7fd515b828e4b198d8b8bef76c6faf03ed | 283,954,622,368,218,600,000,000,000,000,000,000,000 | 37 | regcomp.c: Prevent integer overflow from nested regex quantifiers.
(CVE-2020-10543) On 32bit systems the size calculations for nested regular
expression quantifiers could overflow causing heap memory corruption.
Fixes: Perl/perl5-security#125
(cherry picked from commit bfd31397db5dc1a5c5d3e0a1f753a4f89a736e71) |
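The perl commit message above is about size arithmetic wrapping on 32-bit builds. A minimal sketch of the overflow-checked size computation such fixes rely on (illustrative helper, not perl's actual regcomp code):

#include <stddef.h>
#include <stdint.h>

/* Store count * elem_size in *out, or return -1 if it would wrap. */
static int checked_total(size_t count, size_t elem_size, size_t *out)
{
    if (elem_size != 0 && count > SIZE_MAX / elem_size)
        return -1;               /* refuse to grow instead of overflowing */
    *out = count * elem_size;
    return 0;
}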
xfs_attr_shortform_list(xfs_attr_list_context_t *context)
{
attrlist_cursor_kern_t *cursor;
xfs_attr_sf_sort_t *sbuf, *sbp;
xfs_attr_shortform_t *sf;
xfs_attr_sf_entry_t *sfe;
xfs_inode_t *dp;
int sbsize, nsbuf, count, i;
int error;
ASSERT(context != NULL);
dp = context->dp;
ASSERT(dp != NULL);
ASSERT(dp->i_afp != NULL);
sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
ASSERT(sf != NULL);
if (!sf->hdr.count)
return 0;
cursor = context->cursor;
ASSERT(cursor != NULL);
trace_xfs_attr_list_sf(context);
/*
* If the buffer is large enough and the cursor is at the start,
* do not bother with sorting since we will return everything in
* one buffer and another call using the cursor won't need to be
* made.
* Note the generous fudge factor of 16 overhead bytes per entry.
* If bufsize is zero then put_listent must be a search function
* and can just scan through what we have.
*/
if (context->bufsize == 0 ||
(XFS_ISRESET_CURSOR(cursor) &&
(dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) {
for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
error = context->put_listent(context,
sfe->flags,
sfe->nameval,
(int)sfe->namelen,
(int)sfe->valuelen,
&sfe->nameval[sfe->namelen]);
/*
* Either search callback finished early or
* didn't fit it all in the buffer after all.
*/
if (context->seen_enough)
break;
if (error)
return error;
sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
}
trace_xfs_attr_list_sf_all(context);
return 0;
}
/* do no more for a search callback */
if (context->bufsize == 0)
return 0;
/*
* It didn't all fit, so we have to sort everything on hashval.
*/
sbsize = sf->hdr.count * sizeof(*sbuf);
sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS);
/*
* Scan the attribute list for the rest of the entries, storing
* the relevant info from only those that match into a buffer.
*/
nsbuf = 0;
for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
if (unlikely(
((char *)sfe < (char *)sf) ||
((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) {
XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
XFS_ERRLEVEL_LOW,
context->dp->i_mount, sfe);
kmem_free(sbuf);
return -EFSCORRUPTED;
}
sbp->entno = i;
sbp->hash = xfs_da_hashname(sfe->nameval, sfe->namelen);
sbp->name = sfe->nameval;
sbp->namelen = sfe->namelen;
/* These are bytes, and both on-disk, don't endian-flip */
sbp->valuelen = sfe->valuelen;
sbp->flags = sfe->flags;
sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
sbp++;
nsbuf++;
}
/*
* Sort the entries on hash then entno.
*/
xfs_sort(sbuf, nsbuf, sizeof(*sbuf), xfs_attr_shortform_compare);
/*
* Re-find our place IN THE SORTED LIST.
*/
count = 0;
cursor->initted = 1;
cursor->blkno = 0;
for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) {
if (sbp->hash == cursor->hashval) {
if (cursor->offset == count) {
break;
}
count++;
} else if (sbp->hash > cursor->hashval) {
break;
}
}
if (i == nsbuf) {
kmem_free(sbuf);
return 0;
}
/*
* Loop putting entries into the user buffer.
*/
for ( ; i < nsbuf; i++, sbp++) {
if (cursor->hashval != sbp->hash) {
cursor->hashval = sbp->hash;
cursor->offset = 0;
}
error = context->put_listent(context,
sbp->flags,
sbp->name,
sbp->namelen,
sbp->valuelen,
&sbp->name[sbp->namelen]);
if (error) {
kmem_free(sbuf);
return error;
}
if (context->seen_enough)
break;
cursor->offset++;
}
kmem_free(sbuf);
return 0;
} | 0 | [
"CWE-400",
"CWE-703"
] | linux | 2e83b79b2d6c78bf1b4aa227938a214dcbddc83f | 304,119,451,401,702,100,000,000,000,000,000,000,000 | 148 | xfs: fix two memory leaks in xfs_attr_list.c error paths
This plugs 2 trivial leaks in xfs_attr_shortform_list and
xfs_attr3_leaf_list_int.
Signed-off-by: Mateusz Guzik <[email protected]>
Cc: <[email protected]>
Reviewed-by: Eric Sandeen <[email protected]>
Signed-off-by: Dave Chinner <[email protected]> |
explicit HeartbeatSession(int p) : peer(p) {} | 0 | [
"CWE-287",
"CWE-284"
] | ceph | 5ead97120e07054d80623dada90a5cc764c28468 | 165,756,387,741,792,260,000,000,000,000,000,000,000 | 1 | auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random() |
ins_compl_del_pum(void)
{
if (compl_match_array == NULL)
return;
pum_undisplay();
VIM_CLEAR(compl_match_array);
} | 0 | [
"CWE-125"
] | vim | f12129f1714f7d2301935bb21d896609bdac221c | 73,312,351,910,750,230,000,000,000,000,000,000,000 | 8 | patch 9.0.0020: with some completion reading past end of string
Problem: With some completion reading past end of string.
Solution: Check the length of the string. |
port_name_needs_quotes(const char *port_name)
{
if (!isalpha((unsigned char) port_name[0])) {
return true;
}
for (const char *p = port_name + 1; *p; p++) {
if (!isalnum((unsigned char) *p)) {
return true;
}
}
return false;
} | 0 | [
"CWE-772"
] | ovs | 77ad4225d125030420d897c873e4734ac708c66b | 264,729,311,680,850,440,000,000,000,000,000,000,000 | 13 | ofp-util: Fix memory leaks on error cases in ofputil_decode_group_mod().
Found by libFuzzer.
Reported-by: Bhargava Shastry <[email protected]>
Signed-off-by: Ben Pfaff <[email protected]>
Acked-by: Justin Pettit <[email protected]> |
AllowedOnSecondary secondaryAllowed(ServiceContext*) const override {
return AllowedOnSecondary::kNever;
} | 0 | [
"CWE-613"
] | mongo | e55d6e2292e5dbe2f97153251d8193d1cc89f5d7 | 62,111,641,490,205,990,000,000,000,000,000,000,000 | 3 | SERVER-38984 Validate unique User ID on UserCache hit |
void setFilterChainName(absl::string_view filter_chain_name) override {
filter_chain_name_ = std::string(filter_chain_name);
} | 0 | [
"CWE-416"
] | envoy | fe7c69c248f4fe5a9080c7ccb35275b5218bb5ab | 81,327,496,823,727,740,000,000,000,000,000,000,000 | 3 | internal redirect: fix a lifetime bug (#785)
Signed-off-by: Alyssa Wilk <[email protected]>
Signed-off-by: Matt Klein <[email protected]>
Signed-off-by: Pradeep Rao <[email protected]> |
int netns_identify_pid(const char *pidstr, char *name, int len)
{
char net_path[PATH_MAX];
int netns;
struct stat netst;
DIR *dir;
struct dirent *entry;
name[0] = '\0';
snprintf(net_path, sizeof(net_path), "/proc/%s/ns/net", pidstr);
netns = open(net_path, O_RDONLY);
if (netns < 0) {
fprintf(stderr, "Cannot open network namespace: %s\n",
strerror(errno));
return -1;
}
if (fstat(netns, &netst) < 0) {
fprintf(stderr, "Stat of netns failed: %s\n",
strerror(errno));
return -1;
}
dir = opendir(NETNS_RUN_DIR);
if (!dir) {
/* Succeed treat a missing directory as an empty directory */
if (errno == ENOENT)
return 0;
fprintf(stderr, "Failed to open directory %s:%s\n",
NETNS_RUN_DIR, strerror(errno));
return -1;
}
while ((entry = readdir(dir))) {
char name_path[PATH_MAX];
struct stat st;
if (strcmp(entry->d_name, ".") == 0)
continue;
if (strcmp(entry->d_name, "..") == 0)
continue;
snprintf(name_path, sizeof(name_path), "%s/%s", NETNS_RUN_DIR,
entry->d_name);
if (stat(name_path, &st) != 0)
continue;
if ((st.st_dev == netst.st_dev) &&
(st.st_ino == netst.st_ino)) {
strlcpy(name, entry->d_name, len);
}
}
closedir(dir);
return 0;
} | 0 | [
"CWE-416"
] | iproute2 | 9bf2c538a0eb10d66e2365a655bf6c52f5ba3d10 | 21,609,782,636,877,407,000,000,000,000,000,000,000 | 57 | ipnetns: use-after-free problem in get_netnsid_from_name func
Follow the following steps:
# ip netns add net1
# export MALLOC_MMAP_THRESHOLD_=0
# ip netns list
then Segmentation fault (core dumped) will occur.
In get_netnsid_from_name func, answer is freed before
rta_getattr_u32(tb[NETNSA_NSID]), where tb[] refers to answer`s
content. If we set MALLOC_MMAP_THRESHOLD_=0, mmap will be used to
malloc memory, which will be freed immediately after calling free
func. So reading tb[NETNSA_NSID] will access the released memory
after free(answer).
Here, we will call get_netnsid_from_name(tb[NETNSA_NSID]) before free(answer).
Fixes: 86bf43c7c2f ("lib/libnetlink: update rtnl_talk to support malloc buff at run time")
Reported-by: Huiying Kou <[email protected]>
Signed-off-by: Zhiqiang Liu <[email protected]>
Acked-by: Phil Sutter <[email protected]>
Signed-off-by: Stephen Hemminger <[email protected]> |
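The iproute2 commit message above pins the bug on ordering: the netlink attribute is read after the buffer that holds it has been freed. A minimal self-contained C sketch of the corrected ordering, with hypothetical names (not the libnetlink API):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* 'attr' points into the heap buffer 'answer'; copy out before freeing. */
static uint32_t read_then_free(void *answer, const void *attr)
{
    uint32_t id;
    memcpy(&id, attr, sizeof(id));   /* use the data while it is still valid */
    free(answer);                    /* release only after the read */
    return id;
}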
PHP_MINIT_FUNCTION(xml)
{
le_xml_parser = zend_register_list_destructors_ex(xml_parser_dtor, NULL, "xml", module_number);
REGISTER_LONG_CONSTANT("XML_ERROR_NONE", XML_ERROR_NONE, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("XML_ERROR_NO_MEMORY", XML_ERROR_NO_MEMORY, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("XML_ERROR_SYNTAX", XML_ERROR_SYNTAX, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("XML_ERROR_NO_ELEMENTS", XML_ERROR_NO_ELEMENTS, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("XML_ERROR_INVALID_TOKEN", XML_ERROR_INVALID_TOKEN, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("XML_ERROR_UNCLOSED_TOKEN", XML_ERROR_UNCLOSED_TOKEN, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("XML_ERROR_PARTIAL_CHAR", XML_ERROR_PARTIAL_CHAR, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("XML_ERROR_TAG_MISMATCH", XML_ERROR_TAG_MISMATCH, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("XML_ERROR_DUPLICATE_ATTRIBUTE", XML_ERROR_DUPLICATE_ATTRIBUTE, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("XML_ERROR_JUNK_AFTER_DOC_ELEMENT", XML_ERROR_JUNK_AFTER_DOC_ELEMENT, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("XML_ERROR_PARAM_ENTITY_REF", XML_ERROR_PARAM_ENTITY_REF, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("XML_ERROR_UNDEFINED_ENTITY", XML_ERROR_UNDEFINED_ENTITY, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("XML_ERROR_RECURSIVE_ENTITY_REF", XML_ERROR_RECURSIVE_ENTITY_REF, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("XML_ERROR_ASYNC_ENTITY", XML_ERROR_ASYNC_ENTITY, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("XML_ERROR_BAD_CHAR_REF", XML_ERROR_BAD_CHAR_REF, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("XML_ERROR_BINARY_ENTITY_REF", XML_ERROR_BINARY_ENTITY_REF, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("XML_ERROR_ATTRIBUTE_EXTERNAL_ENTITY_REF", XML_ERROR_ATTRIBUTE_EXTERNAL_ENTITY_REF, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("XML_ERROR_MISPLACED_XML_PI", XML_ERROR_MISPLACED_XML_PI, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("XML_ERROR_UNKNOWN_ENCODING", XML_ERROR_UNKNOWN_ENCODING, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("XML_ERROR_INCORRECT_ENCODING", XML_ERROR_INCORRECT_ENCODING, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("XML_ERROR_UNCLOSED_CDATA_SECTION", XML_ERROR_UNCLOSED_CDATA_SECTION, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("XML_ERROR_EXTERNAL_ENTITY_HANDLING", XML_ERROR_EXTERNAL_ENTITY_HANDLING, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("XML_OPTION_CASE_FOLDING", PHP_XML_OPTION_CASE_FOLDING, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("XML_OPTION_TARGET_ENCODING", PHP_XML_OPTION_TARGET_ENCODING, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("XML_OPTION_SKIP_TAGSTART", PHP_XML_OPTION_SKIP_TAGSTART, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("XML_OPTION_SKIP_WHITE", PHP_XML_OPTION_SKIP_WHITE, CONST_CS|CONST_PERSISTENT);
/* this object should not be pre-initialised at compile time,
as the order of members may vary */
php_xml_mem_hdlrs.malloc_fcn = php_xml_malloc_wrapper;
php_xml_mem_hdlrs.realloc_fcn = php_xml_realloc_wrapper;
php_xml_mem_hdlrs.free_fcn = php_xml_free_wrapper;
#ifdef LIBXML_EXPAT_COMPAT
REGISTER_STRING_CONSTANT("XML_SAX_IMPL", "libxml", CONST_CS|CONST_PERSISTENT);
#else
REGISTER_STRING_CONSTANT("XML_SAX_IMPL", "expat", CONST_CS|CONST_PERSISTENT);
#endif
return SUCCESS;
} | 1 | [
"CWE-119"
] | php-src | 1248079be837808da4c97364fb3b4c96c8015fbf | 132,570,834,273,027,740,000,000,000,000,000,000,000 | 47 | Fix bug #72099: xml_parse_into_struct segmentation fault |
SecureRandomDataProvider::~SecureRandomDataProvider()
{
} | 0 | [
"CWE-787"
] | qpdf | d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e | 32,266,484,743,026,854,000,000,000,000,000,000,000 | 3 | Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition. |
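The qpdf commit message above describes replacing silent narrowing conversions with explicit, range-checked ones. A minimal C sketch of such a checked conversion (qpdf itself raises a C++ exception; abort() stands in here, and the helper name is illustrative):

#include <limits.h>
#include <stdlib.h>

static int to_int_checked(long long v)
{
    if (v < INT_MIN || v > INT_MAX)
        abort();          /* fail loudly instead of silently truncating */
    return (int)v;
}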
void clear_binlog_table_maps() {
binlog_table_maps= 0;
} | 0 | [
"CWE-416"
] | server | 4681b6f2d8c82b4ec5cf115e83698251963d80d5 | 307,088,764,706,398,670,000,000,000,000,000,000,000 | 3 | MDEV-26281 ASAN use-after-poison when complex conversion is involved in blob
the bug was that in_vector array in Item_func_in was allocated in the
statement arena, not in the table->expr_arena.
revert part of the 5acd391e8b2d. Instead, change the arena correctly
in fix_all_session_vcol_exprs().
Remove TABLE_ARENA, that was introduced in 5acd391e8b2d to force
item tree changes to be rolled back (because they were allocated in the
wrong arena and didn't persist. now they do) |
int main(int argc, char *argv[]) {
struct mschm_decompressor *chmd;
struct mschmd_header *chm;
struct mschmd_file *file, **f;
unsigned int numf, i;
setbuf(stdout, NULL);
setbuf(stderr, NULL);
user_umask = umask(0); umask(user_umask);
MSPACK_SYS_SELFTEST(i);
if (i) return 0;
if ((chmd = mspack_create_chm_decompressor(NULL))) {
for (argv++; *argv; argv++) {
printf("%s\n", *argv);
if ((chm = chmd->open(chmd, *argv))) {
/* build an ordered list of files for maximum extraction speed */
for (numf=0, file=chm->files; file; file = file->next) numf++;
if ((f = (struct mschmd_file **) calloc(numf, sizeof(struct mschmd_file *)))) {
for (i=0, file=chm->files; file; file = file->next) f[i++] = file;
qsort(f, numf, sizeof(struct mschmd_file *), &sortfunc);
for (i = 0; i < numf; i++) {
char *outname = create_output_name(f[i]->filename);
printf("Extracting %s\n", outname);
ensure_filepath(outname);
if (chmd->extract(chmd, f[i], outname)) {
printf("%s: extract error on \"%s\": %s\n",
*argv, f[i]->filename, ERROR(chmd));
}
free(outname);
}
free(f);
}
chmd->close(chmd, chm);
}
else {
printf("%s: can't open -- %s\n", *argv, ERROR(chmd));
}
}
mspack_destroy_chm_decompressor(chmd);
}
return 0;
} | 0 | [
"CWE-22"
] | libmspack | 7cadd489698be117c47efcadd742651594429e6d | 131,554,311,186,808,100,000,000,000,000,000,000,000 | 46 | add anti "../" and leading slash protection to chmextract |
EIGEN_STRONG_INLINE bool operator==(const QUInt16 a, const QUInt16 b) {
return a.value == b.value;
} | 0 | [
"CWE-908",
"CWE-787"
] | tensorflow | ace0c15a22f7f054abcc1f53eabbcb0a1239a9e2 | 76,097,394,122,719,090,000,000,000,000,000,000,000 | 3 | Default initialize fixed point Eigen types.
In certain cases, tensors are filled with default values of the type. But, for these fixed point types, these values were uninitialized. Thus, we would have uninitialized memory access bugs, some of which were caught by MSAN.
PiperOrigin-RevId: 344101137
Change-Id: I14555fda74dca3b5f1582da9008901937e3f14e2 |
virtual GBool useDrawForm() { return gFalse; } | 0 | [] | poppler | abf167af8b15e5f3b510275ce619e6fdb42edd40 | 198,916,050,141,505,380,000,000,000,000,000,000,000 | 1 | Implement tiling/patterns in SplashOutputDev
Fixes bug 13518 |
static inline int skipthis(struct SISTREAM *s) {
return skip(s, ALIGN4(s->fsize[s->level]));
} | 0 | [
"CWE-189"
] | clamav-devel | c6870a6c857dd722dffaf6d37ae52ec259d12492 | 188,280,382,783,459,000,000,000,000,000,000,000,000 | 3 | bb #6808 |
static size_t push_pipe(struct iov_iter *i, size_t size,
int *idxp, size_t *offp)
{
struct pipe_inode_info *pipe = i->pipe;
size_t off;
int idx;
ssize_t left;
if (unlikely(size > i->count))
size = i->count;
if (unlikely(!size))
return 0;
left = size;
data_start(i, &idx, &off);
*idxp = idx;
*offp = off;
if (off) {
left -= PAGE_SIZE - off;
if (left <= 0) {
pipe->bufs[idx].len += size;
return size;
}
pipe->bufs[idx].len = PAGE_SIZE;
idx = next_idx(idx, pipe);
}
while (idx != pipe->curbuf || !pipe->nrbufs) {
struct page *page = alloc_page(GFP_USER);
if (!page)
break;
pipe->nrbufs++;
pipe->bufs[idx].ops = &default_pipe_buf_ops;
pipe->bufs[idx].page = page;
pipe->bufs[idx].offset = 0;
if (left <= PAGE_SIZE) {
pipe->bufs[idx].len = left;
return size;
}
pipe->bufs[idx].len = PAGE_SIZE;
left -= PAGE_SIZE;
idx = next_idx(idx, pipe);
}
return size - left;
} | 0 | [
"CWE-200"
] | linux | b9dc6f65bc5e232d1c05fe34b5daadc7e8bbf1fb | 339,488,904,149,965,430,000,000,000,000,000,000,000 | 44 | fix a fencepost error in pipe_advance()
The logics in pipe_advance() used to release all buffers past the new
position failed in cases when the number of buffers to release was equal
to pipe->buffers. If that happened, none of them had been released,
leaving pipe full. Worse, it was trivial to trigger and we end up with
pipe full of uninitialized pages. IOW, it's an infoleak.
Cc: [email protected] # v4.9
Reported-by: "Alan J. Wylie" <[email protected]>
Tested-by: "Alan J. Wylie" <[email protected]>
Signed-off-by: Al Viro <[email protected]> |
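The commit message above describes a fencepost case: when the number of buffers to release equals the pipe's capacity, the release logic does nothing. A self-contained illustration of that ambiguity (an illustration of the bug class, not the kernel code):

#include <assert.h>

#define CAP 16u

/* Distance between two ring indices: 0 means both "nothing to drop"
 * and "a full ring to drop" -- exactly the ambiguity behind the bug. */
static unsigned ring_distance(unsigned from, unsigned to)
{
    return (to - from) % CAP;
}

int main(void)
{
    assert(ring_distance(3u, 7u) == 4u);  /* partial release works */
    assert(ring_distance(3u, 3u) == 0u);  /* full-wrap release is lost */
    return 0;
}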
static const char *wsgi_add_import_script(cmd_parms *cmd, void *mconfig,
const char *args)
{
WSGIScriptFile *object = NULL;
const char *option = NULL;
const char *value = NULL;
if (!wsgi_import_list) {
wsgi_import_list = apr_array_make(cmd->pool, 20,
sizeof(WSGIScriptFile));
}
object = (WSGIScriptFile *)apr_array_push(wsgi_import_list);
object->handler_script = ap_getword_conf(cmd->pool, &args);
object->process_group = NULL;
object->application_group = NULL;
if (!object->handler_script || !*object->handler_script)
return "Location of import script not supplied.";
while (*args) {
if (wsgi_parse_option(cmd->pool, &args, &option,
&value) != APR_SUCCESS) {
return "Invalid option to WSGI import script definition.";
}
if (!strcmp(option, "application-group")) {
if (!*value)
return "Invalid name for WSGI application group.";
object->application_group = value;
}
#if defined(MOD_WSGI_WITH_DAEMONS)
else if (!strcmp(option, "process-group")) {
if (!*value)
return "Invalid name for WSGI process group.";
object->process_group = value;
}
#endif
else
return "Invalid option to WSGI import script definition.";
}
if (!object->application_group)
return "Name of WSGI application group required.";
if (!strcmp(object->application_group, "%{GLOBAL}"))
object->application_group = "";
#if defined(MOD_WSGI_WITH_DAEMONS)
if (!object->process_group)
return "Name of WSGI process group required.";
if (!strcmp(object->process_group, "%{GLOBAL}"))
object->process_group = "";
if (*object->process_group) {
WSGIProcessGroup *group = NULL;
WSGIProcessGroup *entries = NULL;
WSGIProcessGroup *entry = NULL;
int i;
if (!wsgi_daemon_list)
return "WSGI process group not yet configured.";
entries = (WSGIProcessGroup *)wsgi_daemon_list->elts;
for (i = 0; i < wsgi_daemon_list->nelts; ++i) {
entry = &entries[i];
if (!strcmp(entry->name, object->process_group)) {
group = entry;
break;
}
}
if (!group)
return "WSGI process group not yet configured.";
if (group->server != cmd->server && group->server->is_virtual)
return "WSGI process group not accessible.";
}
#else
object->process_group = "";
#endif
if (!*object->process_group)
wsgi_python_required = 1;
return NULL;
} | 0 | [
"CWE-254"
] | mod_wsgi | 545354a80b9cc20d8b6916ca30542eab36c3b8bd | 83,096,530,187,157,170,000,000,000,000,000,000,000 | 94 | When there is any sort of error in setting up daemon process group, kill the process rather than risk running in an unexpected state. |
void net_configure_sandbox_ip(Bridge *br) {
assert(br);
if (br->configured == 0)
return;
if (br->arg_ip_none)
br->ipsandbox = 0;
else if (br->ipsandbox) {
// check network range
char *rv = in_netrange(br->ipsandbox, br->ip, br->mask);
if (rv) {
fprintf(stderr, "%s\n", rv);
exit(1);
}
// send an ARP request and check if there is anybody on this IP address
if (arp_check(br->dev, br->ipsandbox)) {
fprintf(stderr, "Error: IP address %d.%d.%d.%d is already in use\n", PRINT_IP(br->ipsandbox));
exit(1);
}
}
else
// ip address assigned by arp-scan for a bridge device
br->ipsandbox = arp_assign(br->dev, br); //br->ip, br->mask);
} | 0 | [
"CWE-269",
"CWE-94"
] | firejail | 27cde3d7d1e4e16d4190932347c7151dc2a84c50 | 293,041,628,548,668,340,000,000,000,000,000,000,000 | 24 | fixing CVE-2022-31214 |
static vpx_codec_err_t ctrl_set_row_mt(vpx_codec_alg_priv_t *ctx,
va_list args) {
ctx->row_mt = va_arg(args, int);
return VPX_CODEC_OK;
} | 0 | [
"CWE-125"
] | libvpx | 0681cff1ad36b3ef8ec242f59b5a6c4234ccfb88 | 149,239,433,667,042,100,000,000,000,000,000,000,000 | 6 | vp9: fix OOB read in decoder_peek_si_internal
Profile 1 or 3 bitstreams may require 11 bytes for the header in the
intra-only case.
Additionally add a check on the bit reader's error handler callback to
ensure it's non-NULL before calling to avoid future regressions.
This has existed since at least (pre-1.4.0):
09bf1d61c Changes hdr for profiles > 1 for intraonly frames
BUG=webm:1543
Change-Id: I23901e6e3a219170e8ea9efecc42af0be2e5c378 |
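Part of the libvpx fix above is defensive: ensure the bit reader's error-handler callback is non-NULL before invoking it. A minimal sketch of that pattern (illustrative types, not libvpx's actual structures):

#include <stddef.h>

typedef void (*error_cb)(void *ctx, const char *msg);

static void report_error(error_cb cb, void *ctx, const char *msg)
{
    if (cb != NULL)
        cb(ctx, msg);   /* never jump through a NULL function pointer */
}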
e_named_parameters_unref (ENamedParameters *params)
{
g_ptr_array_unref ((GPtrArray *) params);
} | 0 | [
"CWE-295"
] | evolution-data-server | 6672b8236139bd6ef41ecb915f4c72e2a052dba5 | 282,592,086,213,745,800,000,000,000,000,000,000,000 | 4 | Let child source with 'none' authentication method use collection source authentication
That might be the same as having set NULL authentication method.
Related to https://gitlab.gnome.org/GNOME/evolution-ews/issues/27 |
void fatal(const char* s, int code)
{
static int recurse_code = 0;
if (recurse_code) {
/* We were called recursively. Just give up */
proc_cleanup();
exit(recurse_code);
}
recurse_code = code;
if (nntp_out) {
prot_printf(nntp_out, "400 Fatal error: %s\r\n", s);
prot_flush(nntp_out);
}
if (stage) append_removestage(stage);
syslog(LOG_ERR, "Fatal error: %s", s);
shut_down(code);
} | 0 | [
"CWE-119"
] | cyrus-imapd | 0f8f026699829b65733c3081657b24e2174f4f4d | 290,624,578,265,429,360,000,000,000,000,000,000,000 | 18 | CVE-2011-3208 - fix buffer overflow in nntpd |