func
stringlengths 0
484k
| target
int64 0
1
| cwe
listlengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
bool SplashOutputDev::getVectorAntialias() {
return splash->getVectorAntialias();
}
| 0 |
[
"CWE-369"
] |
poppler
|
b224e2f5739fe61de9fa69955d016725b2a4b78d
| 289,212,227,544,274,260,000,000,000,000,000,000,000 | 3 |
SplashOutputDev::tilingPatternFill: Fix crash on broken file
Issue #802
|
static void cjson_replace_item_in_object_should_preserve_name(void)
{
cJSON root[1] = {{ NULL, NULL, NULL, 0, NULL, 0, 0, NULL }};
cJSON *child = NULL;
cJSON *replacement = NULL;
child = cJSON_CreateNumber(1);
TEST_ASSERT_NOT_NULL(child);
replacement = cJSON_CreateNumber(2);
TEST_ASSERT_NOT_NULL(replacement);
cJSON_AddItemToObject(root, "child", child);
cJSON_ReplaceItemInObject(root, "child", replacement);
TEST_ASSERT_TRUE(root->child == replacement);
TEST_ASSERT_EQUAL_STRING("child", replacement->string);
cJSON_Delete(replacement);
}
| 0 |
[
"CWE-754",
"CWE-787"
] |
cJSON
|
be749d7efa7c9021da746e685bd6dec79f9dd99b
| 146,625,020,981,544,200,000,000,000,000,000,000,000 | 19 |
Fix crash of cJSON_GetObjectItemCaseSensitive when calling it on arrays
|
compression::algorithm compression_algorithm()
{
return comp_algorithm_;
}
| 0 |
[
"CWE-416"
] |
Crow
|
fba01dc76d6ea940ad7c8392e8f39f9647241d8e
| 17,883,745,427,004,279,000,000,000,000,000,000,000 | 4 |
Prevent HTTP pipelining which Crow doesn't support.
|
bool AuthorizationSessionImpl::isAuthorizedForAnyActionOnResource(const ResourcePattern& resource) {
if (_externalState->shouldIgnoreAuthChecks()) {
return true;
}
std::array<ResourcePattern, resourceSearchListCapacity> resourceSearchList;
const int resourceSearchListLength =
buildResourceSearchList(resource, resourceSearchList.data());
for (int i = 0; i < resourceSearchListLength; ++i) {
for (const auto& user : _authenticatedUsers) {
if (user->hasActionsForResource(resourceSearchList[i])) {
return true;
}
}
}
return false;
}
| 0 |
[
"CWE-613"
] |
mongo
|
e55d6e2292e5dbe2f97153251d8193d1cc89f5d7
| 49,855,779,206,424,370,000,000,000,000,000,000,000 | 19 |
SERVER-38984 Validate unique User ID on UserCache hit
|
static void register_http_post_files_variable_ex(char *var, zval *val, zval *http_post_files, zend_bool override_protection TSRMLS_DC) /* {{{ */
{
safe_php_register_variable_ex(var, val, http_post_files, override_protection TSRMLS_CC);
}
| 0 |
[
"CWE-399"
] |
php-src
|
4605d536d23b00813d11cc906bb48d39bdcf5f25
| 213,642,126,210,803,420,000,000,000,000,000,000,000 | 4 |
Fixed bug #69364 - use smart_str to assemble strings
|
static inline size_t GetPixelMetaChannels(const Image *magick_restrict image)
{
return(image->number_meta_channels);
}
| 0 |
[
"CWE-20",
"CWE-125"
] |
ImageMagick
|
8187d2d8fd010d2d6b1a3a8edd935beec404dddc
| 291,232,954,138,008,000,000,000,000,000,000,000,000 | 4 |
https://github.com/ImageMagick/ImageMagick/issues/1610
|
static void free_user(struct kref *ref)
{
struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
kfree(user);
}
| 1 |
[
"CWE-416",
"CWE-284"
] |
linux
|
77f8269606bf95fcb232ee86f6da80886f1dfae8
| 193,842,034,869,084,300,000,000,000,000,000,000,000 | 5 |
ipmi: fix use-after-free of user->release_barrier.rda
When we do the following test, we got oops in ipmi_msghandler driver
while((1))
do
service ipmievd restart & service ipmievd restart
done
---------------------------------------------------------------
[ 294.230186] Unable to handle kernel paging request at virtual address 0000803fea6ea008
[ 294.230188] Mem abort info:
[ 294.230190] ESR = 0x96000004
[ 294.230191] Exception class = DABT (current EL), IL = 32 bits
[ 294.230193] SET = 0, FnV = 0
[ 294.230194] EA = 0, S1PTW = 0
[ 294.230195] Data abort info:
[ 294.230196] ISV = 0, ISS = 0x00000004
[ 294.230197] CM = 0, WnR = 0
[ 294.230199] user pgtable: 4k pages, 48-bit VAs, pgdp = 00000000a1c1b75a
[ 294.230201] [0000803fea6ea008] pgd=0000000000000000
[ 294.230204] Internal error: Oops: 96000004 [#1] SMP
[ 294.235211] Modules linked in: nls_utf8 isofs rpcrdma ib_iser ib_srpt target_core_mod ib_srp scsi_transport_srp ib_ipoib rdma_ucm ib_umad rdma_cm ib_cm iw_cm dm_mirror dm_region_hash dm_log dm_mod aes_ce_blk crypto_simd cryptd aes_ce_cipher ghash_ce sha2_ce ses sha256_arm64 sha1_ce hibmc_drm hisi_sas_v2_hw enclosure sg hisi_sas_main sbsa_gwdt ip_tables mlx5_ib ib_uverbs marvell ib_core mlx5_core ixgbe ipmi_si mdio hns_dsaf ipmi_devintf ipmi_msghandler hns_enet_drv hns_mdio
[ 294.277745] CPU: 3 PID: 0 Comm: swapper/3 Kdump: loaded Not tainted 5.0.0-rc2+ #113
[ 294.285511] Hardware name: Huawei TaiShan 2280 /BC11SPCD, BIOS 1.37 11/21/2017
[ 294.292835] pstate: 80000005 (Nzcv daif -PAN -UAO)
[ 294.297695] pc : __srcu_read_lock+0x38/0x58
[ 294.301940] lr : acquire_ipmi_user+0x2c/0x70 [ipmi_msghandler]
[ 294.307853] sp : ffff00001001bc80
[ 294.311208] x29: ffff00001001bc80 x28: ffff0000117e5000
[ 294.316594] x27: 0000000000000000 x26: dead000000000100
[ 294.321980] x25: dead000000000200 x24: ffff803f6bd06800
[ 294.327366] x23: 0000000000000000 x22: 0000000000000000
[ 294.332752] x21: ffff00001001bd04 x20: ffff80df33d19018
[ 294.338137] x19: ffff80df33d19018 x18: 0000000000000000
[ 294.343523] x17: 0000000000000000 x16: 0000000000000000
[ 294.348908] x15: 0000000000000000 x14: 0000000000000002
[ 294.354293] x13: 0000000000000000 x12: 0000000000000000
[ 294.359679] x11: 0000000000000000 x10: 0000000000100000
[ 294.365065] x9 : 0000000000000000 x8 : 0000000000000004
[ 294.370451] x7 : 0000000000000000 x6 : ffff80df34558678
[ 294.375836] x5 : 000000000000000c x4 : 0000000000000000
[ 294.381221] x3 : 0000000000000001 x2 : 0000803fea6ea000
[ 294.386607] x1 : 0000803fea6ea008 x0 : 0000000000000001
[ 294.391994] Process swapper/3 (pid: 0, stack limit = 0x0000000083087293)
[ 294.398791] Call trace:
[ 294.401266] __srcu_read_lock+0x38/0x58
[ 294.405154] acquire_ipmi_user+0x2c/0x70 [ipmi_msghandler]
[ 294.410716] deliver_response+0x80/0xf8 [ipmi_msghandler]
[ 294.416189] deliver_local_response+0x28/0x68 [ipmi_msghandler]
[ 294.422193] handle_one_recv_msg+0x158/0xcf8 [ipmi_msghandler]
[ 294.432050] handle_new_recv_msgs+0xc0/0x210 [ipmi_msghandler]
[ 294.441984] smi_recv_tasklet+0x8c/0x158 [ipmi_msghandler]
[ 294.451618] tasklet_action_common.isra.5+0x88/0x138
[ 294.460661] tasklet_action+0x2c/0x38
[ 294.468191] __do_softirq+0x120/0x2f8
[ 294.475561] irq_exit+0x134/0x140
[ 294.482445] __handle_domain_irq+0x6c/0xc0
[ 294.489954] gic_handle_irq+0xb8/0x178
[ 294.497037] el1_irq+0xb0/0x140
[ 294.503381] arch_cpu_idle+0x34/0x1a8
[ 294.510096] do_idle+0x1d4/0x290
[ 294.516322] cpu_startup_entry+0x28/0x30
[ 294.523230] secondary_start_kernel+0x184/0x1d0
[ 294.530657] Code: d538d082 d2800023 8b010c81 8b020021 (c85f7c25)
[ 294.539746] ---[ end trace 8a7a880dee570b29 ]---
[ 294.547341] Kernel panic - not syncing: Fatal exception in interrupt
[ 294.556837] SMP: stopping secondary CPUs
[ 294.563996] Kernel Offset: disabled
[ 294.570515] CPU features: 0x002,21006008
[ 294.577638] Memory Limit: none
[ 294.587178] Starting crashdump kernel...
[ 294.594314] Bye!
Because the user->release_barrier.rda is freed in ipmi_destroy_user(), but
the refcount is not zero, when acquire_ipmi_user() uses user->release_barrier.rda
in __srcu_read_lock(), it causes oops.
Fix this by calling cleanup_srcu_struct() when the refcount is zero.
Fixes: e86ee2d44b44 ("ipmi: Rework locking and shutdown for hot remove")
Cc: [email protected] # 4.18
Signed-off-by: Yang Yingliang <[email protected]>
Signed-off-by: Corey Minyard <[email protected]>
|
DECLAREContigPutFunc(putRGBcontig16bittile)
{
int samplesperpixel = img->samplesperpixel;
uint16 *wp = (uint16 *)pp;
(void) y;
fromskew *= samplesperpixel;
while (h-- > 0) {
for (x = w; x-- > 0;) {
*cp++ = PACK(img->Bitdepth16To8[wp[0]],
img->Bitdepth16To8[wp[1]],
img->Bitdepth16To8[wp[2]]);
wp += samplesperpixel;
}
cp += toskew;
wp += fromskew;
}
}
| 0 |
[
"CWE-119"
] |
libtiff
|
40a5955cbf0df62b1f9e9bd7d9657b0070725d19
| 75,989,507,225,063,285,000,000,000,000,000,000,000 | 17 |
* libtiff/tif_next.c: add new tests to check that we don't read outside of
the compressed input stream buffer.
* libtiff/tif_getimage.c: in OJPEG case, fix checks on strile width/height
|
static HostnameValidationResult matches_subject_alternative_name(const char *hostname, const X509 *server_cert) {
HostnameValidationResult result = MatchNotFound;
int i;
int san_names_nb = -1;
STACK_OF(GENERAL_NAME) *san_names = NULL;
// Try to extract the names within the SAN extension from the certificate
san_names = X509_get_ext_d2i((X509 *) server_cert, NID_subject_alt_name, NULL, NULL);
if (san_names == NULL) {
return NoSANPresent;
}
san_names_nb = sk_GENERAL_NAME_num(san_names);
// Check each name within the extension
for (i=0; i<san_names_nb; i++) {
const GENERAL_NAME *current_name = sk_GENERAL_NAME_value(san_names, i);
if (current_name->type == GEN_DNS) {
// Current name is a DNS name, let's check it
char *dns_name = (char *) ASN1_STRING_data(current_name->d.dNSName);
// Make sure there isn't an embedded NUL character in the DNS name
if (ASN1_STRING_length(current_name->d.dNSName) != strlen(dns_name)) {
result = MalformedCertificate;
break;
}
else { // Compare expected hostname with the DNS name
if (strcasecmp(hostname, dns_name) == 0) {
result = MatchFound;
break;
}
}
}
}
sk_GENERAL_NAME_pop_free(san_names, GENERAL_NAME_free);
return result;
}
| 0 |
[
"CWE-295"
] |
openfortivpn
|
6328a070ddaab16faaf008cb9a8a62439c30f2a8
| 232,180,728,765,391,900,000,000,000,000,000,000,000 | 38 |
fix TLS Certificate CommonName NULL Byte Vulnerability
CVE-2020-7043 TLS Certificate CommonName NULL Byte Vulnerability is fixed
with this commit
with #8 hostname validation for the certificate was introduced
but unfortunately strncasecmp() was used to compare the byte array
against the expected hostname. This does not correctly treat a CN
which contains a NULL byte. In order to fix this vulnerability
the reference implementation from iSECPartners has been included
into the code.
|
static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
{
unsigned int i, len = 0;
for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
return len;
| 0 |
[
"CWE-20"
] |
linux
|
2b16f048729bf35e6c28a40cbfad07239f9dcd90
| 222,890,740,283,970,660,000,000,000,000,000,000,000 | 8 |
net: create skb_gso_validate_mac_len()
If you take a GSO skb, and split it into packets, will the MAC
length (L2 + L3 + L4 headers + payload) of those packets be small
enough to fit within a given length?
Move skb_gso_mac_seglen() to skbuff.h with other related functions
like skb_gso_network_seglen() so we can use it, and then create
skb_gso_validate_mac_len to do the full calculation.
Signed-off-by: Daniel Axtens <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
void drop_privs(int force_nogroups) {
gid_t gid = getgid();
if (arg_debug)
printf("Drop privileges: pid %d, uid %d, gid %d, force_nogroups %d\n",
getpid(), getuid(), gid, force_nogroups);
// configure supplementary groups
EUID_ROOT();
if (gid == 0 || force_nogroups) {
if (setgroups(0, NULL) < 0)
errExit("setgroups");
if (arg_debug)
printf("No supplementary groups\n");
}
else if (arg_noroot || arg_nogroups)
clean_supplementary_groups(gid);
// set uid/gid
if (setresgid(-1, getgid(), getgid()) != 0)
errExit("setresgid");
if (setresuid(-1, getuid(), getuid()) != 0)
errExit("setresuid");
}
| 0 |
[
"CWE-269",
"CWE-94"
] |
firejail
|
27cde3d7d1e4e16d4190932347c7151dc2a84c50
| 216,267,890,600,534,620,000,000,000,000,000,000,000 | 23 |
fixing CVE-2022-31214
|
longlong Item_func_int_div::val_int()
{
DBUG_ASSERT(fixed == 1);
/*
Perform division using DECIMAL math if either of the operands has a
non-integer type
*/
if (args[0]->result_type() != INT_RESULT ||
args[1]->result_type() != INT_RESULT)
{
my_decimal tmp;
my_decimal *val0p= args[0]->val_decimal(&tmp);
if ((null_value= args[0]->null_value))
return 0;
my_decimal val0= *val0p;
my_decimal *val1p= args[1]->val_decimal(&tmp);
if ((null_value= args[1]->null_value))
return 0;
my_decimal val1= *val1p;
int err;
if ((err= my_decimal_div(E_DEC_FATAL_ERROR & ~E_DEC_DIV_ZERO, &tmp,
&val0, &val1, 0)) > 3)
{
if (err == E_DEC_DIV_ZERO)
signal_divide_by_null();
return 0;
}
my_decimal truncated;
const bool do_truncate= true;
if (my_decimal_round(E_DEC_FATAL_ERROR, &tmp, 0, do_truncate, &truncated))
DBUG_ASSERT(false);
longlong res;
if (my_decimal2int(E_DEC_FATAL_ERROR, &truncated, unsigned_flag, &res) &
E_DEC_OVERFLOW)
raise_integer_overflow();
return res;
}
Longlong_hybrid val0= args[0]->to_longlong_hybrid();
Longlong_hybrid val1= args[1]->to_longlong_hybrid();
if ((null_value= (args[0]->null_value || args[1]->null_value)))
return 0;
if (val1 == 0)
{
signal_divide_by_null();
return 0;
}
bool res_negative= val0.neg() != val1.neg();
ulonglong res= val0.abs() / val1.abs();
if (res_negative)
{
if (res > (ulonglong) LONGLONG_MAX)
return raise_integer_overflow();
res= (ulonglong) (-(longlong) res);
}
return check_integer_overflow(res, !res_negative);
}
| 0 |
[
"CWE-120"
] |
server
|
eca207c46293bc72dd8d0d5622153fab4d3fccf1
| 147,877,722,119,735,610,000,000,000,000,000,000,000 | 63 |
MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.
Precision should be kept below DECIMAL_MAX_SCALE for computations.
It can be bigger in Item_decimal. I'd fix this too but it changes the
existing behaviour so problemmatic to ix.
|
static void remove_disconnect_timer(struct avdtp *session)
{
if (!session->dc_timer)
return;
timeout_remove(session->dc_timer);
session->dc_timer = 0;
session->stream_setup = FALSE;
/* Release disconnect timer reference */
avdtp_unref(session);
}
| 0 |
[
"CWE-703"
] |
bluez
|
7a80d2096f1b7125085e21448112aa02f49f5e9a
| 167,379,269,961,969,060,000,000,000,000,000,000,000 | 12 |
avdtp: Fix accepting invalid/malformed capabilities
Check if capabilities are valid before attempting to copy them.
|
void CLASS apply_tiff()
{
int max_samp=0, raw=-1, thm=-1, i;
struct jhead jh;
thumb_misc = 16;
if (thumb_offset) {
fseek (ifp, thumb_offset, SEEK_SET);
if (ljpeg_start (&jh, 1)) {
thumb_misc = jh.bits;
thumb_width = jh.wide;
thumb_height = jh.high;
}
}
for (i=0; i < (int) tiff_nifds; i++) {
if (max_samp < tiff_ifd[i].samples)
max_samp = tiff_ifd[i].samples;
if (max_samp > 3) max_samp = 3;
if ((tiff_ifd[i].comp != 6 || tiff_ifd[i].samples != 3) &&
(tiff_ifd[i].width | tiff_ifd[i].height) < 0x10000 &&
tiff_ifd[i].width*tiff_ifd[i].height > raw_width*raw_height) {
raw_width = tiff_ifd[i].width;
raw_height = tiff_ifd[i].height;
tiff_bps = tiff_ifd[i].bps;
tiff_compress = tiff_ifd[i].comp;
data_offset = tiff_ifd[i].offset;
tiff_flip = tiff_ifd[i].flip;
tiff_samples = tiff_ifd[i].samples;
raw = i;
}
}
for (i=tiff_nifds; i--; )
if (tiff_ifd[i].flip) tiff_flip = tiff_ifd[i].flip;
if (raw >= 0 && !load_raw)
switch (tiff_compress) {
case 0: case 1:
switch (tiff_bps) {
case 8: load_raw = &CLASS eight_bit_load_raw; break;
case 12: load_raw = &CLASS packed_load_raw;
if (tiff_ifd[raw].phint == 2)
load_flags = 6;
if (strncmp(make,"PENTAX",6)) break;
case 14:
case 16: load_raw = &CLASS unpacked_load_raw; break;
}
if (tiff_ifd[raw].bytes*5 == raw_width*raw_height*8) {
tiff_bps = 12;
load_raw = &CLASS packed_load_raw;
load_flags = 81;
}
break;
case 6: case 7: case 99:
load_raw = &CLASS lossless_jpeg_load_raw; break;
case 262:
load_raw = &CLASS kodak_262_load_raw; break;
case 32767:
if (tiff_ifd[raw].bytes == raw_width*raw_height) {
tiff_bps = 12;
load_raw = &CLASS sony_arw2_load_raw; break;
}
if (tiff_ifd[raw].bytes*8 != (int)(raw_width*raw_height*tiff_bps)) {
raw_height += 8;
load_raw = &CLASS sony_arw_load_raw; break;
}
load_flags = 79;
case 32769:
load_flags++;
case 32770:
case 32773:
load_raw = &CLASS packed_load_raw; break;
case 34713:
load_raw = &CLASS nikon_compressed_load_raw; break;
case 65535:
load_raw = &CLASS pentax_load_raw; break;
case 65000:
switch (tiff_ifd[raw].phint) {
case 2: load_raw = &CLASS kodak_rgb_load_raw; filters = 0; break;
case 6: load_raw = &CLASS kodak_ycbcr_load_raw; filters = 0; break;
case 32803: load_raw = &CLASS kodak_65000_load_raw;
}
case 32867: break;
default: is_raw = 0;
}
if (!dng_version)
if ( (tiff_samples == 3 && tiff_ifd[raw].bytes &&
tiff_bps != 14 && tiff_bps != 2048)
|| (tiff_bps == 8 && !strstr(make,"KODAK") && !strstr(make,"Kodak") &&
!strstr(model2,"DEBUG RAW")))
is_raw = 0;
for (i=0; i < (int) tiff_nifds; i++)
if (i != raw && tiff_ifd[i].samples == max_samp &&
tiff_ifd[i].width * tiff_ifd[i].height / SQR(tiff_ifd[i].bps+1) >
(int)(thumb_width * thumb_height / SQR(thumb_misc+1))) {
thumb_width = tiff_ifd[i].width;
thumb_height = tiff_ifd[i].height;
thumb_offset = tiff_ifd[i].offset;
thumb_length = tiff_ifd[i].bytes;
thumb_misc = tiff_ifd[i].bps;
thm = i;
}
if (thm >= 0) {
thumb_misc |= tiff_ifd[thm].samples << 5;
switch (tiff_ifd[thm].comp) {
case 0:
write_thumb = &CLASS layer_thumb;
break;
case 1:
if (tiff_ifd[thm].bps > 8)
thumb_load_raw = &CLASS kodak_thumb_load_raw;
else
write_thumb = &CLASS ppm_thumb;
break;
case 65000:
thumb_load_raw = tiff_ifd[thm].phint == 6 ?
&CLASS kodak_ycbcr_load_raw : &CLASS kodak_rgb_load_raw;
}
}
}
| 0 |
[
"CWE-189"
] |
rawstudio
|
983bda1f0fa5fa86884381208274198a620f006e
| 308,459,519,947,794,500,000,000,000,000,000,000,000 | 118 |
Avoid overflow in ljpeg_start().
|
static int vdbeRecordCompareDebug(
int nKey1, const void *pKey1, /* Left key */
const UnpackedRecord *pPKey2, /* Right key */
int desiredResult /* Correct answer */
){
u32 d1; /* Offset into aKey[] of next data element */
u32 idx1; /* Offset into aKey[] of next header element */
u32 szHdr1; /* Number of bytes in header */
int i = 0;
int rc = 0;
const unsigned char *aKey1 = (const unsigned char *)pKey1;
KeyInfo *pKeyInfo;
Mem mem1;
pKeyInfo = pPKey2->pKeyInfo;
if( pKeyInfo->db==0 ) return 1;
mem1.enc = pKeyInfo->enc;
mem1.db = pKeyInfo->db;
/* mem1.flags = 0; // Will be initialized by sqlite3VdbeSerialGet() */
VVA_ONLY( mem1.szMalloc = 0; ) /* Only needed by assert() statements */
/* Compilers may complain that mem1.u.i is potentially uninitialized.
** We could initialize it, as shown here, to silence those complaints.
** But in fact, mem1.u.i will never actually be used uninitialized, and doing
** the unnecessary initialization has a measurable negative performance
** impact, since this routine is a very high runner. And so, we choose
** to ignore the compiler warnings and leave this variable uninitialized.
*/
/* mem1.u.i = 0; // not needed, here to silence compiler warning */
idx1 = getVarint32(aKey1, szHdr1);
if( szHdr1>98307 ) return SQLITE_CORRUPT;
d1 = szHdr1;
assert( pKeyInfo->nAllField>=pPKey2->nField || CORRUPT_DB );
assert( pKeyInfo->aSortFlags!=0 );
assert( pKeyInfo->nKeyField>0 );
assert( idx1<=szHdr1 || CORRUPT_DB );
do{
u32 serial_type1;
/* Read the serial types for the next element in each key. */
idx1 += getVarint32( aKey1+idx1, serial_type1 );
/* Verify that there is enough key space remaining to avoid
** a buffer overread. The "d1+serial_type1+2" subexpression will
** always be greater than or equal to the amount of required key space.
** Use that approximation to avoid the more expensive call to
** sqlite3VdbeSerialTypeLen() in the common case.
*/
if( d1+(u64)serial_type1+2>(u64)nKey1
&& d1+(u64)sqlite3VdbeSerialTypeLen(serial_type1)>(u64)nKey1
){
break;
}
/* Extract the values to be compared.
*/
d1 += sqlite3VdbeSerialGet(&aKey1[d1], serial_type1, &mem1);
/* Do the comparison
*/
rc = sqlite3MemCompare(&mem1, &pPKey2->aMem[i],
pKeyInfo->nAllField>i ? pKeyInfo->aColl[i] : 0);
if( rc!=0 ){
assert( mem1.szMalloc==0 ); /* See comment below */
if( (pKeyInfo->aSortFlags[i] & KEYINFO_ORDER_BIGNULL)
&& ((mem1.flags & MEM_Null) || (pPKey2->aMem[i].flags & MEM_Null))
){
rc = -rc;
}
if( pKeyInfo->aSortFlags[i] & KEYINFO_ORDER_DESC ){
rc = -rc; /* Invert the result for DESC sort order. */
}
goto debugCompareEnd;
}
i++;
}while( idx1<szHdr1 && i<pPKey2->nField );
/* No memory allocation is ever used on mem1. Prove this using
** the following assert(). If the assert() fails, it indicates a
** memory leak and a need to call sqlite3VdbeMemRelease(&mem1).
*/
assert( mem1.szMalloc==0 );
/* rc==0 here means that one of the keys ran out of fields and
** all the fields up to that point were equal. Return the default_rc
** value. */
rc = pPKey2->default_rc;
debugCompareEnd:
if( desiredResult==0 && rc==0 ) return 1;
if( desiredResult<0 && rc<0 ) return 1;
if( desiredResult>0 && rc>0 ) return 1;
if( CORRUPT_DB ) return 1;
if( pKeyInfo->db->mallocFailed ) return 1;
return 0;
}
| 0 |
[
"CWE-755"
] |
sqlite
|
8654186b0236d556aa85528c2573ee0b6ab71be3
| 306,402,945,198,982,970,000,000,000,000,000,000,000 | 97 |
When an error occurs while rewriting the parser tree for window functions
in the sqlite3WindowRewrite() routine, make sure that pParse->nErr is set,
and make sure that this shuts down any subsequent code generation that might
depend on the transformations that were implemented. This fixes a problem
discovered by the Yongheng and Rui fuzzer.
FossilOrigin-Name: e2bddcd4c55ba3cbe0130332679ff4b048630d0ced9a8899982edb5a3569ba7f
|
mbc_case_fold(OnigCaseFoldType flag ARG_UNUSED,
const UChar** pp, const UChar* end ARG_UNUSED, UChar* lower)
{
const UChar* p = *pp;
if (ONIGENC_IS_MBC_ASCII(p)) {
*lower = ONIGENC_ASCII_CODE_TO_LOWER_CASE(*p);
(*pp)++;
return 1;
}
else {
int i;
int len = enclen(ONIG_ENCODING_SJIS, p);
for (i = 0; i < len; i++) {
*lower++ = *p++;
}
(*pp) += len;
return len; /* return byte length of converted char to lower */
}
}
| 0 |
[
"CWE-125"
] |
oniguruma
|
65a9b1aa03c9bc2dc01b074295b9603232cb3b78
| 143,302,188,681,513,410,000,000,000,000,000,000,000 | 21 |
onig-5.9.2
|
reload_check_thread(__attribute__((unused)) thread_t * thread)
{
list old_checkers_queue;
log_message(LOG_INFO, "Reloading");
/* Use standard scheduling while reloading */
reset_process_priorities();
/* set the reloading flag */
SET_RELOAD;
log_message(LOG_INFO, "Got SIGHUP, reloading checker configuration");
/* Terminate all script process */
script_killall(master, SIGTERM, false);
/* Remove the notify fifo - we don't know if it will be the same after a reload */
notify_fifo_close(&global_data->notify_fifo, &global_data->lvs_notify_fifo);
/* Destroy master thread */
checker_dispatcher_release();
thread_cleanup_master(master);
thread_add_base_threads(master);
/* Save previous checker data */
old_checkers_queue = checkers_queue;
checkers_queue = NULL;
free_ssl();
ipvs_stop();
/* Save previous conf data */
old_check_data = check_data;
check_data = NULL;
old_global_data = global_data;
global_data = NULL;
/* Reload the conf */
start_check(old_checkers_queue, old_global_data);
/* free backup data */
free_check_data(old_check_data);
free_global_data(old_global_data);
free_list(&old_checkers_queue);
UNSET_RELOAD;
return 0;
}
| 0 |
[
"CWE-200"
] |
keepalived
|
26c8d6374db33bcfcdcd758b1282f12ceef4b94f
| 205,212,281,865,642,800,000,000,000,000,000,000,000 | 49 |
Disable fopen_safe() append mode by default
If a non privileged user creates /tmp/keepalived.log and has it open
for read (e.g. tail -f), then even though keepalived will change the
owner to root and remove all read/write permissions from non owners,
the application which already has the file open will be able to read
the added log entries.
Accordingly, opening a file in append mode is disabled by default, and
only enabled if --enable-smtp-alert-debug or --enable-log-file (which
are debugging options and unset by default) are enabled.
This should further alleviate security concerns related to CVE-2018-19046.
Signed-off-by: Quentin Armitage <[email protected]>
|
unsigned short mg_url_port(const char *url) {
struct url u = urlparse(url);
unsigned short port = 0;
if (strncmp(url, "http:", 5) == 0 || strncmp(url, "ws:", 3) == 0) port = 80;
if (strncmp(url, "wss:", 4) == 0 || strncmp(url, "https:", 6) == 0)
port = 443;
if (strncmp(url, "mqtt:", 5) == 0) port = 1883;
if (strncmp(url, "mqtts:", 6) == 0) port = 8883;
if (u.port) port = (unsigned short) atoi(url + u.port);
return port;
}
| 0 |
[
"CWE-552"
] |
mongoose
|
c65c8fdaaa257e0487ab0aaae9e8f6b439335945
| 167,500,941,685,098,170,000,000,000,000,000,000,000 | 11 |
Protect against the directory traversal in mg_upload()
|
static inline int current_is_64bit(void)
{
/*
* We can't use test_thread_flag() here because we may be on an
* interrupt stack, and the thread flags don't get copied over
* from the thread_info on the main stack to the interrupt stack.
*/
return !test_ti_thread_flag(task_thread_info(current), TIF_32BIT);
}
| 0 |
[
"CWE-399"
] |
linux
|
9a5cbce421a283e6aea3c4007f141735bf9da8c3
| 155,145,328,846,481,940,000,000,000,000,000,000,000 | 9 |
powerpc/perf: Cap 64bit userspace backtraces to PERF_MAX_STACK_DEPTH
We cap 32bit userspace backtraces to PERF_MAX_STACK_DEPTH
(currently 127), but we forgot to do the same for 64bit backtraces.
Cc: [email protected]
Signed-off-by: Anton Blanchard <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
|
RefreshAll(isblank)
int isblank;
{
struct canvas *cv;
ASSERT(display);
debug("Signalling full refresh!\n");
for (cv = D_cvlist; cv; cv = cv->c_next)
{
CV_CALL(cv, LayRedisplayLine(-1, -1, -1, isblank));
display = cv->c_display; /* just in case! */
}
RefreshArea(0, 0, D_width - 1, D_height - 1, isblank);
}
| 0 |
[] |
screen
|
c5db181b6e017cfccb8d7842ce140e59294d9f62
| 121,314,881,784,793,520,000,000,000,000,000,000,000 | 14 |
ansi: add support for xterm OSC 11
It allows for getting and setting the background color. Notably, Vim uses
OSC 11 to learn whether it's running on a light or dark colored terminal
and choose a color scheme accordingly.
Tested with gnome-terminal and xterm. When called with "?" argument the
current background color is returned:
$ echo -ne "\e]11;?\e\\"
$ 11;rgb:2323/2727/2929
Signed-off-by: Lubomir Rintel <[email protected]>
(cherry picked from commit 7059bff20a28778f9d3acf81cad07b1388d02309)
Signed-off-by: Amadeusz Sławiński <[email protected]
|
static uint64_t find_max_local_usn(struct replPropertyMetaDataBlob omd)
{
uint32_t count = omd.ctr.ctr1.count;
uint64_t max = 0;
uint32_t i;
for (i=0; i < count; i++) {
struct replPropertyMetaData1 m = omd.ctr.ctr1.array[i];
if (max < m.local_usn) {
max = m.local_usn;
}
}
return max;
}
| 0 |
[
"CWE-200"
] |
samba
|
0a3aa5f908e351201dc9c4d4807b09ed9eedff77
| 271,825,180,059,693,900,000,000,000,000,000,000,000 | 13 |
CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <[email protected]>
|
enc_start (struct b64state *state, FILE *fp, estream_t stream,
const char *title)
{
memset (state, 0, sizeof *state);
state->fp = fp;
state->stream = stream;
state->lasterr = 0;
if (title && !*title)
state->flags |= B64ENC_NO_LINEFEEDS;
else if (title)
{
if (!strncmp (title, "PGP ", 4))
{
state->flags |= B64ENC_USE_PGPCRC;
state->crc = CRCINIT;
}
state->title = xtrystrdup (title);
if (!state->title)
state->lasterr = gpg_error_from_syserror ();
}
return state->lasterr;
}
| 0 |
[
"CWE-20"
] |
gnupg
|
2183683bd633818dd031b090b5530951de76f392
| 163,496,253,132,760,540,000,000,000,000,000,000,000 | 22 |
Use inline functions to convert buffer data to scalars.
* common/host2net.h (buf16_to_ulong, buf16_to_uint): New.
(buf16_to_ushort, buf16_to_u16): New.
(buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New.
--
Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to
avoid all sign extension on shift problems. Hanno Böck found a case
with an invalid read due to this problem. To fix that once and for
all almost all uses of "<< 24" and "<< 8" are changed by this patch to
use an inline function from host2net.h.
Signed-off-by: Werner Koch <[email protected]>
|
static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
{
struct io_kiocb *link;
io_prep_async_work(nxt, &link);
*workptr = &nxt->work;
if (link) {
nxt->work.flags |= IO_WQ_WORK_CB;
nxt->work.func = io_link_work_cb;
nxt->work.data = link;
}
}
| 0 |
[] |
linux
|
ff002b30181d30cdfbca316dadd099c3ca0d739c
| 290,605,862,530,966,530,000,000,000,000,000,000,000 | 12 |
io_uring: grab ->fs as part of async preparation
This passes it in to io-wq, so it assumes the right fs_struct when
executing async work that may need to do lookups.
Cc: [email protected] # 5.3+
Signed-off-by: Jens Axboe <[email protected]>
|
static int set_serial_info(struct acm *acm,
struct serial_struct __user *newinfo)
{
struct serial_struct new_serial;
unsigned int closing_wait, close_delay;
int retval = 0;
if (copy_from_user(&new_serial, newinfo, sizeof(new_serial)))
return -EFAULT;
close_delay = new_serial.close_delay * 10;
closing_wait = new_serial.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
ASYNC_CLOSING_WAIT_NONE : new_serial.closing_wait * 10;
mutex_lock(&acm->port.mutex);
if (!capable(CAP_SYS_ADMIN)) {
if ((close_delay != acm->port.close_delay) ||
(closing_wait != acm->port.closing_wait))
retval = -EPERM;
else
retval = -EOPNOTSUPP;
} else {
acm->port.close_delay = close_delay;
acm->port.closing_wait = closing_wait;
}
mutex_unlock(&acm->port.mutex);
return retval;
}
| 0 |
[
"CWE-703"
] |
linux
|
8835ba4a39cf53f705417b3b3a94eb067673f2c9
| 129,464,857,466,197,560,000,000,000,000,000,000,000 | 30 |
USB: cdc-acm: more sanity checking
An attack has become available which pretends to be a quirky
device circumventing normal sanity checks and crashes the kernel
by an insufficient number of interfaces. This patch adds a check
to the code path for quirky devices.
Signed-off-by: Oliver Neukum <[email protected]>
CC: [email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
void tcp_init_transfer(struct sock *sk, int bpf_op)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
tcp_mtup_init(sk);
icsk->icsk_af_ops->rebuild_header(sk);
tcp_init_metrics(sk);
/* Initialize the congestion window to start the transfer.
* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
* retransmitted. In light of RFC6298 more aggressive 1sec
* initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
* retransmission has occurred.
*/
if (tp->total_retrans > 1 && tp->undo_marker)
tp->snd_cwnd = 1;
else
tp->snd_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
tp->snd_cwnd_stamp = tcp_jiffies32;
tcp_call_bpf(sk, bpf_op, 0, NULL);
tcp_init_congestion_control(sk);
tcp_init_buffer_space(sk);
}
| 0 |
[
"CWE-190"
] |
net
|
3b4929f65b0d8249f19a50245cd88ed1a2f78cff
| 45,388,308,217,836,310,000,000,000,000,000,000,000 | 25 |
tcp: limit payload size of sacked skbs
Jonathan Looney reported that TCP can trigger the following crash
in tcp_shifted_skb() :
BUG_ON(tcp_skb_pcount(skb) < pcount);
This can happen if the remote peer has advertized the smallest
MSS that linux TCP accepts : 48
An skb can hold 17 fragments, and each fragment can hold 32KB
on x86, or 64KB on PowerPC.
This means that the 16bit witdh of TCP_SKB_CB(skb)->tcp_gso_segs
can overflow.
Note that tcp_sendmsg() builds skbs with less than 64KB
of payload, so this problem needs SACK to be enabled.
SACK blocks allow TCP to coalesce multiple skbs in the retransmit
queue, thus filling the 17 fragments to maximal capacity.
CVE-2019-11477 -- u16 overflow of TCP_SKB_CB(skb)->tcp_gso_segs
Fixes: 832d11c5cd07 ("tcp: Try to restore large SKBs while SACK processing")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Jonathan Looney <[email protected]>
Acked-by: Neal Cardwell <[email protected]>
Reviewed-by: Tyler Hicks <[email protected]>
Cc: Yuchung Cheng <[email protected]>
Cc: Bruce Curtis <[email protected]>
Cc: Jonathan Lemon <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
inline size_t WireFormatLite::SInt64SizePlusOne(int64_t value) {
return io::CodedOutputStream::VarintSize64PlusOne(ZigZagEncode64(value));
}
| 0 |
[
"CWE-703"
] |
protobuf
|
d1635e1496f51e0d5653d856211e8821bc47adc4
| 67,810,457,442,346,520,000,000,000,000,000,000,000 | 3 |
Apply patch
|
/* Append a TLV (type/length/value) descriptor to the tail of @skb.
 * Returns 0 on success or -EMSGSIZE when the aligned TLV does not fit
 * in the remaining tailroom.
 */
static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
{
	struct tlv_desc *tlv;

	/* Refuse to append when the buffer cannot hold the aligned TLV. */
	if (tipc_skb_tailroom(skb) < TLV_SPACE(len))
		return -EMSGSIZE;

	tlv = (struct tlv_desc *)skb_tail_pointer(skb);
	skb_put(skb, TLV_SPACE(len));
	tlv->tlv_type = htons(type);
	tlv->tlv_len = htons(TLV_LENGTH(len));
	if (data && len)
		memcpy(TLV_DATA(tlv), data, len);

	return 0;
}
| 0 |
[
"CWE-200"
] |
net
|
5d2be1422e02ccd697ccfcd45c85b4a26e6178e2
| 187,742,968,901,360,430,000,000,000,000,000,000,000 | 15 |
tipc: fix an infoleak in tipc_nl_compat_link_dump
link_info.str is a char array of size 60. Memory after the NULL
byte is not initialized. Sending the whole object out can cause
a leak.
Signed-off-by: Kangjie Lu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/* usbip_device "reset" callback: reset the exported USB device.
 *
 * Takes the USB reset lock, performs the reset, and records the outcome
 * in ud->status under ud->lock: SDEV_ST_ERROR on any failure,
 * SDEV_ST_AVAILABLE on success.
 */
static void stub_device_reset(struct usbip_device *ud)
{
	struct stub_device *sdev = container_of(ud, struct stub_device, ud);
	struct usb_device *udev = sdev->udev;
	int ret;
	dev_dbg(&udev->dev, "device reset");
	/* Serialize against other users of the device before resetting. */
	ret = usb_lock_device_for_reset(udev, NULL);
	if (ret < 0) {
		dev_err(&udev->dev, "lock for reset\n");
		/* Could not even lock: publish the error state and bail out. */
		spin_lock_irq(&ud->lock);
		ud->status = SDEV_ST_ERROR;
		spin_unlock_irq(&ud->lock);
		return;
	}
	/* try to reset the device */
	ret = usb_reset_device(udev);
	usb_unlock_device(udev);
	/* Publish the post-reset state under the status lock. */
	spin_lock_irq(&ud->lock);
	if (ret) {
		dev_err(&udev->dev, "device reset\n");
		ud->status = SDEV_ST_ERROR;
	} else {
		dev_info(&udev->dev, "device reset\n");
		ud->status = SDEV_ST_AVAILABLE;
	}
	spin_unlock_irq(&ud->lock);
}
| 0 |
[
"CWE-362"
] |
linux
|
22076557b07c12086eeb16b8ce2b0b735f7a27e7
| 84,756,036,977,478,380,000,000,000,000,000,000,000 | 31 |
usbip: usbip_host: fix NULL-ptr deref and use-after-free errors
usbip_host updates device status without holding lock from stub probe,
disconnect and rebind code paths. When multiple requests to import a
device are received, these unprotected code paths step all over each
other and driver fails with NULL-ptr deref and use-after-free errors.
The driver uses a table lock to protect the busid array for adding and
deleting busids to the table. However, the probe, disconnect and rebind
paths get the busid table entry and update the status without holding
the busid table lock. Add a new finer grain lock to protect the busid
entry. This new lock will be held to search and update the busid entry
fields from get_busid_idx(), add_match_busid() and del_match_busid().
match_busid_show() does the same to access the busid entry fields.
get_busid_priv() changed to return the pointer to the busid entry holding
the busid lock. stub_probe(), stub_disconnect() and stub_device_rebind()
call put_busid_priv() to release the busid lock before returning. This
changes fixes the unprotected code paths eliminating the race conditions
in updating the busid entries.
Reported-by: Jakub Jirasek
Signed-off-by: Shuah Khan (Samsung OSG) <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
/* Load the optional vertical-writing resources for fontset @num:
 * the vertical character map ("fs<num>.font.vertical_map") and the
 * vertical-rotation description ("fs<num>.font.vertical_rotate"),
 * storing the parsed results into @font_set.
 */
static void read_vw(
    XLCd lcd,
    OMData font_set,
    int num)
{
    char **values, resname[BUFSIZ];
    int num_values;

    /* Optional per-fontset vertical character map. */
    snprintf(resname, sizeof(resname), "fs%d.font.vertical_map", num);
    _XlcGetResource(lcd, "XLC_FONTSET", resname, &values, &num_values);
    if (num_values > 0) {
        _XlcDbg_printValue(resname, values, num_values);
        font_set->vmap_num = num_values;
        font_set->vmap = read_EncodingInfo(num_values, values);
    }

    /* Optional vertical-rotation description. */
    snprintf(resname, sizeof(resname), "fs%d.font.vertical_rotate", num);
    _XlcGetResource(lcd, "XLC_FONTSET", resname, &values, &num_values);
    if (num_values > 0) {
        _XlcDbg_printValue(resname, values, num_values);
        font_set->vrotate = read_vrotate(num_values, values,
                                         &(font_set->vrotate_type),
                                         &(font_set->vrotate_num));
    }
}
| 0 |
[
"CWE-190"
] |
libx11
|
acdaaadcb3d85c61fd43669fc5dddf0f8c3f911d
| 103,646,659,550,220,020,000,000,000,000,000,000,000 | 24 |
Fix an integer overflow in init_om()
CVE-2020-14363
This can lead to a double free later, as reported by Jayden Rivers.
Signed-off-by: Matthieu Herrb <[email protected]>
|
/* Queue one file for DCC SEND to @target.
 *
 * Opens the file, optionally starts listening (active mode), creates the
 * SEND_DCC_REC and announces the transfer via a CTCP "DCC SEND" message.
 * Returns TRUE on success, FALSE on any error (duplicate request, open or
 * listen failure), in which case the appropriate error signal is emitted.
 */
static int dcc_send_one_file(int queue, const char *target, const char *fname,
			     IRC_SERVER_REC *server, CHAT_DCC_REC *chat,
			     int passive)
{
	struct stat st;
	char *str;
	char host[MAX_IP_LEN];
	int hfile, port = 0;
	SEND_DCC_REC *dcc;
	IPADDR own_ip;
	GIOChannel *handle;

	/* Refuse a duplicate queued send of the same file to the same target. */
	if (dcc_find_request(DCC_SEND_TYPE, target, fname)) {
		signal_emit("dcc error send exists", 2, target, fname);
		return FALSE;
	}

	str = dcc_send_get_file(fname);
	hfile = open(str, O_RDONLY);
	g_free(str);

	if (hfile == -1) {
		signal_emit("dcc error file open", 3, target, fname,
			    GINT_TO_POINTER(errno));
		return FALSE;
	}
	if (fstat(hfile, &st) < 0) {
		g_warning("fstat() failed: %s", strerror(errno));
		close(hfile);
		return FALSE;
	}

	/* start listening (only if passive == FALSE )*/
	if (passive == FALSE) {
		handle = dcc_listen(chat != NULL ? chat->handle :
				    net_sendbuffer_handle(server->handle),
				    &own_ip, &port);
		if (handle == NULL) {
			close(hfile);
			g_warning("dcc_listen() failed: %s", strerror(errno));
			return FALSE;
		}
	} else {
		handle = NULL;
	}

	str = g_path_get_basename(fname);

	/* Replace all the spaces with underscore so that lesser
	   intelligent clients can communicate.. */
	if (settings_get_bool("dcc_send_replace_space_with_underscore"))
		g_strdelimit(str, " ", '_');

	dcc = dcc_send_create(server, chat, target, str);
	g_free(str);

	dcc->handle = handle;
	dcc->port = port;
	dcc->size = st.st_size;
	dcc->fhandle = hfile;
	dcc->queue = queue;
	dcc->file_quoted = strchr(fname, ' ') != NULL;
	if (!passive) {
		dcc->tagconn = g_input_add(handle, G_INPUT_READ,
					   (GInputFunction) dcc_send_connected,
					   dcc);
	}

	/* Generate an ID for this send if using passive protocol */
	if (passive) {
		dcc->pasv_id = rand() % 64;
	}

	/* Build the CTCP announcement *before* emitting "dcc request send":
	 * a signal handler may close and destroy the dcc record, and
	 * reading dcc->arg / dcc->size / dcc->pasv_id / dcc->file_quoted
	 * after the emit would then be a use-after-free (CWE-416). */
	if (passive == FALSE) {
		dcc_ip2str(&own_ip, host);
		str = g_strdup_printf(dcc->file_quoted ?
				      "DCC SEND \"%s\" %s %d %"PRIuUOFF_T :
				      "DCC SEND %s %s %d %"PRIuUOFF_T,
				      dcc->arg, host, port, dcc->size);
	} else {
		str = g_strdup_printf(dcc->file_quoted ?
				      "DCC SEND \"%s\" 16843009 0 %"PRIuUOFF_T" %d" :
				      "DCC SEND %s 16843009 0 %"PRIuUOFF_T" %d",
				      dcc->arg, dcc->size, dcc->pasv_id);
	}

	/* send DCC request */
	signal_emit("dcc request send", 1, dcc);
	dcc_ctcp_message(server, target, chat, FALSE, str);
	g_free(str);
	return TRUE;
}
| 1 |
[
"CWE-416"
] |
irssi
|
43e44d553d44e313003cee87e6ea5e24d68b84a1
| 170,161,230,237,265,500,000,000,000,000,000,000,000 | 96 |
Merge branch 'security' into 'master'
Security
Closes GL#12, GL#13, GL#14, GL#15, GL#16
See merge request irssi/irssi!23
|
/* Remove and free the oldest event on @dev's queue, if any. */
static void inotify_dev_event_dequeue(struct inotify_device *dev)
{
	struct inotify_kernel_event *kevent;

	/* Empty queue: nothing to dequeue. */
	if (list_empty(&dev->events))
		return;

	kevent = inotify_dev_get_event(dev);
	remove_kevent(dev, kevent);
	free_kevent(kevent);
}
| 0 |
[
"CWE-399"
] |
linux-2.6
|
3632dee2f8b8a9720329f29eeaa4ec4669a3aff8
| 108,043,734,155,947,720,000,000,000,000,000,000,000 | 9 |
inotify: clean up inotify_read and fix locking problems
If userspace supplies an invalid pointer to a read() of an inotify
instance, the inotify device's event list mutex is unlocked twice.
This causes an unbalance which effectively leaves the data structure
unprotected, and we can trigger oopses by accessing the inotify
instance from different tasks concurrently.
The best fix (contributed largely by Linus) is a total rewrite
of the function in question:
On Thu, Jan 22, 2009 at 7:05 AM, Linus Torvalds wrote:
> The thing to notice is that:
>
> - locking is done in just one place, and there is no question about it
> not having an unlock.
>
> - that whole double-while(1)-loop thing is gone.
>
> - use multiple functions to make nesting and error handling sane
>
> - do error testing after doing the things you always need to do, ie do
> this:
>
> mutex_lock(..)
> ret = function_call();
> mutex_unlock(..)
>
> .. test ret here ..
>
> instead of doing conditional exits with unlocking or freeing.
>
> So if the code is written in this way, it may still be buggy, but at least
> it's not buggy because of subtle "forgot to unlock" or "forgot to free"
> issues.
>
> This _always_ unlocks if it locked, and it always frees if it got a
> non-error kevent.
Cc: John McCutchan <[email protected]>
Cc: Robert Love <[email protected]>
Cc: <[email protected]>
Signed-off-by: Vegard Nossum <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
conn_key_reverse(struct conn_key *key)
{
struct ct_endpoint tmp;
tmp = key->src;
key->src = key->dst;
key->dst = tmp;
}
| 0 |
[
"CWE-400"
] |
ovs
|
35c280072c1c3ed58202745b7d27fbbd0736999b
| 325,406,843,303,158,360,000,000,000,000,000,000,000 | 8 |
flow: Support extra padding length.
Although not required, padding can be optionally added until
the packet length is MTU bytes. A packet with extra padding
currently fails sanity checks.
Vulnerability: CVE-2020-35498
Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.")
Reported-by: Joakim Hindersson <[email protected]>
Acked-by: Ilya Maximets <[email protected]>
Signed-off-by: Flavio Leitner <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]>
|
// Handle a transaction that bypasses the cache lookup entirely: pick the
// next hop (parent proxy or origin server), build the outgoing request,
// and transition to the appropriate next state.
// NOTE: TRANSACT_RETURN is a macro that records the next action/handler
// and returns from this function.
HttpTransact::LookupSkipOpenServer(State* s)
{
  // cache will not be looked up. open a connection
  // to a parent proxy or to the origin server.
  find_server_and_update_current_info(s);

  if (s->current.request_to == PARENT_PROXY) {
    TRANSACT_RETURN(SM_ACTION_DNS_LOOKUP, PPDNSLookup);
  }

  ink_assert(s->current.request_to == ORIGIN_SERVER);
  //  ink_assert(s->current.server->ip != 0);

  build_request(s, &s->hdr_info.client_request, &s->hdr_info.server_request, s->current.server->http_version);

  StateMachineAction_t next = how_to_open_connection(s);
  s->next_action = next;
  // Only the direct open-server actions go straight to HandleResponse;
  // other actions keep the next_action set above.
  if (next == SM_ACTION_ORIGIN_SERVER_OPEN || next == SM_ACTION_ORIGIN_SERVER_RAW_OPEN) {
    TRANSACT_RETURN(next, HttpTransact::HandleResponse);
  }
}
| 0 |
[
"CWE-119"
] |
trafficserver
|
8b5f0345dade6b2822d9b52c8ad12e63011a5c12
| 155,619,913,369,592,600,000,000,000,000,000,000,000 | 21 |
Fix the internal buffer sizing. Thanks to Sudheer for helping isolating this bug
|
// Returns the subgraph's stored name; the reference remains valid for the
// lifetime of this Subgraph.
const std::string& Subgraph::GetName() const { return name_; }
| 0 |
[
"CWE-476"
] |
tensorflow
|
f8378920345f4f4604202d4ab15ef64b2aceaa16
| 150,705,450,250,160,660,000,000,000,000,000,000,000 | 1 |
Prevent a null pointer dereference in TFLite.
PiperOrigin-RevId: 370800353
Change-Id: Ic9c9712ce5c6e384c954dcd640a5bd9ff05c9a05
|
// Selects the interpolation algorithm used by transposers.
// NOTE: assigns a class-scope (shared) member, so this presumably affects
// all instances, not just one -- verify against TransposerBase's declaration.
void TransposerBase::setAlgorithm(TransposerBase::ALGORITHM a)
{
    TransposerBase::algorithm = a;
}
| 0 |
[
"CWE-617"
] |
soundtouch
|
107f2c5d201a4dfea1b7f15c5957ff2ac9e5f260
| 143,003,529,445,479,130,000,000,000,000,000,000,000 | 4 |
Replaced illegal-number-of-channel assertions with run-time exception
|
// Prepare for the Relu op: validate arity, require matching input/output
// types, precompute the requantization multiplier for quantized kernels,
// and size the output tensor like the input.
TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node) {
  ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input = GetInput(context, node, 0);
  // GetInput/GetOutput can return nullptr on malformed models; dereferencing
  // without a check leads to out-of-bounds reads/writes (CWE-125/787).
  TF_LITE_ENSURE(context, input != nullptr);
  TfLiteTensor* output = GetOutput(context, node, 0);
  TF_LITE_ENSURE(context, output != nullptr);
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
  if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8) {
    // Fold the input/output scales into a fixed-point multiplier + shift
    // so Eval can requantize without floating point.
    double real_multiplier = input->params.scale / output->params.scale;
    QuantizeMultiplier(real_multiplier, &data->output_multiplier,
                       &data->output_shift);
  }
  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}
[
"CWE-125",
"CWE-787"
] |
tensorflow
|
1970c2158b1ffa416d159d03c3370b9a462aee35
| 147,107,890,992,070,960,000,000,000,000,000,000,000 | 17 |
[tflite]: Insert `nullptr` checks when obtaining tensors.
As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages.
We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`).
PiperOrigin-RevId: 332521299
Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
|
/*
 * Get the string value of "varp", converting via a shared static buffer
 * when needed (the "_chk" variant; presumably returns NULL on failure --
 * see tv_get_string_buf_chk()).
 * NOTE: the result may point into the static "mybuf", which is
 * overwritten by the next call; callers must copy it if it must persist.
 */
    tv_get_string_chk(typval_T *varp)
{
    static char_u   mybuf[NUMBUFLEN];

    return tv_get_string_buf_chk(varp, mybuf);
}
| 0 |
[
"CWE-125",
"CWE-122"
] |
vim
|
1e56bda9048a9625bce6e660938c834c5c15b07d
| 65,010,677,982,792,790,000,000,000,000,000,000,000 | 6 |
patch 9.0.0104: going beyond allocated memory when evaluating string constant
Problem: Going beyond allocated memory when evaluating string constant.
Solution: Properly skip over <Key> form.
|
/* Reposition the lexer at a previously saved character position.
 * Rebuilds the lexer's string iterator as a clone of the saved one,
 * restores the saved current character, invalidates any recorded token
 * start, and re-tokenizes from the new position. */
void jslSeekToP(JslCharPos *seekToChar) {
  if (lex->it.var) jsvLockAgain(lex->it.var); // see jslGetNextCh
  jsvStringIteratorFree(&lex->it);
  // Clone the saved iterator so the caller's JslCharPos stays usable.
  lex->it = jsvStringIteratorClone(&seekToChar->it);
  jsvUnLock(lex->it.var); // see jslGetNextCh
  lex->currCh = seekToChar->currCh;
  // Any previously recorded token start no longer refers to valid state.
  lex->tokenStart.it.var = 0;
  lex->tokenStart.currCh = 0;
  jslGetNextToken();
}
| 0 |
[
"CWE-787"
] |
Espruino
|
bed844f109b6c222816740555068de2e101e8018
| 282,188,073,850,932,900,000,000,000,000,000,000,000 | 10 |
remove strncpy usage as it's effectively useless, replace with an assertion since fn is only used internally (fix #1426)
|
/* MGMT disconnect-command completion handler.
 * "Already disconnected" is only worth a warning and still processed;
 * any other failure is logged and aborts. On a (large enough) reply,
 * mark the device as locally disconnected.
 */
static void disconnect_complete(uint8_t status, uint16_t length,
					const void *param, void *user_data)
{
	struct btd_adapter *adapter = user_data;
	const struct mgmt_rp_disconnect *rp = param;

	switch (status) {
	case MGMT_STATUS_SUCCESS:
		break;
	case MGMT_STATUS_NOT_CONNECTED:
		btd_warn(adapter->dev_id,
				"Disconnecting failed: already disconnected");
		break;
	default:
		btd_error(adapter->dev_id,
				"Failed to disconnect device: %s (0x%02x)",
				mgmt_errstr(status), status);
		return;
	}

	if (length < sizeof(*rp)) {
		btd_error(adapter->dev_id,
				"Too small device disconnect response");
		return;
	}

	dev_disconnected(adapter, &rp->addr, MGMT_DEV_DISCONN_LOCAL_HOST);
}
| 0 |
[
"CWE-862",
"CWE-863"
] |
bluez
|
b497b5942a8beb8f89ca1c359c54ad67ec843055
| 198,474,890,474,519,470,000,000,000,000,000,000,000 | 24 |
adapter: Fix storing discoverable setting
discoverable setting shall only be store when changed via Discoverable
property and not when discovery client set it as that be considered
temporary just for the lifetime of the discovery.
|
static void clear_ftrace_swapper(void)
{
struct task_struct *p;
int cpu;
get_online_cpus();
for_each_online_cpu(cpu) {
p = idle_task(cpu);
clear_tsk_trace_trace(p);
}
put_online_cpus();
}
| 0 |
[
"CWE-703"
] |
linux
|
6a76f8c0ab19f215af2a3442870eeb5f0e81998d
| 152,999,700,127,071,690,000,000,000,000,000,000,000 | 12 |
tracing: Fix possible NULL pointer dereferences
Currently set_ftrace_pid and set_graph_function files use seq_lseek
for their fops. However seq_open() is called only for FMODE_READ in
the fops->open() so that if an user tries to seek one of those file
when she open it for writing, it sees NULL seq_file and then panic.
It can be easily reproduced with following command:
$ cd /sys/kernel/debug/tracing
$ echo 1234 | sudo tee -a set_ftrace_pid
In this example, GNU coreutils' tee opens the file with fopen(, "a")
and then the fopen() internally calls lseek().
Link: http://lkml.kernel.org/r/[email protected]
Cc: Frederic Weisbecker <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Namhyung Kim <[email protected]>
Cc: [email protected]
Signed-off-by: Namhyung Kim <[email protected]>
Signed-off-by: Steven Rostedt <[email protected]>
|
/* Workqueue function that commits a transaction on behalf of the task
 * that scheduled it (async commit). The scheduling task handed over its
 * freeze protection with the transaction, so re-acquire it for lockdep
 * here before committing. Frees the async-commit bookkeeping when done.
 */
static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work);

	/*
	 * We've got freeze protection passed with the transaction.
	 * Tell lockdep about it.
	 */
	if (ac->newtrans->type & __TRANS_FREEZABLE)
		__sb_writers_acquired(ac->newtrans->fs_info->sb, SB_FREEZE_FS);

	/* Make the commit run with this worker as the transaction owner. */
	current->journal_info = ac->newtrans;

	btrfs_commit_transaction(ac->newtrans);
	kfree(ac);
}
| 0 |
[
"CWE-703",
"CWE-667"
] |
linux
|
1cb3db1cf383a3c7dbda1aa0ce748b0958759947
| 60,720,773,050,495,610,000,000,000,000,000,000,000 | 17 |
btrfs: fix deadlock with concurrent chunk allocations involving system chunks
When a task attempting to allocate a new chunk verifies that there is not
currently enough free space in the system space_info and there is another
task that allocated a new system chunk but it did not finish yet the
creation of the respective block group, it waits for that other task to
finish creating the block group. This is to avoid exhaustion of the system
chunk array in the superblock, which is limited, when we have a thundering
herd of tasks allocating new chunks. This problem was described and fixed
by commit eafa4fd0ad0607 ("btrfs: fix exhaustion of the system chunk array
due to concurrent allocations").
However there are two very similar scenarios where this can lead to a
deadlock:
1) Task B allocated a new system chunk and task A is waiting on task B
to finish creation of the respective system block group. However before
task B ends its transaction handle and finishes the creation of the
system block group, it attempts to allocate another chunk (like a data
chunk for an fallocate operation for a very large range). Task B will
be unable to progress and allocate the new chunk, because task A set
space_info->chunk_alloc to 1 and therefore it loops at
btrfs_chunk_alloc() waiting for task A to finish its chunk allocation
and set space_info->chunk_alloc to 0, but task A is waiting on task B
to finish creation of the new system block group, therefore resulting
in a deadlock;
2) Task B allocated a new system chunk and task A is waiting on task B to
finish creation of the respective system block group. By the time that
task B enter the final phase of block group allocation, which happens
at btrfs_create_pending_block_groups(), when it modifies the extent
tree, the device tree or the chunk tree to insert the items for some
new block group, it needs to allocate a new chunk, so it ends up at
btrfs_chunk_alloc() and keeps looping there because task A has set
space_info->chunk_alloc to 1, but task A is waiting for task B to
finish creation of the new system block group and release the reserved
system space, therefore resulting in a deadlock.
In short, the problem is if a task B needs to allocate a new chunk after
it previously allocated a new system chunk and if another task A is
currently waiting for task B to complete the allocation of the new system
chunk.
Unfortunately this deadlock scenario introduced by the previous fix for
the system chunk array exhaustion problem does not have a simple and short
fix, and requires a big change to rework the chunk allocation code so that
chunk btree updates are all made in the first phase of chunk allocation.
And since this deadlock regression is being frequently hit on zoned
filesystems and the system chunk array exhaustion problem is triggered
in more extreme cases (originally observed on PowerPC with a node size
of 64K when running the fallocate tests from stress-ng), revert the
changes from that commit. The next patch in the series, with a subject
of "btrfs: rework chunk allocation to avoid exhaustion of the system
chunk array" does the necessary changes to fix the system chunk array
exhaustion problem.
Reported-by: Naohiro Aota <[email protected]>
Link: https://lore.kernel.org/linux-btrfs/20210621015922.ewgbffxuawia7liz@naota-xeon/
Fixes: eafa4fd0ad0607 ("btrfs: fix exhaustion of the system chunk array due to concurrent allocations")
CC: [email protected] # 5.12+
Tested-by: Shin'ichiro Kawasaki <[email protected]>
Tested-by: Naohiro Aota <[email protected]>
Signed-off-by: Filipe Manana <[email protected]>
Tested-by: David Sterba <[email protected]>
Signed-off-by: David Sterba <[email protected]>
|
/* Accept an incoming RDMA CM connection over IB.
 *
 * Transitions the QP through RTR and RTS with the caller-supplied
 * connection parameters, then sends a CM REP carrying the private data
 * and negotiated attributes. Returns 0 on success or a negative errno.
 */
static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	ret = cma_modify_qp_rts(id_priv, conn_param);
	if (ret)
		goto out;

	/* Zero the whole struct (including padding) before filling it in. */
	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	/* Clamp the RNR retry count to the protocol maximum of 7. */
	rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
	rep.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}
| 0 |
[
"CWE-20"
] |
linux
|
b2853fd6c2d0f383dbdf7427e263eb576a633867
| 216,866,462,305,215,320,000,000,000,000,000,000,000 | 30 |
IB/core: Don't resolve passive side RoCE L2 address in CMA REQ handler
The code that resolves the passive side source MAC within the rdma_cm
connection request handler was both redundant and buggy, so remove it.
It was redundant since later, when an RC QP is modified to RTR state,
the resolution will take place in the ib_core module. It was buggy
because this callback also deals with UD SIDR exchange, for which we
incorrectly looked at the REQ member of the CM event and dereferenced
a random value.
Fixes: dd5f03beb4f7 ("IB/core: Ethernet L2 attributes in verbs/cm structures")
Signed-off-by: Moni Shoua <[email protected]>
Signed-off-by: Or Gerlitz <[email protected]>
Signed-off-by: Roland Dreier <[email protected]>
|
/* Replace the session's stored display seat id with a copy of @name.
 * Duplicate @name *before* freeing the old value: if a caller passes
 * self->priv->display_seat_id itself, freeing first would make g_strdup
 * read freed memory (use-after-free on self-assignment). */
set_display_seat_id (GdmSession *self,
                     const char *name)
{
        char *name_copy;

        name_copy = g_strdup (name);
        g_free (self->priv->display_seat_id);
        self->priv->display_seat_id = name_copy;
}
| 0 |
[] |
gdm
|
5ac224602f1d603aac5eaa72e1760d3e33a26f0a
| 119,613,650,426,027,270,000,000,000,000,000,000,000 | 6 |
session: disconnect signals from worker proxy when conversation is freed
We don't want an outstanding reference on the worker proxy to lead to
signal handlers getting dispatched after the conversation is freed.
https://bugzilla.gnome.org/show_bug.cgi?id=758032
|
/*
 * Heuristically check whether "suspect" is a git directory.
 *
 * Worktree signature: a valid HEAD ref directly under "suspect".
 * Non-worktree signatures (checked under the common dir, which differs
 * from "suspect" for linked worktrees): a searchable object store --
 * either the one named by DB_ENVIRONMENT, if set, or "<common>/objects"
 * -- and a searchable "<common>/refs" directory.
 * Returns 1 when all signatures match, 0 otherwise.
 */
int is_git_directory(const char *suspect)
{
	struct strbuf path = STRBUF_INIT;
	int ret = 0;
	size_t len;

	/* Check worktree-related signatures */
	strbuf_addstr(&path, suspect);
	strbuf_complete(&path, '/');
	strbuf_addstr(&path, "HEAD");
	if (validate_headref(path.buf))
		goto done;

	strbuf_reset(&path);
	get_common_dir(&path, suspect);
	len = path.len; /* remember the common-dir prefix for reuse below */

	/* Check non-worktree-related signatures */
	if (getenv(DB_ENVIRONMENT)) {
		if (access(getenv(DB_ENVIRONMENT), X_OK))
			goto done;
	}
	else {
		strbuf_setlen(&path, len);
		strbuf_addstr(&path, "/objects");
		if (access(path.buf, X_OK))
			goto done;
	}

	strbuf_setlen(&path, len);
	strbuf_addstr(&path, "/refs");
	if (access(path.buf, X_OK))
		goto done;

	ret = 1;
done:
	strbuf_release(&path);
	return ret;
}
| 0 |
[
"CWE-22"
] |
git
|
3b0bf2704980b1ed6018622bdf5377ec22289688
| 23,056,306,118,853,590,000,000,000,000,000,000,000 | 39 |
setup: tighten ownership checks post CVE-2022-24765
8959555cee7 (setup_git_directory(): add an owner check for the top-level
directory, 2022-03-02), adds a function to check for ownership of
repositories using a directory that is representative of it, and ways to
add exempt a specific repository from said check if needed, but that
check didn't account for ownership of the gitdir, or (when used) the
gitfile that points to that gitdir.
An attacker could create a git repository in a directory that they can
write into but that is owned by the victim to work around the fix that
was introduced with CVE-2022-24765 to potentially run code as the
victim.
An example that could result in privilege escalation to root in *NIX would
be to set a repository in a shared tmp directory by doing (for example):
$ git -C /tmp init
To avoid that, extend the ensure_valid_ownership function to be able to
check for all three paths.
This will have the side effect of tripling the number of stat() calls
when a repository is detected, but the effect is expected to be likely
minimal, as it is done only once during the directory walk in which Git
looks for a repository.
Additionally make sure to resolve the gitfile (if one was used) to find
the relevant gitdir for checking.
While at it change the message printed on failure so it is clear we are
referring to the repository by its worktree (or gitdir if it is bare) and
not to a specific directory.
Helped-by: Junio C Hamano <[email protected]>
Helped-by: Johannes Schindelin <[email protected]>
Signed-off-by: Carlo Marcelo Arenas Belón <[email protected]>
|
/*
 * kernel_sendmsg - send a message on a socket from kernel space.
 *
 * Temporarily widens the address-limit check (set_fs(KERNEL_DS)) so
 * sock_sendmsg() accepts kernel-space buffers, then restores the old
 * limit. Returns the number of bytes sent or a negative error.
 */
int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
		   struct kvec *vec, size_t num, size_t size)
{
	mm_segment_t oldfs = get_fs();
	int result;

	set_fs(KERNEL_DS);
	/*
	 * the following is safe, since for compiler definitions of kvec and
	 * iovec are identical, yielding the same in-core layout and alignment
	 */
	msg->msg_iov = (struct iovec *)vec;
	msg->msg_iovlen = num;
	result = sock_sendmsg(sock, msg, size);
	set_fs(oldfs);
	return result;
}
| 0 |
[] |
linux-2.6
|
644595f89620ba8446cc555be336d24a34464950
| 258,729,038,230,476,930,000,000,000,000,000,000,000 | 17 |
compat: Handle COMPAT_USE_64BIT_TIME in net/socket.c
Use helper functions aware of COMPAT_USE_64BIT_TIME to write struct
timeval and struct timespec to userspace in net/socket.c.
Signed-off-by: H. Peter Anvin <[email protected]>
|
/* Forward one environment variable (key/value) to the session worker over
 * D-Bus. Fire-and-forget: no completion callback is installed; the call is
 * tied to the conversation's cancellable so it is dropped on teardown. */
send_environment_variable (const char *key,
                           const char *value,
                           GdmSessionConversation *conversation)
{
        gdm_dbus_worker_call_set_environment_variable (conversation->worker_proxy,
                                                       key, value,
                                                       conversation->worker_cancellable,
                                                       NULL, NULL);
}
| 0 |
[] |
gdm
|
05e5fc24b0f803098c1d05dae86f5eb05bd0c2a4
| 91,046,512,249,261,150,000,000,000,000,000,000,000 | 9 |
session: Cancel worker proxy async ops when freeing conversations
We need to cancel ongoing async ops for worker proxies when freeing
conversations or we'll crash when the completion handler runs and we
access free'd memory.
https://bugzilla.gnome.org/show_bug.cgi?id=758032
|
// Mutable accessor for the handshake-layer state; the reference stays
// valid for the lifetime of this States object.
HandShakeState& States::useHandShake()
{
    return handshakeLayer_;
}
| 0 |
[
"CWE-254"
] |
mysql-server
|
e7061f7e5a96c66cb2e0bf46bec7f6ff35801a69
| 199,718,320,755,367,980,000,000,000,000,000,000,000 | 4 |
Bug #22738607: YASSL FUNCTION X509_NAME_GET_INDEX_BY_NID IS NOT WORKING AS EXPECTED.
|
/* Post-verification fixup of bpf-to-bpf calls.
 *
 * Preferred path: JIT all subprograms (jit_subprogs). If the JIT is not
 * requested/possible and the interpreter must run the program, patch each
 * BPF_PSEUDO_CALL with the callee's stack depth so the interpreter can
 * set up callee frames. Returns 0 on success or a negative errno.
 */
static int fixup_call_args(struct bpf_verifier_env *env)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	struct bpf_prog *prog = env->prog;
	struct bpf_insn *insn = prog->insnsi;
	int i, depth;
#endif
	int err = 0;
	/* Try the JIT first; -EFAULT is fatal, any other failure falls
	 * back to the interpreter path below. */
	if (env->prog->jit_requested &&
	    !bpf_prog_is_dev_bound(env->prog->aux)) {
		err = jit_subprogs(env);
		if (err == 0)
			return 0;
		if (err == -EFAULT)
			return err;
	}
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) {
		/* When JIT fails the progs with bpf2bpf calls and tail_calls
		 * have to be rejected, since interpreter doesn't support them yet.
		 */
		verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
		return -EINVAL;
	}
	for (i = 0; i < prog->len; i++, insn++) {
		/* Only pseudo-calls (bpf-to-bpf) need patching. */
		if (insn->code != (BPF_JMP | BPF_CALL) ||
		    insn->src_reg != BPF_PSEUDO_CALL)
			continue;
		depth = get_callee_stack_depth(env, insn, i);
		if (depth < 0)
			return depth;
		bpf_patch_call_args(insn, depth);
	}
	err = 0;
#endif
	return err;
}
| 0 |
[] |
linux
|
9b00f1b78809309163dda2d044d9e94a3c0248a3
| 268,405,499,406,863,900,000,000,000,000,000,000,000 | 38 |
bpf: Fix truncation handling for mod32 dst reg wrt zero
Recently noticed that when mod32 with a known src reg of 0 is performed,
then the dst register is 32-bit truncated in verifier:
0: R1=ctx(id=0,off=0,imm=0) R10=fp0
0: (b7) r0 = 0
1: R0_w=inv0 R1=ctx(id=0,off=0,imm=0) R10=fp0
1: (b7) r1 = -1
2: R0_w=inv0 R1_w=inv-1 R10=fp0
2: (b4) w2 = -1
3: R0_w=inv0 R1_w=inv-1 R2_w=inv4294967295 R10=fp0
3: (9c) w1 %= w0
4: R0_w=inv0 R1_w=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2_w=inv4294967295 R10=fp0
4: (b7) r0 = 1
5: R0_w=inv1 R1_w=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2_w=inv4294967295 R10=fp0
5: (1d) if r1 == r2 goto pc+1
R0_w=inv1 R1_w=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2_w=inv4294967295 R10=fp0
6: R0_w=inv1 R1_w=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2_w=inv4294967295 R10=fp0
6: (b7) r0 = 2
7: R0_w=inv2 R1_w=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2_w=inv4294967295 R10=fp0
7: (95) exit
7: R0=inv1 R1=inv(id=0,umin_value=4294967295,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2=inv4294967295 R10=fp0
7: (95) exit
However, as a runtime result, we get 2 instead of 1, meaning the dst
register does not contain (u32)-1 in this case. The reason is fairly
straight forward given the 0 test leaves the dst register as-is:
# ./bpftool p d x i 23
0: (b7) r0 = 0
1: (b7) r1 = -1
2: (b4) w2 = -1
3: (16) if w0 == 0x0 goto pc+1
4: (9c) w1 %= w0
5: (b7) r0 = 1
6: (1d) if r1 == r2 goto pc+1
7: (b7) r0 = 2
8: (95) exit
This was originally not an issue given the dst register was marked as
completely unknown (aka 64 bit unknown). However, after 468f6eafa6c4
("bpf: fix 32-bit ALU op verification") the verifier casts the register
output to 32 bit, and hence it becomes 32 bit unknown. Note that for
the case where the src register is unknown, the dst register is marked
64 bit unknown. After the fix, the register is truncated by the runtime
and the test passes:
# ./bpftool p d x i 23
0: (b7) r0 = 0
1: (b7) r1 = -1
2: (b4) w2 = -1
3: (16) if w0 == 0x0 goto pc+2
4: (9c) w1 %= w0
5: (05) goto pc+1
6: (bc) w1 = w1
7: (b7) r0 = 1
8: (1d) if r1 == r2 goto pc+1
9: (b7) r0 = 2
10: (95) exit
Semantics also match with {R,W}x mod{64,32} 0 -> {R,W}x. Invalid div
has always been {R,W}x div{64,32} 0 -> 0. Rewrites are as follows:
mod32: mod64:
(16) if w0 == 0x0 goto pc+2 (15) if r0 == 0x0 goto pc+1
(9c) w1 %= w0 (9f) r1 %= r0
(05) goto pc+1
(bc) w1 = w1
Fixes: 468f6eafa6c4 ("bpf: fix 32-bit ALU op verification")
Signed-off-by: Daniel Borkmann <[email protected]>
Reviewed-by: John Fastabend <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]>
|
/** Parse one DXF group code common to all table entries.
 *  Handles handle/owner/name/flags plus extended-data (xdata) groups
 *  1000-1071, which are accumulated into extData.
 *  NOTE(review): 'curr' accumulates the components of an xdata coordinate
 *  across successive 10xx groups; it is presumably reset elsewhere when a
 *  coordinate is complete -- verify its lifetime against DRW_Variant
 *  ownership (this area was touched by the CVE-2021-21900 fix). */
void DRW_TableEntry::parseCode(int code, dxfReader *reader){
    switch (code) {
    case 5:
        handle = reader->getHandleString();
        break;
    case 330:
        parentHandle = reader->getHandleString();
        break;
    case 2:
        name = reader->getUtf8String();
        break;
    case 70:
        flags = reader->getInt32();
        break;
    // Xdata string groups: stored verbatim.
    case 1000:
    case 1001:
    case 1002:
    case 1003:
    case 1004:
    case 1005:
        extData.push_back(new DRW_Variant(code, reader->getString()));
        break;
    // Xdata coordinate, X component: start a new coord or fill an existing one.
    case 1010:
    case 1011:
    case 1012:
    case 1013:
        // don't trust in X, Y, Z order!
        if (nullptr != curr) {
            curr->setCoordX( reader->getDouble());
        }
        else {
            curr = new DRW_Variant( code, DRW_Coord( reader->getDouble(), 0.0, 0.0));
            extData.push_back(curr);
        }
        break;
    // Xdata coordinate, Y component.
    case 1020:
    case 1021:
    case 1022:
    case 1023:
        // don't trust in X, Y, Z order!
        if (nullptr != curr) {
            curr->setCoordY( reader->getDouble());
        }
        else {
            curr = new DRW_Variant( code, DRW_Coord( 0.0, reader->getDouble(), 0.0));
            extData.push_back(curr);
        }
        break;
    // Xdata coordinate, Z component.
    case 1030:
    case 1031:
    case 1032:
    case 1033:
        // don't trust in X, Y, Z order!
        if (nullptr != curr) {
            curr->setCoordZ( reader->getDouble());
        }
        else {
            curr = new DRW_Variant( code, DRW_Coord( 0.0, 0.0, reader->getDouble()));
            extData.push_back(curr);
        }
        break;
    // Xdata scalar doubles.
    case 1040:
    case 1041:
    case 1042:
        extData.push_back(new DRW_Variant(code, reader->getDouble()));
        break;
    // Xdata integers.
    case 1070:
    case 1071:
        extData.push_back(new DRW_Variant(code, reader->getInt32() ));
        break;
    default:
        // Unknown group codes are ignored.
        break;
    }
}
| 0 |
[
"CWE-191"
] |
libdxfrw
|
fcd977cc7f8f6cc7f012e5b72d33cf7d77b3fa69
| 53,643,505,674,599,120,000,000,000,000,000,000,000 | 74 |
fixed heap use after free vulnerability CVE-2021-21900
as reported in TALOS-2021-1351 / CVE-2021-21900,
DRW_TableEntry::parseCode had the potential to trigger a use-after-free exception with a malformed DXF file.
|
void b43_hf_write(struct b43_wldev *dev, u64 value)
{
	/* Split the 48-bit host-flags word into three 16-bit chunks and
	 * write them to the shared-memory host-flag slots. */
	u16 word0 = (u16)(value & 0xFFFF);
	u16 word1 = (u16)((value >> 16) & 0xFFFF);
	u16 word2 = (u16)((value >> 32) & 0xFFFF);

	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF1, word0);
	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF2, word1);
	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF3, word2);
}
| 0 |
[
"CWE-134"
] |
wireless
|
9538cbaab6e8b8046039b4b2eb6c9d614dc782bd
| 16,762,412,049,287,300,000,000,000,000,000,000,000 | 11 |
b43: stop format string leaking into error msgs
The module parameter "fwpostfix" is userspace controllable, unfiltered,
and is used to define the firmware filename. b43_do_request_fw() populates
ctx->errors[] on error, containing the firmware filename. b43err()
parses its arguments as a format string. For systems with b43 hardware,
this could lead to a uid-0 to ring-0 escalation.
CVE-2013-2852
Signed-off-by: Kees Cook <[email protected]>
Cc: [email protected]
Signed-off-by: John W. Linville <[email protected]>
|
/* Allocate a new Primary Item ('pitm') box.
 * ISOM_DECL_BOX_ALLOC declares and initializes a local 'tmp' of the
 * given concrete type and box code (macro defined elsewhere). */
GF_Box *pitm_box_new()
{
	ISOM_DECL_BOX_ALLOC(GF_PrimaryItemBox, GF_ISOM_BOX_TYPE_PITM);
	return (GF_Box *)tmp;
}
| 0 |
[
"CWE-401",
"CWE-787"
] |
gpac
|
ec64c7b8966d7e4642d12debb888be5acf18efb9
| 248,184,825,776,835,900,000,000,000,000,000,000,000 | 5 |
fixed #1786 (fuzz)
|
storageVolLookupByName(virStoragePoolPtr pool,
                       const char *name)
{
    virStorageVolPtr vol = NULL;
    virStoragePoolObj *poolobj;
    virStoragePoolDef *pooldef;
    virStorageVolDef *voldef;

    /* NOTE(review): the pool object presumably comes back locked/refed
     * and must be released via virStoragePoolObjEndAPI — confirm. */
    if (!(poolobj = virStoragePoolObjFromStoragePool(pool)))
        return NULL;
    pooldef = virStoragePoolObjGetDef(poolobj);

    if (!virStoragePoolObjIsActive(poolobj)) {
        virReportError(VIR_ERR_OPERATION_INVALID,
                       _("storage pool '%s' is not active"), pooldef->name);
        goto cleanup;
    }

    if (!(voldef = virStorageVolDefFindByName(poolobj, name))) {
        virReportError(VIR_ERR_NO_STORAGE_VOL,
                       _("no storage vol with matching name '%s'"),
                       name);
        goto cleanup;
    }

    if (virStorageVolLookupByNameEnsureACL(pool->conn, pooldef, voldef) < 0)
        goto cleanup;

    vol = virGetStorageVol(pool->conn, pooldef->name, voldef->name,
                           voldef->key, NULL, NULL);

 cleanup:
    virStoragePoolObjEndAPI(&poolobj);
    return vol;
}
| 0 |
[] |
libvirt
|
447f69dec47e1b0bd15ecd7cd49a9fd3b050fb87
| 11,156,608,380,445,946,000,000,000,000,000,000,000 | 37 |
storage_driver: Unlock object on ACL fail in storagePoolLookupByTargetPath
'virStoragePoolObjListSearch' returns a locked and refed object, thus we
must release it on ACL permission failure.
Fixes: 7aa0e8c0cb8
Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=1984318
Signed-off-by: Peter Krempa <[email protected]>
Reviewed-by: Michal Privoznik <[email protected]>
|
/*
 * Validate one in-memory copy of a superblock read during scrub.
 *
 * Checks the recorded logical bytenr, the generation, the fsid and the
 * on-disk checksum against a CRC recomputed over the pages of @sblock.
 * Returns the number of failures found (0 means the block is good);
 * corruption and generation errors are also counted in the scrub stats
 * and the per-device error counters.
 */
static int scrub_checksum_super(struct scrub_block *sblock)
{
	struct btrfs_super_block *s;
	struct scrub_ctx *sctx = sblock->sctx;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail_gen = 0;
	int fail_cor = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	/* The fixed superblock fields all live in the first page. */
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	s = (struct btrfs_super_block *)mapped_buffer;
	memcpy(on_disk_csum, s->csum, sctx->csum_size);

	if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
		++fail_cor;

	if (sblock->pagev[0]->generation != btrfs_super_generation(s))
		++fail_gen;

	if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
		++fail_cor;

	/* Checksum everything after the csum field, page by page. */
	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(p, crc, l);
		/* Unmap the page we just consumed before moving on. */
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		++fail_cor;

	if (fail_cor + fail_gen) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		if (fail_cor)
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
	}

	return fail_cor + fail_gen;
}
| 0 |
[
"CWE-476",
"CWE-284"
] |
linux
|
09ba3bc9dd150457c506e4661380a6183af651c1
| 238,987,694,982,694,760,000,000,000,000,000,000,000 | 75 |
btrfs: merge btrfs_find_device and find_device
Both btrfs_find_device() and find_device() does the same thing except
that the latter does not take the seed device onto account in the device
scanning context. We can merge them.
Signed-off-by: Anand Jain <[email protected]>
Reviewed-by: David Sterba <[email protected]>
Signed-off-by: David Sterba <[email protected]>
|
void FreeArray( _cmsDICarray* a)
{
    // Release each dictionary sub-array that was actually allocated;
    // a NULL Offsets pointer marks an element that was never created.
    if (a->Name.Offsets) FreeElem(&a->Name);
    if (a->Value.Offsets) FreeElem(&a->Value);
    if (a->DisplayName.Offsets) FreeElem(&a->DisplayName);
    if (a->DisplayValue.Offsets) FreeElem(&a->DisplayValue);
}
| 0 |
[] |
Little-CMS
|
41d222df1bc6188131a8f46c32eab0a4d4cdf1b6
| 164,412,154,402,837,490,000,000,000,000,000,000,000 | 7 |
Memory squeezing fix: lcms2 cmsPipeline construction
When creating a new pipeline, lcms would often try to allocate a stage
and pass it to cmsPipelineInsertStage without checking whether the
allocation succeeded. cmsPipelineInsertStage would then assert (or crash)
if it had not.
The fix here is to change cmsPipelineInsertStage to check and return
an error value. All calling code is then checked to test this return
value and cope.
|
/*
 * Parse a dot-atom-text starting at *indx within [message, message+length).
 * NOTE(review): this simply delegates to mailimf_atom_parse(), so any
 * dot handling happens inside the atom parser — confirm this matches
 * the intended dot-atom-text grammar.
 */
mailimf_dot_atom_text_parse(const char * message, size_t length,
			    size_t * indx, char ** result)
{
  return mailimf_atom_parse(message, length, indx, result);
}
| 0 |
[
"CWE-476"
] |
libetpan
|
1fe8fbc032ccda1db9af66d93016b49c16c1f22d
| 310,082,218,325,960,820,000,000,000,000,000,000,000 | 5 |
Fixed crash #274
|
// Convenience wrapper: reset the given binlog cache by delegating to
// binlog_cache_data::reset() (defined elsewhere).
void reset_cache(binlog_cache_data* cache_data)
{
  cache_data->reset();
}
| 0 |
[
"CWE-264"
] |
mysql-server
|
48bd8b16fe382be302c6f0b45931be5aa6f29a0e
| 268,210,531,933,444,980,000,000,000,000,000,000,000 | 4 |
Bug#24388753: PRIVILEGE ESCALATION USING MYSQLD_SAFE
[This is the 5.5/5.6 version of the bugfix].
The problem was that it was possible to write log files ending
in .ini/.cnf that later could be parsed as an options file.
This made it possible for users to specify startup options
without the permissions to do so.
This patch fixes the problem by disallowing general query log
and slow query log to be written to files ending in .ini and .cnf.
|
struct dentry *kern_path_create(int dfd, const char *pathname,
				struct path *path, unsigned int lookup_flags)
{
	struct dentry *dentry;
	struct filename *name = getname_kernel(pathname);

	/* getname_kernel() may fail; propagate the error as a dentry. */
	if (IS_ERR(name))
		return ERR_CAST(name);

	dentry = filename_create(dfd, name, path, lookup_flags);
	putname(name);
	return dentry;
}
| 0 |
[
"CWE-416"
] |
linux
|
f15133df088ecadd141ea1907f2c96df67c729f0
| 83,121,654,145,723,520,000,000,000,000,000,000,000 | 12 |
path_openat(): fix double fput()
path_openat() jumps to the wrong place after do_tmpfile() - it has
already done path_cleanup() (as part of path_lookupat() called by
do_tmpfile()), so doing that again can lead to double fput().
Cc: [email protected] # v3.11+
Signed-off-by: Al Viro <[email protected]>
|
/*
 * Return a pointer to the first buffer in pMessage whose BufferType
 * matches, or NULL when no buffer of that type is present.
 *
 * Fix: the loop index was a signed int compared against the unsigned
 * ULONG cBuffers, a signed/unsigned mismatch; use ULONG throughout.
 */
PSecBuffer sspi_FindSecBuffer(PSecBufferDesc pMessage, ULONG BufferType)
{
	ULONG index;
	PSecBuffer pSecBuffer = NULL;

	for (index = 0; index < pMessage->cBuffers; index++)
	{
		if (pMessage->pBuffers[index].BufferType == BufferType)
		{
			pSecBuffer = &pMessage->pBuffers[index];
			break;
		}
	}

	return pSecBuffer;
}
| 0 |
[
"CWE-476",
"CWE-125"
] |
FreeRDP
|
0773bb9303d24473fe1185d85a424dfe159aff53
| 172,003,609,947,014,770,000,000,000,000,000,000,000 | 16 |
nla: invalidate sec handle after creation
If sec pointer isn't invalidated after creation it is not possible
to check if the upper and lower pointers are valid.
This fixes a segfault in the server part if the client disconnects before
the authentication was finished.
|
/*
 * Return true when @ifp is effectively the only link-local address on
 * its device: no *other* link-local address on idev->addr_list is a
 * plain permanent one (i.e. permanent and not tentative, optimistic or
 * DAD-failed). Addresses with scope wider than IFA_LINK end the scan.
 */
static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
{
	struct inet6_ifaddr *ifpiter;
	struct inet6_dev *idev = ifp->idev;

	/* NOTE(review): iterating in reverse presumably visits link-local
	 * entries first — confirm list ordering invariant on addr_list. */
	list_for_each_entry_reverse(ifpiter, &idev->addr_list, if_list) {
		if (ifpiter->scope > IFA_LINK)
			break;
		if (ifp != ifpiter && ifpiter->scope == IFA_LINK &&
		    (ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|
				       IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) ==
		    IFA_F_PERMANENT)
			return false;
	}
	return true;
}
| 0 |
[
"CWE-20"
] |
linux
|
77751427a1ff25b27d47a4c36b12c3c8667855ac
| 301,734,247,971,482,750,000,000,000,000,000,000,000 | 16 |
ipv6: addrconf: validate new MTU before applying it
Currently we don't check if the new MTU is valid or not and this allows
one to configure a smaller than minimum allowed by RFCs or even bigger
than interface own MTU, which is a problem as it may lead to packet
drops.
If you have a daemon like NetworkManager running, this may be exploited
by remote attackers by forging RA packets with an invalid MTU, possibly
leading to a DoS. (NetworkManager currently only validates for values
too small, but not for too big ones.)
The fix is just to make sure the new value is valid. That is, between
IPV6_MIN_MTU and interface's MTU.
Note that similar check is already performed at
ndisc_router_discovery(), for when kernel itself parses the RA.
Signed-off-by: Marcelo Ricardo Leitner <[email protected]>
Signed-off-by: Sabrina Dubroca <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/*
 * Allocate per-task return stacks and hook the sched_switch tracepoint
 * to start function-graph tracing.
 *
 * Returns 0 on success or a negative errno (-ENOMEM on allocation
 * failure, or the tracepoint registration error).
 */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	/* Scratch array used by alloc_retstack_tasklist() below. */
	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				 sizeof(struct ftrace_ret_stack *),
				 GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	}

	/* -EAGAIN means the task list changed under us; retry. */
	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}
| 0 |
[
"CWE-703"
] |
linux
|
6a76f8c0ab19f215af2a3442870eeb5f0e81998d
| 82,330,166,413,653,350,000,000,000,000,000,000,000 | 32 |
tracing: Fix possible NULL pointer dereferences
Currently set_ftrace_pid and set_graph_function files use seq_lseek
for their fops. However seq_open() is called only for FMODE_READ in
the fops->open() so that if an user tries to seek one of those file
when she open it for writing, it sees NULL seq_file and then panic.
It can be easily reproduced with following command:
$ cd /sys/kernel/debug/tracing
$ echo 1234 | sudo tee -a set_ftrace_pid
In this example, GNU coreutils' tee opens the file with fopen(, "a")
and then the fopen() internally calls lseek().
Link: http://lkml.kernel.org/r/[email protected]
Cc: Frederic Weisbecker <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Namhyung Kim <[email protected]>
Cc: [email protected]
Signed-off-by: Namhyung Kim <[email protected]>
Signed-off-by: Steven Rostedt <[email protected]>
|
/*
 * Append an EDNS Client Subnet OPT record to the DNS packet in 'packet'.
 *
 * packet/packetSize: the outgoing buffer and its capacity.
 * len:               in/out — current packet length; advanced on success.
 * remote:            address used as the ECS source.
 * stamp:             if >= 0, its low byte overwrites the first byte of
 *                    the (v4) address before building the netmask.
 *
 * Fix: 'packetSize - *len' is an unsigned subtraction; if *len ever
 * exceeds packetSize it wraps to a huge value, the "does it fit" check
 * passes, and memcpy writes past the buffer (out-of-bounds write).
 * Guard the subtraction so the record is only appended when it truly fits.
 */
static void addECSOption(char* packet, const size_t& packetSize, uint16_t* len, const ComboAddress& remote, int stamp)
{
  string EDNSRR;
  struct dnsheader* dh = (struct dnsheader*) packet;

  EDNSSubnetOpts eso;
  if(stamp < 0)
    eso.source = Netmask(remote);
  else {
    ComboAddress stamped(remote);
    *((char*)&stamped.sin4.sin_addr.s_addr)=stamp;
    eso.source = Netmask(stamped);
  }
  string optRData=makeEDNSSubnetOptsString(eso);
  string record;
  generateEDNSOption(EDNSOptionCode::ECS, optRData, record);
  generateOptRR(record, EDNSRR);

  uint16_t arcount = ntohs(dh->arcount);
  /* does it fit in the existing buffer? Check *len <= packetSize first so
     the unsigned subtraction cannot wrap around and defeat the test. */
  if (*len <= packetSize && packetSize - *len > EDNSRR.size()) {
    arcount++;
    dh->arcount = htons(arcount);
    memcpy(packet + *len, EDNSRR.c_str(), EDNSRR.size());
    *len += EDNSRR.size();
  }
}
| 1 |
[
"CWE-787"
] |
pdns
|
f9c57c98da1b1007a51680629b667d57d9b702b8
| 222,164,442,128,917,060,000,000,000,000,000,000,000 | 28 |
dnsreplay: Bail out on a too small outgoing buffer
|
// Set the current length; no bounds check is performed here — callers
// are responsible for keeping new_len within the underlying capacity.
void set_length(int new_len) { cur_len_ = new_len; }
| 0 |
[] |
envoy
|
3b5acb2f43548862dadb243de7cf3994986a8e04
| 63,016,260,637,805,140,000,000,000,000,000,000,000 | 1 |
http, url: Bring back chromium_url and http_parser_parse_url (#198)
* Revert GURL as HTTP URL parser utility
This reverts:
1. commit c9c4709c844b90b9bb2935d784a428d667c9df7d
2. commit d828958b591a6d79f4b5fa608ece9962b7afbe32
3. commit 2d69e30c51f2418faf267aaa6c1126fce9948c62
Signed-off-by: Dhi Aurrahman <[email protected]>
|
/*
 * Read the owner name of a textual RR from @strbuf into wire format.
 *
 * rr/len:       output buffer for the wire-format dname and its capacity.
 * dname_len:    out — length of the dname written to rr.
 * origin/prev:  wire-format names used when the owner is "@" (origin) or
 *               empty (previous owner), falling back to the root name.
 * token:        scratch buffer (token_len bytes) for the parsed text.
 *
 * Returns LDNS_WIREPARSE_ERR_OK or a wireparse error code with the
 * buffer position encoded via RET_ERR/RET_ERR_SHIFT.
 */
rrinternal_get_owner(sldns_buffer* strbuf, uint8_t* rr, size_t* len,
	size_t* dname_len, uint8_t* origin, size_t origin_len, uint8_t* prev,
	size_t prev_len, char* token, size_t token_len)
{
	/* split the rr in its parts -1 signals trouble */
	if(sldns_bget_token(strbuf, token, "\t\n ", token_len) == -1) {
		return RET_ERR(LDNS_WIREPARSE_ERR_SYNTAX,
			sldns_buffer_position(strbuf));
	}

	if(token_len < 2) /* make sure there is space to read "@" or "" */
		return RET_ERR(LDNS_WIREPARSE_ERR_BUFFER_TOO_SMALL,
			sldns_buffer_position(strbuf));
	if(token[0]=='@' && token[1]=='\0') {
		/* "@" means: use the zone origin as the owner name. */
		uint8_t* tocopy;
		if (origin) {
			*dname_len = origin_len;
			tocopy = origin;
		} else if (prev) {
			*dname_len = prev_len;
			tocopy = prev;
		} else {
			/* default to root */
			*dname_len = 1;
			tocopy = (uint8_t*)"\0";
		}
		if(*len < *dname_len)
			return RET_ERR(LDNS_WIREPARSE_ERR_BUFFER_TOO_SMALL,
				sldns_buffer_position(strbuf));
		memmove(rr, tocopy, *dname_len);
	} else if(*token == '\0') {
		/* no ownername was given, try prev, if that fails
		 * origin, else default to root */
		uint8_t* tocopy;
		if(prev) {
			*dname_len = prev_len;
			tocopy = prev;
		} else if(origin) {
			*dname_len = origin_len;
			tocopy = origin;
		} else {
			*dname_len = 1;
			tocopy = (uint8_t*)"\0";
		}
		if(*len < *dname_len)
			return RET_ERR(LDNS_WIREPARSE_ERR_BUFFER_TOO_SMALL,
				sldns_buffer_position(strbuf));
		memmove(rr, tocopy, *dname_len);
	} else {
		/* An explicit owner name: convert it (relative names are
		 * completed with the origin). */
		size_t dlen = *len;
		int s = sldns_str2wire_dname_buf_origin(token, rr, &dlen,
			origin, origin_len);
		if(s) return RET_ERR_SHIFT(s,
			sldns_buffer_position(strbuf)-strlen(token));
		*dname_len = dlen;
	}
	return LDNS_WIREPARSE_ERR_OK;
}
| 0 |
[] |
unbound
|
3f3cadd416d6efa92ff2d548ac090f42cd79fee9
| 108,170,381,077,397,630,000,000,000,000,000,000,000 | 58 |
- Fix Out of Bounds Write in sldns_str2wire_str_buf(),
reported by X41 D-Sec.
|
ModuleExport size_t RegisterHDRImage(void)
{
  MagickInfo
    *entry;

  /* Register the Radiance RGBE (HDR) coder with the coder registry. */
  entry=SetMagickInfo("HDR");
  entry->magick=(IsImageFormatHandler *) IsHDR;
  entry->decoder=(DecodeImageHandler *) ReadHDRImage;
  entry->encoder=(EncodeImageHandler *) WriteHDRImage;
  entry->module=ConstantString("HDR");
  entry->description=ConstantString("Radiance RGBE image format");
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}
| 0 |
[
"CWE-20",
"CWE-703",
"CWE-835"
] |
ImageMagick
|
97aa7d7cfd2027f6ba7ce42caf8b798541b9cdc6
| 128,895,655,705,293,640,000,000,000,000,000,000,000 | 14 |
Fixed infinite loop and added checks for the sscanf result.
|
/*
 * Build an 802.11 SCAN firmware command from @scan_cfg into @cmd.
 * Copies the fixed fields (bss_mode, bssid) and the caller-prepared TLV
 * buffer, then fills in the command id and total size. Always returns 0.
 *
 * NOTE(review): scan_cfg->tlv_buf_len is trusted here with no check
 * against the size of scan_cmd->tlv_buffer — confirm callers bound it
 * before this copy.
 */
int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd,
			    struct mwifiex_scan_cmd_config *scan_cfg)
{
	struct host_cmd_ds_802_11_scan *scan_cmd = &cmd->params.scan;

	/* Set fixed field variables in scan command */
	scan_cmd->bss_mode = scan_cfg->bss_mode;
	memcpy(scan_cmd->bssid, scan_cfg->specific_bssid,
	       sizeof(scan_cmd->bssid));
	memcpy(scan_cmd->tlv_buffer, scan_cfg->tlv_buf, scan_cfg->tlv_buf_len);

	cmd->command = cpu_to_le16(HostCmd_CMD_802_11_SCAN);

	/* Size is equal to the sizeof(fixed portions) + the TLV len + header */
	cmd->size = cpu_to_le16((u16) (sizeof(scan_cmd->bss_mode)
				       + sizeof(scan_cmd->bssid)
				       + scan_cfg->tlv_buf_len + S_DS_GEN));

	return 0;
}
| 0 |
[
"CWE-269",
"CWE-787"
] |
linux
|
b70261a288ea4d2f4ac7cd04be08a9f0f2de4f4d
| 36,937,919,505,684,470,000,000,000,000,000,000,000 | 20 |
mwifiex: Fix possible buffer overflows in mwifiex_cmd_append_vsie_tlv()
mwifiex_cmd_append_vsie_tlv() calls memcpy() without checking
the destination size may trigger a buffer overflower,
which a local user could use to cause denial of service
or the execution of arbitrary code.
Fix it by putting the length check before calling memcpy().
Signed-off-by: Qing Xu <[email protected]>
Signed-off-by: Kalle Valo <[email protected]>
|
static ssize_t inode_readahead_blks_store(struct ext4_attr *a,
					  struct ext4_sb_info *sbi,
					  const char *buf, size_t count)
{
	unsigned long t;

	/* Accept only a power-of-two value no larger than 0x40000000. */
	if (parse_strtoul(buf, 0x40000000, &t) || !is_power_of_2(t))
		return -EINVAL;

	sbi->s_inode_readahead_blks = t;
	return count;
}
| 0 |
[
"CWE-703"
] |
linux
|
744692dc059845b2a3022119871846e74d4f6e11
| 21,177,859,461,451,146,000,000,000,000,000,000,000 | 15 |
ext4: use ext4_get_block_write in buffer write
Allocate uninitialized extent before ext4 buffer write and
convert the extent to initialized after io completes.
The purpose is to make sure an extent can only be marked
initialized after it has been written with new data so
we can safely drop the i_mutex lock in ext4 DIO read without
exposing stale data. This helps to improve multi-thread DIO
read performance on high-speed disks.
Skip the nobh and data=journal mount cases to make things simple for now.
Signed-off-by: Jiaying Zhang <[email protected]>
Signed-off-by: "Theodore Ts'o" <[email protected]>
|
void skipLabel()
{
    // Walk length-prefixed labels until the terminating zero octet.
    for (uint8_t len = get8BitInt(); len != 0; len = get8BitInt()) {
        if (len >= 0xc0) {
            // Extended (compression) label: consume its second octet
            // and stop — the name ends here.
            get8BitInt();
            return;
        }
        skipBytes(len);
    }
}
| 0 |
[
"CWE-399"
] |
pdns
|
adb10be102ddd4d2baf7a8adbb5673946fe5e555
| 202,542,327,765,691,900,000,000,000,000,000,000,000 | 11 |
fix forward reference-check in getLabelFromContent()
|
// Math-parser opcode: product of all components of a vector operand.
// The _cimg_mp_vfunc macro (defined elsewhere) presumably binds the
// operand vector as 'vec' and returns the computed 'res'.
static double mp_vprod(_cimg_math_parser& mp) {
  _cimg_mp_vfunc(res = vec.product());
}
| 0 |
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
| 160,918,223,226,949,150,000,000,000,000,000,000,000 | 3 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
|
/*
 * Codec close hook for the PixarLog compressor.
 *
 * Fix: only perform the bitspersample/sampleformat override when the
 * codec was actually initialized. On a crafted file with, e.g.,
 * bitspersample == 1 and a TransferFunction attached, the size of the
 * TransferFunction table depends on 1 << bitspersample; unconditionally
 * raising bitspersample to 8 here makes directory flushing access
 * memory past the allocated table (heap buffer overflow, libtiff
 * bugzilla #2604).
 */
PixarLogClose(TIFF* tif)
{
	PixarLogState* sp = (PixarLogState*) tif->tif_data;
	TIFFDirectory *td = &tif->tif_dir;

	if (sp == NULL)
		return;
	/* In a really sneaky (and really incorrect, and untruthful, and
	 * troublesome, and error-prone) maneuver that completely goes against
	 * the spirit of TIFF, and breaks TIFF, on close, we covertly
	 * modify both bitspersample and sampleformat in the directory to
	 * indicate 8-bit linear.  This way, the decode "just works" even for
	 * readers that don't know about PixarLog, or how to set
	 * the PIXARLOGDATFMT pseudo-tag.
	 */
	if (sp->state & PLSTATE_INIT) {
		td->td_bitspersample = 8;
		td->td_sampleformat = SAMPLEFORMAT_UINT;
	}
}
| 1 |
[
"CWE-125"
] |
libtiff
|
1044b43637fa7f70fb19b93593777b78bd20da86
| 209,864,999,212,578,100,000,000,000,000,000,000,000 | 15 |
* libtiff/tif_pixarlog.c, libtiff/tif_luv.c: fix heap-based buffer
overflow on generation of PixarLog / LUV compressed files, with
ColorMap, TransferFunction attached and nasty plays with bitspersample.
The fix for LUV has not been tested, but suffers from the same kind
of issue of PixarLog.
Reported by Agostino Sarubbo.
Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2604
|
/*
 * Complete a read/write request: mark failure if the result differs
 * from what was expected, end the write accounting, release any
 * selected buffer (its id is returned via cflags), and post the CQE.
 */
static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
			     unsigned int issue_flags)
{
	int cflags = 0;

	/* -EAGAIN/-EOPNOTSUPP may be retried; if the reissue was queued,
	 * do not complete the request now. */
	if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_reissue(req))
		return;

	if (res != req->result)
		req_set_fail_links(req);
	if (req->rw.kiocb.ki_flags & IOCB_WRITE)
		kiocb_end_write(req);
	if (req->flags & REQ_F_BUFFER_SELECTED)
		cflags = io_put_rw_kbuf(req);

	__io_req_complete(req, issue_flags, res, cflags);
}
| 0 |
[
"CWE-667"
] |
linux
|
3ebba796fa251d042be42b929a2d916ee5c34a49
| 175,938,841,777,714,930,000,000,000,000,000,000 | 16 |
io_uring: ensure that SQPOLL thread is started for exit
If we create it in a disabled state because IORING_SETUP_R_DISABLED is
set on ring creation, we need to ensure that we've kicked the thread if
we're exiting before it's been explicitly disabled. Otherwise we can run
into a deadlock where exit is waiting go park the SQPOLL thread, but the
SQPOLL thread itself is waiting to get a signal to start.
That results in the below trace of both tasks hung, waiting on each other:
INFO: task syz-executor458:8401 blocked for more than 143 seconds.
Not tainted 5.11.0-next-20210226-syzkaller #0
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:syz-executor458 state:D stack:27536 pid: 8401 ppid: 8400 flags:0x00004004
Call Trace:
context_switch kernel/sched/core.c:4324 [inline]
__schedule+0x90c/0x21a0 kernel/sched/core.c:5075
schedule+0xcf/0x270 kernel/sched/core.c:5154
schedule_timeout+0x1db/0x250 kernel/time/timer.c:1868
do_wait_for_common kernel/sched/completion.c:85 [inline]
__wait_for_common kernel/sched/completion.c:106 [inline]
wait_for_common kernel/sched/completion.c:117 [inline]
wait_for_completion+0x168/0x270 kernel/sched/completion.c:138
io_sq_thread_park fs/io_uring.c:7115 [inline]
io_sq_thread_park+0xd5/0x130 fs/io_uring.c:7103
io_uring_cancel_task_requests+0x24c/0xd90 fs/io_uring.c:8745
__io_uring_files_cancel+0x110/0x230 fs/io_uring.c:8840
io_uring_files_cancel include/linux/io_uring.h:47 [inline]
do_exit+0x299/0x2a60 kernel/exit.c:780
do_group_exit+0x125/0x310 kernel/exit.c:922
__do_sys_exit_group kernel/exit.c:933 [inline]
__se_sys_exit_group kernel/exit.c:931 [inline]
__x64_sys_exit_group+0x3a/0x50 kernel/exit.c:931
do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x43e899
RSP: 002b:00007ffe89376d48 EFLAGS: 00000246 ORIG_RAX: 00000000000000e7
RAX: ffffffffffffffda RBX: 00000000004af2f0 RCX: 000000000043e899
RDX: 000000000000003c RSI: 00000000000000e7 RDI: 0000000000000000
RBP: 0000000000000000 R08: ffffffffffffffc0 R09: 0000000010000000
R10: 0000000000008011 R11: 0000000000000246 R12: 00000000004af2f0
R13: 0000000000000001 R14: 0000000000000000 R15: 0000000000000001
INFO: task iou-sqp-8401:8402 can't die for more than 143 seconds.
task:iou-sqp-8401 state:D stack:30272 pid: 8402 ppid: 8400 flags:0x00004004
Call Trace:
context_switch kernel/sched/core.c:4324 [inline]
__schedule+0x90c/0x21a0 kernel/sched/core.c:5075
schedule+0xcf/0x270 kernel/sched/core.c:5154
schedule_timeout+0x1db/0x250 kernel/time/timer.c:1868
do_wait_for_common kernel/sched/completion.c:85 [inline]
__wait_for_common kernel/sched/completion.c:106 [inline]
wait_for_common kernel/sched/completion.c:117 [inline]
wait_for_completion+0x168/0x270 kernel/sched/completion.c:138
io_sq_thread+0x27d/0x1ae0 fs/io_uring.c:6717
ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:294
INFO: task iou-sqp-8401:8402 blocked for more than 143 seconds.
Reported-by: [email protected]
Signed-off-by: Jens Axboe <[email protected]>
|
/*
  Evaluate LOG().

  One argument:  natural logarithm ln(args[0]).
  Two arguments: logarithm of args[1] in base args[0], computed as
                 log(args[1]) / log(args[0]).

  A NULL argument, a non-positive operand, or a base of exactly 1 makes
  the result SQL NULL: signal_divide_by_null() is called (presumably it
  sets null_value and raises the warning — defined elsewhere) and 0.0 is
  returned as the placeholder value.
*/
double Item_func_log::val_real()
{
  DBUG_ASSERT(fixed == 1);
  double value= args[0]->val_real();

  if ((null_value= args[0]->null_value))
    return 0.0;
  if (value <= 0.0)
  {
    signal_divide_by_null();
    return 0.0;
  }

  if (arg_count == 2)
  {
    double value2= args[1]->val_real();
    if ((null_value= args[1]->null_value))
      return 0.0;
    /* value is the base here; base 1 would divide by log(1) == 0. */
    if (value2 <= 0.0 || value == 1.0)
    {
      signal_divide_by_null();
      return 0.0;
    }
    return log(value2) / log(value);
  }
  return log(value);
}
| 0 |
[
"CWE-120"
] |
server
|
eca207c46293bc72dd8d0d5622153fab4d3fccf1
| 220,982,638,567,187,380,000,000,000,000,000,000,000 | 25 |
MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.
Precision should be kept below DECIMAL_MAX_SCALE for computations.
It can be bigger in Item_decimal. I'd fix this too but it changes the
existing behaviour so problemmatic to ix.
|
/*
 * Open an HTTP(S) URL.
 *
 * Duplicates the location, snapshots the caller's options for reconnects,
 * normalizes user-supplied headers (ensuring a trailing CRLF), then either
 * enters listen mode or opens the client connection.
 *
 * Returns 0 on success or a negative AVERROR code.
 *
 * Fix: the header length was stored in an int although strlen() returns
 * size_t; a sufficiently large headers string would truncate/overflow the
 * signed value. Keep the length unsigned.
 */
static int http_open(URLContext *h, const char *uri, int flags,
                     AVDictionary **options)
{
    HTTPContext *s = h->priv_data;
    int ret;

    if( s->seekable == 1 )
        h->is_streamed = 0;
    else
        h->is_streamed = 1;

    s->filesize = -1;
    s->location = av_strdup(uri);
    if (!s->location)
        return AVERROR(ENOMEM);
    if (options)
        av_dict_copy(&s->chained_options, *options, 0);

    if (s->headers) {
        size_t len = strlen(s->headers);
        if (len < 2 || strcmp("\r\n", s->headers + len - 2)) {
            av_log(h, AV_LOG_WARNING,
                   "No trailing CRLF found in HTTP header.\n");
            ret = av_reallocp(&s->headers, len + 3);
            if (ret < 0)
                return ret;
            s->headers[len]     = '\r';
            s->headers[len + 1] = '\n';
            s->headers[len + 2] = '\0';
        }
    }

    if (s->listen) {
        return http_listen(h, uri, flags, options);
    }
    ret = http_open_cnx(h, options);
    if (ret < 0)
        av_dict_free(&s->chained_options);
    return ret;
}
| 1 |
[
"CWE-119",
"CWE-787"
] |
FFmpeg
|
2a05c8f813de6f2278827734bf8102291e7484aa
| 11,671,129,861,013,781,000,000,000,000,000,000,000 | 40 |
http: make length/offset-related variables unsigned.
Fixes #5992, reported and found by Paul Cher <[email protected]>.
|
/*
 * Fetch the next token while scanning inside a character class "[...]".
 *
 * tok:      out — token type plus payload (char, code point, char-type
 *           or property info).
 * src/end:  in/out — scan pointer into the pattern; *src is advanced to
 *           just past the consumed token.
 * env:      scan environment supplying syntax flags and encoding.
 *
 * Returns the token type (>= 0) or a negative ONIGERR_* code.
 */
fetch_token_in_cc(OnigToken* tok, UChar** src, UChar* end, ScanEnv* env)
{
  int num;
  OnigCodePoint c, c2;
  const OnigSyntaxType* syn = env->syntax;
  OnigEncoding enc = env->enc;
  UChar* prev;
  UChar* p = *src;
  PFETCH_READY;

  if (PEND) {
    tok->type = TK_EOT;
    return tok->type;
  }

  PFETCH(c);
  /* Default: a plain character token; refined below. */
  tok->type = TK_CHAR;
  tok->base = 0;
  tok->u.c = c;
  tok->escaped = 0;

  if (c == ']') {
    tok->type = TK_CC_CLOSE;
  }
  else if (c == '-') {
    tok->type = TK_CC_RANGE;
  }
  else if (c == MC_ESC(syn)) {
    /* Backslash escape sequences inside the class. */
    if (! IS_SYNTAX_BV(syn, ONIG_SYN_BACKSLASH_ESCAPE_IN_CC))
      goto end;

    if (PEND) return ONIGERR_END_PATTERN_AT_ESCAPE;

    PFETCH(c);
    tok->escaped = 1;
    tok->u.c = c;
    switch (c) {
    /* Shorthand character-type escapes: \w \W \d \D \s \S. */
    case 'w':
      tok->type = TK_CHAR_TYPE;
      tok->u.prop.ctype = ONIGENC_CTYPE_WORD;
      tok->u.prop.not = 0;
      break;
    case 'W':
      tok->type = TK_CHAR_TYPE;
      tok->u.prop.ctype = ONIGENC_CTYPE_WORD;
      tok->u.prop.not = 1;
      break;
    case 'd':
      tok->type = TK_CHAR_TYPE;
      tok->u.prop.ctype = ONIGENC_CTYPE_DIGIT;
      tok->u.prop.not = 0;
      break;
    case 'D':
      tok->type = TK_CHAR_TYPE;
      tok->u.prop.ctype = ONIGENC_CTYPE_DIGIT;
      tok->u.prop.not = 1;
      break;
    case 's':
      tok->type = TK_CHAR_TYPE;
      tok->u.prop.ctype = ONIGENC_CTYPE_SPACE;
      tok->u.prop.not = 0;
      break;
    case 'S':
      tok->type = TK_CHAR_TYPE;
      tok->u.prop.ctype = ONIGENC_CTYPE_SPACE;
      tok->u.prop.not = 1;
      break;
    case 'h':
      /* \h / \H (hex digit) only when the syntax enables them. */
      if (! IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_H_XDIGIT)) break;
      tok->type = TK_CHAR_TYPE;
      tok->u.prop.ctype = ONIGENC_CTYPE_XDIGIT;
      tok->u.prop.not = 0;
      break;
    case 'H':
      if (! IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_H_XDIGIT)) break;
      tok->type = TK_CHAR_TYPE;
      tok->u.prop.ctype = ONIGENC_CTYPE_XDIGIT;
      tok->u.prop.not = 1;
      break;
    case 'p':
    case 'P':
      /* Unicode property: \p{...} / \P{...}, optional '^' negation. */
      if (PEND) break;
      c2 = PPEEK;
      if (c2 == '{' &&
          IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_P_BRACE_CHAR_PROPERTY)) {
        PINC;
        tok->type = TK_CHAR_PROPERTY;
        tok->u.prop.not = (c == 'P' ? 1 : 0);

        if (!PEND && IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_P_BRACE_CIRCUMFLEX_NOT)) {
          PFETCH(c2);
          if (c2 == '^') {
            tok->u.prop.not = (tok->u.prop.not == 0 ? 1 : 0);
          }
          else
            PUNFETCH;
        }
      }
      else {
        onig_syntax_warn(env, "invalid Unicode Property \\%c", c);
      }
      break;
    case 'x':
      /* \x{...} wide code point, or \xHH raw byte. */
      if (PEND) break;
      prev = p;
      if (PPEEK_IS('{') && IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_X_BRACE_HEX8)) {
        PINC;
        num = scan_unsigned_hexadecimal_number(&p, end, 0, 8, enc);
        if (num < 0) return ONIGERR_TOO_BIG_WIDE_CHAR_VALUE;
        if (!PEND) {
          c2 = PPEEK;
          if (ONIGENC_IS_CODE_XDIGIT(enc, c2))
            return ONIGERR_TOO_LONG_WIDE_CHAR_VALUE;
        }

        if (p > prev + enclen(enc, prev, end) && !PEND && (PPEEK_IS('}'))) {
          PINC;
          tok->type = TK_CODE_POINT;
          tok->base = 16;
          tok->u.code = (OnigCodePoint )num;
        }
        else {
          /* can't read nothing or invalid format */
          p = prev;
        }
      }
      else if (IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_X_HEX2)) {
        num = scan_unsigned_hexadecimal_number(&p, end, 0, 2, enc);
        if (num < 0) return ONIGERR_TOO_BIG_NUMBER;
        if (p == prev) { /* can't read nothing. */
          num = 0; /* but, it's not error */
        }
        tok->type = TK_RAW_BYTE;
        tok->base = 16;
        tok->u.c = num;
      }
      break;
    case 'u':
      /* \uHHHH: exactly four hex digits. */
      if (PEND) break;
      prev = p;
      if (IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_U_HEX4)) {
        num = scan_unsigned_hexadecimal_number(&p, end, 4, 4, enc);
        if (num < -1) return ONIGERR_TOO_SHORT_DIGITS;
        else if (num < 0) return ONIGERR_TOO_BIG_NUMBER;
        if (p == prev) { /* can't read nothing. */
          num = 0; /* but, it's not error */
        }
        tok->type = TK_CODE_POINT;
        tok->base = 16;
        tok->u.code = (OnigCodePoint )num;
      }
      break;
    case 'o':
      /* \o{...}: octal wide code point. */
      if (PEND) break;
      prev = p;
      if (PPEEK_IS('{') && IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_O_BRACE_OCTAL)) {
        PINC;
        num = scan_unsigned_octal_number(&p, end, 11, enc);
        if (num < 0) return ONIGERR_TOO_BIG_WIDE_CHAR_VALUE;
        if (!PEND) {
          c2 = PPEEK;
          if (ONIGENC_IS_CODE_DIGIT(enc, c2) && c2 < '8')
            return ONIGERR_TOO_LONG_WIDE_CHAR_VALUE;
        }

        if (p > prev + enclen(enc, prev, end) && !PEND && (PPEEK_IS('}'))) {
          PINC;
          tok->type = TK_CODE_POINT;
          tok->base = 8;
          tok->u.code = (OnigCodePoint )num;
        }
        else {
          /* can't read nothing or invalid format */
          p = prev;
        }
      }
      break;
    case '0':
    case '1': case '2': case '3': case '4': case '5': case '6': case '7':
      /* \NNN: up to three octal digits as a raw byte. */
      if (IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_OCTAL3)) {
        PUNFETCH;
        prev = p;
        num = scan_unsigned_octal_number(&p, end, 3, enc);
        if (num < 0 || 0xff < num) return ONIGERR_TOO_BIG_NUMBER;
        if (p == prev) { /* can't read nothing. */
          num = 0; /* but, it's not error */
        }
        tok->type = TK_RAW_BYTE;
        tok->base = 8;
        tok->u.c = num;
      }
      break;
    default:
      /* Any other escape: resolve it to a concrete code point. */
      PUNFETCH;
      num = fetch_escaped_value(&p, end, env, &c2);
      if (num < 0) return num;
      if ((OnigCodePoint )tok->u.c != c2) {
        tok->u.code = (OnigCodePoint )c2;
        tok->type = TK_CODE_POINT;
      }
      break;
    }
  }
  else if (c == '[') {
    /* Either a POSIX bracket "[:name:]" or a nested class "[". */
    if (IS_SYNTAX_OP(syn, ONIG_SYN_OP_POSIX_BRACKET) && (PPEEK_IS(':'))) {
      OnigCodePoint send[] = { (OnigCodePoint )':', (OnigCodePoint )']' };
      tok->backp = p; /* point at '[' is read */
      PINC;
      if (str_exist_check_with_esc(send, 2, p, end,
                                   (OnigCodePoint )']', enc, syn)) {
        tok->type = TK_POSIX_BRACKET_OPEN;
      }
      else {
        PUNFETCH;
        goto cc_in_cc;
      }
    }
    else {
    cc_in_cc:
      if (IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_CCLASS_SET_OP)) {
        tok->type = TK_CC_CC_OPEN;
      }
      else {
        CC_ESC_WARN(env, (UChar* )"[");
      }
    }
  }
  else if (c == '&') {
    /* "&&" set intersection, when the syntax supports set operations. */
    if (IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_CCLASS_SET_OP) &&
        !PEND && (PPEEK_IS('&'))) {
      PINC;
      tok->type = TK_CC_AND;
    }
  }

 end:
  *src = p;
  return tok->type;
}
| 0 |
[
"CWE-476"
] |
Onigmo
|
00cc7e28a3ed54b3b512ef3b58ea737a57acf1f9
| 231,889,508,465,063,870,000,000,000,000,000,000,000 | 249 |
Fix SEGV in onig_error_code_to_str() (Fix #132)
When onig_new(ONIG_SYNTAX_PERL) fails with ONIGERR_INVALID_GROUP_NAME,
onig_error_code_to_str() crashes.
onig_scan_env_set_error_string() should have been used when returning
ONIGERR_INVALID_GROUP_NAME.
|
static void glfs_async_cbk(glfs_fd_t *fd, ssize_t ret, void *data)
{
glfs_cbk_cookie *cookie = data;
struct tcmu_device *dev = cookie->dev;
struct tcmulib_cmd *cmd = cookie->cmd;
size_t length = cookie->length;
if (ret < 0 || ret != length) {
/* Read/write/flush failed */
switch (cookie->op) {
case TCMU_GLFS_READ:
ret = tcmu_set_sense_data(cmd->sense_buf, MEDIUM_ERROR,
ASC_READ_ERROR, NULL);
break;
case TCMU_GLFS_WRITE:
case TCMU_GLFS_FLUSH:
ret = tcmu_set_sense_data(cmd->sense_buf, MEDIUM_ERROR,
ASC_WRITE_ERROR, NULL);
break;
}
} else {
ret = SAM_STAT_GOOD;
}
cmd->done(dev, cmd, ret);
free(cookie);
}
| 0 |
[
"CWE-200",
"CWE-119"
] |
tcmu-runner
|
61bd03e600d2abf309173e9186f4d465bb1b7157
| 212,336,680,237,942,440,000,000,000,000,000,000,000 | 27 |
glfs: discard glfs_check_config
Signed-off-by: Prasanna Kumar Kalever <[email protected]>
|
/* libssh2_session_handshake
 *
 * Public API entry point: run the session startup over the already
 * connected socket 'sock' by delegating to session_startup().
 * BLOCK_ADJUST is a macro defined elsewhere — presumably it re-invokes
 * the expression for blocking sessions until it stops returning EAGAIN.
 * Returns 0 on success or a negative libssh2 error code.
 */
libssh2_session_handshake(LIBSSH2_SESSION *session, libssh2_socket_t sock)
{
    int rc;

    BLOCK_ADJUST(rc, session, session_startup(session, sock) );

    return rc;
}
| 0 |
[
"CWE-787"
] |
libssh2
|
dc109a7f518757741590bb993c0c8412928ccec2
| 162,558,759,982,261,580,000,000,000,000,000,000,000 | 8 |
Security fixes (#315)
* Bounds checks
Fixes for CVEs
https://www.libssh2.org/CVE-2019-3863.html
https://www.libssh2.org/CVE-2019-3856.html
* Packet length bounds check
CVE
https://www.libssh2.org/CVE-2019-3855.html
* Response length check
CVE
https://www.libssh2.org/CVE-2019-3859.html
* Bounds check
CVE
https://www.libssh2.org/CVE-2019-3857.html
* Bounds checking
CVE
https://www.libssh2.org/CVE-2019-3859.html
and additional data validation
* Check bounds before reading into buffers
* Bounds checking
CVE
https://www.libssh2.org/CVE-2019-3859.html
* declare SIZE_MAX and UINT_MAX if needed
|
int ha_myisam::index_read_idx_map(uchar *buf, uint index, const uchar *key,
key_part_map keypart_map,
enum ha_rkey_function find_flag)
{
MYSQL_INDEX_READ_ROW_START(table_share->db.str, table_share->table_name.str);
ha_statistic_increment(&SSV::ha_read_key_count);
int error=mi_rkey(file, buf, index, key, keypart_map, find_flag);
table->status=error ? STATUS_NOT_FOUND: 0;
MYSQL_INDEX_READ_ROW_DONE(error);
return error;
}
| 0 |
[
"CWE-362"
] |
mysql-server
|
4e5473862e6852b0f3802b0cd0c6fa10b5253291
| 130,872,627,773,342,330,000,000,000,000,000,000,000 | 11 |
Bug#24388746: PRIVILEGE ESCALATION AND RACE CONDITION USING CREATE TABLE
During REPAIR TABLE of a MyISAM table, a temporary data file (.TMD)
is created. When repair finishes, this file is renamed to the original
.MYD file. The problem was that during this rename, we copied the
stats from the old file to the new file with chmod/chown. If a user
managed to replace the temporary file before chmod/chown was executed,
it was possible to get an arbitrary file with the privileges of the
mysql user.
This patch fixes the problem by not copying stats from the old
file to the new file. This is not needed as the new file was
created with the correct stats. This fix only changes server
behavior - external utilities such as myisamchk still does
chmod/chown.
No test case provided since the problem involves synchronization
with file system operations.
|
_copyWindowAgg(const WindowAgg *from)
{
WindowAgg *newnode = makeNode(WindowAgg);
CopyPlanFields((const Plan *) from, (Plan *) newnode);
COPY_SCALAR_FIELD(winref);
COPY_SCALAR_FIELD(partNumCols);
if (from->partNumCols > 0)
{
COPY_POINTER_FIELD(partColIdx, from->partNumCols * sizeof(AttrNumber));
COPY_POINTER_FIELD(partOperators, from->partNumCols * sizeof(Oid));
}
COPY_SCALAR_FIELD(ordNumCols);
if (from->ordNumCols > 0)
{
COPY_POINTER_FIELD(ordColIdx, from->ordNumCols * sizeof(AttrNumber));
COPY_POINTER_FIELD(ordOperators, from->ordNumCols * sizeof(Oid));
}
COPY_SCALAR_FIELD(frameOptions);
COPY_NODE_FIELD(startOffset);
COPY_NODE_FIELD(endOffset);
return newnode;
}
| 0 |
[
"CWE-362"
] |
postgres
|
5f173040e324f6c2eebb90d86cf1b0cdb5890f0a
| 309,442,450,717,703,400,000,000,000,000,000,000,000 | 25 |
Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062
|
flow_count_mpls_labels(const struct flow *flow, struct flow_wildcards *wc)
{
/* dl_type is always masked. */
if (eth_type_mpls(flow->dl_type)) {
int i;
int cnt;
cnt = 0;
for (i = 0; i < FLOW_MAX_MPLS_LABELS; i++) {
if (wc) {
wc->masks.mpls_lse[i] |= htonl(MPLS_BOS_MASK);
}
if (flow->mpls_lse[i] & htonl(MPLS_BOS_MASK)) {
return i + 1;
}
if (flow->mpls_lse[i]) {
cnt++;
}
}
return cnt;
} else {
return 0;
}
}
| 0 |
[
"CWE-400"
] |
ovs
|
79349cbab0b2a755140eedb91833ad2760520a83
| 128,959,217,061,773,290,000,000,000,000,000,000,000 | 24 |
flow: Support extra padding length.
Although not required, padding can be optionally added until
the packet length is MTU bytes. A packet with extra padding
currently fails sanity checks.
Vulnerability: CVE-2020-35498
Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.")
Reported-by: Joakim Hindersson <[email protected]>
Acked-by: Ilya Maximets <[email protected]>
Signed-off-by: Flavio Leitner <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]>
|
static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
unsigned long addr, unsigned long end,
const nodemask_t *nodes, unsigned long flags,
void *private)
{
pmd_t *pmd;
unsigned long next;
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
split_huge_page_pmd(vma->vm_mm, pmd);
if (pmd_none_or_clear_bad(pmd))
continue;
if (check_pte_range(vma, pmd, addr, next, nodes,
flags, private))
return -EIO;
} while (pmd++, addr = next, addr != end);
return 0;
}
| 1 |
[
"CWE-264"
] |
linux-2.6
|
1a5a9906d4e8d1976b701f889d8f35d54b928f25
| 340,220,446,198,502,500,000,000,000,000,000,000,000 | 20 |
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem hold in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables to go away from under them, during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(¤t->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[[email protected]: checkpatch fixes]
Reported-by: Ulrich Obergfell <[email protected]>
Signed-off-by: Andrea Arcangeli <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Dave Jones <[email protected]>
Acked-by: Larry Woodman <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: <[email protected]> [2.6.38+]
Cc: Mark Salter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
ZEND_API int _zend_ts_hash_index_update_or_next_insert(TsHashTable *ht, ulong h, void *pData, uint nDataSize, void **pDest, int flag ZEND_FILE_LINE_DC)
{
int retval;
begin_write(ht);
retval = _zend_hash_index_update_or_next_insert(TS_HASH(ht), h, pData, nDataSize, pDest, flag ZEND_FILE_LINE_RELAY_CC);
end_write(ht);
return retval;
}
| 0 |
[] |
php-src
|
24125f0f26f3787c006e4a51611ba33ee3b841cb
| 91,494,876,834,247,600,000,000,000,000,000,000,000 | 10 |
Fixed bug #68676 (Explicit Double Free)
|
int r_jwe_set_header_str_value(jwe_t * jwe, const char * key, const char * str_value) {
int ret;
if (jwe != NULL) {
if ((ret = _r_json_set_str_value(jwe->j_header, key, str_value)) == RHN_OK) {
o_free(jwe->header_b64url);
jwe->header_b64url = NULL;
}
return ret;
} else {
return RHN_ERROR_PARAM;
}
}
| 0 |
[
"CWE-787"
] |
rhonabwy
|
b4c2923a1ba4fabf9b55a89244127e153a3e549b
| 323,717,835,563,308,880,000,000,000,000,000,000,000 | 13 |
Fix buffer overflow on r_jwe_aesgcm_key_unwrap
|
terminal_loop(int blocking)
{
int c;
int termwinkey = 0;
int ret;
#ifdef UNIX
int tty_fd = curbuf->b_term->tl_job->jv_channel
->ch_part[get_tty_part(curbuf->b_term)].ch_fd;
#endif
int restore_cursor = FALSE;
/* Remember the terminal we are sending keys to. However, the terminal
* might be closed while waiting for a character, e.g. typing "exit" in a
* shell and ++close was used. Therefore use curbuf->b_term instead of a
* stored reference. */
in_terminal_loop = curbuf->b_term;
if (*curwin->w_p_twk != NUL)
{
termwinkey = string_to_key(curwin->w_p_twk, TRUE);
if (termwinkey == Ctrl_W)
termwinkey = 0;
}
position_cursor(curwin, &curbuf->b_term->tl_cursor_pos);
may_set_cursor_props(curbuf->b_term);
while (blocking || vpeekc_nomap() != NUL)
{
#ifdef FEAT_GUI
if (!curbuf->b_term->tl_system)
#endif
/* TODO: skip screen update when handling a sequence of keys. */
/* Repeat redrawing in case a message is received while redrawing.
*/
while (must_redraw != 0)
if (update_screen(0) == FAIL)
break;
if (!term_use_loop_check(TRUE) || in_terminal_loop != curbuf->b_term)
/* job finished while redrawing */
break;
update_cursor(curbuf->b_term, FALSE);
restore_cursor = TRUE;
c = term_vgetc();
if (!term_use_loop_check(TRUE) || in_terminal_loop != curbuf->b_term)
{
/* Job finished while waiting for a character. Push back the
* received character. */
if (c != K_IGNORE)
vungetc(c);
break;
}
if (c == K_IGNORE)
continue;
#ifdef UNIX
/*
* The shell or another program may change the tty settings. Getting
* them for every typed character is a bit of overhead, but it's needed
* for the first character typed, e.g. when Vim starts in a shell.
*/
if (isatty(tty_fd))
{
ttyinfo_T info;
/* Get the current backspace character of the pty. */
if (get_tty_info(tty_fd, &info) == OK)
term_backspace_char = info.backspace;
}
#endif
#ifdef WIN3264
/* On Windows winpty handles CTRL-C, don't send a CTRL_C_EVENT.
* Use CTRL-BREAK to kill the job. */
if (ctrl_break_was_pressed)
mch_signal_job(curbuf->b_term->tl_job, (char_u *)"kill");
#endif
/* Was either CTRL-W (termwinkey) or CTRL-\ pressed?
* Not in a system terminal. */
if ((c == (termwinkey == 0 ? Ctrl_W : termwinkey) || c == Ctrl_BSL)
#ifdef FEAT_GUI
&& !curbuf->b_term->tl_system
#endif
)
{
int prev_c = c;
#ifdef FEAT_CMDL_INFO
if (add_to_showcmd(c))
out_flush();
#endif
c = term_vgetc();
#ifdef FEAT_CMDL_INFO
clear_showcmd();
#endif
if (!term_use_loop_check(TRUE)
|| in_terminal_loop != curbuf->b_term)
/* job finished while waiting for a character */
break;
if (prev_c == Ctrl_BSL)
{
if (c == Ctrl_N)
{
/* CTRL-\ CTRL-N : go to Terminal-Normal mode. */
term_enter_normal_mode();
ret = FAIL;
goto theend;
}
/* Send both keys to the terminal. */
send_keys_to_term(curbuf->b_term, prev_c, TRUE);
}
else if (c == Ctrl_C)
{
/* "CTRL-W CTRL-C" or 'termwinkey' CTRL-C: end the job */
mch_signal_job(curbuf->b_term->tl_job, (char_u *)"kill");
}
else if (c == '.')
{
/* "CTRL-W .": send CTRL-W to the job */
/* "'termwinkey' .": send 'termwinkey' to the job */
c = termwinkey == 0 ? Ctrl_W : termwinkey;
}
else if (c == Ctrl_BSL)
{
/* "CTRL-W CTRL-\": send CTRL-\ to the job */
c = Ctrl_BSL;
}
else if (c == 'N')
{
/* CTRL-W N : go to Terminal-Normal mode. */
term_enter_normal_mode();
ret = FAIL;
goto theend;
}
else if (c == '"')
{
term_paste_register(prev_c);
continue;
}
else if (termwinkey == 0 || c != termwinkey)
{
stuffcharReadbuff(Ctrl_W);
stuffcharReadbuff(c);
ret = OK;
goto theend;
}
}
# ifdef WIN3264
if (!enc_utf8 && has_mbyte && c >= 0x80)
{
WCHAR wc;
char_u mb[3];
mb[0] = (unsigned)c >> 8;
mb[1] = c;
if (MultiByteToWideChar(GetACP(), 0, (char*)mb, 2, &wc, 1) > 0)
c = wc;
}
# endif
if (send_keys_to_term(curbuf->b_term, c, TRUE) != OK)
{
if (c == K_MOUSEMOVE)
/* We are sure to come back here, don't reset the cursor color
* and shape to avoid flickering. */
restore_cursor = FALSE;
ret = OK;
goto theend;
}
}
ret = FAIL;
theend:
in_terminal_loop = NULL;
if (restore_cursor)
prepare_restore_cursor_props();
/* Move a snapshot of the screen contents to the buffer, so that completion
* works in other buffers. */
if (curbuf->b_term != NULL && !curbuf->b_term->tl_normal_mode)
may_move_terminal_to_buffer(curbuf->b_term, FALSE);
return ret;
}
| 0 |
[
"CWE-476"
] |
vim
|
cd929f7ba8cc5b6d6dcf35c8b34124e969fed6b8
| 52,876,158,991,793,020,000,000,000,000,000,000,000 | 186 |
patch 8.1.0633: crash when out of memory while opening a terminal window
Problem: Crash when out of memory while opening a terminal window.
Solution: Handle out-of-memory more gracefully.
|
StatusOr<ParserStatus> ConnectionImpl::onMessageComplete() {
ENVOY_CONN_LOG(trace, "message complete", connection_);
dispatchBufferedBody();
if (handling_upgrade_) {
// If this is an upgrade request, swallow the onMessageComplete. The
// upgrade payload will be treated as stream body.
ASSERT(!deferred_end_stream_headers_);
ENVOY_CONN_LOG(trace, "Pausing parser due to upgrade.", connection_);
return parser_->pause();
}
// If true, this indicates we were processing trailers and must
// move the last header into current_header_map_
if (header_parsing_state_ == HeaderParsingState::Value) {
RETURN_IF_ERROR(completeLastHeader());
}
return onMessageCompleteBase();
}
| 0 |
[
"CWE-416"
] |
envoy
|
fe7c69c248f4fe5a9080c7ccb35275b5218bb5ab
| 206,074,524,093,142,770,000,000,000,000,000,000,000 | 21 |
internal redirect: fix a lifetime bug (#785)
Signed-off-by: Alyssa Wilk <[email protected]>
Signed-off-by: Matt Klein <[email protected]>
Signed-off-by: Pradeep Rao <[email protected]>
|
check_end (const char *p)
{
if (!p)
return false;
while (c_isspace (*p))
++p;
if (!*p
|| (p[0] == 'G' && p[1] == 'M' && p[2] == 'T')
|| ((p[0] == '+' || p[0] == '-') && c_isdigit (p[1])))
return true;
else
return false;
}
| 0 |
[
"CWE-20"
] |
wget
|
3e25a9817f47fbb8660cc6a3b2f3eea239526c6c
| 150,558,961,079,152,440,000,000,000,000,000,000,000 | 13 |
Introduce --trust-server-names. Close CVE-2010-2252.
|
}
//! Return a shared sublist \newinstance.
const CImgList<T> get_shared_images(const unsigned int pos0, const unsigned int pos1) const {
if (pos0>pos1 || pos1>=_width)
throw CImgArgumentException(_cimglist_instance
"get_shared_images(): Specified sub-list indices (%u->%u) are out of bounds.",
cimglist_instance,
pos0,pos1);
CImgList<T> res(pos1 - pos0 + 1);
| 0 |
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
| 338,658,353,122,788,050,000,000,000,000,000,000,000 | 10 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
|
static bool reset_fw_if_needed(struct mlx5_core_dev *dev)
{
bool supported = (ioread32be(&dev->iseg->initializing) >>
MLX5_FW_RESET_SUPPORTED_OFFSET) & 1;
u32 fatal_error;
if (!supported)
return false;
/* The reset only needs to be issued by one PF. The health buffer is
* shared between all functions, and will be cleared during a reset.
* Check again to avoid a redundant 2nd reset. If the fatal erros was
* PCI related a reset won't help.
*/
fatal_error = check_fatal_sensors(dev);
if (fatal_error == MLX5_SENSOR_PCI_COMM_ERR ||
fatal_error == MLX5_SENSOR_NIC_DISABLED ||
fatal_error == MLX5_SENSOR_NIC_SW_RESET) {
mlx5_core_warn(dev, "Not issuing FW reset. Either it's already done or won't help.");
return false;
}
mlx5_core_warn(dev, "Issuing FW Reset\n");
/* Write the NIC interface field to initiate the reset, the command
* interface address also resides here, don't overwrite it.
*/
mlx5_set_nic_state(dev, MLX5_NIC_IFC_SW_RESET);
return true;
}
| 0 |
[
"CWE-400",
"CWE-401"
] |
linux
|
c7ed6d0183d5ea9bc31bcaeeba4070bd62546471
| 303,925,130,955,472,780,000,000,000,000,000,000,000 | 30 |
net/mlx5: fix memory leak in mlx5_fw_fatal_reporter_dump
In mlx5_fw_fatal_reporter_dump if mlx5_crdump_collect fails the
allocated memory for cr_data must be released otherwise there will be
memory leak. To fix this, this commit changes the return instruction
into goto error handling.
Fixes: 9b1f29823605 ("net/mlx5: Add support for FW fatal reporter dump")
Signed-off-by: Navid Emamdoost <[email protected]>
Signed-off-by: Saeed Mahameed <[email protected]>
|
static void __perf_event_init_context(struct perf_event_context *ctx)
{
raw_spin_lock_init(&ctx->lock);
mutex_init(&ctx->mutex);
INIT_LIST_HEAD(&ctx->active_ctx_list);
perf_event_groups_init(&ctx->pinned_groups);
perf_event_groups_init(&ctx->flexible_groups);
INIT_LIST_HEAD(&ctx->event_list);
INIT_LIST_HEAD(&ctx->pinned_active);
INIT_LIST_HEAD(&ctx->flexible_active);
refcount_set(&ctx->refcount, 1);
}
| 0 |
[
"CWE-401"
] |
tip
|
7bdb157cdebbf95a1cd94ed2e01b338714075d00
| 83,037,573,429,885,560,000,000,000,000,000,000,000 | 12 |
perf/core: Fix a memory leak in perf_event_parse_addr_filter()
As shown through runtime testing, the "filename" allocation is not
always freed in perf_event_parse_addr_filter().
There are three possible ways that this could happen:
- It could be allocated twice on subsequent iterations through the loop,
- or leaked on the success path,
- or on the failure path.
Clean up the code flow to make it obvious that 'filename' is always
freed in the reallocation path and in the two return paths as well.
We rely on the fact that kfree(NULL) is NOP and filename is initialized
with NULL.
This fixes the leak. No other side effects expected.
[ Dan Carpenter: cleaned up the code flow & added a changelog. ]
[ Ingo Molnar: updated the changelog some more. ]
Fixes: 375637bc5249 ("perf/core: Introduce address range filtering")
Signed-off-by: "kiyin(尹亮)" <[email protected]>
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Cc: "Srivatsa S. Bhat" <[email protected]>
Cc: Anthony Liguori <[email protected]>
--
kernel/events/core.c | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
|
static int eval_if(const char *hdr, struct meth_params *params,
const struct namespace_t *namespace,
struct mailbox *tgt_mailbox, const char *tgt_resource,
const char *tgt_etag, const char *tgt_lock_token,
unsigned *locked)
{
unsigned ret = 0;
tok_t tok;
char *list;
/* Process each list, ORing the results */
tok_init(&tok, hdr, ")", TOK_TRIMLEFT|TOK_TRIMRIGHT);
while ((list = tok_next(&tok))) {
struct mailbox *mailbox, *my_mailbox = NULL;
const char *etag, *lock_token;
struct buf buf = BUF_INITIALIZER;
struct index_record record;
struct dav_data *ddata;
void *davdb = NULL;
if (*list == '<') {
/* Tagged-list */
const char *tag, *err;
xmlURIPtr uri;
tag = ++list;
list = strchr(tag, '>');
*list++ = '\0';
mailbox = NULL;
etag = lock_token = NULL;
/* Parse the URL and assign mailbox, etag, and lock_token */
if (params && (uri = parse_uri(METH_UNKNOWN, tag, 1, &err))) {
struct request_target_t tag_tgt;
int r;
memset(&tag_tgt, 0, sizeof(struct request_target_t));
tag_tgt.namespace = namespace;
if (!params->parse_path(uri->path, &tag_tgt, &err)) {
if (tag_tgt.mbentry && !tag_tgt.mbentry->server) {
if (tgt_mailbox &&
!strcmp(tgt_mailbox->name, tag_tgt.mbentry->name)) {
/* Use target mailbox */
mailbox = tgt_mailbox;
}
else {
/* Open new mailbox */
r = mailbox_open_irl(tag_tgt.mbentry->name,
&my_mailbox);
if (r) {
syslog(LOG_NOTICE,
"failed to open mailbox '%s'"
" in tagged If header: %s",
tag_tgt.mbentry->name, error_message(r));
}
mailbox = my_mailbox;
}
if (mailbox) {
if (!strcmpnull(tgt_resource, tag_tgt.resource)) {
/* Tag IS target resource */
etag = tgt_etag;
lock_token = tgt_lock_token;
}
else if (tag_tgt.resource) {
/* Open DAV DB corresponding to the mailbox */
davdb = params->davdb.open_db(mailbox);
/* Find message UID for the resource */
params->davdb.lookup_resource(davdb,
mailbox->name,
tag_tgt.resource,
(void **) &ddata,
0);
if (ddata->rowid) {
if (ddata->lock_expire > time(NULL)) {
lock_token = ddata->lock_token;
(*locked)++;
}
memset(&record, 0,
sizeof(struct index_record));
if (ddata->imap_uid) {
/* Mapped URL - Fetch index record */
r = mailbox_find_index_record(mailbox,
ddata->imap_uid,
&record);
if (r) {
syslog(LOG_NOTICE,
"failed to fetch record for"
" '%s':%u in tagged"
" If header: %s",
mailbox->name,
ddata->imap_uid,
error_message(r));
}
else {
etag =
message_guid_encode(&record.guid);
}
}
else {
/* Unmapped URL (empty resource) */
etag = NULL;
}
}
}
else {
/* Collection */
buf_printf(&buf, "%u-%u-%u",
mailbox->i.uidvalidity,
mailbox->i.last_uid,
mailbox->i.exists);
etag = buf_cstring(&buf);
}
}
}
mboxlist_entry_free(&tag_tgt.mbentry);
free(tag_tgt.userid);
}
xmlFreeURI(uri);
}
}
else {
/* No-tag-list */
mailbox = tgt_mailbox;
etag = tgt_etag;
lock_token = tgt_lock_token;
}
list = strchr(list, '(');
ret |= eval_list(list, mailbox, etag, lock_token, locked);
if (davdb) params->davdb.close_db(davdb);
mailbox_close(&my_mailbox);
buf_free(&buf);
}
tok_fini(&tok);
return (ret || *locked);
}
| 0 |
[] |
cyrus-imapd
|
6703ff881b6056e0c045a7b795ce8ba1bbb87027
| 104,782,464,484,177,560,000,000,000,000,000,000,000 | 146 |
http_dav.c: add 'private' Cache-Control directive for cacheable responses that require authentication
|
static u_int16_t concat_hash_string(struct ndpi_packet_struct *packet,
char *buf, u_int8_t client_hash) {
u_int16_t offset = 22, buf_out_len = 0;
if(offset+sizeof(u_int32_t) >= packet->payload_packet_len)
goto invalid_payload;
u_int32_t len = ntohl(*(u_int32_t*)&packet->payload[offset]);
offset += 4;
/* -1 for ';' */
if((offset >= packet->payload_packet_len) || (len >= packet->payload_packet_len-offset-1))
goto invalid_payload;
/* ssh.kex_algorithms [C/S] */
strncpy(buf, (const char *)&packet->payload[offset], buf_out_len = len);
buf[buf_out_len++] = ';';
offset += len;
if(offset+sizeof(u_int32_t) >= packet->payload_packet_len)
goto invalid_payload;
/* ssh.server_host_key_algorithms [None] */
len = ntohl(*(u_int32_t*)&packet->payload[offset]);
offset += 4 + len;
if(offset+sizeof(u_int32_t) >= packet->payload_packet_len)
goto invalid_payload;
/* ssh.encryption_algorithms_client_to_server [C] */
len = ntohl(*(u_int32_t*)&packet->payload[offset]);
if(client_hash) {
offset += 4;
if((offset >= packet->payload_packet_len) || (len >= packet->payload_packet_len-offset-1))
goto invalid_payload;
strncpy(&buf[buf_out_len], (const char *)&packet->payload[offset], len);
buf_out_len += len;
buf[buf_out_len++] = ';';
offset += len;
} else
offset += 4 + len;
if(offset+sizeof(u_int32_t) >= packet->payload_packet_len)
goto invalid_payload;
/* ssh.encryption_algorithms_server_to_client [S] */
len = ntohl(*(u_int32_t*)&packet->payload[offset]);
if(!client_hash) {
offset += 4;
if((offset >= packet->payload_packet_len) || (len >= packet->payload_packet_len-offset-1))
goto invalid_payload;
strncpy(&buf[buf_out_len], (const char *)&packet->payload[offset], len);
buf_out_len += len;
buf[buf_out_len++] = ';';
offset += len;
} else
offset += 4 + len;
if(offset+sizeof(u_int32_t) >= packet->payload_packet_len)
goto invalid_payload;
/* ssh.mac_algorithms_client_to_server [C] */
len = ntohl(*(u_int32_t*)&packet->payload[offset]);
if(client_hash) {
offset += 4;
if((offset >= packet->payload_packet_len) || (len >= packet->payload_packet_len-offset-1))
goto invalid_payload;
strncpy(&buf[buf_out_len], (const char *)&packet->payload[offset], len);
buf_out_len += len;
buf[buf_out_len++] = ';';
offset += len;
} else
offset += 4 + len;
if(offset+sizeof(u_int32_t) >= packet->payload_packet_len)
goto invalid_payload;
/* ssh.mac_algorithms_server_to_client [S] */
len = ntohl(*(u_int32_t*)&packet->payload[offset]);
if(!client_hash) {
offset += 4;
if((offset >= packet->payload_packet_len) || (len >= packet->payload_packet_len-offset-1))
goto invalid_payload;
strncpy(&buf[buf_out_len], (const char *)&packet->payload[offset], len);
buf_out_len += len;
buf[buf_out_len++] = ';';
offset += len;
} else
offset += 4 + len;
/* ssh.compression_algorithms_client_to_server [C] */
if(offset+sizeof(u_int32_t) >= packet->payload_packet_len)
goto invalid_payload;
len = ntohl(*(u_int32_t*)&packet->payload[offset]);
if(client_hash) {
offset += 4;
if((offset >= packet->payload_packet_len) || (len >= packet->payload_packet_len-offset-1))
goto invalid_payload;
strncpy(&buf[buf_out_len], (const char *)&packet->payload[offset], len);
buf_out_len += len;
offset += len;
} else
offset += 4 + len;
if(offset+sizeof(u_int32_t) >= packet->payload_packet_len)
goto invalid_payload;
/* ssh.compression_algorithms_server_to_client [S] */
len = ntohl(*(u_int32_t*)&packet->payload[offset]);
if(!client_hash) {
offset += 4;
if((offset >= packet->payload_packet_len) || (len >= packet->payload_packet_len-offset-1))
goto invalid_payload;
strncpy(&buf[buf_out_len], (const char *)&packet->payload[offset], len);
buf_out_len += len;
offset += len;
} else
offset += 4 + len;
/* ssh.languages_client_to_server [None] */
/* ssh.languages_server_to_client [None] */
#ifdef SSH_DEBUG
printf("[SSH] %s\n", buf);
#endif
return(buf_out_len);
invalid_payload:
#ifdef SSH_DEBUG
printf("[SSH] Invalid packet payload\n");
#endif
return(0);
}
| 1 |
[
"CWE-190",
"CWE-787"
] |
nDPI
|
7ce478a58b4dd29a8d1e6f4e9df2f778613d9202
| 144,572,567,098,073,570,000,000,000,000,000,000,000 | 147 |
ssh: fixing unsigned overflow leading to heap overflow
cf GHSL-2020-051
|
mm_request_receive(int sock, Buffer *m)
{
u_char buf[4];
u_int msg_len;
debug3("%s entering", __func__);
if (atomicio(read, sock, buf, sizeof(buf)) != sizeof(buf)) {
if (errno == EPIPE)
cleanup_exit(255);
fatal("%s: read: %s", __func__, strerror(errno));
}
msg_len = get_u32(buf);
if (msg_len > 256 * 1024)
fatal("%s: read: bad msg_len %d", __func__, msg_len);
buffer_clear(m);
buffer_append_space(m, msg_len);
if (atomicio(read, sock, buffer_ptr(m), msg_len) != msg_len)
fatal("%s: read: %s", __func__, strerror(errno));
}
| 0 |
[
"CWE-20",
"CWE-200"
] |
openssh-portable
|
d4697fe9a28dab7255c60433e4dd23cf7fce8a8b
| 306,004,158,118,806,600,000,000,000,000,000,000,000 | 20 |
Don't resend username to PAM; it already has it.
Pointed out by Moritz Jodeit; ok dtucker@
|
void sqlite3SelectReset(Parse *pParse, Select *p){
if( ALWAYS(p) ){
clearSelect(pParse->db, p, 0);
memset(&p->iLimit, 0, sizeof(Select) - offsetof(Select,iLimit));
p->pEList = sqlite3ExprListAppend(pParse, 0,
sqlite3ExprAlloc(pParse->db,TK_NULL,0,0));
p->pSrc = sqlite3DbMallocZero(pParse->db, sizeof(SrcList));
}
}
| 0 |
[
"CWE-125"
] |
sqlite
|
39df24a3f02495e5ef6bb5ea8ce029a2c1e377e6
| 10,019,109,963,915,055,000,000,000,000,000,000,000 | 9 |
Do not allow the constant-propagation optimization to apple to ON/USING clause
terms as it does not help and it might cause downstream problems.
FossilOrigin-Name: 1bc783da63d58b05c690468b569cb2787846357b63c1100d11777666c5787bf4
|
static inline void __free_iova(struct dma_iommu_mapping *mapping,
dma_addr_t addr, size_t size)
{
unsigned int start = (addr - mapping->base) >>
(mapping->order + PAGE_SHIFT);
unsigned int count = ((size >> PAGE_SHIFT) +
(1 << mapping->order) - 1) >> mapping->order;
unsigned long flags;
spin_lock_irqsave(&mapping->lock, flags);
bitmap_clear(mapping->bitmap, start, count);
spin_unlock_irqrestore(&mapping->lock, flags);
}
| 0 |
[
"CWE-284",
"CWE-264"
] |
linux
|
0ea1ec713f04bdfac343c9702b21cd3a7c711826
| 179,191,562,390,075,560,000,000,000,000,000,000,000 | 13 |
ARM: dma-mapping: don't allow DMA mappings to be marked executable
DMA mapping permissions were being derived from pgprot_kernel directly
without using PAGE_KERNEL. This causes them to be marked with executable
permission, which is not what we want. Fix this.
Signed-off-by: Russell King <[email protected]>
|
struct fpm_worker_pool_s *fpm_worker_pool_alloc() /* {{{ */
{
struct fpm_worker_pool_s *ret;
ret = malloc(sizeof(struct fpm_worker_pool_s));
if (!ret) {
return 0;
}
memset(ret, 0, sizeof(struct fpm_worker_pool_s));
ret->idle_spawn_rate = 1;
ret->log_fd = -1;
return ret;
}
| 0 |
[
"CWE-787"
] |
php-src
|
fadb1f8c1d08ae62b4f0a16917040fde57a3b93b
| 148,794,295,675,257,780,000,000,000,000,000,000,000 | 15 |
Fix bug #81026 (PHP-FPM oob R/W in root process leading to priv escalation)
The main change is to store scoreboard procs directly to the variable sized
array rather than indirectly through the pointer.
Signed-off-by: Stanislav Malyshev <[email protected]>
|
static void sd_read_block_characteristics(struct scsi_disk *sdkp)
{
unsigned char *buffer;
u16 rot;
const int vpd_len = 64;
buffer = kmalloc(vpd_len, GFP_KERNEL);
if (!buffer ||
/* Block Device Characteristics VPD */
scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len))
goto out;
rot = get_unaligned_be16(&buffer[4]);
if (rot == 1)
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue);
out:
kfree(buffer);
}
| 0 |
[
"CWE-284",
"CWE-264"
] |
linux
|
0bfc96cb77224736dfa35c3c555d37b3646ef35e
| 54,808,935,711,127,510,000,000,000,000,000,000,000 | 21 |
block: fail SCSI passthrough ioctls on partition devices
Linux allows executing the SG_IO ioctl on a partition or LVM volume, and
will pass the command to the underlying block device. This is
well-known, but it is also a large security problem when (via Unix
permissions, ACLs, SELinux or a combination thereof) a program or user
needs to be granted access only to part of the disk.
This patch lets partitions forward a small set of harmless ioctls;
others are logged with printk so that we can see which ioctls are
actually sent. In my tests only CDROM_GET_CAPABILITY actually occurred.
Of course it was being sent to a (partition on a) hard disk, so it would
have failed with ENOTTY and the patch isn't changing anything in
practice. Still, I'm treating it specially to avoid spamming the logs.
In principle, this restriction should include programs running with
CAP_SYS_RAWIO. If for example I let a program access /dev/sda2 and
/dev/sdb, it still should not be able to read/write outside the
boundaries of /dev/sda2 independent of the capabilities. However, for
now programs with CAP_SYS_RAWIO will still be allowed to send the
ioctls. Their actions will still be logged.
This patch does not affect the non-libata IDE driver. That driver
however already tests for bd != bd->bd_contains before issuing some
ioctl; it could be restricted further to forbid these ioctls even for
programs running with CAP_SYS_ADMIN/CAP_SYS_RAWIO.
Cc: [email protected]
Cc: Jens Axboe <[email protected]>
Cc: James Bottomley <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
[ Make it also print the command name when warning - Linus ]
Signed-off-by: Linus Torvalds <[email protected]>
|
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
int retries = sock_net(sk)->ipv4.sysctl_tcp_orphan_retries; /* May be zero. */
/* We know from an ICMP that something is wrong. */
if (sk->sk_err_soft && !alive)
retries = 0;
/* However, if socket sent something recently, select some safe
* number of retries. 8 corresponds to >100 seconds with minimal
* RTO of 200msec. */
if (retries == 0 && alive)
retries = 8;
return retries;
}
| 0 |
[
"CWE-770"
] |
net
|
967c05aee439e6e5d7d805e195b3a20ef5c433d6
| 59,852,658,817,557,925,000,000,000,000,000,000,000 | 15 |
tcp: enforce tcp_min_snd_mss in tcp_mtu_probing()
If mtu probing is enabled tcp_mtu_probing() could very well end up
with a too small MSS.
Use the new sysctl tcp_min_snd_mss to make sure MSS search
is performed in an acceptable range.
CVE-2019-11479 -- tcp mss hardcoded to 48
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Jonathan Lemon <[email protected]>
Cc: Jonathan Looney <[email protected]>
Acked-by: Neal Cardwell <[email protected]>
Cc: Yuchung Cheng <[email protected]>
Cc: Tyler Hicks <[email protected]>
Cc: Bruce Curtis <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
struct snd_rawmidi_params * params)
{
char *newbuf;
struct snd_rawmidi_runtime *runtime = substream->runtime;
if (substream->append && substream->use_count > 1)
return -EBUSY;
snd_rawmidi_drain_output(substream);
if (params->buffer_size < 32 || params->buffer_size > 1024L * 1024L) {
return -EINVAL;
}
if (params->avail_min < 1 || params->avail_min > params->buffer_size) {
return -EINVAL;
}
if (params->buffer_size != runtime->buffer_size) {
newbuf = krealloc(runtime->buffer, params->buffer_size,
GFP_KERNEL);
if (!newbuf)
return -ENOMEM;
runtime->buffer = newbuf;
runtime->buffer_size = params->buffer_size;
runtime->avail = runtime->buffer_size;
}
runtime->avail_min = params->avail_min;
substream->active_sensing = !params->no_active_sensing;
return 0;
}
| 1 |
[
"CWE-415"
] |
linux
|
39675f7a7c7e7702f7d5341f1e0d01db746543a0
| 253,309,626,826,670,350,000,000,000,000,000,000,000 | 28 |
ALSA: rawmidi: Change resized buffers atomically
The SNDRV_RAWMIDI_IOCTL_PARAMS ioctl may resize the buffers and the
current code is racy. For example, the sequencer client may write to
buffer while it being resized.
As a simple workaround, let's switch to the resized buffer inside the
stream runtime lock.
Reported-by: [email protected]
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]>
|
int CLASS parse_jpeg(int offset)
{
int len, save, hlen, mark;
fseek(ifp, offset, SEEK_SET);
if (fgetc(ifp) != 0xff || fgetc(ifp) != 0xd8)
return 0;
while (fgetc(ifp) == 0xff && (mark = fgetc(ifp)) != 0xda)
{
order = 0x4d4d;
len = get2() - 2;
save = ftell(ifp);
if (mark == 0xc0 || mark == 0xc3 || mark == 0xc9)
{
fgetc(ifp);
raw_height = get2();
raw_width = get2();
}
order = get2();
hlen = get4();
if (get4() == 0x48454150
#ifdef LIBRAW_LIBRARY_BUILD
&& (save + hlen) >= 0 && (save + hlen) <= ifp->size()
#endif
) /* "HEAP" */
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
#endif
parse_ciff(save + hlen, len - hlen, 0);
}
if (parse_tiff(save + 6))
apply_tiff();
fseek(ifp, save + len, SEEK_SET);
}
return 1;
}
| 0 |
[
"CWE-119",
"CWE-125"
] |
LibRaw
|
f1394822a0152ceed77815eafa5cac4e8baab10a
| 291,261,801,944,483,930,000,000,000,000,000,000,000 | 38 |
SECUNIA advisory 76000 #1 (wrong fuji width set via tiff tag
|
OidSendFunctionCall(Oid functionId, Datum val)
{
FmgrInfo flinfo;
fmgr_info(functionId, &flinfo);
return SendFunctionCall(&flinfo, val);
}
| 0 |
[
"CWE-264"
] |
postgres
|
537cbd35c893e67a63c59bc636c3e888bd228bc7
| 130,897,858,227,681,510,000,000,000,000,000,000,000 | 7 |
Prevent privilege escalation in explicit calls to PL validators.
The primary role of PL validators is to be called implicitly during
CREATE FUNCTION, but they are also normal functions that a user can call
explicitly. Add a permissions check to each validator to ensure that a
user cannot use explicit validator calls to achieve things he could not
otherwise achieve. Back-patch to 8.4 (all supported versions).
Non-core procedural language extensions ought to make the same two-line
change to their own validators.
Andres Freund, reviewed by Tom Lane and Noah Misch.
Security: CVE-2014-0061
|
void Compute(OpKernelContext* context) override {
// boxes: [num_boxes, 4]
const Tensor& boxes = context->input(0);
// scores: [num_boxes]
const Tensor& scores = context->input(1);
// max_output_size: scalar
const Tensor& max_output_size = context->input(2);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(max_output_size.shape()),
errors::InvalidArgument("max_output_size must be 0-D, got shape ",
max_output_size.shape().DebugString()));
OP_REQUIRES(context, iou_threshold_ >= 0 && iou_threshold_ <= 1,
errors::InvalidArgument("iou_threshold must be in [0, 1]"));
int num_boxes = 0;
ParseAndCheckBoxSizes(context, boxes, &num_boxes);
CheckScoreSizes(context, num_boxes, scores);
if (!context->status().ok()) {
return;
}
auto similarity_fn = CreateIOUSimilarityFn<float>(boxes);
const float score_threshold_val = std::numeric_limits<float>::lowest();
const float dummy_soft_nms_sigma = static_cast<float>(0.0);
DoNonMaxSuppressionOp<float>(context, scores, num_boxes, max_output_size,
iou_threshold_, score_threshold_val,
dummy_soft_nms_sigma, similarity_fn);
}
| 0 |
[
"CWE-369",
"CWE-681"
] |
tensorflow
|
b5cdbf12ffcaaffecf98f22a6be5a64bb96e4f58
| 225,071,791,795,662,740,000,000,000,000,000,000,000 | 28 |
Prevent overflow due to integer conversion to unsigned.
PiperOrigin-RevId: 387738045
Change-Id: Id7e95bc07e02df1c66b72bd09f389608c87bdebe
|
NextWindow()
{
register struct win **pp;
int n = fore ? fore->w_number : maxwin;
struct win *group = fore ? fore->w_group : 0;
for (pp = fore ? wtab + n + 1 : wtab; pp != wtab + n; pp++)
{
if (pp == wtab + maxwin)
pp = wtab;
if (*pp)
{
if (!fore || group == (*pp)->w_group)
break;
}
}
if (pp == wtab + n)
return -1;
return pp - wtab;
}
| 0 |
[] |
screen
|
c5db181b6e017cfccb8d7842ce140e59294d9f62
| 243,229,922,694,442,700,000,000,000,000,000,000,000 | 20 |
ansi: add support for xterm OSC 11
It allows for getting and setting the background color. Notably, Vim uses
OSC 11 to learn whether it's running on a light or dark colored terminal
and choose a color scheme accordingly.
Tested with gnome-terminal and xterm. When called with "?" argument the
current background color is returned:
$ echo -ne "\e]11;?\e\\"
$ 11;rgb:2323/2727/2929
Signed-off-by: Lubomir Rintel <[email protected]>
(cherry picked from commit 7059bff20a28778f9d3acf81cad07b1388d02309)
Signed-off-by: Amadeusz Sławiński <[email protected]
|
Subsets and Splits
CWE 416 & 19
The query filters records related to specific CWEs (Common Weakness Enumerations), providing a basic overview of entries with these vulnerabilities but without deeper analysis.
CWE Frequency in Train Set
Counts the occurrences of each CWE (Common Weakness Enumeration) in the dataset, providing a basic distribution but limited insight.