func stringlengths 0-484k | target int64 0-1 | cwe listlengths 0-4 | project stringclasses 799 values | commit_id stringlengths 40-40 | hash float64 1,215,700,430,453,689,100,000,000B to 340,281,914,521,452,260,000,000,000,000B | size int64 1-24k | message stringlengths 0-13.3k
---|---|---|---|---|---|---|---|
uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
HA_CREATE_INFO *create_info,
handlerton *old_db_type,
bool *partition_changed,
char *db,
const char *table_name,
const char *path,
TABLE **fast_alter_table)
{
TABLE *new_table= NULL;
DBUG_ENTER("prep_alter_part_table");
/* Foreign keys on partitioned tables are not supported, waits for WL#148 */
if (table->part_info && (alter_info->flags & ALTER_FOREIGN_KEY))
{
my_error(ER_FOREIGN_KEY_ON_PARTITIONED, MYF(0));
DBUG_RETURN(TRUE);
}
thd->work_part_info= thd->lex->part_info;
if (thd->work_part_info &&
!(thd->work_part_info= thd->lex->part_info->get_clone()))
DBUG_RETURN(TRUE);
/* ALTER_ADMIN_PARTITION is handled in mysql_admin_table */
DBUG_ASSERT(!(alter_info->flags & ALTER_ADMIN_PARTITION));
if (alter_info->flags &
(ALTER_ADD_PARTITION | ALTER_DROP_PARTITION |
ALTER_COALESCE_PARTITION | ALTER_REORGANIZE_PARTITION |
ALTER_TABLE_REORG | ALTER_REBUILD_PARTITION))
{
partition_info *tab_part_info;
partition_info *alt_part_info= thd->work_part_info;
uint flags= 0;
bool is_last_partition_reorged= FALSE;
part_elem_value *tab_max_elem_val= NULL;
part_elem_value *alt_max_elem_val= NULL;
longlong tab_max_range= 0, alt_max_range= 0;
if (!table->part_info)
{
my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0));
DBUG_RETURN(TRUE);
}
/*
Open our intermediate table, we will operate on a temporary instance
of the original table, to be able to skip copying all partitions.
Open it as a copy of the original table, and modify its partition_info
object to allow fast_alter_partition_table to perform the changes.
*/
DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::TABLE, db, table_name,
MDL_INTENTION_EXCLUSIVE));
new_table= open_table_uncached(thd, path, db, table_name, 0);
if (!new_table)
DBUG_RETURN(TRUE);
/*
This table may be used for copy rows between partitions
and also read/write columns when fixing the partition_info struct.
*/
new_table->use_all_columns();
tab_part_info= new_table->part_info;
if (alter_info->flags & ALTER_TABLE_REORG)
{
uint new_part_no, curr_part_no;
if (tab_part_info->part_type != HASH_PARTITION ||
tab_part_info->use_default_num_partitions)
{
my_error(ER_REORG_NO_PARAM_ERROR, MYF(0));
goto err;
}
new_part_no= new_table->file->get_default_no_partitions(create_info);
curr_part_no= tab_part_info->num_parts;
if (new_part_no == curr_part_no)
{
/*
No change is needed, we will have the same number of partitions
after the change as before. Thus we can reply ok immediately
without any changes at all.
*/
*fast_alter_table= new_table;
thd->work_part_info= tab_part_info;
DBUG_RETURN(FALSE);
}
else if (new_part_no > curr_part_no)
{
/*
We will add more partitions, we use the ADD PARTITION without
setting the flag for no default number of partitions
*/
alter_info->flags|= ALTER_ADD_PARTITION;
thd->work_part_info->num_parts= new_part_no - curr_part_no;
}
else
{
/*
We will remove hash partitions, we use the COALESCE PARTITION
without setting the flag for no default number of partitions
*/
alter_info->flags|= ALTER_COALESCE_PARTITION;
alter_info->num_parts= curr_part_no - new_part_no;
}
}
if (!(flags= new_table->file->alter_table_flags(alter_info->flags)))
{
my_error(ER_PARTITION_FUNCTION_FAILURE, MYF(0));
goto err;
}
if ((flags & (HA_FAST_CHANGE_PARTITION | HA_PARTITION_ONE_PHASE)) != 0)
*fast_alter_table= new_table;
DBUG_PRINT("info", ("*fast_alter_table: %p flags: 0x%x",
*fast_alter_table, flags));
if ((alter_info->flags & ALTER_ADD_PARTITION) ||
(alter_info->flags & ALTER_REORGANIZE_PARTITION))
{
if (thd->work_part_info->part_type != tab_part_info->part_type)
{
if (thd->work_part_info->part_type == NOT_A_PARTITION)
{
if (tab_part_info->part_type == RANGE_PARTITION)
{
my_error(ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), "RANGE");
goto err;
}
else if (tab_part_info->part_type == LIST_PARTITION)
{
my_error(ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), "LIST");
goto err;
}
/*
Hash partitions can be altered without the parser finding out
that it is HASH partitioned. So no error here.
*/
}
else
{
if (thd->work_part_info->part_type == RANGE_PARTITION)
{
my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0),
"RANGE", "LESS THAN");
}
else if (thd->work_part_info->part_type == LIST_PARTITION)
{
DBUG_ASSERT(thd->work_part_info->part_type == LIST_PARTITION);
my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0),
"LIST", "IN");
}
else if (tab_part_info->part_type == RANGE_PARTITION)
{
my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0),
"RANGE", "LESS THAN");
}
else
{
DBUG_ASSERT(tab_part_info->part_type == LIST_PARTITION);
my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0),
"LIST", "IN");
}
goto err;
}
}
if ((tab_part_info->column_list &&
alt_part_info->num_columns != tab_part_info->num_columns) ||
(!tab_part_info->column_list &&
(tab_part_info->part_type == RANGE_PARTITION ||
tab_part_info->part_type == LIST_PARTITION) &&
alt_part_info->num_columns != 1U) ||
(!tab_part_info->column_list &&
tab_part_info->part_type == HASH_PARTITION &&
alt_part_info->num_columns != 0))
{
my_error(ER_PARTITION_COLUMN_LIST_ERROR, MYF(0));
goto err;
}
alt_part_info->column_list= tab_part_info->column_list;
if (alt_part_info->fix_parser_data(thd))
{
goto err;
}
}
if (alter_info->flags & ALTER_ADD_PARTITION)
{
/*
We start by moving the new partitions to the list of temporary
partitions. We will then check that the new partitions fit in the
partitioning scheme as currently set-up.
Partitions are always added at the end in ADD PARTITION.
*/
uint num_new_partitions= alt_part_info->num_parts;
uint num_orig_partitions= tab_part_info->num_parts;
uint check_total_partitions= num_new_partitions + num_orig_partitions;
uint new_total_partitions= check_total_partitions;
/*
We allow quite a lot of values to be supplied by defaults, however we
must know the number of new partitions in this case.
*/
if (thd->lex->no_write_to_binlog &&
tab_part_info->part_type != HASH_PARTITION)
{
my_error(ER_NO_BINLOG_ERROR, MYF(0));
goto err;
}
if (tab_part_info->defined_max_value)
{
my_error(ER_PARTITION_MAXVALUE_ERROR, MYF(0));
goto err;
}
if (num_new_partitions == 0)
{
my_error(ER_ADD_PARTITION_NO_NEW_PARTITION, MYF(0));
goto err;
}
if (tab_part_info->is_sub_partitioned())
{
if (alt_part_info->num_subparts == 0)
alt_part_info->num_subparts= tab_part_info->num_subparts;
else if (alt_part_info->num_subparts != tab_part_info->num_subparts)
{
my_error(ER_ADD_PARTITION_SUBPART_ERROR, MYF(0));
goto err;
}
check_total_partitions= new_total_partitions*
alt_part_info->num_subparts;
}
if (check_total_partitions > MAX_PARTITIONS)
{
my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
goto err;
}
alt_part_info->part_type= tab_part_info->part_type;
alt_part_info->subpart_type= tab_part_info->subpart_type;
if (alt_part_info->set_up_defaults_for_partitioning(new_table->file,
ULL(0),
tab_part_info->num_parts))
{
goto err;
}
/*
Handling of on-line cases:
ADD PARTITION for RANGE/LIST PARTITIONING:
------------------------------------------
For range and list partitions add partition is simply adding a
new empty partition to the table. If the handler supports this we
will use the simple method of doing this. The figure below shows
an example of this and the states involved in making this change.
Existing partitions New added partitions
------ ------ ------ ------ | ------ ------
| | | | | | | | | | | | |
| p0 | | p1 | | p2 | | p3 | | | p4 | | p5 |
------ ------ ------ ------ | ------ ------
PART_NORMAL PART_NORMAL PART_NORMAL PART_NORMAL PART_TO_BE_ADDED*2
PART_NORMAL PART_NORMAL PART_NORMAL PART_NORMAL PART_IS_ADDED*2
The first line is the states before adding the new partitions and the
second line is after the new partitions are added. All the partitions are
in the partitions list, no partitions are placed in the temp_partitions
list.
ADD PARTITION for HASH PARTITIONING
-----------------------------------
This little figure tries to show the various partitions involved when
adding two new partitions to a linear hash based partitioned table with
four partitions to start with, which lists are used and the states they
pass through. Adding partitions to a normal hash-based table is similar,
except that it is always all the existing partitions that are reorganised,
not only a subset of them.
Existing partitions New added partitions
------ ------ ------ ------ | ------ ------
| | | | | | | | | | | | |
| p0 | | p1 | | p2 | | p3 | | | p4 | | p5 |
------ ------ ------ ------ | ------ ------
PART_CHANGED PART_CHANGED PART_NORMAL PART_NORMAL PART_TO_BE_ADDED
PART_IS_CHANGED*2 PART_NORMAL PART_NORMAL PART_IS_ADDED
PART_NORMAL PART_NORMAL PART_NORMAL PART_NORMAL PART_IS_ADDED
Reorganised existing partitions
------ ------
| | | |
| p0'| | p1'|
------ ------
p0 - p5 will be in the partitions list of partitions.
p0' and p1' will actually not exist as separate objects; their presence can
be deduced from the state of the partition, and the names of those
partitions can be deduced the same way.
After adding the partitions and copying the partition data to p0', p1',
p4 and p5 from p0 and p1 the states change to adapt for the new situation
where p0 and p1 are dropped and replaced by p0' and p1', and the new p4 and
p5 are in the table again.
The first line above shows the states of the partitions before we start
adding and copying partitions, the second after completing the adding
and copying and finally the third line after also dropping the partitions
that are reorganised.
*/
if (*fast_alter_table &&
tab_part_info->part_type == HASH_PARTITION)
{
uint part_no= 0, start_part= 1, start_sec_part= 1;
uint end_part= 0, end_sec_part= 0;
uint upper_2n= tab_part_info->linear_hash_mask + 1;
uint lower_2n= upper_2n >> 1;
bool all_parts= TRUE;
if (tab_part_info->linear_hash_ind &&
num_new_partitions < upper_2n)
{
/*
An analysis of which parts need reorganisation shows that it is
divided into two intervals. The first interval is those parts
that are reorganised up until upper_2n - 1. From upper_2n and
onwards it starts again from partition 0 and goes on until
it reaches p(upper_2n - 1). If the last new partition reaches
beyond upper_2n - 1 then the first interval will end with
p(lower_2n - 1) and start with p(num_orig_partitions - lower_2n).
If lower_2n partitions are added then p0 to p(lower_2n - 1) will
be reorganised, which means that the two intervals become one
interval at this point. Thus only when adding fewer than
lower_2n partitions and going beyond a total of upper_2n do we
actually get two intervals.
To exemplify this assume we have 6 partitions to start with and
add 1, 2, 3, 5, 6, 7, 8, 9 partitions.
The first partition to add after p5 is p6 = 110 in binary. Thus we
can see that 10 = p2 will be the partition to reorganise if only one
partition is added.
If 2 partitions are added we reorganise [p2, p3]. Those two
cases are covered by the second if part below.
If 3 partitions are added we reorganise [p2, p3] U [p0,p0]. This
part is covered by the else part below.
If 5 partitions are added we get [p2,p3] U [p0, p2] = [p0, p3].
This is covered by the first if-part, where the max check makes us
use lower_2n - 1 here.
If 7 partitions are added we get [p2,p3] U [p0, p4] = [p0, p4].
This is covered by the first if part but here we use the first
calculated end_part.
Finally with 9 new partitions we would also reorganise p6 if we
used the method below but we cannot reorganise more partitions
than what we had from the start and thus we simply set all_parts
to TRUE. In this case we don't get into this if-part at all.
*/
all_parts= FALSE;
if (num_new_partitions >= lower_2n)
{
/*
In this case there is only one interval since the two intervals
overlap and this starts from zero to last_part_no - upper_2n
*/
start_part= 0;
end_part= new_total_partitions - (upper_2n + 1);
end_part= max(lower_2n - 1, end_part);
}
else if (new_total_partitions <= upper_2n)
{
/*
Also in this case there is only one interval since we are not
going over a 2**n boundary
*/
start_part= num_orig_partitions - lower_2n;
end_part= start_part + (num_new_partitions - 1);
}
else
{
/* We have two non-overlapping intervals since we are not
passing a 2**n border and we do not have at least lower_2n
new parts, which would ensure that the intervals become
overlapping.
*/
start_part= num_orig_partitions - lower_2n;
end_part= upper_2n - 1;
start_sec_part= 0;
end_sec_part= new_total_partitions - (upper_2n + 1);
}
}
List_iterator<partition_element> tab_it(tab_part_info->partitions);
part_no= 0;
do
{
partition_element *p_elem= tab_it++;
if (all_parts ||
(part_no >= start_part && part_no <= end_part) ||
(part_no >= start_sec_part && part_no <= end_sec_part))
{
p_elem->part_state= PART_CHANGED;
}
} while (++part_no < num_orig_partitions);
}
/*
Need to concatenate the lists here to make it possible to check the
partition info for correctness using check_partition_info.
For on-line add partition we set the state of this partition to
PART_TO_BE_ADDED to ensure that it is known that it is not yet
usable (it becomes usable when the partition is created and the switch
of partition configuration is made).
*/
{
List_iterator<partition_element> alt_it(alt_part_info->partitions);
uint part_count= 0;
do
{
partition_element *part_elem= alt_it++;
if (*fast_alter_table)
part_elem->part_state= PART_TO_BE_ADDED;
if (tab_part_info->partitions.push_back(part_elem))
{
mem_alloc_error(1);
goto err;
}
} while (++part_count < num_new_partitions);
tab_part_info->num_parts+= num_new_partitions;
}
/*
If we specify partitions explicitly we don't use defaults anymore.
Using ADD PARTITION also means that we don't have the default number
of partitions anymore. We also use this code for table reorganisations,
in which case we don't set any default flags to FALSE.
*/
if (!(alter_info->flags & ALTER_TABLE_REORG))
{
if (!alt_part_info->use_default_partitions)
{
DBUG_PRINT("info", ("part_info: 0x%lx", (long) tab_part_info));
tab_part_info->use_default_partitions= FALSE;
}
tab_part_info->use_default_num_partitions= FALSE;
tab_part_info->is_auto_partitioned= FALSE;
}
}
else if (alter_info->flags & ALTER_DROP_PARTITION)
{
/*
Dropping a partition from range or list partitioning is
always safe and can be made more or less immediate. It is necessary
however to ensure that the partition to be removed is safely removed
and that REPAIR TABLE can remove the partition if for some reason the
command to drop the partition failed in the middle.
*/
uint part_count= 0;
uint num_parts_dropped= alter_info->partition_names.elements;
uint num_parts_found= 0;
List_iterator<partition_element> part_it(tab_part_info->partitions);
tab_part_info->is_auto_partitioned= FALSE;
if (!(tab_part_info->part_type == RANGE_PARTITION ||
tab_part_info->part_type == LIST_PARTITION))
{
my_error(ER_ONLY_ON_RANGE_LIST_PARTITION, MYF(0), "DROP");
goto err;
}
if (num_parts_dropped >= tab_part_info->num_parts)
{
my_error(ER_DROP_LAST_PARTITION, MYF(0));
goto err;
}
do
{
partition_element *part_elem= part_it++;
if (is_name_in_list(part_elem->partition_name,
alter_info->partition_names))
{
/*
Set state to indicate that the partition is to be dropped.
*/
num_parts_found++;
part_elem->part_state= PART_TO_BE_DROPPED;
}
} while (++part_count < tab_part_info->num_parts);
if (num_parts_found != num_parts_dropped)
{
my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), "DROP");
goto err;
}
if (new_table->file->is_fk_defined_on_table_or_index(MAX_KEY))
{
my_error(ER_ROW_IS_REFERENCED, MYF(0));
goto err;
}
tab_part_info->num_parts-= num_parts_dropped;
}
else if (alter_info->flags & ALTER_REBUILD_PARTITION)
{
set_engine_all_partitions(tab_part_info,
tab_part_info->default_engine_type);
if (set_part_state(alter_info, tab_part_info, PART_CHANGED))
{
my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), "REBUILD");
goto err;
}
if (!(*fast_alter_table))
{
new_table->file->print_error(HA_ERR_WRONG_COMMAND, MYF(0));
goto err;
}
}
else if (alter_info->flags & ALTER_COALESCE_PARTITION)
{
uint num_parts_coalesced= alter_info->num_parts;
uint num_parts_remain= tab_part_info->num_parts - num_parts_coalesced;
List_iterator<partition_element> part_it(tab_part_info->partitions);
if (tab_part_info->part_type != HASH_PARTITION)
{
my_error(ER_COALESCE_ONLY_ON_HASH_PARTITION, MYF(0));
goto err;
}
if (num_parts_coalesced == 0)
{
my_error(ER_COALESCE_PARTITION_NO_PARTITION, MYF(0));
goto err;
}
if (num_parts_coalesced >= tab_part_info->num_parts)
{
my_error(ER_DROP_LAST_PARTITION, MYF(0));
goto err;
}
/*
Online handling:
COALESCE PARTITION:
-------------------
The figure below shows the manner in which partitions are handled when
performing an on-line coalesce partition and which states they go through
at start, after adding and copying partitions and finally after dropping
the partitions to drop. The figure shows an example using four partitions
to start with, using linear hash and coalescing one partition (always the
last partition).
When using linear hash, all remaining partitions will have a new
reorganised part.
Existing partitions Coalesced partition
------ ------ ------ | ------
| | | | | | | | |
| p0 | | p1 | | p2 | | | p3 |
------ ------ ------ | ------
PART_NORMAL PART_CHANGED PART_NORMAL PART_REORGED_DROPPED
PART_NORMAL PART_IS_CHANGED PART_NORMAL PART_TO_BE_DROPPED
PART_NORMAL PART_NORMAL PART_NORMAL PART_IS_DROPPED
Reorganised existing partitions
------
| |
| p1'|
------
p0 - p3 is in the partitions list.
The p1' partition will actually not be in any list; it is deduced from
the state of p1.
*/
{
uint part_count= 0, start_part= 1, start_sec_part= 1;
uint end_part= 0, end_sec_part= 0;
bool all_parts= TRUE;
if (*fast_alter_table &&
tab_part_info->linear_hash_ind)
{
uint upper_2n= tab_part_info->linear_hash_mask + 1;
uint lower_2n= upper_2n >> 1;
all_parts= FALSE;
if (num_parts_coalesced >= lower_2n)
{
all_parts= TRUE;
}
else if (num_parts_remain >= lower_2n)
{
end_part= tab_part_info->num_parts - (lower_2n + 1);
start_part= num_parts_remain - lower_2n;
}
else
{
start_part= 0;
end_part= tab_part_info->num_parts - (lower_2n + 1);
end_sec_part= (lower_2n >> 1) - 1;
start_sec_part= end_sec_part - (lower_2n - (num_parts_remain + 1));
}
}
do
{
partition_element *p_elem= part_it++;
if (*fast_alter_table &&
(all_parts ||
(part_count >= start_part && part_count <= end_part) ||
(part_count >= start_sec_part && part_count <= end_sec_part)))
p_elem->part_state= PART_CHANGED;
if (++part_count > num_parts_remain)
{
if (*fast_alter_table)
p_elem->part_state= PART_REORGED_DROPPED;
else
part_it.remove();
}
} while (part_count < tab_part_info->num_parts);
tab_part_info->num_parts= num_parts_remain;
}
if (!(alter_info->flags & ALTER_TABLE_REORG))
{
tab_part_info->use_default_num_partitions= FALSE;
tab_part_info->is_auto_partitioned= FALSE;
}
}
else if (alter_info->flags & ALTER_REORGANIZE_PARTITION)
{
/*
Reorganising partitions takes a number of partitions that are next
to each other (at least for RANGE PARTITIONS) and then uses those
to create a set of new partitions. So data is copied from those
partitions into the new set of partitions. Those new partitions
can have more or fewer values in the LIST value specifications; both
are allowed. The ranges can be different, but since they are
changing a set of consecutive partitions they must cover the same
range as those changed from.
This command can be used on RANGE and LIST partitions.
*/
uint num_parts_reorged= alter_info->partition_names.elements;
uint num_parts_new= thd->work_part_info->partitions.elements;
uint check_total_partitions;
tab_part_info->is_auto_partitioned= FALSE;
if (num_parts_reorged > tab_part_info->num_parts)
{
my_error(ER_REORG_PARTITION_NOT_EXIST, MYF(0));
goto err;
}
if (!(tab_part_info->part_type == RANGE_PARTITION ||
tab_part_info->part_type == LIST_PARTITION) &&
(num_parts_new != num_parts_reorged))
{
my_error(ER_REORG_HASH_ONLY_ON_SAME_NO, MYF(0));
goto err;
}
if (tab_part_info->is_sub_partitioned() &&
alt_part_info->num_subparts &&
alt_part_info->num_subparts != tab_part_info->num_subparts)
{
my_error(ER_PARTITION_WRONG_NO_SUBPART_ERROR, MYF(0));
goto err;
}
check_total_partitions= tab_part_info->num_parts + num_parts_new;
check_total_partitions-= num_parts_reorged;
if (check_total_partitions > MAX_PARTITIONS)
{
my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
goto err;
}
alt_part_info->part_type= tab_part_info->part_type;
alt_part_info->subpart_type= tab_part_info->subpart_type;
alt_part_info->num_subparts= tab_part_info->num_subparts;
DBUG_ASSERT(!alt_part_info->use_default_partitions);
if (alt_part_info->set_up_defaults_for_partitioning(new_table->file,
ULL(0),
0))
{
goto err;
}
/*
Online handling:
REORGANIZE PARTITION:
---------------------
The figure exemplifies the handling of partitions, their state changes and
how they are organised. It shows four partitions where two of the
partitions are reorganised (p1 and p2) into two new partitions (p4 and p5).
The reason for this change could be to change range limits, change list
values or, for hash partitions, simply to reorganise the partitions, which
could also involve moving them to new disks or new node groups (MySQL Cluster).
Existing partitions
------ ------ ------ ------
| | | | | | | |
| p0 | | p1 | | p2 | | p3 |
------ ------ ------ ------
PART_NORMAL PART_TO_BE_REORGED PART_NORMAL
PART_NORMAL PART_TO_BE_DROPPED PART_NORMAL
PART_NORMAL PART_IS_DROPPED PART_NORMAL
Reorganised new partitions (replacing p1 and p2)
------ ------
| | | |
| p4 | | p5 |
------ ------
PART_TO_BE_ADDED
PART_IS_ADDED
PART_IS_ADDED
All unchanged partitions and the new partitions are in the partitions list
in the order they will have when the change is completed. The reorganised
partitions are placed in the temp_partitions list. PART_IS_ADDED is only a
temporary state not written in the frm file. It is used to ensure we write
the generated partition syntax in a correct manner.
*/
{
List_iterator<partition_element> tab_it(tab_part_info->partitions);
uint part_count= 0;
bool found_first= FALSE;
bool found_last= FALSE;
uint drop_count= 0;
do
{
partition_element *part_elem= tab_it++;
is_last_partition_reorged= FALSE;
if (is_name_in_list(part_elem->partition_name,
alter_info->partition_names))
{
is_last_partition_reorged= TRUE;
drop_count++;
if (tab_part_info->column_list)
{
List_iterator<part_elem_value> p(part_elem->list_val_list);
tab_max_elem_val= p++;
}
else
tab_max_range= part_elem->range_value;
if (*fast_alter_table &&
tab_part_info->temp_partitions.push_back(part_elem))
{
mem_alloc_error(1);
goto err;
}
if (*fast_alter_table)
part_elem->part_state= PART_TO_BE_REORGED;
if (!found_first)
{
uint alt_part_count= 0;
partition_element *alt_part_elem;
List_iterator<partition_element>
alt_it(alt_part_info->partitions);
found_first= TRUE;
do
{
alt_part_elem= alt_it++;
if (tab_part_info->column_list)
{
List_iterator<part_elem_value> p(alt_part_elem->list_val_list);
alt_max_elem_val= p++;
}
else
alt_max_range= alt_part_elem->range_value;
if (*fast_alter_table)
alt_part_elem->part_state= PART_TO_BE_ADDED;
if (alt_part_count == 0)
tab_it.replace(alt_part_elem);
else
tab_it.after(alt_part_elem);
} while (++alt_part_count < num_parts_new);
}
else if (found_last)
{
my_error(ER_CONSECUTIVE_REORG_PARTITIONS, MYF(0));
goto err;
}
else
tab_it.remove();
}
else
{
if (found_first)
found_last= TRUE;
}
} while (++part_count < tab_part_info->num_parts);
if (drop_count != num_parts_reorged)
{
my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), "REORGANIZE");
goto err;
}
tab_part_info->num_parts= check_total_partitions;
}
}
else
{
DBUG_ASSERT(FALSE);
}
*partition_changed= TRUE;
thd->work_part_info= tab_part_info;
if (alter_info->flags & ALTER_ADD_PARTITION ||
alter_info->flags & ALTER_REORGANIZE_PARTITION)
{
if (tab_part_info->use_default_subpartitions &&
!alt_part_info->use_default_subpartitions)
{
tab_part_info->use_default_subpartitions= FALSE;
tab_part_info->use_default_num_subpartitions= FALSE;
}
if (tab_part_info->check_partition_info(thd, (handlerton**)NULL,
new_table->file, ULL(0), TRUE))
{
goto err;
}
/*
The check below needs to be performed after check_partition_info
since this function "fixes" the item trees of the new partitions
to reorganize into
*/
if (alter_info->flags == ALTER_REORGANIZE_PARTITION &&
tab_part_info->part_type == RANGE_PARTITION &&
((is_last_partition_reorged &&
(tab_part_info->column_list ?
(tab_part_info->compare_column_values(
alt_max_elem_val->col_val_array,
tab_max_elem_val->col_val_array) < 0) :
alt_max_range < tab_max_range)) ||
(!is_last_partition_reorged &&
(tab_part_info->column_list ?
(tab_part_info->compare_column_values(
alt_max_elem_val->col_val_array,
tab_max_elem_val->col_val_array) != 0) :
alt_max_range != tab_max_range))))
{
/*
For range partitioning the total resulting range before and
after the change must be the same except in one case: when the
last partition is reorganised, it is acceptable to increase the
total range.
The reason is that it is not allowed to have "holes" in the
middle of the ranges, and thus we should not allow reorganising
in a way that creates "holes".
*/
my_error(ER_REORG_OUTSIDE_RANGE, MYF(0));
goto err;
}
}
}
else
{
/*
When thd->lex->part_info has a reference to a partition_info the
ALTER TABLE contained a definition of a partitioning.
Case I:
If there was a partition before and there is a new one defined.
We use the new partitioning. The new partitioning is already
defined in the correct variable so no work is needed to
accomplish this.
We do however need to update partition_changed to ensure that not
only the frm file is changed in the ALTER TABLE command.
Case IIa:
There was a partitioning before and there is no new one defined.
Also the user has not specified to remove partitioning explicitly.
We use the old partitioning also for the new table. We do this
by assigning the partition_info from the table loaded in
open_table to the partition_info struct used by mysql_create_table
later in this method.
Case IIb:
There was a partitioning before and there is no new one defined.
The user has specified explicitly to remove partitioning
Since the user has specified explicitly to remove partitioning
we override the old partitioning info and create a new table using
the specified engine.
In this case the partition also is changed.
Case III:
There was no partitioning before altering the table, there is
partitioning defined in the altered table. Use the new partitioning.
No work needed since the partitioning info is already in the
correct variable.
In this case we discover one case where the new partitioning is using
the same partition function as the default (PARTITION BY KEY or
PARTITION BY LINEAR KEY with the list of fields equal to the primary
key fields OR PARTITION BY [LINEAR] KEY() for tables without primary
key)
Also here partition has changed and thus a new table must be
created.
Case IV:
There was no partitioning before and no partitioning defined.
Obviously no work needed.
*/
partition_info *tab_part_info= table->part_info;
if (tab_part_info)
{
if (alter_info->flags & ALTER_REMOVE_PARTITIONING)
{
DBUG_PRINT("info", ("Remove partitioning"));
if (!(create_info->used_fields & HA_CREATE_USED_ENGINE))
{
DBUG_PRINT("info", ("No explicit engine used"));
create_info->db_type= tab_part_info->default_engine_type;
}
DBUG_PRINT("info", ("New engine type: %s",
ha_resolve_storage_engine_name(create_info->db_type)));
thd->work_part_info= NULL;
*partition_changed= TRUE;
}
else if (!thd->work_part_info)
{
/*
Retain partitioning but possibly with a new storage engine
beneath.
Create a copy of TABLE::part_info to be able to modify it freely.
*/
if (!(tab_part_info= tab_part_info->get_clone()))
DBUG_RETURN(TRUE);
thd->work_part_info= tab_part_info;
if (create_info->used_fields & HA_CREATE_USED_ENGINE &&
create_info->db_type != tab_part_info->default_engine_type)
{
/*
Make sure change of engine happens to all partitions.
*/
DBUG_PRINT("info", ("partition changed"));
if (tab_part_info->is_auto_partitioned)
{
/*
If the user originally didn't specify partitioning to be
used we can remove it now.
*/
thd->work_part_info= NULL;
}
else
{
/*
Ensure that all partitions have the proper engine set-up
*/
set_engine_all_partitions(thd->work_part_info,
create_info->db_type);
}
*partition_changed= TRUE;
}
}
}
if (thd->work_part_info)
{
partition_info *part_info= thd->work_part_info;
bool is_native_partitioned= FALSE;
/*
Need to cater for engine types that can handle partitioning without
using the partition handler.
*/
if (part_info != tab_part_info)
{
if (part_info->fix_parser_data(thd))
{
goto err;
}
/*
Compare the old and new part_info. If only key_algorithm
change is done, don't consider it as changed partitioning (to avoid
rebuild). This is to handle KEY (numeric_cols) partitioned tables
created in 5.1. For more info, see bug#14521864.
*/
if (alter_info->flags != ALTER_PARTITION ||
!table->part_info ||
!table->part_info->has_same_partitioning(part_info))
{
DBUG_PRINT("info", ("partition changed"));
*partition_changed= true;
}
}
/*
Set up partition default_engine_type either from the create_info
or from the previous table
*/
if (create_info->used_fields & HA_CREATE_USED_ENGINE)
part_info->default_engine_type= create_info->db_type;
else
{
if (tab_part_info)
part_info->default_engine_type= tab_part_info->default_engine_type;
else
part_info->default_engine_type= create_info->db_type;
}
DBUG_ASSERT(part_info->default_engine_type &&
part_info->default_engine_type != partition_hton);
if (check_native_partitioned(create_info, &is_native_partitioned,
part_info, thd))
{
goto err;
}
if (!is_native_partitioned)
{
DBUG_ASSERT(create_info->db_type);
create_info->db_type= partition_hton;
}
}
}
DBUG_RETURN(FALSE);
err:
if (new_table)
{
/*
Only remove the intermediate table object and its share object,
do not remove the .frm file, since it is the original one.
*/
close_temporary(new_table, 1, 0);
}
*fast_alter_table= NULL;
DBUG_RETURN(TRUE);
}
| 0 | [] | server | f305a7ce4bccbd56520d874e1d81a4f29bc17a96 | 306,681,398,024,881,920,000,000,000,000,000,000,000 | 1,000 | bugfix: long partition names |
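The interval analysis in the long comment above is dense, so here is a standalone sketch of the same computation using the comment's worked example of six LINEAR HASH partitions. All names are invented for the illustration, upper_2n is assumed to equal linear_hash_mask + 1 as in the function, and this is not MySQL code:

#include <stdio.h>

static void reorg_intervals(unsigned num_orig, unsigned num_new,
                            unsigned upper_2n)
{
    unsigned lower_2n = upper_2n >> 1;
    unsigned new_total = num_orig + num_new;

    if (num_new >= upper_2n) {
        /* More new parts than ever existed: everything is reorganised. */
        printf("add %u: all %u original partitions change\n",
               num_new, num_orig);
        return;
    }
    if (num_new >= lower_2n) {
        /* The two intervals overlap into one, starting at p0. */
        unsigned end = new_total - (upper_2n + 1);
        if (end < lower_2n - 1)
            end = lower_2n - 1;
        printf("add %u: reorganise [p0, p%u]\n", num_new, end);
    } else if (new_total <= upper_2n) {
        /* No 2**n boundary is crossed: one interval. */
        unsigned start = num_orig - lower_2n;
        printf("add %u: reorganise [p%u, p%u]\n",
               num_new, start, start + num_new - 1);
    } else {
        /* Crossing the boundary with few new parts: two intervals. */
        unsigned start = num_orig - lower_2n;
        printf("add %u: reorganise [p%u, p%u] U [p0, p%u]\n",
               num_new, start, upper_2n - 1, new_total - (upper_2n + 1));
    }
}

int main(void)
{
    /* The comment's worked example: 6 partitions, so upper_2n = 8. */
    unsigned ks[] = { 1, 2, 5, 7 };
    for (int i = 0; i < 4; i++)
        reorg_intervals(6, ks[i], 8);
    return 0;
}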
int ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
{
if (data)
dev->features |= NETIF_F_IP_CSUM;
else
dev->features &= ~NETIF_F_IP_CSUM;
return 0;
}
| 0 | ["CWE-190"] | linux-2.6 | db048b69037e7fa6a7d9e95a1271a50dc08ae233 | 183,974,906,708,515,840,000,000,000,000,000,000,000 | 9 |
ethtool: Fix potential kernel buffer overflow in ETHTOOL_GRXCLSRLALL
On a 32-bit machine, info.rule_cnt >= 0x40000000 leads to integer
overflow and the buffer may be smaller than needed. Since
ETHTOOL_GRXCLSRLALL is unprivileged, this can presumably be used for at
least denial of service.
Signed-off-by: Ben Hutchings <[email protected]>
Cc: [email protected]
Signed-off-by: David S. Miller <[email protected]>
|
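The commit message describes a size computation that wraps on 32-bit. A minimal standalone demo of that wrap and of the division-based guard commonly used against it (illustrative names and types, not the kernel patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t rule_cnt = 0x40000000u;

    /* What a 32-bit size_t computes: 0x40000000 * 4 wraps to 0, so the
     * allocation is far smaller than the count later trusted. */
    uint32_t bytes = rule_cnt * (uint32_t)sizeof(uint32_t);
    printf("%u rules -> %u bytes after the wrap\n", rule_cnt, bytes);

    /* The guard: divide instead of multiplying, reject what overflows. */
    if (rule_cnt > UINT32_MAX / sizeof(uint32_t))
        printf("rejected: rule_cnt would overflow the size computation\n");
    return 0;
}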
int OSD::handle_pg_peering_evt(
spg_t pgid,
const pg_history_t& orig_history,
const PastIntervals& pi,
epoch_t epoch,
PG::CephPeeringEvtRef evt)
{
if (service.splitting(pgid)) {
peering_wait_for_split[pgid].push_back(evt);
return -EEXIST;
}
PG *pg = _lookup_lock_pg(pgid);
if (!pg) {
// same primary?
if (!osdmap->have_pg_pool(pgid.pool()))
return -EINVAL;
int up_primary, acting_primary;
vector<int> up, acting;
osdmap->pg_to_up_acting_osds(
pgid.pgid, &up, &up_primary, &acting, &acting_primary);
pg_history_t history = orig_history;
bool valid_history = project_pg_history(
pgid, history, epoch, up, up_primary, acting, acting_primary);
if (!valid_history || epoch < history.same_interval_since) {
dout(10) << __func__ << pgid << " acting changed in "
<< history.same_interval_since << " (msg from " << epoch << ")"
<< dendl;
return -EINVAL;
}
if (service.splitting(pgid)) {
ceph_abort();
}
const bool is_mon_create =
evt->get_event().dynamic_type() == PG::NullEvt::static_type();
if (maybe_wait_for_max_pg(pgid, is_mon_create)) {
return -EAGAIN;
}
// do we need to resurrect a deleting pg?
spg_t resurrected;
PGRef old_pg_state;
res_result result = _try_resurrect_pg(
service.get_osdmap(),
pgid,
&resurrected,
&old_pg_state);
PG::RecoveryCtx rctx = create_context();
switch (result) {
case RES_NONE: {
const pg_pool_t* pp = osdmap->get_pg_pool(pgid.pool());
if (pp->has_flag(pg_pool_t::FLAG_EC_OVERWRITES) &&
store->get_type() != "bluestore") {
clog->warn() << "pg " << pgid
<< " is at risk of silent data corruption: "
<< "the pool allows ec overwrites but is not stored in "
<< "bluestore, so deep scrubbing will not detect bitrot";
}
PG::_create(*rctx.transaction, pgid, pgid.get_split_bits(pp->get_pg_num()));
PG::_init(*rctx.transaction, pgid, pp);
int role = osdmap->calc_pg_role(whoami, acting, acting.size());
if (!pp->is_replicated() && role != pgid.shard)
role = -1;
pg = _create_lock_pg(
get_map(epoch),
pgid, false, false,
role,
up, up_primary,
acting, acting_primary,
history, pi,
*rctx.transaction);
pg->handle_create(&rctx);
pg->write_if_dirty(*rctx.transaction);
dispatch_context(rctx, pg, osdmap);
dout(10) << *pg << " is new" << dendl;
pg->queue_peering_event(evt);
wake_pg_waiters(pg);
pg->unlock();
return 0;
}
case RES_SELF: {
old_pg_state->lock();
OSDMapRef old_osd_map = old_pg_state->get_osdmap();
int old_role = old_pg_state->role;
vector<int> old_up = old_pg_state->up;
int old_up_primary = old_pg_state->up_primary.osd;
vector<int> old_acting = old_pg_state->acting;
int old_primary = old_pg_state->primary.osd;
pg_history_t old_history = old_pg_state->info.history;
PastIntervals old_past_intervals = old_pg_state->past_intervals;
old_pg_state->unlock();
pg = _create_lock_pg(
old_osd_map,
resurrected,
false,
true,
old_role,
old_up,
old_up_primary,
old_acting,
old_primary,
old_history,
old_past_intervals,
*rctx.transaction);
pg->handle_create(&rctx);
pg->write_if_dirty(*rctx.transaction);
dispatch_context(rctx, pg, osdmap);
dout(10) << *pg << " is new (resurrected)" << dendl;
pg->queue_peering_event(evt);
wake_pg_waiters(pg);
pg->unlock();
return 0;
}
case RES_PARENT: {
assert(old_pg_state);
old_pg_state->lock();
OSDMapRef old_osd_map = old_pg_state->get_osdmap();
int old_role = old_pg_state->role;
vector<int> old_up = old_pg_state->up;
int old_up_primary = old_pg_state->up_primary.osd;
vector<int> old_acting = old_pg_state->acting;
int old_primary = old_pg_state->primary.osd;
pg_history_t old_history = old_pg_state->info.history;
PastIntervals old_past_intervals = old_pg_state->past_intervals;
old_pg_state->unlock();
PG *parent = _create_lock_pg(
old_osd_map,
resurrected,
false,
true,
old_role,
old_up,
old_up_primary,
old_acting,
old_primary,
old_history,
old_past_intervals,
*rctx.transaction
);
parent->handle_create(&rctx);
parent->write_if_dirty(*rctx.transaction);
dispatch_context(rctx, parent, osdmap);
dout(10) << *parent << " is new" << dendl;
assert(service.splitting(pgid));
peering_wait_for_split[pgid].push_back(evt);
//parent->queue_peering_event(evt);
parent->queue_null(osdmap->get_epoch(), osdmap->get_epoch());
wake_pg_waiters(parent);
parent->unlock();
return 0;
}
default:
assert(0);
return 0;
}
} else {
// already had it. did the mapping change?
if (epoch < pg->info.history.same_interval_since) {
dout(10) << *pg << __func__ << " acting changed in "
<< pg->info.history.same_interval_since
<< " (msg from " << epoch << ")" << dendl;
} else {
pg->queue_peering_event(evt);
}
pg->unlock();
return -EEXIST;
}
}
| 0 | ["CWE-287", "CWE-284"] | ceph | 5ead97120e07054d80623dada90a5cc764c28468 | 200,995,363,263,722,560,000,000,000,000,000,000,000 | 181 |
auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random()
|
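The message above describes the protocol change in prose: the server rejects the first authorizer with a random challenge, and the client must produce a new authorizer that binds that challenge to this connection. The toy below sketches only that message flow; mix64() is a stand-in mixing function, not cephx cryptography, and every name is invented:

#include <stdint.h>
#include <stdio.h>

static uint64_t mix64(uint64_t key, uint64_t a, uint64_t b)
{
    /* Toy keyed mixer standing in for a real MAC; NOT cryptography. */
    uint64_t h = key ^ 0x9e3779b97f4a7c15ULL;
    h = (h ^ a) * 0xff51afd7ed558ccdULL;
    h = (h ^ b) * 0xc4ceb9fe1a85ec53ULL;
    return h ^ (h >> 33);
}

int main(void)
{
    uint64_t shared_key = 0x1234abcdULL;   /* known to both sides      */
    uint64_t conn_nonce = 42;              /* this connection instance */

    /* Server: reject the first authorizer with a random challenge.    */
    uint64_t challenge = 0xd00dfeedULL;    /* would be random bytes    */

    /* Client: updated authorizer proving it can read the challenge
     * and that it was produced for this specific connection.          */
    uint64_t proof = mix64(shared_key, challenge, conn_nonce);

    /* Server: recompute and compare.                                  */
    if (proof == mix64(shared_key, challenge, conn_nonce))
        printf("authorizer accepted\n");

    /* A replay made for another connection fails the check.           */
    if (mix64(shared_key, challenge, /*other conn*/ 7) != proof)
        printf("replayed authorizer rejected\n");
    return 0;
}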
DEFUN(pgBack, PREV_PAGE, "Scroll up one page")
{
if (vi_prec_num)
nscroll(-searchKeyNum() * (Currentbuf->LINES - 1), B_NORMAL);
else
nscroll(-(prec_num ? searchKeyNum() : searchKeyNum()
* (Currentbuf->LINES - 1)), prec_num ? B_SCROLL : B_NORMAL);
}
| 0 | ["CWE-59", "CWE-241"] | w3m | 18dcbadf2771cdb0c18509b14e4e73505b242753 | 209,919,948,788,171,100,000,000,000,000,000,000,000 | 8 | Make temporary directory safely when ~/.w3m is unwritable |
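The one-line message leaves the technique implicit. The usual safe pattern, shown here as a sketch rather than w3m's actual patch, is to let mkdtemp(3) pick the name and create the mode-0700 directory atomically instead of trusting a predictable path that an attacker could pre-create or symlink:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
    /* The XXXXXX suffix is replaced in place with a unique name and
     * the directory is created atomically with mode 0700. */
    char tmpl[] = "/tmp/w3m-XXXXXX";
    if (mkdtemp(tmpl) == NULL) {
        perror("mkdtemp");
        return 1;
    }
    printf("private work dir: %s\n", tmpl);
    /* ... create temporary files under tmpl, then clean up ... */
    rmdir(tmpl);
    return 0;
}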
e_mail_part_verify_validity_sender (EMailPart *part,
CamelInternetAddress *from_address)
{
GList *link;
g_return_if_fail (E_IS_MAIL_PART (part));
if (!from_address)
return;
for (link = g_queue_peek_head_link (&part->validities); link; link = g_list_next (link)) {
EMailPartValidityPair *pair = link->data;
if (pair && pair->validity && !(pair->validity_type & E_MAIL_PART_VALIDITY_VERIFIED)) {
pair->validity_type |= E_MAIL_PART_VALIDITY_VERIFIED;
if (pair->validity->sign.status != CAMEL_CIPHER_VALIDITY_SIGN_NONE) {
GList *link2;
gboolean from_matches_signer = FALSE;
for (link2 = g_queue_peek_head_link (&pair->validity->sign.signers); link2 && !from_matches_signer; link2 = g_list_next (link2)) {
CamelCipherCertInfo *cinfo = link2->data;
if (cinfo->email && *cinfo->email) {
from_matches_signer = from_matches_signer ||
(from_address && camel_internet_address_find_address (from_address, cinfo->email, NULL) >= 0) ||
(from_address && from_matches_signers_alt_emails (from_address, cinfo));
}
}
if (!from_matches_signer)
pair->validity_type |= E_MAIL_PART_VALIDITY_SENDER_SIGNER_MISMATCH;
}
}
}
}
| 0 | ["CWE-347"] | evolution | f66cd3e1db301d264563b4222a3574e2e58e2b85 | 188,622,483,894,716,080,000,000,000,000,000,000,000 | 36 |
eds-I#3 - [GPG] Mails that are not encrypted look encrypted
Related to https://gitlab.gnome.org/GNOME/evolution-data-server/issues/3
|
fr_window_set_password (FrWindow *window,
const char *password)
{
g_return_if_fail (window != NULL);
if (window->priv->password == password)
return;
if (window->priv->password != NULL) {
g_free (window->priv->password);
window->priv->password = NULL;
}
if ((password != NULL) && (password[0] != '\0'))
window->priv->password = g_strdup (password);
}
| 0 | ["CWE-22"] | file-roller | b147281293a8307808475e102a14857055f81631 | 40,922,643,749,117,686,000,000,000,000,000,000,000 | 16 | libarchive: sanitize filenames before extracting |
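A hedged sketch of the kind of check the commit title describes (not file-roller's actual code): reject entry names that are absolute or contain a ".." component, so extraction cannot escape the destination directory (CWE-22):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool entry_name_is_safe(const char *name)
{
    if (name == NULL || name[0] == '/' || name[0] == '\0')
        return false;                   /* absolute or empty */
    const char *p = name;
    while (*p) {
        size_t seg = strcspn(p, "/");   /* length of this component */
        if (seg == 2 && p[0] == '.' && p[1] == '.')
            return false;               /* ".." escapes the target  */
        p += seg;
        while (*p == '/') p++;
    }
    return true;
}

int main(void)
{
    const char *names[] = { "docs/a.txt", "../../etc/passwd", "/etc/shadow" };
    for (int i = 0; i < 3; i++)
        printf("%-18s -> %s\n", names[i],
               entry_name_is_safe(names[i]) ? "extract" : "skip");
    return 0;
}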
static void nfs_volume_list_stop(struct seq_file *p, void *v)
{
spin_unlock(&nfs_client_lock);
}
| 0 | ["CWE-20"] | linux-2.6 | 54af3bb543c071769141387a42deaaab5074da55 | 117,130,108,784,723,890,000,000,000,000,000,000,000 | 4 |
NFS: Fix an Oops in encode_lookup()
It doesn't look as if the NFS file name limit is being initialised correctly
in the struct nfs_server. Make sure that we limit whatever is being set in
nfs_probe_fsinfo() and nfs_init_server().
Also ensure that readdirplus and nfs4_path_walk respect our file name
limits.
Signed-off-by: Trond Myklebust <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
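A sketch of the clamping the message calls for; the cap and helper are invented for the demo, while the real fix limits against the per-protocol name-length maxima:

#include <stdio.h>

#define NFS_PROTO_MAXNAMLEN 255   /* assumed protocol cap for the demo */

static unsigned int clamp_namelen(unsigned int reported)
{
    /* Never trust a zero (uninitialised) or over-large server value. */
    if (reported == 0 || reported > NFS_PROTO_MAXNAMLEN)
        return NFS_PROTO_MAXNAMLEN;
    return reported;
}

int main(void)
{
    printf("%u %u %u\n",
           clamp_namelen(0),        /* uninitialised -> 255 */
           clamp_namelen(128),      /* sane value kept      */
           clamp_namelen(70000));   /* bogus -> 255         */
    return 0;
}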
pk_transaction_is_exclusive (PkTransaction *transaction)
{
g_return_val_if_fail (PK_IS_TRANSACTION (transaction), FALSE);
return transaction->priv->exclusive;
}
| 0 | ["CWE-287"] | PackageKit | 7e8a7905ea9abbd1f384f05f36a4458682cd4697 | 242,178,055,054,487,370,000,000,000,000,000,000,000 | 6 |
Do not set JUST_REINSTALL on any kind of auth failure
If we try to continue the auth queue when it has been cancelled (or failed)
then we fall upon the obscure JUST_REINSTALL transaction flag which only the
DNF backend actually verifies.
Many thanks to Matthias Gerstner <[email protected]> for spotting the problem.
|
void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount)
{
__issue_discard_cmd(sbi, false);
__drop_discard_cmd(sbi);
__wait_discard_cmd(sbi, !umount);
}
| 0 | ["CWE-20"] | linux | 638164a2718f337ea224b747cf5977ef143166a4 | 323,978,002,766,394,560,000,000,000,000,000,000,000 | 6 |
f2fs: fix potential panic during fstrim
As Ju Hyung Park reported:
"When 'fstrim' is called for manual trim, a BUG() can be triggered
randomly with this patch.
I'm seeing this issue on both x86 Desktop and arm64 Android phone.
On x86 Desktop, this was caused during Ubuntu boot-up. I have a
cronjob installed which calls 'fstrim -v /' during boot. On arm64
Android, this was caused during GC looping with 1ms gc_min_sleep_time
& gc_max_sleep_time."
Root cause of this issue is that f2fs_wait_discard_bios can only be
used by f2fs_put_super, because during put_super there must be no
other referrers, so it can ignore discard entry's reference count
when removing the entry, otherwise in other caller we will hit bug_on
in __remove_discard_cmd as there may be other issuer added reference
count in discard entry.
Thread A Thread B
- issue_discard_thread
- f2fs_ioc_fitrim
- f2fs_trim_fs
- f2fs_wait_discard_bios
- __issue_discard_cmd
- __submit_discard_cmd
- __wait_discard_cmd
- dc->ref++
- __wait_one_discard_bio
- __wait_discard_cmd
- __remove_discard_cmd
- f2fs_bug_on(sbi, dc->ref)
Fixes: 969d1b180d987c2be02de890d0fff0f66a0e80de
Reported-by: Ju Hyung Park <[email protected]>
Signed-off-by: Chao Yu <[email protected]>
Signed-off-by: Jaegeuk Kim <[email protected]>
|
void BrotliContext::resetOut() {
avail_out_ = chunk_size_;
next_out_ = chunk_ptr_.get();
}
| 0 | [] | envoy | cb4ef0b09200c720dfdb07e097092dd105450343 | 235,656,914,190,694,480,000,000,000,000,000,000,000 | 4 |
decompressors: stop decompressing upon excessive compression ratio (#733)
Signed-off-by: Dmitry Rozhkov <[email protected]>
Co-authored-by: Ryan Hamilton <[email protected]>
Signed-off-by: Matt Klein <[email protected]>
Signed-off-by: Pradeep Rao <[email protected]>
|
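A small sketch of the guard the commit describes (illustrative, not Envoy's implementation, which is C++): track bytes consumed versus bytes produced while inflating and abort once the ratio exceeds a configured bound, which defuses decompression bombs. It assumes consumed * max_ratio fits in 64 bits:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool ratio_ok(uint64_t consumed, uint64_t produced,
                     uint64_t max_ratio)
{
    /* Multiply on the consumed side to avoid integer-division noise. */
    return produced <= consumed * max_ratio;
}

int main(void)
{
    uint64_t consumed = 1024, produced = 0, max_ratio = 100;
    /* Simulated inflate loop producing 64 KiB chunks. */
    for (int chunk = 0; chunk < 4; chunk++) {
        produced += 64 * 1024;
        if (!ratio_ok(consumed, produced, max_ratio)) {
            printf("abort: ratio %llu exceeds %llu\n",
                   (unsigned long long)(produced / consumed),
                   (unsigned long long)max_ratio);
            return 1;
        }
    }
    printf("stream ok\n");
    return 0;
}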
GF_HEVCConfig *gf_isom_lhvc_config_get(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex)
{
GF_HEVCConfig *lhvc;
GF_OperatingPointsInformation *oinf=NULL;
GF_TrackBox *trak;
GF_MPEGVisualSampleEntryBox *entry;
trak = gf_isom_get_track_from_file(the_file, trackNumber);
if (!trak || !trak->Media || !DescriptionIndex) return NULL;
if (gf_isom_get_hevc_lhvc_type(the_file, trackNumber, DescriptionIndex)==GF_ISOM_HEVCTYPE_NONE)
return NULL;
entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->other_boxes, DescriptionIndex-1);
if (!entry) return NULL;
if (!entry->lhvc_config) return NULL;
lhvc = HEVC_DuplicateConfig(entry->lhvc_config->config);
if (!lhvc) return NULL;
gf_isom_get_oinf_info(the_file, trackNumber, &oinf);
if (oinf) {
LHEVC_ProfileTierLevel *ptl = (LHEVC_ProfileTierLevel *)gf_list_last(oinf->profile_tier_levels);
if (ptl) {
lhvc->profile_space = ptl->general_profile_space;
lhvc->tier_flag = ptl->general_tier_flag;
lhvc->profile_idc = ptl->general_profile_idc;
lhvc->general_profile_compatibility_flags = ptl->general_profile_compatibility_flags;
lhvc->constraint_indicator_flags = ptl->general_constraint_indicator_flags;
}
}
return lhvc;
}
| 0 | ["CWE-119", "CWE-787"] | gpac | 90dc7f853d31b0a4e9441cba97feccf36d8b69a4 | 1,682,526,175,457,241,700,000,000,000,000,000,000 | 29 | fix some exploitable overflows (#994, #997) |
static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
int write,
const struct iovec *iov, unsigned long nr_segs)
{
memset(cs, 0, sizeof(*cs));
cs->fc = fc;
cs->write = write;
cs->iov = iov;
cs->nr_segs = nr_segs;
}
| 0 | ["CWE-120", "CWE-119", "CWE-787"] | linux | c2183d1e9b3f313dd8ba2b1b0197c8d9fb86a7ae | 95,444,675,134,155,110,000,000,000,000,000,000,000 | 10 |
fuse: check size of FUSE_NOTIFY_INVAL_ENTRY message
FUSE_NOTIFY_INVAL_ENTRY didn't check the length of the write so the
message processing could overrun and result in a "kernel BUG at
fs/fuse/dev.c:629!"
Reported-by: Han-Wen Nienhuys <[email protected]>
Signed-off-by: Miklos Szeredi <[email protected]>
CC: [email protected]
|
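A condensed sketch of the check the commit adds, modelled on but not copied from the kernel fix: before trusting an embedded name length, verify it against both a protocol maximum and the number of bytes actually received (the struct layout and the NAME_MAX_DEMO cap are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NAME_MAX_DEMO 1024            /* stand-in for FUSE_NAME_MAX   */

struct inval_entry_out {              /* header carried in the message */
    uint64_t parent;
    uint32_t namelen;
    uint32_t padding;
};

static int check_inval_entry(const void *buf, size_t size)
{
    struct inval_entry_out out;
    if (size < sizeof(out))
        return -1;                    /* truncated header             */
    memcpy(&out, buf, sizeof(out));
    if (out.namelen > NAME_MAX_DEMO)
        return -1;                    /* absurd length                */
    if (size != sizeof(out) + out.namelen + 1)
        return -1;                    /* name + NUL must fit exactly  */
    return 0;
}

int main(void)
{
    unsigned char msg[sizeof(struct inval_entry_out) + 4] = {0};
    struct inval_entry_out hdr = { .parent = 1, .namelen = 3 };
    memcpy(msg, &hdr, sizeof(hdr));
    memcpy(msg + sizeof(hdr), "abc", 4);
    printf("well-formed: %d\n", check_inval_entry(msg, sizeof(msg)));

    hdr.namelen = 4096;               /* claims more than was sent    */
    memcpy(msg, &hdr, sizeof(hdr));
    printf("oversized:   %d\n", check_inval_entry(msg, sizeof(msg)));
    return 0;
}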
int listTypeEqual(listTypeEntry *entry, robj *o) {
if (entry->li->encoding == OBJ_ENCODING_QUICKLIST) {
serverAssertWithInfo(NULL,o,sdsEncodedObject(o));
return quicklistCompare(entry->entry.zi,o->ptr,sdslen(o->ptr));
} else {
serverPanic("Unknown list encoding");
}
}
| 0 | ["CWE-190"] | redis | f6a40570fa63d5afdd596c78083d754081d80ae3 | 321,265,471,745,252,900,000,000,000,000,000,000,000 | 8 |
Fix ziplist and listpack overflows and truncations (CVE-2021-32627, CVE-2021-32628)
- fix possible heap corruption in ziplist and listpack resulting from trying to
allocate more than the maximum size of 4GB.
- prevent ziplist (hash and zset) from reaching a size above 1GB; it will be
converted to HT encoding instead, since that's not a useful size.
- prevent listpack (stream) from reaching a size above 1GB.
- XADD will start a new listpack if the new record may cause the previous
listpack to grow over 1GB.
- XADD will respond with an error if a single stream record is over 1GB
- List type (ziplist in quicklist) was truncating strings that were over 4GB,
now it'll respond with an error.
|
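A minimal sketch of the size guard the commit describes; the real limits and encodings live in ziplist.c/listpack.c, and the numbers here are only illustrative: check the post-append size against both the 32-bit wrap and a sanity cap before reallocating:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SAFETY_LIMIT (1ULL << 30)     /* 1 GB cap, as the fix imposes */

static bool grow_ok(uint32_t cur_bytes, uint64_t add_bytes)
{
    uint64_t next = (uint64_t)cur_bytes + add_bytes;
    if (next > UINT32_MAX)            /* would wrap the 32-bit header */
        return false;
    if (next > SAFETY_LIMIT)          /* convert/refuse before 1 GB   */
        return false;
    return true;
}

int main(void)
{
    printf("%d\n", grow_ok(1000, 500));                         /* 1 */
    printf("%d\n", grow_ok(UINT32_MAX - 8, 64));                /* 0 */
    printf("%d\n", grow_ok(900 * 1024 * 1024, 200u * 1024 * 1024)); /* 0 */
    return 0;
}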
iperf_get_test_server_hostname(struct iperf_test *ipt)
{
return ipt->server_hostname;
}
| 0 | ["CWE-120", "CWE-119", "CWE-787"] | iperf | 91f2fa59e8ed80dfbf400add0164ee0e508e412a | 158,795,615,151,239,940,000,000,000,000,000,000,000 | 4 |
Fix a buffer overflow / heap corruption issue that could occur if a
malformed JSON string was passed on the control channel. This issue,
present in the cJSON library, was already fixed upstream, so was
addressed here in iperf3 by importing a newer version of cJSON (plus
local ESnet modifications).
Discovered and reported by Dave McDaniel, Cisco Talos.
Based on a patch by @dopheide-esnet, with input from @DaveGamble.
Cross-references: TALOS-CAN-0164, ESNET-SECADV-2016-0001,
CVE-2016-4303
(cherry picked from commit ed94082be27d971a5e1b08b666e2c217cf470a40)
Signed-off-by: Bruce A. Mah <[email protected]>
|
Item_in_subselect::single_value_transformer(JOIN *join)
{
SELECT_LEX *select_lex= join->select_lex;
DBUG_ENTER("Item_in_subselect::single_value_transformer");
DBUG_ASSERT(thd == join->thd);
/*
Check that the right part of the subselect contains no more than one
column. E.g. in SELECT 1 IN (SELECT * ..) the right part is (SELECT * ...)
*/
// psergey: duplicated_subselect_card_check
if (select_lex->item_list.elements > 1)
{
my_error(ER_OPERAND_COLUMNS, MYF(0), 1);
DBUG_RETURN(true);
}
Item* join_having= join->having ? join->having : join->tmp_having;
if (!(join_having || select_lex->with_sum_func ||
select_lex->group_list.elements) &&
select_lex->table_list.elements == 0 && !join->conds &&
!select_lex->master_unit()->is_union())
{
Item *where_item= (Item*) select_lex->item_list.head();
/*
it is a single select without tables => possible optimization:
remove the dependence mark since the item is moved to the upper
select and is not outer anymore.
*/
where_item->walk(&Item::remove_dependence_processor, 0,
select_lex->outer_select());
/*
fix_field of substitution item will be done in time of
substituting.
Note that real_item() should be used instead of
original left expression because left_expr can be
runtime created Ref item which is deleted at the end
of the statement. Thus one of 'substitution' arguments
can be broken in case of PS.
*/
substitution= func->create(thd, left_expr, where_item);
have_to_be_excluded= 1;
if (thd->lex->describe)
{
char warn_buff[MYSQL_ERRMSG_SIZE];
sprintf(warn_buff, ER_THD(thd, ER_SELECT_REDUCED),
select_lex->select_number);
push_warning(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_SELECT_REDUCED, warn_buff);
}
DBUG_RETURN(false);
}
/*
Wrap the current IN predicate in an Item_in_optimizer. The actual
substitution in the Item tree takes place in Item_subselect::fix_fields.
*/
if (!substitution)
{
/* We're invoked for the 1st (or the only) SELECT in the subquery UNION */
substitution= optimizer;
SELECT_LEX *current= thd->lex->current_select;
thd->lex->current_select= current->return_after_parsing();
if (!optimizer || optimizer->fix_left(thd))
{
thd->lex->current_select= current;
DBUG_RETURN(true);
}
thd->lex->current_select= current;
/* We will refer to upper level cache array => we have to save it for SP */
optimizer->keep_top_level_cache();
/*
Since Item_in_optimizer does not substitute itself in fix_fields,
we can use the same item for all selects.
*/
expr= new (thd->mem_root) Item_direct_ref(thd, &select_lex->context,
(Item**)optimizer->get_cache(),
(char *)"<no matter>",
(char *)in_left_expr_name);
}
DBUG_RETURN(false);
}
| 0 | ["CWE-89"] | server | 3c209bfc040ddfc41ece8357d772547432353fd2 | 5,017,039,632,218,122,700,000,000,000,000,000,000 | 87 |
MDEV-25994: Crash with union of my_decimal type in ORDER BY clause
When single-row subquery fails with "Subquery reutrns more than 1 row"
error, it will raise an error and return NULL.
On the other hand, Item_singlerow_subselect sets item->maybe_null=0
for table-less subqueries like "(SELECT not_null_value)" (*)
This discrepancy (item with maybe_null=0 returning NULL) causes the
code in Type_handler_decimal_result::make_sort_key_part() to crash.
Fixed this by allowing inference (*) only when the subquery is NOT a
UNION.
|
ecma_free_number_list (jmem_cpointer_t number_list_cp) /**< number list */
{
while (number_list_cp != JMEM_CP_NULL)
{
ecma_lit_storage_item_t *number_list_p = JMEM_CP_GET_NON_NULL_POINTER (ecma_lit_storage_item_t,
number_list_cp);
for (int i = 0; i < ECMA_LIT_STORAGE_VALUE_COUNT; i++)
{
if (number_list_p->values[i] != JMEM_CP_NULL)
{
ecma_dealloc_number (JMEM_CP_GET_NON_NULL_POINTER (ecma_number_t, number_list_p->values[i]));
}
}
jmem_cpointer_t next_item_cp = number_list_p->next_cp;
jmem_pools_free (number_list_p, sizeof (ecma_lit_storage_item_t));
number_list_cp = next_item_cp;
}
} /* ecma_free_number_list */
| 0 | ["CWE-416"] | jerryscript | 3bcd48f72d4af01d1304b754ef19fe1a02c96049 | 147,917,882,796,862,930,000,000,000,000,000,000,000 | 20 |
Improve parse_identifier (#4691)
Ascii string length is no longer computed during string allocation.
JerryScript-DCO-1.0-Signed-off-by: Daniel Batiz [email protected]
|
static int wait_serial_change(struct acm *acm, unsigned long arg)
{
int rv = 0;
DECLARE_WAITQUEUE(wait, current);
struct async_icount old, new;
do {
spin_lock_irq(&acm->read_lock);
old = acm->oldcount;
new = acm->iocount;
acm->oldcount = new;
spin_unlock_irq(&acm->read_lock);
if ((arg & TIOCM_DSR) &&
old.dsr != new.dsr)
break;
if ((arg & TIOCM_CD) &&
old.dcd != new.dcd)
break;
if ((arg & TIOCM_RI) &&
old.rng != new.rng)
break;
add_wait_queue(&acm->wioctl, &wait);
set_current_state(TASK_INTERRUPTIBLE);
schedule();
remove_wait_queue(&acm->wioctl, &wait);
if (acm->disconnected) {
if (arg & TIOCM_CD)
break;
else
rv = -ENODEV;
} else {
if (signal_pending(current))
rv = -ERESTARTSYS;
}
} while (!rv);
return rv;
}
| 0 | ["CWE-416"] | linux | c52873e5a1ef72f845526d9f6a50704433f9c625 | 72,943,521,443,806,680,000,000,000,000,000,000,000 | 42 |
usb: cdc-acm: make sure a refcount is taken early enough
destroy() will decrement the refcount on the interface, so that
it needs to be taken so early that it never undercounts.
Fixes: 7fb57a019f94e ("USB: cdc-acm: Fix potential deadlock (lockdep warning)")
Cc: stable <[email protected]>
Reported-and-tested-by: [email protected]
Signed-off-by: Oliver Neukum <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
static JSValue js_bs_put_s16(JSContext *ctx, JSValueConst this_val, int argc, JSValueConst *argv)
{
return js_bs_put_val(ctx, this_val, argc, argv, 4);
}
| 0 | ["CWE-787"] | gpac | ea1eca00fd92fa17f0e25ac25652622924a9a6a0 | 251,526,698,049,405,700,000,000,000,000,000,000,000 | 4 | fixed #2138 |
static void megasas_finish_dcmd(MegasasCmd *cmd, uint32_t iov_size)
{
trace_megasas_finish_dcmd(cmd->index, iov_size);
if (iov_size > cmd->iov_size) {
if (megasas_frame_is_ieee_sgl(cmd)) {
cmd->frame->dcmd.sgl.sg_skinny->len = cpu_to_le32(iov_size);
} else if (megasas_frame_is_sgl64(cmd)) {
cmd->frame->dcmd.sgl.sg64->len = cpu_to_le32(iov_size);
} else {
cmd->frame->dcmd.sgl.sg32->len = cpu_to_le32(iov_size);
}
}
}
| 0 | ["CWE-401"] | qemu | 765a707000e838c30b18d712fe6cb3dd8e0435f3 | 327,300,509,295,135,000,000,000,000,000,000,000,000 | 14 |
megasas: fix guest-triggered memory leak
If the guest sets the sglist size to a value >=2GB, megasas_handle_dcmd
will return MFI_STAT_MEMORY_NOT_AVAILABLE without freeing the memory.
Avoid this by returning only the status from map_dcmd, and loading
cmd->iov_size in the caller.
Reported-by: Li Qiang <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
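A generic sketch of the leak pattern the commit fixes, with invented names: when a helper both allocates and reports status, an early error return can skip the free, so keep ownership in one place and let the caller free on every path:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int map_cmd(uint64_t sglist_size, void **iov_out)
{
    if (sglist_size >= (uint64_t)2 * 1024 * 1024 * 1024)
        return -1;                    /* reject absurd guest-supplied sizes */
    *iov_out = malloc((size_t)sglist_size);
    return *iov_out ? 0 : -1;
}

static int handle_cmd(uint64_t sglist_size)
{
    void *iov = NULL;
    int status = map_cmd(sglist_size, &iov);
    if (status != 0)
        goto out;                     /* nothing was mapped; nothing leaks */
    /* ... process the command using iov ... */
out:
    free(iov);                        /* one cleanup path for all returns  */
    return status;
}

int main(void)
{
    printf("%d\n", handle_cmd(4096));                             /* 0  */
    printf("%d\n", handle_cmd((uint64_t)3 * 1024 * 1024 * 1024)); /* -1 */
    return 0;
}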
sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type, void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *asconf_ack = arg;
struct sctp_chunk *last_asconf = asoc->addip_last_asconf;
struct sctp_chunk *abort;
struct sctp_paramhdr *err_param = NULL;
sctp_addiphdr_t *addip_hdr;
__u32 sent_serial, rcvd_serial;
if (!sctp_vtag_verify(asconf_ack, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
/* ADD-IP, Section 4.1.2:
* This chunk MUST be sent in an authenticated way by using
* the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk
* is received unauthenticated it MUST be silently discarded as
* described in [I-D.ietf-tsvwg-sctp-auth].
*/
if (!sctp_addip_noauth && !asconf_ack->auth)
return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
/* Make sure that the ADDIP chunk has a valid length. */
if (!sctp_chunk_length_valid(asconf_ack, sizeof(sctp_addip_chunk_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data;
rcvd_serial = ntohl(addip_hdr->serial);
/* Verify the ASCONF-ACK chunk before processing it. */
if (!sctp_verify_asconf(asoc,
(sctp_paramhdr_t *)addip_hdr->params,
(void *)asconf_ack->chunk_end,
&err_param))
return sctp_sf_violation_paramlen(ep, asoc, type,
(void *)&err_param, commands);
if (last_asconf) {
addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr;
sent_serial = ntohl(addip_hdr->serial);
} else {
sent_serial = asoc->addip_serial - 1;
}
/* D0) If an endpoint receives an ASCONF-ACK that is greater than or
* equal to the next serial number to be used but no ASCONF chunk is
* outstanding the endpoint MUST ABORT the association. Note that a
* sequence number is greater than if it is no more than 2^^31-1
* larger than the current sequence number (using serial arithmetic).
*/
if (ADDIP_SERIAL_gte(rcvd_serial, sent_serial + 1) &&
!(asoc->addip_last_asconf)) {
abort = sctp_make_abort(asoc, asconf_ack,
sizeof(sctp_errhdr_t));
if (abort) {
sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, 0);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(abort));
}
/* We are going to ABORT, so we might as well stop
* processing the rest of the chunks in the packet.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_ABORT;
}
if ((rcvd_serial == sent_serial) && asoc->addip_last_asconf) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
asconf_ack))
return SCTP_DISPOSITION_CONSUME;
abort = sctp_make_abort(asoc, asconf_ack,
sizeof(sctp_errhdr_t));
if (abort) {
sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(abort));
}
/* We are going to ABORT, so we might as well stop
* processing the rest of the chunks in the packet.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_ABORT;
}
return SCTP_DISPOSITION_DISCARD;
}
| 1 | ["CWE-20"] | linux-2.6 | ba0166708ef4da7eeb61dd92bbba4d5a749d6561 | 238,014,936,619,603,620,000,000,000,000,000,000,000 | 110 |
sctp: Fix kernel panic while process protocol violation parameter
Since the call to sctp_sf_abort_violation() needs the parameter 'arg' to be
of type 'struct sctp_chunk', it will read the chunk type and chunk length from
the chunk_hdr member of the chunk. But sctp_sf_violation_paramlen() is always
called with a 'struct sctp_paramhdr' typed parameter, which is then passed on
to sctp_sf_abort_violation(). This may cause a kernel panic.
sctp_sf_violation_paramlen()
|-- sctp_sf_abort_violation()
|-- sctp_make_abort_violation()
This patch fixes the problem. It also fixes two places that called
sctp_sf_violation_paramlen() with the wrong parameter type.
Signed-off-by: Wei Yongjun <[email protected]>
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
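A minimal standalone sketch of the bug class this commit describes (all names here are hypothetical, not the kernel's): a handler that expects one struct type is handed a pointer to a smaller, unrelated struct, and dereferencing the missing member reads garbage.
#include <stdio.h>

struct chunk_hdr  { unsigned char type, flags; unsigned short length; };
struct param_hdr  { unsigned short type, length; };
struct chunk      { struct chunk_hdr *hdr; };

/* Expects 'arg' to really point to a struct chunk. */
static void abort_violation(void *arg)
{
    struct chunk *c = arg;
    printf("chunk type %u, length %u\n", c->hdr->type, c->hdr->length);
}

int main(void)
{
    struct chunk_hdr hdr = { 1, 0, 16 };
    struct chunk ok = { &hdr };
    struct param_hdr ph = { 5, 8 };

    abort_violation(&ok);    /* fine: the expected type */
    /* abort_violation(&ph); -- the bug class: a param_hdr carries no
     * chunk pointer, so the c->hdr dereference reads garbage and may
     * fault, exactly like the panic described above. */
    (void)ph;
    return 0;
}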
void qemu_spice_display_init_common(SimpleSpiceDisplay *ssd, DisplayState *ds)
{
ssd->ds = ds;
qemu_mutex_init(&ssd->lock);
ssd->mouse_x = -1;
ssd->mouse_y = -1;
ssd->bufsize = (16 * 1024 * 1024);
ssd->buf = qemu_malloc(ssd->bufsize);
}
| 0 |
[] |
qemu-kvm
|
5ff4e36c804157bd84af43c139f8cd3a59722db9
| 111,057,968,734,308,270,000,000,000,000,000,000,000 | 9 |
qxl: async io support using new spice api
Some of the QXL port i/o commands are waiting for the spice server to
complete certain actions. Add async versions for these commands, so we
don't block the vcpu while the spice server processes the command.
Instead the qxl device will raise an IRQ when done.
The async command processing relies on the added QXLInterface::async_complete
callback and the QXLWorker::*_async entry points, available in spice server qxl >= 3.1
Signed-off-by: Gerd Hoffmann <[email protected]>
Signed-off-by: Alon Levy <[email protected]>
|
static void vrend_free_programs(struct vrend_sub_context *sub)
{
struct vrend_linked_shader_program *ent, *tmp;
if (LIST_IS_EMPTY(&sub->programs))
return;
LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, &sub->programs, head) {
vrend_destroy_program(ent);
}
}
| 0 |
[
"CWE-787"
] |
virglrenderer
|
cbc8d8b75be360236cada63784046688aeb6d921
| 223,398,930,574,537,000,000,000,000,000,000,000,000 | 11 |
vrend: check transfer bounds for negative values too and report error
Closes #138
Signed-off-by: Gert Wollny <[email protected]>
Reviewed-by: Emil Velikov <[email protected]>
|
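A hedged sketch of the kind of check the commit title describes (illustrative names, not virglrenderer's actual API): transfer coordinates must be rejected when negative, not only when they exceed the resource size, because signed values can slip past an upper-bound-only comparison.
#include <stdint.h>

/* Returns 0 if the 1D transfer [x, x+w) fits inside a texture of
 * width tex_w, nonzero otherwise. Rejecting negative values first
 * also avoids signed overflow in x + w. */
static int check_transfer_bounds(int32_t x, int32_t w, int32_t tex_w)
{
    if (x < 0 || w < 0)
        return -1;
    if (x > tex_w || w > tex_w - x)
        return -1;
    return 0;
}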
static inline void pgtable_page_dtor(struct page *page)
{
pte_lock_deinit(page);
dec_zone_page_state(page, NR_PAGETABLE);
}
| 0 |
[
"CWE-119"
] |
linux
|
1be7107fbe18eed3e319a6c3e83c78254b693acb
| 260,660,799,686,751,000,000,000,000,000,000,000,000 | 5 |
mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
no limit for the stack size limit because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation-wise, first delete all the old code for the stack guard page,
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>
|
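The mechanism the message describes, sketched as the vm_start_gap() helper (kernel-context code, close to what the mainline fix added; stack_guard_gap is the gap tunable from the command line):
/* Tunable via the stack_guard_gap= kernel parameter; defaults to
 * 256 pages, i.e. 1MB with 4k pages. */
unsigned long stack_guard_gap = 256UL << PAGE_SHIFT;

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
    unsigned long vm_start = vma->vm_start;

    if (vma->vm_flags & VM_GROWSDOWN) {
        vm_start -= stack_guard_gap;
        if (vm_start > vma->vm_start)   /* underflow: clamp to 0 */
            vm_start = 0;
    }
    return vm_start;
}
Callers such as arch_get_unmapped_area() then compare against vm_start_gap(vma) instead of vma->vm_start, so the gap lives between VMAs rather than being accounted inside the stack VMA.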
Item_insert_value(THD *thd, Name_resolution_context *context_arg, Item *a)
:Item_field(thd, context_arg, (const char *)NULL, (const char *)NULL,
(const char *)NULL),
arg(a) {}
| 0 |
[
"CWE-617"
] |
server
|
2e7891080667c59ac80f788eef4d59d447595772
| 7,681,298,216,031,655,000,000,000,000,000,000,000 | 4 |
MDEV-25635 Assertion failure when pushing from HAVING into WHERE of view
This bug could manifest itself after pushing a where condition over a
mergeable derived table / view / CTE DT into a grouping view / derived
table / CTE V whose item list contained set functions with constant
arguments such as MIN(2), SUM(1) etc. In such cases the field references
used in the condition pushed into the view V that correspond set functions
are wrapped into Item_direct_view_ref wrappers. Due to a wrong implementation
of the virtual method const_item() for the class Item_direct_view_ref the
wrapped set functions with constant arguments could be erroneously taken
for constant items. This could lead to a wrong result set returned by the
main select query in 10.2. In 10.4 where a possibility of pushing condition
from HAVING into WHERE had been added this could cause a crash.
Approved by Sergey Petrunya <[email protected]>
|
static void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu))
kvm_vcpu_update_apicv(vcpu);
avic_set_running(vcpu, true);
}
| 0 |
[
"CWE-401"
] |
linux
|
d80b64ff297e40c2b6f7d7abc1b3eba70d22a068
| 124,048,636,278,078,250,000,000,000,000,000,000,000 | 6 |
KVM: SVM: Fix potential memory leak in svm_cpu_init()
When kmalloc memory for sd->sev_vmcbs failed, we forget to free the page
held by sd->save_area. Also get rid of the var r as '-ENOMEM' is actually
the only possible outcome here.
Reviewed-by: Liran Alon <[email protected]>
Reviewed-by: Vitaly Kuznetsov <[email protected]>
Signed-off-by: Miaohe Lin <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
BitWriter::writeBitsInt(int val, size_t bits)
{
writeBits(static_cast<unsigned long long>(val), bits);
}
| 0 |
[
"CWE-787"
] |
qpdf
|
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
| 98,142,550,933,513,870,000,000,000,000,000,000,000 | 4 |
Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition.
|
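The pattern this commit describes, sketched in C (qpdf itself uses C++ helpers; this is just the idea): make every narrowing conversion explicit through a routine that range-checks and fails loudly instead of silently truncating.
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Convert a 64-bit unsigned value to int, failing loudly on data
 * loss instead of silently wrapping. */
static int checked_to_int(uint64_t v)
{
    if (v > (uint64_t)INT_MAX) {
        fprintf(stderr, "integer conversion would lose data\n");
        abort();
    }
    return (int)v;
}

int main(void)
{
    printf("%d\n", checked_to_int(1234));       /* fine */
    printf("%d\n", checked_to_int(UINT64_MAX)); /* aborts, by design */
    return 0;
}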
static void pblock(int d, js_Ast *block)
{
assert(block->type == STM_BLOCK);
pc('{'); nl();
pstmlist(d, block->a);
in(d); pc('}');
}
| 0 |
[
"CWE-476"
] |
mujs
|
f5b3c703e18725e380b83427004632e744f85a6f
| 6,589,825,730,882,029,000,000,000,000,000,000,000 | 7 |
Issue #161: Cope with empty programs in mujs-pp.
|
int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page)
{
struct buffer_head *head;
struct buffer_head *bh;
int ret = 0;
J_ASSERT(PageLocked(page));
head = page_buffers(page);
bh = head;
do {
struct journal_head *jh;
/*
* We take our own ref against the journal_head here to avoid
* having to add tons of locking around each instance of
* jbd2_journal_put_journal_head().
*/
jh = jbd2_journal_grab_journal_head(bh);
if (!jh)
continue;
spin_lock(&jh->b_state_lock);
__journal_try_to_free_buffer(journal, bh);
spin_unlock(&jh->b_state_lock);
jbd2_journal_put_journal_head(jh);
if (buffer_jbd(bh))
goto busy;
} while ((bh = bh->b_this_page) != head);
ret = try_to_free_buffers(page);
busy:
return ret;
}
| 0 |
[
"CWE-416"
] |
linux
|
cc16eecae687912238ee6efbff71ad31e2bc414e
| 327,548,667,102,268,070,000,000,000,000,000,000,000 | 34 |
jbd2: fix use-after-free of transaction_t race
jbd2_journal_wait_updates() is called with j_state_lock held. But if
there is a commit in progress, then this transaction might get committed
and freed via jbd2_journal_commit_transaction() ->
jbd2_journal_free_transaction(), when we release j_state_lock.
So check for journal->j_running_transaction every time we release and
acquire j_state_lock to avoid use-after-free issue.
Link: https://lore.kernel.org/r/948c2fed518ae739db6a8f7f83f1d58b504f87d0.1644497105.git.ritesh.list@gmail.com
Fixes: 4f98186848707f53 ("jbd2: refactor wait logic for transaction updates into a common function")
Cc: [email protected]
Reported-and-tested-by: [email protected]
Reviewed-by: Jan Kara <[email protected]>
Signed-off-by: Ritesh Harjani <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
|
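A hedged sketch of the fix's shape (kernel-context, not the literal jbd2 code): any transaction pointer cached before dropping j_state_lock must be re-read after reacquiring it, because the commit path may have freed the transaction in between.
/* j_state_lock protects j_running_transaction; once we drop the
 * lock, the commit thread may free the transaction, so the cached
 * pointer must never be used across the unlock/lock boundary. */
write_lock(&journal->j_state_lock);
while (journal->j_running_transaction &&
       atomic_read(&journal->j_running_transaction->t_updates)) {
    DEFINE_WAIT(wait);

    prepare_to_wait(&journal->j_wait_updates, &wait,
                    TASK_UNINTERRUPTIBLE);
    write_unlock(&journal->j_state_lock);
    schedule();
    finish_wait(&journal->j_wait_updates, &wait);
    write_lock(&journal->j_state_lock);
    /* the loop condition re-reads j_running_transaction under
     * the lock, instead of trusting a stale pointer */
}
write_unlock(&journal->j_state_lock);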
bool ValidateSKIPJACK()
{
std::cout << "\nSKIPJACK validation suite running...\n\n";
bool pass1 = true, pass2 = true;
SKIPJACKEncryption enc; // 80-bits only
pass1 = enc.StaticGetValidKeyLength(8) == 10 && pass1;
pass1 = enc.StaticGetValidKeyLength(9) == 10 && pass1;
pass1 = enc.StaticGetValidKeyLength(10) == 10 && pass1;
pass1 = enc.StaticGetValidKeyLength(16) == 10 && pass1;
SKIPJACKDecryption dec; // 80-bits only
pass2 = dec.StaticGetValidKeyLength(8) == 10 && pass2;
pass2 = dec.StaticGetValidKeyLength(9) == 10 && pass2;
pass2 = dec.StaticGetValidKeyLength(10) == 10 && pass2;
pass2 = dec.StaticGetValidKeyLength(16) == 10 && pass2;
std::cout << (pass1 && pass2 ? "passed:" : "FAILED:") << " Algorithm key lengths\n";
FileSource valdata(CRYPTOPP_DATA_DIR "TestData/skipjack.dat", true, new HexDecoder);
return BlockTransformationTest(FixedRoundsCipherFactory<SKIPJACKEncryption, SKIPJACKDecryption>(), valdata) && pass1 && pass2;
}
| 0 |
[
"CWE-190",
"CWE-125"
] |
cryptopp
|
07dbcc3d9644b18e05c1776db2a57fe04d780965
| 24,391,551,701,359,367,000,000,000,000,000,000,000 | 21 |
Add Inflator::BadDistanceErr exception (Issue 414)
The improved validation and exception clear the Address Sanitizer and Undefined Behavior Sanitizer findings
|
PHP_FUNCTION(openssl_error_string)
{
char buf[256];
unsigned long val;
if (zend_parse_parameters_none() == FAILURE) {
return;
}
php_openssl_store_errors();
if (OPENSSL_G(errors) == NULL || OPENSSL_G(errors)->top == OPENSSL_G(errors)->bottom) {
RETURN_FALSE;
}
OPENSSL_G(errors)->bottom = (OPENSSL_G(errors)->bottom + 1) % ERR_NUM_ERRORS;
val = OPENSSL_G(errors)->buffer[OPENSSL_G(errors)->bottom];
if (val) {
ERR_error_string_n(val, buf, 256);
RETURN_STRING(buf);
} else {
RETURN_FALSE;
}
}
| 0 |
[
"CWE-326"
] |
php-src
|
0216630ea2815a5789a24279a1211ac398d4de79
| 165,010,362,180,722,550,000,000,000,000,000,000,000 | 25 |
Fix bug #79601 (Wrong ciphertext/tag in AES-CCM encryption for a 12 bytes IV)
|
void CLASS blend_highlights()
{
int clip = INT_MAX, row, col, c, i, j;
static const float trans[2][4][4] = {{{1, 1, 1}, {1.7320508, -1.7320508, 0}, {-1, -1, 2}},
{{1, 1, 1, 1}, {1, -1, 1, -1}, {1, 1, -1, -1}, {1, -1, -1, 1}}};
static const float itrans[2][4][4] = {{{1, 0.8660254, -0.5}, {1, -0.8660254, -0.5}, {1, 0, 1}},
{{1, 1, 1, 1}, {1, -1, 1, -1}, {1, 1, -1, -1}, {1, -1, -1, 1}}};
float cam[2][4], lab[2][4], sum[2], chratio;
if ((unsigned)(colors - 3) > 1)
return;
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf(stderr, _("Blending highlights...\n"));
#endif
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS, 0, 2);
#endif
FORCC if (clip > (i = 65535 * pre_mul[c])) clip = i;
for (row = 0; row < height; row++)
for (col = 0; col < width; col++)
{
FORCC if (image[row * width + col][c] > clip) break;
if (c == colors)
continue;
FORCC
{
cam[0][c] = image[row * width + col][c];
cam[1][c] = MIN(cam[0][c], clip);
}
for (i = 0; i < 2; i++)
{
FORCC for (lab[i][c] = j = 0; j < colors; j++) lab[i][c] += trans[colors - 3][c][j] * cam[i][j];
for (sum[i] = 0, c = 1; c < colors; c++)
sum[i] += SQR(lab[i][c]);
}
chratio = sqrt(sum[1] / sum[0]);
for (c = 1; c < colors; c++)
lab[0][c] *= chratio;
FORCC for (cam[0][c] = j = 0; j < colors; j++) cam[0][c] += itrans[colors - 3][c][j] * lab[0][j];
FORCC image[row * width + col][c] = cam[0][c] / colors;
}
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS, 1, 2);
#endif
}
| 0 |
[
"CWE-476",
"CWE-119"
] |
LibRaw
|
d7c3d2cb460be10a3ea7b32e9443a83c243b2251
| 286,721,409,552,173,740,000,000,000,000,000,000,000 | 46 |
Secunia SA75000 advisory: several buffer overruns
|
terminal_enabled(void)
{
return dyn_winpty_init(FALSE) == OK;
}
| 0 |
[
"CWE-476"
] |
vim
|
cd929f7ba8cc5b6d6dcf35c8b34124e969fed6b8
| 276,257,981,716,740,800,000,000,000,000,000,000,000 | 4 |
patch 8.1.0633: crash when out of memory while opening a terminal window
Problem: Crash when out of memory while opening a terminal window.
Solution: Handle out-of-memory more gracefully.
|
static double mxEndianDouble_Swap(double a)
{
#if defined(__GNUC__) || defined(__llvm__)
uint64_t result = __builtin_bswap64(*(uint64_t *)&a);
return *(double *)&result;
#else
double b;
txU1 *p1 = (txU1 *) &a, *p2 = (txU1 *) &b;
int i;
for (i = 0; i < 8; i++)
p2[i] = p1[7 - i];
return b;
#endif
}
| 0 |
[
"CWE-125"
] |
moddable
|
135aa9a4a6a9b49b60aa730ebc3bcc6247d75c45
| 300,095,871,339,713,330,000,000,000,000,000,000,000 | 14 |
XS: #896
|
mrb_define_method_id(mrb_state *mrb, struct RClass *c, mrb_sym mid, mrb_func_t func, mrb_aspec aspec)
{
mrb_method_t m;
int ai = mrb_gc_arena_save(mrb);
MRB_METHOD_FROM_FUNC(m, func);
#ifndef MRB_USE_METHOD_T_STRUCT
mrb_assert(MRB_METHOD_FUNC(m) == func);
#endif
if (aspec == MRB_ARGS_NONE()) {
MRB_METHOD_NOARG_SET(m);
}
mrb_define_method_raw(mrb, c, mid, m);
mrb_gc_arena_restore(mrb, ai);
}
| 0 |
[
"CWE-787"
] |
mruby
|
b1d0296a937fe278239bdfac840a3fd0e93b3ee9
| 254,955,546,308,521,800,000,000,000,000,000,000,000 | 15 |
class.c: clear method cache after `remove_method`.
|
unknown_to_zero(uint64_t count)
{
return count != UINT64_MAX ? count : 0;
}
| 0 |
[
"CWE-772"
] |
ovs
|
77ad4225d125030420d897c873e4734ac708c66b
| 36,891,051,337,821,816,000,000,000,000,000,000,000 | 4 |
ofp-util: Fix memory leaks on error cases in ofputil_decode_group_mod().
Found by libFuzzer.
Reported-by: Bhargava Shastry <[email protected]>
Signed-off-by: Ben Pfaff <[email protected]>
Acked-by: Justin Pettit <[email protected]>
|
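A hedged sketch of the usual C pattern for plugging leaks like the one this commit fixes (illustrative names, not the ovs code): route every error through a single cleanup label so partially built state is always released.
#include <stdlib.h>

struct group_mod { void *buckets; void *props; };

static int parse_buckets(struct group_mod *gm) { gm->buckets = malloc(64); return gm->buckets ? 0 : -1; }
static int parse_props(struct group_mod *gm)   { (void)gm; return -1; /* simulate a decode error */ }

static int decode_group_mod(struct group_mod *gm)
{
    int error;

    gm->buckets = gm->props = NULL;
    error = parse_buckets(gm);
    if (error)
        goto out;
    error = parse_props(gm);
    if (error)
        goto out;        /* without cleanup below, buckets would leak */
out:
    if (error) {
        free(gm->buckets);   /* free(NULL) is a no-op, so this is safe */
        free(gm->props);
    }
    return error;
}

int main(void)
{
    struct group_mod gm;
    return decode_group_mod(&gm) ? 1 : 0;  /* error path exercised, no leak */
}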
QPDFTokenizer::expectInlineImage()
{
expectInlineImage(PointerHolder<InputSource>());
}
| 0 |
[
"CWE-787"
] |
qpdf
|
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
| 173,344,198,324,949,130,000,000,000,000,000,000,000 | 4 |
Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition.
|
static void cm_format_rep(struct cm_rep_msg *rep_msg,
struct cm_id_private *cm_id_priv,
struct ib_cm_rep_param *param)
{
cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
rep_msg->local_comm_id = cm_id_priv->id.local_id;
rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
rep_msg->resp_resources = param->responder_resources;
cm_rep_set_target_ack_delay(rep_msg,
cm_id_priv->av.port->cm_dev->ack_delay);
cm_rep_set_failover(rep_msg, param->failover_accepted);
cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
rep_msg->initiator_depth = param->initiator_depth;
cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
cm_rep_set_srq(rep_msg, param->srq);
cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
} else {
cm_rep_set_srq(rep_msg, 1);
cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
}
if (param->private_data && param->private_data_len)
memcpy(rep_msg->private_data, param->private_data,
param->private_data_len);
}
| 0 |
[
"CWE-20"
] |
linux
|
b2853fd6c2d0f383dbdf7427e263eb576a633867
| 125,095,365,254,738,120,000,000,000,000,000,000,000 | 29 |
IB/core: Don't resolve passive side RoCE L2 address in CMA REQ handler
The code that resolves the passive side source MAC within the rdma_cm
connection request handler was both redundant and buggy, so remove it.
It was redundant since later, when an RC QP is modified to RTR state,
the resolution will take place in the ib_core module. It was buggy
because this callback also deals with UD SIDR exchange, for which we
incorrectly looked at the REQ member of the CM event and dereferenced
a random value.
Fixes: dd5f03beb4f7 ("IB/core: Ethernet L2 attributes in verbs/cm structures")
Signed-off-by: Moni Shoua <[email protected]>
Signed-off-by: Or Gerlitz <[email protected]>
Signed-off-by: Roland Dreier <[email protected]>
|
supported_hashes(void)
{
/* TODO: check if jose has a way to export the hash algorithms it
* supports. */
static const char* hashes[] = {"S1", "S224", "S256", "S384", "S512", NULL};
return hashes;
}
| 0 |
[
"CWE-200"
] |
tang
|
e82459fda10f0630c3414ed2afbc6320bb9ea7c9
| 214,355,992,516,253,950,000,000,000,000,000,000,000 | 7 |
keys: move signing part out of find_by_thp() and to find_jws() (#81)
Handle just signing keys in find_jws(), to make sure we are
responding only to proper queries.
Tests were also failing to detect this issue and were updated
accordingly.
Issue discovered by Twitter Kernel and OS team during a source
code audit while evaluating Tang/Clevis for their needs.
Fixes CVE-2021-4076
|
TEST_F(AsStringGraphTest, Int8) {
TF_ASSERT_OK(Init(DT_INT8));
AddInputFromArray<int8>(TensorShape({3}), {-42, 0, 42});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_STRING, TensorShape({3}));
test::FillValues<tstring>(&expected, {"-42", "0", "42"});
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}
| 0 |
[
"CWE-20",
"CWE-134",
"CWE-703"
] |
tensorflow
|
33be22c65d86256e6826666662e40dbdfe70ee83
| 39,284,600,599,878,914,000,000,000,000,000,000,000 | 9 |
Prevent format string vulnerability in `tf.strings.as_string`.
The `printf` format specifier only allows `#`, `0`, `-`, `+` and space as flag characters. Others are interpreted as width/precision/length modifiers or conversion specifiers. If a character does not fit into any of these sets, `printf` just displays it.
Also add a test suite for `tf.strings.as_string`, and fix the issue where the flag character was used only if a width was specified.
PiperOrigin-RevId: 332553548
Change-Id: Ie57cf2a7c14d1a36097642794c14329db669bbba
|
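The validation the message describes, sketched in C: only the characters printf accepts as flags may be passed through; anything else must be rejected rather than spliced into a format string.
#include <stdbool.h>
#include <stdio.h>

/* printf allows exactly these flag characters; any other byte in
 * the "fill" position would be interpreted as part of the width,
 * precision, length modifier or conversion specifier. */
static bool is_valid_printf_flag(char c)
{
    return c == '#' || c == '0' || c == '-' || c == '+' || c == ' ';
}

int main(void)
{
    char fill = '0';
    char format[16];

    if (!is_valid_printf_flag(fill))
        return 1;                       /* reject, don't build the format */
    snprintf(format, sizeof(format), "%%%c8d", fill);
    printf(format, 42);                 /* "%08d" -> 00000042 */
    return 0;
}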
wc_push_to_euctw(Str os, wc_wchar_t cc, wc_status *st)
{
while (1) {
switch (cc.ccs) {
case WC_CCS_US_ASCII:
Strcat_char(os, (char)cc.code);
return;
case WC_CCS_CNS_11643_1:
break;
case WC_CCS_CNS_11643_2:
case WC_CCS_CNS_11643_3:
case WC_CCS_CNS_11643_4:
case WC_CCS_CNS_11643_5:
case WC_CCS_CNS_11643_6:
case WC_CCS_CNS_11643_7:
Strcat_char(os, WC_C_SS2R);
Strcat_char(os, (char)(0xA1 + (cc.ccs - WC_CCS_CNS_11643_1)));
break;
case WC_CCS_CNS_11643_8:
case WC_CCS_CNS_11643_9:
case WC_CCS_CNS_11643_10:
case WC_CCS_CNS_11643_11:
case WC_CCS_CNS_11643_12:
case WC_CCS_CNS_11643_13:
case WC_CCS_CNS_11643_14:
case WC_CCS_CNS_11643_15:
case WC_CCS_CNS_11643_16:
Strcat_char(os, WC_C_SS2R);
Strcat_char(os, (char)(0xA8 + (cc.ccs - WC_CCS_CNS_11643_8)));
break;
case WC_CCS_C1:
Strcat_char(os, (char)(cc.code | 0x80));
return;
case WC_CCS_UNKNOWN_W:
if (!WcOption.no_replace)
Strcat_charp(os, WC_REPLACE_W);
return;
case WC_CCS_UNKNOWN:
if (!WcOption.no_replace)
Strcat_charp(os, WC_REPLACE);
return;
default:
#ifdef USE_UNICODE
if (WcOption.ucs_conv)
cc = wc_any_to_any_ces(cc, st);
else
#endif
cc.ccs = WC_CCS_IS_WIDE(cc.ccs) ? WC_CCS_UNKNOWN_W : WC_CCS_UNKNOWN;
continue;
}
Strcat_char(os, (char)((cc.code >> 8) | 0x80));
Strcat_char(os, (char)((cc.code & 0xff) | 0x80));
return;
}
}
| 0 |
[
"CWE-125"
] |
w3m
|
9cf6926c5d947371dc9e44f32bc7a2fbfca5d469
| 219,799,166,165,060,400,000,000,000,000,000,000,000 | 55 |
Prevent segfault when iso2022 parsing
Bug-Debian: https://github.com/tats/w3m/issues/14
|
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
int ofs, int depth)
{
struct dnode_of_data rdn = *dn;
struct page *page;
struct f2fs_node *rn;
nid_t child_nid;
unsigned int child_nofs;
int freed = 0;
int i, ret;
if (dn->nid == 0)
return NIDS_PER_BLOCK + 1;
trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
if (IS_ERR(page)) {
trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
return PTR_ERR(page);
}
ra_node_pages(page, ofs, NIDS_PER_BLOCK);
rn = F2FS_NODE(page);
if (depth < 3) {
for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
child_nid = le32_to_cpu(rn->in.nid[i]);
if (child_nid == 0)
continue;
rdn.nid = child_nid;
ret = truncate_dnode(&rdn);
if (ret < 0)
goto out_err;
if (set_nid(page, i, 0, false))
dn->node_changed = true;
}
} else {
child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
for (i = ofs; i < NIDS_PER_BLOCK; i++) {
child_nid = le32_to_cpu(rn->in.nid[i]);
if (child_nid == 0) {
child_nofs += NIDS_PER_BLOCK + 1;
continue;
}
rdn.nid = child_nid;
ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
if (ret == (NIDS_PER_BLOCK + 1)) {
if (set_nid(page, i, 0, false))
dn->node_changed = true;
child_nofs += ret;
} else if (ret < 0 && ret != -ENOENT) {
goto out_err;
}
}
freed = child_nofs;
}
if (!ofs) {
/* remove current indirect node */
dn->node_page = page;
truncate_node(dn);
freed++;
} else {
f2fs_put_page(page, 1);
}
trace_f2fs_truncate_nodes_exit(dn->inode, freed);
return freed;
out_err:
f2fs_put_page(page, 1);
trace_f2fs_truncate_nodes_exit(dn->inode, ret);
return ret;
}
| 0 |
[
"CWE-200",
"CWE-362"
] |
linux
|
30a61ddf8117c26ac5b295e1233eaa9629a94ca3
| 286,688,034,317,035,100,000,000,000,000,000,000,000 | 74 |
f2fs: fix race condition in between free nid allocator/initializer
In the concurrent case below, an allocated nid can be loaded into the free
nid cache and be allocated again.
Thread A Thread B
- f2fs_create
- f2fs_new_inode
- alloc_nid
- __insert_nid_to_list(ALLOC_NID_LIST)
- f2fs_balance_fs_bg
- build_free_nids
- __build_free_nids
- scan_nat_page
- add_free_nid
- __lookup_nat_cache
- f2fs_add_link
- init_inode_metadata
- new_inode_page
- new_node_page
- set_node_addr
- alloc_nid_done
- __remove_nid_from_list(ALLOC_NID_LIST)
- __insert_nid_to_list(FREE_NID_LIST)
This patch makes the NAT cache lookup and the free nid list operation atomic
to avoid this race condition.
Signed-off-by: Jaegeuk Kim <[email protected]>
Signed-off-by: Chao Yu <[email protected]>
Signed-off-by: Jaegeuk Kim <[email protected]>
|
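A sketch of the fix's idea (illustrative names, not the f2fs code): the NAT-cache lookup and the free-list insertion must happen under one lock so Thread B cannot observe the nid between Thread A's two updates.
/* Kernel-context sketch: do the lookup and the list update as one
 * critical section instead of two independently locked steps. */
spin_lock(&nm_i->nid_list_lock);
ne = __lookup_nat_cache(nm_i, nid);
if (!ne || nat_get_blkaddr(ne) == NULL_ADDR)
    /* Only nids with no block address may enter the free list; an
     * already-allocated nid seen here is skipped, closing the race. */
    __insert_free_nid(nm_i, nid);
spin_unlock(&nm_i->nid_list_lock);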
TEST_P(Http2FloodMitigationTest, ZerolenHeader) {
beginSession();
// Send invalid request.
uint32_t request_idx = 0;
auto request = Http2Frame::makeMalformedRequestWithZerolenHeader(request_idx, "host", "/");
sendFame(request);
tcp_client_->waitForDisconnect();
EXPECT_EQ(1, test_server_->counter("http2.rx_messaging_error")->value());
EXPECT_EQ(1,
test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value());
}
| 0 |
[
"CWE-400",
"CWE-703"
] |
envoy
|
afc39bea36fd436e54262f150c009e8d72db5014
| 244,126,274,932,040,100,000,000,000,000,000,000,000 | 14 |
Track byteSize of HeaderMap internally.
Introduces a cached byte size updated internally in HeaderMap. The value
is stored as an optional, and is cleared whenever a non-const pointer or
reference to a HeaderEntry is accessed. The cached value can be set with
refreshByteSize() which performs an iteration over the HeaderMap to sum
the size of each key and value in the HeaderMap.
Signed-off-by: Asra Ali <[email protected]>
|
bool dns_server_limited_domains(DnsServer *server) {
DnsSearchDomain *domain;
bool domain_restricted = false;
/* Check if the server has route-only domains without ~., i. e. whether
* it should only be used for particular domains */
if (!server->link)
return false;
LIST_FOREACH(domains, domain, server->link->search_domains)
if (domain->route_only) {
domain_restricted = true;
/* ~. means "any domain", thus it is a global server */
if (dns_name_is_root(DNS_SEARCH_DOMAIN_NAME(domain)))
return false;
}
return domain_restricted;
}
| 0 |
[
"CWE-416"
] |
systemd
|
904dcaf9d4933499f8334859f52ea8497f2d24ff
| 311,006,574,309,685,580,000,000,000,000,000,000,000 | 19 |
resolved: take particular care when detaching DnsServer from its default stream
DnsStream and DnsServer have a symbiotic relationship: one DnsStream is
the current "default" stream of the server (and thus reffed by it), but
each stream also refs the server it is connected to. This cyclic
dependency can result in weird situations: when one is
destroyed/unlinked/stopped it needs to unregister itself from the other,
but doing this will trigger unregistration of the other. Hence, let's
make sure we unregister the stream from the server before destroying it,
to break this cycle.
Most likely fixes: #10725
|
static void port_cleanup(struct si_sm_io *io)
{
unsigned int addr = io->addr_data;
int idx;
if (addr) {
for (idx = 0; idx < io->io_size; idx++)
release_region(addr + idx * io->regspacing,
io->regsize);
}
}
| 0 |
[
"CWE-416"
] |
linux
|
401e7e88d4ef80188ffa07095ac00456f901b8c4
| 308,166,744,359,027,800,000,000,000,000,000,000,000 | 11 |
ipmi_si: fix use-after-free of resource->name
When we execute the following commands, we get an oops:
rmmod ipmi_si
cat /proc/ioports
[ 1623.482380] Unable to handle kernel paging request at virtual address ffff00000901d478
[ 1623.482382] Mem abort info:
[ 1623.482383] ESR = 0x96000007
[ 1623.482385] Exception class = DABT (current EL), IL = 32 bits
[ 1623.482386] SET = 0, FnV = 0
[ 1623.482387] EA = 0, S1PTW = 0
[ 1623.482388] Data abort info:
[ 1623.482389] ISV = 0, ISS = 0x00000007
[ 1623.482390] CM = 0, WnR = 0
[ 1623.482393] swapper pgtable: 4k pages, 48-bit VAs, pgdp = 00000000d7d94a66
[ 1623.482395] [ffff00000901d478] pgd=000000dffbfff003, pud=000000dffbffe003, pmd=0000003f5d06e003, pte=0000000000000000
[ 1623.482399] Internal error: Oops: 96000007 [#1] SMP
[ 1623.487407] Modules linked in: ipmi_si(E) nls_utf8 isofs rpcrdma ib_iser ib_srpt target_core_mod ib_srp scsi_transport_srp ib_ipoib rdma_ucm ib_umad rdma_cm ib_cm dm_mirror dm_region_hash dm_log iw_cm dm_mod aes_ce_blk crypto_simd cryptd aes_ce_cipher ses ghash_ce sha2_ce enclosure sha256_arm64 sg sha1_ce hisi_sas_v2_hw hibmc_drm sbsa_gwdt hisi_sas_main ip_tables mlx5_ib ib_uverbs marvell ib_core mlx5_core ixgbe mdio hns_dsaf ipmi_devintf hns_enet_drv ipmi_msghandler hns_mdio [last unloaded: ipmi_si]
[ 1623.532410] CPU: 30 PID: 11438 Comm: cat Kdump: loaded Tainted: G E 5.0.0-rc3+ #168
[ 1623.541498] Hardware name: Huawei TaiShan 2280 /BC11SPCD, BIOS 1.37 11/21/2017
[ 1623.548822] pstate: a0000005 (NzCv daif -PAN -UAO)
[ 1623.553684] pc : string+0x28/0x98
[ 1623.557040] lr : vsnprintf+0x368/0x5e8
[ 1623.560837] sp : ffff000013213a80
[ 1623.564191] x29: ffff000013213a80 x28: ffff00001138abb5
[ 1623.569577] x27: ffff000013213c18 x26: ffff805f67d06049
[ 1623.574963] x25: 0000000000000000 x24: ffff00001138abb5
[ 1623.580349] x23: 0000000000000fb7 x22: ffff0000117ed000
[ 1623.585734] x21: ffff000011188fd8 x20: ffff805f67d07000
[ 1623.591119] x19: ffff805f67d06061 x18: ffffffffffffffff
[ 1623.596505] x17: 0000000000000200 x16: 0000000000000000
[ 1623.601890] x15: ffff0000117ed748 x14: ffff805f67d07000
[ 1623.607276] x13: ffff805f67d0605e x12: 0000000000000000
[ 1623.612661] x11: 0000000000000000 x10: 0000000000000000
[ 1623.618046] x9 : 0000000000000000 x8 : 000000000000000f
[ 1623.623432] x7 : ffff805f67d06061 x6 : fffffffffffffffe
[ 1623.628817] x5 : 0000000000000012 x4 : ffff00000901d478
[ 1623.634203] x3 : ffff0a00ffffff04 x2 : ffff805f67d07000
[ 1623.639588] x1 : ffff805f67d07000 x0 : ffffffffffffffff
[ 1623.644974] Process cat (pid: 11438, stack limit = 0x000000008d4cbc10)
[ 1623.651592] Call trace:
[ 1623.654068] string+0x28/0x98
[ 1623.657071] vsnprintf+0x368/0x5e8
[ 1623.660517] seq_vprintf+0x70/0x98
[ 1623.668009] seq_printf+0x7c/0xa0
[ 1623.675530] r_show+0xc8/0xf8
[ 1623.682558] seq_read+0x330/0x440
[ 1623.689877] proc_reg_read+0x78/0xd0
[ 1623.697346] __vfs_read+0x60/0x1a0
[ 1623.704564] vfs_read+0x94/0x150
[ 1623.711339] ksys_read+0x6c/0xd8
[ 1623.717939] __arm64_sys_read+0x24/0x30
[ 1623.725077] el0_svc_common+0x120/0x148
[ 1623.732035] el0_svc_handler+0x30/0x40
[ 1623.738757] el0_svc+0x8/0xc
[ 1623.744520] Code: d1000406 aa0103e2 54000149 b4000080 (39400085)
[ 1623.753441] ---[ end trace f91b6a4937de9835 ]---
[ 1623.760871] Kernel panic - not syncing: Fatal exception
[ 1623.768935] SMP: stopping secondary CPUs
[ 1623.775718] Kernel Offset: disabled
[ 1623.781998] CPU features: 0x002,21006008
[ 1623.788777] Memory Limit: none
[ 1623.798329] Starting crashdump kernel...
[ 1623.805202] Bye!
If io_setup() is called successfully in try_smi_init() but try_smi_init()
goes to out_err before calling ipmi_register_smi(), then ipmi_unregister_smi()
will not be called while removing the module. As a result, the resource
allocated in io_setup() cannot be freed, but the name (DEVICE_NAME) of the
resource is freed while removing the module. This causes a use-after-free
when running cat /proc/ioports.
Fix this by calling io_cleanup() when try_smi_init() goes to out_err,
and don't call io_cleanup() until io_setup() has returned successfully, to avoid
warning prints.
Fixes: 93c303d2045b ("ipmi_si: Clean up shutdown a bit")
Cc: [email protected]
Reported-by: NuoHan Qiao <[email protected]>
Suggested-by: Corey Minyard <[email protected]>
Signed-off-by: Yang Yingliang <[email protected]>
Signed-off-by: Corey Minyard <[email protected]>
|
static void print_msr_bits(unsigned long val)
{
pr_cont("<");
print_bits(val, msr_bits, ",");
print_tm_bits(val);
pr_cont(">");
}
| 0 |
[
"CWE-862"
] |
linux
|
8205d5d98ef7f155de211f5e2eb6ca03d95a5a60
| 99,563,259,524,615,350,000,000,000,000,000,000,000 | 7 |
powerpc/tm: Fix FP/VMX unavailable exceptions inside a transaction
When we take an FP unavailable exception in a transaction we have to
account for the hardware FP TM checkpointed registers being
incorrect. In this case for this process we know the current and
checkpointed FP registers must be the same (since FP wasn't used
inside the transaction) hence in the thread_struct we copy the current
FP registers to the checkpointed ones.
This copy is done in tm_reclaim_thread(). We use thread->ckpt_regs.msr
to determine if FP was on when in userspace. thread->ckpt_regs.msr
represents the state of the MSR when exiting userspace. This is setup
by check_if_tm_restore_required().
Unfortunately there is an optimisation in giveup_all() which returns
early if tsk->thread.regs->msr (via local variable `usermsr`) has
FP=VEC=VSX=SPE=0. This optimisation means that
check_if_tm_restore_required() is not called and hence
thread->ckpt_regs.msr is not updated and will contain an old value.
This can happen if due to load_fp=255 we start a userspace process
with MSR FP=1 and then we are context switched out. In this case
thread->ckpt_regs.msr will contain FP=1. If that same process is then
context switched in and load_fp overflows, MSR will have FP=0. If that
process now enters a transaction and does an FP instruction, the FP
unavailable will not update thread->ckpt_regs.msr (the bug) and MSR
FP=1 will be retained in thread->ckpt_regs.msr. tm_reclaim_thread()
will then not perform the required memcpy and the checkpointed FP regs
in the thread struct will contain the wrong values.
The code path for this happening is:
Userspace: Kernel
Start userspace
with MSR FP/VEC/VSX/SPE=0 TM=1
< -----
...
tbegin
bne
fp instruction
FP unavailable
---- >
fp_unavailable_tm()
tm_reclaim_current()
tm_reclaim_thread()
giveup_all()
return early since FP/VMX/VSX=0
/* ckpt MSR not updated (Incorrect) */
tm_reclaim()
/* thread_struct ckpt FP regs contain junk (OK) */
/* Sees ckpt MSR FP=1 (Incorrect) */
no memcpy() performed
/* thread_struct ckpt FP regs not fixed (Incorrect) */
tm_recheckpoint()
/* Put junk in hardware checkpoint FP regs */
....
< -----
Return to userspace
with MSR TM=1 FP=1
with junk in the FP TM checkpoint
TM rollback
reads FP junk
This is a data integrity problem for the current process as the FP
registers are corrupted. It's also a security problem as the FP
registers from one process may be leaked to another.
This patch moves up check_if_tm_restore_required() in giveup_all() to
ensure thread->ckpt_regs.msr is updated correctly.
A simple testcase to replicate this will be posted to
tools/testing/selftests/powerpc/tm/tm-poison.c
Similarly for VMX.
This fixes CVE-2019-15030.
Fixes: f48e91e87e67 ("powerpc/tm: Fix FP and VMX register corruption")
Cc: [email protected] # 4.12+
Signed-off-by: Gustavo Romero <[email protected]>
Signed-off-by: Michael Neuling <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
|
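The essence of the fix as described above, sketched (kernel-context, hedged; not the literal patch): hoist the checkpointed-MSR bookkeeping above the early-return optimisation so it runs even when FP/VEC/VSX are all off.
static void giveup_all(struct task_struct *tsk)
{
    unsigned long usermsr;

    if (!tsk->thread.regs)
        return;

    usermsr = tsk->thread.regs->msr;

    /* Must run before the early return below; otherwise
     * thread->ckpt_regs.msr keeps a stale FP/VMX state and
     * tm_reclaim_thread() skips the checkpoint-register copy. */
    check_if_tm_restore_required(tsk);

    if ((usermsr & msr_all_available) == 0)
        return;

    /* ... give up FP/ALTIVEC/VSX/SPE as before ... */
}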
Bool gf_isom_is_JPEG2000(GF_ISOFile *mov)
{
return (mov && mov->is_jp2) ? GF_TRUE : GF_FALSE;
}
| 0 |
[
"CWE-787"
] |
gpac
|
f0a41d178a2dc5ac185506d9fa0b0a58356b16f7
| 89,089,159,928,778,870,000,000,000,000,000,000,000 | 4 |
fixed #2120
|
void _ma_check_print_error(HA_CHECK *param, const char *fmt, ...)
{
va_list args;
DBUG_ENTER("_ma_check_print_error");
param->error_printed++;
param->out_flag |= O_DATA_LOST;
if (param->testflag & T_SUPPRESS_ERR_HANDLING)
DBUG_VOID_RETURN;
va_start(args, fmt);
_ma_check_print_msg(param, MA_CHECK_ERROR, fmt, args);
va_end(args);
DBUG_VOID_RETURN;
}
| 0 |
[
"CWE-400"
] |
server
|
9e39d0ae44595dbd1570805d97c9c874778a6be8
| 53,856,293,814,489,250,000,000,000,000,000,000,000 | 13 |
MDEV-25787 Bug report: crash on SELECT DISTINCT thousands_blob_fields
fix a debug assert to account for not opened temp tables
|
static int ntlm_read_message_header(wStream* s, NTLM_MESSAGE_HEADER* header)
{
if (Stream_GetRemainingLength(s) < 12)
return -1;
Stream_Read(s, header->Signature, 8);
Stream_Read_UINT32(s, header->MessageType);
if (strncmp((char*) header->Signature, NTLM_SIGNATURE, 8) != 0)
return -1;
return 1;
}
| 0 |
[
"CWE-416",
"CWE-125"
] |
FreeRDP
|
2ee663f39dc8dac3d9988e847db19b2d7e3ac8c6
| 144,140,771,714,169,000,000,000,000,000,000,000,000 | 13 |
Fixed CVE-2018-8789
Thanks to Eyal Itkin from Check Point Software Technologies.
|
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
int trig_mode, int vector)
{
/*
* vcpu->arch.apicv_active must be read after vcpu->mode.
* Pairs with smp_store_release in vcpu_enter_guest.
*/
bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE);
if (!READ_ONCE(vcpu->arch.apicv_active)) {
/* Process the interrupt via inject_pending_event */
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
return;
}
trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector);
if (in_guest_mode) {
/*
* Signal the doorbell to tell hardware to inject the IRQ. If
* the vCPU exits the guest before the doorbell chimes, hardware
* will automatically process AVIC interrupts at the next VMRUN.
*/
avic_ring_doorbell(vcpu);
} else {
/*
* Wake the vCPU if it was blocking. KVM will then detect the
* pending IRQ when checking if the vCPU has a wake event.
*/
kvm_vcpu_wake_up(vcpu);
}
}
| 0 |
[
"CWE-703"
] |
linux
|
6cd88243c7e03845a450795e134b488fc2afb736
| 327,648,438,388,004,200,000,000,000,000,000,000,000 | 32 |
KVM: x86: do not report a vCPU as preempted outside instruction boundaries
If a vCPU is outside guest mode and is scheduled out, it might be in the
process of making a memory access. A problem occurs if another vCPU uses
the PV TLB flush feature during the period when the vCPU is scheduled
out, and a virtual address has already been translated but has not yet
been accessed, because this is equivalent to using a stale TLB entry.
To avoid this, only report a vCPU as preempted if it is certain that the
guest is at an instruction boundary. A rescheduling request will be delivered
to the host physical CPU as an external interrupt, so for simplicity
consider any vmexit other than an external interrupt as *not* being at an
instruction boundary.
It would in principle be okay to report the vCPU as preempted also
if it is sleeping in kvm_vcpu_block(): a TLB flush IPI will incur the
vmentry/vmexit overhead unnecessarily, and optimistic spinning is
also unlikely to succeed. However, leave it for later because right
now kvm_vcpu_check_block() is doing memory accesses. Even
though the TLB flush issue only applies to virtual memory addresses,
it's very much preferable to be conservative.
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
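A hedged sketch of the rule the message states (simplified; the mainline fix tracks this with a per-vCPU flag set on external-interrupt vmexits and cleared on guest entry): only record the PV "preempted" hint when the vCPU was last seen at an instruction boundary.
/* Sketch, not the literal KVM code. at_instruction_boundary is an
 * assumed per-vCPU flag: set on vmexits known to land between
 * instructions (external interrupts), cleared when entering the guest. */
static void record_preempted(struct kvm_vcpu *vcpu)
{
    if (vcpu->arch.at_instruction_boundary)
        kvm_steal_time_set_preempted(vcpu);
    /* Otherwise skip the hint: another vCPU doing a PV TLB flush
     * must not treat a half-finished memory access as preempted. */
}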
rsvg_cairo_push_early_clips (RsvgDrawingCtx * ctx)
{
RsvgCairoRender *render = RSVG_CAIRO_RENDER (ctx->render);
cairo_save (render->cr);
if (rsvg_current_state (ctx)->clip_path_ref)
if (((RsvgClipPath *) rsvg_current_state (ctx)->clip_path_ref)->units == userSpaceOnUse)
rsvg_cairo_clip (ctx, rsvg_current_state (ctx)->clip_path_ref, NULL);
}
| 0 |
[
"CWE-20"
] |
librsvg
|
d1c9191949747f6dcfd207831d15dd4ba00e31f2
| 106,891,245,301,405,880,000,000,000,000,000,000,000 | 10 |
state: Store mask as reference
Instead of immediately looking up the mask, store the reference and look
it up on use.
|
static int AppLayerProtoDetectPPTestData(AppLayerProtoDetectProbingParser *pp,
AppLayerProtoDetectPPTestDataIPProto *ip_proto,
int no_of_ip_proto)
{
int result = 0;
int i = -1, j = -1 , k = -1;
#ifdef DEBUG
int dir = 0;
#endif
for (i = 0; i < no_of_ip_proto; i++, pp = pp->next) {
if (pp->ipproto != ip_proto[i].ipproto)
goto end;
AppLayerProtoDetectProbingParserPort *pp_port = pp->port;
for (k = 0; k < ip_proto[i].no_of_port; k++, pp_port = pp_port->next) {
if (pp_port->port != ip_proto[i].port[k].port)
goto end;
if (pp_port->alproto_mask != ip_proto[i].port[k].alproto_mask)
goto end;
if (pp_port->alproto_mask != ip_proto[i].port[k].alproto_mask)
goto end;
if (pp_port->dp_max_depth != ip_proto[i].port[k].dp_max_depth)
goto end;
if (pp_port->sp_max_depth != ip_proto[i].port[k].sp_max_depth)
goto end;
AppLayerProtoDetectProbingParserElement *pp_element = pp_port->dp;
#ifdef DEBUG
dir = 0;
#endif
for (j = 0 ; j < ip_proto[i].port[k].ts_no_of_element;
j++, pp_element = pp_element->next) {
if (pp_element->alproto != ip_proto[i].port[k].toserver_element[j].alproto) {
goto end;
}
if (pp_element->port != ip_proto[i].port[k].toserver_element[j].port) {
goto end;
}
if (pp_element->alproto_mask != ip_proto[i].port[k].toserver_element[j].alproto_mask) {
goto end;
}
if (pp_element->min_depth != ip_proto[i].port[k].toserver_element[j].min_depth) {
goto end;
}
if (pp_element->max_depth != ip_proto[i].port[k].toserver_element[j].max_depth) {
goto end;
}
} /* for */
if (pp_element != NULL)
goto end;
pp_element = pp_port->sp;
#ifdef DEBUG
dir = 1;
#endif
for (j = 0 ; j < ip_proto[i].port[k].tc_no_of_element; j++, pp_element = pp_element->next) {
if (pp_element->alproto != ip_proto[i].port[k].toclient_element[j].alproto) {
goto end;
}
if (pp_element->port != ip_proto[i].port[k].toclient_element[j].port) {
goto end;
}
if (pp_element->alproto_mask != ip_proto[i].port[k].toclient_element[j].alproto_mask) {
goto end;
}
if (pp_element->min_depth != ip_proto[i].port[k].toclient_element[j].min_depth) {
goto end;
}
if (pp_element->max_depth != ip_proto[i].port[k].toclient_element[j].max_depth) {
goto end;
}
} /* for */
if (pp_element != NULL)
goto end;
}
if (pp_port != NULL)
goto end;
}
if (pp != NULL)
goto end;
result = 1;
end:
#ifdef DEBUG
printf("i = %d, k = %d, j = %d(%s)\n", i, k, j, (dir == 0) ? "ts" : "tc");
#endif
return result;
}
| 0 |
[
"CWE-20"
] |
suricata
|
8357ef3f8ffc7d99ef6571350724160de356158b
| 202,058,506,746,680,880,000,000,000,000,000,000,000 | 89 |
proto/detect: workaround dns misdetected as dcerpc
The DCERPC UDP detection would misfire on DNS with transaction
ID 0x0400. This would happen as the protocol detection engine
gives preference to pattern based detection over probing parsers for
performance reasons.
This hack/workaround fixes this specific case by still running the
probing parser if DCERPC has been detected on UDP. The probing
parser result will take precedence.
Bug #2736.
|
static int errorCond(OFCondition cond, const char *message)
{
int result = (cond.bad());
if (result)
{
OFString temp_str;
OFLOG_ERROR(dcmpsrcvLogger, message << OFendl << DimseCondition::dump(temp_str, cond));
}
return result;
}
| 0 |
[
"CWE-264"
] |
dcmtk
|
beaf5a5c24101daeeafa48c375120b16197c9e95
| 60,377,604,287,578,450,000,000,000,000,000,000,000 | 10 |
Make sure to handle setuid() return code properly.
In some tools the return value of setuid() is not checked. In the worst
case this could lead to privilege escalation since the process does not
give up its root privileges and continues as root.
|
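The fix pattern in its simplest form, sketched in C: check setuid() and bail out when dropping privileges fails.
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void drop_privileges(void)
{
    /* setuid() can fail (e.g. under RLIMIT_NPROC on Linux);
     * continuing as root in that case is a privilege-escalation
     * hazard, so treat failure as fatal. */
    if (setuid(getuid()) != 0) {
        perror("setuid");
        exit(EXIT_FAILURE);
    }
}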
static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
struct inode *inode;
unsigned long idx;
unsigned long size;
unsigned long limit;
unsigned long stage;
struct page **dir;
struct page *subdir;
swp_entry_t *ptr;
int offset;
idx = 0;
ptr = info->i_direct;
spin_lock(&info->lock);
limit = info->next_index;
size = limit;
if (size > SHMEM_NR_DIRECT)
size = SHMEM_NR_DIRECT;
offset = shmem_find_swp(entry, ptr, ptr+size);
if (offset >= 0) {
shmem_swp_balance_unmap();
goto found;
}
if (!info->i_indirect)
goto lost2;
dir = shmem_dir_map(info->i_indirect);
stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;
for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
if (unlikely(idx == stage)) {
shmem_dir_unmap(dir-1);
dir = shmem_dir_map(info->i_indirect) +
ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
while (!*dir) {
dir++;
idx += ENTRIES_PER_PAGEPAGE;
if (idx >= limit)
goto lost1;
}
stage = idx + ENTRIES_PER_PAGEPAGE;
subdir = *dir;
shmem_dir_unmap(dir);
dir = shmem_dir_map(subdir);
}
subdir = *dir;
if (subdir && page_private(subdir)) {
ptr = shmem_swp_map(subdir);
size = limit - idx;
if (size > ENTRIES_PER_PAGE)
size = ENTRIES_PER_PAGE;
offset = shmem_find_swp(entry, ptr, ptr+size);
if (offset >= 0) {
shmem_dir_unmap(dir);
goto found;
}
shmem_swp_unmap(ptr);
}
}
lost1:
shmem_dir_unmap(dir-1);
lost2:
spin_unlock(&info->lock);
return 0;
found:
idx += offset;
inode = &info->vfs_inode;
if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
info->flags |= SHMEM_PAGEIN;
shmem_swp_set(info, ptr + offset, 0);
}
shmem_swp_unmap(ptr);
spin_unlock(&info->lock);
/*
* Decrement swap count even when the entry is left behind:
* try_to_unuse will skip over mms, then reincrement count.
*/
swap_free(entry);
return 1;
}
| 0 |
[
"CWE-200"
] |
linux-2.6
|
e84e2e132c9c66d8498e7710d4ea532d1feaaac5
| 195,538,941,301,522,960,000,000,000,000,000,000,000 | 81 |
tmpfs: restore missing clear_highpage
tmpfs was misconverted to __GFP_ZERO in 2.6.11. There's an unusual case in
which shmem_getpage receives the page from its caller instead of allocating it.
We must cover this case by calling clear_highpage before SetPageUptodate, as before.
Signed-off-by: Hugh Dickins <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
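The rule the commit restores, sketched in kernel-context C (hedged): a page handed in by the caller must be explicitly zeroed before it is published as up to date, since it did not come from a __GFP_ZERO allocation.
/* Cover the caller-supplied-page case before marking uptodate. */
if (!PageUptodate(page)) {
    clear_highpage(page);
    flush_dcache_page(page);
    SetPageUptodate(page);
}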
Goffset PDFDoc::writeObjectHeader (Ref *ref, OutStream* outStr)
{
Goffset offset = outStr->getPos();
outStr->printf("%i %i obj\r\n", ref->num, ref->gen);
return offset;
}
| 0 |
[
"CWE-20"
] |
poppler
|
9fd5ec0e6e5f763b190f2a55ceb5427cfe851d5f
| 300,706,516,860,090,720,000,000,000,000,000,000,000 | 6 |
PDFDoc::setup: Fix return value
At that point the xref can have gone wrong, since extractPDFSubtype() can
have caused a reconstruction that broke things, so instead of unconditionally
returning true, return xref->isOk()
Fixes #706
|
static long get_nr_inodes(void)
{
int i;
long sum = 0;
for_each_possible_cpu(i)
sum += per_cpu(nr_inodes, i);
return sum < 0 ? 0 : sum;
}
| 0 |
[
"CWE-269"
] |
linux
|
0fa3ecd87848c9c93c2c828ef4c3a8ca36ce46c7
| 63,935,265,382,008,030,000,000,000,000,000,000,000 | 8 |
Fix up non-directory creation in SGID directories
sgid directories have special semantics, making newly created files in
the directory belong to the group of the directory, and newly created
subdirectories will also become sgid. This is historically used for
group-shared directories.
But group directories writable by non-group members should not imply
that such non-group members can magically join the group, so make sure
to clear the sgid bit on non-directories for non-members (but remember
that sgid without group execute means "mandatory locking", just to
confuse things even more).
Reported-by: Jann Horn <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Al Viro <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
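The semantics described above, sketched close to the inode_init_owner() logic (kernel-context, hedged and simplified):
/* On an sgid directory, new inodes inherit the group; only
 * directories keep the sgid bit. A group-executable sgid file
 * created by a non-member (without CAP_FSETID) loses the bit,
 * since sgid without group-exec merely means mandatory locking. */
if (dir->i_mode & S_ISGID) {
    inode->i_gid = dir->i_gid;
    if (S_ISDIR(mode))
        mode |= S_ISGID;
    else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
             !in_group_p(inode->i_gid) &&
             !capable(CAP_FSETID))
        mode &= ~S_ISGID;
}
inode->i_mode = mode;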
static void __init acpi_reduced_hw_init(void)
{
if (acpi_gbl_reduced_hardware) {
/*
* Override x86_init functions and bypass legacy pic
* in Hardware-reduced ACPI mode
*/
x86_init.timers.timer_init = x86_init_noop;
x86_init.irqs.pre_vector_init = x86_init_noop;
legacy_pic = &null_legacy_pic;
}
}
| 0 |
[
"CWE-120"
] |
linux
|
dad5ab0db8deac535d03e3fe3d8f2892173fa6a4
| 203,951,617,488,592,060,000,000,000,000,000,000,000 | 12 |
x86/acpi: Prevent out of bound access caused by broken ACPI tables
The bus_irq argument of mp_override_legacy_irq() is used as the index into
the isa_irq_to_gsi[] array. The bus_irq argument originates from
ACPI_MADT_TYPE_IO_APIC and ACPI_MADT_TYPE_INTERRUPT items in the ACPI
tables, but is nowhere sanity checked.
That allows broken or malicious ACPI tables to overwrite memory, which
might cause malfunction, panic or arbitrary code execution.
Add a sanity check and emit a warning when that triggers.
[ tglx: Added warning and rewrote changelog ]
Signed-off-by: Seunghun Han <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: "Rafael J. Wysocki" <[email protected]>
Cc: [email protected]
Signed-off-by: Ingo Molnar <[email protected]>
|
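The added sanity check, sketched (hedged approximation of the fix): validate the table-supplied index before it is used to index isa_irq_to_gsi[].
/* bus_irq comes straight from a (possibly broken or malicious)
 * ACPI MADT entry, so bound it before using it as an array index. */
static void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
                                   u32 gsi)
{
    if (bus_irq >= NR_IRQS_LEGACY) {
        pr_warn("Invalid bus_irq %u for legacy override\n", bus_irq);
        return;
    }
    /* ... existing override handling, including
     *     isa_irq_to_gsi[bus_irq] = gsi; ... */
}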
bool FirstNInputsAreUnique(const NodeDef& node, int n) const {
if (n > node.input_size()) return false;
absl::flat_hash_set<string> unique_inputs;
const int start = node.op() == "Concat" ? 1 : 0;
const int end = start + n;
for (int i = start; i < end; ++i) {
unique_inputs.insert(node.input(i));
}
int unique_input_size = unique_inputs.size();
return unique_input_size == n;
}
| 0 |
[
"CWE-476"
] |
tensorflow
|
e6340f0665d53716ef3197ada88936c2a5f7a2d3
| 73,701,482,720,796,470,000,000,000,000,000,000,000 | 11 |
Handle a special grappler case resulting in crash.
It might happen that a malformed input could be used to trick Grappler into trying to optimize a node with no inputs. This, in turn, would produce a null pointer dereference and a segfault.
PiperOrigin-RevId: 369242852
Change-Id: I2e5cbe7aec243d34a6d60220ac8ac9b16f136f6b
|
GF_Err sidx_box_write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_SegmentIndexBox *ptr = (GF_SegmentIndexBox*) s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->reference_ID);
gf_bs_write_u32(bs, ptr->timescale);
if (ptr->version==0) {
gf_bs_write_u32(bs, (u32) ptr->earliest_presentation_time);
gf_bs_write_u32(bs, (u32) ptr->first_offset);
} else {
gf_bs_write_u64(bs, ptr->earliest_presentation_time);
gf_bs_write_u64(bs, ptr->first_offset);
}
gf_bs_write_u16(bs, 0);
gf_bs_write_u16(bs, ptr->nb_refs);
for (i=0; i<ptr->nb_refs; i++ ) {
gf_bs_write_int(bs, ptr->refs[i].reference_type, 1);
gf_bs_write_int(bs, ptr->refs[i].reference_size, 31);
gf_bs_write_u32(bs, ptr->refs[i].subsegment_duration);
gf_bs_write_int(bs, ptr->refs[i].starts_with_SAP, 1);
gf_bs_write_int(bs, ptr->refs[i].SAP_type, 3);
gf_bs_write_int(bs, ptr->refs[i].SAP_delta_time, 28);
}
return GF_OK;
| 0 |
[
"CWE-787"
] |
gpac
|
388ecce75d05e11fc8496aa4857b91245007d26e
| 194,472,150,065,672,100,000,000,000,000,000,000,000 | 30 |
fixed #1587
|
bool CModules::OnSendToIRCMessage(CMessage& Message) {
MODHALTCHK(OnSendToIRCMessage(Message));
}
| 0 |
[
"CWE-20",
"CWE-264"
] |
znc
|
8de9e376ce531fe7f3c8b0aa4876d15b479b7311
| 179,969,607,615,391,140,000,000,000,000,000,000,000 | 3 |
Fix remote code execution and privilege escalation vulnerability.
To trigger this, need to have a user already.
Thanks for Jeriko One <[email protected]> for finding and reporting this.
CVE-2019-12816
|
static void bfq_exit_queue(struct elevator_queue *e)
{
struct bfq_data *bfqd = e->elevator_data;
struct bfq_queue *bfqq, *n;
hrtimer_cancel(&bfqd->idle_slice_timer);
spin_lock_irq(&bfqd->lock);
list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
bfq_deactivate_bfqq(bfqd, bfqq, false, false);
spin_unlock_irq(&bfqd->lock);
hrtimer_cancel(&bfqd->idle_slice_timer);
/* release oom-queue reference to root group */
bfqg_and_blkg_put(bfqd->root_group);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
#else
spin_lock_irq(&bfqd->lock);
bfq_put_async_queues(bfqd, bfqd->root_group);
kfree(bfqd->root_group);
spin_unlock_irq(&bfqd->lock);
#endif
kfree(bfqd);
| 0 |
[
"CWE-416"
] |
linux
|
2f95fa5c955d0a9987ffdc3a095e2f4e62c5f2a9
| 130,665,974,121,228,190,000,000,000,000,000,000,000 | 28 |
block, bfq: fix use-after-free in bfq_idle_slice_timer_body
In the bfq_idle_slice_timer function, the assignment bfqq = bfqd->in_service_queue
is not inside the bfqd->lock critical section. The bfqq, which may be
non-NULL in bfq_idle_slice_timer, can be freed after being passed
to bfq_idle_slice_timer_body, so we would access freed memory.
In addition, since bfqq may be subject to a race, we should
first check whether bfqq is still in service before doing anything
with it in the bfq_idle_slice_timer_body function. If the racing bfqq is
not in service, it means the bfqq has already been expired through the
__bfq_bfqq_expire function, and its wait_request flag has been cleared in
the __bfq_bfqd_reset_in_service function. So we do not need to re-clear the
wait_request flag of a bfqq which is not in service.
KASAN log is given as follows:
[13058.354613] ==================================================================
[13058.354640] BUG: KASAN: use-after-free in bfq_idle_slice_timer+0xac/0x290
[13058.354644] Read of size 8 at addr ffffa02cf3e63f78 by task fork13/19767
[13058.354646]
[13058.354655] CPU: 96 PID: 19767 Comm: fork13
[13058.354661] Call trace:
[13058.354667] dump_backtrace+0x0/0x310
[13058.354672] show_stack+0x28/0x38
[13058.354681] dump_stack+0xd8/0x108
[13058.354687] print_address_description+0x68/0x2d0
[13058.354690] kasan_report+0x124/0x2e0
[13058.354697] __asan_load8+0x88/0xb0
[13058.354702] bfq_idle_slice_timer+0xac/0x290
[13058.354707] __hrtimer_run_queues+0x298/0x8b8
[13058.354710] hrtimer_interrupt+0x1b8/0x678
[13058.354716] arch_timer_handler_phys+0x4c/0x78
[13058.354722] handle_percpu_devid_irq+0xf0/0x558
[13058.354731] generic_handle_irq+0x50/0x70
[13058.354735] __handle_domain_irq+0x94/0x110
[13058.354739] gic_handle_irq+0x8c/0x1b0
[13058.354742] el1_irq+0xb8/0x140
[13058.354748] do_wp_page+0x260/0xe28
[13058.354752] __handle_mm_fault+0x8ec/0x9b0
[13058.354756] handle_mm_fault+0x280/0x460
[13058.354762] do_page_fault+0x3ec/0x890
[13058.354765] do_mem_abort+0xc0/0x1b0
[13058.354768] el0_da+0x24/0x28
[13058.354770]
[13058.354773] Allocated by task 19731:
[13058.354780] kasan_kmalloc+0xe0/0x190
[13058.354784] kasan_slab_alloc+0x14/0x20
[13058.354788] kmem_cache_alloc_node+0x130/0x440
[13058.354793] bfq_get_queue+0x138/0x858
[13058.354797] bfq_get_bfqq_handle_split+0xd4/0x328
[13058.354801] bfq_init_rq+0x1f4/0x1180
[13058.354806] bfq_insert_requests+0x264/0x1c98
[13058.354811] blk_mq_sched_insert_requests+0x1c4/0x488
[13058.354818] blk_mq_flush_plug_list+0x2d4/0x6e0
[13058.354826] blk_flush_plug_list+0x230/0x548
[13058.354830] blk_finish_plug+0x60/0x80
[13058.354838] read_pages+0xec/0x2c0
[13058.354842] __do_page_cache_readahead+0x374/0x438
[13058.354846] ondemand_readahead+0x24c/0x6b0
[13058.354851] page_cache_sync_readahead+0x17c/0x2f8
[13058.354858] generic_file_buffered_read+0x588/0xc58
[13058.354862] generic_file_read_iter+0x1b4/0x278
[13058.354965] ext4_file_read_iter+0xa8/0x1d8 [ext4]
[13058.354972] __vfs_read+0x238/0x320
[13058.354976] vfs_read+0xbc/0x1c0
[13058.354980] ksys_read+0xdc/0x1b8
[13058.354984] __arm64_sys_read+0x50/0x60
[13058.354990] el0_svc_common+0xb4/0x1d8
[13058.354994] el0_svc_handler+0x50/0xa8
[13058.354998] el0_svc+0x8/0xc
[13058.354999]
[13058.355001] Freed by task 19731:
[13058.355007] __kasan_slab_free+0x120/0x228
[13058.355010] kasan_slab_free+0x10/0x18
[13058.355014] kmem_cache_free+0x288/0x3f0
[13058.355018] bfq_put_queue+0x134/0x208
[13058.355022] bfq_exit_icq_bfqq+0x164/0x348
[13058.355026] bfq_exit_icq+0x28/0x40
[13058.355030] ioc_exit_icq+0xa0/0x150
[13058.355035] put_io_context_active+0x250/0x438
[13058.355038] exit_io_context+0xd0/0x138
[13058.355045] do_exit+0x734/0xc58
[13058.355050] do_group_exit+0x78/0x220
[13058.355054] __wake_up_parent+0x0/0x50
[13058.355058] el0_svc_common+0xb4/0x1d8
[13058.355062] el0_svc_handler+0x50/0xa8
[13058.355066] el0_svc+0x8/0xc
[13058.355067]
[13058.355071] The buggy address belongs to the object at ffffa02cf3e63e70#012 which belongs to the cache bfq_queue of size 464
[13058.355075] The buggy address is located 264 bytes inside of#012 464-byte region [ffffa02cf3e63e70, ffffa02cf3e64040)
[13058.355077] The buggy address belongs to the page:
[13058.355083] page:ffff7e80b3cf9800 count:1 mapcount:0 mapping:ffff802db5c90780 index:0xffffa02cf3e606f0 compound_mapcount: 0
[13058.366175] flags: 0x2ffffe0000008100(slab|head)
[13058.370781] raw: 2ffffe0000008100 ffff7e80b53b1408 ffffa02d730c1c90 ffff802db5c90780
[13058.370787] raw: ffffa02cf3e606f0 0000000000370023 00000001ffffffff 0000000000000000
[13058.370789] page dumped because: kasan: bad access detected
[13058.370791]
[13058.370792] Memory state around the buggy address:
[13058.370797] ffffa02cf3e63e00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fb fb
[13058.370801] ffffa02cf3e63e80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[13058.370805] >ffffa02cf3e63f00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[13058.370808] ^
[13058.370811] ffffa02cf3e63f80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[13058.370815] ffffa02cf3e64000: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
[13058.370817] ==================================================================
[13058.370820] Disabling lock debugging due to kernel taint
Here, we directly pass the bfqd to bfq_idle_slice_timer_body func.
--
V2->V3: rewrite the comment as suggested by Paolo Valente
V1->V2: add one comment, and add Fixes and Reported-by tag.
Fixes: aee69d78d ("block, bfq: introduce the BFQ-v0 I/O scheduler as an extra scheduler")
Acked-by: Paolo Valente <[email protected]>
Reported-by: Wang Wang <[email protected]>
Signed-off-by: Zhiqiang Liu <[email protected]>
Signed-off-by: Feilong Lin <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
|
Item_sp::Item_sp(THD *thd, Name_resolution_context *context_arg,
sp_name *name_arg) :
context(context_arg), m_name(name_arg), m_sp(NULL), func_ctx(NULL),
sp_result_field(NULL)
{
dummy_table= (TABLE*) thd->calloc(sizeof(TABLE) + sizeof(TABLE_SHARE) +
sizeof(Query_arena));
dummy_table->s= (TABLE_SHARE*) (dummy_table + 1);
/* TODO(cvicentiu) Move this sp_query_arena in the class as a direct member.
Currently it can not be done due to header include dependencies. */
sp_query_arena= (Query_arena *) (dummy_table->s + 1);
memset(&sp_mem_root, 0, sizeof(sp_mem_root));
}
| 0 |
[
"CWE-416"
] |
server
|
c02ebf3510850ba78a106be9974c94c3b97d8585
| 203,756,377,378,562,520,000,000,000,000,000,000,000 | 13 |
MDEV-24176 Preparations
1. moved fix_vcol_exprs() call to open_table()
mysql_alter_table() doesn't do lock_tables() so it cannot win from
fix_vcol_exprs() from there. Tests affected: main.default_session
2. Vanilla cleanups and comments.
|
int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
Coroutine *co;
DiscardCo rwco = {
.bs = bs,
.sector_num = sector_num,
.nb_sectors = nb_sectors,
.ret = NOT_DONE,
};
if (qemu_in_coroutine()) {
/* Fast-path if already in coroutine context */
bdrv_discard_co_entry(&rwco);
} else {
co = qemu_coroutine_create(bdrv_discard_co_entry);
qemu_coroutine_enter(co, &rwco);
while (rwco.ret == NOT_DONE) {
qemu_aio_wait();
}
}
return rwco.ret;
}
| 0 |
[
"CWE-190"
] |
qemu
|
8f4754ede56e3f9ea3fd7207f4a7c4453e59285b
| 164,747,299,551,342,460,000,000,000,000,000,000,000 | 23 |
block: Limit request size (CVE-2014-0143)
Limiting the size of a single request to INT_MAX not only fixes a
direct integer overflow in bdrv_check_request() (which would only
trigger bad behaviour with ridiculously huge images, as in close to
2^64 bytes), but can also prevent overflows in all block drivers.
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]>
|
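A hedged sketch of the kind of guard the commit message describes, with a hypothetical helper name: clamp any single request to INT_MAX bytes so that later int-based arithmetic in block drivers cannot overflow.

#include <limits.h>
#include <stdint.h>

/* Reject requests whose byte length exceeds INT_MAX (512-byte sectors). */
static int check_request_size(int64_t sector_num, int nb_sectors)
{
    if (nb_sectors < 0 || nb_sectors > INT_MAX / 512)
        return -1;   /* byte count would not fit in an int */
    if (sector_num < 0 || sector_num > INT64_MAX / 512 - nb_sectors)
        return -1;   /* offset plus length would wrap the sector space */
    return 0;
}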
static inline struct buffer *h2_get_buf(struct h2c *h2c, struct buffer *bptr)
{
struct buffer *buf = NULL;
if (likely(LIST_ISEMPTY(&h2c->buf_wait.list)) &&
unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
h2c->buf_wait.target = h2c;
h2c->buf_wait.wakeup_cb = h2_buf_available;
HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
LIST_ADDQ(&buffer_wq, &h2c->buf_wait.list);
HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
__conn_xprt_stop_recv(h2c->conn);
}
return buf;
}
| 0 |
[
"CWE-125"
] |
haproxy
|
a01f45e3ced23c799f6e78b5efdbd32198a75354
| 242,993,184,706,817,400,000,000,000,000,000,000,000 | 15 |
BUG/CRITICAL: mux-h2: re-check the frame length when PRIORITY is used
Tim Düsterhus reported a possible crash in the H2 HEADERS frame decoder
when the PRIORITY flag is present. A check is missing to ensure the 5
extra bytes needed with this flag are actually part of the frame. As per
RFC7540#4.2, let's return a connection error with code FRAME_SIZE_ERROR.
Many thanks to Tim for responsibly reporting this issue with a working
config and reproducer. This issue was assigned CVE-2018-20615.
This fix must be backported to 1.9 and 1.8.
|
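A minimal sketch of the missing check the commit describes (names hypothetical): when the PRIORITY flag is set on a HEADERS frame, the 5 bytes of stream dependency and weight must fit inside the declared frame length, otherwise the caller should close the connection with FRAME_SIZE_ERROR as RFC 7540 requires.

#include <stdint.h>

#define H2_FLAG_PRIORITY 0x20   /* HEADERS frame PRIORITY flag */

/* Returns 0 if the frame length can hold the PRIORITY fields,
 * -1 if the caller must emit a FRAME_SIZE_ERROR connection error. */
static int h2_check_headers_priority(uint8_t flags, uint32_t frame_len)
{
    if ((flags & H2_FLAG_PRIORITY) && frame_len < 5)
        return -1;
    return 0;
}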
bool CModules::OnUserCTCPMessage(CCTCPMessage& Message) {
MODHALTCHK(OnUserCTCPMessage(Message));
}
| 0 |
[
"CWE-20",
"CWE-264"
] |
znc
|
8de9e376ce531fe7f3c8b0aa4876d15b479b7311
| 22,572,828,635,844,100,000,000,000,000,000,000,000 | 3 |
Fix remote code execution and privilege escalation vulnerability.
To trigger this, need to have a user already.
Thanks to Jeriko One <[email protected]> for finding and reporting this.
CVE-2019-12816
|
Status WrapInCallOp(EagerOperation* op, EagerOperation** wrapped_op) {
DCHECK(!op->is_function());
const OpDef& opdef = OpRegistry::Global()->LookUp(op->Name())->op_def;
// Raise an error for ops which don't support wrapping yet. This includes
// ops with list inputs/outputs and ops with private attrs.
// TODO(srbs): Support list inputs/outputs.
TF_RETURN_IF_ERROR(VerifyWrappableInCallOp(opdef, op));
// Build a FunctionDef containing op as a node and register with context.
// TODO(srbs): Here we are unable to distinguish between a FunctionDef for
// a wrapped eager op and an existing user defined function registered with
// the context e.g. with something like
// @tf.function
// def __wrapped__Add(x, y):
// ...
// This can be avoided by introducing a dict in EagerContext that stores a
// mapping from the eager op's name to its unique FunctionDef name.
auto op_attrs = op->GetOpAttrs();
string fname;
TF_RETURN_IF_ERROR(BuildWrappedOpName(op, opdef, op_attrs, &fname));
if (!op->EagerContext().GetFunctionDef(fname)) {
FunctionDef fdef;
// Set signature.
TF_RETURN_IF_ERROR(
BuildWrappedOpSignature(op, opdef, fname, *fdef.mutable_signature()));
// Add node.
NodeDef* ndef = fdef.add_node_def();
ndef->set_op(op->Name());
ndef->set_name(op->Name()); // This could be anything.
const auto& signature = fdef.signature();
for (size_t i = 0; i < signature.input_arg_size(); i++) {
ndef->add_input(absl::StrCat(fdef.signature().input_arg(i).name(), ":0"));
}
// TODO(srbs): Private attrs on the op are dropped here and applied to
// the call op instead. If this causes problems we might have to copy those
// attrs to this ndef. That would require updating fname to contain a hash
// of such attributes.
for (const auto& attr : opdef.attr()) {
(*ndef->mutable_attr())[attr.name()].set_placeholder(attr.name());
}
// Set the device of this node to be the exact same one that eager mode
// would have used.
// TODO(b/200153278): Ideally we would just forward the call op's device at
// runtime but currently there is no way to do it.
ndef->set_device(op->DeviceName());
#ifdef INTEL_MKL
if (IsMKLEnabled() &&
absl::StartsWith(op->Name(), mkl_op_registry::kMklOpPrefix)) {
GetMKLNodeDef(ndef);
}
#endif // INTEL_MKL
// Set `ret` map.
TF_RETURN_IF_ERROR(
PopulateRetMap(&fdef, op_attrs, op, opdef, signature, ndef->name()));
VLOG(1) << fdef.DebugString();
TF_RETURN_IF_ERROR(op->EagerContext().AddFunctionDef(std::move(fdef)));
}
// Build the call op.
auto& ctx = op->EagerContext();
AbstractOperationPtr call_op(ctx.CreateOperation());
TF_RETURN_IF_ERROR(call_op->Reset(fname.c_str(), op->DeviceName().c_str()));
for (auto t : op->Inputs()) {
TF_RETURN_IF_ERROR(call_op->AddInput(t));
}
*wrapped_op = down_cast<EagerOperation*>(call_op.release());
// Attributes on the elementary eager operation are applied to the call op and
// to the NodeDef inside the FunctionDef. This allows us to have a single
// FunctionDef for different attribute values. When the function is
// instantiated, these attributes get forwarded to the NodeDef. This is done
// by setting the AttrValue.placeholder field for the NodeDef attrs.
(*wrapped_op)->AddAttrs(op_attrs);
return AddMixedTypeListAttrs(*wrapped_op, op_attrs, opdef);
}
| 0 |
[
"CWE-476",
"CWE-475"
] |
tensorflow
|
a5b89cd68c02329d793356bda85d079e9e69b4e7
| 17,668,001,849,021,898,000,000,000,000,000,000,000 | 75 |
Fix empty resource handle vulnerability.
Some ops that attempt to extract a resource handle from user input
can lead to nullptr dereferences. This returns an error in such
a case.
PiperOrigin-RevId: 445571938
|
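A small C sketch of the guard the commit note describes, under assumed names: validate a user-supplied resource handle before dereferencing it, and return an error instead of crashing on null.

#include <stddef.h>

struct resource_handle { void *resource; };

/* Fail cleanly on an empty handle instead of dereferencing null. */
static int lookup_resource(const struct resource_handle *h, void **out)
{
    if (h == NULL || h->resource == NULL)
        return -1;            /* invalid argument: report an error */
    *out = h->resource;
    return 0;
}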
static void ehci_work_timer(void *opaque)
{
EHCIState *ehci = opaque;
qemu_bh_schedule(ehci->async_bh);
}
| 0 |
[
"CWE-617"
] |
qemu
|
2fdb42d840400d58f2e706ecca82c142b97bcbd6
| 53,620,663,183,246,270,000,000,000,000,000,000,000 | 6 |
hw: ehci: check return value of 'usb_packet_map'
If 'usb_packet_map' fails, we should stop to process the usb
request.
Signed-off-by: Li Qiang <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Gerd Hoffmann <[email protected]>
|
rpc_C_DecryptFinal (CK_X_FUNCTION_LIST *self,
p11_rpc_message *msg)
{
CK_SESSION_HANDLE session;
CK_BYTE_PTR last_part;
CK_ULONG last_part_len;
BEGIN_CALL (DecryptFinal);
IN_ULONG (session);
IN_BYTE_BUFFER (last_part, last_part_len);
PROCESS_CALL ((self, session, last_part, &last_part_len));
OUT_BYTE_ARRAY (last_part, last_part_len);
END_CALL;
}
| 0 |
[
"CWE-190"
] |
p11-kit
|
5307a1d21a50cacd06f471a873a018d23ba4b963
| 174,885,755,419,709,900,000,000,000,000,000,000,000 | 14 |
Check for arithmetic overflows before allocating
|
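A minimal sketch of the check named in the one-line commit message above (the helper name is hypothetical): verify the element-count multiplication cannot wrap before allocating.

#include <stdint.h>
#include <stdlib.h>

/* calloc-style guard: refuse allocations whose size computation
 * would overflow size_t. */
static void *alloc_array_checked(size_t nmemb, size_t size)
{
    if (size != 0 && nmemb > SIZE_MAX / size)
        return NULL;          /* nmemb * size would overflow */
    return malloc(nmemb * size);
}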
nfsd4_encode_layoutcommit(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_layoutcommit *lcp)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (nfserr)
return nfserr;
p = xdr_reserve_space(xdr, 4);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(lcp->lc_size_chg);
if (lcp->lc_size_chg) {
p = xdr_reserve_space(xdr, 8);
if (!p)
return nfserr_resource;
p = xdr_encode_hyper(p, lcp->lc_newsize);
}
return nfs_ok;
}
| 0 |
[
"CWE-20",
"CWE-129"
] |
linux
|
f961e3f2acae94b727380c0b74e2d3954d0edf79
| 287,896,622,907,291,900,000,000,000,000,000,000,000 | 22 |
nfsd: encoders mustn't use unitialized values in error cases
In error cases, lgp->lg_layout_type may be out of bounds; so we
shouldn't be using it until after the check of nfserr.
This was seen to crash nfsd threads when the server receives a LAYOUTGET
request with a large layout type.
GETDEVICEINFO has the same problem.
Reported-by: Ari Kauppi <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Cc: [email protected]
Signed-off-by: J. Bruce Fields <[email protected]>
|
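A hedged sketch of the ordering rule from the commit message (the table and names are illustrative): check the error status before reading request fields such as the layout type, which may be out of bounds in error cases, and bounds-check the index on the success path regardless.

#include <stdio.h>

static const char *layout_names[] = { "none", "files", "blocks" };

static int encode_layout(int nfserr, unsigned int layout_type)
{
    if (nfserr)
        return nfserr;  /* layout_type may be garbage here: don't touch it */
    if (layout_type >= sizeof(layout_names) / sizeof(layout_names[0]))
        return -1;      /* defensive bounds check on the success path */
    printf("layout: %s\n", layout_names[layout_type]);
    return 0;
}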
int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
BlockDriver *drv = bs->drv;
if (!drv)
return -ENOMEDIUM;
if (!drv->bdrv_get_info)
return -ENOTSUP;
memset(bdi, 0, sizeof(*bdi));
return drv->bdrv_get_info(bs, bdi);
}
| 0 |
[
"CWE-190"
] |
qemu
|
8f4754ede56e3f9ea3fd7207f4a7c4453e59285b
| 326,931,181,366,863,370,000,000,000,000,000,000,000 | 10 |
block: Limit request size (CVE-2014-0143)
Limiting the size of a single request to INT_MAX not only fixes a
direct integer overflow in bdrv_check_request() (which would only
trigger bad behaviour with ridiculously huge images, as in close to
2^64 bytes), but can also prevent overflows in all block drivers.
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Max Reitz <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]>
|
get_property (xmlNodePtr node_ptr,
const gchar *name)
{
xmlChar *xml_s;
gchar *s;
xml_s = xmlGetProp (node_ptr, (const xmlChar *) name);
s = g_strdup ((gchar *) xml_s);
xmlFree (xml_s);
return s;
}
| 0 |
[
"CWE-295"
] |
evolution-ews
|
915226eca9454b8b3e5adb6f2fff9698451778de
| 239,558,725,220,788,400,000,000,000,000,000,000,000 | 12 |
I#27 - SSL Certificates are not validated
This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too.
Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27
|
gdm_session_set_property (GObject *object,
guint prop_id,
const GValue *value,
GParamSpec *pspec)
{
GdmSession *self;
self = GDM_SESSION (object);
switch (prop_id) {
case PROP_SESSION_TYPE:
set_session_type (self, g_value_get_string (value));
break;
case PROP_DISPLAY_NAME:
set_display_name (self, g_value_get_string (value));
break;
case PROP_DISPLAY_HOSTNAME:
set_display_hostname (self, g_value_get_string (value));
break;
case PROP_DISPLAY_DEVICE:
set_display_device (self, g_value_get_string (value));
break;
case PROP_DISPLAY_SEAT_ID:
set_display_seat_id (self, g_value_get_string (value));
break;
case PROP_USER_X11_AUTHORITY_FILE:
set_user_x11_authority_file (self, g_value_get_string (value));
break;
case PROP_DISPLAY_X11_AUTHORITY_FILE:
set_display_x11_authority_file (self, g_value_get_string (value));
break;
case PROP_DISPLAY_IS_LOCAL:
set_display_is_local (self, g_value_get_boolean (value));
break;
case PROP_DISPLAY_IS_INITIAL:
set_display_is_initial (self, g_value_get_boolean (value));
break;
case PROP_VERIFICATION_MODE:
set_verification_mode (self, g_value_get_enum (value));
break;
case PROP_ALLOWED_USER:
set_allowed_user (self, g_value_get_uint (value));
break;
case PROP_CONVERSATION_ENVIRONMENT:
set_conversation_environment (self, g_value_get_pointer (value));
break;
#ifdef ENABLE_WAYLAND_SUPPORT
case PROP_IGNORE_WAYLAND:
gdm_session_set_ignore_wayland (self, g_value_get_boolean (value));
break;
#endif
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
}
}
| 0 |
[] |
gdm
|
5ac224602f1d603aac5eaa72e1760d3e33a26f0a
| 295,055,519,069,315,430,000,000,000,000,000,000,000 | 56 |
session: disconnect signals from worker proxy when conversation is freed
We don't want an outstanding reference on the worker proxy to lead to
signal handlers getting dispatched after the conversation is freed.
https://bugzilla.gnome.org/show_bug.cgi?id=758032
|
bool is_delimiter(const char* p)
{
uint match= 0;
char* delim= delimiter;
while (*p && *p == *delim++)
{
match++;
p++;
}
return (match == delimiter_length);
}
| 0 |
[
"CWE-284",
"CWE-295"
] |
mysql-server
|
3bd5589e1a5a93f9c224badf983cd65c45215390
| 315,330,148,488,037,650,000,000,000,000,000,000,000 | 12 |
WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to unencrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options
|
static void reply_entry(fuse_req_t req, const struct fuse_entry_param *e,
int err)
{
if (!err) {
struct fuse *f = req_fuse(req);
#ifdef __SOLARIS__
/* Skip forget for negative result */
if ((fuse_reply_entry(req, e) == -ENOENT)
&& (e->ino != 0))
forget_node(f, e->ino, 1);
#else /* __SOLARIS__ */
if (fuse_reply_entry(req, e) == -ENOENT)
forget_node(f, e->ino, 1);
#endif
} else
reply_err(req, err);
}
| 0 |
[] |
ntfs-3g
|
fb28eef6f1c26170566187c1ab7dc913a13ea43c
| 289,834,989,103,630,000,000,000,000,000,000,000,000 | 17 |
Hardened the checking of directory offset requested by a readdir
When asked for the next directory entries, make sure the chunk offset
is within valid values, otherwise return no more entries in chunk.
|
static void read_commands_complete(uint8_t status, uint16_t length,
const void *param, void *user_data)
{
const struct mgmt_rp_read_commands *rp = param;
uint16_t num_commands, num_events;
size_t expected_len;
int i;
if (status != MGMT_STATUS_SUCCESS) {
error("Failed to read supported commands: %s (0x%02x)",
mgmt_errstr(status), status);
return;
}
if (length < sizeof(*rp)) {
error("Wrong size of read commands response");
return;
}
num_commands = btohs(rp->num_commands);
num_events = btohs(rp->num_events);
DBG("Number of commands: %d", num_commands);
DBG("Number of events: %d", num_events);
expected_len = sizeof(*rp) + num_commands * sizeof(uint16_t) +
num_events * sizeof(uint16_t);
if (length < expected_len) {
error("Too small reply for supported commands: (%u != %zu)",
length, expected_len);
return;
}
for (i = 0; i < num_commands; i++) {
uint16_t op = get_le16(rp->opcodes + i);
switch (op) {
case MGMT_OP_ADD_DEVICE:
DBG("enabling kernel-side connection control");
kernel_features |= KERNEL_CONN_CONTROL;
break;
case MGMT_OP_SET_BLOCKED_KEYS:
DBG("kernel supports the set_blocked_keys op");
kernel_features |= KERNEL_BLOCKED_KEYS_SUPPORTED;
break;
case MGMT_OP_SET_DEF_SYSTEM_CONFIG:
DBG("kernel supports set system confic");
kernel_features |= KERNEL_SET_SYSTEM_CONFIG;
break;
case MGMT_OP_READ_EXP_FEATURES_INFO:
DBG("kernel supports exp features");
kernel_features |= KERNEL_EXP_FEATURES;
break;
case MGMT_OP_ADD_EXT_ADV_PARAMS:
DBG("kernel supports ext adv commands");
kernel_features |= KERNEL_HAS_EXT_ADV_ADD_CMDS;
break;
case MGMT_OP_READ_CONTROLLER_CAP:
DBG("kernel supports controller cap command");
kernel_features |= KERNEL_HAS_CONTROLLER_CAP_CMD;
break;
default:
break;
}
}
for (i = 0; i < num_events; i++) {
uint16_t ev = get_le16(rp->opcodes + num_commands + i);
switch(ev) {
case MGMT_EV_CONTROLLER_RESUME:
DBG("kernel supports suspend/resume events");
kernel_features |= KERNEL_HAS_RESUME_EVT;
break;
}
}
}
| 0 |
[
"CWE-862",
"CWE-863"
] |
bluez
|
b497b5942a8beb8f89ca1c359c54ad67ec843055
| 12,475,339,949,140,296,000,000,000,000,000,000,000 | 78 |
adapter: Fix storing discoverable setting
discoverable setting shall only be store when changed via Discoverable
property and not when discovery client set it as that be considered
temporary just for the lifetime of the discovery.
|
webSocketsHasDataInBuffer(rfbClientPtr cl)
{
ws_ctx_t *wsctx = (ws_ctx_t *)cl->wsctx;
if (wsctx && wsctx->readbuflen)
return TRUE;
return (cl->sslctx && rfbssl_pending(cl) > 0);
}
| 1 |
[
"CWE-787"
] |
libvncserver
|
aac95a9dcf4bbba87b76c72706c3221a842ca433
| 290,908,885,960,268,020,000,000,000,000,000,000,000 | 9 |
fix overflow and refactor websockets decode (Hybi)
fix critical heap-based buffer overflow which allowed easy modification
of a return address via an overwritten function pointer
fix bug causing connections to fail due a "one websocket frame = one
ws_read" assumption, which failed with LibVNCServer-0.9.11
refactor websocket Hybi decode to use a simple state machine for
decoding of websocket frames
|
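A toy state machine in the spirit of the refactor this commit describes (the frame format is simplified to 2-byte headers and 7-bit payload lengths; all names are hypothetical): decode progress lives in a persistent decoder object, so a frame split across several reads no longer breaks the "one websocket frame = one ws_read" assumption.

#include <stddef.h>

enum ws_state { WS_HDR, WS_PAYLOAD };

struct ws_decoder {
    enum ws_state state;
    size_t need;              /* bytes still missing in this state */
};

static void ws_init(struct ws_decoder *d)
{
    d->state = WS_HDR;
    d->need = 2;              /* minimal fixed header */
}

/* Consume len bytes; state survives across calls, so partial frames
 * are resumed instead of mis-parsed. Returns bytes consumed. */
static size_t ws_feed(struct ws_decoder *d, const unsigned char *buf,
                      size_t len)
{
    size_t used = 0;
    while (used < len) {
        size_t take = (len - used < d->need) ? len - used : d->need;
        used += take;
        d->need -= take;
        if (d->need != 0)
            break;            /* ran out of input mid-state */
        if (d->state == WS_HDR) {
            size_t plen = buf[used - 1] & 0x7f;  /* 7-bit length only */
            if (plen > 0) {
                d->state = WS_PAYLOAD;
                d->need = plen;
            } else {
                d->need = 2;  /* empty frame: expect the next header */
            }
        } else {
            d->state = WS_HDR;
            d->need = 2;
        }
    }
    return used;
}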
TRIO_PRIVATE void TrioOutStreamStringDynamic TRIO_ARGS2((self, output), trio_class_t* self,
int output)
{
assert(VALID(self));
assert(VALID(self->location));
if (self->error == 0)
{
trio_xstring_append_char((trio_string_t*)self->location, (char)output);
self->actually.committed++;
}
/* The processed variable must always be increased */
self->processed++;
}
| 0 |
[
"CWE-190",
"CWE-125"
] |
FreeRDP
|
05cd9ea2290d23931f615c1b004d4b2e69074e27
| 159,896,294,443,102,140,000,000,000,000,000,000,000 | 14 |
Fixed TrioParse and trio_length limits.
CVE-2020-4030 thanks to @antonio-morales for finding this.
|
std::ostream& indent(std::ostream& os) { return generator_->indent(os); }
| 0 |
[
"CWE-20"
] |
thrift
|
cfaadcc4adcfde2a8232c62ec89870b73ef40df1
| 132,281,798,215,330,130,000,000,000,000,000,000,000 | 1 |
THRIFT-3231 CPP: Limit recursion depth to 64
Client: cpp
Patch: Ben Craig <[email protected]>
|
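A self-contained C sketch of the mitigation this commit names (the real patch lives in Thrift's C++ protocol code; this is only illustrative): cap recursion depth while walking nested input so maliciously deep nesting cannot exhaust the stack.

#define MAX_RECURSION_DEPTH 64

/* Parse balanced '(' ')' nesting, refusing to descend past the cap.
 * Returns 0 on success, -1 on depth overflow or unbalanced input. */
static int parse_nested(const char **p, int depth)
{
    if (depth > MAX_RECURSION_DEPTH)
        return -1;
    while (**p) {
        if (**p == '(') {
            ++*p;
            if (parse_nested(p, depth + 1) < 0)
                return -1;
        } else if (**p == ')') {
            ++*p;
            return depth > 0 ? 0 : -1;  /* ')' at top level is unbalanced */
        } else {
            ++*p;             /* payload byte */
        }
    }
    return depth == 0 ? 0 : -1;
}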
_PUBLIC_ void dump_data_dbgc(int dbgc_class, int level, const uint8_t *buf, int len)
{
struct debug_channel_level dcl = { dbgc_class, level };
if (!DEBUGLVLC(dbgc_class, level)) {
return;
}
dump_data_cb(buf, len, false, debugadd_channel_cb, &dcl);
}
| 0 |
[] |
samba
|
8eae8d28bce2c3f6a323d3dc48ed10c2e6bb1ba5
| 126,149,538,374,971,140,000,000,000,000,000,000,000 | 9 |
CVE-2013-4476: lib-util: add file_check_permissions()
Bug: https://bugzilla.samba.org/show_bug.cgi?id=10234
Signed-off-by: Björn Baumbach <[email protected]>
Reviewed-by: Stefan Metzmacher <[email protected]>
|
static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
u64 start, u64 len)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
struct extent_map *em;
struct btrfs_key ins;
u64 alloc_hint;
int ret;
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return ERR_CAST(trans);
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
alloc_hint = get_extent_allocation_hint(inode, start, len);
ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0,
alloc_hint, &ins, 1);
if (ret) {
em = ERR_PTR(ret);
goto out;
}
em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
ins.offset, ins.offset, 0);
if (IS_ERR(em))
goto out;
ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
ins.offset, ins.offset, 0);
if (ret) {
btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
em = ERR_PTR(ret);
}
out:
btrfs_end_transaction(trans, root);
return em;
}
| 0 |
[
"CWE-310"
] |
linux-2.6
|
9c52057c698fb96f8f07e7a4bcf4801a092bda89
| 67,498,275,733,931,470,000,000,000,000,000,000,000 | 39 |
Btrfs: fix hash overflow handling
The handling for directory crc hash overflows was fairly obscure,
split_leaf returns EOVERFLOW when we try to extend the item and that is
supposed to bubble up to userland. For a while it did so, but along the
way we added better handling of errors and forced the FS readonly if we
hit IO errors during the directory insertion.
Along the way, we started testing only for EEXIST and the EOVERFLOW case
was dropped. The end result is that we may force the FS readonly if we
catch a directory hash bucket overflow.
This fixes a few problem spots. First I add tests for EOVERFLOW in the
places where we can safely just return the error up the chain.
btrfs_rename is harder though, because it tries to insert the new
directory item only after it has already unlinked anything the rename
was going to overwrite. Rather than adding very complex logic, I added
a helper to test for the hash overflow case early while it is still safe
to bail out.
Snapshot and subvolume creation had a similar problem, so they are using
the new helper now too.
Signed-off-by: Chris Mason <[email protected]>
Reported-by: Pascal Junod <[email protected]>
|
static void tg3_tx(struct tg3_napi *tnapi)
{
struct tg3 *tp = tnapi->tp;
u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
u32 sw_idx = tnapi->tx_cons;
struct netdev_queue *txq;
int index = tnapi - tp->napi;
unsigned int pkts_compl = 0, bytes_compl = 0;
if (tg3_flag(tp, ENABLE_TSS))
index--;
txq = netdev_get_tx_queue(tp->dev, index);
while (sw_idx != hw_idx) {
struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
struct sk_buff *skb = ri->skb;
int i, tx_bug = 0;
if (unlikely(skb == NULL)) {
tg3_tx_recover(tp);
return;
}
if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
struct skb_shared_hwtstamps timestamp;
u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
skb_tstamp_tx(skb, &timestamp);
}
pci_unmap_single(tp->pdev,
dma_unmap_addr(ri, mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
ri->skb = NULL;
while (ri->fragmented) {
ri->fragmented = false;
sw_idx = NEXT_TX(sw_idx);
ri = &tnapi->tx_buffers[sw_idx];
}
sw_idx = NEXT_TX(sw_idx);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
ri = &tnapi->tx_buffers[sw_idx];
if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
tx_bug = 1;
pci_unmap_page(tp->pdev,
dma_unmap_addr(ri, mapping),
skb_frag_size(&skb_shinfo(skb)->frags[i]),
PCI_DMA_TODEVICE);
while (ri->fragmented) {
ri->fragmented = false;
sw_idx = NEXT_TX(sw_idx);
ri = &tnapi->tx_buffers[sw_idx];
}
sw_idx = NEXT_TX(sw_idx);
}
pkts_compl++;
bytes_compl += skb->len;
dev_kfree_skb(skb);
if (unlikely(tx_bug)) {
tg3_tx_recover(tp);
return;
}
}
netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
tnapi->tx_cons = sw_idx;
/* Need to make the tx_cons update visible to tg3_start_xmit()
* before checking for netif_queue_stopped(). Without the
* memory barrier, there is a small possibility that tg3_start_xmit()
* will miss it and cause the queue to be stopped forever.
*/
smp_mb();
if (unlikely(netif_tx_queue_stopped(txq) &&
(tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
__netif_tx_lock(txq, smp_processor_id());
if (netif_tx_queue_stopped(txq) &&
(tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
netif_tx_wake_queue(txq);
__netif_tx_unlock(txq);
}
}
| 0 |
[
"CWE-476",
"CWE-119"
] |
linux
|
715230a44310a8cf66fbfb5a46f9a62a9b2de424
| 151,815,028,319,510,460,000,000,000,000,000,000,000 | 99 |
tg3: fix length overflow in VPD firmware parsing
Commit 184b89044fb6e2a74611dafa69b1dce0d98612c6 ("tg3: Use VPD fw version
when present") introduced VPD parsing that contained a potential length
overflow.
Limit the hardware's reported firmware string length (max 255 bytes) to
stay inside the driver's firmware string length (32 bytes). On overflow,
truncate the formatted firmware string instead of potentially overwriting
portions of the tg3 struct.
http://cansecwest.com/slides/2013/PrivateCore%20CSW%202013.pdf
Signed-off-by: Kees Cook <[email protected]>
Reported-by: Oded Horovitz <[email protected]>
Reported-by: Brad Spengler <[email protected]>
Cc: [email protected]
Cc: Matt Carlson <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
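A minimal sketch of the clamp the commit message describes (the 32-byte buffer size is taken from the text; names are hypothetical): truncate the hardware-reported firmware string, which can claim up to 255 bytes, to the driver's destination instead of overwriting adjacent struct fields.

#include <string.h>

#define FW_VER_LEN 32          /* driver-side buffer, per the commit text */

static void copy_fw_version(char dst[FW_VER_LEN],
                            const char *vpd, unsigned int vpd_len)
{
    unsigned int n = vpd_len;
    if (n > FW_VER_LEN - 1)
        n = FW_VER_LEN - 1;    /* truncate instead of overflowing */
    memcpy(dst, vpd, n);
    dst[n] = '\0';
}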
/* Abort a zerocopy operation and revert zckey on error in send syscall */
static inline void skb_zcopy_abort(struct sk_buff *skb)
{
struct ubuf_info *uarg = skb_zcopy(skb);
if (uarg) {
sock_zerocopy_put_abort(uarg);
skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
}
| 0 |
[
"CWE-20"
] |
linux
|
2b16f048729bf35e6c28a40cbfad07239f9dcd90
| 101,309,686,420,611,740,000,000,000,000,000,000,000 | 9 |
net: create skb_gso_validate_mac_len()
If you take a GSO skb, and split it into packets, will the MAC
length (L2 + L3 + L4 headers + payload) of those packets be small
enough to fit within a given length?
Move skb_gso_mac_seglen() to skbuff.h with other related functions
like skb_gso_network_seglen() so we can use it, and then create
skb_gso_validate_mac_len to do the full calculation.
Signed-off-by: Daniel Axtens <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
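A hedged sketch of the calculation the commit message describes (simplified, hypothetical signature): given a GSO super-packet's header length, payload length and segment size, check that the largest resulting segment's MAC length stays within a limit.

/* The largest segment carries hdr_len plus at most gso_size payload
 * bytes (or the whole payload when the packet is not segmented). */
static int gso_mac_len_fits(unsigned int hdr_len, unsigned int payload_len,
                            unsigned int gso_size, unsigned int limit)
{
    unsigned int max_payload =
        (gso_size != 0 && gso_size < payload_len) ? gso_size : payload_len;
    return hdr_len + max_payload <= limit;
}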
EXPORTED void xml_partial_response(struct transaction_t *txn,
xmlDocPtr doc, xmlNodePtr node,
unsigned level, xmlBufferPtr *buf)
{
const char *eol = "\n";
unsigned n;
if (!config_httpprettytelemetry) {
level = 0;
eol = "";
}
/* Start with clean buffer */
if (!*buf) *buf = xmlBufferCreate();
if (node) {
/* Add leading indent to buffer */
for (n = 0; n < level * MARKUP_INDENT; n++) xmlBufferCCat(*buf, " ");
/* Dump XML node into buffer */
xmlNodeDump(*buf, doc, node, level, config_httpprettytelemetry);
/* Add trailing EOL to buffer */
xmlBufferCCat(*buf, eol);
}
else {
/* End of chunked XML response */
xmlNodePtr root = xmlDocGetRootElement(doc);
/* Add close of root element to buffer */
xmlBufferCCat(*buf, "</");
if (root->ns->prefix) {
xmlBufferCat(*buf, root->ns->prefix);
xmlBufferCCat(*buf, ":");
}
xmlBufferCat(*buf, root->name);
xmlBufferCCat(*buf, ">");
/* Add trailing EOL to buffer */
xmlBufferCCat(*buf, eol);
}
if (txn) {
/* Output the XML buffer */
write_body(0, txn,
(char *) xmlBufferContent(*buf), xmlBufferLength(*buf));
/* Reset the buffer for next chunk */
xmlBufferEmpty(*buf);
}
}
| 0 |
[] |
cyrus-imapd
|
602f12ed2af0a49ac4a58affbfea57d0fc23dea5
| 214,432,835,684,034,630,000,000,000,000,000,000,000 | 51 |
httpd.c: only allow reuse of auth creds on a persistent connection against a backend server in a Murder
|
static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
struct hci_conn *conn;
BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
if (!ev->status)
return;
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
if (!conn)
goto unlock;
conn->le_tx_phy = ev->tx_phy;
conn->le_rx_phy = ev->rx_phy;
unlock:
hci_dev_unlock(hdev);
}
| 0 |
[
"CWE-290"
] |
linux
|
3ca44c16b0dcc764b641ee4ac226909f5c421aa3
| 223,301,064,331,502,130,000,000,000,000,000,000,000 | 22 |
Bluetooth: Consolidate encryption handling in hci_encrypt_cfm
This makes hci_encrypt_cfm calls hci_connect_cfm in case the connection
state is BT_CONFIG so callers don't have to check the state.
Signed-off-by: Luiz Augusto von Dentz <[email protected]>
Signed-off-by: Marcel Holtmann <[email protected]>
|
xfs_attr_sf_findname(
struct xfs_da_args *args,
struct xfs_attr_sf_entry **sfep,
unsigned int *basep)
{
struct xfs_attr_shortform *sf;
struct xfs_attr_sf_entry *sfe;
unsigned int base = sizeof(struct xfs_attr_sf_hdr);
int size = 0;
int end;
int i;
sf = (struct xfs_attr_shortform *)args->dp->i_afp->if_u1.if_data;
sfe = &sf->list[0];
end = sf->hdr.count;
for (i = 0; i < end; sfe = XFS_ATTR_SF_NEXTENTRY(sfe),
base += size, i++) {
size = XFS_ATTR_SF_ENTSIZE(sfe);
if (!xfs_attr_match(args, sfe->namelen, sfe->nameval,
sfe->flags))
continue;
break;
}
if (sfep != NULL)
*sfep = sfe;
if (basep != NULL)
*basep = base;
if (i == end)
return -ENOATTR;
return -EEXIST;
}
| 0 |
[
"CWE-131"
] |
linux
|
f4020438fab05364018c91f7e02ebdd192085933
| 113,835,584,261,409,200,000,000,000,000,000,000,000 | 34 |
xfs: fix boundary test in xfs_attr_shortform_verify
The boundary test for the fixed-offset parts of xfs_attr_sf_entry in
xfs_attr_shortform_verify is off by one, because the variable array
at the end is defined as nameval[1] not nameval[].
Hence we need to subtract 1 from the calculation.
This can be shown by:
# touch file
# setfattr -n root.a file
and verifications will fail when it's written to disk.
This only matters for a last attribute which has a single-byte name
and no value, otherwise the combination of namelen & valuelen will
push endp further out and this test won't fail.
Fixes: 1e1bbd8e7ee06 ("xfs: create structure verifier function for shortform xattrs")
Signed-off-by: Eric Sandeen <[email protected]>
Reviewed-by: Darrick J. Wong <[email protected]>
Signed-off-by: Darrick J. Wong <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
|
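A compact sketch of the off-by-one the commit explains (struct trimmed to the essentials): when a struct declares its trailing array as nameval[1] rather than nameval[], sizeof the struct already counts one byte of the name, so the fixed-offset boundary check must subtract 1.

#include <stddef.h>

struct sf_entry {
    unsigned char namelen;
    unsigned char valuelen;
    unsigned char flags;
    unsigned char nameval[1];   /* name bytes, then value bytes */
};

/* Check that the fixed part of the entry lies before endp; the "- 1"
 * compensates for the single nameval byte counted in sizeof(*sfe). */
static int fixed_part_fits(const char *endp, const struct sf_entry *sfe)
{
    return (const char *)sfe + sizeof(*sfe) - 1 <= endp;
}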
int Monitor::sanitize_options()
{
int r = 0;
// mon_lease must be greater than mon_lease_renewal; otherwise we
// may incur in leases expiring before they are renewed.
if (g_conf->mon_lease_renew_interval_factor >= 1.0) {
clog->error() << "mon_lease_renew_interval_factor ("
<< g_conf->mon_lease_renew_interval_factor
<< ") must be less than 1.0";
r = -EINVAL;
}
// mon_lease_ack_timeout must be greater than mon_lease to make sure we've
// got time to renew the lease and get an ack for it. Having both options
with the same value, for a given small value, could mean timing out if
// the monitors happened to be overloaded -- or even under normal load for
// a small enough value.
if (g_conf->mon_lease_ack_timeout_factor <= 1.0) {
clog->error() << "mon_lease_ack_timeout_factor ("
<< g_conf->mon_lease_ack_timeout_factor
<< ") must be greater than 1.0";
r = -EINVAL;
}
return r;
}
| 0 |
[
"CWE-287",
"CWE-284"
] |
ceph
|
5ead97120e07054d80623dada90a5cc764c28468
| 36,159,886,365,343,160,000,000,000,000,000,000,000 | 27 |
auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random()
|
void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
{
pgio->pg_ops = &nfs_pageio_write_ops;
pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
}
| 0 |
[] |
linux
|
c7559663e42f4294ffe31fe159da6b6a66b35d61
| 125,529,075,134,751,690,000,000,000,000,000,000,000 | 5 |
NFS: Allow nfs_updatepage to extend a write under additional circumstances
Currently nfs_updatepage allows a write to be extended to cover a full
page only if we don't have a byte range lock on the file... but if
we have a write delegation on the file or if we have the whole file
locked for writing then we should be allowed to extend the write as
well.
Signed-off-by: Scott Mayhew <[email protected]>
[Trond: fix up call to nfs_have_delegation()]
Signed-off-by: Trond Myklebust <[email protected]>
|
static void ntlm_compute_channel_bindings(NTLM_CONTEXT* context)
{
WINPR_DIGEST_CTX* md5;
BYTE* ChannelBindingToken;
UINT32 ChannelBindingTokenLength;
SEC_CHANNEL_BINDINGS* ChannelBindings;
ZeroMemory(context->ChannelBindingsHash, WINPR_MD5_DIGEST_LENGTH);
ChannelBindings = context->Bindings.Bindings;
if (!ChannelBindings)
return;
if (!(md5 = winpr_Digest_New()))
return;
if (!winpr_Digest_Init(md5, WINPR_MD_MD5))
goto out;
ChannelBindingTokenLength = context->Bindings.BindingsLength - sizeof(SEC_CHANNEL_BINDINGS);
ChannelBindingToken = &((BYTE*)ChannelBindings)[ChannelBindings->dwApplicationDataOffset];
if (!ntlm_md5_update_uint32_be(md5, ChannelBindings->dwInitiatorAddrType))
goto out;
if (!ntlm_md5_update_uint32_be(md5, ChannelBindings->cbInitiatorLength))
goto out;
if (!ntlm_md5_update_uint32_be(md5, ChannelBindings->dwAcceptorAddrType))
goto out;
if (!ntlm_md5_update_uint32_be(md5, ChannelBindings->cbAcceptorLength))
goto out;
if (!ntlm_md5_update_uint32_be(md5, ChannelBindings->cbApplicationDataLength))
goto out;
if (!winpr_Digest_Update(md5, (void*)ChannelBindingToken, ChannelBindingTokenLength))
goto out;
if (!winpr_Digest_Final(md5, context->ChannelBindingsHash, WINPR_MD5_DIGEST_LENGTH))
goto out;
out:
winpr_Digest_Free(md5);
}
| 0 |
[
"CWE-190",
"CWE-125"
] |
FreeRDP
|
58a3122250d54de3a944c487776bcd4d1da4721e
| 137,215,613,271,705,250,000,000,000,000,000,000,000 | 45 |
Fixed OOB read in ntlm_av_pair_get
CVE-2020-11097 thanks to @antonio-morales for finding this.
|
void vt_set_led_state(int console, int leds)
{
struct kbd_struct *kb = kbd_table + console;
setledstate(kb, leds);
}
| 0 |
[
"CWE-416"
] |
linux
|
6ca03f90527e499dd5e32d6522909e2ad390896b
| 237,864,088,940,456,570,000,000,000,000,000,000,000 | 5 |
vt: keyboard, simplify vt_kdgkbsent
Use 'strlen' of the string, add one for NUL terminator and simply do
'copy_to_user' instead of the explicit 'for' loop. This makes the
KDGKBSENT case more compact.
The only thing we need to take care about is NULL 'func_table[i]'. Use
an empty string in that case.
The original check for overflow could never trigger as the func_buf
strings are always shorter or equal to 'struct kbsentry's.
Cc: <[email protected]>
Signed-off-by: Jiri Slaby <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
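A userspace-flavoured sketch of the simplification the commit describes (copy_to_user replaced by memcpy; names hypothetical): take strlen of the string, substitute an empty string for a NULL table slot, and copy in one call instead of an explicit byte-wise loop.

#include <string.h>

/* Copy the keyboard string (or "" for a NULL slot) into dst, clamped
 * to dstlen. Returns the number of bytes copied, NUL included. */
static size_t copy_kb_string(char *dst, size_t dstlen, const char *entry)
{
    const char *s = entry ? entry : "";
    size_t len = strlen(s) + 1;
    if (len > dstlen)
        len = dstlen;
    memcpy(dst, s, len);
    if (len > 0)
        dst[len - 1] = '\0';   /* keep the result terminated when clamped */
    return len;
}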
static void vmci_transport_qp_resumed_cb(u32 sub_id,
const struct vmci_event_data *e_data,
void *client_data)
{
vsock_for_each_connected_socket(vmci_transport_handle_detach);
}
| 0 |
[
"CWE-20",
"CWE-269"
] |
linux
|
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
| 12,982,422,916,949,037,000,000,000,000,000,000,000 | 6 |
net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
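A compact sketch of the contract the commit message lays out (structures trimmed to the essentials): the generic layer zeroes msg_namelen before calling the handler, and only a handler that actually fills msg_name sets the length, so uninitialized kernel memory is never copied out.

#include <string.h>

struct msghdr_lite {
    void *msg_name;
    int   msg_namelen;
};

/* Generic layer: start from 0 so "no address" is the default. */
static void prepare_recvmsg(struct msghdr_lite *msg)
{
    msg->msg_namelen = 0;
}

/* A handler that has a peer address reports exactly how much it wrote. */
static void handler_fill_name(struct msghdr_lite *msg,
                              const void *addr, int addrlen)
{
    if (msg->msg_name != NULL && addr != NULL) {
        memcpy(msg->msg_name, addr, (size_t)addrlen);
        msg->msg_namelen = addrlen;
    }
}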
static void init_params(link_ctx *ctx)
{
if (!ctx->params) {
ctx->params = apr_table_make(ctx->pool, 5);
}
else {
apr_table_clear(ctx->params);
}
}
| 0 |
[
"CWE-444"
] |
mod_h2
|
b8a8c5061eada0ce3339b24ba1d587134552bc0c
| 98,222,799,765,662,070,000,000,000,000,000,000,000 | 9 |
* Removing support for abandoned draft of http-wg regarding cache-digests.
|
void audit_log_d_path(struct audit_buffer *ab, const char *prefix,
const struct path *path)
{
char *p, *pathname;
if (prefix)
audit_log_format(ab, "%s", prefix);
/* We will allow 11 spaces for ' (deleted)' to be appended */
pathname = kmalloc(PATH_MAX+11, ab->gfp_mask);
if (!pathname) {
audit_log_string(ab, "<no_memory>");
return;
}
p = d_path(path, pathname, PATH_MAX+11);
if (IS_ERR(p)) { /* Should never happen since we send PATH_MAX */
/* FIXME: can we save some information here? */
audit_log_string(ab, "<too_long>");
} else
audit_log_untrustedstring(ab, p);
kfree(pathname);
}
| 0 |
[
"CWE-264"
] |
net
|
90f62cf30a78721641e08737bda787552428061e
| 41,946,783,266,828,330,000,000,000,000,000,000,000 | 22 |
net: Use netlink_ns_capable to verify the permisions of netlink messages
It is possible by passing a netlink socket to a more privileged
executable and then to fool that executable into writing to the socket
data that happens to be valid netlink message to do something that
privileged executable did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
int ClientHandler::on_read() {
if (rb_.chunk_avail()) {
auto rv = on_read_(*this);
if (rv != 0) {
return rv;
}
}
conn_.handle_tls_pending_read();
return 0;
}
| 0 |
[] |
nghttp2
|
95efb3e19d174354ca50c65d5d7227d92bcd60e1
| 134,877,380,628,376,070,000,000,000,000,000,000,000 | 10 |
Don't read too greedily
|
bool ha_maria::is_crashed() const
{
return (file->s->state.changed & (STATE_CRASHED_FLAGS | STATE_MOVED) ||
(my_disable_locking && file->s->state.open_count));
}
| 0 |
[
"CWE-400"
] |
server
|
9e39d0ae44595dbd1570805d97c9c874778a6be8
| 43,349,032,947,224,620,000,000,000,000,000,000,000 | 5 |
MDEV-25787 Bug report: crash on SELECT DISTINCT thousands_blob_fields
fix a debug assert to account for not opened temp tables
|
int ha_myisam::end_bulk_insert()
{
mi_end_bulk_insert(file);
int err=mi_extra(file, HA_EXTRA_NO_CACHE, 0);
if (!err)
{
if (can_enable_indexes)
{
/*
Truncate the table when enable index operation is killed.
After truncating the table we don't need to enable the
indexes, because the last repair operation is aborted after
setting the indexes as active and trying to recreate them.
*/
if (((err= enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE)) != 0) &&
current_thd->killed)
{
delete_all_rows();
/* not crashed, despite being killed during repair */
file->s->state.changed&= ~(STATE_CRASHED|STATE_CRASHED_ON_REPAIR);
}
}
}
return err;
}
| 0 |
[
"CWE-362"
] |
mysql-server
|
4e5473862e6852b0f3802b0cd0c6fa10b5253291
| 1,178,565,387,519,834,000,000,000,000,000,000,000 | 26 |
Bug#24388746: PRIVILEGE ESCALATION AND RACE CONDITION USING CREATE TABLE
During REPAIR TABLE of a MyISAM table, a temporary data file (.TMD)
is created. When repair finishes, this file is renamed to the original
.MYD file. The problem was that during this rename, we copied the
stats from the old file to the new file with chmod/chown. If a user
managed to replace the temporary file before chmod/chown was executed,
it was possible to get an arbitrary file with the privileges of the
mysql user.
This patch fixes the problem by not copying stats from the old
file to the new file. This is not needed as the new file was
created with the correct stats. This fix only changes server
behavior - external utilities such as myisamchk still does
chmod/chown.
No test case provided since the problem involves synchronization
with file system operations.
|
bgp_attr_aggregate_intern (struct bgp *bgp, u_char origin,
struct aspath *aspath,
struct community *community, int as_set)
{
struct attr attr;
struct attr *new;
struct attr_extra *attre;
memset (&attr, 0, sizeof (struct attr));
attre = bgp_attr_extra_get (&attr);
/* Origin attribute. */
attr.origin = origin;
attr.flag |= ATTR_FLAG_BIT (BGP_ATTR_ORIGIN);
/* AS path attribute. */
if (aspath)
attr.aspath = aspath_intern (aspath);
else
attr.aspath = aspath_empty ();
attr.flag |= ATTR_FLAG_BIT (BGP_ATTR_AS_PATH);
/* Next hop attribute. */
attr.flag |= ATTR_FLAG_BIT (BGP_ATTR_NEXT_HOP);
if (community)
{
attr.community = community;
attr.flag |= ATTR_FLAG_BIT (BGP_ATTR_COMMUNITIES);
}
attre->weight = BGP_ATTR_DEFAULT_WEIGHT;
#ifdef HAVE_IPV6
attre->mp_nexthop_len = IPV6_MAX_BYTELEN;
#endif
if (! as_set)
attr.flag |= ATTR_FLAG_BIT (BGP_ATTR_ATOMIC_AGGREGATE);
attr.flag |= ATTR_FLAG_BIT (BGP_ATTR_AGGREGATOR);
if (CHECK_FLAG (bgp->config, BGP_CONFIG_CONFEDERATION))
attre->aggregator_as = bgp->confed_id;
else
attre->aggregator_as = bgp->as;
attre->aggregator_addr = bgp->router_id;
new = bgp_attr_intern (&attr);
bgp_attr_extra_free (&attr);
aspath_unintern (&new->aspath);
return new;
}
| 0 |
[] |
quagga
|
8794e8d229dc9fe29ea31424883433d4880ef408
| 231,998,977,228,815,600,000,000,000,000,000,000,000 | 50 |
bgpd: Fix regression in args consolidation, total should be inited from args
* bgp_attr.c: (bgp_attr_unknown) total should be initialised from the args.
|
static uint nr_of_decimals(const char *str, const char *end)
{
const char *decimal_point;
/* Find position for '.' */
for (;;)
{
if (str == end)
return 0;
if (*str == 'e' || *str == 'E')
return NOT_FIXED_DEC;
if (*str++ == '.')
break;
}
decimal_point= str;
for ( ; str < end && my_isdigit(system_charset_info, *str) ; str++)
;
if (str < end && (*str == 'e' || *str == 'E'))
return NOT_FIXED_DEC;
/*
QQ:
The number of decimal digits in fact should be (str - decimal_point - 1).
But it seems the result of nr_of_decimals() is never used!
In case of 'e' and 'E' nr_of_decimals returns NOT_FIXED_DEC.
In case if there is no 'e' or 'E' parser code in sql_yacc.yy
never calls Item_float::Item_float() - it creates Item_decimal instead.
The only piece of code where we call Item_float::Item_float(str, len)
without having 'e' or 'E' is item_xmlfunc.cc, but this Item_float
never appears in metadata itself. Changing the code to return
(str - decimal_point - 1) does not make any changes in the test results.
This should be addressed somehow.
Looks like a reminder from before real DECIMAL times.
*/
return (uint) (str - decimal_point);
}
| 0 |
[] |
server
|
b000e169562697aa072600695d4f0c0412f94f4f
| 142,851,890,504,370,220,000,000,000,000,000,000,000 | 38 |
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST, COL), NAME_CONST('NAME', NULL))
based on:
commit f7316aa0c9a
Author: Ajo Robert <[email protected]>
Date: Thu Aug 24 17:03:21 2017 +0530
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST,
COL), NAME_CONST('NAME', NULL))
Backport of Bug#19143243 fix.
NAME_CONST item can return NULL_ITEM type in case of incorrect arguments.
NULL_ITEM has special processing in Item_func_in function.
In Item_func_in::fix_length_and_dec an array of possible comparators is
created. Since NAME_CONST function has NULL_ITEM type, corresponding
array element is empty. Then NAME_CONST is wrapped to ITEM_CACHE.
ITEM_CACHE can not return proper type(NULL_ITEM) in Item_func_in::val_int(),
so the NULL_ITEM is attempted compared with an empty comparator.
The fix is to disable the caching of Item_name_const item.
|
static int jp2_cdef_getdata(jp2_box_t *box, jas_stream_t *in)
{
jp2_cdef_t *cdef = &box->data.cdef;
jp2_cdefchan_t *chan;
unsigned int channo;
cdef->ents = 0;
if (jp2_getuint16(in, &cdef->numchans)) {
return -1;
}
if (!(cdef->ents = jas_alloc2(cdef->numchans, sizeof(jp2_cdefchan_t)))) {
return -1;
}
for (channo = 0; channo < cdef->numchans; ++channo) {
chan = &cdef->ents[channo];
if (jp2_getuint16(in, &chan->channo) || jp2_getuint16(in, &chan->type) ||
jp2_getuint16(in, &chan->assoc)) {
return -1;
}
}
return 0;
}
| 0 |
[
"CWE-476"
] |
jasper
|
e96fc4fdd525fa0ede28074a7e2b1caf94b58b0d
| 43,520,944,220,047,180,000,000,000,000,000,000,000 | 21 |
Fixed bugs due to uninitialized data in the JP2 decoder.
Also, added some comments marking I/O stream interfaces that probably
need to be changed (in the long term) to fix integer overflow problems.
|
static gboolean verify_control_frame_crc(tvbuff_t * tvb, packet_info * pinfo, proto_item * pi, guint16 frame_crc)
{
guint8 crc = 0;
guint8 * data = NULL;
/* Get data. */
data = (guint8 *)tvb_memdup(wmem_packet_scope(), tvb, 0, tvb_reported_length(tvb));
/* Include only FT flag bit in CRC calculation. */
data[0] = data[0] & 1;
/* Calculate crc7 sum. */
crc = crc7update(0, data, tvb_reported_length(tvb));
crc = crc7finalize(crc); /* finalize crc */
if (frame_crc == crc) {
proto_item_append_text(pi, " [correct]");
return TRUE;
} else {
proto_item_append_text(pi, " [incorrect, should be 0x%x]", crc);
expert_add_info(pinfo, pi, &ei_fp_bad_header_checksum);
return FALSE;
}
}
| 0 |
[
"CWE-20"
] |
wireshark
|
7d7190695ce2ff269fdffb04e87139995cde21f4
| 42,031,910,621,840,980,000,000,000,000,000,000,000 | 20 |
UMTS_FP: fix handling reserved C/T value
The spec puts the reserved value at 0xf but our internal table has 'unknown' at
0; since all the other values seem to be offset-by-one, just take the modulus
0xf to avoid running off the end of the table.
Bug: 12191
Change-Id: I83c8fb66797bbdee52a2246fb1eea6e37cbc7eb0
Reviewed-on: https://code.wireshark.org/review/15722
Reviewed-by: Evan Huus <[email protected]>
Petri-Dish: Evan Huus <[email protected]>
Tested-by: Petri Dish Buildbot <[email protected]>
Reviewed-by: Michael Mann <[email protected]>
|
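A tiny sketch of the table fix the commit describes (the names and channel labels are made up): the wire format reserves C/T value 0xf while the local table keeps "unknown" at index 0, so reducing the value modulo 0xf folds the reserved value back to "unknown" instead of reading past the end of the table.

static const char *ct_names[15] = {
    "unknown", "ch1",  "ch2",  "ch3",  "ch4",
    "ch5",     "ch6",  "ch7",  "ch8",  "ch9",
    "ch10",    "ch11", "ch12", "ch13", "ch14"
};

static const char *ct_to_name(unsigned int ct_wire)
{
    return ct_names[ct_wire % 0xf];   /* 0xf (reserved) folds to index 0 */
}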
void ClientConnectionImpl::onEncodeHeaders(const HeaderMap& headers) {
if (headers.Method()->value() == Headers::get().MethodValues.Head.c_str()) {
pending_responses_.back().head_request_ = true;
}
}
| 0 |
[
"CWE-400",
"CWE-703"
] |
envoy
|
afc39bea36fd436e54262f150c009e8d72db5014
| 124,144,054,743,829,520,000,000,000,000,000,000,000 | 5 |
Track byteSize of HeaderMap internally.
Introduces a cached byte size updated internally in HeaderMap. The value
is stored as an optional, and is cleared whenever a non-const pointer or
reference to a HeaderEntry is accessed. The cached value can be set with
refreshByteSize() which performs an iteration over the HeaderMap to sum
the size of each key and value in the HeaderMap.
Signed-off-by: Asra Ali <[email protected]>
|
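A C sketch of the caching pattern this commit describes (Envoy's real HeaderMap is C++; everything here is illustrative): keep an optional byte-size total, invalidate it whenever a mutable entry is handed out, and recompute it on demand with one full pass over the map.

#include <stddef.h>
#include <string.h>

struct header_entry {
    const char *key;
    char        value[64];
};

struct header_map {
    struct header_entry entries[16];
    size_t count;
    size_t cached_bytes;
    int    cache_valid;   /* the "optional": cleared on mutable access */
};

/* Handing out a mutable entry may change its size: drop the cache. */
static struct header_entry *header_map_mutable(struct header_map *m, size_t i)
{
    m->cache_valid = 0;
    return &m->entries[i];
}

/* refreshByteSize-style recompute: one pass summing keys and values. */
static size_t header_map_byte_size(struct header_map *m)
{
    if (!m->cache_valid) {
        size_t total = 0;
        for (size_t i = 0; i < m->count; i++)
            total += strlen(m->entries[i].key)
                   + strlen(m->entries[i].value);
        m->cached_bytes = total;
        m->cache_valid = 1;
    }
    return m->cached_bytes;
}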
virtual void setEndStream(bool end) { end_stream_ = end; }
| 0 |
[
"CWE-400"
] |
envoy
|
dfddb529e914d794ac552e906b13d71233609bf7
| 301,010,727,412,735,850,000,000,000,000,000,000,000 | 1 |
listener: Add configurable accepted connection limits (#153)
Add support for per-listener limits on accepted connections.
Signed-off-by: Tony Allen <[email protected]>
|